blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
2d54ab8406d48e4b2766eecf8de496fbe0112961 | Shell | petronny/aur3-mirror | /libnodave/PKGBUILD | UTF-8 | 727 | 2.71875 | 3 | [] | no_license | # Contributor: Andre Klitzing <aklitzing () online () de>
# Arch Linux PKGBUILD for libnodave (Siemens PLC communication library).
pkgname=libnodave
pkgver=0.8.4.4
pkgrel=1
pkgdesc="Exchange data with Siemens PLCs"
arch=('i686' 'x86_64')
url="http://sourceforge.net/projects/libnodave"
license=('LGPL')
# amd64_fpic.patch is a local source file applied only on x86_64 builds.
source=(http://downloads.sourceforge.net/$pkgname/${pkgname}-${pkgver}.zip amd64_fpic.patch)
md5sums=('d3f960408bbfc811e107b61d387f8bba'
'eab02468487aef078bfda2e60a15cb60')
# Build the shared library and install it plus its header into $pkgdir.
# NOTE(review): `|| return 1` is the legacy (pre-makepkg-errexit) error style;
# install steps belong in a package() function under modern makepkg — kept as-is.
build() {
cd "$srcdir"
if [ "${CARCH}" = "x86_64" ]; then
patch -Np0 -i amd64_fpic.patch || return 1
fi
make libnodave.so || return 1
install -D -m 644 libnodave.so $pkgdir/usr/lib/libnodave.so || return 1
install -D -m 644 nodave.h $pkgdir/usr/include/nodave.h || return 1
}
# vim:set ts=2 sw=2 et:
| true |
b2c21ad63d59edc5a256da8eddf909cb2631d7d2 | Shell | rlaneyjr/prezto | /runcoms/zpreztorc | UTF-8 | 6,818 | 2.59375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# vim: noai:et:tw=80:ts=2:ss=2:sts=2:sw=2:ft=zsh
#
# Sets Prezto options.
#
# Authors:
# Ricky Laney <rlaneyjr@gmail.com>
#
# ### zpreztorc
#
# This file configures Prezto. Should ONLY contain Prezto settings!
########################################
# The configuration files are read in the following order:
#
# 01. /etc/zshenv
# 02. ~/.zshenv
# 03. /etc/zprofile
# 04. ~/.zprofile
# 05. /etc/zshrc
# 06. ~/.zshrc
# 07. ~/.zpreztorc
# 08. /etc/zlogin
# 09. ~/.zlogin
# 10. ~/.zlogout
# 11. /etc/zlogout
########################################
# General
#
# Set case-sensitivity for completion, history lookup, etc.
# zstyle ':prezto:*:*' case-sensitive 'yes'
# Color output (auto set to 'no' on dumb terminals).
zstyle ':prezto:*:*' color 'yes'
# Add additional directories to load prezto modules from
#zstyle ':prezto:load' pmodule-dirs $HOME/.zsh/plugins/wd
# Must be defined for zsh/complist
#export ZLS_COLORS=true
zmodload zsh/complist
# Set the Zsh modules to load (man zshmodules).
#zstyle ':prezto:load' zmodule 'attr' 'stat' 'complist'
zstyle ':prezto:load' zmodule 'complist'
# Set the Zsh functions to load (man zshcontrib).
#zstyle ':prezto:load' zfunction 'zargs' 'zmv' 'cdr'
#zstyle ':prezto:load' zfunction 'cdr'
#zstyle ':completion:*:*:cdr:*:*' menu selection
#zstyle ':chpwd:*' recent-dirs-max 0
# Set the Prezto modules to load (browse modules).
# The order matters.
zstyle ':prezto:load' pmodule \
'environment' \
'directory' \
'history' \
'editor' \
'completion' \
'spectrum' \
'git' \
'rsync' \
'archive' \
'python' \
'syntax-highlighting' \
'history-substring-search' \
'command-not-found' \
'alias-tips' \
'contrib-clipboard' \
'contrib-kubernetes' \
'contrib-prompt' \
'prompt'
# Removed
# 'zsh-better-npm-completion' \
# 'zsh-nvm' \
# 'homebrew' \
# Autosuggestions
#
# Set the query found color.
#zstyle ':prezto:module:autosuggestions:color' found ''
# Completions
#
# Set the entries to ignore in static */etc/hosts* for host completion.
# zstyle ':prezto:module:completion:*:hosts' etc-host-ignores \
# '0.0.0.0' '127.0.0.1'
# Editor
#
# Set the key mapping style to 'emacs' or 'vi'.
zstyle ':prezto:module:editor' key-bindings 'vi'
# Auto convert .... to ../..
#zstyle ':prezto:module:editor' dot-expansion 'yes'
# Allow the zsh prompt context to be shown.
zstyle ':prezto:module:editor' ps-context 'yes'
# Git
#
# Ignore submodules when they are 'dirty', 'untracked', 'all', or 'none'.
zstyle ':prezto:module:git:status:ignore' submodules 'none'
#zstyle ':prezto:module:git:alias' skip 'yes'
# The format of the [git-log][8] output is configurable via the following style,
# where context is *brief*, *oneline*, and *medium*, which will be passed to the
# `--pretty=format:` switch.
zstyle ':prezto:module:git:log:context' format 'oneline'
# GNU Utility
#
# Set the command prefix on non-GNU systems.
# zstyle ':prezto:module:gnu-utility' prefix 'g'
# History Substring Search
#
zstyle ':prezto:module:history-substring-search' color 'yes'
# Set the query found color.
zstyle ':prezto:module:history-substring-search:color' found ''
# Set the query not found color.
zstyle ':prezto:module:history-substring-search:color' not-found ''
# Set the search globbing flags.
zstyle ':prezto:module:history-substring-search' globbing-flags ''
# macOS
#
# Set the keyword used by `mand` to open man pages in Dash.app
#zstyle ':prezto:module:osx:man' dash-keyword 'manpages'
# Pacman
#
# Set the Pacman frontend.
# zstyle ':prezto:module:pacman' frontend 'yaourt'
# Prompt
#
# Set the prompt theme to load.
# Setting it to 'random' loads a random theme.
# Auto set to 'off' on dumb terminals.
#zstyle ':prezto:module:prompt' theme 'off'
#zstyle ':prezto:module:prompt' theme 'sorin'
#zstyle ':prezto:module:prompt' theme 'belak'
#zstyle ':prezto:module:contrib-prompt' theme 'spaceship'
zstyle ':prezto:module:prompt' theme 'sorin_contrib'
#zstyle ':prezto:module:prompt' theme 'josh'
#zstyle ':prezto:module:prompt' theme 'steeef'
# Set the working directory prompt display length.
# By default, it is set to 'short'. Set it to 'long' (without '~' expansion)
# for longer or 'full' (with '~' expansion) for even longer prompt display.
# zstyle ':prezto:module:prompt' pwd-length 'short'
# Set the prompt to display the return code along with an indicator for non-zero
# return codes. This is not supported by all prompts.
# zstyle ':prezto:module:prompt' show-return-val 'yes'
# Python
#
# Auto switch the Python virtualenv on directory change.
# zstyle ':prezto:module:python:virtualenv' auto-switch 'yes'
# Automatically initialize virtualenvwrapper if pre-requisites are met.
#zstyle ':prezto:module:python:virtualenv' initialize 'yes'
# Ruby
#
# Auto switch the Ruby version on directory change.
# zstyle ':prezto:module:ruby:chruby' auto-switch 'yes'
# Screen
#
# Auto start a session when Zsh is launched in a local terminal.
# zstyle ':prezto:module:screen:auto-start' local 'yes'
# Auto start a session when Zsh is launched in a SSH connection.
# zstyle ':prezto:module:screen:auto-start' remote 'yes'
# SSH
#
# Set the SSH identities to load into the agent.
#zstyle ':prezto:module:ssh:load' identities 'cloud_id' 'id_rsa' 'ssh_rsa' 'het_rsa' 'HSC-ETNOC'
# Syntax Highlighting
#
# Set syntax highlighters.
# By default, only the main highlighter is enabled.
zstyle ':prezto:module:syntax-highlighting' highlighters \
'main' \
'brackets' \
'pattern' \
'line' \
'cursor' \
'root'
# Set syntax highlighting styles.
#zstyle ':prezto:module:syntax-highlighting' styles \
# 'builtin' 'bg=blue' \
# 'command' 'bg=blue' \
# 'function' 'bg=blue'
# Set syntax pattern styles.
#zstyle ':prezto:module:syntax-highlighting' pattern \
# 'rm*-rf*' 'fg=white,bold,bg=red'
# Terminal
#
# Auto set the tab and window titles.
#zstyle ':prezto:module:terminal' auto-title 'yes'
# Set the window title format.
#zstyle ':prezto:module:terminal:window-title' format '%n@%m: %s'
# Set the tab title format.
#zstyle ':prezto:module:terminal:tab-title' format '%m: %s'
# Set the terminal multiplexer title format.
#zstyle ':prezto:module:terminal:multiplexer-title' format '%s'
# Tmux
#
# Auto start a session when Zsh is launched in a local terminal.
#zstyle ':prezto:module:tmux:auto-start' local 'yes'
# Auto start a session when Zsh is launched in a SSH connection.
#zstyle ':prezto:module:tmux:auto-start' remote 'yes'
# Integrate with iTerm2.
#zstyle ':prezto:module:tmux:iterm' integrate 'yes'
# Set the default session name:
#zstyle ':prezto:module:tmux:session' name '0'
# Utility
#
# Enabled safe options. This aliases cp, ln, mv and rm so that they prompt
# before deleting or overwriting files. Set to 'no' to disable this safer
# behavior.
#zstyle ':prezto:module:utility' safe-ops 'yes'
| true |
178b5bdefe06dbf86b5e85c9ce686dc5ff6de8f8 | Shell | doncarr/mosat | /utils/artc-netconf | UTF-8 | 810 | 3.859375 | 4 | [] | no_license | #!/bin/sh
# Print the accepted key=value arguments for this network-config script,
# then exit 0 (usage is informational, not an error here).
function usage {
echo "$0 [address=<ip addr>] [gateway=<ip addr>] [netmask=<ip addr>]
[network=<ip addr>] [broadcast=<ip addr>] [mode=dhcp|static]
"
exit 0
}
# Show usage when asked for help or when called with no arguments.
if [ "$1" == "--help" ] || [ "$1" == "-h" ] || [ $# -eq 0 ]
then
usage
fi
# Rewrite /etc/network/interfaces for eth0 via the awk helper; write the
# candidate config to /tmp first so a failed rewrite leaves the system intact.
awk -f changeInterface.awk /etc/network/interfaces device=eth0 $@ > /tmp/interfaces
# if ip change is successful
if [ $? -eq 0 ]
then
# if the change was not dhcp
# write dns at /etc/resolv.conf
if [ -z "$(echo $@ | grep dhcp)" ]
then
echo "nameserver 8.8.8.8" > /etc/resolv.conf
fi
mv /tmp/interfaces /etc/network/interfaces
echo "Changes apply successfully"
echo $@
else
echo "Network changes not valid.."
echo $@
usage
fi
| true |
f3b6431d871facb8f2b0b115578044a3ab63b480 | Shell | prenaux/ham | /toolsets/svn/svn-ignore-update | UTF-8 | 1,244 | 3.984375 | 4 | [
"Jam",
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Print help for svn-ignore-update. Returns 1 when this file was sourced
# (so the caller's shell survives) and exits 1 when run as a script.
usage() {
echo "usage: svn-ignore-update directory"
echo " Apply the _svnignore_r & _svnignore_this in the specified directory."
echo ""
echo " _svnignore_r contains the patterns that must be applied to all the subdirectories."
echo " _svnignore_this contains the patterns that must be applied only to specified directory."
echo ""
echo " Not that _svnignore_this overrides completely _svnignore_r for the specified directory,"
echo " so patterns must be duplicated in both ignore list if required."
echo ""
# BASH_SOURCE[0] != $0 means the file was sourced rather than executed.
if [[ "${BASH_SOURCE[0]}" != "${0}" ]]; then
# sourced...
return 1
else
# regular call
exit 1
fi
}
# Validate the target directory and its two ignore-pattern files, then apply
# them: _svnignore_r recursively (-R), _svnignore_this to the directory only.
DEST="$1"
if [ -z "$DEST" ]; then
echo "E/SVN folder not specified."
usage
fi
SVN_IGNORE_R="$DEST/_svnignore_r"
if [ ! -e "$SVN_IGNORE_R" ]; then
echo "E/'$SVN_IGNORE_R' not found"
usage
fi
SVN_IGNORE_THIS="$DEST/_svnignore_this"
if [ ! -e "$SVN_IGNORE_THIS" ]; then
echo "E/'$SVN_IGNORE_THIS' not found"
usage
fi
echo "I/Apply _svnignore_r & _svnignore_this"
# Fail fast from here on: every svn step must succeed.
set -e
cd "$DEST"
svn add --parents --depth=empty --force .
svn propset svn:ignore -R -F ./_svnignore_r .
svn propset svn:ignore -F ./_svnignore_this .
| true |
51bbac7f5644c8f3c9fca54d244c8fbe7fbcf3ca | Shell | Vincentx15/AdvancedAI1 | /check_solutions.sh | UTF-8 | 928 | 2.875 | 3 | [] | no_license | #!/bin/bash
####### SETUP #######
# Currently in some directory like /Fristname Lastname_ID_assignsubmission_file_/ (named so automatically by Moodle)
# Print to see the name
#echo $PWD
# Students have *not* renamed the zip file or added a subfolder, so, simply:
#unzip lab3.zip
# Copy back in the original tasks (with changes to the parameters)
#cp ~/tmp/run_task1.py .
#cp ~/tmp/generate_graph.py .
#cp ~/tmp/run_task2.py .
####### TASK 1 #######
# Generate a new graph
python3 generate_graph.py
# Run the task -- check the result
python3 run_task1.py | grep "Path:"
# Revise the code
vim astar.py
####### TASK 2 #######
# Run the task -- check the result
python3 run_task2.py | grep "Loss per test instance:"
# Compile the graph and have a look at it
dot probability_tree_exploration.dot -Tpdf -o probability_tree_exploration.pdf
evince probability_tree_exploration.pdf
# Finally, revise the code
vim cc.py
| true |
e9645ef53bf396c4835ff6e677352b07ee657944 | Shell | aaronchongth/rmf_setup | /scripts/setup_ros2_ws.bash | UTF-8 | 895 | 3.78125 | 4 | [] | no_license | #!/bin/bash
set -e
# Symlink a package directory into the workspace.
#   $1 - source package directory (must exist)
#   $2 - symlink to create
# Exits with status 1 when the source directory is missing.
function symlink_packages {
    if [ ! -d "$1" ]; then
        # FIX: ERRCODE is never assigned anywhere in this script, so the old
        # `exit $ERRCODE` expanded to a bare `exit`, which exited with the
        # status of the preceding echo (0) — the error path reported success.
        echo " target package directory [$1] not found." >&2
        exit 1
    else
        ln -s "$1" "$2"
    fi
}
# Validate RMF_PATH and its repos checkout, create the colcon workspace
# src/ directory, and symlink the required packages into it.
echo ">[SETUP]<START> Setting up ROS 2 workspace"
# Check if RMF_PATH provided
# NOTE(review): ERRCODE is never set, so `exit $ERRCODE` is a bare `exit`
# and these error paths exit 0 — confirm and replace with `exit 1`.
if [ -z $RMF_PATH ]; then
echo " RMF environment path not set, please set path to RMF_PATH."
exit $ERRCODE;
fi
REPOS_PATH=$RMF_PATH/repos
if [ ! -d $REPOS_PATH ]; then
echo " RMF repositories not found."
exit $ERRCODE
fi
ROS2_WS_PATH=$RMF_PATH/ros2_ws
ROS2_WS_SRC=$ROS2_WS_PATH/src
if [ ! -d $ROS2_WS_SRC ]; then
mkdir -p $ROS2_WS_SRC
fi
# Symlink each required repo into the workspace src directory.
pushd $ROS2_WS_SRC > /dev/null
symlink_packages $REPOS_PATH/free_fleet/servers/ros2 free_fleet_server
symlink_packages $REPOS_PATH/rmf_core rmf_core
symlink_packages $REPOS_PATH/traffic_editor traffic_editor
popd > /dev/null
echo ">[SETUP]<DONE> Setting up ROS 2 workspace"
| true |
f4e52d30e514923738e3497c8d57d916577411b1 | Shell | mgilardi/PantheonSiteSync | /db/variants/local_db_dump-mysql_with_mysql_config_editor.sh | UTF-8 | 1,469 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Dump the local MySQL database ($LOCAL_DB_DB) into $SQL_DIR, skipping the
# data of cache/watch_dog tables: first a structure-only dump of everything,
# then a data dump that excludes the matched tables. Credentials come from
# mysql's --login-path store ($LOCAL_DB_LOGIN_PATH).
# NOTE(review): set_task_start_time / show_tasks_duration and the env vars
# (SQL_DIR, ENV, LOCAL_DB_*, USER_USER, USER_GROUP) are defined elsewhere.
# Uses GNU sed as `gsed` and `pv` for progress — macOS-style tooling assumed.
function local_db_dump() {
# TABLE EXCLUSION
local exclude='\(cache\|watch_dog\)'
# DUMP FILE
local DATE_FMT='+%Y-%m-%d_%H.%M.%S'
local dump_file="$SQL_DIR/local_${ENV}_${LOCAL_DB_DB}_"$(date $DATE_FMT).sql
local dump_base_opts=' --compress --opt'
local dump_debug_opt=' --extended-insert'
local dump_data_opts="$dump_base_opts"
local dump_struct_opts="$dump_base_opts --no-data"
echo
#
# CREATE STRING TO EXCLUDE UNNEEDED TABLES
#
# Build " --ignore-table=db.t1 --ignore-table=db.t2 ..." from the comma list.
set_task_start_time 'getting tables to exclude'
exclude_tables=$(mysql --login-path=$LOCAL_DB_LOGIN_PATH --execute="SHOW TABLES;" $LOCAL_DB_DB | grep "$exclude" | tr '\n' ',')
exclude_tables_mysqldump_str=$(echo "$exclude_tables" | gsed "s#\([^,]\+\),# --ignore-table=$LOCAL_DB_DB.\1#g")
show_tasks_duration
#
# GET THE TABLES STRUCTURES
#
set_task_start_time 'dumping table structures'
mysqldump --login-path=$LOCAL_DB_LOGIN_PATH $dump_struct_opts $LOCAL_DB_DB | pv -brt > "$dump_file"
show_tasks_duration
#
# GET THE TABLES DATA
#
# Appends to the structure dump created above.
set_task_start_time 'dumping table data'
mysqldump --login-path=$LOCAL_DB_LOGIN_PATH $dump_data_opts $exclude_tables_mysqldump_str $LOCAL_DB_DB | pv -brt >> "$dump_file"
# mysqldump --login-path=$LOCAL_DB_LOGIN_PATH $dump_data_opts $LOCAL_DB_DB | pv -brt >> "$dump_file"
show_tasks_duration
chown $USER_USER:$USER_GROUP $dump_file
tree -sthr $SQL_DIR
}
| true |
2b83317a8d152e61b21b451fce71f04010acf40a | Shell | FlowCI/docker-install | /agent.sh | UTF-8 | 3,151 | 4.125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
## The script used to start flow.ci agent from docker ##
# Print usage, options and commands for agent.sh, then exit 1
# (called both for explicit help and for invalid invocations).
printHelp()
{
echo ""
echo "Usage: $0 [OPTIONS] COMMAND"
echo ""
echo "Example: ./agent.sh -u http://172.20.10.4:8080 -t token_from_ci_server -m docker start"
echo ""
echo "Options:"
echo -e " -t\t Agent token from ci server"
echo -e " -u\t Server url"
echo -e " -m\t Start agent by <docker> or <binary>"
echo ""
echo "Commands:"
echo -e " start\t start an agent"
echo -e " stop\t stop an agnet"
echo -e " clean\t remove agent container"
echo -e " help\t print help message"
exit 1 # Exit script after printing help
}
checkUrlArg()
{
if [[ ! -n $URL ]]; then
echo "[ERROR] Server url is missing..."
printHelp
fi
}
checkTokenArg()
{
if [[ ! -n $TOKEN ]]; then
echo "[ERROR] Agent token is missing..."
printHelp
fi
}
# Dispatch on the -m flag: start the agent via docker or as a native binary.
# Falls back to docker when no model was given.
startAgent()
{
echo $URL
echo $TOKEN
echo $MODEL
if [[ $MODEL == "docker" ]]; then
startFromDocker
elif [[ $MODEL == "binary" ]]; then
startFromBinary
else
echo "[WARN] Agent start model not defined, using default docker agent"
startFromDocker
fi
}
# Run the agent as a native binary: download the release for this OS into
# ./bin (once), create the per-token workspace dir, and exec the agent in
# the foreground. Exits 1 on unsupported operating systems.
startFromBinary()
{
mkdir -p ./bin
target_bin=./bin/flow-agent-x
# Download only when not already cached locally.
if [[ ! -f $target_bin ]]; then
if [[ $OSTYPE == "darwin"* ]]; then
curl -L -o $target_bin https://github.com/FlowCI/flow-agent-x/releases/download/v$AGENT_VERSION/flow-agent-x-mac
elif [[ $OSTYPE == "linux"* ]]; then
curl -L -o $target_bin https://github.com/FlowCI/flow-agent-x/releases/download/v$AGENT_VERSION/flow-agent-x-linux
else
echo "[WARN]: Agent not supported for os $OSTYPE"
exit 1
fi
fi
chmod +x $target_bin
# Workspace is keyed by the token so multiple agents can coexist.
AGENT_HOST_DIR=$HOME/.agent.$TOKEN
mkdir -p $AGENT_HOST_DIR
echo "Starting agent from binary"
$target_bin -u $URL -t $TOKEN -w $AGENT_HOST_DIR -m "name=pyenv,dest=/ci/python,script=init.sh,image=flowci/pyenv,init=init-pyenv-volume.sh"
}
# Run the agent as a docker container: reuse a running container, restart an
# exited one, or create a fresh one bound to the per-token workspace dir.
# Relies on RUNNING_CONTAINER/EXISTED_CONTAINER/CONTAINER_NAME computed in main.
startFromDocker()
{
AGENT_HOST_DIR=$HOME/.agent.$TOKEN
mkdir -p $AGENT_HOST_DIR
if [[ -n $RUNNING_CONTAINER ]]; then
echo "Agent with token $TOKEN is running"
elif [[ -n $EXISTED_CONTAINER ]]; then
echo "Agent with token $TOKEN will restarted"
docker start -i $EXISTED_CONTAINER
else
# Mounts the host docker socket so the agent can spawn job containers.
docker run -it \
--name $CONTAINER_NAME \
-e FLOWCI_SERVER_URL=$URL \
-e FLOWCI_AGENT_TOKEN=$TOKEN \
-e FLOWCI_AGENT_VOLUMES="name=pyenv,dest=/ci/python,script=init.sh,image=flowci/pyenv,init=init-pyenv-volume.sh" \
-e FLOWCI_AGENT_WORKSPACE="/ws" \
-v $AGENT_HOST_DIR:/ws \
-v pyenv:/ci/python \
-v /var/run/docker.sock:/var/run/docker.sock \
flowci/agent:$AGENT_VERSION
fi
}
# Parse -u/-t/-m options, then dispatch on the trailing COMMAND word.
while getopts ":u:t:m:" arg; do
case $arg in
u) URL=$OPTARG;;
t) TOKEN=$OPTARG;;
m) MODEL=$OPTARG;;
esac
done
AGENT_VERSION=0.21.21
# The command is the last positional argument (e.g. "... start").
COMMAND="${@: -1}"
CONTAINER_NAME="flowci-agent-$TOKEN"
# Probe docker for an existing container with this name, by state.
RUNNING_CONTAINER=$(docker ps -aq -f name=$CONTAINER_NAME -f status=running)
EXISTED_CONTAINER=$(docker ps -aq -f name=$CONTAINER_NAME -f status=exited)
case $COMMAND in
start)
checkUrlArg
checkTokenArg
startAgent
;;
stop)
checkTokenArg
docker stop $CONTAINER_NAME
;;
clean)
checkTokenArg
docker rm -f $CONTAINER_NAME
;;
*)
printHelp
;;
esac
| true |
2abe9152f6f9248ae0df3f5051a8bef79ef217de | Shell | naqushab/teton | /shells/zsh/functions/colorlist | UTF-8 | 154 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/usr/local/bin/zsh
# zsh: print the 256 terminal colors, each number zero-padded to 3 digits and
# rendered in its own color (%F/%f prompt escapes via print -P), 8 per line.
function colorlist (){
for i in {0..255}; do
print -Pn "%${i}F${(l:3::0:)i}%f " ${${(M)$((i%8)):#7}:+$'\n'};
done
}
| true |
7751bf1bab48d291a24e1bac0a986de8f6badeec | Shell | ShalokShalom/apps | /gmic-qt/PKGBUILD | UTF-8 | 1,242 | 2.828125 | 3 | [] | no_license |
# Arch Linux PKGBUILD for the G'MIC Qt plug-in, built for the Krita host.
pkgname=gmic-qt
pkgver=2.9.8
# Pinned upstream commits kept from the earlier GitHub-archive source lines.
_gmic=74a9457dd807682ac6e767f712402c6c1e2c688a
_commit=8548172228870168e5fe1ecefea5599a9a824d6e
pkgrel=1
pkgdesc="Plug-in to bring the full-featured open-source framework for image processing -G'MIC- to Krita"
url="https://gmic.eu/"
license=('GPLv3')
arch=('x86_64')
depends=('qt5-base' 'libpng' 'zlib' 'fftw' 'curl')
makedepends=('cmake' 'qt5-tools')
#source=("https://github.com/dtschump/gmic/archive/${_gmic}.zip"
# "https://github.com/c-koi/gmic-qt/archive/${_commit}.zip")
source=("https://gmic.eu/files/source/gmic_${pkgver}.tar.gz")
md5sums=('ca062df5c2fdd37bf6fadbeac3a26a94')
# Generate the G'MIC headers, then build the Qt plug-in via qmake
# (the commented cmake invocation is the alternative build path).
build() {
cd gmic-${pkgver}/
make -C src CImg.h gmic_stdlib.h
cd gmic-qt
/usr/lib/qt5/bin/qmake GMIC_PATH=../src GMIC_DYNAMIC_LINKING=off HOST=krita
#cmake \
# -DCMAKE_BUILD_TYPE=Release \
# -DCMAKE_INSTALL_PREFIX=/usr \
# -DGMIC_QT_HOST=krita \
# -GMIC_DYNAMIC_LINKING=on \
# -DGMIC_PATH="../src"
make
}
# Install the built binary and its license manually (no `make install` target used).
package() {
cd gmic-${pkgver}/gmic-qt
#make DESTDIR=${pkgdir} install
install -D -m755 gmic_krita_qt ${pkgdir}/usr/bin/gmic_krita_qt
#install -D -m755 *.qm ${pkgdir}/usr/share/gmic-qt/locale/
install -Dm644 COPYING ${pkgdir}/usr/share/licenses/${pkgname}/COPYING
}
| true |
7f35f57b19e933108d21f6cfa566a8bb80e79ab7 | Shell | doohee323/buildGoInChroot | /chroot.sh | UTF-8 | 1,964 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env bash
export APP=hello
export APP_DIR=`pwd`
export UBUNTU_VERSION=$1
export APP_VERSION=$2
if [[ $# -eq 0 ]];
then
UBUNTU_VERSION="precise"
APP_VERSION="0.0.1"
fi
echo "=[build $UBUNTU_VERSION in ~/chroot/$UBUNTU_VERSION]==============================================================="
echo "APP_DIR: $APP_DIR"
echo "UBUNTU_VERSION: $UBUNTU_VERSION"
echo "APP_VERSION: $APP_VERSION"
sudo umount ~/chroot/$UBUNTU_VERSION/proc
sudo umount ~/chroot/$UBUNTU_VERSION/sys
#sudo umount ~/chroot/$UBUNTU_VERSION/dev
#sudo umount ~/chroot/$UBUNTU_VERSION/dev/shm
sudo rm -Rf ~/chroot/$UBUNTU_VERSION
sudo mkdir -p ~/chroot/$UBUNTU_VERSION
sudo debootstrap --variant=buildd --arch amd64 $UBUNTU_VERSION ~/chroot/$UBUNTU_VERSION http://archive.ubuntu.com/ubuntu
sudo mount -o bind /proc ~/chroot/$UBUNTU_VERSION/proc
sudo mount -o bind /sys ~/chroot/$UBUNTU_VERSION/sys
#sudo mount -o bind /dev ~/chroot/$UBUNTU_VERSION/dev
#sudo mount -o bind /dev/shm ~/chroot/$UBUNTU_VERSION/dev/shm
echo "=[copy $APP]====================================================================================="
echo "rm -Rf ~/chroot/$UBUNTU_VERSION/$APP"
sudo rm -Rf ~/chroot/$UBUNTU_VERSION/$APP
sudo mkdir -p ~/chroot/$UBUNTU_VERSION/$APP
echo "sudo cp -Rf $APP_DIR/* ~/chroot/$UBUNTU_VERSION/$APP"
sudo cp -Rf $APP_DIR/* ~/chroot/$UBUNTU_VERSION/$APP
echo "sudo cp $APP_DIR/chroot_$APP.sh ~/chroot/$UBUNTU_VERSION/$APP"
sudo cp $APP_DIR/chroot_$APP.sh ~/chroot/$UBUNTU_VERSION/$APP
sudo chmod 777 ~/chroot/$UBUNTU_VERSION/$APP/chroot_$APP.sh
sudo cp /etc/hosts ~/chroot/$UBUNTU_VERSION/etc/hosts
sudo cp /etc/resolv.conf ~/chroot/$UBUNTU_VERSION/etc/resolv.conf
sudo chroot ~/chroot/$UBUNTU_VERSION /bin/bash -c "cd /$APP; bash chroot_$APP.sh $UBUNTU_VERSION $APP_VERSION"
echo "sudo chroot ~/chroot/$UBUNTU_VERSION"
sudo chroot ~/chroot/$UBUNTU_VERSION
cp ~/chroot/$UBUNTU_VERSION/$APP/builds/$APP_VERSION/$APP-$UBUNTU_VERSION-$APP_VERSION.deb .
exit 0
| true |
34913b4c230d01ffbd15885e5ab5793b3b294f21 | Shell | konstantin/perlfax | /mailparse | UTF-8 | 264 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Supervisor loop for mailparse.pl: keep restarting it until it exits cleanly.
#   $1 - optional log file (defaults to /dev/null)
LOG=${1:-/dev/null}
while true; do
    /usr/local/bin/mailparse.pl
    if [ $? -eq 0 ]; then
        # exit status 0 == deliberate shutdown; stop supervising
        echo "server was shutdown normally" >> "$LOG"
        exit
    fi
    # FIX: the restart message passed $LOG as an *argument* to echo instead of
    # redirecting to it (compare the shutdown branch above), so restarts were
    # printed to stdout with the log path appended and never reached the log.
    echo "our server died, going to start another one" >> "$LOG"
done
| true |
2743e8ff57a411b61f45023e1de269086e6da286 | Shell | GlobalDataverseCommunityConsortium/dataverse | /scripts/installer/custom-build-number | UTF-8 | 344 | 3.515625 | 4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/bin/sh
if [ -z "$1" ]; then
SCRIPTPATH=$(dirname "$0")
cd $(git rev-parse --show-toplevel)
$SCRIPTPATH/custom-build-number-hook
NUM=$(cat src/main/java/BuildNumber.properties | cut -f2 -d=)
echo "No custom build number specified. Using \"$NUM\"."
else
echo "build.number=$@" > src/main/java/BuildNumber.properties
fi
| true |
c4be92ee91d57765beb9776506bf626c79d9ce9a | Shell | koelnconcert/dotfiles | /bin/git-recursive | UTF-8 | 418 | 3.71875 | 4 | [] | no_license | #!/bin/bash
#find -type d -name .git -printf "\n==> %h\n" -execdir git "$@" \;
# Run the given git command in every git repository below the current
# directory.  --label=top prefixes each repo's output with a "==> dir"
# header; --label=lines prefixes every output line with the repo path.
label="top"
if [[ $1 = --label=* ]]; then
  label=${1#*=}
  shift
fi
# FIX: use `IFS= read -r -d ''` (was `read -d $'\0'` without -r) so paths with
# backslashes or leading/trailing whitespace survive, and quote every path
# expansion so directories containing spaces work.
find -type d -name .git -print0 | sort -z | while IFS= read -r -d '' gitdir; do
  dir=$(dirname "$gitdir")
  [ "$label" = "lines" ] && sed_label="s|^|$dir |"
  [ "$label" = "top" ] && sed_label="1 i \\\n==> $dir"
  (
    cd "$dir" || exit
    git "$@" 2>&1
  ) | sed "$sed_label"
done
| true |
7a80bde515ea06ce1651f696ad99b3ec894e9fd0 | Shell | karlfloersch/sandboxkarl | /README.md | UTF-8 | 3,478 | 3.890625 | 4 | [] | no_license | #! /bin/sh -
##
# Install OpenVPN connections for all available
# regions to NetworkManager
##
# Requirements:
# should be run as root
# python and openvpn (will be installed if not present)
##
# Usage:
# install [--version]
IFS='
'
SERVER_INFO=/tmp/server_info
SPLIT_TOKEN=':'
# Print all arguments to stderr and abort the script with status 1.
error( )
{
echo "$@" 1>&2
exit 1
}
# Print an error to stderr, show usage, and exit 1.
error_and_usage( )
{
echo "$@" 1>&2
usage_and_exit 1
}
# Print a one-line usage synopsis for this installer.
usage( )
{
    printf 'Usage: sudo %s/%s\n' "$(dirname "$0")" "$PROGRAM"
}
# Show usage and exit with the status given in $1.
usage_and_exit( )
{
usage
exit $1
}
# Print "<program> version <version>" on stdout.
version( )
{
    printf '%s version %s\n' "$PROGRAM" "$VERSION"
}
# Prompt for the PIA account login and store it in the global LOGIN;
# abort the installation when nothing was entered.
# NOTE(review): `read` without -r and the unquoted [ -z $LOGIN ] misbehave on
# logins containing backslashes or spaces — confirm acceptable for this use.
read_user_login( )
{
echo -n "Please enter your login: "
read LOGIN
if [ -z $LOGIN ]; then
error "A login must be provided for the installation to proceed"
fi
}
# Abort with usage unless the effective uid is 0 (root).
verify_running_as_root( )
{
if [ `/usr/bin/id -u` -ne 0 ]; then
error_and_usage "$0 must be run as root"
fi
}
# Ensure python2.7 is installed (Debian/Ubuntu): prompt the user and
# apt-get install it when missing; abort the installer on refusal or failure.
install_python_version( )
{
# dpkg -l lines starting with "ii" mean the package is installed.
if ! dpkg -l python2.7 | grep '^ii' > /dev/null ; then
echo -n 'Package python2.7 required. Install? (y/n): '
read install_python
if [ $install_python = 'y' ]; then
echo "Installing python2.7.."
if ! apt-get install python2.7; then
error "Error installing python2.7 Aborting.."
fi
else
error "Package python2.7 is required for installation. Aborting.."
fi
else
echo "Package python2.7 already installed"
fi
}
# Ensure network-manager-openvpn is installed; same prompt/abort flow as
# install_python_version above.
install_open_vpn( )
{
if ! dpkg -l network-manager-openvpn | grep '^ii' > /dev/null ; then
echo -n 'Package network-manager-openvpn required. Install? (y/n): '
read install_openvpn
if [ $install_openvpn = 'y' ]; then
echo "Installing network-manager-openvpn.."
if ! apt-get install network-manager-openvpn; then
error "Error installing network-manager-openvpn. Aborting.."
fi
else
error "Package network-manager-openvpn is required for installation. Aborting.."
fi
else
echo "Package network-manager-openvpn already installed"
fi
}
# Write the embedded PIA CA certificate to /etc/openvpn/ca.crt, which the
# generated NetworkManager connection profiles reference as their CA.
# The heredoc body is the certificate itself and must not be altered.
copy_crt( )
{
echo 'Copying certificate..'
mkdir -p /etc/openvpn
cat << EOF > /etc/openvpn/ca.crt
-----BEGIN CERTIFICATE-----
MIID2jCCA0OgAwIBAgIJAOtqMkR2JSXrMA0GCSqGSIb3DQEBBQUAMIGlMQswCQYD
VQQGEwJVUzELMAkGA1UECBMCT0gxETAPBgNVBAcTCENvbHVtYnVzMSAwHgYDVQQK
ExdQcml2YXRlIEludGVybmV0IEFjY2VzczEjMCEGA1UEAxMaUHJpdmF0ZSBJbnRl
cm5ldCBBY2Nlc3MgQ0ExLzAtBgkqhkiG9w0BCQEWIHNlY3VyZUBwcml2YXRlaW50
ZXJuZXRhY2Nlc3MuY29tMB4XDTEwMDgyMTE4MjU1NFoXDTIwMDgxODE4MjU1NFow
gaUxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJPSDERMA8GA1UEBxMIQ29sdW1idXMx
IDAeBgNVBAoTF1ByaXZhdGUgSW50ZXJuZXQgQWNjZXNzMSMwIQYDVQQDExpQcml2
YXRlIEludGVybmV0IEFjY2VzcyBDQTEvMC0GCSqGSIb3DQEJARYgc2VjdXJlQHBy
aXZhdGVpbnRlcm5ldGFjY2Vzcy5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJ
AoGBAOlVlkHcxfN5HAswpryG7AN9CvcvVzcXvSEo91qAl/IE8H0knKZkIAhe/z3m
hz0t91dBHh5yfqwrXlGiyilplVB9tfZohvcikGF3G6FFC9j40GKP0/d22JfR2vJt
4/5JKRBlQc9wllswHZGmPVidQbU0YgoZl00bAySvkX/u1005AgMBAAGjggEOMIIB
CjAdBgNVHQ4EFgQUl8qwY2t+GN0pa/wfq+YODsxgVQkwgdoGA1UdIwSB0jCBz4AU
l8qwY2t+GN0pa/wfq+YODsxgVQmhgaukgagwgaUxCzAJBgNVBAYTAlVTMQswCQYD
VQQIEwJPSDERMA8GA1UEBxMIQ29sdW1idXMxIDAeBgNVBAoTF1ByaXZhdGUgSW50
ZXJuZXQgQWNjZXNzMSMwIQYDVQQDExpQcml2YXRlIEludGVybmV0IEFjY2VzcyBD
QTEvMC0GCSqGSIb3DQEJARYgc2VjdXJlQHByaXZhdGVpbnRlcm5ldGFjY2Vzcy5j
b22CCQDrajJEdiUl6zAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAByH
atXgZzjFO6qctQWwV31P4qLelZzYndoZ7olY8ANPxl7jlP3YmbE1RzSnWtID9Gge
fsKHi1jAS9tNP2E+DCZiWcM/5Y7/XKS/6KvrPQT90nM5klK9LfNvS+kFabMmMBe2
llQlzAzFiIfabACTQn84QLeLOActKhK8hFJy2Gy6
-----END CERTIFICATE-----
EOF
}
# Fetch the PIA server list (JSON, first line of the response) and write one
# "name:dns" line per region to $SERVER_INFO using an inline python2 snippet.
# NOTE(review): the JSON is interpolated into a single-quoted python literal —
# breaks if the payload ever contains a single quote.
parse_server_info( )
{
echo 'Loading servers information..'
json=`wget -q -O - 'http://privateinternetaccess.com/vpninfo/servers?version=24' |
head -1`
python > $SERVER_INFO <<EOF
payload = '$json'
import json
d = json.loads(payload)
print "\n".join([d[k]['name']+'$SPLIT_TOKEN'+d[k]['dns'] for k in d.keys() if k !=
'info'])
EOF
}
# Regenerate one NetworkManager keyfile per region: delete any previous
# "PIA - *" profiles, then read "name:dns" lines from $SERVER_INFO and emit a
# connection file for each (mode 600, as NetworkManager requires).
# The heredoc is the keyfile payload; its contents must stay as-is.
write_config_files( )
{
echo 'Removing previous config files if existing..'
rm -f /etc/NetworkManager/system-connections/PIA\ -\ *
echo 'Creating config files..'
IFS='
'
while read server_info; do
name="PIA - `echo $server_info | awk -F: '{print $1}'`"
dns=`echo $server_info | awk -F: '{print $2}'`
cat <<EOF > /etc/NetworkManager/system-connections/$name
[connection]
id=$name
uuid=`uuidgen`
type=vpn
autoconnect=false
[vpn]
service-type=org.freedesktop.NetworkManager.openvpn
username=$LOGIN
comp-lzo=yes
remote=$dns
connection-type=password
password-flags=1
ca=/etc/openvpn/ca.crt
[ipv4]
method=auto
EOF
chmod 600 /etc/NetworkManager/system-connections/$name
done < $SERVER_INFO
rm $SERVER_INFO
IFS='
'
}
# Bounce NetworkManager (disable then re-enable networking) so it picks up
# the newly written connection profiles.
restart_network_manager( )
{
echo 'Restarting network manager..'
/usr/bin/nmcli nm enable false
/usr/bin/nmcli nm enable true
}
# Entry point: handle --help/--version flags, then run the install pipeline.
EXITCODE=0
PROGRAM=`basename $0`
VERSION=1.0
while test $# -gt 0
do
case $1 in
--usage | --help | -h )
usage_and_exit 0
;;
--version | -v )
version
exit 0
;;
*)
error_and_usage "Unrecognized option: $1"
;;
esac
shift
done
# Install pipeline: prerequisites, credentials, CA cert, server list, profiles.
verify_running_as_root
install_python_version
install_open_vpn
read_user_login
copy_crt
parse_server_info
write_config_files
restart_network_manager
echo "Install successful!"
exit 0
| true |
7acd8aa51acc8f5217b9cd2004e30d473163357b | Shell | kinokasai/scripts | /include.sh | UTF-8 | 442 | 3.421875 | 3 | [] | no_license |
# Scan C source file $1 for uses of printf/puts/malloc and, when the matching
# header is not already included, prepend the missing #include directive(s),
# pausing for a keypress before printing the result.
# NOTE(review): the final echo only prints to stdout — the commented `> $1`
# suggests writing back to the file was intended but is disabled.
stdio=`cat -e $1 | grep -E ".(printf|puts)."`
stdlib=`cat -e $1 | grep -E ".(malloc)."`
# Detect already-present includes so they are not duplicated.
stdlibp=`cat -e $1 | grep -E ".stdlib\.h"`
stdiop=`cat -e $1 | grep -E ".stdio\.h"`
incl=""
if [ -n "$stdio" -a -z "$stdiop" ]; then
incl=$incl"#include <stdio.h>"
fi
if [ -n "$stdlib" -a -z "$stdlibp" ]; then
incl=$incl"#include <stdlib.h>"
fi
clear
# cat -e marks line ends with '$', which `tr $ "\n"` would later restore.
file=`cat -e $1`
#echo -e $incl$file #| tr $ "\n"
read
echo -e $incl$file #| tr $ "\n" > $1
| true |
2871da2a13003ec25b9debaadf5fe264196fdd1f | Shell | habib123/script-in-linux- | /build/package/setup.sh | UTF-8 | 383 | 2.796875 | 3 | [] | no_license | #!/bin/bash -e
# OpenWrt device provisioning: install runtime packages via opkg, unpack the
# bundled filesystem overlay onto /, hook the firewall-restart script into
# rc.local (once), enable the services, and restart networking.
ROOT=$(dirname $0)
cd ${ROOT}
opkg update
opkg install python3-light python luafilesystem luasocket
# Overlay bundled files onto the root filesystem.
tar xf ./fs.tar -C /
# Insert the restartfirewall call before `exit` in rc.local, only if absent.
if [ "$(cat /etc/rc.local | grep restartfirewall)" == "" ] ; then
sed -i "/exit/ \/etc\/prosol\/restartfirewall\ " /etc/rc.local
fi
/bin/sync
/etc/init.d/udp_receiver enable
/etc/init.d/status_api enable
/etc/init.d/network restart
exit 0
| true |
bee6983845fad3d8a46ef7317ad49e46b10104b2 | Shell | daminisalwe1234/gitpractice1 | /practiceday7/Array5.sh | UTF-8 | 933 | 3.5 | 4 | [] | no_license | #!/bin/bash -x
# Prompt for a sub-range of 0..100 and print every non-zero multiple of 11
# inside it; validates both bounds and requires end > start.
read -p "Enter the starting pont of range between 0 and 100 :" rangeStart
# FIX: original `[[ ... ][ ... ]]` was a conditional syntax error; join the
# two tests with ||.
if [[ $rangeStart -lt 0 || $rangeStart -gt 100 ]]
then
    echo "INVALID INPUT"
else
    read -p "Enter the ending point of range between 0 and 100 :" rangeEnd
    if [[ $rangeEnd -lt 0 || $rangeEnd -gt 100 || $rangeEnd -le $rangeStart ]]
    then
        # FIX: `echo"INVALID INPUT"` was missing the space after echo.
        echo "INVALID INPUT"
    else
        count=0
        # FIX: original for-header used ':' instead of ';' after the init.
        for (( element=rangeStart; element<=rangeEnd; element++ ))
        do
            # FIX: original was missing the space before ]].
            if [[ $element -ne 0 && $((element%11)) -eq 0 ]]
            then
                # subscript is evaluated arithmetically, so count++ works here
                array[count++]=$element
            fi
        done
        if [ $count -eq 0 ]
        then
            echo "NO SUCH NUMBER IN THE RANGE"
        else
            echo ${array[@]}
        fi
    fi
fi
| true |
2f046b4168014f808e23676cc252428bf9f98eac | Shell | Tubbz-alt/pcf-automation-control-plane | /src/scripts/stemcell-build/test-windows-stemcell.sh | UTF-8 | 1,142 | 3.375 | 3 | [] | no_license | #!/bin/bash
set -eux
uploaded=$(bosh stemcells | awk "/\t$version\\*?/{ print \$3 }")
if [[ -z $uploaded ]]; then
bosh upload-stemcell ${stemcell_build_path}/${stemcell_archive_name}
uploaded=$(bosh stemcells | awk "/$version/{ print \$3 }")
if [[ $uploaded != $operating_system ]]; then
echo "Stemcell OS name mismatch. The uploaded name is '$uploaded', but the stemcell file name was labeled '$operating_system'."
exit 1
fi
fi
pushd ${root_dir}/src/stemcells/tests/windows-test-bosh-release
bosh create-release --force
bosh upload-release
bosh -n -d windows-stemcell-test deploy \
manifest.yml --var=stemcell_os_name=$operating_system
popd
rm -f ${root_dir}/.stembuild/windows-stemcell-test-*.tgz
bosh -d windows-stemcell-test logs --dir ${root_dir}/.stembuild/
pushd ${root_dir}/.stembuild/
tar xvzf windows-stemcell-test-*.tgz
cat ./say-hello/say-hello/job-service-wrapper.out.log \
| grep "I am executing a BOSH job. FOO=BAR" 2>&1 >/dev/null
rm -fr ./say-hello
popd
rm -f ${root_dir}/.stembuild/windows-stemcell-test-*.tgz
bosh -n -d windows-stemcell-test delete-deployment
echo "No errors detected..."
| true |
a3c09c2eb9d2c940bc7cbc248b3949b330c06681 | Shell | ciax/ciax-xml | /work/xpath_mdb-ciax.sh | UTF-8 | 843 | 2.671875 | 3 | [] | no_license | #!/bin/bash
#alias xp
if [ "$1" ] ; then
#xpath -e "//item[.//@site='crt' and .//@var='cps'][../../group/@id='g_rot']" mdb-ciax.xml
xpath -e "//group[@id='g_$1']//item[.//@var='cps']" ~/ciax-xml/mdb-ciax.xml
else
cat <<EOF
exchg : Exchange Commands (0)
select : Select Commands (0)
mvi : Sequential Instrument Moving Commands (1)
sfr : Sequential Free Run Commands (1)
atdt : Sequential Attach/Detach Commands (1)
cer : Compound Exclusive Run Commands (2)
ctrp : Compound Transport Commands (2)
cdh : Compound Run Commands (2)
cfljh : Compound Flange Jack/Hook Commands (2)
cmov : Compound Movable Flange Commands (2)
jak : Atomic Jack Bolt Commands (3)
rot : Atomic Flange Rotation Commands (3)
mov : Atomic Movable Flange Commands (3)
cart : Atomic Cart Commands (3)
assign : Atomic Config Commands (3)
EOF
fi
| true |
b4808e8d2b2c7a384abc22f2907dceccd94b6e6b | Shell | 0xStrange/.dotfiles | /bashrc | UTF-8 | 251 | 3.421875 | 3 | [] | no_license | #
#
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
# Load rc scripts from ~/.bashrc.d
# Only regular executable *.sh files are sourced; the '?*' in the glob
# requires at least one character before the '.sh' suffix.
if [ -d ~/.bashrc.d ]
then
    for f in ~/.bashrc.d/?*.sh ; do
        [ -x "$f" ] && source "$f"
    done
    unset f
fi
| true |
fddbff746599215f8b1fdebbe0acd8d87e1b6d7d | Shell | bplinux/asm | /scripts/c_x64.sh | UTF-8 | 332 | 2.671875 | 3 | [
"Unlicense"
] | permissive | #!/bin/bash
# Build three variants of a C source file ($1) with decreasing numbers of
# hardening mitigations disabled: <name>_weak, <name>_mid, <name>_strong.
input=$1
# Base output name: everything before the first '.' (parameter expansion
# replaces the previous echo|cut pipeline; identical result, no subshells).
base=${input%%.*}
output_weak=${base}_weak
output_mid=${base}_mid
output_strong=${base}_strong
# weak: no stack canary, no PIE/PIC, executable stack
gcc -fno-stack-protector -no-pie -fno-pic -z execstack "$input" -o "$output_weak"
# mid: no stack canary, no PIE/PIC (stack remains non-executable)
gcc -fno-stack-protector -no-pie -fno-pic "$input" -o "$output_mid"
# strong: compiler defaults (all standard mitigations enabled)
gcc "$input" -o "$output_strong"
| true |
0f563319d60e97395b9414c484b5db064de8409d | Shell | tsdk/nw-myapp | /package.sh | UTF-8 | 532 | 3.671875 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# NOTE(review): the shebang is /bin/sh but 'read -p' and the ';&' case
# fall-through below are bash features -- confirm this is run under bash.
# NOTE(review): 'dir' is computed but never used afterwards.
dir=$(cd `dirname $0`;pwd)
# No argument: ask interactively. Otherwise keep an inherited $platform
# env var, falling back to uname.
# NOTE(review): the positional argument $1 itself is never consumed --
# 'platform=$1' may have been intended; verify before relying on it.
if [ -z $1 ];then
read -p "platform=(win|gnu)? " platform
else
platform=${platform:-`uname -s`}
fi
# win: rewrite $var placeholders in package.json to %var%;
# gnu/Linux/Darwin: apply the reverse transformation.
case $platform in
win*)
cat package.json | perl -pe 's/\$([^\s"]+)/%\1%/g' > package-tmp.json
mv package-tmp.json package.json
echo 'done';;
Linux*);&
Darwin*);&
gnu)
cat package.json | perl -pe 's/%([^%]+?)%/\$\1/g' > package-tmp.json
mv package-tmp.json package.json
echo 'done';;
help);&
*) echo 'input error';;
esac
| true |
816a9e225981046ae5e33d6d1eb47142f4835d33 | Shell | haobtc/blockinfo | /parse/blockmanager.sh | UTF-8 | 1,581 | 3.609375 | 4 | [] | no_license | #!/bin/bash
# Usage: blockmanager.sh <coin> <command>
# Thin dispatcher around the mongodb_store python helpers and mongo shell
# jobs for one coin's info_<coin> database.
coin=$1
dbhost=localhost
prog=$0
# Only these coins are supported; anything else aborts.
case "$1" in
    bitcoin)
        ;;
    litecoin)
        ;;
    dogecoin)
        ;;
    *)
        echo Unknown coin
        exit 1
        ;;
esac
# Python helpers: each shells out to the mongodb_store module for $coin.
function import_mempool {
    python -c "import mongodb_store; mongodb_store.import_mempool('$coin')"
}
function update_input {
    python -c "import mongodb_store; mongodb_store.update_inputs('$coin', update_spent=True)"
}
function update_spent {
    python -c "import mongodb_store; mongodb_store.update_spent('$coin')"
}
# Aggregation jobs executed inside the mongo shell against info_<coin>.
function calc_spent {
    mongo "$dbhost/info_$coin" jobs/cf_spent.js
}
function calc_fee {
    mongo "$dbhost/info_$coin" jobs/cf_fee.js
}
function calc_balance {
    mongo "${dbhost}/info_$coin" jobs/cf_balance.js
}
# Composite pipelines: re-invoke this script ($prog) per step so every
# step goes through the same coin validation, then pause briefly.
function job {
    $prog $coin parse && \
    $prog $coin calc_balance && \
    $prog $coin calc_fee && \
    $prog $coin import_mempool && \
    sleep 5
}
function simple_job {
    $prog $coin parse && \
    $prog $coin import_mempool && \
    sleep 5
}
# The second CLI word picks the action; unknown words print usage.
case "$2" in
    job)
        job
        ;;
    simple_job)
        simple_job
        ;;
    dbshell)
        mongo "${dbhost}/info_$coin"
        ;;
    dbdump)
        mkdir -p dbdump
        mongodump -h $dbhost -d "info_$coin" -o "dbdump/$coin"
        ;;
    parse)
        python blockparse.py $coin
        ;;
    update_input)
        update_input
        ;;
    import_mempool)
        import_mempool
        ;;
    update_spent)
        update_spent
        ;;
    calc_spent)
        calc_spent
        ;;
    calc_fee)
        calc_fee
        ;;
    calc_balance)
        calc_balance
        ;;
    *)
        echo Usage: $0 '<coin> [dbshell|dbdump|parse|import_mempool|update_input|update_spent|calc_spent|calc_fee|calc_balance|job|simple_job] args ...'
        ;;
esac
| true |
2d7d3f25d42961f4b807943318d3c94a2c8b2c54 | Shell | mt-inside/bash-is-testing-devops-days-london | /13.sh | UTF-8 | 314 | 3 | 3 | [] | no_license | set -x
eval 'if [ -n โfooโ -a โbarโ == โlolโ -o -f baz ] ; then echo โyesโ; else echo "no"; fi'
# -n, -z, etc for string length
# -e, -f, -d, -x, etc for file attributes
# -eq, -ne, -gt, -lt, etc for integer comparison
# ==, !=, <, > for string comparison
# -a, -o, ! for boolean composition
| true |
59c2ec55caa5db40bdf5ba6907c326e3d517c6c7 | Shell | bridgecrew-perf4/azure-terraform-vmss | /scripts/init.sh | UTF-8 | 663 | 2.609375 | 3 | [] | no_license | #!/bin/bash
#Installing Docker
# Remove any distro-packaged Docker first, then install docker-ce from the
# official Docker apt repository (standard Ubuntu install procedure).
sudo apt-get remove docker docker-engine docker.io
sudo apt-get update
sudo apt-get install -y \
    apt-transport-https \
    ca-certificates \
    curl \
    software-properties-common
# Trust Docker's signing key and register the stable repo for this release.
# NOTE(review): apt-key is deprecated on newer Ubuntu releases.
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
sudo add-apt-repository \
   "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
   $(lsb_release -cs) \
   stable"
sudo apt-get update
sudo apt-get install docker-ce -y
# Let the current user run docker without sudo (takes effect on re-login).
sudo usermod -a -G docker $USER
sudo systemctl enable docker
sudo systemctl restart docker
sudo docker run --name docker-nginx -p 80:80 nginxdemos/hello:latest | true |
d136161312de91d5f39107ca3cb80f5d4045b25f | Shell | eostman/v-validation | /publish.sh | UTF-8 | 229 | 2.75 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
# GH_TOKEN is required by 'lerna version --create-release github'.
if [[ -z "${GH_TOKEN}" ]]; then
    echo 'Error GH_TOKEN env variable missing'
    exit 1;
fi
# Echo commands from here on so the release steps are visible in the log.
set -v
npm login
# Bump version ($1, default 'minor'), tag, and create a GitHub release,
# then publish whatever packages changed.
lerna version ${1:-minor} --conventional-commits --create-release github
lerna publish from-package
| true |
0f76f69484becd1b79e666b835b7acccd6059451 | Shell | robin13/slack-automations | /bin/slack-clear-offline.sh | UTF-8 | 220 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Stop a running slack-automation helper (PID recorded next to this
# script) and clear away/snooze/status state via the slack CLI.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PID_FILE="$DIR/running-pid"
if [ -f "$PID_FILE" ]; then
    # NOTE(review): SIGKILL gives the helper no chance to clean up;
    # behavior kept, but a TERM-first approach would be gentler.
    kill -9 "$(cat "$PID_FILE")"
    rm -f "$PID_FILE"
fi
slack presence away
slack snooze end
slack status clear
| true |
f8add65140a4c2fb6718874fa419251b6cec5f58 | Shell | dndprojects/aarch64_arch_linux | /sopcast/new.sh | UTF-8 | 969 | 3.546875 | 4 | [] | no_license | #!/bin/bash
set -x
#sop://broker.sopcast.com:3912/258825
# Some basic sanity check for the URL
URL=$1
PORT=8908
[[ $URL =~ ^sop:// ]] || { echo "Usage: $0 sop://URL"; exit 1; }
# Make sure local port is not in use
netstat -vant | grep "$PORT" | grep -q LISTEN && {
    echo "Port $PORT is in use, incr PORT";
    PORT=$((PORT + 1)); echo "new port is $PORT"; }
# add locale libstdc++.so.5
cd "$(dirname "$0")"
export LD_LIBRARY_PATH=$(pwd)
# NOTE(review): hard-coded credentials passed on the command line are
# visible to every local user via the process list.
#./qemu-i386 lib/ld-linux.so.2 --library-path lib ./sp-sc-auth $URL 3908 $PORT >/dev/null &
./qemuaarch-i386 lib/ld-linux.so.2 --library-path lib ./sp-sc-auth -u ezradnd@walla.com:topgan12 "$URL" 3908 "$PORT" >/dev/null &
_PID=$!
cleanup () {
    # kill -0 probes process existence without signalling it; the old
    # 'ps | grep -q $_PID' could match unrelated PIDs by substring.
    kill -0 "$_PID" 2>/dev/null && kill "$_PID"
    echo "FAIL"
    exit
}
trap cleanup SIGINT SIGTERM EXIT
# Wait up to 60 seconds for the client to start listening on $PORT.
n=0
until [ "$n" -ge 60 ]; do
    # if sp-auth died, exit
    kill -0 "$_PID" 2>/dev/null || exit 1
    netstat -vant | grep "$PORT" | grep -q LISTEN && break
    n=$((n + 1))
    sleep 1
done
| true |
2cef45a7569a143bd4a93bd6ec06070aa67db5cd | Shell | shawn-tranter/terraform-test | /terra | UTF-8 | 514 | 2.984375 | 3 | [] | no_license | #!/bin/bash
TERRAFORM_DOCKER_IMAGE="hashicorp/terraform:full"
# Re-export the local AWS CLI profile into the container environment.
AWS_ACCESS_KEY_ID=$(aws configure get aws_access_key_id)
AWS_SECRET_ACCESS_KEY=$(aws configure get aws_secret_access_key)
AWS_DEFAULT_REGION=$(aws configure get region)
# argument should be init, apply, delete
# The current directory is mounted as the terraform working dir; all
# script arguments are forwarded to the terraform entrypoint below.
docker run -i -t --rm \
    -v `pwd`:/data --workdir=/data \
    -e AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
    -e AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
    -e AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
${TERRAFORM_DOCKER_IMAGE} $@ | true |
f9068b39deab7700872b14c4f71d4ac151989e8f | Shell | steinbrueckri/zsh-gcloud-prompt | /gcloud.zsh | UTF-8 | 2,199 | 3.53125 | 4 | [
"MIT"
] | permissive | setopt prompt_subst
autoload -Uz add-zsh-hook
# Anonymous function: runs once at source time and records which stat(1)
# mtime flag this platform uses (GNU vs BSD) in a zstyle setting.
function() {
  local mtime_fmt
  # handle difference of stat between GNU and BSD
  if stat --help >/dev/null 2>&1; then
    mtime_fmt='-c%y'
  else
    mtime_fmt='-f%m'
  fi
  zstyle ':zsh-gcloud-prompt:' mtime_fmt $mtime_fmt
}
# Rebuild the "account:project" prompt fragment from the gcloud config.
function _set_zsh_gcloud_prompt() {
  local account project
  account="$(gcloud config get-value account 2>/dev/null)"
  project="$(gcloud config get-value project 2>/dev/null)"
  ZSH_GCLOUD_PROMPT="${account}:${project}"
}
# Compare cached mtimes of the gcloud config files against their current
# values; returns 0 (and refreshes the cache) when any of them changed.
function _is_gcloud_config_updated() {
  local active_config config_default configurations
  local active_config_now config_default_now configurations_now
  local active_config_mtime config_default_mtime configurations_mtime mtime_fmt
  # if one of these files is modified, assume gcloud configuration is updated
  active_config="$HOME/.config/gcloud/active_config"
  config_default="$HOME/.config/gcloud/configurations/config_default"
  configurations="$HOME/.config/gcloud/configurations"
  zstyle -s ':zsh-gcloud-prompt:' mtime_fmt mtime_fmt
  active_config_now="$(stat $mtime_fmt $active_config 2>/dev/null)"
  config_default_now="$(stat $mtime_fmt $config_default 2>/dev/null)"
  configurations_now="$(stat $mtime_fmt $configurations 2>/dev/null)"
  zstyle -s ':zsh-gcloud-prompt:' active_config_mtime active_config_mtime
  zstyle -s ':zsh-gcloud-prompt:' config_default_mtime config_default_mtime
  zstyle -s ':zsh-gcloud-prompt:' configurations_mtime configurations_mtime
  if [[ "$active_config_mtime" != "$active_config_now" || "$config_default_mtime" != "$config_default_now" || "$configurations_mtime" != "$configurations_now" ]]; then
    zstyle ':zsh-gcloud-prompt:' active_config_mtime "$active_config_now"
    zstyle ':zsh-gcloud-prompt:' config_default_mtime "$config_default_now"
    zstyle ':zsh-gcloud-prompt:' configurations_mtime "$configurations_now"
    return 0
  else
    return 1
  fi
}
# precmd hook: refresh the prompt fragment only when the config changed.
function _update_gcloud_prompt() {
  if _is_gcloud_config_updated; then
    _set_zsh_gcloud_prompt
  fi
  return 0
}
add-zsh-hook precmd _update_gcloud_prompt
_update_gcloud_prompt
| true |
09ea14d9e34be9e5e4111fb2f97a44fea98505af | Shell | chhetribsurya/PartridgeChhetri_etal | /awkbash_pipes/resubmit_jobs.sh | UTF-8 | 2,751 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Resubmit MEME-ChIP motif-analysis jobs to LSF (bsub), one per line of
# ./resubmit.txt. Paths and bsub options are cluster-specific constants.
export RUN_PATH=`pwd`
export REFERENCE_DIR="/gpfs/gpfs1/home/schhetri/for_encode/spp/reference"
export GENOME="/gpfs/gpfs1/home/schhetri/for_encode/hg19_genome/hg19-male.fa"
#export INPUT_DIR="/gpfs/gpfs1/home/schhetri/for_chris/batch_I/idr_passed_peaks_total/dups"
export INPUT_DIR="/gpfs/gpfs1/home/schhetri/for_chris/batch_I/idr_passed_peaks_total/unique_TFs"
#export OUTPUT_DIR="/gpfs/gpfs1/home/schhetri/for_chris/batch_I/meme_chip_motif_analysis/dups"
export OUTPUT_DIR="/gpfs/gpfs1/home/schhetri/for_chris/batch_I/meme_chip_motif_analysis_total_allpeaks/unique_TFs"
### Set BSUB parameters:
#export CORE_NUM=2
#export MEME_CORES=2
#export BSUB_OPTIONS="-We 24:00 -q priority -n $CORE_NUM -R span[hosts=1] -R rusage[mem=25000]" # Using new cluster
#export BSUB_OPTIONS="-We 24:00 -q c7normal -n $CORE_NUM -R span[hosts=1] -R rusage[mem=25000]" # Using new cluster
export BSUB_OPTIONS="-We 24:00 -q c7normal -R rusage[mem=12288]" # Using new cluster
#export JOB_PREFIX="test_batch_I"
### Set the path for all the tools to be consistent:
export MEME_SUITE_PATH="/gpfs/gpfs2/software/meme-4.11.3/bin"
export BEDTOOLS_PATH="/gpfs/gpfs2/software/bedtools2-2.20.0/bin"
export MOTIF_DB_PATH="/gpfs/gpfs2/software/meme-4.11.3/motif_databases"
### generate the null sequences:
export NULL_GENERATE_SCRIPT="$RUN_PATH/nullseq_generate.py"
export NULL_PARAMETERS="-x 2 -r 1 "
export NULL_HG19_INDICES="$RUN_PATH/nullseq_indices_hg19/"
# Create the output and log directories up front; abort on failure.
if [[ ! -d $OUTPUT_DIR ]]; then
    mkdir -p $OUTPUT_DIR
    if [[ $? -ne "0" ]]; then echo "Could not create base output dir: $OUTPUT_DIR. Exiting"; exit 1; fi
fi
export LOG_DIR="$OUTPUT_DIR/log_files"
if [[ ! -d $LOG_DIR ]]; then
    mkdir -p $LOG_DIR
    if [[ $? -ne "0" ]]; then echo "Could not create log dir: $LOG_DIR. Exiting"; exit 1; fi
fi
# One bsub job per input line; field 2 of each line is the IDR peak file.
while read job_info; do
    each_idr_file=$(echo $job_info| awk '{print $2}')
    export TF_NAME=$(basename $each_idr_file | awk 'BEGIN{FS=".";"_";OFS="_"} {print $1,$5,$NF}')
    echo "processing $TF_NAME ..."
    if [[ ! -d $OUTPUT_DIR/$TF_NAME ]]; then
        mkdir -p $OUTPUT_DIR/$TF_NAME
    fi
    JOB_NAME=$(echo $TF_NAME | cut -f1,4 -d "_")
    TF=$(basename $each_idr_file | awk 'BEGIN{FS=".";"_";OFS="_"} {print $NF}' | sed -e 's/\[\([a-zA-Z0-9]*\)\]/_\1/g' )
    #echo $TF
    bsub $BSUB_OPTIONS -J "Comp peaks motif analysis for $TF ::: IDR_${JOB_NAME}" -o $LOG_DIR/resubmit_meme_chip_motif_calling_uniq_TFs.out $RUN_PATH/meme_chip_peaks_motif_analysis_final_allpeaks.sh $each_idr_file $OUTPUT_DIR $TF_NAME
    #bsub -We 24:00 -q priority -n 1 -R span[hosts=1] -J "Call motif analysis with meme_chip" -o ./${JOB_PREFIX}_motif_calling_all_peak.out $RUN_PATH/submit_meme_chip_peaks_motif_analysis_final.sh
done < ./resubmit.txt
| true |
3c44e8029c44bf9744cd095f4d634ff3310f4375 | Shell | Michael-Purser/self-balancing-robot | /utils/docker/docker_entrypoint.sh | UTF-8 | 811 | 3.03125 | 3 | [] | no_license | #!/bin/bash
set -e
# files mounted from host system, so need to source here instead of in Dockerfile
echo "source $HOME/self_balancing_robot/devel/setup.bash" >> $HOME/.bashrc
echo "source $HOME/self_balancing_robot/src/utils/scripts/setup.bash" >> $HOME/.bashrc
source $HOME/.bashrc
# Configure and build the catkin workspace against ROS Melodic.
cd $HOME/self_balancing_robot/
catkin config --extend /opt/ros/melodic --cmake-args -DCMAKE_BUILD_TYPE=RelWithDebInfo
catkin init
catkin build
clear
# Start an ssh agent and load the (host-mounted) key for git access.
eval "$(ssh-agent -s)"
ssh-add ~/.ssh/id_rsa
cd $HOME/self_balancing_robot/src
echo "to start pre-configured tmux session set up for ROS, Gazebo and our current software, use 'bash tmux_session.sh'"
echo "for a TMUX quick summary, use 'bash tmux_help.sh'"
echo "alternatively, you can start any terminal (like terminator) via terminal and run commands manually"
# Replace this shell with the container's CMD.
exec "$@"
| true |
2510fa92f2c00bd717468ae05999ec64462392bc | Shell | kammerdienerb/ptag | /ptag.sh | UTF-8 | 2,687 | 4.4375 | 4 | [] | no_license | #!/usr/bin/env bash
# err MESSAGE...
# Print an error message and abort with status 1. Diagnostics now go to
# stderr so they never pollute output that callers may capture or pipe;
# "$*" replaces "$@" inside the string (same output, correct idiom).
function err {
    echo "[ ptag ] ERROR: $*" >&2
    exit 1
}
# file_has_tag FILE TAG
# Succeed (status 0) iff FILE carries TAG. Only the last 250 lines are
# scanned because tags are always appended at the end of the file.
# Fixes: f/t no longer leak as globals; '2>&1 > /dev/null' leaked grep's
# stderr to stdout -- redirections reordered to silence both streams.
function file_has_tag {
    local f="$1"
    local t="$2"
    # NOTE(review): TAG is interpolated into the ERE verbatim; tags with
    # regex metacharacters could over-match -- confirm tags are plain words.
    tail -n 250 "${f}" | grep -E -- "!!!PTAG ${t}\$" > /dev/null 2>&1
}
# tag_file FILE TAG
# Unconditionally append the tag marker line for TAG to FILE.
# Fix: f/t were assigned without 'local' and leaked into the caller.
function tag_file {
    local f="$1"
    local t="$2"
    echo "!!!PTAG ${t}" >> "${f}"
}
# untag_file FILE TAG
# Delete every marker line for TAG from FILE in place (GNU sed -i).
# Fix: f/t were assigned without 'local' and leaked into the caller.
function untag_file {
    local f="$1"
    local t="$2"
    # NOTE(review): TAG is spliced into the sed address verbatim; a tag
    # containing '/' or regex metacharacters would break the expression.
    sed -i "/!!!PTAG ${t}\$/d" "${f}"
}
# tag FILE TAG
# Validate the arguments, then apply TAG to FILE unless already present.
# Aborts (via err) on a missing file, an empty tag, or a duplicate tag.
function tag {
    f="$1"
    t="$2"
    [ -f "${f}" ] || err "no such file: '${f}'"
    [ "${t}" != "" ] || err "missing tag"
    if ! file_has_tag "${f}" "${t}"; then
        tag_file "${f}" "${t}"
    else
        err "'${f}' is already tagged with '${t}'"
    fi
}
# untag FILE TAG
# Validate the arguments, then remove TAG from FILE if it is present.
# Aborts (via err) on a missing file, an empty tag, or an absent tag.
function untag {
    f="$1"
    t="$2"
    [ -f "${f}" ] || err "no such file: '${f}'"
    [ "${t}" != "" ] || err "missing tag"
    if file_has_tag "${f}" "${t}"; then
        untag_file "${f}" "${t}"
    else
        err "'${f}' is not tagged with '${t}'"
    fi
}
# search TAG...
# Print every *.pdf under the current directory that carries ALL of the
# given tags: an awk program counting matching marker lines is generated,
# written to a per-file helper script, and fanned out over 8 xargs workers.
function search {
    awk_prg=""
    # One regex alternation per requested tag; 'n' counts matched lines.
    for tag in "$@"; do
        awk_prg+="/!!!PTAG ${tag}\$/ || "
    done
    awk_prg+="0 { n++ } END { print n; }"
    # Helper run per file: print the path when every tag matched ($# hits).
    # NOTE(review): tags are interpolated into the awk source verbatim --
    # a tag containing '/' or awk syntax would break or over-match.
    xargs_template="f=\"\$1\"
n=\$(tail -n 250 \"\${f}\" | env LC_ALL='C' awk '${awk_prg}')
if [ \"\$n\" == \"$#\" ]; then
    echo \"\${f}\"
fi"
    echo "${xargs_template}" > /tmp/ptag_script.sh
    find . -type f -name '*.pdf' -print0 | xargs -0 -P8 -I {} bash /tmp/ptag_script.sh {}
}
# list [FILE]
# With FILE: print the tags applied to that file, one per line.
# Without arguments: print the union of tags across every *.pdf under the
# current directory, sorted and de-duplicated.
# Fixes: archaic '[ "x${1}x" == "xx" ]' empty-test replaced by -z; the
# positional copy is now a local instead of leaking the global 'f'.
function list {
    local f="$1"
    if [ -z "${f}" ]; then
        find . -type f -name "*.pdf" -print0 | xargs -0 -P8 -I {} bash -c "tail -n 250 \"{}\" | grep -a \"!!!PTAG\" | cut -d\" \" -f 2-" | sort -u
    else
        if ! [ -f "${f}" ]; then
            err "no such file: '${f}'"
        fi
        tail -n 250 "${f}" | grep -a "!!!PTAG" | cut -d" " -f 2-
    fi
}
# usage
# Print the command summary to stdout (text unchanged; a quoted heredoc
# replaces the run of echo statements).
function usage {
    cat <<'USAGE_EOF'
usage: ptag command arguments...
COMMANDS:
    tag FILE TAG
        Apply TAG to FILE.
    untag FILE TAG
        Remove TAG from FILE.
    search TAGS...
        List all files that have every tag in TAGS.
    list [FILE]
        If FILE if it is provided, list all tags applied
        to that file. Otherwise list tags from all files.
    help
        Show this helpful information.
USAGE_EOF
}
# help
# Alias for 'usage'.
function help { usage; }
# Command dispatch: the first CLI word selects the subcommand, the rest
# are forwarded to it. Unknown commands print usage and exit non-zero.
cmd=$1
shift
case ${cmd} in
    "tag")
        tag "$@"
        ;;
    "untag")
        untag "$@"
        ;;
    "search")
        search "$@"
        ;;
    "list")
        list "$@"
        ;;
    "help")
        help
        ;;
    *)
        usage
        exit 1
        ;;
esac
| true |
e3189798163078e70b27cfe186acb2a400f82806 | Shell | tuplink/autovpn | /modules/monitor.time.sh | UTF-8 | 325 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# monitor_time: hourly NTP sync, attempted only while the "Public
# Internet" monitor counter is healthy (>= 3). The MONITOR array and the
# INFO/ERROR log helpers are defined by the caller that sources this file.
monitor_time(){
  if [ ${MONITOR[Public Internet]} -ge 3 ] ; then
    # NTPSTATUS caches the hour of the last successful sync, so the
    # update runs at most once per hour.
    if [ "$NTPSTATUS" != $(date +%H) ] ; then
      INFO "Updating Time"
      if ntpdate time.nist.gov ; then
        INFO "NTP Time Updated"
        NTPSTATUS=$(date +%H)
      else
        ERROR "NTP Time Update Failed"
      fi
    fi
  fi
}
| true |
427bc0bde591e010e023e7d90d207a8c0fbe0234 | Shell | asxtree/skybian | /static/skybian-firstrun | UTF-8 | 4,220 | 4 | 4 | [] | no_license | #!/bin/bash
# Created by @evanlinjin
# TODO(evanlinjin): Write documentation for the following:
# - Where we are placing the boot params.
# - Values of the boot params.
# Boot device plus the files that skyconf generates on first boot.
DEV_FILE=/dev/mmcblk0
CONFIG_FILE=/etc/skywire-config.json
TLS_KEY=/etc/skywire-hypervisor/key.pem
TLS_CERT=/etc/skywire-hypervisor/cert.pem
NET_NAME="Wired connection 1"
WIFI_NAME="Wireless connection 1"
# Stop here if config files are already generated.
if [[ -f "$CONFIG_FILE" ]]; then
  echo "Nothing to be done here."
  systemctl disable skybian-firstrun.service
  exit 0
fi
# 'setup_skywire' extracts boot parameters.
# These parameters are stored in the MBR (Master Boot Record) Bootstrap code
# area of the boot device. This starts at position +0E0(hex) and has 216 bytes.
setup_skywire()
{
  # skyconf reads the boot params and writes the config/key/cert files;
  # its stdout is a set of VAR=VALUE assignments that are eval'd below.
  if ! readonly BOOT_PARAMS=$(/usr/bin/skyconf -if=$DEV_FILE -c=$CONFIG_FILE -keyf=$TLS_KEY -certf=$TLS_CERT); then
    echo "Failed to setup skywire environment."
    return 1
  fi
  # Obtains the following ENVs from boot params:
  # MD IP GW PK SK HVS SS SUCCESS LOGFILE
  echo "-----BEGIN BOOT PARAMS-----"
  echo "$BOOT_PARAMS"
  echo "-----END BOOT PARAMS-----"
  if ! eval "$BOOT_PARAMS"; then
    echo "Failed to eval boot params."
    return 1
  fi
  # Print 'skyconf' logs.
  if [[ -n "$LOGFILE" ]] ; then
    echo "-----BEGIN SKYCONF LOGS-----"
    $(command -v cat) - < "$LOGFILE" | while IFS= read -r line; do
      echo "$line"
    done
    echo "-----END SKYCONF LOGS-----"
  else
    echo "Cannot access 'skyconf' logs."
  fi
}
setup_skywire || exit 1
# 'setup_network' sets up networking for Skybian.
# It uses the IP (local IP address) and GW (Gateway IP address) of the boot
# params. If these are not defined, defaults will be kept.
setup_network()
{
  echo "Setting up network $NET_NAME..."
  if [[ -n "$IP" ]]; then
    echo "Setting manual IP to $IP for $NET_NAME."
    nmcli con mod "$NET_NAME" ipv4.addresses "$IP/24" ipv4.method "manual"
  fi
  if [[ -n "$GW" ]]; then
    echo "Setting manual Gateway IP to $GW for $NET_NAME."
    nmcli con mod "$NET_NAME" ipv4.gateway "$GW"
  fi
  # Cloudflare resolvers as fixed DNS.
  nmcli con mod "$NET_NAME" ipv4.dns "1.0.0.1, 1.1.1.1"
}
# setup_wifi: create and bring up a wifi connection from the WFN (SSID)
# and WFP (passphrase) boot params; static addressing mirrors setup_network.
setup_wifi()
{
  echo "Setting up wifi connection $WIFI_NAME..."
  nmcli c add type wifi con-name "$WIFI_NAME" ifname wlan0 ssid $WFN
  if [[ -n "$WFP" ]]; then
    nmcli c modify "$WIFI_NAME" wifi-sec.key-mgmt wpa-psk wifi-sec.psk $WFP
  fi
  if [[ -n "$IP" && -n "$GW" ]]; then
    echo "Setting manual IP to $IP for $WIFI_NAME."
    nmcli con mod "$WIFI_NAME" ipv4.addresses "$IP/24" ipv4.method "manual"
  fi
  if [[ -n "$GW" ]]; then
    echo "Setting manual Gateway IP to $GW for $WIFI_NAME."
    nmcli con mod "$WIFI_NAME" ipv4.gateway "$GW"
  fi
  nmcli con mod "$WIFI_NAME" ipv4.dns "1.0.0.1, 1.1.1.1"
  # Cycle the connection so the new settings take effect.
  nmcli con down "$WIFI_NAME"
  sleep 3
  nmcli con up "$WIFI_NAME"
}
# assume wifi should be configured instead of ethernet when wifi name env var is set
if [[ -n "$WFN" ]]; then
  setup_wifi || exit 1
else
  setup_network || exit 1
fi
# Dump the host SSH key files into the boot log for inspection.
for file in /etc/ssh/ssh_host* ; do
  echo "[skybian-firstrun] Checking $file:"
  cat "$file"
done
echo "Enabling 'skywire-visor.service'."
systemctl enable skywire-visor.service
sleep 2
systemctl start skywire-visor.service
# install_ntp: switch the system from systemd-timesyncd to the full ntp
# daemon, with the hardware clock kept on UTC.
install_ntp()
{
  # (courtesy of https://github.com/some4/skywire-install-bash/blob/master/install.sh)
  # Stop timesyncd:
  systemctl stop systemd-timesyncd.service
  # Backup (but don't overwrite an existing) config. If not, sed will keep
  # appending file:
  cp -n /etc/systemd/timesyncd.conf /etc/systemd/timesyncd.orig
  # Use fresh copy in case installer used on existing system:
  cp /etc/systemd/timesyncd.orig /etc/systemd/timesyncd.conf
  # When system is set to sync with RTC the time can't be updated and NTP
  # is crippled. Switch off that setting with:
  timedatectl set-local-rtc 0
  timedatectl set-ntp on
  apt update && apt install -y ntp
  systemctl disable systemd-timesyncd.service
  # Fix: the original called the undefined helper 'info' here, which
  # failed at runtime with "command not found"; plain echo is intended.
  echo "Restarting NTP..."
  systemctl restart ntp.service
  # Set hardware clock to UTC (which doesn't have daylight savings):
  hwclock -w
}
# Best-effort NTP setup; a failure is only logged, never fatal.
install_ntp || logger "Failed to setup ntp service"
# First run is complete; never run this unit again.
systemctl disable skybian-firstrun.service
exit 0
| true |
28219292948f250357123e973bda0b61bced9d06 | Shell | ghostlambdax/recognizeapp | /nclouds/start_deployment.sh | UTF-8 | 2,349 | 2.640625 | 3 | [] | no_license | #!/bin/bash -e
# pip python-dev and awscli is for sync assets on s3 bucket of cdn
# Deployment steps that need access to RDS
# For deployment steps that need access to AWS credentials, use jenkins_deployment.sh
# Alpine packages required by the deploy tooling below.
apk add --no-cache aws-cli
apk add --no-cache nodejs npm
# get samples for credentials
apk add --no-cache rsync
nclouds/use_samples.sh
# get variables
nclouds/use_node_script.sh "/recognize/$EnvironmentType/"
source ssm_source
./infrastructure/ensure_deploy_container_instance.rb $EnvironmentType
# NOTE: This is old. Precompile is now a separate Fargate task in the run_deployment method of jenkins_deployment.sh
# (I think...)
# compile assets - move assets to efs
# RAILS_ENV=production RAILS_DEPLOY_SCRIPT=true bundle exec rake assets:precompile --trace 2>&1 | sed -e 's/^/PRECOMPILING: /;' &
# (RAILS_ENV=production RAILS_DEPLOY_SCRIPT=true bundle exec rake assets:precompile --trace && rsync -va --ignore-existing /usr/src/app/tmp/assets/ /usr/src/app/public/assets/ ) 2>&1 | sed -e 's/^/PRECOMPILING: /;' &
# compile non-digested assets
# ex. Favicon and ajax-loader-company.gif
# RAILS_ENV=production RAILS_DEPLOY_SCRIPT=true bundle exec rake assets:non_digested
# compile themes
# Long-running steps run in the background with a sed that tags each
# output line so interleaved logs stay attributable; 'wait' barriers below.
RAILS_ENV=production RAILS_DEPLOY_SCRIPT=true bundle exec rake recognize:compile_themes --trace 2>&1 | sed -e 's/^/THEMECOMPILATION: /;' &
PATH="$(gem env gemdir)/bin:$PATH"
bin/aws/sync_cron.rb $EnvironmentType 2>&1 | sed -e 's/^/CRONSYNC: /;' &
echo "debug release info"
echo $CURRENT_RELEASE_INFO
echo "debug EFS start"
df -h
echo "debug EFS end"
# rsync files from stashed public directory back into new public dir which is EFS
rsync -azh --stats /usr/src/app/public-orig/ /usr/src/app/public 2>&1 | sed -e 's/^/RSYNC: /;' &
# refresh CMS cache
# Just using sales queue because its mostly unused
RAILS_ENV=production bundle exec rails r 'CmsManager.delay(queue: "sales").reset_page_caches' 2>&1 | sed -e 's/^/CMSCACHERESET: /;' &
# run migrations
echo "Starting database migrations in process"
RAILS_ENV=production bundle exec rake db:migrate --trace
# trim db sessions
echo "Starting database session trim in process"
RAILS_ENV=production bundle exec rake db:sessions:trim
echo "start_deployment.sh has reached the end - waiting for background processes to finish"
wait # for bg processes to finish
# successful termination
exit 0
| true |
424428e1de49a313651e7737f4e97ae9e46e6597 | Shell | yshinkarev/my-bash | /db/_load_pg_props.sh | UTF-8 | 217 | 3.3125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Resolve the DB properties file: honor an inherited DB_PROP_FILE, else
# default to db.properties next to this script. Bail out loudly when the
# file is missing (the caller sources it on the following line).
if [ -z "${DB_PROP_FILE}" ]; then
    DB_PROP_FILE=$(dirname "$0")/db.properties
fi
# Fix: the -f test was unquoted and broke on paths containing spaces.
if [ ! -f "${DB_PROP_FILE}" ]; then
    >&2 echo "Missing DB properties file"
    exit 1
fi
source ${DB_PROP_FILE} | true |
31c5eedd55d322314b3c2816ae67c484eedd2f25 | Shell | cultab/dotfiles | /polybar/bin/polybar_start | UTF-8 | 627 | 3 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/sh
# Discover network interfaces and export the wireless/wired names for
# polybar's network modules (word-splitting of the pipeline is intended).
for If in $(ip addr | grep "^[0-9]" | cut -d ':' -f 2 | sed "s/\s*//g"); do
    case "$If" in
        wl*)
            export WIRELESS_INTERFACE="$If"
            ;;
        en*)
            export WIRED_INTERFACE="$If"
            ;;
        *)
            echo "Ignoring interface $If"
            ;;
    esac
done
# One bar per connected monitor when xrandr is available, else one bar.
# Fix: 'type xrandr' printed its lookup result to stdout; the POSIX
# 'command -v' with discarded output performs the same check silently.
if command -v xrandr > /dev/null 2>&1; then
    for m in $(xrandr --query | grep " connected" | cut -d" " -f1); do
        MONITOR=$m polybar --reload mybar &
    done
else
    polybar --reload mybar &
fi
# System-tray bar; a detached helper hides it 5 seconds after startup.
polybar --reload traybar -c ~/.config/polybar/traybar.ini &
pid=$!
sh -c "sleep 5; polybar-msg -p $pid cmd hide" &
| true |
73b5b91b356a640b9ef6c0b1413644edc7895b67 | Shell | deirde/sws_scripts | /databaseLocalImport.sh | UTF-8 | 1,423 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Interactive local-DB import: shows the target connection settings from
# _config.sh, asks for confirmation, then replaces the local schema with
# the contents of ./import-me.sql.
source ./_config.sh
echo "-------------------- SETUP, CHECK IT BEFORE CONFIRM --------------------"
echo "DB_LC_HOST:" $DB_LC_HOST
echo "DB_LC_NAME:" $DB_LC_NAME
echo "DB_LC_UID:" $DB_LC_UID
echo "DB_LC_PSW:" $DB_LC_PSW
echo "------------------------------------------------------------------------"
echo
echo "Attention, the database data will be overwritten!"
echo "Before proceeding further drop your database dump named <import-me.sql> in the same folder of this script."
echo "Note: the file will be deleted after the import."
read -p "Are you sure to do this? [Y/n]"
echo
if [ "$REPLY" == "Y" ]; then
    if [ ! -f import-me.sql ]; then
        echo "The file <import-me.sql> doesn't exist! I can't proceed further."
    else
        # Normalize collations/charsets the local MySQL may not support.
        sed -i 's/utf8mb4_unicode_520_ci/utf8_general_ci/g' import-me.sql
        sed -i 's/utf8_general_ci_unicode_ci/utf8_general_ci/g' import-me.sql
        # NOTE(review): the next substitution replaces utf8_general_ci with
        # itself (a no-op) -- possibly utf8_unicode_ci was intended; verify.
        sed -i 's/utf8_general_ci/utf8_general_ci/g' import-me.sql
        sed -i 's/utf8mb4/utf8/g' import-me.sql
        # Drop all existing tables, then import the dump.
        # NOTE(review): passing the password on the command line exposes
        # it to other local users via the process list.
        mysqldump -u${DB_LC_UID} -p${DB_LC_PSW} -h ${DB_LC_HOST} --add-drop-table --no-data ${DB_LC_NAME} | grep ^DROP | mysql -u${DB_LC_UID} -p${DB_LC_PSW} -h ${DB_LC_HOST} ${DB_LC_NAME}
        mysql -u $DB_LC_UID -p$DB_LC_PSW $DB_LC_NAME < import-me.sql
        rm -f import-me.sql
        echo "It's done. The data has been imported successfully."
    fi
else
    echo "Nothing has been done, bye bye.";
fi
| true |
9368495836284c291231e7cabe3064a9c8fb6d92 | Shell | telosprotocol/TOP-chain | /src/programs/topio/scripts/install.sh | UTF-8 | 5,127 | 3.859375 | 4 | [] | no_license | #!/bin/bash
#
# install - install a program, script, or datafile
#
# topio: install.sh, 2020-05-25 13:00 by smaug
#
# Copyright TOPNetwork Technology
#
# Permission to use, copy, modify, distribute, and sell this software and its
# documentation for any purpose is hereby granted without fee, provided that
# the above copyright notice appear in all copies and that both that
# copyright notice and this permission notice appear in supporting
# documentation, and that the name of M.I.T. not be used in advertising or
# publicity pertaining to distribution of the software without specific,
# written prior permission. M.I.T. makes no representations about the
# suitability of this software for any purpose. It is provided "as is"
# without express or implied warranty.
#
# This script is compatible with the BSD install script, but was written
# from scratch.
# Must run as root; afterwards recover the *invoking* user's name so that
# per-user paths (topio_home below) point at the real user, not root.
pwd
username=`whoami`
if [ $username != 'root' ]; then
    echo "Permission denied, please retry with root"
    # NOTE(review): 'return' only works when this script is sourced; when
    # executed directly it errors and execution continues. 'exit 1' was
    # probably intended (the same pattern recurs throughout this script).
    return
fi
if [ $SUDO_USER ]
then
    echo "sudo user"
    username=$SUDO_USER
else
    echo "whoami"
    username=`whoami`
fi
# echo_and_run CMD [ARG...]
# Print the exact command line, then execute it, propagating its status.
echo_and_run() {
    echo "$*"
    "$@"
}
topio_name='topio'
# First guess the distro from /etc/os-release, then override with a
# simpler probe: presence of yum => CentOS, otherwise assume Ubuntu.
osinfo=`awk -F= '/^NAME/{print $2}' /etc/os-release |awk -F ' ' '{print $1}' |awk -F '\"' '{print $2}'`
osname=`uname`
# using more simple way
which yum
if [ $? != 0 ]; then
    osinfo="Ubuntu"
else
    osinfo="CentOS"
fi
ubuntu_os="Ubuntu"
centos_os="CentOS"
centos_os_version=`cat /etc/os-release |grep CENTOS_MANTISBT_PROJECT_VERSION |awk -F '"' '{print $2}' `
osname_linux="Linux"
osname_darwin="Darwin"
ntpd_path="/usr/sbin/ntpd"
ntpd_service="ntp.service"
# init_os_config: raise the open-files limit system-wide by removing any
# previous 'ulimit -n' line from /etc/profile and appending a fresh one.
init_os_config() {
    sed -i "/ulimit\s-n/d" /etc/profile
    sed -i '$a\ulimit -n 65535' /etc/profile
    source /etc/profile
    echo "set ulimit -n 65535"
}
# Distro-specific setup: raise ulimit and install the right time daemon
# (ntp on Ubuntu / CentOS 7, chrony on CentOS 8).
if [ $osinfo = ${ubuntu_os} ]
then
    echo "Ubuntu"
    init_os_config
    ntpd_service="ntp.service"
    # Only install ntp when its daemon binary is not present yet.
    if [ ! -f "$ntpd_path" ]; then
        echo "prepare topio runtime environment, please wait for seconds..."
        echo_and_run echo "apt update -y > /dev/null 2>&1" | bash
        echo_and_run echo "apt update -y > /dev/null 2>&1" | bash
        echo_and_run echo "apt install -y ntp > /dev/null 2>&1" | bash
        if [ $? != 0 ]; then
            echo "install ntp failed"
            # NOTE(review): 'return' is invalid at top level of an executed
            # script; 'exit 1' was probably intended (applies below too).
            return
        fi
    fi
elif [ $osinfo = ${centos_os} ]
then
    echo "Centos"
    init_os_config
    if [ $centos_os_version = 7 ]; then
        ntpd_service="ntpd.service"
        if [ ! -f "$ntpd_path" ]; then
            echo "prepare topio runtime environment, please wait for seconds..."
            echo_and_run echo "yum update -y > /dev/null 2>&1" | bash
            echo_and_run echo "yum install -y ntp > /dev/null 2>&1" | bash
            if [ $? != 0 ]; then
                echo "install ntp failed"
                return
            fi
        fi
    elif [ $centos_os_version = 8 ]; then
        ntpd_service="chronyd.service"
        echo_and_run echo "yum -y install chrony > /dev/null 2>&1" | bash
    else
        echo "Not Support Centos-Version:$centos_os_version"
        return
    fi
else
    echo "unknow osinfo:$osinfo"
fi
# Make sure the chosen time service is enabled and running, and that the
# topio binary is present in the current directory, before installing.
ntpd_status=`systemctl is-active $ntpd_service`
if [ $ntpd_status = "active" ]; then
    echo "ntp service already started"
else
    echo_and_run echo "systemctl enable $ntpd_service" | bash
    run_status=$?
    if [ $run_status != 0 ]; then
        echo "enable $ntpd_service failed: $run_status"
        # NOTE(review): top-level 'return' -- see note earlier in this file.
        return
    fi
    echo_and_run echo "systemctl start $ntpd_service" | bash
    run_status=$?
    if [ $run_status != 0 ]; then
        echo "start $ntpd_service failed: $run_status"
        return
    fi
fi
if [ ! -x "$topio_name" ]; then
    echo "not found $topio_name"
    return
fi
# Copy the binary into /usr/bin, register the safebox systemd service,
# and start the daemon.
INSTALL_TOPIO_BIN_PATH="/usr/bin/"
mkdir -p ${INSTALL_TOPIO_BIN_PATH}
if [ $osname = ${osname_linux} ]; then
    echo_and_run echo "cp -f $topio_name ${INSTALL_TOPIO_BIN_PATH}" |bash
elif [ $osname = ${osname_darwin} ]; then
    echo_and_run echo "cp -f $topio_name ${INSTALL_TOPIO_BIN_PATH}" |bash
fi
ldconfig
# register topio as service
echo $username
# Safebox state lives under the invoking user's home directory.
if [ $username = "root" ]
then
    topio_home=/$username/topnetwork
else
    topio_home=/home/$username/topnetwork
fi
echo $topio_home
echo ""
echo "############now will register topio as service##############"
echo ""
TOPIO_SAFEBOX_SERVICE="
[Unit]
Description=the cpp-topnetwork command line interface
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=forking
Environment=TOPIO_HOME=$topio_home
PIDFile=$topio_home/safebox.pid
ExecStart=/usr/bin/topio node safebox
PrivateTmp=true
[Install]
WantedBy=multi-user.target"
#printf '%s\n' "$TOPIO_SAFEBOX_SERVICE"
printf '%s\n' "$TOPIO_SAFEBOX_SERVICE" | tee /lib/systemd/system/topio-safebox.service
systemctl enable topio-safebox.service
timedatectl set-timezone UTC
which $topio_name
if [ $? != 0 ]; then
    echo "install topio failed"
    return
fi
echo "install $topio_name done, good luck"
echo "now run command to check md5: topio -v"
echo "now run command for help info: topio -h"
# Launch the safebox daemon immediately.
topio node safebox
| true |
74c0db0c59aae2d31f1ee7e34d6bd567d710aa1b | Shell | tarantool/luacheck | /scripts/dedicated_coverage.sh | UTF-8 | 3,150 | 3.375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -eu
set -o pipefail
# Collects test coverage for luacheck modules with associated spec files.
# Runs spec files from the arguments or all spec files.
# Each module can be covered only from its own spec file.
# Should be executed from root Luacheck directory.
# Map: spec file -> the single module that spec is allowed to cover.
declare -A spec_to_module
spec_to_module[spec/bad_whitespace_spec.lua]=src/luacheck/stages/detect_bad_whitespace.lua
spec_to_module[spec/cache_spec.lua]=src/luacheck/cache.lua
spec_to_module[spec/check_spec.lua]=src/luacheck/check.lua
spec_to_module[spec/config_spec.lua]=src/luacheck/config.lua
spec_to_module[spec/decoder_spec.lua]=src/luacheck/decoder.lua
spec_to_module[spec/empty_blocks_spec.lua]="src/luacheck/stages/detect_empty_blocks.lua"
spec_to_module[spec/expand_rockspec_spec.lua]=src/luacheck/expand_rockspec.lua
spec_to_module[spec/filter_spec.lua]=src/luacheck/filter.lua
spec_to_module[spec/format_spec.lua]=src/luacheck/format.lua
spec_to_module[spec/fs_spec.lua]=src/luacheck/fs.lua
spec_to_module[spec/globbing_spec.lua]=src/luacheck/globbing.lua
spec_to_module[spec/luacheck_spec.lua]=src/luacheck/init.lua
spec_to_module[spec/lexer_spec.lua]=src/luacheck/lexer.lua
spec_to_module[spec/cli_spec.lua]=src/luacheck/main.lua
spec_to_module[spec/options_spec.lua]=src/luacheck/options.lua
spec_to_module[spec/parser_spec.lua]=src/luacheck/parser.lua
spec_to_module[spec/serializer_spec.lua]=src/luacheck/serializer.lua
spec_to_module[spec/cyclomatic_complexity_spec.lua]=src/luacheck/stages/detect_cyclomatic_complexity.lua
spec_to_module[spec/globals_spec.lua]=src/luacheck/stages/detect_globals.lua
spec_to_module[spec/reversed_fornum_loops_spec.lua]=src/luacheck/stages/detect_reversed_fornum_loops.lua
spec_to_module[spec/unbalanced_assignments_spec.lua]=src/luacheck/stages/detect_unbalanced_assignments.lua
spec_to_module[spec/uninit_accesses_spec.lua]=src/luacheck/stages/detect_uninit_accesses.lua
spec_to_module[spec/unreachable_code_spec.lua]=src/luacheck/stages/detect_unreachable_code.lua
spec_to_module[spec/unused_fields_spec.lua]=src/luacheck/stages/detect_unused_fields.lua
spec_to_module[spec/unused_locals_spec.lua]=src/luacheck/stages/detect_unused_locals.lua
spec_to_module[spec/linearize_spec.lua]=src/luacheck/stages/linearize.lua
spec_to_module[spec/resolve_locals_spec.lua]=src/luacheck/stages/resolve_locals.lua
spec_to_module[spec/standards_spec.lua]=src/luacheck/standards.lua
spec_to_module[spec/utils_spec.lua]=src/luacheck/utils.lua
if [ $# -eq 0 ]; then
  # No arguments: run every known spec.
  # NOTE(review): inside the here-string the keys expand to a single
  # space-separated line, so "sort" effectively sees one line -- confirm
  # whether per-spec sorting was intended here.
  specs="$(sort <<< "${!spec_to_module[@]}")"
else
  specs="$@"
fi
{
  # Header row; the whole group output is piped through column -t below.
  echo Spec Module Hits Missed Coverage
  for spec in $specs; do
    if [ -v spec_to_module[$spec] ]; then
      module="${spec_to_module[$spec]}"
      # Start from a clean slate so luacov only sees this spec's run.
      rm -f luacov.stats.out
      rm -f luacov.report.out
      echo "busted -c $spec" >&2
      # "|| true": a failing spec must not abort the whole coverage sweep.
      busted -c "$spec" >&2 || true
      luacov
      echo -n "$spec "
      # Pull the module's coverage row; print a zero row when absent.
      grep -P "$module +[^ ]+ +[^ ]+ +[^ ]+" luacov.report.out || echo "$module 0 0 0.00%"
      echo >&2
    else
      echo "No associated module for spec $spec" >&2
    fi
  done
} | column -t
| true |
3c29b514c4dc38b8d9d155c16ef83bacc9fb274c | Shell | DaMSL/K3-Benchstack | /hdfs/docker/namenode_startup.sh | UTF-8 | 409 | 2.6875 | 3 | [] | no_license | #!/bin/bash
# Install the HDFS configuration shipped with the image.
cp /hadoop/config/core-site.xml /software/hadoop-2.5.1/etc/hadoop/
cp /hadoop/config/hdfs-site.xml /software/hadoop-2.5.1/etc/hadoop/
mkdir /hadoop/namenode/
# Format the namenode only on first run, i.e. while the directory is empty.
if [ -z "$(ls -A /hadoop/namenode/)" ]; then
  echo "Formatting namenode..."
  /software/hadoop-2.5.1/bin/hdfs namenode -format
else
  echo "Existing namenode data found... no need to format"
fi
# Run the namenode in the foreground (container entrypoint).
/software/hadoop-2.5.1/bin/hdfs namenode
| true |
dfd3c7a0b434832b74718187b1bf537d193a3361 | Shell | delkyd/alfheim_linux-PKGBUILDS | /jwm-git/PKGBUILD | UTF-8 | 2,160 | 2.640625 | 3 | [] | no_license | # Maintainer: Brian Bidulock <bidulock@openss7.org>
# Contributor: Corey Mwamba <contact.me@coreymwamba.co.uk>
# Arch Linux PKGBUILD for the git version of the JWM window manager.
pkgname=jwm-git
pkgver=s1636
pkgrel=2
pkgdesc="JWM is a light-weight window manager for the X11 Window System. Git version."
arch=('i686' 'x86_64')
url="http://joewing.net/projects/jwm/"
license=('MIT')
groups=('x11')
provides=('jwm')
conflicts=('jwm' 'jwm-snapshot' 'jwm-flashfixed' 'jwm-snapshot-lite')
depends=('libx11' 'libxft' 'libjpeg>=7' 'libxpm' 'libxinerama' 'libpng' 'cairo' 'librsvg' 'fribidi')
backup=('etc/system.jwmrc')
makedepends=('git')
source=("$pkgname::git+https://github.com/joewing/jwm.git"
        jwm.desktop)
md5sums=('SKIP'
         'ad898472f7538ffc3ff511c055fee535')
# Derive the package version from the checked-out git revision.
pkgver() {
  cd $pkgname
  # Use the tag of the last commit
  git describe --always | sed 's|-|.|g'
}
# Refresh the bundled autotools helper files before autoreconf.
prepare() {
  cd $pkgname
  /usr/bin/cp -f /usr/share/automake-1.15/config.guess .
  /usr/bin/cp -f /usr/share/automake-1.15/config.sub .
  /usr/bin/cp -f /usr/share/automake-1.15/install-sh .
  /usr/bin/cp -f /usr/share/gettext/config.rpath .
  /usr/bin/cp -f /usr/share/gettext/po/Makefile.in.in po/
  autoreconf
}
build() {
  cd $pkgname
  ./configure --prefix=/usr --sysconfdir=/etc
  # --disable-icons disable icon support
  #--disable-png disable PNG images
  #--disable-cairo disable Cairo support
  #--disable-rsvg disable rsvg support
  #--disable-jpeg disable JPEG images
  #--disable-xft disable Xft
  #--disable-xrender disable XRender
  #--disable-fribidi disable bi-directional unicode support
  #--disable-xpm disable XPM images
  #--disable-shape disable use of the X shape extension
  #--disable-xmu disable Xmu support
  #--disable-xinerama disable Xinerama support
  #--disable-nls do not use Native Language Support
  make V=0
}
# Install binaries, man pages, the desktop session entry and the license.
package() {
  cd $pkgname
  make BINDIR="$pkgdir/usr/bin" MANDIR="$pkgdir/usr/share/man" \
    DESTDIR="$pkgdir" SYSCONF="$pkgdir/etc" \
    mkdir_p="/usr/bin/mkdir -p" install
  install -Dm644 "$srcdir/jwm.desktop" "$pkgdir/usr/share/xsessions/jwm.desktop"
  install -Dm644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
| true |
986d0479bcb5f5288c3a32f6c011271c646afa66 | Shell | StarryWisdom/eastern-front-mod | /make.bash | UTF-8 | 894 | 3.8125 | 4 | [] | no_license | #!/bin/bash -e
rm -rf _build
check_tmp () {
xmllint ./dat/vesselData.xml > /dev/null
../../artemis-check.py --no-warn
}
# assumes the file location is good
# it really should check that, but that will be a long bit of code :/
# sorry if you edit it and it decides to zip your home directory
# assumes PWD is inside where the zip will be made
# $1 = zip name
create_zip () {
zipName="$1"
cd ..
zip -qr "$zipName".zip "$zipName"
}
# checks and creates the zip being created at $1
check_and_create () {
dir="$1"
old_dir="$PWD"
cd "$dir"
check_tmp
create_zip "`basename "$1"`"
version=`cat "$old_dir/src/version"`
convert "$old_dir/src/logo512.png" -gravity North -pointsize 45 -fill white -annotate +200+100 "$version" "EF MOD/dat/logo512.png"
cd "$old_dir"
}
build_loc="_build/EF MOD"
mkdir -p "$build_loc"
rsync -aWSH mod-files/ "$build_loc"
check_and_create "$build_loc"
| true |
e4ceece24a58f8a952bf39609836398e5537bdb8 | Shell | agcilantro/ofbiz | /bootstrap.sh | UTF-8 | 1,151 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env bash
# installs webupd8team repository to install java jdk
echo "deb http://ppa.launchpad.net/webupd8team/java/ubuntu precise main" | tee /etc/apt/sources.list.d/webupd8team-java.list
echo "deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu precise main" | tee -a /etc/apt/sources.list.d/webupd8team-java.list
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys EEA14886
apt-get update
# Pre-accept the Oracle license so the installer can run non-interactively.
echo oracle-java7-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
apt-get install -y oracle-java7-installer
apt-get install -y apache2
apt-get install -y git
# apt-get install -y php5
# The /var/www symlink doubles as a "first provision" marker: when absent,
# link the Vagrant share and perform the one-time OFBiz setup below.
if ! [ -L /var/www ]; then
  rm -rf /var/www
  ln -fs /vagrant /var/www
# apt-get install -y oracle-java7-installer
  # clone, install, load data, build and start ofbiz
  git clone --recursive https://github.com/apache/ofbiz.git ofbiz
  # change ownership of ofbiz folder to vagrant:ofbiz, chmod ofbiz folder to 700
  sudo addgroup ofbiz
  sudo adduser vagrant ofbiz
  sudo chown -R vagrant:ofbiz ofbiz
  sudo chmod 700 ofbiz
  cd ofbiz
  ./ant
  ./ant load-demo
  ./ant load-extseed
  ./ant start
fi
| true |
603eeee625535f35af110364117c08d69e700c7f | Shell | mehrdad-shokri/Scriptology | /BASH/workfiles/19-ForLoop with range.sh | UTF-8 | 137 | 2.890625 | 3 | [
"CC0-1.0"
] | permissive | #!/bin/bash
# Demonstrate generating a range of IPv4 addresses with a counting loop.
echo "Using for-loop & range {start...end} Example, generate IP addresses"
# C-style arithmetic for-loop: equivalent to iterating over {1..254}.
for ((i = 1; i <= 254; i++)); do
  echo 192.169.8.$i
done
| true |
1b1a83fb093cf2c34e5b23022b267765cef2be99 | Shell | mingjunyang/generate_qt_project_rust | /qtcreator_rust_project_gen.sh | UTF-8 | 944 | 3.703125 | 4 | [] | no_license | #!/usr/bin/env bash
# Generate the Qt Creator "generic project" files (.creator/.files/.cflags/
# .cxxflags/.includes/.config) for the Rust workspace passed as $1, then
# index all .rs/.toml/.md/.glsl files into the .files list (requires `fd`).
workspace=""
project_name=""
if [[ $1 == "" ]];
then
    echo "no project workspace"
    exit 1
else
    workspace=$1
fi
# NOTE(review): this branch runs when $2 IS provided, and its result is
# unconditionally overwritten once the workspace is entered below --
# confirm the intent; it appears to be dead code.
if [[ $2 ]];
then
    project_name=$(basename $(pwd))
fi
if [[ -d $workspace ]]; then
    cd $workspace
    # The project is named after the workspace directory itself.
    project_name="$(basename $(pwd))"
    echo $project_name
    touch "$project_name".creator
    touch "$project_name".files
    echo "-std=c17" >> "$project_name".cflags
    echo "-std=c++17" >> "$project_name".cxxflags
    touch "$project_name".includes
    touch "$project_name".config
else
    echo "$workspace not a workspace"
    exit 1
fi
# Collect source files with fd (run from inside the workspace).
rs_file=$(fd "\.rs$")
toml_file=$(fd "\.toml$")
md_file=$(fd "\.md$")
glsl_file=$(fd "\.glsl$")
for item in $rs_file; do
    echo $item >> "$project_name".files
done
for item in $toml_file; do
    echo $item >> "$project_name".files
done
for item in $md_file; do
    echo $item >> "$project_name".files
done
for item in $glsl_file; do
    echo $item >> "$project_name".files
done
| true |
b1974b5119aca64dfa783f42bd12cbfbfb741016 | Shell | charltonaustin/dotfiles | /bin/take_a_break | UTF-8 | 689 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Pop up a macOS dialog offering a break; when the user picks "5" or "15",
# log the break and schedule an alarm plus a fresh pomodoro that many
# minutes from now (via the external delayed_mac helper).
# Fix: the two original branches were byte-for-byte identical apart from
# the minute count, so they are factored into one helper.

# Log the break and schedule the follow-up actions.
# $1 - break length in minutes
take_break() {
	mkdir -p "$HOME/dev/work"
	touch "$HOME/dev/work/daily_pomodoro.txt"
	# $(date) is deliberately left unquoted to keep the original log format
	# (runs of spaces in date's output collapse to single spaces).
	echo $(date) "Taking a break for $1 mins" >> "$HOME/dev/work/daily_pomodoro.txt"
	delayed_mac "$1" play_alarm
	delayed_mac "$1" pomodoro
}

# Ask via AppleScript; $x receives the label of the pressed button.
x=$(/usr/bin/osascript <<EOT
tell application "Finder"
	activate
	set myReply to button returned of (display dialog "Take a break" default button 1 buttons {"Cancel", "5", "15"})
end tell
EOT
)
echo 'Button is: '$x
# "Cancel" (or a closed dialog) matches neither arm and does nothing,
# exactly as before.
case "$x" in
	5|15) take_break "$x" ;;
esac
| true |
b6d0c0ccf42a33813c1a56366982341546de7f9b | Shell | Tamilarasanonline/Sample | /bin/compile | UTF-8 | 4,082 | 3.203125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# bin/compile <build-dir> <cache-dir>
# Cloud Foundry/Heroku-style buildpack compile step: stages the app into
# public/, provisions JDK 8 and Apache TomEE Plus (from cache or network),
# installs the buildpack's Tomcat config, refreshes the cache, and deploys
# any *.war files into the TomEE webapps directory.
echo "Compile started"
shopt -s dotglob
set -e
# ------------------------------------------------------------------------------------------------
# compile_build_dir: /tmp/staged/app
# compile_cache_dir: /tmp/cache
# compile_buildpack_bin: /tmp/buildpacks/{project-name}/bin
# compile_buildpack_dir: /tmp/buildpacks/{project-name}
# pwd: /tmp/staged
#------------------------------------------------------------------------------------------------------
compile_build_dir=$1
compile_cache_dir=$2
compile_buildpack_dir=$(cd $(dirname $0) && cd .. && pwd)
compile_buildpack_bin=$compile_buildpack_dir/bin
echo "pwd: $(pwd)"
echo "compile_build_dir: $compile_build_dir"
echo "compile_cache_dir: $compile_cache_dir"
echo "compile_buildpack_bin: $compile_buildpack_bin"
echo "compile_buildpack_dir: $compile_buildpack_dir"
cd $compile_build_dir
# -----------------------------------creating public directory under cache directory-------------------------------------------------------------
# Move everything the app shipped into public/ (via the cache dir as a
# temporary holding area), keeping the Procfile at the top level.
mkdir -p $compile_cache_dir/public
mv * $compile_cache_dir/public
mv $compile_cache_dir/public .
[[ -f public/Procfile ]] && mv public/Procfile .
#jdk8
# Provision JDK 8: already present > cached copy > download from S3.
if [ -d jdk1.8.0_25 ]; then
  echo "-----> jdk1.8.0_25 folder found, moving along."
else
  echo -n "-----> jdk1.8.0_25 folder not found! "
  if [[ -d "$compile_cache_dir/jdk1.8.0_25" ]]; then
    echo -n "Copying jdk from cache to app... "
    cp -r "$compile_cache_dir/jdk1.8.0_25" "$compile_build_dir"
    echo "Done!"
  else
    echo -n "-----> Extracting (jdk1.8.0_25)....."
    curl --max-time 180 --location "https://s3.amazonaws.com/covisintrnd.com-software/jdk-8u25-linux-x64.gz" | tar xz
    cd "$compile_build_dir"
    chmod -R uog+rx $compile_build_dir/jdk1.8.0_25
    echo "Done!"
  fi
fi
#jdk8 - end
# Tomee plus - tomeeplus
# Provision TomEE Plus the same way: present > cached > downloaded.
if [ -d apache-tomee-plus-1.7.1 ]; then
  echo "-----> Apache Tomee folder found, moving along."
else
  echo -n "-----> Apache Tomee foldernot found! "
  if [[ -d "$compile_cache_dir/apache-tomee-plus-1.7.1" ]]; then
    echo -n "Copying apache-tomee-plus-1.7.1 from cache to app... "
    cp -r "$compile_cache_dir/apache-tomee-plus-1.7.1" "$compile_build_dir"
    echo "Done!"
  else
    echo -n "-----> Installing (Apache Tomee Server)....."
    wget http://apache.openmirror.de/tomee/tomee-1.7.1/apache-tomee-1.7.1-plus.tar.gz && tar xvzf apache-tomee-1.7.1-plus.tar.gz && rm -rf apache-tomee-1.7.1-plus.tar.gz
    cd $compile_build_dir/apache-tomee-plus-1.7.1
    tomee_build_dir=$compile_build_dir/tomeebuild
    #compile the source
    make
    echo "TomEE has been compiled successfully..."
    #install
    make install
    echo "TomEE has been installed successfully..."
    chmod -R uog+rx $compile_build_dir/tomeebuild
    echo "Done!"
  fi
fi
#apache - Tomee - end
export JAVA_HOME="$compile_build_dir/jdk1.8.0_25"
export PATH="$JAVA_HOME/bin:$PATH"
cd $compile_build_dir
# -------------------------------copying build pack Tomee end-----------------------------------------------------------------
cp $compile_buildpack_bin/boot.sh .
# -------------------------------copying webagent conf -----------------------------------------------------------------
#logging properties which having without ConsoleHandler
cp -f $compile_buildpack_dir/conf/logging.properties $compile_build_dir/apache-tomee-plus-1.7.1/conf/logging.properties
#copying server.xml from buildpack to tomcat conf directory
cp -f $compile_buildpack_dir/conf/server.xml $compile_build_dir/apache-tomee-plus-1.7.1/conf/server.xml
cd $compile_build_dir
# Refresh the cache with the freshly provisioned JDK and TomEE.
rm -rf "$compile_cache_dir/jdk1.8.0_25"
cp -r jdk1.8.0_25 "$compile_cache_dir/jdk1.8.0_25"
rm -rf "$compile_cache_dir/apache-tomee-plus-1.7.1"
cp -r apache-tomee-plus-1.7.1 "$compile_cache_dir/apache-tomee-plus-1.7.1"
# Deploy all war files
WEBAPPDIR=$compile_build_dir/apache-tomee-plus-1.7.1/webapps
cd $compile_build_dir/public
for WAR in *.war; do
  [ -r "$WAR" ] || continue
  echo "Deploying $WAR"
  mv "$WAR" "$WEBAPPDIR"
done
| true |
c8580ad50623d4826972c4ee77bb332c12053e0e | Shell | zzhh123/ops-dockerfile | /tomcat9-jolokia-centos7/create-docker.sh | UTF-8 | 557 | 3.140625 | 3 | [] | no_license | #!/bin/sh
set -e
# Build the bolonzhang/tomcat9-jolokia image: stage the helper scripts,
# wars and xml config into ttt/m8tmp, generate a Dockerfile that copies
# them in, then run docker build.
CONTAINER_USER="${CONTAINER_USER:-root}"
BASE_DIR=`pwd`
TMP_DIR_BASE=ttt
TMP_DIR=$BASE_DIR/$TMP_DIR_BASE/m8tmp
mkdir -p $TMP_DIR
cp docker*.sh $TMP_DIR
cp install*.sh $TMP_DIR
cp *.war $TMP_DIR
cp *.xml $TMP_DIR
# Unquoted EOF: $TMP_DIR_BASE and ${CONTAINER_USER} expand while the
# Dockerfile is written.
cat > Dockerfile <<EOF
FROM tomcat:9-jre8-alpine
USER root
COPY $TMP_DIR_BASE /tmp/
RUN /bin/sh /tmp/m8tmp/install-root.sh
WORKDIR /opt
USER ${CONTAINER_USER}
ENTRYPOINT ["/tmp/m8tmp/docker-entrypoint.sh"]
EOF
docker build --no-cache -t bolonzhang/tomcat9-jolokia-centos7:jdk8-tomcat9-jolokia .
#rm -fr $TMP_DIR_BASE
| true |
b9c1b275eb65c0326a49408566dacecc01fc6267 | Shell | postgres-ai/nancy | /nancy | UTF-8 | 688 | 4 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
#
# 2018–2019 © Postgres.ai
#
# A wrapper for all Nancy CLI commands.
# Usage: use 'nancy help' for help.
#
# Dispatches "nancy <command> [args...]" to the sibling script
# "nancy_<command>.sh" (first hyphen mapped to an underscore), quoting each
# forwarded argument and finally eval-ing the assembled command line.
#
DEBUG=0
cmd=""
case "$1" in
  help )
    source ${BASH_SOURCE%/*}/help/help.sh "nancy"
    exit 1;
  ;;
  * )
    # NOTE(review): only the FIRST "-" is replaced here, so commands with
    # several hyphens keep the later ones -- confirm that is intended.
    word="${1/-/_}"
    if [[ ! -f "${BASH_SOURCE%/*}/nancy_$word.sh" ]]; then
      >&2 echo "ERROR: Unknown command: $word."
      >&2 echo "Try 'nancy help'"
      exit 1
    fi
    cmd="${BASH_SOURCE%/*}/nancy_$word.sh"
    shift;
  ;;
esac
while [ -n "$1" ]; do
  if [ "$1" == "--debug" ]; then
    DEBUG=1
  fi
  # Pass long options ("--foo") through verbatim; quote everything else.
  # The expansion yields the first two characters of $1; it is now quoted,
  # because unquoted it produced a test syntax error ("[: =: unary operator
  # expected") whenever an argument was shorter than two characters.
  if [ "${1%"${1#??}"}" = '--' ]; then
    cmd="$cmd $1"
  else
    cmd="$cmd \"$1\""
  fi
  shift
done
[[ "$DEBUG" -eq "1" ]] && echo "CMD: $cmd"
eval "$cmd"
| true |
2a743cab3a6e4b06a9021ea529b8654a8d379a13 | Shell | Bashorun97/protonvpn-setup | /setup.sh | UTF-8 | 884 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# EndSARS
# EndPoliceBrutalityInNigeria
#
# Interactive installer for protonvpn-cli: asks which distro family is in
# use, installs OpenVPN + Python prerequisites with the matching package
# manager, then protonvpn-cli itself.
echo "Input the number of your Linux distro";
echo "1: Debian/Ubuntu/Linux Mint";
echo "2: ArchLinux/Manjaro";
echo "3: SUSE";
echo "4: Fedora/CentOS/RHEL";
read -r -p "What's your Linux distro?: " choice
# Validate directly on the raw answer.  The previous int_choice=$((choice+0))
# arithmetic could abort on mixed input such as "1a", and an unmatched
# answer left $distro unset, making every "-eq" test below fail with
# "integer expression expected".
case "$choice" in
	1|2|3|4) distro=$choice ;;
	*)
		echo "Invalid choice: $choice" >&2
		exit 1
		;;
esac
if [ "$distro" -eq 1 ]; then
	apt install -y openvpn dialog python3-pip python3-setuptools
	pip3 install protonvpn-cli
elif [ "$distro" -eq 2 ]; then
	pacman -S openvpn dialog python-pip python-setuptools
	pip3 install protonvpn-cli
elif [ "$distro" -eq 3 ]; then
	zypper in -y openvpn dialog python3-pip python3-setuptools
	pip3 install protonvpn-cli
elif [ "$distro" -eq 4 ]; then
	dnf install -y openvpn dialog python3-pip python3-setuptools
	pip3 install protonvpn-cli
fi
protonvpn init | true |
ca9d54a7084b8f81079bb21ad6a5a135c8b6b746 | Shell | zhaolincheung/ip_frequency | /ipFrequency.sh | UTF-8 | 1,968 | 3.265625 | 3 | [] | no_license | #!/bin/sh
if [ "$1" != "" ] && [ "$2" != "" ]; then
logfile="/data3/im-log/*.webim.log.imp.$1"
interval=$2
else
echo "***Usage:sh ipFrequency.sh date interval."
echo "*****date format is YYYY-MM-DD."
echo "*****interval is the ip time interval with minitues as a basic unit."
echo "*****Eg:sh ipFrequency.sh 2013-05-12 5"
exit
fi
cat $logfile | grep sendMsgOk | grep -v "fromUserId=0" |
grep "spamReasons=\[\]" | gawk -F"\t" 'BEGIN{interval='$interval'}
function getIP(name){
split(name, arr, "=");
ip = arr[2];
sub(/^[[:blank:]]*/, "", ip);#ๅป้คๅทฆ็ฉบๆ ผ
sub(/[[:blank:]]*$/, "", ip);#ๅป้คๅณ็ฉบๆ ผ
return ip;
}
function getMins(time){ #ๅฐๆถ้ด่ฝฌๅไธบๅ้ๆฐ
split(time, tarr, ":");
return tarr[1]*60+tarr[2];
}{
mins = getMins($1);
ip = getIP($20);
mi[mins"_"ip] ++ ;
#ๅฐipๆ ๅฐๅฐๅ้ๆฐไธ
if(dict[mins] != 0){
if(index(dict[mins],ip) == 0){
dict[mins] = dict[mins]","ip;
}
}else{
dict[mins] = ip;
}
}END{
for(i = 0; i < 24 * 60; i++){
#็ป่ฎก่ฏฅๆถ้ด้ด้ๅ
็ip้ข็
for(j = i; (j < i + interval) && (j < 24 * 60); j++){
len = split(dict[j], ipArr, "," );#่ทๅ่ฏฅๅ้ๅฏนๅบ็ipๅ่กจ
for(k = 1; k <= len; k++) {
ip = ipArr[k];
fre[ip] += mi[j"_"ip];
iplist[ip] ++; #ๅฐ่ฏฅๆถ้ดๆฎตๅ
ๅบ็ฐ็ipๆพๅฐๆฐ็ปไธญ
}
}
#ๆดๆฐipๅฏนๅบ็ๆๅคง้ข็
for(k in iplist){
if(fre[k] > max[k]){
max[k] = fre[k];
mintime[k] = int(i/60)":"(i%60);
maxtime[k] = int((i + interval)/60)":"((i+interval)%60);
}
}
delete iplist;
delete fre;
}
#่พๅบipๆๅคง้ข็
for(k in max) {
printf("%s-%s\t%s\t%d\n", mintime[k], maxtime[k], k, max[k]);
}
}'
| true |
e80541357532653471f29bbf64ca876a2d1093c1 | Shell | mijime/dotfiles | /bin/csprint_plugin_google_calendar | UTF-8 | 1,660 | 3.5 | 4 | [] | no_license | #!/usr/bin/env bash
set -ueo pipefail
# csprint plugin: list Google Calendar events (or a one-line status view)
# for the current sprint window, via the beareq-oapi Google API client.
# Invoked as: <script> <subcommand> [--csprint-begin T --csprint-next-begin T ...]
enable_google_calendar=${CSPRINT_ENABLE_GOOGLE_CALENDAR:-1}
google_profile=${CSPRINT_GOOGLE_BEAREQ_PROFILE:-${GOOGLE_BEAREQ_PROFILE:-"default"}}
google_calendar_icon=${CSPRINT_GOOGLE_CALENDAR_ICON:-calendar}
# Subcommand "events:list": print the accepted/tentative events between
# the sprint's begin and next-begin timestamps.
cmd:events:list() {
  local csprint_begin csprint_next_begin args=()
  while [[ $# -gt 0 ]]; do
    case $1 in
      --csprint-begin)
        csprint_begin=$2
        shift 2
        ;;
      --csprint-next-begin)
        csprint_next_begin=$2
        shift 2
        ;;
      *)
        # NOTE(review): unrecognised arguments are collected into $args
        # but never forwarded -- confirm whether dropping them is intended.
        args=("${args[@]}" "$1")
        shift 1
        ;;
    esac
  done
  events:list \
    --time-min "${csprint_begin}" \
    --time-max "${csprint_next_begin}"
}
# Subcommand "status:list": same query, reduced to one ":icon: summary"
# line per event via awk.
cmd:status:list() {
  local csprint_begin csprint_next_begin args=()
  while [[ $# -gt 0 ]]; do
    case $1 in
      --csprint-begin)
        csprint_begin=$2
        shift 2
        ;;
      --csprint-next-begin)
        csprint_next_begin=$2
        shift 2
        ;;
      *)
        # NOTE(review): see cmd:events:list -- $args is never used.
        args=("${args[@]}" "$1")
        shift 1
        ;;
    esac
  done
  events:list \
    --time-min "${csprint_begin}" \
    --time-max "${csprint_next_begin}" |
    awk -F "\t" -v icon=":${google_calendar_icon}:" '$1!=""{print icon,$1}'
}
# Query the primary calendar and emit "summary<TAB>start<TAB>end" rows for
# events the current user accepted or tentatively accepted.
events:list() {
  beareq-oapi \
    --profile "${google_profile}" calendar.events.list \
    --calendar-id primary \
    --order-by startTime \
    --show-deleted false \
    --single-events true \
    --jq '.items[]|select(.attendees and (.attendees[]|select(.self)|(.responseStatus=="accepted" or .responseStatus=="tentative")))|[.summary,.start.dateTime,.end.dateTime]|@tsv' \
    "$@"
}
# Disabled via CSPRINT_ENABLE_GOOGLE_CALENDAR="": bail out early.
# NOTE(review): "return" at top level only works when this file is sourced.
if [[ -z ${enable_google_calendar} ]]; then
  return 0
fi
# Dispatch: first argument selects the cmd:* function to run.
cmd=$1
shift
"cmd:${cmd}" "$@"
| true |
324e1e172b5d8aab8ffa6249f74ddfb6a765967a | Shell | timurchak/QTimWidgets | /cmake/debian/postrm.in | UTF-8 | 327 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Return 0 (success) when the current user's name matches $1, 1 otherwise.
# Used below to restrict the post-removal actions to root.
check_for_user_f() {
	# "whoami" reads nothing from stdin, so the original "echo | whoami"
	# pipeline was needless; call it directly and compare with the
	# expected name.  The [ ... ] test's status is the function's status.
	[ "$(whoami)" = "$1" ]
}
set -e
# Post-removal hook body: refresh the dynamic linker cache, but only when
# running as root (the normal case for package maintainer scripts).
if check_for_user_f "root"; then
	ldconfig
	echo "root detected, performing actions . . ."
	echo "lib postrm done!"
fi
| true |
35638399952ba4ffb55abe9994906e53ec6b24ec | Shell | lxuegsu/Scripting | /bash/commands.sh | UTF-8 | 671 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Cheat-sheet/demo script: prints reminders of common commands, then runs
# small cut/sed examples so their output can be inspected.
echo "All commands in bash ;"
echo
echo "which bash // find current shell"
echo "whereis bash /// find current shell"
echo "chsh -s csh /// change shell"
echo "lp myfile /// print myfile"
# cut by byte (-b), character (-c), and delimited field (-d/-f).
echo "abc:defghi" | cut -b 1
echo "abd:defghi" | cut -c 1-3,5-9
echo
echo "abc:defgh:liu:1234:li" | cut -d : -f 1,4
echo "abc:defgh:liu:1234:li" | cut -f 1,2,4,3 -d :
echo "abc defgh liu 1234 li" | cut -f 1,2,4,3 -d :
# -s: suppress lines that contain no delimiter at all.
echo "abc defgh liu 1234 li" | cut -f 1,2,4,3 -d : -s
echo
# sed examples: substitution, delete-empty + double-space, double-space.
echo "abc defgh liu 1234 li" | sed 's/liu/abc/g'
echo "abc defgh liu 1234 li" | sed '/^$/d;G;G'
echo "abc defgh liu 1234 li" | sed G
echo
# pushd demo; NOTE(review): dir2 is assigned but never used.
dir1=/usr/local
dir2=/var/spool
pushd $dir1
| true |
f94ce0dbb3e7f51bd86c8a37e44ace88b2c1db3f | Shell | apnex/carbon | /apis.sh | UTF-8 | 589 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Fetch the VMware API catalogue, save the raw JSON to apis.json, then use
# jq to print id/name/version/doc-url records for the SWAGGER-typed APIs.
URL="https://apigw.vmware.com/v1/m4/api/dcr/rest/apix/apis"
RESPONSE=$(curl -k -X GET \
	-H "Content-Type: application/json" \
	"$URL" 2>/dev/null)
printf "%s\n" "${RESPONSE}" > apis.json
#printf "%s\n" "${RESPONSE}" | jq --tab '. | map(select(.api_ref_doc_type=="SWAGGER"))'
## build record structure
# read -d '': slurp the whole (tab-indent-stripped) heredoc into INPUTSPEC;
# the body is a jq filter, not shell code.
read -r -d '' INPUTSPEC <<-CONFIG
. | map(select(.api_ref_doc_type=="SWAGGER"))
| map({
"id": .id,
"name": .name,
"version": .version,
"api_ref_doc_url": .api_ref_doc_url
})
CONFIG
PAYLOAD=$(echo "$RESPONSE" | jq -r "$INPUTSPEC")
echo "${PAYLOAD}" | jq --tab .
| true |
b5c35211f2ba861518651884174ab0ca25975cc0 | Shell | mdcallag/mytools | /bench/splinterdb.vs.rocksdb/rb_splinter.sh | UTF-8 | 2,226 | 3.125 | 3 | [] | no_license | nr1=$1
# SplinterDB benchmark driver: runs a cached and an IO-bound insert
# workload, collecting vmstat/iostat/ps statistics and a write-amplification
# estimate for each run.
# Positional parameters (nr1=$1 is assigned above):
#   $2 row count (IO-bound run)   $3 database file path
#   $4 "yes" to set up numactl    $5 row (value) length in bytes
#   $6 cache size in GiB          $7/$8 lookup / range-lookup threads
#   $9 block device name used to filter iostat output
nr2=$2
dbfile=$3
usenuma=$4
rowlen=$5
cachegb=$6
nlook=$7
nrange=$8
devname=$9
keylen=20
opts="\
--max-async-inflight 0 \
--num-insert-threads 1 --num-lookup-threads $nlook --num-range-lookup-threads $nrange \
--db-capacity-gib 250 \
--key-size $keylen --data-size $rowlen \
--db-location $dbfile \
--cache-capacity-gib $cachegb \
"
# Stop any stat collectors left over from a previous run.
killall vmstat
killall iostat
# Start background collectors writing to $1.ps/$1.vm/$1.io.
function start_stats {
  pfx=$1
  # The xpid variables are global and read elsewhere
  while :; do date; ps aux | grep driver_test | grep -v grep | tail -1; sleep 10; done >& $pfx.ps &
  pspid=$!
  vmstat 1 >& $pfx.vm &
  vpid=$!
  iostat -y -mx 1 >& $pfx.io &
  ipid=$!
  echo forked $vpid and $ipid and $pspid
}
# Stop the collectors started by start_stats and summarise the run.
# $1 - output file prefix, $2 - number of rows inserted in this run
function stop_stats {
  pfx=$1
  nrows=$2
  # SplinterDB doesn't force data to disk. Do that here to get accurate write-amp estimate via iostat
  sync; sync; sync; sleep 60
  kill $vpid
  kill $ipid
  kill $pspid
  echo killed $vpid and $ipid and $pspid
  ls -lh $dbfile >> $pfx.res
  ls -lh $dbfile
  grep "\/second" $pfx.res | grep -v megabytes
  echo -e "count\tIOwGB\tIOwMB/s\tUwGB\tWamp"
  # Logical bytes written = (value + key) * rows, expressed in GiB.
  gbwritten=$( echo $rowlen $keylen $nrows | awk '{ printf "%.1f", (($1 + $2) * $3) / (1024*1024*1024.0) }' )
  grep $devname $pfx.io | awk '{ c += 1; wmb += $9 } END { printf "%s\t%.1f\t%.1f\t%.1f\t%.1f\n", c, wmb/1024.0, wmb / c, gbw, (wmb/1024.0)/gbw }' gbw=$gbwritten
}
# NOTE(review): $numactl is built here but not applied to the driver
# invocations below -- confirm whether that is intentional.
if [ $usenuma == "yes" ]; then numactl="numactl --interleave=all" ; else numactl="" ; fi
rm $dbfile
echo Cached at $( date )
start_stats bm.cached
echo "build/release/bin/driver_test splinter_test --perf $opts --num-inserts $nr1" > bm.cached.res
build/release/bin/driver_test splinter_test --perf $opts --num-inserts $nr1 >> bm.cached.res 2>&1
stop_stats bm.cached $nr1
#valgrind --leak-check=full --show-leak-kinds=all --track-origins=yes --verbose --log-file=bm.vg \
#valgrind --tool=massif \
#  build/release/bin/driver_test splinter_test --perf $opts --num-inserts $nr1
rm $dbfile
echo IO-bound at $( date )
start_stats bm.iobuf
echo "build/release/bin/driver_test splinter_test --perf $opts --num-inserts $nr2" > bm.iobuf.res
build/release/bin/driver_test splinter_test --perf $opts --num-inserts $nr2 >> bm.iobuf.res 2>&1
stop_stats bm.iobuf $nr2
| true |
8db08ed5982b52313cec90e3215bd4061d454ad3 | Shell | interstar/pyxtape | /crossfade_cat.sh | UTF-8 | 4,454 | 4.09375 | 4 | [
"Unlicense"
] | permissive | #!/bin/bash
#
# crossfade_cat.sh
#
# Concatenates two files together with a crossfade of $1 seconds.
# Filenames are specified as $2 and $3.
#
# $4 is optional and specifies if a fadeout should be performed on
# first file.
# $5 is optional and specifies if a fadein should be performed on
# second file.
#
# Example: crossfade_cat.sh 10 infile1.wav infile2.wav auto auto
#
# By default, the script attempts to guess if the audio files
# already have a fadein/out on them or if they just have really
# low volumes that won't cause clipping when mixxing. If this
# is not detected then the script will perform a fade in/out to
# prevent clipping.
#
# The user may specify "yes" or "no" to force the fade in/out
# to occur. They can also specify "auto" which is the default.
#
# Crossfaded file is created as "mix.wav".
#
# Original script from Kester Clegg. Mods by Chris Bagwell to show
# more examples of sox features.
# (See https://github.com/jacksonh/sox/blob/master/scripts/crossfade_cat.sh )
#
SOX=sox
#SOXI=../src/soxi
if [ "$3" == "" ]; then
  echo "Usage: $0 crossfade_seconds first_file second_file [ fadeout ] [ fadein ]"
  echo
  echo "If a fadeout or fadein is not desired then specify \"no\" for that option. \"yes\" will force a fade and \"auto\" will try to detect if a fade should occur."
  echo
  echo "Example: $0 10 infile1.wav infile2.wav auto auto"
  exit 1
fi
fade_length=$1
first_file=$2
second_file=$3
fade_first="auto"
if [ "$4" != "" ]; then
  fade_first=$4
fi
fade_second="auto"
if [ "$5" != "" ]; then
  fade_second=$5
fi
fade_first_opts=
if [ "$fade_first" != "no" ]; then
  #fade_first_opts="fade t 0 0:0:$fade_length 0:0:$fade_length"
  fade_first_opts="fade t 0 0:0:$fade_length "
fi
fade_second_opts=
if [ "$fade_second" != "no" ]; then
  fade_second_opts="fade t 0:0:$fade_length"
fi
echo "crossfade and concatenate files"
echo
# Parse the length (in seconds) out of sox's "stat" output.
echo "Finding length of $first_file..."
first_length=`$SOX "$first_file" 2>&1 -n stat | grep Length | cut -d : -f 2 | cut -f 1`
echo "Length is $first_length seconds"
# trim_length = where the crossfade region starts in the first file.
trim_length=`echo "$first_length - $fade_length" | bc`
# Get crossfade section from first file and optionally do the fade out
echo "Obtaining $fade_length seconds of fade out portion from $first_file..."
$SOX "$first_file" -e signed-integer -b 16 fadeout1.wav trim $trim_length
# When user specifies "auto" try to guess if a fadeout is needed.
# "RMS amplitude" from the stat effect is effectively an average
# value of samples for the whole fade length file. If it seems
# quite then assume a fadeout has already been done. An RMS value
# of 0.1 was just obtained from trail and error.
if [ "$fade_first" == "auto" ]; then
  RMS=`$SOX fadeout1.wav 2>&1 -n stat | grep RMS | grep amplitude | cut -d : -f 2 | cut -f 1`
  should_fade=`echo "$RMS > 0.1" | bc`
  if [ $should_fade == 0 ]; then
    echo "Auto mode decided not to fadeout with RMS of $RMS"
    fade_first_opts=""
  else
    echo "Auto mode will fadeout"
  fi
fi
$SOX fadeout1.wav fadeout2.wav $fade_first_opts
# Get the crossfade section from the second file and optionally do the fade in
echo "Obtaining $fade_length seconds of fade in portion from $second_file..."
$SOX "$second_file" -e signed-integer -b 16 fadein1.wav trim 0 $fade_length
# For auto, do similar thing as for fadeout.
if [ "$fade_second" == "auto" ]; then
  RMS=`$SOX fadein1.wav 2>&1 -n stat | grep RMS | grep amplitude | cut -d : -f 2 | cut -f 1`
  should_fade=`echo "$RMS > 0.1" | bc`
  if [ $should_fade == 0 ]; then
    echo "Auto mode decided not to fadein with RMS of $RMS"
    fade_second_opts=""
  else
    echo "Auto mode will fadein"
  fi
fi
$SOX fadein1.wav fadein2.wav $fade_second_opts
# Mix the crossfade files together at full volume
echo "Crossfading..."
$SOX -m -v 1.0 fadeout2.wav -v 1.0 fadein2.wav crossfade.wav
# Rebuild the non-overlapping parts and glue everything into mix.wav.
echo "Trimming off crossfade sections from original files..."
echo "Trim first"
$SOX "$first_file" -e signed-integer -b 16 song1.wav trim 0 $trim_length
echo "Trim second"
$SOX "$second_file" -e signed-integer -b 16 song2.wav trim $fade_length
echo "concat"
$SOX song1.wav crossfade.wav song2.wav mix.wav
echo -e "Removing temporary files...\n"
rm fadeout1.wav fadeout2.wav fadein1.wav fadein2.wav crossfade.wav song1.wav song2.wav
mins=`echo "$trim_length / 60" | bc`
secs=`echo "$trim_length % 60" | bc`
echo "The crossfade in mix.wav occurs at around $mins mins $secs secs"
| true |
9dd91d77f99ba2fc0d981cde8eb0b4faaaee9124 | Shell | diggerdu/Puccini | /ori/addnoise.sh | UTF-8 | 393 | 2.9375 | 3 | [] | no_license | #!/bin/sh
# Mix every noise file into every local *raw* recording at SNRs 10..20 dB,
# writing the results under treated_rec/ via the ./addnoise tool.
# Fixes: globs replace the fragile `ls` parsing, `rm -f` no longer errors
# on the very first iteration (output.list does not exist yet), and the
# bash-only `for (( ))` is replaced with a POSIX loop to match the
# /bin/sh shebang.
rm -rf treated_rec
mkdir treated_rec
for noise_path in "$HOME"/keyword_spotting/positive/treated_noise/*
do
	file=$(basename "$noise_path")
	for i in $(seq 10 20)
	do
		# Rebuild the output list for this (noise, snr) combination.
		rm -f output.list
		touch output.list
		for speech in *raw*
		do
			echo "treated_rec/${file}_${i}_${speech}" >> output.list
		done
		./addnoise -i input.list -o output.list -n "$noise_path" -s "$i"
	done
done
| true |
d4bee98d9db657777c2296eb3ee3fbf390873e68 | Shell | AdvaniaDC/hpcflow-snapshot | /hpcflow-snapshot | UTF-8 | 12,030 | 3.75 | 4 | [
"MIT"
] | permissive | #!/bin/bash
### Basic Configuration
# All working files live under a fixed scratch root in /tmp.
ADC_SNAPSHOT_ROOT=/tmp/adv-snapshots
mkdir -p $ADC_SNAPSHOT_ROOT
ADC_SNAPSHOT_TARS=$ADC_SNAPSHOT_ROOT/tars
ADC_IMAGE_RAW=$ADC_SNAPSHOT_ROOT/image.raw
ADC_IMAGE_QCOW2=$ADC_SNAPSHOT_ROOT/image.qcow2
ADC_IMAGE_MOUNT=$ADC_SNAPSHOT_ROOT/mnt
# Capture files for stdout/stderr of the commands run by this script.
ADC_OUTPUT=$ADC_SNAPSHOT_ROOT/run.out
ADC_ERROR=$ADC_SNAPSHOT_ROOT/run.err
# Colors for terminal output
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[1;34m'
NC='\033[0m'
# Print error message and exit
# $1 - human readable description of the failure.  Always exits 1.
error () {
  # Pass the message as a %s argument: the original interpolated $1 into
  # the printf FORMAT string, so any message containing '%' broke output.
  printf "\n${RED} ***${NC} ERROR: %s, exiting!\n\n" "$1"
  exit 1
}
# Print a section title
# Renders $1 between two dashed rules sized to the title (colored on the
# terminal) and mirrors the uncolored text into $ADC_OUTPUT.
print_title () {
  # Rule width = title length plus two columns.
  let ADC_SPAD=${#1}+2
  printf "\n${BLUE}%.*s\n" $ADC_SPAD "----------------------------------------------------------------------"
  printf "${BLUE} %s\n" "$1"
  printf "%s\n" "$1" >> $ADC_OUTPUT
  printf "%.*s${NC}\n" $ADC_SPAD "----------------------------------------------------------------------"
  printf "%.*s\n" $ADC_SPAD "----------------------------------------------------------------------" >> $ADC_OUTPUT
}
# Print a padded message where a result (OK|FAIL) will be printed on the end
# The dashed padding stops at column 70 so print_result's text lines up;
# no trailing newline is emitted on purpose.
print_message () {
  let ADC_SPAD=70-${#1}
  printf "  %s %.*s " "$1" $ADC_SPAD "----------------------------------------------------------------------"
  printf "  %s %.*s " "$1" $ADC_SPAD "----------------------------------------------------------------------" >> $ADC_OUTPUT
}
# Print colored resultes at the end of a message (OK or FAIL)
# $1 - exit status of the command just run
# $2 - optional text to print instead of FAIL; when given, a non-zero
#      status is reported with that text and the script continues.
print_result () {
  if [ $1 -eq 0 ]
  then
    # The leading %s has no matching argument and expands to nothing.
    printf "%s${GREEN}OK${NC}\n"
    printf "%sOK\n" >> $ADC_OUTPUT
  elif [ $1 -ne 0 ] && [ -z "$2" ]
  then
    printf "%s${RED}FAIL${NC}\n"
    printf "%sFAIL\n" >> $ADC_OUTPUT
    # Show the captured stderr of the failed command, then abort; the
    # EXIT trap still runs the cleanup tasks.
    cat $ADC_ERROR
    exit 1
  else
    printf "${RED}%s${NC}\n" $2
    printf "${2}\n" >> $ADC_OUTPUT
  fi
}
# Unmount any image partitions and remove the kpartx device maps.
# Registered on EXIT below, so it runs on both success and failure paths;
# umount/kpartx errors are captured into $ADC_OUTPUT and reported as OK.
cleanup () {
  print_title "Running cleanup tasks"
  print_message "Umounting partitions"
  umount $ADC_IMAGE_MOUNT/* >> $ADC_OUTPUT 2>&1
  print_result 0
  print_message "Removing devmaps"
  kpartx -d $ADC_IMAGE_RAW >> $ADC_OUTPUT 2>&1
  print_result 0
}
trap cleanup EXIT
# Request user input to setup environment variables needed to connect to the glance image store
# Prompts for project/username/password, exports the fixed HPCFlow endpoint
# settings, and clears stale tenant variables.  Aborts when any required
# value ends up empty.
set_os_environ () {
  printf "  Please enter your HPCFlow project name: "
  read -r OS_PROJECT_INPUT
  export OS_PROJECT_NAME=$OS_PROJECT_INPUT
  printf "  Please enter your HPCFlow username: "
  read -r OS_USERNAME_INPUT
  export OS_USERNAME=$OS_USERNAME_INPUT
  printf "  Please enter your HPCFlow password: "
  # -s: do not echo the password to the terminal.
  read -sr OS_PASSWORD_INPUT
  export OS_PASSWORD=$OS_PASSWORD_INPUT
  export OS_AUTH_URL=https://identity.hpc.is/v3
  export OS_USER_DOMAIN_NAME="Default"
  export OS_PROJECT_DOMAIN_ID="default"
  export OS_REGION_NAME="is-1"
  export OS_INTERFACE=public
  export OS_IDENTITY_API_VERSION=3
  # Tenant-style variables conflict with the v3 project settings above.
  unset OS_TENANT_ID
  unset OS_TENANT_NAME
  echo ""
  if [ -z "$OS_USERNAME" ] || [ -z "$OS_PASSWORD" ] || [ -z "$OS_AUTH_URL" ] || [ -z "$OS_PROJECT_NAME" ]
  then
    printf "\n${RED} ***${NC} ERROR: HPCFlow environment not set properly, exiting!\n"
    exit 1
  fi
}
# The script partitions disks, mounts filesystems and installs packages,
# so it must run as root.
if [[ $EUID -ne 0 ]]; then
error "This script must be run as root"
# Unreachable: error() already exits; kept for clarity.
exit 1
fi
print_title "Checking for updates to the hpcflow-snapshot script"
ADC_SCRIPT=$0
ADC_NEWSCRIPT=$ADC_SNAPSHOT_ROOT/newscript
print_message "Checking if latest version"
# Fetch the latest published copy of this script for comparison.
curl -s -H 'Cache-Control: no-cache' -o $ADC_NEWSCRIPT https://raw.githubusercontent.com/AdvaniaDC/hpcflow-snapshot/master/hpcflow-snapshot
ADC_CURLSTAT=$?
diff -q "$ADC_SCRIPT" "$ADC_NEWSCRIPT" &>/dev/null
ADC_SCRIPT_DIFF=$?
# diff status 1 means the files differ => print "NO" (not latest), continue.
print_result $ADC_SCRIPT_DIFF "NO"
if [ $ADC_SCRIPT_DIFF -eq 1 ] && [ $ADC_CURLSTAT -eq 0 ];
then
read -ep ' Update now? [y/n] '
if [[ $REPLY =~ ^[Yy]$ ]];
then
# Keep a numbered backup of the old script, swap in the new one and
# re-exec it with the original arguments.
mv --backup=t "$ADC_SCRIPT" "${ADC_SCRIPT}_old"
mv "$ADC_NEWSCRIPT" "$ADC_SCRIPT"
chmod +x "$ADC_SCRIPT"
exec "$ADC_SCRIPT" "$@"
exit 0
fi
fi
print_title "Setting HPCFlow environment"
# Make sure the the HPCFlow API variables are set, request them if they are not
if [ -z "$OS_USERNAME" ] || [ -z "$OS_PASSWORD" ] || [ -z "$OS_AUTH_URL" ] || [ -z "$OS_PROJECT_NAME" ]
then
set_os_environ
fi
printf " Enter name to give the image: "
read -r ADC_IMAGE_NAME_INPUT
export ADC_IMAGE_NAME=$ADC_IMAGE_NAME_INPUT
print_message "Adding OpenStack repo for glance"
yum install -q -y centos-release-openstack-queens >> $ADC_OUTPUT 2>$ADC_ERROR
print_result $?
print_message "Installing required packages"
yum install -q -y kpartx parted python2-glanceclient >> $ADC_OUTPUT 2>$ADC_ERROR >> $ADC_OUTPUT 2>$ADC_ERROR
print_result $?
print_message "Testing connection to glance"
glance image-list >/dev/null 2>$ADC_ERROR
print_result $?
print_message "Creating temporary directory to backup into"
mkdir -p $ADC_SNAPSHOT_TARS
print_result $?
print_title "Backing up existing partitions with labels: boot, EFI and primary"
for part in boot EFI primary
do
DEVPATH=$(blkid /dev/sda*|grep "$part"|awk '{print $1}'|sed 's/://')
if [ "$DEVPATH" == "" ]
then
if [ $part == "EFI" ]
then
DEVPATH=/dev/sda1
fi
if [ $part == "boot" ]
then
DEVPATH=/dev/sda2
fi
if [ $part == "primary" ]
then
DEVPATH=/dev/sda3
fi
fi
PARTPATH=$(mount | grep $DEVPATH | awk '{print $3}')
declare ${part}_UUID=$(blkid /dev/sda* | grep $DEVPATH | grep -o -P " UUID=\".*?\" " | cut -d= -f2)
if [ "$PARTPATH" != "" ]
then
print_message "Backing up $PARTPATH "
tar cf $ADC_SNAPSHOT_TARS/$part.tar -C $PARTPATH --preserve-permissions --preserve-order --sparse --selinux --acls --xattrs --numeric-owner --one-file-system \
--exclude=$ADC_SNAPSHOT_TARS/* \
--exclude=./tmp/* \
--exclude=./sys/* \
--exclude=./run/* \
--exclude=./proc/* \
--exclude=./var/tmp/* \
--exclude=./root/.bash_history \
--exclude=./root/.glanceclient \
--exclude=./home/centos/.bash_history \
--exclude=./var/lib/cloud \
--exclude=./var/cache/* \
--exclude=./var/log/*.log* \
--exclude=./var/log/anaconda/* \
--exclude=./var/log/audit/* \
--exclude=./var/log/cron* \
--exclude=./var/log/dmesg* \
--exclude=./var/log/messages* \
--exclude=./var/log/secure* \
--exclude=./var/log/lastlog* \
--exclude=./var/log/wtmp* \
--exclude=./var/log/maillog* \
--exclude=./var/lib/yum/uuid \
. >> $ADC_OUTPUT 2>&1
print_result $?
else
printf "${RED}***${NC} ERROR: partition label '$part' not found, exiting\n"
exit 1
fi
done
#EFI_UUID=$(blkid /dev/sda* | grep 'LABEL="EFI"' | grep -o -P " UUID=\".*?\" " | cut -d= -f2 | sed 's/["-]//g')
#boot_UUID=$(blkid /dev/sda* | grep 'LABEL="boot"' | grep -o -P " UUID=\".*?\" " | cut -d= -f2 | sed 's/["]//g')
#primary_UUID=$(blkid /dev/sda* | grep 'LABEL="primary"' | grep -o -P " UUID=\".*?\" " | cut -d= -f2 | sed 's/["]//g')
# Strip the quotes captured by blkid (and dashes for the FAT volume id,
# which mkfs.vfat -i expects as a bare hex serial).
EFI_UUID=$(echo $EFI_UUID | sed 's/["-]//g')
boot_UUID=$(echo $boot_UUID | sed 's/["]//g')
primary_UUID=$(echo $primary_UUID | sed 's/["]//g')
print_title "Creating raw disk image"
# Calculate how big the image needs to be
# NOTE(review): hard-codes /tmp/adv-snapshots/tars rather than using
# $ADC_SNAPSHOT_TARS -- verify these always point at the same directory.
IMAGE_SIZE=$(du -sB1 /tmp/adv-snapshots/tars/ | awk '{print $1}')
# Add ~1.25 GB headroom for filesystem metadata and free space.
let "IMAGE_SIZE=$IMAGE_SIZE+1346434048"
# Remove any existing disk image
rm -f $ADC_IMAGE_RAW >> $ADC_OUTPUT 2>$ADC_ERROR
print_message "Creating raw disk "
fallocate -l $IMAGE_SIZE $ADC_IMAGE_RAW
print_result $?
# Recreate the source machine's GPT layout: EFI (fat), boot (ext4),
# primary (xfs); partition 1 carries the boot flag for UEFI firmware.
print_message " Creating GPT partition table"
parted $ADC_IMAGE_RAW --script -- mklabel gpt
print_result $?
print_message " Adding EFI partition"
parted $ADC_IMAGE_RAW --script -- mkpart primary fat16 0% 211MB
print_result $?
print_message " Adding boot partition"
parted $ADC_IMAGE_RAW --script -- mkpart primary ext4 211MB 1285MB
print_result $?
print_message " Adding primary partition"
parted $ADC_IMAGE_RAW --script -- mkpart primary xfs 1285MB 100%
print_result $?
print_message " Marking EFI partition as bootable"
parted $ADC_IMAGE_RAW --script -- set 1 boot on
print_result $?
print_message " Setting EFI partition name"
parted $ADC_IMAGE_RAW --script -- name 1 EFI
print_result $?
print_message " Setting boot partition name"
parted $ADC_IMAGE_RAW --script -- name 2 boot
print_result $?
print_message " Setting primary partition name"
parted $ADC_IMAGE_RAW --script -- name 3 primary
print_result $?
print_title "Restore data into raw disk image"
mkdir -p $ADC_IMAGE_MOUNT
echo " Mapping partitions"
for KPART_MAP in $(kpartx -av $ADC_IMAGE_RAW | awk '{print $3}')
do
DEVMAP=/dev/mapper/$KPART_MAP
#echo $KPART_MAP
if [[ "$KPART_MAP" == *"p1" ]]
then
printf " EFI:\n"
print_message " Creating EFI filesystem"
mkfs.vfat -n EFI -i $EFI_UUID $DEVMAP >> $ADC_OUTPUT
print_result $?
print_message " Mounting EFI filesystem"
mkdir -p $ADC_IMAGE_MOUNT/p1
mount $DEVMAP $ADC_IMAGE_MOUNT/p1
print_result $?
print_message " Restoring tar backup of EFI partition"
tar xf $ADC_SNAPSHOT_TARS/EFI.tar -C $ADC_IMAGE_MOUNT/p1 --warning=no-timestamp
print_result $?
print_message " Unmounting EFI filesystem"
umount $ADC_IMAGE_MOUNT/p1
print_result $?
print_message " Removing EFI tar file"
rm -f $ADC_SNAPSHOT_TARS/EFI.tar
print_result $?
elif [[ "$KPART_MAP" == *"p2" ]]
then
printf " boot:\n"
print_message " Creating boot filesystem"
mkfs -t ext4 -U $boot_UUID -L boot -q $DEVMAP >> $ADC_OUTPUT
print_result $?
print_message " Mounting boot filesystem"
mkdir -p $ADC_IMAGE_MOUNT/p2
mount $DEVMAP $ADC_IMAGE_MOUNT/p2
print_result $?
print_message " Restoring tar backup of boot partition"
tar xf $ADC_SNAPSHOT_TARS/boot.tar -C $ADC_IMAGE_MOUNT/p2 --warning=no-timestamp
print_result $?
print_message " Unmounting boot filesystem"
umount $ADC_IMAGE_MOUNT/p2
print_result $?
print_message " Removing boot tar file"
rm -f $ADC_SNAPSHOT_TARS/boot.tar
print_result $?
else
printf " primary:\n"
print_message " Creating primary filesystem"
mkfs -t xfs -m uuid=$primary_UUID -L primary $DEVMAP >> $ADC_OUTPUT
print_result $?
print_message " Mounting primary filesystem"
mkdir -p $ADC_IMAGE_MOUNT/p3
mount -o nouuid $DEVMAP $ADC_IMAGE_MOUNT/p3
print_result $?
print_message " Restoring tar backup of primary partition"
tar xf $ADC_SNAPSHOT_TARS/primary.tar -C $ADC_IMAGE_MOUNT/p3 --warning=no-timestamp
print_result $?
print_message " Clearing 70-persistent-net.rules"
> $ADC_IMAGE_MOUNT/p3/etc/udev/rules.d/70-persistent-net.rules
print_result $?
print_message " Clearing 75-persistent-net-generator.rules"
> $ADC_IMAGE_MOUNT/p3/etc/udev/rules.d/75-persistent-net-generator.rules
print_result $?
print_message " Clearing MAC address from eno49"
sed -i '/HWADDR=.*/d' $ADC_IMAGE_MOUNT/p3/etc/sysconfig/network-scripts/ifcfg-eno49
print_result $?
print_message " Clearing MAC address from eno50"
sed -i '/HWADDR=.*/d' $ADC_IMAGE_MOUNT/p3/etc/sysconfig/network-scripts/ifcfg-eno50
print_result $?
print_message " Removing existing SSH host keys"
rm -f $ADC_IMAGE_MOUNT/p3/etc/ssh/ssh_host_*
print_result $?
print_message " Unmounting primary filesystem"
umount $ADC_IMAGE_MOUNT/p3
print_result $?
print_message " Removing primary tar file"
rm -f $ADC_SNAPSHOT_TARS/primary.tar
print_result $?
fi
done
print_message "Unmapping partitions"
kpartx -d $ADC_IMAGE_RAW >> $ADC_OUTPUT 2>&1
print_result $?
print_title "Converting raw disk image and uploading"
print_message "Creating compressed QCOW2 image"
qemu-img convert -f raw -O qcow2 -c $ADC_IMAGE_RAW $ADC_IMAGE_QCOW2
print_result $?
print_message "Removing raw disk image"
rm -f $ADC_IMAGE_RAW
print_result $?
print_message "Uploading image to HPCFlow"
glance --os-image-api-version 1 image-create --store swift --name $ADC_IMAGE_NAME --disk-format qcow2 --file $ADC_IMAGE_QCOW2 --container-format bare --property "hw-firmware-type=uefi" 2>$ADC_ERROR | tee -a $ADC_OUTPUT > ${ADC_OUTPUT}_upload
print_result $?
print_message "Removing raw image file"
rm -f $ADC_IMAGE_RAW
print_result $?
print_message "Removing qcow2 image file"
rm -f $ADC_IMAGE_QCOW2
print_result $?
print_title "Glance image report"
cat ${ADC_OUTPUT}_upload
printf "\n Done\n\n"
| true |
06736f88cc21b4f8d5fe26d38ef4c11c0fe11280 | Shell | kohana-archive/kohana-ci | /violations.sh | UTF-8 | 411 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Prepare the environment with some common variables
# (the sourced "environment" file presumably defines SOURCEDIR -- it is not
# visible here).
BASEDIR=$(dirname $0)
. $BASEDIR/environment
# Cleanup after previous builds
rm -rf build || true
mkdir build
# What paths do we want to check?
PHPCS_PATHS=${PHPCS_PATHS:-**/classes **/**/classes}
# Lets go..
# Run PHP_CodeSniffer with the Kohana standard and write a checkstyle XML
# report into build/; remember its exit status for the caller.
pushd $SOURCEDIR
phpcs --standard=Kohana -s --extensions=php --report=checkstyle $PHPCS_PATHS > ../build/phpcs.xml
PHPCSEXIT=$?
popd
| true |
a61219131645eceb3c6c1c293963d563393729c1 | Shell | abhijeetka/rancher-app-laravel | /bin/run6.1.9.sh | UTF-8 | 8,897 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Container entrypoint: resolve the linked Gluster and PXC services, derive
# defaults for DB name/password and document root, then continue below.
set -e
# DEBUG=1: trace commands and stop aborting on errors.
[ "$DEBUG" == "1" ] && set -x && set +e
# Required variables
# Give the linked services a moment to register in DNS before resolving them.
sleep 5
export GLUSTER_HOSTS=`dig +short ${GLUSTER_HOST}`
if [ -z "${GLUSTER_HOSTS}" ]; then
echo "*** ERROR: Could not determine which containers are part of Gluster service."
echo "*** Is Gluster service linked with the alias \"${GLUSTER_HOST}\"?"
echo "*** If not, please link gluster service as \"${GLUSTER_HOST}\""
echo "*** Exiting ..."
exit 1
fi
export DB_HOSTS=`dig +short ${DB_HOST}`
if [ -z "${DB_HOSTS}" ]; then
echo "*** ERROR: Could not determine which containers are part of PXC service."
echo "*** Is PXC service linked with the alias \"${DB_HOST}\"?"
echo "*** If not, please link gluster service as \"${DB_HOST}\""
echo "*** Exiting ..."
exit 1
fi
# Fall back to the root password published by the linked PXC container when
# none (or only the placeholder) was provided.
if [ "${DB_PASSWORD}" == "**ChangeMe**" -o -z "${DB_PASSWORD}" ]; then
DB_PASSWORD=${DB_ENV_PXC_ROOT_PASSWORD}
if [ "${DB_PASSWORD}" == "**ChangeMe**" -o -z "${DB_PASSWORD}" ]; then
# NOTE(review): exits 0 on a fatal misconfiguration -- probably should be 1.
echo "ERROR: Could not retreive PXC_ROOT_PASSWORD from PXC service - DB_ENV_PXC_ROOT_PASSWORD env var is empty - Exiting..."
exit 0
fi
fi
# Default DB name: the app name with dots replaced by underscores.
if [ "${WP_DB_NAME}" == "**ChangeMe**" -o -z "${WP_DB_NAME}" ]; then
WP_DB_NAME=`echo "${WORDPRESS_NAME}" | sed "s/\./_/g"`
fi
# Default document root lives on the shared Gluster volume.
if [ "${HTTP_DOCUMENTROOT}" == "**ChangeMe**" -o -z "${HTTP_DOCUMENTROOT}" ]; then
HTTP_DOCUMENTROOT=${GLUSTER_VOL_PATH}/${WORDPRESS_NAME}
fi
### Prepare configuration
# nginx config
# Substitute the listen port and (slash-escaped) document root into the
# nginx vhost template in place.
perl -p -i -e "s/HTTP_PORT/${HTTP_PORT}/g" /etc/nginx/sites-enabled/wordpress
HTTP_ESCAPED_DOCROOT=`echo ${HTTP_DOCUMENTROOT} | sed "s/\//\\\\\\\\\//g"`
perl -p -i -e "s/HTTP_DOCUMENTROOT/${HTTP_ESCAPED_DOCROOT}/g" /etc/nginx/sites-enabled/wordpress
# php-fpm config
PHP_ESCAPED_SESSION_PATH=`echo ${PHP_SESSION_PATH} | sed "s/\//\\\\\\\\\//g"`
perl -p -i -e "s/;?session.save_path\s*=.*/session.save_path = \"${PHP_ESCAPED_SESSION_PATH}\"/g" /etc/php5/fpm/php.ini
# Find at least one reachable GlusterFS node; $glusterHost keeps the winner
# and is reused later for the mount.
ALIVE=0
for glusterHost in ${GLUSTER_HOSTS}; do
echo "=> Checking if I can reach GlusterFS node ${glusterHost} ..."
if ping -c 10 ${glusterHost} >/dev/null 2>&1; then
echo "=> GlusterFS node ${glusterHost} is alive"
ALIVE=1
break
else
echo "*** Could not reach server ${glusterHost} ..."
fi
done
if [ "$ALIVE" == 0 ]; then
echo "ERROR: could not contact any GlusterFS node from this list: ${GLUSTER_HOSTS} - Exiting..."
exit 1
fi
# Fetch the application source on first boot only.
if [ ! -d /opt/alm_task_manager ]; then
cd /opt
# NOTE(security): credentials are hard-coded in the clone URL; they should
# come from environment variables or a credential helper instead.
git clone http://ashishka:password@172.27.56.81:7990/scm/atm/alm_task_manager.git
echo "*******checking out master branch*******"
cd /opt/alm_task_manager
git checkout -b master origin/master
cd ..
cd ..
fi
# Laravel needs its storage tree writable by the web server.
if [ -d /opt/alm_task_manager/app/storage ]; then
chmod -R 777 /opt/alm_task_manager/app/storage
fi
# Install PHP dependencies when a composer manifest is present.
if [ -e /opt/alm_task_manager/composer.json ]; then
echo "****running composer update******* "
cd /opt/alm_task_manager
#ssh -o StrictHostKeyChecking=no abhijeetka@github.com #7.5.1,
# NOTE(security): hard-coded composer HTTP credentials on the next line.
composer config http-basic.example.org abhijeetka abhijeet123#
#composer config -g github-oauth.github.com <oauthtoken>
#composer update --no-dev ##7.5, 7.5.2
#composer dump-autoload -o -a ##7.3.1, 7.6.1
composer update --no-dev
cd ..
cd ..
fi
#chown -R root:root . ## 7.4.2
#here document root is checked but we dont have to.or not even have to create a structure.
#if [ ! -d ${HTTP_DOCUMENTROOT} ]; then
#  mkdir -p ${HTTP_DOCUMENTROOT}
#fi
# PHP session directory must exist and be owned by the web user.
if [ ! -d ${PHP_SESSION_PATH} ]; then
mkdir -p ${PHP_SESSION_PATH}
chown www-data:www-data ${PHP_SESSION_PATH}
# chown -R www-data:www-data ${HTTP_DOCUMENTROOT}
fi
#checking if index.php already exist, but we dont have to check and download wordpress.. we have to clone our repository and run composer update and db migrate and that's it.
# composer update
# php artisan migrate with non interactive mode --no-interaction OR -n
# also we have to run chown chown www-data:www-data ${HTTP_DOCUMENTROOT}/wp-config.php chmod 640 ${HTTP_DOCUMENTROOT}/wp-config.php
#if [ ! -e ${HTTP_DOCUMENTROOT}/index.php ]; then
#  echo "=> Installing wordpress in ${HTTP_DOCUMENTROOT} - this may take a while ..."
#  touch ${HTTP_DOCUMENTROOT}/index.php
#  curl -o /tmp/wordpress.tar.gz "https://wordpress.org/wordpress-${WORDPRESS_VERSION}.tar.gz"
#  tar -zxf /tmp/wordpress.tar.gz -C /tmp/
#  mv /tmp/wordpress/* ${HTTP_DOCUMENTROOT}/
#  chown -R www-data:www-data ${HTTP_DOCUMENTROOT}
#fi
if grep "PXC nodes here" /etc/haproxy/haproxy.cfg >/dev/null; then
PXC_HOSTS_HAPROXY=""
PXC_HOSTS_COUNTER=0
for host in `echo ${DB_HOSTS} | sed "s/,/ /g"`; do
PXC_HOSTS_HAPROXY="$PXC_HOSTS_HAPROXY\n server pxc$PXC_HOSTS_COUNTER $host check port 9200 rise 2 fall 3"
if [ $PXC_HOSTS_COUNTER -gt 0 ]; then
PXC_HOSTS_HAPROXY="$PXC_HOSTS_HAPROXY backup"
fi
PXC_HOSTS_COUNTER=$((PXC_HOSTS_COUNTER+1))
done
perl -p -i -e "s/DB_PASSWORD/${DB_PASSWORD}/g" /etc/haproxy/haproxy.cfg
perl -p -i -e "s/.*server pxc.*//g" /etc/haproxy/haproxy.cfg
perl -p -i -e "s/# PXC nodes here.*/# PXC nodes here\n${PXC_HOSTS_HAPROXY}/g" /etc/haproxy/haproxy.cfg
fi
# we can comment this section as not required in our setup
#if [ ! -e ${HTTP_DOCUMENTROOT}/wp-config.php ] && [ -e ${HTTP_DOCUMENTROOT}/wp-config-sample.php ] ; then
# echo "=> Configuring wordpress..."
# touch ${HTTP_DOCUMENTROOT}/wp-config.php
# WP_DB_PASSWORD=`pwgen -s 20 1`
# sed -e "s/database_name_here/$WP_DB_NAME/
# s/username_here/$WP_DB_NAME/
# s/password_here/$WP_DB_PASSWORD/
# s/localhost/127.0.0.1/
# /'AUTH_KEY'/s/put your unique phrase here/`pwgen -c -n -1 65`/
# /'SECURE_AUTH_KEY'/s/put your unique phrase here/`pwgen -c -n -1 65`/
# /'LOGGED_IN_KEY'/s/put your unique phrase here/`pwgen -c -n -1 65`/
# /'NONCE_KEY'/s/put your unique phrase here/`pwgen -c -n -1 65`/
# /'AUTH_SALT'/s/put your unique phrase here/`pwgen -c -n -1 65`/
# /'SECURE_AUTH_SALT'/s/put your unique phrase here/`pwgen -c -n -1 65`/
# /'LOGGED_IN_SALT'/s/put your unique phrase here/`pwgen -c -n -1 65`/
# /'NONCE_SALT'/s/put your unique phrase here/`pwgen -c -n -1 65`/" ${HTTP_DOCUMENTROOT}/wp-config-sample.php > ${HTTP_DOCUMENTROOT}/wp-config.php
# chown www-data:www-data ${HTTP_DOCUMENTROOT}/wp-config.php
# chmod 640 ${HTTP_DOCUMENTROOT}/wp-config.php
# Download nginx helper plugin
# curl -O `curl -i -s https://wordpress.org/plugins/nginx-helper/ | egrep -o "https://downloads.wordpress.org/plugin/[^']+"`
# unzip -o nginx-helper.*.zip -d ${HTTP_DOCUMENTROOT}/wp-content/plugins
# chown -R www-data:www-data ${HTTP_DOCUMENTROOT}/wp-content/plugins/nginx-helper
# Activate nginx plugin and set up pretty permalink structure once logged in
# cat << ENDL >> ${HTTP_DOCUMENTROOT}/wp-config.php
#\$plugins = get_option( 'active_plugins' );
#if ( count( \$plugins ) === 0 ) {
# require_once(ABSPATH .'/wp-admin/includes/plugin.php');
# \$wp_rewrite->set_permalink_structure( '/%postname%/' );
# \$pluginsToActivate = array( 'nginx-helper/nginx-helper.php' );
# foreach ( \$pluginsToActivate as \$plugin ) {
# if ( !in_array( \$plugin, \$plugins ) ) {
# activate_plugin( '${HTTP_DOCUMENTROOT}/wp-content/plugins/' . \$plugin );
# }
# }
#}
#ENDL
## we can comment out till here for our container.
echo "=> Creating database ${WP_DB_NAME}, username root, with password ${WP_DB_PASSWORD} ..."
service haproxy start
sleep 2
mysql -h 127.0.0.1 -u root -p${DB_PASSWORD} -e "CREATE DATABASE IF NOT EXISTS ${WP_DB_NAME}; GRANT ALL PRIVILEGES ON ${WP_DB_NAME}.* TO '${WP_DB_NAME}'@'10.42.%' IDENTIFIED BY '${WP_DB_PASSWORD}'; FLUSH PRIVILEGES;"
service haproxy stop
if [ -d /opt/alm_task_manager/vendor ]; then
cd /opt/alm_task_manager
echo "*******migrating database*******"
service haproxy start
php artisan -n migrate --force
service haproxy stop
cd ..
cd ..
fi
echo "=> Mounting GlusterFS volume ${GLUSTER_VOL} from GlusterFS node ${glusterHost} ..."
mount -t glusterfs ${glusterHost}:/${GLUSTER_VOL} ${GLUSTER_VOL_PATH}
if [ ! -d /var/www/alm_task_manager/ ]; then
echo "*******copying alm-task-manager*******"
cp -rf /opt/alm_task_manager /var/www/
fi
if [ -d /var/www/alm_task_manager/app/storage ]; then
rm -rf /var/www/alm_task_manager/app/storage/.gitignore
rm -rf /var/www/alm_task_manager/app/storage/cache/.gitignore
rm -rf /var/www/alm_task_manager/app/storage/logs/.gitignore
rm -rf /var/www/alm_task_manager/app/storage/meta/.gitignore
rm -rf /var/www/alm_task_manager/app/storage/sessions/.gitignore
rm -rf /var/www/alm_task_manager/app/storage/views/.gitignore
chmod -R 777 /var/www/alm_task_manager/app/storage
fi
if [ ! -e ${HTTP_DOCUMENTROOT}/healthcheck.txt ]; then
echo "OK" > ${HTTP_DOCUMENTROOT}/healthcheck.txt
fi
/usr/bin/supervisord
| true |
fa1e904905859bb6f1fd68d22231a4bc97a9f2c3 | Shell | akshayssalunkhe/FlipCoinSimulation | /FlipCoinSimulation.sh | UTF-8 | 1,150 | 3.96875 | 4 | [] | no_license | #!/bin/bash -x
# Display the welcome banner
echo "Welcome To Flip Coin Simulation"
# Associative array mapping each H/T pattern (e.g. "HT") to how often it
# came up
declare -A flipResult
# Working variables (side/index are scratch globals used by flipCoin below)
flipTimes=0;
side=0;
index=0;
numberOfCoins=0;
# Ask how many flips to perform and how many coins are tossed per flip
read -p "Enter The Number Of Times You Want To Flip A Coin " flipTimes
read -p "Enter The Number of Coins You Want To Flip " numberOfCoins
# flipCoin generates one H/T pattern per flip and tallies it in flipResult
# Perform the simulation: toss `numberOfCoins` coins `flipTimes` times,
# recording each resulting H/T pattern's frequency in the global associative
# array `flipResult`. Prints an error message when either count is not
# positive. One RANDOM draw per coin; 0 maps to heads, 1 to tails.
flipCoin() {
    if (( flipTimes <= 0 || numberOfCoins <= 0 )); then
        echo "Enter Value Greater Than Zero"
        return
    fi
    local toss coin pattern
    for (( toss = 1; toss <= flipTimes; toss++ )); do
        pattern=""
        for (( coin = 1; coin <= numberOfCoins; coin++ )); do
            if (( RANDOM % 2 )); then
                pattern+=T
            else
                pattern+=H
            fi
        done
        flipResult[$pattern]=$(( ${flipResult[$pattern]:-0} + 1 ))
    done
}
# Run the simulation
flipCoin
# Convert each pattern's count into a percentage of all flips (2 decimal
# places via bc)
for i in ${!flipResult[@]}
do
flipResult[$i]=`echo "scale=2; ${flipResult[$i]}*100/$flipTimes" | bc`
done
# Print every pattern with its percentage, sort by percentage descending and
# keep only the top line: the most frequent ("winning") combination
echo " Winning Combination Is : "
for i in ${!flipResult[@]}
do
echo "$i ${flipResult[$i]}"
done | sort -k2 -rn | head -1
| true |
f3281b190113baf85fa4ca00ae7bea0400c95bb4 | Shell | rvsubbu/todos | /now.sh | UTF-8 | 362 | 2.765625 | 3 | [] | no_license | rm -f dts firstddnums next_items.html len
# Extract the first line of each definition-list entry in tasks.html.
# dts: line numbers of every "<dt" tag; len: line number of the closing </dl>.
grep -n "<dt" tasks.html | cut -d: -f1 > dts
touch firstddnums next_items.html
grep -n "<\/dl" tasks.html | cut -d: -f1 > len
# l = (line of </dl>) + 1, so (l - n) is the line count from n to the end
# -- assumes </dl> is the last line of the file; TODO confirm.
l=$((1+`cat len`))
for n in `cat dts`
do
# Distance from each <dt> line to the end of the file, for "tail -n" below.
echo $(($l-$n)) >> firstddnums
done
for n in `cat firstddnums`
do
# The first line of that tail is the <dt> line itself (under the assumption
# above); collect one per entry.
tail -n $n tasks.html | head -1 >> next_items.html
done
rm -f firstddnums dts len
| true |
7ac04d70dc2b79081b82d906b36c857357c88e5e | Shell | mrajner/grat | /src/test/tests/bugs.sh | UTF-8 | 1,985 | 2.6875 | 3 | [] | no_license | #!/bin/bash -
#===============================================================================
# FILE: bugs.sh
# AUTHOR: mrajner (mrajner@gik.pw.edu.pl)
# CREATED: 28.08.2014 09:54
#===============================================================================
# Regression driver for historical bugs in the value_check/grat tools: each
# dated section reproduces one reported bug; the combined output of section N
# is captured in <script-name-without-.sh>.dat<N>.
set -o nounset
counter=0
# Strip the ".sh" suffix from the script path to build output file names.
base=${0/.sh}
touch ${base}.dat
# value_check -F ../data/shum.2012.nc@VSH:shum -Ja -o:level -H -Sj,l
# exit
# set NaN if value cannot be found (previousle 0)
# FIXED 14eea59d5338d987901cc44bdbd10fc8af6c792d
{
value_check -v -H
value_check -FNCEP@SP -D1777 -Sj -H
value_check -FNCEP@SP,NCEP -D2100,2014 -Sj -H -wn
} &>${base}.dat${counter}
# NOTE: "let counter++" returns status 1 when the pre-increment value is 0;
# harmless here since set -e is not enabled.
let counter++
# 2014.09.16
# segmentation fault
# FIXED 5a582b0e0148df1b22f8f74d0ddd7d581bf53374
{
grat -D2000
value_check -D1950
} &>${base}.dat${counter}
let counter++
# 2014.09.12
# unnecessary header output when -BI,N but no @GE
# FIXED 59a6cb55654b458fcfc048253723ede06f0970a1
grat -F10@SP -M2 -G@GN -BI,N -Sj -H>${base}.dat${counter}
let counter++
# 2014.09.04
# problem when -D before -S
{
value_check -Sj -D2009 -F10
} &>${base}.dat${counter}
let counter++
# 2014.09.02
# second @LS (after @GP)
value_check \
-F ../data/test_data.nc:sp, :t,@LS:ls,@GP:gp,@LS:ls\
-S j -D201201 : m \
-o :level -J1000,10 -H 2>/dev/null > ${base}.dat${counter}
let counter++
# 2014.09.02
# should ignore not_starting_with_dash
# but treat all after it as one parameter
# i.e. not -S given error
# FIXED 40927a5342cb05872bd9e063ddd9ed3edb235499
{
value_check -starting_with_dash -Sj -F10
value_check not_starting_with_dash -Sj -F10
grat -starting_with_dash -Sj -F10@SP
grat not_starting_with_dash -Sj -F10@SP
} &>${base}.dat${counter}
let counter++
# 2014.09.02
# FIXED 329259ae88ccc8c5b9cb241bf5d43c9a14920308
value_check -F 10@SP -Sj -D 2010@~ &> ${base}.dat${counter}
let counter++
# 2015.05.07
# fixed with 5795c272829b2a7de1a2b1474cb08afca7d8f360
value_check -D 2010 :2011 : 5@M -- 2>&1 ${base}.dat${counter}
let counter++
| true |
e8f47cbe63709e2facd2f92918606b316a3e6e28 | Shell | apana/MyChi | /ChiAnalysis/bin/SubmitData/submitData.sh | UTF-8 | 1,204 | 3.84375 | 4 | [] | no_license | #!/bin/sh
# LSF queue to submit to (1nd = one-day queue) and the wrapper script that
# each batch job executes.
QUEUE=1nd
SUB_SCRIPT=toBatchCHI.sh
#####################################################################################
# Submit a single analysis job to the CERN LSF batch queue.
#   $1 - python config file passed to the batch wrapper script
#   $2 - log file for the job's output (bsub -oo overwrites it)
#   $3 - job number forwarded to the wrapper script
function Submit()
{
  if [ $# -lt 2 ]
  # Test number of arguments to script (always a good idea).
  then
    echo "Usage: `basename $0` <cfgfile> <logfile> "
    # Fix: E_BADARGS was never defined, so "exit $E_BADARGS" expanded to a
    # bare "exit" and terminated with status 0; a usage error must exit
    # non-zero so callers can detect it.
    exit 2
  fi
  cfgfile=$1
  logfile=$2
  jobnum=$3
  echo
  echo "************************************************"
  echo "Submitting job to the CERN $QUEUE batch queue"
  echo "************************************************"
  echo
  echo "CFG: " $cfgfile
  echo "LOG: " $logfile
  echo
  # Echo the bsub command first so it is recorded in the shell transcript.
  echo bsub -q ${QUEUE} -oo ${logfile} ${SUB_SCRIPT} ${cfgfile} ${jobnum}
  bsub -q ${QUEUE} -oo ${logfile} ${SUB_SCRIPT} ${cfgfile} ${jobnum}
}
######################################################################################
# Driver: create a dated log directory, snapshot the analysis sources into
# it for provenance, then submit one batch job per data slice (0..116).
theDate=`date '+%Y%m%d'`
logDir=logs_${theDate}
if [[ ! -e $logDir ]]; then
mkdir $logDir
fi
# Keep a copy of the code that produced these jobs alongside the logs.
cp ../ChiNtuple.cc ../ChiNtuple.h $logDir
for a in {0..116}
## for a in 0
do
config=chiNtuple_cfg.py
logfile=$logDir/chiCFG_data_${a}.log
echo $config $logfile $a
Submit $config $logfile ${a}
# NOTE(review): COUNTER is never initialised; "let" treats the unset value
# as 0, so the final count is still correct.
let COUNTER=COUNTER+1
done
echo
echo "==> Submitted ${COUNTER} jobs <=="
echo
| true |
c56ec402361fe27d1b49156fb8f73194a40bdd64 | Shell | physcip/iocage-plugin-spectrum-protect | /post_install.sh | UTF-8 | 2,096 | 2.796875 | 3 | [] | no_license | #!/bin/sh
# FreeBSD/iocage plugin post-install: set up the Linux compat tree, install
# the IBM Spectrum Protect (TSM) backup client from the Linux RPMs, and trim
# the jail down to just the backup daemon.
# Create mountpoints and files the Linux TSM client expects to exist.
mkdir /compat/linux/etc/adsm
mkdir /compat/linux/etc/tsm
mkdir /compat/linux/proc/self
touch /compat/linux/proc/self/mounts
ln -s /compat/linux/etc/mtab /compat/linux/etc/fstab
# Download IBM Spectrum Protect (TSM) client 8.1.17
mkdir /compat/linux/tmp
cd /compat/linux/tmp
curl https://public.dhe.ibm.com/storage/tivoli-storage-management/maintenance/client/v8r1/Linux/LinuxX86/BA/v8117/8.1.17.0-TIV-TSMBAC-LinuxX86.tar --output 8.1.17.0-TIV-TSMBAC-LinuxX86.tar
curl https://public.dhe.ibm.com/storage/tivoli-storage-management/maintenance/client/v8r1/Linux/LinuxX86/BA/v8117/8.1.17.0-TIV-TSMBAC-LinuxX86.tar.sha256sum.txt --output 8.1.17.0-TIV-TSMBAC-LinuxX86.tar.sha256sum.txt
# Verify the download against IBM's published checksum before unpacking.
cat 8.1.17.0-TIV-TSMBAC-LinuxX86.tar.sha256sum.txt | shasum -a 256 -c
tar xvf 8*-TIV-TSMBAC-LinuxX86.tar
# Install: unpack the RPM payloads directly into the Linux compat root
# (no rpm database needed inside the jail).
cd /compat/linux
rpm2cpio < /compat/linux/tmp/gskcrypt64-8.0.55.29.linux.x86_64.rpm | cpio -id --quiet
rpm2cpio < /compat/linux/tmp/gskssl64-8.0.55.29.linux.x86_64.rpm | cpio -id --quiet
rpm2cpio < /compat/linux/tmp/TIVsm-API64.x86_64.rpm | cpio -id --quiet
rpm2cpio < /compat/linux/tmp/TIVsm-BA.x86_64.rpm | cpio -id --quiet
# Link missing libraries into the loader's default search path.
ln -s /compat/linux/opt/tivoli/tsm/client/api/bin64/libgpfs.so /compat/linux/usr/lib64/libgpfs.so
ln -s /compat/linux/opt/tivoli/tsm/client/api/bin64/libdmapi.so /compat/linux/usr/lib64/libdmapi.so
ln -s /compat/linux/usr/local/ibm/gsk8_64/lib64/libgsk8ssl_64.so /compat/linux/usr/lib64/libgsk8ssl_64.so
ln -s /compat/linux/usr/local/ibm/gsk8_64/lib64/libgsk8iccs_64.so /compat/linux/usr/lib64/libgsk8iccs_64.so
ln -s /compat/linux/usr/local/ibm/gsk8_64/lib64/libgsk8km_64.so /compat/linux/usr/lib64/libgsk8km_64.so
ln -s /compat/linux/usr/local/ibm/gsk8_64/lib64/libgsk8cms_64.so /compat/linux/usr/lib64/libgsk8cms_64.so
# Enable the backup client service; disable unused system processes.
echo 'dsmc_enable="YES"' >> /etc/rc.conf
echo 'syslogd_enable="NO"' >> /etc/rc.conf
echo 'cron_enable="NO"' >> /etc/rc.conf
# cleanup: drop the build-time tools and downloaded artifacts.
pkg delete -y rpm4 curl
pkg autoremove -y
pkg clean -y
rm -r /compat/linux/tmp
rm -r /usr/src
| true |
9942890e3e17e3bd34e24deca963f3c05d67db85 | Shell | ualeshenko/audit-user-client | /cron-client.sh | UTF-8 | 1,200 | 3.71875 | 4 | [] | no_license | #!/bin/bash
# Initialise the global configuration shared by all checks: the systemd unit
# names to watch, the live unit directory, and the backup directory that
# recover() restores from.
set_env () {
path_to_systemctl="/etc/systemd/system"
path_to_backup="/srv/audit-user-client/systemd-service/"
files_systemd=("client-audit.service" "timer-client-audit.timer")
}
# Reinstall the unit files from the backup directory, re-enable the audit
# service and start it; called by checker_files when a unit file is missing.
recover () {
echo "Recover services - In progress"
cp -fi ${path_to_backup}/* "${path_to_systemctl}/"
chmod 660 "${path_to_systemctl}/${files_systemd[0]}" "${path_to_systemctl}/${files_systemd[1]}"
systemctl daemon-reload && systemctl enable "${files_systemd[0]}"
systemctl start "${files_systemd[0]}"
echo "Recover services - success"
}
# Verify each watched unit file still exists under the systemd directory;
# run a full recover() as soon as one is missing.
checker_files () {
for file_service in "${files_systemd[@]}" ; do
if [ -f ${path_to_systemctl}/${file_service} ] ; then
echo "File ${file_service} - 200 Ok"
else
recover
fi
done
}
# Restart the timer unit when "systemctl status" reports it not active.
checker_service () {
# Capture only systemctl's exit status; the status text is discarded.
status_timer=`systemctl status "${files_systemd[1]}" &> /dev/null ; echo $?`
if [ "${status_timer}" -eq "0" ] ; then echo "Timer ${files_systemd[1]} - Ok"
else systemctl restart "${files_systemd[1]}"
fi
}
# One full check pass: load config, verify unit files, verify the timer.
main () {
set_env
checker_files
checker_service
}
# Run a pass every 5 seconds for 12 iterations (~1 minute), so a per-minute
# cron invocation of this script gives near-continuous monitoring.
for ten_s in {0..11}; do
main ; sleep 5
done
| true |
8d8a95768a339b6e0a3a496afef293389e2d044e | Shell | gloppasglop/gitbook-s2i | /s2i/bin/run | UTF-8 | 265 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
# Source the s2i helper when present -- presumably it sets up a passwd entry
# for the container's arbitrary UID; verify against the base image.
if [ -e "/opt/app-root/etc/generate_container_user" ]; then
  source /opt/app-root/etc/generate_container_user
fi
# Drop into an interactive shell instead of serving when run with --debug.
# (A failing test inside a && list does not trip `set -e`.)
[ "$1" == "--debug" ] && exec /bin/bash
echo "Launching gitbook..."
# Serve the pre-built book directory over HTTP; exec so the server becomes
# PID 1 and receives container signals directly.
cd _book
exec static-server --port 8080 --index index.html
| true |
2c4769a30d7e613cb5efc8eb436ca2912f8374f8 | Shell | tingtingths/docker-v2ray | /build.sh | UTF-8 | 399 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env sh
# Require an explicit version tag: it is used both as the build arg and as
# the image tag.
if [ -z "$1" ]; then
    echo 'Missing version argument, e.g. build.sh v4.33.0'
    exit 1
fi
echo "Building version ${1}..."
# Brief grace period to abort before the (slow) --no-cache build starts.
sleep 3
# Build, tag the result as both <version> and latest, then push both tags;
# each step only runs if the previous one succeeded.
docker build --no-cache -t registry.itdog.me/v2ray:$1 --build-arg VERSION=$1 . \
    && docker tag registry.itdog.me/v2ray:$1 registry.itdog.me/v2ray:latest \
    && docker push registry.itdog.me/v2ray:$1 \
    && docker push registry.itdog.me/v2ray:latest
| true |
c6dd2895c54a666407d2fbb596eb2bddfada3a1b | Shell | elirex/terminal | /gitlog | UTF-8 | 507 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Colored git pretty-format fragments (see git-log "PRETTY FORMATS"):
# abbreviated hash, RFC2822 author date, relative age, ref decorations,
# subject line and author name -- assembled into FORMAT below.
HASH='%C(bold blue)%h%C(reset)'
DATE='%C(bold cyan)%aD%C(reset)'
RELATIVE_TIME='%C(bold green)(%ar)%C(reset)'
REFS='%C(bold yellow)%d%C(reset)'
COMMIT_MSG='%C(white)%s%C(reset)'
AUTHOR='%C(dim white)- %an%C(reset)'
# Two display lines per commit (%n): header line, then message + author.
FORMAT="${HASH} - ${DATE} ${RELATIVE_TIME}${REFS}%n ${COMMIT_MSG} ${AUTHOR}"
# Graph log of the current branch using the colored FORMAT defined above.
git_branch_log() {
    git log --graph --abbrev-commit --decorate --pretty="tformat:${FORMAT}"
}
# Same as git_branch_log but across all refs (--all), showing every branch.
git_complete_log() {
    git log --graph --abbrev-commit --decorate --all --pretty="tformat:${FORMAT}"
}
| true |
9374c16895378ca8472f13bf7fba3e38cf28002a | Shell | ubuntu-chu/study | /android/android-images.sh | UTF-8 | 2,877 | 3.734375 | 4 | [] | no_license | #!/bin/sh
image_path_prefix="/home/itl/work/A83T/"
image_path_suffix="lichee/tools/pack/"
image_dst_path="/home/mount/images"
image_test_path=$image_dst_path/history/dev_test
project_hmt=hmt
project_hmt_v3=hmt_v3
project_hevc=hevc
project_hevc_v2=hevc_v2
project_cpe=cpe
project_ist=ist
action_install="install"
action_test="test"
action_test_clr="test_clr"
action_list="list"
help(){
echo "Usage : $0 <action> <project>"
echo "Param action : $action_install|$action_list|$action_test|$action_test_clr"
echo "Param project : $project_ist|$project_hmt|$project_hmt_v3|$project_hevc|$project_hevc_v2|$project_cpe"
exit 1
}
execute_cmd()
{
echo "$@"
$@
if [ $? -ne 0 ];then
echo "execute $@ failed! please check what happened!"
exit 1
fi
}
if [ $# -ne 1 ] && [ $# -ne 2 ]; then
help
fi
case "$1" in
$action_install|$action_test|$action_test_clr)
;;
$action_list)
which tree
if [ $? -eq 0 ]; then
tree $image_dst_path
else
ll -R $image_dst_path
fi
exit
;;
*)
help
break;
esac
do_image_install()
{
project_name=$1
image_install_path=$2/$project_name/A83T/firmware
if [ ! -d $image_install_path ]; then
execute_cmd mkdir -p /$image_install_path
fi
execute_cmd cd $image_path_prefix/$project_name/$image_path_suffix/
image_list="sun8iw6p1_android_${project_name}_card0.img sun8iw6p1_android_${project_name}_uart0.img"
for image_name in $image_list; do
if [ -r ${image_name} ]; then
install_image_name=`echo ${image_name} | cut -d '.' -f 1`
install_image_name_suffiex=`echo ${image_name} | cut -d '.' -f 2`
image_date=`stat ${image_name} | grep "ๆ่ฟๆดๆน"| sed -n "s/ๆ่ฟๆดๆน๏ผ\(.*\) \(.*\) \(.*\)/\1_\2/p"|tr -d '-'|tr -d ':' | cut -d '.' -f 1`
execute_cmd cp ${image_name} $image_install_path/${install_image_name}_${image_date}.${install_image_name_suffiex}
#execute_cmd cp ${image_name} $image_install_path/${install_image_name}_`date +"%Y%m%d_%H%M%S"`.${install_image_name_suffiex}
fi
done
#find ./ -iregex ".*sun8iw6p1_android_hmt_\(card0\|uart0\)\.img$" -print0 | xargs -0 -I {} cp {} /home/mount/Images-Android/hmt/
}
# Delete every installed image for a project under the given base dir.
# $1 - project name; $2 - destination base directory
# BUGFIX: paths are now quoted so directories containing spaces are
# handled, and ${image_install_path:?} aborts instead of expanding to
# "" (an empty expansion would previously have produced "rm -rf //*").
# The redundant leading "/" before the already-absolute path is dropped.
do_image_clear()
{
	project_name=$1
	image_install_path=$2/$project_name/A83T/firmware
	[ ! -d "$image_install_path" ] && execute_cmd mkdir -p "$image_install_path"
	execute_cmd rm -rf "${image_install_path:?}"/*
}
# Dispatch: validate the project name, then run the requested action.
# install  -> copy images into the release tree
# test     -> copy images into the scratch/test tree
# test_clr -> wipe the scratch/test tree for the project
case "$2" in
	$project_hmt | $project_hmt_v3 | $project_hevc | $project_hevc_v2 | $project_cpe | $project_ist)
		case "$1" in
			$action_install)
				do_image_install $2 $image_dst_path
			;;
			$action_test)
				do_image_install $2 $image_test_path
			;;
			$action_test_clr)
				do_image_clear $2 $image_test_path
			;;
			*)
				help
				# NOTE(review): unreachable - help exits; 'break' is
				# invalid outside a loop.
				break;
		esac
	;;
	*)
		echo "$2 : invalid param! please check!"
		help
	;;
esac
| true |
198862f32b6ed28431a20190e14ba86116f67139 | Shell | cimentadaj/unix_workbench_answers | /05_bash_programming/bash_programming1.sh | UTF-8 | 282 | 3.453125 | 3 | [] | no_license | #!/usr/bin/sh
# NOTE(review): this file is an exercise transcript, not a runnable
# script - the bare expression and "quit" lines are meant to be typed
# into bc's interactive session; executed non-interactively the shell
# would try to run them as commands.
# 1. Look at the man pages for bc .
man bc

# 2. Try doing some math in bc interactively.
bc -i
2.51231 * 555.213151
quit

# 3. Try writing some equations in a file and then provide
#    that file as an argument to bc.
echo "5.5 * 5.21351" > test.sh
bc test.sh
quit
| true |
8880eb99ff17d3328251f7b93dd700f20cfbf462 | Shell | lhotse-speech/lhotse | /tools/check_style.sh | UTF-8 | 347 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Run flake8 over every git-tracked Python file in the repository.
# BUGFIX: the script now exits non-zero if ANY file has violations;
# previously only the status of the LAST flake8 invocation was
# propagated, so earlier failures were silently ignored.  File names
# are also read line-by-line instead of being word-split, so paths
# containing spaces survive intact.
IGNORE_LIST='F401,F403,F811,E131,E124,E126,E501,E741,W503'
MAX_LINE_LENGTH=120

BASE=$(git rev-parse --show-toplevel)

STATUS=0
while IFS= read -r FILE; do
  flake8 --ignore "${IGNORE_LIST}" \
      --max-line-length "${MAX_LINE_LENGTH}" \
      "${BASE}/${FILE}" || STATUS=1
done < <(git ls-tree --full-tree --name-only -r HEAD | grep -e ".*\.py\$")
exit "${STATUS}"
| true |
de13713ed4546ad3905834987f2484fd14e5dbbb | Shell | alexte/mate-book-pro-x-linux-config | /home/alex/choose-wlan-network | UTF-8 | 1,399 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# Scan for nearby WLAN networks, let the user pick one with dialog(1),
# prompt for the passphrase, write a matching wlan0 stanza to
# /etc/network/interfaces.d/wlan, and restart the interface.
ifconfig wlan0 up
sleep 1
# One unique SSID per line.
iwlist wlan0 scan | grep SSID | sort -u | sed -re "s/^.*SSID:\"(.*)\"$/\\1/g" | grep . > /tmp/wlan-scan
# Build the "tag item" pairs dialog expects; SSIDs containing '"' are skipped.
cat /tmp/wlan-scan | grep -v \" | while read a ; do echo "\"$a\" \"$a\" "; done > /tmp/wlan-list

dialog --output-fd 3 --no-tags --menu "WLAN Networks" 0 0 0 --file /tmp/wlan-list 3> /tmp/wlan-selected

if [ ! -s /tmp/wlan-selected ]; then
    echo
    echo "No network chosen"
    sleep 5
    exit
fi

SSID=`cat /tmp/wlan-selected`

# BUGFIX: this variable used to be called PWD, silently clobbering the
# shell's working-directory variable.
PASSPHRASE=$( dialog --insecure --passwordbox "Password for $SSID" 0 0 --output-fd 1)
# BUGFIX: "$SSID" and "$PASSPHRASE" are quoted so values containing
# spaces survive word splitting on their way into wpa_passphrase.
SECRET=`echo "$PASSPHRASE" | wpa_passphrase "$SSID" | grep " psk=" | cut -d "=" -f 2- `

if [ -n "$SECRET" ]; then
    echo
    echo
    echo "Writing the wlan0 config file to /etc/network/interfaces.d/"
    (
    echo "iface wlan0 inet dhcp"
    echo "    wpa-ssid $SSID"
    echo "    wpa-psk $SECRET"
    ) > /etc/network/interfaces.d/wlan
    echo "restarting interface"
    ifdown wlan0
    sleep 1
    ifup wlan0
    echo -n "Hit Enter:"
    read
else
    echo
    echo "No SECRET ? Trying without encryption..."
    echo "Writing the wlan0 config file to /etc/network/interfaces.d/"
    (
    echo "iface wlan0 inet dhcp"
    echo "    wireless-essid $SSID"
    echo "    wireless-mode managed"
    ) > /etc/network/interfaces.d/wlan
    echo "restarting interface"
    ifdown wlan0
    ifup wlan0
    echo -n "Hit Enter:"
    read
fi
| true |
741eace0908e783db3f5ff4bf59a6fec7d5fa470 | Shell | aisayev/ffmpeg-proxy | /contrib/ffmpeg-proxy | UTF-8 | 590 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Minimal inetd-style proxy: reads one HTTP request line of the form
# "GET /<upstream-url> HTTP/1.x", then remuxes the upstream stream with
# ffmpeg to MPEG-TS on stdout (Content-Type: video/mp2t).
# https://github.com/aisayev/ffmpeg-proxy
read raw
# Capture the embedded upstream URL from the request path.
[[ $raw =~ (^GET /)(https?://[^ ]*)( HTTP) ]] && url="${BASH_REMATCH[2]}"
if [ -z "$url" ]
then
	echo "HTTP/1.1 400 Bad request"
else
	echo -e "HTTP/1.1 200 OK\n\rContent-Type: video/mp2t\n\rConnection: close\n\r"
	# BUGFIX: "$url" is quoted; unquoted, URLs containing shell glob
	# characters such as '?' (very common in query strings) could be
	# mangled by pathname expansion before reaching ffmpeg.
	/usr/bin/ffmpeg -user-agent "Mozilla/5.0 (QtEmbedded; U; Linux; C) AppleWebKit/533.3 (KHTML, like Gecko) MAG200 stbapp ver: 2 rev: 234 Safari/533.3" -i "$url" -map 0 -c copy -hide_banner -nostats -loglevel 0 -metadata service_provider=ffmpeg-proxy -metadata service_name=IPTV -f mpegts -
fi
| true |
d9613c2d0e37c79816d5ee1e5770d23592630474 | Shell | samant8/member-MITRE | /Tangerine/demo/FraudWasteAbuse/configure.sh | UTF-8 | 732 | 3.1875 | 3 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | #!/bin/bash
# Build the Tangerine client and the NetOwl demo app (when invoked with
# the "build" argument), then stage the resulting jars plus their
# runtime dependencies into this demo directory.
dirn=`dirname $0`
# Resolve repo root (two levels up) and this demo dir as absolute paths.
TANGERINE=`cd -P $dirn/../.. ; echo $PWD`
DEMO=`cd -P $dirn; echo $PWD`

if [ "$1" = "build" ]; then
    cd $TANGERINE
    # The rm calls are best-effort: they fail harmlessly when no jars
    # exist yet, which is why this script does not use "set -e".
    rm ./client/lib/*jar
    mvn clean install
    mvn dependency:copy-dependencies

    cd $DEMO/apps/netowl_app
    rm ./lib/*jar
    mvn clean install
    mvn dependency:copy-dependencies

    cd $DEMO
    mkdir -p lib
fi

cd $DEMO

# This relies on having already run "mvn install" at $TANGERINE
#
cp $TANGERINE/client/target/client.jar $DEMO
cp $TANGERINE/client/lib/*jar $DEMO/lib

cp $DEMO/apps/netowl_app/target/netowlapp.jar $DEMO
cp $DEMO/apps/netowl_app/lib/*jar $DEMO/lib

# In your app environment, export PYTHONPATH=$DEMO/piplib
#
pip install --upgrade --target $DEMO/piplib requests
| true |
032071c37b1ea51bdf649d53a50fa4144f56cedd | Shell | njh/delphyne | /test/result-formatting.sh | UTF-8 | 1,029 | 3.03125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# shunit2 test suite for delphyne's result-formatting helpers
# (display_zones, display_error, display_message).  Fixture files
# (zones.json, error.json, errors.json, *-headers.txt, success.json)
# are expected in the current working directory.
# shellcheck source=delphyne
source "${BASH_SOURCE%/*}/../delphyne"

# delphyne enables strict mode; relax it so shunit2 assertions can run.
set +e
set +u

# display_zones prints one zone name per line.
testDisplayZones() {
  assertEquals $'example.com\nexample.org\nexample.net' "$(display_zones < zones.json)"
}

# A single JSON error is rendered as one "=> message" line (on stderr).
testDisplayError() {
  assertEquals '=> Access denied' "$(display_error json-headers.txt error.json 2>&1)"
}

# Multiple errors produce one "=>" line each.
testDisplayMultipleErrors() {
  expected=$'=> Unsupported record type \'xx\' (line 18)\n'
  expected+=$'=> Unsupported record type \'zz\' (line 19)'
  assertEquals "${expected}" "$(display_error json-headers.txt errors.json 2>&1)"
}

# Non-JSON responses fall back to dumping headers and raw body.
testDisplayErrorNotJson() {
  result="$(display_error html-headers.txt error.json 2>&1)"
  assertContains 'ERROR: An error occurred while making request' "${result}"
  assertContains 'Content-Type: text/html' "${result}"
  assertContains '"error": "Access denied"' "${result}"
}

# Success payloads are summarized as "=> N records added".
testDisplayMessage() {
  result="$(display_message "$(<success.json)" 2>&1)"
  assertEquals '=> 8 records added' "${result}"
}

# shellcheck disable=SC1091
source "$(command -v shunit2)"
| true |
4794e131ac7e69796a6edc2c316d656b05c2f948 | Shell | wenxuefeng3930/openverse-api | /load_sample_data.sh | UTF-8 | 4,977 | 2.703125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e

# Docker-compose service names, overridable from the environment.
WEB_SERVICE_NAME="${WEB_SERVICE_NAME:-web}"
ANALYTICS_SERVICE_NAME="${ANALYTICS_SERVICE_NAME:-analytics}"
CACHE_SERVICE_NAME="${CACHE_SERVICE_NAME:-cache}"
UPSTREAM_DB_SERVICE_NAME="${UPSTREAM_DB_SERVICE_NAME:-upstream_db}"
DB_SERVICE_NAME="${DB_SERVICE_NAME:-db}"

# Set up API database and upstream
docker-compose exec -T "$WEB_SERVICE_NAME" /bin/bash -c "python3 manage.py migrate --noinput"

# Create a superuser and a user for integration testing
# Note that the Python code uses 4 spaces for indentation after the tab that is stripped by <<-
docker-compose exec -T "$WEB_SERVICE_NAME" /bin/bash -c "python3 manage.py shell <<-EOF
	from django.contrib.auth.models import User
	usernames = ['continuous_integration', 'deploy']
	for username in usernames:
	    if User.objects.filter(username=username).exists():
	        print(f'User {username} already exists')
	        continue
	    if username == 'deploy':
	        user = User.objects.create_superuser(username, f'{username}@example.com', 'deploy')
	    else:
	        user = User.objects.create_user(username, f'{username}@example.com', 'deploy')
	    user.save()
EOF"

# Migrate analytics
docker-compose exec -T "$ANALYTICS_SERVICE_NAME" /bin/bash -c "PYTHONPATH=. pipenv run alembic upgrade head"

# Load content providers
docker-compose exec -T "$DB_SERVICE_NAME" /bin/bash -c "psql -U deploy -d openledger <<-EOF
	DELETE FROM content_provider;
	INSERT INTO content_provider (created_on, provider_identifier, provider_name, domain_name, filter_content, media_type) VALUES
	(now(), 'flickr', 'Flickr', 'https://www.flickr.com', false, 'image'),
	(now(), 'rawpixel', 'rawpixel', 'https://www.rawpixel.com', false, 'image'),
	(now(), 'sciencemuseum', 'Science Museum', 'https://www.sciencemuseum.org.uk', false, 'image'),
	(now(), 'stocksnap', 'StockSnap', 'https://stocksnap.io', false, 'image'),
	(now(), 'wikimedia', 'Wikimedia', 'https://commons.wikimedia.org', false, 'image'),
	(now(), 'jamendo', 'Jamendo', 'https://www.jamendo.com', false, 'audio');
EOF"
# Mirror the content_provider table into the upstream DB via pg_dump.
docker-compose exec -T "$UPSTREAM_DB_SERVICE_NAME" /bin/bash -c "psql -U deploy -d openledger <<-EOF
	DROP TABLE IF EXISTS content_provider CASCADE;
EOF"
docker-compose exec -T "$UPSTREAM_DB_SERVICE_NAME" /bin/bash -c "PGPASSWORD=deploy pg_dump -t content_provider -U deploy -d openledger -h db | psql -U deploy -d openledger"

# Load sample data for images: copy the schema, rename the table to the
# *_view name the ingestion server expects, then bulk-load the CSV.
docker-compose exec -T "$UPSTREAM_DB_SERVICE_NAME" /bin/bash -c "PGPASSWORD=deploy pg_dump -s -t image -U deploy -d openledger -h db | psql -U deploy -d openledger"
docker-compose exec -T "$UPSTREAM_DB_SERVICE_NAME" /bin/bash -c "psql -U deploy -d openledger <<-EOF
	ALTER TABLE image RENAME TO image_view;
	ALTER TABLE image_view ADD COLUMN standardized_popularity double precision, ADD COLUMN ingestion_type varchar(1000);
	\copy image_view (identifier,created_on,updated_on,ingestion_type,provider,source,foreign_identifier,foreign_landing_url,url,thumbnail,width,height,filesize,license,license_version,creator,creator_url,title,meta_data,tags,watermarked,last_synced_with_source,removed_from_source,standardized_popularity) from './sample_data/sample_data.csv' with (FORMAT csv, HEADER true)
EOF"

# Load sample data for audio
# NOTE(review): "head -n -14" presumably strips trailing index/constraint
# statements from the schema dump - confirm the count if the schema changes.
docker-compose exec -T "$UPSTREAM_DB_SERVICE_NAME" /bin/bash -c "PGPASSWORD=deploy pg_dump -s -t audio -U deploy -d openledger -h db | head -n -14 | psql -U deploy -d openledger"
docker-compose exec -T "$UPSTREAM_DB_SERVICE_NAME" /bin/bash -c "psql -U deploy -d openledger <<-EOF
	ALTER TABLE audio RENAME TO audio_view;
	ALTER TABLE audio_view ADD COLUMN standardized_popularity double precision, ADD COLUMN ingestion_type varchar(1000), ADD COLUMN audio_set jsonb;
	\copy audio_view (identifier,created_on,updated_on,ingestion_type,provider,source,foreign_identifier,foreign_landing_url,url,thumbnail,duration,bit_rate,sample_rate,category,genres,audio_set,alt_files,filesize,license,license_version,creator,creator_url,title,meta_data,tags,watermarked,last_synced_with_source,removed_from_source,standardized_popularity) from './sample_data/sample_audio_data.csv' with (FORMAT csv, HEADER true)
EOF"

# Load search quality assurance data.
curl -XPOST localhost:8001/task -H "Content-Type: application/json" -d '{"model": "image", "action": "LOAD_TEST_DATA"}'
sleep 2
curl -XPOST localhost:8001/task -H "Content-Type: application/json" -d '{"model": "audio", "action": "LOAD_TEST_DATA"}'
sleep 2

# Ingest and index the data
# NOTE(review): the fixed sleeps assume ingestion finishes within the
# window; polling the task status endpoint would be more robust.
curl -XPOST localhost:8001/task -H "Content-Type: application/json" -d '{"model": "image", "action": "INGEST_UPSTREAM"}'
sleep 30
curl -XPOST localhost:8001/task -H "Content-Type: application/json" -d '{"model": "audio", "action": "INGEST_UPSTREAM"}'
sleep 30

# Clear source cache since it's out of date after data has been loaded
docker-compose exec -T "$CACHE_SERVICE_NAME" /bin/bash -c "echo \"del :1:sources-image\" | redis-cli"
docker-compose exec -T "$CACHE_SERVICE_NAME" /bin/bash -c "echo \"del :1:sources-audio\" | redis-cli"
07e82981491c05e1e086d2e9a301efa3bc3dc24b | Shell | philthompson/blog | /build.sh | UTF-8 | 33,982 | 3.6875 | 4 | [
"MIT"
] | permissive | #!/opt/homebrew/bin/bash
#
# using bash installed with homebrew, which is at
# version 5.x, as opposed to macOS built-in bash
# which is at version 3.x
#
# TODO: (for in-place mode) when a (.html) file is
# changed or unchanged, add its full path to a list.
# then, compare all (.html) files to that list to
# find (.html) files that should be deleted (move
# them to a out/in-place/trash-yyyymmddhhmmss dir).
# then after doing this, we should be able to also
# compare the gen/static dir to the out/in-place dir
# to find static files that have been deleted.
#
#
# Absolute path of this script (perl Cwd resolves symlinks portably).
THIS_SCRIPT="`perl -MCwd -le 'print Cwd::abs_path shift' "${0}"`"
THIS_DIR="$(dirname "${THIS_SCRIPT}")"
# Default: a fresh timestamped output dir per build.
OUT_DIR="${THIS_DIR}/out/$(date +%Y-%m-%d-%H%M%S)"
GEN_DIR="${THIS_DIR}/gen"
STATIC_DIR="${THIS_DIR}/gen/static"
MARKDOWN_PERL_SCRIPT="${THIS_DIR}/Markdown_1.0.1/Markdown.pl"
# installed with pip inside virtual environment:
#   $ python3 -m venv python-venv
#   $ source python-venv/bin/activate
#   $ python3 -m pip install markdown-it-py
#   $ deactivate
COMMONMARK_SCRIPT="${THIS_DIR}/python-venv/bin/markdown-it"
# the top-level URL
SITE_URL="https://philthompson.me"
# "in-place" reuses a fixed output dir (incremental rebuild) instead of
# a fresh timestamped one.
if [[ "${1}" == "in-place" ]]
then
	OUT_DIR="${THIS_DIR}/out/in-place"
fi
# "skip-gallery" skips regenerating the bird-gallery pages.
SKIP_GALLERY="false"
if [[ "${2}" == "skip-gallery" ]]
then
	SKIP_GALLERY="true"
fi

echo "${THIS_SCRIPT}"
echo "${THIS_DIR}"
echo "${OUT_DIR}"

# make new build output dir
mkdir -p "${OUT_DIR}"
mkdir -p "${OUT_DIR}/archive"

RSS_FINAL_FILENAME="feed.xml"
RSS_FINAL="${OUT_DIR}/${RSS_FINAL_FILENAME}"
RSS_BIRDS_GALLERY_ITEMS="${OUT_DIR}/rss-birds-gallery-items.xml"
RSS_BLOG_ARTICLE_ITEMS="${OUT_DIR}/rss-blog-article-items.xml"
# overwrite RSS blog article items here, but only overwrite
# the birds gallery items if we're actually updating that
echo -n "" > "${RSS_BLOG_ARTICLE_ITEMS}"
# put static files in place, except for markdown files
# which are rendered to .html files later
# - using rsync with "a" (archive) options EXCEPT for
#   "t" option, which has been replaced with "c" to
#   only copy a file if the checksum has changed,
#   not just the timestamp
# - note trailing slash for source (current directory)
#   and no trailing slash for dest directory to
#   populate with current dir’s contents
# - note no delete option -- use this script without
#   "in-place" argument for final full build without
#   deleted static files
rsync -vrlpcgoD \
	--exclude="*.md" \
	--exclude="mandelbrot-gallery/*" \
	--exclude="gallery-img/**/*.db" \
	--exclude="gallery-img/**/*.supplement" \
	--exclude="gallery-img/**/*.txid" \
	"${STATIC_DIR}"/ "${OUT_DIR}"
#cp -rp "${STATIC_DIR}"/* "${OUT_DIR}/"

# Regenerate style.css, but only overwrite the output file when its
# SHA-256 differs - this keeps mtimes stable for unchanged files.
STYLE_FILE="${OUT_DIR}/css/style.css"
TMP_FILE="`bash ${GEN_DIR}/style.sh`"
if [[ ! -f "${STYLE_FILE}" ]] || [[ "`echo "${TMP_FILE}" | shasum -a 256 | cut -d ' ' -f 1`" != "`shasum -a 256 "${STYLE_FILE}" | cut -d ' ' -f 1`" ]]
then
	echo "style file [${STYLE_FILE}] IS changed"
	echo "${TMP_FILE}" > "${STYLE_FILE}"
#else
#	echo "style file [${STYLE_FILE}] is unchanged"
fi
# put 5 articles on each "home page" calling the newest one index.html
# TODO: consider putting 10 articles on each page -- prefer smaller/faster-
# loading pages so we're going with 5 per page now
HOME_PAGES="$(ls -1 "${GEN_DIR}/articles" | sort -r | paste - - - - - | awk '{if (NR=="1") {i=NR-1; print i" index.html "$0}else{ p=NR-1; print p" older"p".html "$0}}')"
# array
#HOME_PAGES_CONTENT=()
# associative array
declare -A HOME_PAGES_CONTENT
# to allow variables outside while loop to be modified from within the
# loop, we will use a "here string" (at the "done <<< ..." line) to
# provide input to the while loop
# (see https://stackoverflow.com/a/16854326/259456)
while read HOME_PAGE_LINE
do
HOME_PAGE_IDX="`echo "${HOME_PAGE_LINE}" | cut -d ' ' -f 1`"
HOME_PAGE="`echo "${HOME_PAGE_LINE}" | cut -d ' ' -f 2`"
HOME_PAGES_CONTENT[$HOME_PAGE_IDX]="`bash "${GEN_DIR}/header.sh" Home '.' "philthompson, phil, thompson, personal, blog" "Personal blog home โ philthompson.me" 3`"
done <<< "${HOME_PAGES}"
# content for page /archive/index.html
ARCHIVE_INDEX_CONTENT=""
# associative array
# content by year for page /<year>/index.html
declare -A YEAR_PAGES_CONTENT
# Emit one article's home-page snippet (date, linked title, and the
# article body rendered from markdown).  If the markdown contains a
# "more://" marker, only the content above it is emitted, followed by a
# "continue reading..." link.
# $1 - human-readable date, $2 - year, $3 - URL-safe title, $4 - title,
# $5 - path to the article's markdown file
buildHomepageArticleSnippet() {
	ARTICLE_DATE_REFORMAT="${1}"
	ARTICLE_YEAR="${2}"
	ARTICLE_TITLE_URL="${3}"
	ARTICLE_TITLE="${4}"
	ARTICLE_MARKDOWN_FILE="${5}"
	echo "        <div class=\"container\">"
	echo "          <div class=\"article-info\">${ARTICLE_DATE_REFORMAT}</div>"
	echo "          <h1 class=\"article-title\"><a href=\"./${ARTICLE_YEAR}/${ARTICLE_TITLE_URL}.html\">${ARTICLE_TITLE}</a></h1>"
	# thanks to https://stackoverflow.com/a/44055875/259456
	# for showing how to keep quoted command arguments in
	# a bash variable and execute them later without eval
	MARKDOWN_CMD=("perl" "${MARKDOWN_PERL_SCRIPT}" "--html4tags")
	if [[ ! -z "$(grep -m 1 '(gen-markdown-flavor: CommonMark)' "${ARTICLE_MARKDOWN_FILE}")" ]]
	then
		echo "article [${ARTICLE_MARKDOWN_FILE}] specifies it should use CommonMark" >&2
		MARKDOWN_CMD=("${COMMONMARK_SCRIPT}")
	fi
	# render markdown, rewrite the ${SITE_ROOT_REL}/${THIS_ARTICLE}
	# placeholders for a page living at the site root
	# NOTE(review): "grep -B 999" caps the excerpt at 999 lines before
	# the more:// marker - confirm no article exceeds that.
	if [[ -z "$(grep -m 1 "more://" "${ARTICLE_MARKDOWN_FILE}")" ]]
	then
		"${MARKDOWN_CMD[@]}" "${ARTICLE_MARKDOWN_FILE}" | sed 's/${SITE_ROOT_REL}/./g' | sed "s#\${THIS_ARTICLE}#./${ARTICLE_YEAR}/${ARTICLE_TITLE_URL}.html#g"
	else
		"${MARKDOWN_CMD[@]}" "${ARTICLE_MARKDOWN_FILE}" | grep -B 999 'more://' | grep -v 'more://' | sed 's/${SITE_ROOT_REL}/./g' | sed "s#\${THIS_ARTICLE}#./${ARTICLE_YEAR}/${ARTICLE_TITLE_URL}.html#g"
		echo "<a href=\"./${ARTICLE_YEAR}/${ARTICLE_TITLE_URL}.html\">continue reading...</a>"
	fi
	echo "          <p style=\"clear:both;\"></p>"
	echo "        </div>"
}
# Render the home-page teaser for the most recent bird-photo gallery:
# a linked card showing the shoot's favorite image and its date.
# $1 - "gallery latest" file: line 1 = gallery page URL, line 2 =
#      favorite image URL (both with a SITE_ROOT_REL placeholder),
#      last line = human-readable shoot date
# $2 - relative path to the site root, substituted for the placeholder
buildHomepageBirdsGalleryLink() {
	local latest_file="${1}"
	local root_rel="${2}"
	local page_link fav_img shoot_date
	page_link="$(sed -n '1p' "${latest_file}" | sed "s/SITE_ROOT_REL/${root_rel}/g")"
	fav_img="$(sed -n '2p' "${latest_file}" | sed "s/SITE_ROOT_REL/${root_rel}/g")"
	shoot_date="$(sed -n '$p' "${latest_file}")"
	cat << xxxxxEOFxxxxx
<a href="${page_link}" style="text-decoration:none">
	<div class="container" style="background-color:rgba(150,150,150,0.1); padding:1.0rem; overflow:hidden; border-radius: 0.3rem">
		<img class="width-40" style="float:right" src="${fav_img}" />
		Latest Bird Gallery:<br/>${shoot_date}
	</div>
</a>
xxxxxEOFxxxxx
}
# Emit one RSS 2.0 <item> for an article: title, absolute link, RFC-822
# pubDate, and the rendered article HTML inside a CDATA description
# (truncated at any "more://" marker, with a "continue reading" link).
# $1 - article date (YYYY-MM-DD or YYYY-MM-DD-N), $2 - year,
# $3 - URL-safe title, $4 - title, $5 - markdown file, $6 - site URL
buildArticleRssItem() {
	ARTICLE_DATE="${1}"
	ARTICLE_YEAR="${2}"
	ARTICLE_TITLE_URL="${3}"
	ARTICLE_TITLE="${4}"
	ARTICLE_MARKDOWN_FILE="${5}"
	SITE_HOME_URL="${6}"
	# since the articles are dated like:
	#   YYYY-MM-DD, or
	#   YYYY-MM-DD-N (where N is the Nth article for the day)
	# we can use midnight for the time, plus N hours
	# append "-0" to date if missing the 4th dash-delimited field
	if [[ "${ARTICLE_DATE}" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]]
	then
		ARTICLE_DATE="${ARTICLE_DATE}-0"
	fi
	# format according to https://www.w3.org/Protocols/rfc822/#z28
	# NOTE(review): "date -j -f" is BSD/macOS-specific; this breaks on
	# GNU date - intentional since the build host is macOS.
	ARTICLE_DATE_RSS="$(date -j -f '%Y-%m-%d-%H' "${ARTICLE_DATE}" '+%a, %d %b %Y %H:00:00 %z')"
	ABSOLUTE_ARTICLE_URL="${SITE_HOME_URL}/${ARTICLE_YEAR}/${ARTICLE_TITLE_URL}.html"
	echo "    <item>"
	echo "      <title>${ARTICLE_TITLE}</title>"
	echo "      <link>${ABSOLUTE_ARTICLE_URL}</link>"
	echo "      <pubDate>${ARTICLE_DATE_RSS}</pubDate>"
	echo -n "      <description><![CDATA["
	# thanks to https://stackoverflow.com/a/44055875/259456
	# for showing how to keep quoted command arguments in
	# a bash variable and execute them later without eval
	MARKDOWN_CMD=("perl" "${MARKDOWN_PERL_SCRIPT}" "--html4tags")
	if [[ ! -z "$(grep -m 1 '(gen-markdown-flavor: CommonMark)' "${ARTICLE_MARKDOWN_FILE}")" ]]
	then
		# BUGFIX: this diagnostic now goes to stderr (matching
		# buildHomepageArticleSnippet); previously it went to stdout and
		# leaked into the RSS feed's CDATA description.
		echo "article [${ARTICLE_MARKDOWN_FILE}] specifies it should use CommonMark" >&2
		MARKDOWN_CMD=("${COMMONMARK_SCRIPT}")
	fi
	# it's probably not necessary, but remove leading/trailing whitespace
	# from all html lines
	# important here, possibly, to replace relative links with absolute ones
	# so that links work in RSS readers
	if [[ -z "$(grep -m 1 "more://" "${ARTICLE_MARKDOWN_FILE}")" ]]
	then
		"${MARKDOWN_CMD[@]}" "${ARTICLE_MARKDOWN_FILE}" | sed "s#\${SITE_ROOT_REL}#${SITE_HOME_URL}#g" | sed "s#\${THIS_ARTICLE}#${ABSOLUTE_ARTICLE_URL}#g"
	else
		"${MARKDOWN_CMD[@]}" "${ARTICLE_MARKDOWN_FILE}" | grep -B 999 'more://' | grep -v 'more://' | sed "s#\${SITE_ROOT_REL}#${SITE_HOME_URL}#g" | sed "s#\${THIS_ARTICLE}#${ABSOLUTE_ARTICLE_URL}#g"
		echo "<a href=\"${ABSOLUTE_ARTICLE_URL}\">continue reading...</a>"
	fi | grep -v '!-- Copyright' | sed 's/^[[:space:]]*//g' | sed 's/[[:space:]]*$//g' | tr -d '\n'
	echo "]]></description>"
	echo "      <category>articles</category>"
	echo "      <guid>${ABSOLUTE_ARTICLE_URL}</guid>"
	echo "    </item>"
}
# gallery.sh records the newest shoot's page/image/date in this file,
# which feeds the home-page teaser below.
GALLERY_LATEST_FILE="${OUT_DIR}/gallery-latest.txt"

# after the header, but before the article snippets,
# insert the latest birds gallery picture+link
# (this requires building the birds gallery pages
# here, before the articles)
if [[ "${SKIP_GALLERY}" == "false" ]]
then
	# run script to generate gallery/ pages
	bash "${GEN_DIR}/gallery.sh" \
		"${STATIC_DIR}" \
		"${OUT_DIR}" \
		"${GEN_DIR}/header.sh" \
		"${GEN_DIR}/footer.sh" \
		".." \
		"${RSS_BIRDS_GALLERY_ITEMS}" \
		"${SITE_URL}" \
		"${GALLERY_LATEST_FILE}"
fi

# capture function stdout into a variable, thanks to:
# https://unix.stackexchange.com/a/591153/210174
# NOTE(review): SITE_ROOT_REL does not appear to be set at this point,
# so the placeholder substitution is a no-op here - confirm intended.
{ read -d '' HOMEPAGE_BIRDS_GALLERY_LINK; }< <(buildHomepageBirdsGalleryLink "${GALLERY_LATEST_FILE}" "${SITE_ROOT_REL}")

# NOTE(review): this clobbers GALLERY_LATEST_FILE with the script's
# first argument ("in-place"/empty) - looks like a leftover; the
# variable is not read again below, but verify before removing.
GALLERY_LATEST_FILE="${1}"

# replace placeholder on home page with image and link for latest birds gallery
# .width-resp-25-40
HOME_PAGES_CONTENT["0"]="${HOME_PAGES_CONTENT["0"]}
${HOMEPAGE_BIRDS_GALLERY_LINK}"
# to allow variables outside while loop to be modified from within the
# loop, we will use a "here string" (at the "done <<< ..." line) to
# provide input to the while loop
# (see https://stackoverflow.com/a/16854326/259456)
while read ARTICLE_MARKDOWN_FILE
do
if [[ ! "${ARTICLE_MARKDOWN_FILE}" =~ ^.*/20[0-9]{2}-[0-9]{2}-[0-9]{2}[^/]*\.md$ ]]
then
echo "skipping file [${ARTICLE_MARKDOWN_FILE}] not named like an article" >&2
continue
fi
ARTICLE_METADATA="$(grep -B 99 '^\[//\]: # (gen-meta-end)' "${ARTICLE_MARKDOWN_FILE}")"
ARTICLE_TITLE="$(echo "${ARTICLE_METADATA}" | grep -m 1 gen-title: | cut -d ' ' -f 4- | sed 's/)$//')"
ARTICLE_TITLE_URL="$(echo "${ARTICLE_METADATA}" | grep -m 1 gen-title-url: | cut -d ' ' -f 4- | sed 's/)$//')"
ARTICLE_KEYWORDS="$(echo "${ARTICLE_METADATA}" | grep -m 1 gen-keywords: | cut -d ' ' -f 4- | sed 's/)$//')"
ARTICLE_DESCRIPTION="$(echo "${ARTICLE_METADATA}" | grep -m 1 gen-description: | cut -d ' ' -f 4- | sed 's/)$//')"
ARTICLE_DATE="$(basename "${ARTICLE_MARKDOWN_FILE}" | cut -d . -f 1)"
ARTICLE_DATE_REFORMAT="$(echo "${ARTICLE_DATE}" | cut -d '-' -f 1-3 | \
sed 's/-01-/-January-/' | \
sed 's/-02-/-February-/' | \
sed 's/-03-/-March-/' | \
sed 's/-04-/-April-/' | \
sed 's/-05-/-May-/' | \
sed 's/-06-/-June-/' | \
sed 's/-07-/-July-/' | \
sed 's/-08-/-August-/' | \
sed 's/-09-/-September-/' | \
sed 's/-10-/-October-/' | \
sed 's/-11-/-November-/' | \
sed 's/-12-/-December-/' | sed 's/-0/-/' | sed 's/-/ /g' | awk '{print $2" "$3", "$1}')"
ARTICLE_YEAR="$(echo "${ARTICLE_DATE}" | cut -d '-' -f 1)"
#echo "==== [${ARTICLE_MARKDOWN_FILE}] -> date [${ARTICLE_DATE}] -> year [${ARTICLE_YEAR}]" >&2
# just do year for now -- create archive <a> tag later
#ARCHIVE_INDEX_CONTENT="$(echo -e "${ARCHIVE_INDEX_CONTENT}\n<a href=\"../${ARTICLE_YEAR}/\">${ARTICLE_YEAR}</a>")"
#ARCHIVE_INDEX_CONTENT="$(echo -e "${ARCHIVE_INDEX_CONTENT}\n${ARTICLE_YEAR}")"
# embed newline directly into variable
ARCHIVE_INDEX_CONTENT="${ARCHIVE_INDEX_CONTENT}
${ARTICLE_YEAR}"
mkdir -p "${OUT_DIR}/${ARTICLE_YEAR}"
if [[ -z "${YEAR_PAGES_CONTENT[$ARTICLE_YEAR]}" ]]
then
YEAR_PAGES_CONTENT[$ARTICLE_YEAR]="`bash "${GEN_DIR}/header.sh" "Archive โ ${ARTICLE_YEAR}" '..' "blog, archive, history, year, ${ARTICLE_YEAR}" "Personal blog archive for ${ARTICLE_YEAR} โ philthompson.me" 7`"
fi
# embed newlines directly into variables
YEAR_PAGES_CONTENT[$ARTICLE_YEAR]="${YEAR_PAGES_CONTENT[$ARTICLE_YEAR]}
<div class=\"article-title\">
<small class=\"article-info\">${ARTICLE_DATE_REFORMAT}</small>
<a href=\"${ARTICLE_TITLE_URL}.html\">${ARTICLE_TITLE}</a>
</div>"
PREV_NEXT="$(ls -1 "${GEN_DIR}/articles" | sort | grep -B 1 -A 1 "${ARTICLE_DATE}.md")"
PREV_MARKDOWN_FILE="$(echo "${PREV_NEXT}" | head -n 1)"
NEXT_MARKDOWN_FILE="$(echo "${PREV_NEXT}" | tail -n 1)"
PREV_MARKDOWN_FILE_YEAR="$(echo "${PREV_MARKDOWN_FILE}" | cut -d '-' -f 1)"
NEXT_MARKDOWN_FILE_YEAR="$(echo "${NEXT_MARKDOWN_FILE}" | cut -d '-' -f 1)"
PREV_TITLE_URL="$(grep -B 99 '^\[//\]: # (gen-meta-end)' "${GEN_DIR}/articles/${PREV_MARKDOWN_FILE}" | grep -m 1 gen-title-url: | cut -d ' ' -f 4- | sed 's/)$//')"
NEXT_TITLE_URL="$(grep -B 99 '^\[//\]: # (gen-meta-end)' "${GEN_DIR}/articles/${NEXT_MARKDOWN_FILE}" | grep -m 1 gen-title-url: | cut -d ' ' -f 4- | sed 's/)$//')"
TMP_FILE="$(bash "${GEN_DIR}/article.sh" \
"${MARKDOWN_PERL_SCRIPT}" \
"${ARTICLE_MARKDOWN_FILE}" \
"${ARTICLE_TITLE}" \
"${ARTICLE_TITLE_URL}" \
"${ARTICLE_KEYWORDS}" \
"${ARTICLE_DESCRIPTION}" \
"${ARTICLE_DATE_REFORMAT}" \
"${PREV_MARKDOWN_FILE_YEAR}/${PREV_TITLE_URL}.html" \
"${NEXT_MARKDOWN_FILE_YEAR}/${NEXT_TITLE_URL}.html" \
"${GEN_DIR}/header.sh" \
"${GEN_DIR}/footer.sh" \
".." \
"${COMMONMARK_SCRIPT}")"
PAGE_PATH_FROM_ROOT="/${ARTICLE_YEAR}/${ARTICLE_TITLE_URL}.html"
ARTICLE_FILE="${OUT_DIR}${PAGE_PATH_FROM_ROOT}"
# replace page path into page content
TMP_FILE="$(echo "${TMP_FILE}" | sed "s#REPLACE_PAGE_URL#${PAGE_PATH_FROM_ROOT}#")"
if [[ ! -f "${ARTICLE_FILE}" ]] || [[ "`echo "${TMP_FILE}" | shasum -a 256 | cut -d ' ' -f 1`" != "`shasum -a 256 "${ARTICLE_FILE}" | cut -d ' ' -f 1`" ]]
then
echo "article file [${ARTICLE_FILE}] IS changed"
echo "${TMP_FILE}" > "${ARTICLE_FILE}"
#else
# echo "article file [${ARTICLE_FILE}] is unchanged"
fi
HOME_PAGE_IDX="$(echo "${HOME_PAGES}" | grep -m 1 "${ARTICLE_DATE}" | awk '{print $1}')"
# append article home page snippet to the appropriate home page
# capture function stdout into a variable, thanks to:
# https://unix.stackexchange.com/a/591153/210174
{ read -d '' ARTICLE_HOME_SNIPPET; }< <(buildHomepageArticleSnippet "${ARTICLE_DATE_REFORMAT}" "${ARTICLE_YEAR}" "${ARTICLE_TITLE_URL}" "${ARTICLE_TITLE}" "${ARTICLE_MARKDOWN_FILE}")
# embed newline directly into variable
HOME_PAGES_CONTENT[$HOME_PAGE_IDX]="${HOME_PAGES_CONTENT[$HOME_PAGE_IDX]}
${ARTICLE_HOME_SNIPPET}"
# for articles on the home page (the first 5 articles) or on
# the first "older1.html" page, put them in the site's RSS
if [ "${HOME_PAGE_IDX}" == "0" ] || [ "${HOME_PAGE_IDX}" == "1" ]
then
buildArticleRssItem "${ARTICLE_DATE}" "${ARTICLE_YEAR}" "${ARTICLE_TITLE_URL}" "${ARTICLE_TITLE}" "${ARTICLE_MARKDOWN_FILE}" "${SITE_URL}" >> "${RSS_BLOG_ARTICLE_ITEMS}"
fi
done <<< "$(find "${GEN_DIR}/articles" -type f | sort -r)"
# Finish each home page: add Older/Newer navigation buttons (suppressed
# at either end of the page list), append the common footer, then write
# the file only if its checksum changed.
COMMON_HOME_PAGE_FOOTER="`bash "${GEN_DIR}/footer.sh" Home .`"
# to allow variables outside while loop to be modified from within the
# loop, we will use a "here string" (at the "done <<< ..." line) to
# provide input to the while loop
# (see https://stackoverflow.com/a/16854326/259456)
while read HOME_PAGE_LINE
do
	HOME_PAGE_IDX="`echo "${HOME_PAGE_LINE}" | cut -d ' ' -f 1`"
	HOME_PAGE="`echo "${HOME_PAGE_LINE}" | cut -d ' ' -f 2`"
	# embed newlines directly into variables
	HOME_PAGES_CONTENT[$HOME_PAGE_IDX]="${HOME_PAGES_CONTENT[$HOME_PAGE_IDX]}
<footer>"
	HOME_PAGES_CONTENT[$HOME_PAGE_IDX]="${HOME_PAGES_CONTENT[$HOME_PAGE_IDX]}
<div class=\"btns\">"
	# neighbors of this page's filename in the HOME_PAGES list
	PREV_NEXT="$(echo "${HOME_PAGES}" | cut -d ' ' -f 2 | grep -B 1 -A 1 "${HOME_PAGE}")"
	PREV_PAGE_FILE="$(echo "${PREV_NEXT}" | head -n 1)"
	NEXT_PAGE_FILE="$(echo "${PREV_NEXT}" | tail -n 1)"
	if [[ "${NEXT_PAGE_FILE}" != "${HOME_PAGE}" ]]
	then
		HOME_PAGES_CONTENT[$HOME_PAGE_IDX]="${HOME_PAGES_CONTENT[$HOME_PAGE_IDX]}
<a class=\"btn\" href=\"./${NEXT_PAGE_FILE}\">Older Articles</a>"
	fi
	if [[ "${PREV_PAGE_FILE}" != "${HOME_PAGE}" ]]
	then
		HOME_PAGES_CONTENT[$HOME_PAGE_IDX]="${HOME_PAGES_CONTENT[$HOME_PAGE_IDX]}
<a class=\"btn\" href=\"./${PREV_PAGE_FILE}\">Newer Articles</a>"
	fi
	HOME_PAGES_CONTENT[$HOME_PAGE_IDX]="${HOME_PAGES_CONTENT[$HOME_PAGE_IDX]}
</div>"
	HOME_PAGES_CONTENT[$HOME_PAGE_IDX]="${HOME_PAGES_CONTENT[$HOME_PAGE_IDX]}
${COMMON_HOME_PAGE_FOOTER}"
	PAGE_PATH_FROM_ROOT="/${HOME_PAGE}"
	OUT_HOME_PAGE="${OUT_DIR}${PAGE_PATH_FROM_ROOT}"
	# replace page path into page content
	HOME_PAGES_CONTENT[$HOME_PAGE_IDX]="$(echo "${HOME_PAGES_CONTENT[$HOME_PAGE_IDX]}" | sed "s#REPLACE_PAGE_URL#${PAGE_PATH_FROM_ROOT}#")"
	# only rewrite the file when its SHA-256 changed
	if [[ ! -f "${OUT_HOME_PAGE}" ]] || [[ "`echo "${HOME_PAGES_CONTENT[$HOME_PAGE_IDX]}" | shasum -a 256 | cut -d ' ' -f 1`" != "`shasum -a 256 "${OUT_HOME_PAGE}" | cut -d ' ' -f 1`" ]]
	then
		echo "home page [${OUT_HOME_PAGE}] IS changed"
		echo "${HOME_PAGES_CONTENT[$HOME_PAGE_IDX]}" > "${OUT_HOME_PAGE}"
	#else
	#	echo "home page [${OUT_HOME_PAGE}] is unchanged"
	fi
done <<< "${HOME_PAGES}"
# Finish each /<year>/index.html page: append the common footer and
# write the file only when its checksum changed.
COMMON_YEAR_PAGE_FOOTER="`bash "${GEN_DIR}/footer.sh" "ignoreme" ..`"
for YEAR_FILE_YEAR in "${!YEAR_PAGES_CONTENT[@]}"
do
	YEAR_PAGES_CONTENT[$YEAR_FILE_YEAR]="${YEAR_PAGES_CONTENT[$YEAR_FILE_YEAR]}
${COMMON_YEAR_PAGE_FOOTER}"
	PAGE_PATH_FROM_ROOT="/${YEAR_FILE_YEAR}/index.html"
	YEAR_PAGE="${OUT_DIR}${PAGE_PATH_FROM_ROOT}"
	# replace page path into page content
	YEAR_PAGES_CONTENT[$YEAR_FILE_YEAR]="$(echo "${YEAR_PAGES_CONTENT[$YEAR_FILE_YEAR]}" | sed "s#REPLACE_PAGE_URL#${PAGE_PATH_FROM_ROOT}#")"
	if [[ ! -f "${YEAR_PAGE}" ]] || [[ "`echo "${YEAR_PAGES_CONTENT[$YEAR_FILE_YEAR]}" | shasum -a 256 | cut -d ' ' -f 1`" != "`shasum -a 256 "${YEAR_PAGE}" | cut -d ' ' -f 1`" ]]
	then
		echo "year page [${YEAR_PAGE}] IS changed"
		echo "${YEAR_PAGES_CONTENT[$YEAR_FILE_YEAR]}" > "${YEAR_PAGE}"
	#else
	#	echo "year page [${YEAR_PAGE}] is unchanged"
	fi
done
# TODO: make archive pages with below general-purpose markdown page building loop
# since a line with the year is added for each article, we need to keep
# only the unique years
ARCHIVE_INDEX_SORT_U="`echo "${ARCHIVE_INDEX_CONTENT}" | sort -u | sed '/^$/d'`"
ARCHIVE_INDEX_CONTENT=""
while read LINE
do
ARCHIVE_INDEX_CONTENT="${ARCHIVE_INDEX_CONTENT}
<div class=\"article-title\"><a href=\"../${LINE}/\">${LINE}</a></div>"
done <<< "${ARCHIVE_INDEX_SORT_U}"
TMP_FILE="`bash "${GEN_DIR}/header.sh" 'Archive' '..' "blog, archive, history, contents" "Personal blog archive โ philthompson.me" 30`"
# prepend header before existing archive file content
ARCHIVE_INDEX_CONTENT="${TMP_FILE}
${ARCHIVE_INDEX_CONTENT}"
TMP_FILE="`bash "${GEN_DIR}/footer.sh" 'Archive' '..'`"
# append footer after existing archive file content
ARCHIVE_INDEX_CONTENT="${ARCHIVE_INDEX_CONTENT}
${TMP_FILE}"
PAGE_PATH_FROM_ROOT="/archive/index.html"
ARCHIVE_FILE="${OUT_DIR}${PAGE_PATH_FROM_ROOT}"
# replace page path into page content
ARCHIVE_INDEX_CONTENT="$(echo "${ARCHIVE_INDEX_CONTENT}" | sed "s#REPLACE_PAGE_URL#${PAGE_PATH_FROM_ROOT}#")"
if [[ ! -f "${ARCHIVE_FILE}" ]] || [[ "`echo "${ARCHIVE_INDEX_CONTENT}" | shasum -a 256 | cut -d ' ' -f 1`" != "`shasum -a 256 "${ARCHIVE_FILE}" | cut -d ' ' -f 1`" ]]
then
echo "archive file [${ARCHIVE_FILE}] IS changed"
echo "${ARCHIVE_INDEX_CONTENT}" > "${ARCHIVE_FILE}"
#else
# echo "archive file [${ARCHIVE_FILE}] is unchanged"
fi
# generate mandelbrot-gallery STATIC markdown pages
# these static pages are then output as html by the regular below static page stuff
# Emit a tiny HTML page that immediately redirects to $1 via a
# meta-refresh tag (used as the mandelbrot gallery's index page).
# BUGFIX: the doctype was previously emitted as "!DOCTYPE html>",
# missing its leading "<", which produced stray text at the top of the
# rendered page.
generateMandelbrotGalleryIndexPage() {
	cat << xxxxxEOFxxxxx
<!DOCTYPE html>
<html lang="en">
	<head>
		<meta http-equiv="refresh" content="0; url=${1}">
	</head>
	<body></body>
</html>
xxxxxEOFxxxxx
}
#
# for nicer overall layout, the gallery pages' content will be wider
# than a normal blog post using a "wide-override" CSS class
# to center the wider child <div> inside the narrower parent <div>,
# "wide-override" uses this: https://stackoverflow.com/a/48608339/259456
#
# Emit the markdown header for one year's Mandelbrot gallery page:
# gen-* metadata, page-local CSS, intro text, and prev/next-year
# buttons.  A button is suppressed when the neighbor year equals the
# page's own year (i.e. at either end of the year range).
# $1 - this page's year, $2 - previous year, $3 - next year
generateMandelbrotGalleryPageHeader() {
	THE_YEAR="${1}"
	PREV_YEAR="${2}"
	NEXT_YEAR="${3}"
	PREV_BUTTON=""
	NEXT_BUTTON=""
	if [[ "${PREV_YEAR}" != "${THE_YEAR}" ]]
	then
		PREV_BUTTON="<a class=\"btn\" href=\"./${PREV_YEAR}.html\">${PREV_YEAR} Gallery</a>"
	fi
	if [[ "${NEXT_YEAR}" != "${THE_YEAR}" ]]
	then
		NEXT_BUTTON="<a class=\"btn\" href=\"./${NEXT_YEAR}.html\">${NEXT_YEAR} Gallery</a>"
	fi
	# NOTE: the heredoc delimiter is unquoted, so ${THE_YEAR} and the
	# button variables expand here; \${SITE_ROOT_REL} stays literal for
	# the later page-build substitution.
	cat << xxxxxEOFxxxxx
<!-- this file is generated by build.sh -->

[//]: # (gen-title: Mandelbrot set Gallery)
[//]: # (gen-keywords: Mandelbrot set, gallery, very plotter, fractal, art)
[//]: # (gen-description: Mandelbrot set Gallery for images generated with Very Plotter)
[//]: # (gen-meta-end)

<style>
.img-container {
	padding-top: 1.0rem;
	padding-bottom: 1.0rem;
	border-top: 1px solid #949b96;
}
details, details summary {
	display: inline;
}
details summary {
	list-style: none;
}
details > summary::-webkit-details-marker {
	display: none;
}
details[open] {
	display: block;
	margin-left: auto;
	margin-right: auto;
	max-width: 100%;
	padding-top: 1.0rem;
	padding-bottom: 1.0rem;
	border-top: 1px solid #949b96;
	border-bottom: 1px solid #949b96;
}
#loc {
	word-wrap: break-word;
}
@media screen and (min-width: 64rem) {
	.width-resp-50-100 {
		padding-left: 1.25%;
		padding-right: 1.25%;
		max-width: 30%;
	}
}
@media screen and (min-width: 104rem) {
	.width-resp-50-100 {
		padding-left: 1.2%;
		padding-right: 1.2%;
		max-width: 22%;
	}
}
.wide-override {
	width: 100%
}
@media screen and (min-width: 48rem) {
	.wide-override {
		width: 47rem;
		left: 50%;
		position: relative;
		transform: translateX(-50%);
	}
}
@media screen and (min-width: 54rem) {
	.wide-override { width: 52rem; }
}
@media screen and (min-width: 64rem) {
	.wide-override { width: 61rem; }
}
@media screen and (min-width: 74rem) {
	.wide-override { width: 70rem; }
}
@media screen and (min-width: 84rem) {
	.wide-override { width: 80rem; }
}
@media screen and (min-width: 94rem) {
	.wide-override { width: 90rem; }
}
@media screen and (min-width: 104rem) {
	.wide-override { width: 98rem; }
}
.btns {
	margin: 1rem 0;
}
</style>

## ${THE_YEAR} Mandelbrot set Gallery

These images were all generated with
<a href="\${SITE_ROOT_REL}/very-plotter" target="_self">Very Plotter</a>'s Mandelbrot set
explorer. Click any image below to view details, a link to open that location in your
browser, and larger renders.

<div class="btns">${PREV_BUTTON}
${NEXT_BUTTON}</div>
xxxxxEOFxxxxx
}
#
# Emit (to stdout) the HTML <details> widget for a single gallery image.
#   $1  - display date          $2  - thumbnail URL
#   $3  - title                 $4  - description
#   $5  - Very Plotter URL parameters
#   $6  - newline-separated list of render URLs (may be empty)
#   $7  - real coordinate       $8  - imaginary coordinate
#   $9  - magnification         ${10} - render scale (pixels/unit)
#   ${11} - iteration count
generateMandelbrotImageHtml() {
IMG_DATE="${1}"
IMG_THUMB="${2}"
IMG_TITLE="${3}"
IMG_DESC="${4}"
IMG_PARAMS="${5}"
IMG_RENDERS="${6}"
IMG_RE="${7}"
IMG_IM="${8}"
IMG_MAG="${9}"
IMG_SCALE="${10}"
IMG_ITER="${11}"
RENDERS_HTML=""
# create html list of renders
if [[ ! -z "${IMG_RENDERS}" ]]
then
RENDERS_HTML="<p>Available renders:</p>
<ul>"
# one <li> per render URL; the link text is the URL's basename
while read RENDER_URL
do
RENDER_NAME="$(basename "${RENDER_URL}")"
RENDERS_HTML="${RENDERS_HTML}
<li><a target=\"_blank\" href=\"${RENDER_URL}\">${RENDER_NAME}</a></li>"
done <<< "${IMG_RENDERS}"
RENDERS_HTML="${RENDERS_HTML}
</ul>"
fi
# "<<-" strips leading tabs from the heredoc body (content here is
# flush-left anyway); the escaped \${SITE_ROOT_REL} stays literal for the
# renderer's later sed substitution
cat <<- xxxxxEOFxxxxx
<details class="width-resp-50-100">
<summary>
<img class="width-100" src="${IMG_THUMB}"/>
</summary>
<p>${IMG_DATE}: ${IMG_TITLE}</p>
<p>${IMG_DESC}</p>
<p>Click
<a target="_blank" href="\${SITE_ROOT_REL}/very-plotter/${IMG_PARAMS}">
here</a> to view this location in Very Plotter.</p>
${RENDERS_HTML}
<p id="loc">Location:<br/>
Re: ${IMG_RE}<br/>
Im: ${IMG_IM}<br/>
Magnification: ${IMG_MAG} <small>(where 1.0 fits entire Mandelbrot set into the window)</small><br/>
Scale: ${IMG_SCALE} <small>(pixels/unit, for renders seen here)</small><br/>
Iterations: ${IMG_ITER}</p>
</details>
xxxxxEOFxxxxx
}
# Generate /mandelbrot-gallery/index.html: a tiny redirect page that
# forwards to the most recent year's gallery page.
MANDELBROT_GALLERY_YEAR_DIRS="$(find "${STATIC_DIR}/mandelbrot-gallery" -type d -name "2*")"
PAGE_PATH_FROM_ROOT="/mandelbrot-gallery/index.html"
MANDELBROT_GALLERY_INDEX_PAGE="${OUT_DIR}${PAGE_PATH_FROM_ROOT}"
# the newest year directory sorts last; reverse-sort and take line one
LATEST_MANDELBROT_GALLERY_YEAR="$(echo "${MANDELBROT_GALLERY_YEAR_DIRS}" | sort -nr | head -n 1)"
LATEST_MANDELBROT_GALLERY_YEAR="$(basename "${LATEST_MANDELBROT_GALLERY_YEAR}")"
# capture function stdout into a variable, thanks to:
# https://unix.stackexchange.com/a/591153/210174
{ read -d '' MANDELBROT_GALLERY_INDEX_CONTENT; }< <(generateMandelbrotGalleryIndexPage "./${LATEST_MANDELBROT_GALLERY_YEAR}.html")
# replace page path into page content
MANDELBROT_GALLERY_INDEX_CONTENT="$(echo "${MANDELBROT_GALLERY_INDEX_CONTENT}" | sed "s#REPLACE_PAGE_URL#${PAGE_PATH_FROM_ROOT}#")"
# rewrite the index only when its content changed; $(...) replaces the
# legacy backtick substitutions previously used in this comparison
if [[ ! -f "${MANDELBROT_GALLERY_INDEX_PAGE}" ]] || [[ "$(echo "${MANDELBROT_GALLERY_INDEX_CONTENT}" | shasum -a 256 | cut -d ' ' -f 1)" != "$(shasum -a 256 "${MANDELBROT_GALLERY_INDEX_PAGE}" | cut -d ' ' -f 1)" ]]
then
echo "${MANDELBROT_GALLERY_INDEX_CONTENT}" > "${MANDELBROT_GALLERY_INDEX_PAGE}"
else
echo "mandelbrot gallery index [${MANDELBROT_GALLERY_INDEX_PAGE}] is unchanged"
fi
# Build one markdown gallery page per year directory: the year's header
# (with prev/next buttons) followed by one <details> widget per image
# metadata file found in that directory.
while read MANDELBROT_GALLERY_YEAR_DIR
do
MANDELBROT_GALLERY_YEAR="$(basename "${MANDELBROT_GALLERY_YEAR_DIR}")"
MANDELBROT_MD_PAGE="${MANDELBROT_GALLERY_YEAR_DIR}.md"
# the sorted neighbors of this year give the prev/next page targets; at
# the ends, head/tail fall back to the year itself, which the header
# generator treats as "no button"
PREV_NEXT="$(echo "${MANDELBROT_GALLERY_YEAR_DIRS}" | sort -n | grep -B 1 -A 1 "${MANDELBROT_GALLERY_YEAR}")"
PREV_YEAR="$(basename "$(echo "${PREV_NEXT}" | head -n 1)")"
NEXT_YEAR="$(basename "$(echo "${PREV_NEXT}" | tail -n 1)")"
{ read -d '' MANDELBROT_PAGE_CONTENT; }< <(generateMandelbrotGalleryPageHeader "${MANDELBROT_GALLERY_YEAR}" "${PREV_YEAR}" "${NEXT_YEAR}")
# embed newlines directly into variable
MANDELBROT_PAGE_CONTENT="${MANDELBROT_PAGE_CONTENT}
<div class=\"wide-override\">"
while read MANDELBROT_IMG
do
IMG_BASENAME="$(basename "${MANDELBROT_IMG}")"
if [[ "${IMG_BASENAME}" == "example.txt" ]]
then
continue
fi
# image files are named YYYY-MM-DD...; format that prefix for display
# NOTE(review): "date -j -f" is BSD/macOS syntax (GNU date would use -d)
# date -j -f %Y-%m-%d 2022-02-24 "+%B %-d, %Y"
IMG_DATE="$(date -j -f %Y-%m-%d "${IMG_BASENAME:0:10}" "+%B %-d, %Y")"
IMG_THUMB=""
IMG_TITLE=""
IMG_DESC=""
IMG_PARAMS=""
IMG_RENDER_LINES=""
IMG_RE=""
IMG_IM=""
IMG_MAG=""
IMG_SCALE=""
IMG_ITER=""
# parse the file's "key:value" metadata lines (comments/blanks removed)
while read IMG_LINE
do
LINE_KEY="$(echo "${IMG_LINE}" | cut -d : -f 1)"
LINE_VAL="$(echo "${IMG_LINE}" | cut -d : -f 2-)"
if [[ "${LINE_KEY}" == "thumb" ]]
then
IMG_THUMB="${LINE_VAL}"
elif [[ "${LINE_KEY}" == "title" ]]
then
IMG_TITLE="${LINE_VAL}"
elif [[ "${LINE_KEY}" == "desc" ]]
then
IMG_DESC="${LINE_VAL}"
elif [[ "${LINE_KEY}" == "params" ]]
then
IMG_PARAMS="${LINE_VAL}"
elif [[ "${LINE_KEY}" == "re" ]]
then
IMG_RE="${LINE_VAL}"
elif [[ "${LINE_KEY}" == "im" ]]
then
IMG_IM="${LINE_VAL}"
elif [[ "${LINE_KEY}" == "mag" ]]
then
IMG_MAG="${LINE_VAL}"
elif [[ "${LINE_KEY}" == "scale" ]]
then
IMG_SCALE="${LINE_VAL}"
elif [[ "${LINE_KEY}" == "iterations" ]]
then
IMG_ITER="${LINE_VAL}"
# render01, render02, etc keys are matched and re-assembled into
# one variable
elif [[ "${LINE_KEY:0:6}" == "render" ]]
then
IMG_RENDER_LINES="${IMG_RENDER_LINES}
${LINE_KEY}:${LINE_VAL}"
fi
done <<< "$(grep -v "^#" "${MANDELBROT_IMG}" | grep -v "^$")"
# sort render01, render02, etc lines then drop line keys
IMG_RENDERS="$(echo "${IMG_RENDER_LINES}" | grep -v "^$" | sort -n | cut -d : -f 2-)"
# blank lines and empty <p></p> (from unset optional fields) are dropped
{ read -d '' MANDELBROT_IMG_HTML; }< <(generateMandelbrotImageHtml "${IMG_DATE}" "${IMG_THUMB}" "${IMG_TITLE}" "${IMG_DESC}" "${IMG_PARAMS}" "${IMG_RENDERS}" "${IMG_RE}" "${IMG_IM}" "${IMG_MAG}" "${IMG_SCALE}" "${IMG_ITER}" | grep -v "^$" | grep -v "<p></p>")
# embed newlines directly into variable
MANDELBROT_PAGE_CONTENT="${MANDELBROT_PAGE_CONTENT}
${MANDELBROT_IMG_HTML}"
# image files are named with their date, so sorting will display them on
# the page in date order
done <<< "$(find "${MANDELBROT_GALLERY_YEAR_DIR}" -type f | sort)"
MANDELBROT_PAGE_CONTENT="${MANDELBROT_PAGE_CONTENT}
</div>"
# rewrite the year's .md page only when its content changed
if [[ ! -f "${MANDELBROT_MD_PAGE}" ]] || [[ "`echo "${MANDELBROT_PAGE_CONTENT}" | shasum -a 256 | cut -d ' ' -f 1`" != "`shasum -a 256 "${MANDELBROT_MD_PAGE}" | cut -d ' ' -f 1`" ]]
then
echo "${MANDELBROT_PAGE_CONTENT}" > "${MANDELBROT_MD_PAGE}"
else
echo "mandelbrot gallery markdown file [${MANDELBROT_MD_PAGE}] is unchanged"
fi
done <<< "${MANDELBROT_GALLERY_YEAR_DIRS}"
# render markdown files in their static locations, and add their headers and footers
while read PAGE_MARKDOWN_FILE
do
PAGE_DIR="$(dirname "${PAGE_MARKDOWN_FILE}" | sed 's#^.*static/*##')"
mkdir -p "${OUT_DIR}/${PAGE_DIR}"
# relative path from this page back up to the site root ("." at top level)
PAGE_DIR_REL_ROOT="$(echo "${PAGE_DIR}" | sed 's#[^/][^/]*#..#g')"
if [[ -z "${PAGE_DIR_REL_ROOT}" ]]
then
PAGE_DIR_REL_ROOT="."
fi
PAGE_HTML_FILE="$(echo "${PAGE_MARKDOWN_FILE}" | sed 's#^.*static/*##' | sed 's/\.md$/.html/')"
# page metadata lives in "[//]: # (gen-*)" lines above the gen-meta-end marker
PAGE_METADATA="$(grep -B 99 '^\[//\]: # (gen-meta-end)' "${PAGE_MARKDOWN_FILE}")"
PAGE_TITLE="$(echo "${PAGE_METADATA}" | grep -m 1 gen-title: | cut -d ' ' -f 4- | sed 's/)$//')"
PAGE_KEYWORDS="$(echo "${PAGE_METADATA}" | grep -m 1 gen-keywords: | cut -d ' ' -f 4- | sed 's/)$//')"
PAGE_DESCRIPTION="$(echo "${PAGE_METADATA}" | grep -m 1 gen-description: | cut -d ' ' -f 4- | sed 's/)$//')"
# $(...) replaces the legacy backticks previously used for these three
TMP_HEADER="$(bash "${GEN_DIR}/header.sh" "${PAGE_TITLE}" "${PAGE_DIR_REL_ROOT}" "${PAGE_KEYWORDS}" "${PAGE_DESCRIPTION}" 30)"
TMP_CONTENT="$(perl "${MARKDOWN_PERL_SCRIPT}" --html4tags "${PAGE_MARKDOWN_FILE}" | sed 's/${SITE_ROOT_REL}/../g')"
TMP_FOOTER="$(bash "${GEN_DIR}/footer.sh" "${PAGE_TITLE}" "${PAGE_DIR_REL_ROOT}")"
# embed newline chars directly in the code here, in order to
# avoid having to use "echo -e" and mess up newlines embedded
# in the variable contents
TMP_CONTENT="${TMP_HEADER}
${TMP_CONTENT}
${TMP_FOOTER}"
PAGE_PATH_FROM_ROOT="/${PAGE_HTML_FILE}"
STATIC_HTML_FILE="${OUT_DIR}${PAGE_PATH_FROM_ROOT}"
# replace page path into page content
TMP_CONTENT="$(echo "${TMP_CONTENT}" | sed "s#REPLACE_PAGE_URL#${PAGE_PATH_FROM_ROOT}#")"
# only write when the rendered page differs from what is already on disk
if [[ ! -f "${STATIC_HTML_FILE}" ]] || [[ "$(echo "${TMP_CONTENT}" | shasum -a 256 | cut -d ' ' -f 1)" != "$(shasum -a 256 "${STATIC_HTML_FILE}" | cut -d ' ' -f 1)" ]]
then
echo "static file [${STATIC_HTML_FILE}] IS changed"
echo "${TMP_CONTENT}" > "${STATIC_HTML_FILE}"
#else
#  echo "static file [${STATIC_HTML_FILE}] is unchanged"
fi
# static/**/*.md files are excluded from rsync and thus are
# not copied to the out dir in the first place
#rm "${OUT_DIR}/${PAGE_DIR}/$(basename "${PAGE_MARKDOWN_FILE}")"
done <<< "$(find "${GEN_DIR}/static" -type f -name "*.md" | sort -r)"
# for RSS file, concatenate:
# - header
# - latest birds gallery items file
# - latest blog items file
# - footer
# The always-true "if [ 1 ]" exists purely for grouping: it lets stdout of
# the whole body be redirected once at the closing 'fi > "${RSS_FINAL}"'.
if [ 1 ]
then
# create array for <item> content, keyed by timestamp
# associative array
# content by year for page /<year>/index.html
# (requires bash 4+ for "declare -A"; NOTE(review): two items sharing an
# identical pubDate timestamp would overwrite each other - presumed not
# to occur in practice, confirm if that assumption changes)
declare -A ITEMS_BY_TIMESTAMP
# create variable for all timestamps, then sort
ITEM_TIMESTAMPS=""
WITHIN_ITEM="false"
THIS_ITEM_CONTENT=""
THIS_ITEM_TIMESTAMP=""
# to allow variables outside while loop to be modified from within the
# loop, we will use a "here string" (at the "done <<< ..." line) to
# provide input to the while loop
# (see https://stackoverflow.com/a/16854326/259456)
while read ITEM_LINE
do
if [[ "${ITEM_LINE}" =~ "<item>" ]]
then
# start accumulating a new <item> block
THIS_ITEM_CONTENT="${ITEM_LINE}"
WITHIN_ITEM="true"
elif [[ "${ITEM_LINE}" =~ "</item>" ]]
then
THIS_ITEM_CONTENT="${THIS_ITEM_CONTENT}
${ITEM_LINE}"
# only keep items whose <pubDate> was successfully parsed
if [ ! -z "${THIS_ITEM_TIMESTAMP}" ]
then
ITEMS_BY_TIMESTAMP[$THIS_ITEM_TIMESTAMP]="${THIS_ITEM_CONTENT}"
ITEM_TIMESTAMPS="${ITEM_TIMESTAMPS}
${THIS_ITEM_TIMESTAMP}"
fi
WITHIN_ITEM="false"
THIS_ITEM_TIMESTAMP=""
elif [[ "${WITHIN_ITEM}" == "true" ]]
then
THIS_ITEM_CONTENT="${THIS_ITEM_CONTENT}
${ITEM_LINE}"
if [[ "${ITEM_LINE}" =~ "<pubDate>" ]]
then
# isolate the date text between the tags ("grep -m 1 20" grabs the
# fragment containing the 4-digit year) and convert to epoch seconds
# NOTE(review): "date -j -f" is BSD/macOS syntax (GNU date uses -d)
LINE_PUBDATE="$(echo "${ITEM_LINE}" | tr '<>' '\n' | grep -m 1 20)"
THIS_ITEM_TIMESTAMP="$(date -j -f '%a, %d %b %Y %H:%M:%S %z' "${LINE_PUBDATE}" +%s)"
fi
fi
done <<< "$(cat "${RSS_BIRDS_GALLERY_ITEMS}" "${RSS_BLOG_ARTICLE_ITEMS}")"
# format date according to https://www.w3.org/Protocols/rfc822/#z28
RSS_PUBLISH_DATE="$(date '+%a, %d %b %Y %H:%M:%S %z')"
echo '<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">'
echo " <channel>"
echo " <title>philthompson.me</title>"
echo " <link>${SITE_URL}</link>"
echo " <atom:link href=\"${SITE_URL}/${RSS_FINAL_FILENAME}\" rel=\"self\" type=\"application/rss+xml\" />"
echo " <description>Phil Thompson's blog and photo galleries</description>"
echo " <language>en-us</language>"
echo " <copyright>Copyright $(date +%Y) Phil Thompson, All Rights Reserved</copyright>"
echo " <pubDate>${RSS_PUBLISH_DATE}</pubDate>"
echo " <lastBuildDate>${RSS_PUBLISH_DATE}</lastBuildDate>"
# pull out <item> contents in order to create final xml
ITEM_TIMESTAMPS="$(echo "${ITEM_TIMESTAMPS}" | grep -v "^$" | sort -n)"
# emit items oldest-first by publication timestamp
while read ORDERED_ITEM_TIMESTAMP
do
echo "${ITEMS_BY_TIMESTAMP[$ORDERED_ITEM_TIMESTAMP]}"
done <<< "${ITEM_TIMESTAMPS}"
#cat "${RSS_BIRDS_GALLERY_ITEMS}"
#cat "${RSS_BLOG_ARTICLE_ITEMS}"
echo " </channel>"
echo "</rss>"
fi > "${RSS_FINAL}"
| true |
e090bd5411f52b0377fdbe15455a084381bc05ed | Shell | doegox/nfc-live | /content_for_iso/driver-ifdnfc.devel/config/includes.chroot/root/live-ifdnfc.sh | UTF-8 | 4,036 | 2.875 | 3 | [] | no_license | #!/bin/bash
# Build the ifdnfc PC/SC driver at a pinned revision and stage it, plus
# helper scripts, into a live-build provisioning overlay under
# /tmp/TRANSFER/driver-ifdnfc.generated/.
REVISION=c25902fe431c
git clone http://code.google.com/p/ifdnfc/ ifdnfc-dev
cd ifdnfc-dev
git checkout $REVISION
# standard autotools build of the driver
autoreconf -vis
./configure
make
# stage the driver bundle into the overlay tree
mkdir -p /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Linux/
cp src/Info.plist /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/
cp src/.libs/libifdnfc.so /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Linux/libifdnfc.so.0.1.4
mkdir -p /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/local/etc/reader.conf.d
# fill the reader.conf template: the reader name is extracted from the
# driver header with awk; the library path points at the staged .so
sed "s#TARGETNAME#`awk '/IFDNFC_READER_NAME/ {print $3}' src/ifd-nfc.h`#;\
s#TARGETPATH#/usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Linux/libifdnfc.so.0.1.4#" src/reader.conf.in \
> /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/local/etc/reader.conf.d/ifdnfc
mkdir -p /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/local/bin
cp src/ifdnfc-activate /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/local/bin
chmod +x /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/local/bin/ifdnfc-activate
cd ..
# add.sh is run later to merge this overlay into the live-build config
cat > /tmp/TRANSFER/driver-ifdnfc.generated/add.sh <<EOF
#!/bin/bash
mkdir -p ../@config
rsync -av config/ ../@config
EOF
chmod 755 /tmp/TRANSFER/driver-ifdnfc.generated/add.sh
# Avoid conflict with SCL3711 driver:
# keep two Info.plist variants - one that claims the SCL3711 USB IDs and
# one with them stripped - so the helper scripts below can swap them
cp /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Info.plist /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Info.plist.with_scl3711
sed -e '/0x04E6\|0x5591\|SCL3711/d' /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Info.plist > /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Info.plist.without_scl3711
cp /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Info.plist.without_scl3711 /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Info.plist
# Provide helper scripts
mkdir -p /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/local/bin/
# scl3711-pcsc_proprio: route the SCL3711 to the proprietary PC/SC driver
cat > /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/local/bin/scl3711-pcsc_proprio << EOF
#!/bin/bash
sudo /etc/init.d/pcscd stop
# comment out SCL3711 from ifdnfc
sudo cp /usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Info.plist.without_scl3711 /usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Info.plist
# activate SCL3711 proprietary driver
sudo mv /usr/lib/pcsc/drivers/SCx371x.bundle/Contents/Info.plist.disabled /usr/lib/pcsc/drivers/SCx371x.bundle/Contents/Info.plist 2>/dev/null
sudo /etc/init.d/pcscd start
EOF
# scl3711-pcsc_ifdnfc: route the SCL3711 through the ifdnfc PC/SC driver
cat > /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/local/bin/scl3711-pcsc_ifdnfc << EOF
#!/bin/bash
sudo /etc/init.d/pcscd stop
# add SCL3711 to ifdnfc
sudo cp /usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Info.plist.with_scl3711 /usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Info.plist
# deactivate SCL3711 proprietary driver
sudo mv /usr/lib/pcsc/drivers/SCx371x.bundle/Contents/Info.plist /usr/lib/pcsc/drivers/SCx371x.bundle/Contents/Info.plist.disabled 2>/dev/null
sudo /etc/init.d/pcscd start
EOF
# scl3711-libnfc: detach the SCL3711 from pcscd so libnfc tools can use it
cat > /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/local/bin/scl3711-libnfc << EOF
#!/bin/bash
sudo /etc/init.d/pcscd stop
# comment out SCL3711 from ifdnfc
sudo cp /usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Info.plist.without_scl3711 /usr/lib/pcsc/drivers/ifdnfc.bundle/Contents/Info.plist
# deactivate SCL3711 proprietary driver
sudo mv /usr/lib/pcsc/drivers/SCx371x.bundle/Contents/Info.plist /usr/lib/pcsc/drivers/SCx371x.bundle/Contents/Info.plist.disabled 2>/dev/null
sudo /etc/init.d/pcscd start
EOF
chmod 755 /tmp/TRANSFER/driver-ifdnfc.generated/config/includes.chroot/usr/local/bin/*
| true |
0bd314bbba6e17c3adc109a64f702ad0e2128373 | Shell | sarah-n-wright/gwas_pipeline | /V1/sex_check.sh | UTF-8 | 1,517 | 3.078125 | 3 | [] | no_license | source /nrnb/ukb-majithia/sarah/Git/gwas_pipeline/Configs/$1 "X"
# Sex-check stage of the GWAS QC pipeline.  The sourced config (loaded by
# the line above this block) is expected to define: sex_chr, outDir,
# outName, case_list, build, LD_window, LD_shift, LD_r2, sexFmin, sexFmax.
# $2 is an optional suffix appended to every generated file name.
suff=$2
# non-zero when the XY (pseudo-autosomal) region is already split out,
# in which case plink --split-x is skipped below
no_split=$(echo "${sex_chr[@]}" | grep -c "XY")
file_name=${outDir}${outName}.updated_phe
# when a case list was supplied, use the case/control fileset instead
# BUGFIX: the previous unquoted test ([ $case_list != "" ]) produced a
# "unary operator expected" error whenever case_list was empty or unset;
# quoting with -n is the safe equivalent
if [ -n "$case_list" ]
then
file_name=${outDir}${outName}.CC
fi
echo "$file_name"
# Don't need this if XY region already separated?
if [ "$no_split" -eq 0 ]
then
srun -l plink --bfile $file_name \
--split-x no-fail $build \
--allow-no-sex \
--make-bed --out ${outDir}${outName}.sex_split${suff}
file_name=${outDir}${outName}.sex_split${suff}
fi
# LD prune
# (the always-true guard is kept as a manual on/off switch for this stage)
if [ 1 -eq 1 ]
then
srun -l plink --bfile $file_name \
--indep-pairphase $LD_window $LD_shift $LD_r2 \
--out ${outDir}${outName}.sex_prune${suff}
srun -l plink --bfile $file_name \
--extract ${outDir}${outName}.sex_prune${suff}.prune.in \
--make-bed --out ${outDir}${outName}.temp${suff}
echo "--------------------X chromosome pruned-------------------------"
srun -l plink --bfile ${outDir}${outName}.temp${suff} \
--check-sex $sexFmin $sexFmax \
--out ${outDir}${outName}.sex_check${suff}
echo "--------------------Sex check performed-------------------------"
srun -l python /nrnb/ukb-majithia/sarah/Git/gwas_pipeline/V1/gender_check.py ${outDir}${outName}.sex_check ${outName}${suff}
echo "--------------------Figure plotted-------------------------"
# individuals whose reported sex disagrees with genotype-inferred sex
grep "PROBLEM" ${outDir}${outName}.sex_check${suff}.sexcheck | \
awk '{print $1, $2}' > ${outDir}${outName}discordant_individuals${suff}.txt
echo "--------------------Discord file created-------------------------"
fi
| true |
f7ccda572072581ee0e5a75d841148dbb5e08aba | Shell | michfarr/webp-r-ack-tice- | /bin/download-assets | UTF-8 | 408 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Ensure the static asset directory exists before downloading into it.
[ -d ./src/assets/static ] || mkdir -p ./src/assets/static
download_path='./bin/downloads'
# Each entry names a downloader script under $download_path.
scripts=('images' 'audio')
echo """
===== ===== ===== ===== ===== ===== ===== =====
ASSETS
===== ===== ===== ===== ===== ===== ===== =====
"""
# Run every downloader in order.
for script in "${scripts[@]}"; do
"${download_path}/${script}"
done
| true |
0334eeea096aeb302e64fa84cd021f7415f6c7d7 | Shell | magedelmalah/red-pnda | /scripts/console-backend.sh | UTF-8 | 1,672 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Install the PNDA console backend services (data-logger and data-manager)
# from the develop branch and start them under upstart.
# $1 - directory holding the bundled upstart .conf files.
cd /opt/pnda
wget https://github.com/pndaproject/platform-console-backend/archive/develop.zip
unzip develop.zip
rm develop.zip
# console-backend-utils
ln -s /opt/pnda/platform-console-backend-develop/console-backend-utils /opt/pnda/console-backend-utils
cd /opt/pnda/console-backend-utils
npm install
cd /opt/pnda
# console-backend-data-logger
ln -s /opt/pnda/platform-console-backend-develop/console-backend-data-logger /opt/pnda/console-backend-data-logger
cd /opt/pnda/console-backend-data-logger
npm install
cp -r /opt/pnda/console-backend-utils ./
# upstart script for data-logger
cp $1/files/data-logger.conf /etc/init/
sudo service data-logger start
# console-backend-data-manager
cd /opt/pnda
ln -s /opt/pnda/platform-console-backend-develop/console-backend-data-manager /opt/pnda/console-backend-data-manager
cd /opt/pnda/console-backend-data-manager
npm install
cp -r /opt/pnda/console-backend-utils ./
# replace the stock config with one wired to the local deployment and
# dataset managers (the unquoted EOF heredoc is safe: the JS below
# contains no shell $-expansions)
rm ./conf/config.js
cat <<EOF > ./conf/config.js
var hostname = process.env.HOSTNAME || 'localhost';
var whitelist = ['http://localhost', 'http://' + hostname, 'http://' + hostname + ':8006', 'http://0.0.0.0:8006'];
module.exports = {
whitelist: whitelist,
deployment_manager: {
host: "http://127.0.0.1:5000",
API: {
endpoints: "/environment/endpoints",
packages_available: "/repository/packages?recency=999",
packages: "/packages",
applications: "/applications"
}
},
dataset_manager: {
host: "http://127.0.0.1:7000",
API: {
datasets: "/api/v1/datasets"
}
}
};
EOF
# upstart script for data-manager
cp $1/files/data-manager.conf /etc/init/
sudo service data-manager start | true |
6d3675483343f8c6ecfdecbc8893d7158f48de17 | Shell | alastorid/mystuff | /getmark2.coolpc.sh | UTF-8 | 523 | 2.515625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
m3dphp="http://www.coolpc.com.tw/dx.php"
m3dlist="http://coolpc.com.tw/dx12.php"
PostData=`curl -sv $m3dlist|& grep -o "id=[0-9]\+ "|tr '\n' ','|sed 's/id=//g;s/ ,/,/g;s/^/N=/;s/,$//;s/$/\&S=1/;'`
curl -sv --data "$PostData" "$m3dphp" 2>/dev/null | iconv --from big5 --to utf8|sed 's/<[^>]*>/,/g; s/,\{1,\}/,/g; s/^,\|,$//g; s/°//g;s/, ,\$\([0-9]\+\),โ,โ
\(.*\)$/\2, \$\1/' |column -t -s ','
#curl -sv http://coolpc.com.tw/3d.php|& iconv --from big5 --to utf8| sed -n 's/^[^>]\+>\([^<]\+\)<.*$/\1/p'
| true |
127cb1963f2eafe74d0874546541d5e7349a7c97 | Shell | Prajwal-Prathiksh/ajit-toolchain | /tests/verification/verify.sh | UTF-8 | 726 | 3.59375 | 4 | [] | no_license | #!/usr/bin/env bash
# The command.
_CMD="validation_outer_env_v3.py -j 1 -t 20000 -C "
_CWD="`pwd`";
_OUT_FILE="$_CWD/test-results.out";
# invariant check: check if the the script is run from its location
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )";
if [[ $DIR != "`pwd`" ]]; then
echo "AD: Must run this script from its directory as './...'";
exit; # if not then exit
fi
{
echo "You can review the output in file: $_OUT_FILE";
sleep 1;
echo -e "Testing STARTED: `date`\n\n" > $_OUT_FILE;
# Verify the 32 bit ajit
_AJIT32_DIR="./ajit32/";
bash -c "$_CMD $_AJIT32_DIR |& tee $_OUT_FILE";
echo -e "\n\nYou can review the output in file: $_OUT_FILE";
} |& tee $_OUT_FILE;
| true |
b468189bb72125e56b5694096ae9e209157e6210 | Shell | omrmzv/SIFTseq | /workflow/scripts/trim/BBDUK_WRAP_v2.sh | UTF-8 | 2,385 | 3.234375 | 3 | [] | no_license | #!/usr/bin/env bash
for i in "$@"
do
case $i in
--bbduk_path=*)
BBDUK=${i#*=}
;;
--seq_type=*)
SEQ_TYPE=${i#*=}
;;
--prep_type=*)
PREP_TYPE=${i#*=}
;;
--adapter_file=*)
ADAPTOR_SEQ=${i#*=}
;;
--mem_mb=*)
MEM=${i#*=}
;;
--threads=*)
THREADS=${i#*=}
;;
--maq=*)
MAQ=${i#*=}
;;
--entropy=*)
ENTROPY=${i#*=}
;;
--log_file=*)
LOG=${i#*=}
;;
--R1=*)
R1=${i#*=}
shift
;;
--R1_trim=*)
R1_trim=${i#*=}
shift
;;
--R2=*)
R2=${i#*=}
shift
;;
--R2_trim=*)
R2_trim=${i#*=}
;;
esac
done
if [[ $R2 == "" ]]; then
R2_trim="${R1_trim/R1/R2}"
fi
if [[ $PREP_TYPE == "MEYER_SSLP" ]] && [[ $SEQ_TYPE == "2x75" ]]
then
$BBDUK in1=$R1 in2=$R2 out1=$R1_trim out2=$R2_trim -Xmx1g -threads=$THREADS ref=$ADAPTOR_SEQ maq=$MAQ entropy=$ENTROPY tbo tpe &>$LOG
elif [[ $PREP_TYPE == "MEYER_SRSLY" ]] && [[ $SEQ_TYPE == "2x75" ]]
then
$BBDUK in1=$R1 in2=$R2 out1=$R1_trim out2=$R2_trim -Xmx1g -threads=$THREADS ref=$ADAPTOR_SEQ maq=$MAQ entropy=$ENTROPY tbo tpe &>$LOG
elif [[ $PREP_TYPE == "SRSLY_SSLP" ]] && [[ $SEQ_TYPE == "2x75" ]]
then
$BBDUK in1=$R1 in2=$R2 out1=$R1_trim out2=$R2_trim -Xmx1g -threads=$THREADS ref=$ADAPTOR_SEQ maq=$MAQ entropy=$ENTROPY tbo tpe &> $LOG
elif [[ $PREP_TYPE == "SWIFT_ACCEL" ]] && [[ $SEQ_TYPE == 2x1* ]] #covers paired end of type 2x100 or 2x150bp
then
$BBDUK in1=$R1 in2=$R2 out1=$R1.tmp.fastq out2=$R2.tmp.fastq -Xmx1g -threads=$THREADS ftr=74 &> $LOG
$BBDUK in1=$R1.tmp.fastq in2=$R2.tmp.fastq out1=$R1_trim out2=$R2_trim -Xmx1g -threads=$THREADS ref=$ADAPTOR_SEQ maq=$MAQ entropy=$ENTROPY tbo tpe swift=t mink=11 ktrim=r &>> $LOG
rm $R1.tmp.fastq $R2.tmp.fastq
elif [[ $PREP_TYPE == "SWIFT_ACCEL" ]] && [[ $SEQ_TYPE == "2x75" ]]
then
$BBDUK in1=$R1 in2=$R2 out1=$R1_trim out2=$R2_trim -Xmx1g -threads=$THREADS ref=$ADAPTOR_SEQ maq=$MAQ entropy=$ENTROPY tbo tpe swift=t mink=11 ktrim=r &>> $LOG
elif [[ $PREP_TYPE == "SWIFT_ACCEL" ]] && [[ $SEQ_TYPE == "1x75" ]]
then
$BBDUK in=$R1 out=$R1_trim ref=$ADAPTOR_SEQ maq=$MAQ entropy=$ENTROPY swift=t mink=11 ktrim=r &> $LOG
echo "This was processed as a single-end dataset so this is just a fun placeholder file" > $R2_trim #the lazyest of ways ...
else
echo "Error. Library type not found for $SEQ_TYPE and $PREP_TYPE"
exit 1
fi
| true |
a49c41d5bb29c33b8d067927180270800280c21c | Shell | dhamotharang/whiz-orders-dashboard | /bin/deploy_whiz-orders-dashboard.sh | UTF-8 | 804 | 3.46875 | 3 | [] | no_license | #!/bin/bash
#
#
set -ex
# to retain all backups, set this value to a non-positive integer
export bucket=whiz-orders-angular-builds
export build_location=whiz-orders-angular-build
#get the last uploaded zip filename
build_name=`aws s3 ls s3://$bucket | tail -1 | awk '{print $4}'`
#get the file name from .zip file
filename="${build_name%%.*}"
#copy the file from s3 bucket to local
aws s3 cp s3://$bucket/$build_name .
#unzip the file
unzip $build_name -d ~/temp
rm -fR ~/$build-location
#copy to the build location
mv ~/temp/$filename/architectui-angular-pro ~/$build-location
rm -fR ~/temp
rm -fR build_name
sudo service nginx reload
echo 'Removing the old build'
build_to_remove=`aws s3 ls s3://$bucket | awk 'FNR <= 1' | awk '{print $4}'`
aws s3 rm s3://$bucket/$build_to_remove
exit 0
| true |
46a8b3d49547671bd0e5a8cb8dcd2dd268b4bbeb | Shell | studost/dotfiles | /home/studo/.bash_aliases | UTF-8 | 2,330 | 2.78125 | 3 | [
"MIT"
] | permissive | # /etc/bash.bashrc.local for SuSE Linux
#
# Set some generic aliases
#
# 2013-08-12
# 2013-08-12
# 2013-08-12
# 11 2013-08-12
# 12 2013-08-12
alias l='ls -CF'
alias la='ls -A'
alias ll='ls -haltr --group-directories-first'
alias lll='ls -hal --color=no --group-directories-first'
alias lld='ls -halF -d */'
alias ls='ls --color=auto'
alias ltr='ls -haltr --color=auto'
alias ..='cd ..'
alias ...='cd ../..'
alias mv='mv -v'
alias cp='cp -v'
alias rm='rm -v'
alias whcih='which'
# alias svup='sudo zypper ref && sudo zypper up'
# alias svre="sudo /etc/init.d/snagview restart"
alias gi="~/git-info.sh"
alias gis="~/projects/studo/misc/git/git_status"
alias gitt="~/projects/studo/misc/git/gitt"
# alias ssh="~/projects/studo/misc/utils/ush"
alias sshs='ssh -D 12345 -f -C -q -N fcfrmonitos02 && /cygdrive/c/Program\ Files\ (x86)/Mozilla\ Firefox/firefox.exe &'
alias getline='cat > /tmp/getline.tmp && . /mnt/c/projects/studo/misc/utils/getline.sh'
# alias sm='svn mv'
#
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
alias egrep='egrep --color=auto'
alias fgrep='fgrep --color=auto'
alias grep='grep --color=auto'
#
alias hsp+='cd ~/projects/'
# 2015_08_22
alias df='df -hT'
# dat#
alias vmrun='/cygdrive/c/Program\ Files\ \(x86\)/VMware/VMware\ Workstation/vmrun.exe'
# alias ping='/cygdrive/c/Windows/System32/ping -t'
alias open='cygstart'
alias gvim='/cygdrive/c/Program\ Files\ \(x86\)/Vim/vim80/gvim.exe'
alias vera='/cygdrive/c/Program\ Files/VeraCrypt/VeraCrypt.exe'
alias chrome='/cygdrive/c/Program\ Files\ \(x86\)/Google/Chrome/Application/chrome.exe'
# alias ifconfig='ipconfig -all'
# alias n+='/usr/local/nagios/'
# alias nl3+='/opt/snag-view/appliance/nagios/libexec/'
# alias sv+='ll /opt/snag-view/'
# alias svdl+='/opt/snag-view/data/logs/'
# alias tc='truecrypt -t -k "" --protect-hidden=no'
# alias tcd='truecrypt -d'
# alias vi='vim'
# 2009-08-26, UST
# If id command returns zero, youโve root access.
#--------------------------------------------------
# if [ $(id -u) -eq 0 ];
# then # you are root, set red colour prompt
# PS1="\\[$(tput setaf 1)\\]\\u@\\h:\\w # \\[$(tput sgr0)\\]"
# else # normal
# PS1="[\\u@\\h:\\w] $"
# fi
#--------------------------------------------------
| true |
cc65b0af856ff1b4669796b5b11fb6b11d68f3ef | Shell | pacificclimate/data-prep-actions | /actions/vic-gen2-metadata/fill-values.sh | UTF-8 | 968 | 3.234375 | 3 | [] | no_license | #!/bin/bash
localdir="/local_temp/lzeman/ncatted"
for var in BASEFLOW EVAP GLAC_AREA GLAC_MBAL GLAC_OUTFLOW PET_NATVEG PREC RAINF RUNOFF SNOW_MELT SOIL_MOIST_TOT SWE TRANSP_VEG
do
echo "$(date) Now processing $var files"
for file in /storage/data/projects/hydrology/dataportal/CMIP5/VICGL/*$var*.nc
do
echo " Now processing $file"
echo " $(date) now copying $file to $localdir"
base=$(basename $file)
cp $file $localdir/$base
echo " $(date) now updating attributes in $file"
ncatted -a _FillValue,$var,m,s,-32767 -a _FillValue,lat,d,, -a _FillValue,lon,d,, -a _FillValue,time,d,, $localdir/$base $localdir/$base.att
echo " $(date) now converting $file to netcdf4"
nccopy -k 3 $localdir/$base.att $localdir/$base.4
echo " $(date) Now copying to /storage"
cp $localdir/$base.4 $file
echo " $(date) now cleaning up"
rm $localdir/$base
rm $localdir/$base.att
rm $localdir/$base.4
done
done
| true |
4dbab2455c0a8783ea99871f9962e6604e0228e3 | Shell | bismayswain/endsem | /assignment_8/assignment8/runtest.sh | UTF-8 | 301 | 3.234375 | 3 | [] | no_license | #!/bin/bash
for elements in $(cat params.txt);
do
for thread_no in $(cat threads.txt)
do
for i in {1..10}
do
outfile='r'$thread_no'.out'
A=`./prog.ex $elements $thread_no`
#echo $A
num=$(echo "$A" | tr -dc '0-9');
echo "$elements $thread_no $num" &>> $outfile
done
done
done
| true |
a693da12e97558bafb09d71b7effcca706f5cdc4 | Shell | gowong/prezto | /runcoms/zshrc | UTF-8 | 5,562 | 2.671875 | 3 | [
"MIT"
] | permissive | #
# Executes commands at the start of an interactive session.
#
# Authors:
# Sorin Ionescu <sorin.ionescu@gmail.com>
#
# Source Prezto.
if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
fi
# Customize to your needs...
# Immediately execute commands from history (ex. !150)
setopt no_hist_verify
# Aliases
# alias dev='ssh gwong.dev.box.net -R "9000:localhost:9000"'
alias a='cd /box/etc/application'
alias i='cd /Users/gwong/workspace/infra'
alias de='cd /Users/gwong/deployment-config'
alias n='cd /box/www/box-notes'
alias nn='cd /box/www/box-notes-desktop'
alias c='cd /box/www/current_local'
alias sshn='ssh -t notes-app-gwong "cat /etc/motd; cd /home/gwong/box-notes; bash;"'
alias sshb='ssh bastion'
alias ll='ls -la'
alias work='echo "Use werk"'
alias history='history -f -100'
# On Mac: requires 'brew install coreutils'
alias readlink='greadlink'
# Always loading nvm slows down terminal startup
alias loadnvm='source $(brew --prefix nvm)/nvm.sh'
# begin devtools provisioning to setup local development
# Remove any existing /box/www/devtools_readonly/bin from the path
PATH=$(echo $PATH | sed 's/\/box\/www\/devtools_readonly\/bin[:]*//g')
# --- Environment / PATH setup (machine-specific zsh config fragment) ---
# Remove any existing /usr/local/bin from the path
# (the sed strips every occurrence, including a trailing colon, so the
#  directory can be re-added exactly once at the front below)
PATH=$(echo $PATH | sed 's/\/usr\/local\/bin[:]*//g')
# Add /usr/local/bin and /box/www/devtools_readonly/bin to the top of your PATH
export PATH=/usr/local/bin:/box/www/devtools_readonly/bin:$PATH
# end devtools provisioning
# Env variables
# Use vim as default editor
export VISUAL="vim"
# Java
# NOTE: /usr/libexec/java_home is macOS-only; each call errors if that JDK
# major version is not installed — TODO confirm all three are present.
export JAVA_8_HOME=$(/usr/libexec/java_home -v1.8)
export JAVA_11_HOME=$(/usr/libexec/java_home -v11)
export JAVA_17_HOME=$(/usr/libexec/java_home -v17)
# Single-quoted on purpose: JAVA_HOME is resolved when the alias runs,
# not when it is defined.
alias java8='export JAVA_HOME=$JAVA_8_HOME'
alias java11='export JAVA_HOME=$JAVA_11_HOME'
alias java17='export JAVA_HOME=$JAVA_17_HOME'
# Use JDK 8 by default.
export JAVA_HOME=$JAVA_8_HOME
#export JAVA_HOME=$JAVA_11_HOME
#export JAVA_HOME=$JAVA_17_HOME
export NVM_DIR="$HOME/.nvm"
# Use latest (homebrew) version of bison (used to build thrift compiler
# from source, which is then used to build source code from thrift file)
# export PATH="/usr/local/opt/bison/bin:$PATH"
# export LDFLAGS="-L/usr/local/opt/bison/lib-L/usr/local/opt/openssl/lib"
# export CPPFLAGS="-I/usr/local/opt/openssl/include"
# export BOOST_ROOT="/usr/local/opt/boost"
# PATH
# Android NDK
#export PATH="$PATH:/Users/gwong/android-ndk-r17b"
# VS Code
export PATH="$PATH:/Applications/Visual Studio Code.app/Contents/Resources/app/bin"
# Misc.
export PATH="$PATH:/Users/gwong/bin"
# yarn (not needed with npm install yarn)
# export PATH="$HOME/.yarn/bin:$HOME/.config/yarn/global/node_modules/.bin:$PATH"
# ruby
export PATH="/usr/local/opt/ruby/bin:$PATH"
# FPM (ruby gem)
# export PATH="/usr/local/lib/ruby/gems/2.7.0/gems/fpm-1.14.2/bin:$PATH"
# PHP
export PATH="/opt/homebrew/opt/php@8.0/bin:$PATH"
export PATH="/opt/homebrew/opt/php@8.0/sbin:$PATH"
# Python
# export PATH="/usr/local/opt/python/libexec/bin:$PATH" # Not needed with brew install python
alias python=python3
# Force to use python 2 (Doesn't work with newer MacOS versions that no longer bundle python2)
# alias python3=python
# rsync
export PATH="/opt/homebrew/opt/rsync/bin:$PATH"
export DEVPOD_RSYNC_OPTIONS='-i ~/.ssh/id_rsa_devpod'
# Terminal colors
# iTerm2-only: colour the current tab via iTerm2's proprietary OSC 6
# escape sequences, and recolour automatically around ssh commands.
# ITERM_SESSION_ID is set by iTerm2, so this whole section is skipped in
# other terminals.
if [[ -n "$ITERM_SESSION_ID" ]]; then
  # tab-color R G B — set the tab background; each channel is 0-255.
  tab-color() {
    echo -ne "\033]6;1;bg;red;brightness;$1\a"
    echo -ne "\033]6;1;bg;green;brightness;$2\a"
    echo -ne "\033]6;1;bg;blue;brightness;$3\a"
  }
  tab-red() { tab-color 255 0 0 }
  tab-green() { tab-color 0 255 0 }
  tab-blue() { tab-color 0 0 255 }
  # Restore iTerm2's default tab colour.
  tab-reset() { echo -ne "\033]6;1;bg;*;default\a" }
  # Runs before every prompt: reset the colour once the command finished.
  function iterm2_tab_precmd() {
    # if [[ "$PWD" == "/box/www/box-notes" ]]; then
    #   tab-color 87 199 255
    # elif [[ "$PWD" == "/box/www/current_local" ]]; then
    #   tab-color 255 106 193
    # else
      tab-reset
    # fi
  }
  # Runs before every command ($1 = command line): colour the tab while
  # an ssh session to a recognised host class is active.
  # NOTE: a quoted pattern on the right of =~ is matched as a regex in
  # zsh (unlike bash, where quoting makes it literal).
  function iterm2_tab_preexec() {
    if [[ "$1" =~ "^ssh " ]]; then
      if [[ "$1" =~ "bastion" ]]; then
        tab-color 255 92 87
      elif [[ "$1" =~ "staging" ]]; then
        tab-color 243 249 157
      elif [[ "$1" =~ "dev" ]]; then
        tab-color 255 106 193
      fi
    elif [[ "$1" =~ "^sshn" ]]; then
      tab-color 87 199 255
    elif [[ "$1" =~ "^sshb" ]]; then
      tab-color 255 92 87
    else
      tab-reset
    fi
  }
  # Register the two hooks with zsh's hook machinery.
  autoload -U add-zsh-hook
  add-zsh-hook precmd iterm2_tab_precmd
  add-zsh-hook preexec iterm2_tab_preexec
fi
# Add confirmation to dangerous commands.
# Wraps git so that a bare `git stash clear` / `git stash drop` asks for
# confirmation first; every other invocation passes straight through.
# Arguments: forwarded verbatim to the real git binary.
git() {
  local prompt=""
  # "$*" joins all arguments with single spaces, so only the exact
  # two-word commands below trigger a prompt (previously the unquoted
  # `[[ $@ == ... ]]` comparison relied on the same joining implicitly).
  case "$*" in
    "stash clear") prompt="Are you sure you want to drop ALL stashes? [y/N]" ;;
    "stash drop")  prompt="Are you sure you want to drop your stash? [y/N]" ;;
  esac
  if [[ -n "$prompt" ]]; then
    echo "$prompt"
    read -r response
    case "$response" in
      [yY][eE][sS]|[yY])
        command git "$@"
        ;;
      *)
        # Declined: silently do nothing, matching the original behavior.
        ;;
    esac
  else
    command git "$@"
  fi
}
# The next line updates PATH for the Google Cloud SDK.
if [ -f '/Users/gwong/google-cloud-sdk/path.zsh.inc' ]; then . '/Users/gwong/google-cloud-sdk/path.zsh.inc'; fi
# The next line enables shell command completion for gcloud.
if [ -f '/Users/gwong/google-cloud-sdk/completion.zsh.inc' ]; then . '/Users/gwong/google-cloud-sdk/completion.zsh.inc'; fi
| true |
564cf767731e11177a82cf9b4a3fcb9f20ecc4bc | Shell | ithoq/afrimesh | /provisiond-bundles/mp01.handset/etc/init.d/asterisk | UTF-8 | 5,518 | 3.078125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh /etc/rc.common
# Copyright (C) 2006 OpenWrt.org
# rc.common start priority for this OpenWrt init script.
START=50
# Installation prefix; empty means the root filesystem.
DEST=
# Optional defaults file sourced by start().
DEFAULT=$DEST/etc/default/asterisk
OPTIONS=""
# Succeed (return 0) unless $1 is one of the recognised "truthy"
# spellings: true, on, 1, yes.  Used to test boolean UCI option values.
is_false() {
  case "$1" in
    true|on|1|yes) return 1 ;;
    *) return 0 ;;
  esac
}
# Append one "key=value" entry (as a literal "\n"-prefixed string) to the
# $sipconf accumulator via OpenWrt's append helper.
#   $1 = UCI section   $2 = option name   $3 = default value
#   $4 = output key (optional; defaults to the option name)
# Nothing is appended when both the configured value and the default are
# empty, or when the key is empty.
append_sip_parm() {
  local section="$1"
  local option="$2"
  local default="$3"
  local key="${4:-$option}"
  local value
  # Look the option up in the loaded UCI config; fall back to the default.
  config_get value "$section" "$option"
  value="${value:-$default}"
  if [ -n "$key" ] && [ -n "$value" ]; then
    append sipconf "\n$key=$value"
  fi
}
# Render one UCI "sip" section ($1 = section/config name) into the
# $sipconf accumulator as an Asterisk sip.conf peer stanza: a "[name]"
# header followed by one line per non-empty option.  Called for each
# section by config_foreach in configure_sip().
append_sip_entry() {
	local cfg="$1"
	append sipconf "\n\n[$cfg]"
	# (option, default, [key]) triples; empty default means "omit when
	# the option is not configured" (see append_sip_parm).
	append_sip_parm "$cfg" "context" "default"
	append_sip_parm "$cfg" "type" "peer"
	append_sip_parm "$cfg" "permit" "10.130.1.1"
	append_sip_parm "$cfg" "host" "10.130.1.1"
	append_sip_parm "$cfg" "port" ""
	append_sip_parm "$cfg" "username" ""
	#append_sip_parm "$cfg" "username" "" "authuser"
	#append_sip_parm "$cfg" "username" "" "fromuser"
	append_sip_parm "$cfg" "secret" ""
	#append_sip_parm "$cfg" "host" "" "fromdomain"
	append_sip_parm "$cfg" "nat" ""
	append_sip_parm "$cfg" "dtmfmode" ""
	append_sip_parm "$cfg" "bindaddr" ""
	append_sip_parm "$cfg" "bindport" ""
	append_sip_parm "$cfg" "insecure" ""
	append_sip_parm "$cfg" "callerid" ""
	append_sip_parm "$cfg" "canreinvite" ""
	append_sip_parm "$cfg" "allowguest" ""
	append_sip_parm "$cfg" "trustrpid" ""
	append_sip_parm "$cfg" "sendrpid" ""
	append_sip_parm "$cfg" "disallow" ""
	append_sip_parm "$cfg" "allow" ""
	append_sip_parm "$cfg" "qualify" "yes"
	#append sipconf "\ndisallow=all"
	#append sipconf "\nallow=$(uci get asterisk.@sipgeneral[0].allow)"
}
# Write the generated SIP configuration for Asterisk:
#   $1 = SIP register string (user:secret@host)
#   $2 = local SIP extension (currently only used in commented-out rules)
#   $3 = allowed codec (currently unused here)
# Produces /etc/asterisk/potato.sip.conf (register line + one peer stanza
# per UCI "sip" section) and /etc/asterisk/potato.extensions.conf, and
# makes sure both are #include'd exactly once from the main config files.
configure_sip() {
	local sipremote="$1"
	local sipextension="$2"
	local sipcodec="$3"
	# build & write out potato.sip.conf
	# Idempotent include: only append the #include line if grep finds no
	# existing occurrence in sip.conf.
	[ $(grep -c "#include \"potato\.sip\.conf\"" /etc/asterisk/sip.conf) -eq 0 ] && \
	  echo "#include \"potato.sip.conf\"" >> /etc/asterisk/sip.conf
	local sipconf=""
	# Fills $sipconf via append_sip_entry for every "sip" config section.
	config_foreach append_sip_entry sip
	echo -e "register => ${sipremote}" > /etc/asterisk/potato.sip.conf # TODO - register incoming sip providers for all sip entries
	echo -e "${sipconf}" >> /etc/asterisk/potato.sip.conf
	# build & write out potato.extensions.conf
	[ $(grep -c "#include \"potato\.extensions\.conf\"" /etc/asterisk/extensions.conf) -eq 0 ] && \
	  echo "#include \"potato.extensions.conf\"" >> /etc/asterisk/extensions.conf
	cat > /etc/asterisk/potato.extensions.conf << EOF
; VT dial rules to support SIP client functionality
[villagetelco-incoming]
exten => s,1,Dial(MP/1) ; answer incoming SIP calls
EOF
	#; exten => ${sipextension},1,Dial(MP/1)
	#; exten => _XXXX,1,Dial(SIP/\${EXTEN}@villagetelco,30,r) ; route 4 digit extensions to SIP provider
	#; exten => _0.,1,Dial(SIP/\${EXTEN}@villagetelco,30,r) ; trunk everything after '0' through SIP provider
}
# Regenerate the potato.* Asterisk config fragments from UCI settings
# (packages "afrimesh" and "asterisk").  SIP is unconditionally enabled
# on provisioned devices; the villagetelco section supplies the upstream
# SIP account used to build the register string.
configure_asterisk() {
	# load asterisk configuration from UCI
	config_load afrimesh
	#config_get sip_enable "voip" "sip"
	sip_enable=1 # provisioned potatos always have sip enabled
	config_get iax_enable "voip" "iax"
	# load asterisk configuration from UCI
	config_load asterisk
	# clear any existing potato configuration
	echo > /etc/asterisk/potato.sip.conf
	echo > /etc/asterisk/potato.extensions.conf
	# configure sip trunks
	if is_false $sip_enable ; then
		return
	else
		config_get host "villagetelco" "host"
		config_get username "villagetelco" "username"
		config_get secret "villagetelco" "secret"
		config_get codec "villagetelco" "allow"
		#configure_sip "${username}:${secret}@${host}/${username}" "${username}" "${codec}"
		configure_sip "${username}:${secret}@${host}" "${username}" "${codec}"
	fi
}
# Hard restart: kill Asterisk via its pidfile, regenerate the config
# fragments, then start a fresh foreground (-f) instance in the background.
restart() {
	# kill asterisk
	[ -f $DEST/var/run/asterisk.pid ] && kill $(cat $DEST/var/run/asterisk.pid) >/dev/null 2>&1
	sleep 2
	# reconfigure asterisk
	configure_asterisk
	# restart asterisk
	/usr/sbin/asterisk -f 2>&1 > /dev/null &
}
# Soft reload: regenerate the config fragments and ask the running
# Asterisk (via its remote console, -rx) to reload them — no restart.
reload() {
	# reconfigure asterisk
	configure_asterisk
	# reload dialplan
	/usr/sbin/asterisk -rx 'reload' > /dev/null
	# it takes asterisk a few moments to update peer status
	sleep 4
}
# Bring up Asterisk on the device: create runtime directories, device
# nodes and kernel modules for the MP (handset) hardware, regenerate the
# config fragments, then launch Asterisk.
# NOTE(review): the char-device major numbers (33/34) and /usr/lib/*.ko
# paths are board-specific — confirm against the target firmware.
start() {
	# create asterisk directories
	[ -f $DEFAULT ] && . $DEFAULT
	[ -d $DEST/var/run ] || mkdir -p $DEST/var/run
	[ -d $DEST/var/log/asterisk ] || mkdir -p $DEST/var/log/asterisk
	[ -d $DEST/var/spool/asterisk ] || mkdir -p $DEST/var/spool/asterisk
	[ -d /var/spool/asterisk ] || mkdir -p /var/spool/asterisk
	[ -h $DEST/usr/lib/asterisk/astdb ] || ln -sf /var/spool/asterisk/astdb $DEST/usr/lib/asterisk/astdb
	# create device nodes if needed
	[ ! -c /dev/8250mp ] && mknod -m 666 /dev/8250mp c 33 0
	[ ! -c /dev/mp ] && mknod -m 666 /dev/mp c 34 0
	# load kernel modules if needed (first field of /proc/modules is the
	# module name; grep -c == 0 means "not loaded yet")
	[ $(cut -d ' ' -f 1 /proc/modules | grep -c "^serial_core") -eq 0 ] && insmod /usr/lib/serial_core.ko
	[ $(cut -d ' ' -f 1 /proc/modules | grep -c "^8250mp") -eq 0 ] && insmod /usr/lib/8250mp.ko
	[ $(cut -d ' ' -f 1 /proc/modules | grep -c "^mp") -eq 0 ] && insmod /usr/lib/mp.ko
	sleep 1
	# generate the includes for asterisk configuration
	configure_asterisk
	# start up asterisk
	/usr/sbin/asterisk -f 2>&1 > /dev/null &
}
# Stop Asterisk: signal the PID recorded in the pidfile, if one exists.
# kill errors (stale pidfile, dead process) are silenced; the function's
# exit status mirrors the original && chain (non-zero when no pidfile).
stop() {
	local pidfile="$DEST/var/run/asterisk.pid"
	[ -f "$pidfile" ] && kill "$(cat "$pidfile")" >/dev/null 2>&1
}
| true |
1528937b339bdddfc39f8ec27149718ed8b6efce | Shell | JeromeFitz/dotfiles | /scripts/bootstrap | UTF-8 | 4,747 | 4.21875 | 4 | [] | no_license | #!/usr/bin/env bash
#
# bootstrap installs things.
cd "$(dirname "$0")/.."
BREWFILE_TYPE="Brewfile"
DOTFILES_ROOT=$(pwd -P)
PKG_NAME="@jeromefitz/dotfiles"
set -e
while test $# -gt 0; do
case "$1" in
"-s"|"--slim")
BREWFILE_TYPE="Brewfile-Slim"
# shift
;;
*)
echo "Invalid option: $1"
exit
;;
esac
shift
done
if which node > /dev/null; then
PKG_NAME=$(node -pe "require('./package.json')['name']")
fi
# @note(PKG) variable setting for outputs
if [[ ! -d ".git" ]] || test ! $(which yarn); then
PKG_NAME="๐๏ธ $PKG_NAME"
PKG_VERSION="x.y.z (init)"
else
PKG_NAME="๐ฆ๏ธ $PKG_NAME"
PKG_VERSION="$(git fetch origin --tags -f -q && git describe --tags --abbrev=0 | sed 's/v//')"
fi
# Status-line helpers.  Arguments are passed as printf *data* (%s), not
# spliced into the format string, so values containing '%' or backslashes
# print literally instead of being interpreted as printf directives.
# msg LABEL TEXT — blue "[ LABEL ]" prefix followed by TEXT.
msg () {
  printf '\r  [ \033[00;34m%s\033[0m ] %s\n' "$1" "$2"
}
# info TEXT — blue ".." progress line.
info () {
  printf '\r  [ \033[00;34m..\033[0m ] %s\n' "$1"
}
# user TEXT — prompt line flagged with a yellow marker.
user () {
  printf '\r  [ \033[0;33m🙋️\033[0m ] %s\n' "$1"
}
# success TEXT — green OK, erasing the current line first (\033[2K).
success () {
  printf '\r\033[2K  [ \033[00;32mOK\033[0m ] %s\n' "$1"
}
# fail TEXT — red FAIL, then abort the whole script.
fail () {
  printf '\r\033[2K  [\033[0;31mFAIL\033[0m] %s\n' "$1"
  echo ''
  exit
}
setup_gitconfig () {
if ! [ -f symlinks/gitconfig.private.symlink ]
then
msg ๐ ๏ธ "gitconfig"
git_credential='cache'
if [ "$(uname -s)" == "Darwin" ]
then
git_credential='osxkeychain'
fi
user ' - ๐บ๏ธ [git] What is your username? ๐๏ธ'
read -e git_username
user ' - ๐ธ๏ธ [git] What is your author name? ๐๏ธ'
read -e git_authorname
user ' - ๐น๏ธ [git] What is your author email? ๐๏ธ'
read -e git_authoremail
sed -e "s/USERNAME/$git_username/g" -e "s/AUTHORNAME/$git_authorname/g" -e "s/AUTHOREMAIL/$git_authoremail/g" -e "s/GIT_CREDENTIAL_HELPER/$git_credential/g" $DOTFILES_ROOT/symlinks/gitconfig.private.symlink.example > $DOTFILES_ROOT/symlinks/gitconfig.private.symlink
success "๐ ๏ธ gitconfig"
msg ๐ ๏ธ "complete"
info ""
fi
}
link_file () {
local src=$1 dst=$2
local overwrite= backup= skip=
local action=
if [ -f "$dst" -o -d "$dst" -o -L "$dst" ]
then
if [ "$overwrite_all" == "false" ] && [ "$backup_all" == "false" ] && [ "$skip_all" == "false" ]
then
local currentSrc="$(readlink $dst)"
if [ "$currentSrc" == "$src" ]
then
skip=true;
else
user "File already exists: $dst ($(basename "$src")), what do you want to do?\n\
[s]kip, [S]kip all, [o]verwrite, [O]verwrite all, [b]ackup, [B]ackup all?"
read -n 1 action
case "$action" in
o )
overwrite=true;;
O )
overwrite_all=true;;
b )
backup=true;;
B )
backup_all=true;;
s )
skip=true;;
S )
skip_all=true;;
* )
;;
esac
fi
fi
overwrite=${overwrite:-$overwrite_all}
backup=${backup:-$backup_all}
skip=${skip:-$skip_all}
if [ "$overwrite" == "true" ]
then
rm -rf "$dst"
success "๐ฅ๏ธ removed: $dst"
fi
if [ "$backup" == "true" ]
then
mv "$dst" "${dst}.backup"
success "๐๏ธ moved: $dst -> ${dst}.backup"
fi
if [ "$skip" == "true" ]
then
success "๐ป๏ธ skipped: $src"
fi
fi
if [ "$skip" != "true" ] # "false" or empty
then
ln -s "$1" "$2"
success "๐๏ธ linked: $1 -> $2"
fi
}
install_dotfiles () {
msg ๐ฝ๏ธ "dotfiles"
local overwrite_all=false backup_all=false skip_all=false
msg ๐ฝ๏ธ "โบ ๐๏ธ symlinks"
for src in $(find -H "$DOTFILES_ROOT" -maxdepth 2 -name '*.symlink' -not -path '*.git*' | sort)
do
dst="$HOME/.$(basename "${src%.*}")"
link_file "$src" "$dst"
done
#
# vim.install.sh
for f in {ftdetect,ftplugin}; do
mkdir -p "$HOME/.vim/$f"
for src in $(find -H "$DOTFILES_ROOT/symlinks/vim/$f" -maxdepth 2 -name '*.vim' | sort)
do
dst="$HOME/.vim/$f/$(basename "${src%.*}.vim")"
link_file "$src" "$dst"
done
done
success "โบ ๐๏ธ symlinks"
success "๐ฝ๏ธ dotfiles"
msg ๐ฝ๏ธ "complete"
info ""
}
# macOS only: hand off to `bin/dot`, which installs Homebrew packages for
# the selected Brewfile, streaming its output line-by-line through info().
install_homebrew () {
  # If we're on a Mac, let's install and setup homebrew.
  if [ "$(uname -s)" == "Darwin" ]
  then
    msg 🍺️ "Homebrew"
    # Run in a subshell with pipefail so a failure inside `bin/dot` is
    # detected.  Previously the `if` only tested the exit status of the
    # trailing `while read` loop, so any error from bin/dot was silently
    # reported as success.
    if ( set -o pipefail; source bin/dot -t "$BREWFILE_TYPE" | while read -r data; do info "$data"; done )
    then
      success "⎿ 🍻️ dependencies"
    else
      fail "⎿ 🤮️ dependencies"
    fi
    success "🍺️ Homebrew"
    msg 🍺️ "complete"
    info ""
  fi
}
msg ๐ค๏ธ ""
msg ๐ค๏ธ "$PKG_NAME@$PKG_VERSION"
msg ๐ค๏ธ "๐บ๏ธ $BREWFILE_TYPE"
msg ๐ค๏ธ ""
setup_gitconfig
install_dotfiles
install_homebrew
msg ๐๏ธ ""
msg ๐๏ธ "$PKG_NAME@$PKG_VERSION"
msg ๐๏ธ ""
| true |
1cf36253dbbd4cd532bd1deeddc7307113b3afc5 | Shell | jmackey-astro/PION | /test_problems/untested/RT_1D_rec/run_A1A3_tests.sh | UTF-8 | 14,605 | 3.125 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Test-driver setup.  Arguments:
#   $1 = test-problems root   $2 = source/code directory   $3 = data root
# Compiles the serial PION binaries and copies them into the test dir.
test_dir=${1}/RT_1D_rec
code_dir=$2
DATA=$3/RT_1D_rec
mkdir $DATA
cd ${code_dir}
echo "MAKE IN" $code_dir
# compile the code
./compile_code.sh
if [ ! -f ../bin/main_serial ] || [ ! -f ../bin/icgen_serial ]
then
  echo "Cannot compile code"
  exit
else
  echo "MAKE SUCEEDED"
fi
cd $test_dir
# Replace any stale binaries in the test directory with the fresh build.
rm icgen_serial main_serial
bin_dir=${code_dir}/../bin
cp ${bin_dir}/icgen_serial $test_dir/icgen_serial
cp ${bin_dir}/main_serial $test_dir/main_serial
# Model names for the A1 (first-order) runs: nh1/nh2/nh3 are three gas
# densities, dT01..dT30 four timestep criteria.
A1BASE01=SS1D_nh1_dT01_A1
A1BASE02=SS1D_nh1_dT03_A1
A1BASE03=SS1D_nh1_dT10_A1
A1BASE04=SS1D_nh1_dT30_A1
A1BASE11=SS1D_nh2_dT01_A1
A1BASE12=SS1D_nh2_dT03_A1
A1BASE13=SS1D_nh2_dT10_A1
A1BASE14=SS1D_nh2_dT30_A1
A1BASE21=SS1D_nh3_dT01_A1
A1BASE22=SS1D_nh3_dT03_A1
A1BASE23=SS1D_nh3_dT10_A1
A1BASE24=SS1D_nh3_dT30_A1
### TEMP TEMP
################
#  make plots  #
################
### TEMP TEMP
# TSTEP maps timestep-criterion index -> filename suffix.
TSTEP=( [0]=dt00 [1]=dt01 [2]=dt02 [3]=dt03 [4]=dt04 [5]=dt05 [6]=dt06 [7]=dt07 [8]=dt08 [9]=dt09 [10]=dt10 [11]=dt11 [12]=dt12 )
# Clear old A1/dt05 outputs before regenerating.
rm ${DATA}/*SS1D*A1*dt05*
# Generate initial conditions
./icgen_serial pf_${A1BASE01}.txt silo redirect=msg_temp
./icgen_serial pf_${A1BASE02}.txt silo redirect=msg_temp
./icgen_serial pf_${A1BASE03}.txt silo redirect=msg_temp
./icgen_serial pf_${A1BASE04}.txt silo redirect=msg_temp
#
./icgen_serial pf_${A1BASE11}.txt silo redirect=msg_temp
./icgen_serial pf_${A1BASE12}.txt silo redirect=msg_temp
./icgen_serial pf_${A1BASE13}.txt silo redirect=msg_temp
./icgen_serial pf_${A1BASE14}.txt silo redirect=msg_temp
#
./icgen_serial pf_${A1BASE21}.txt silo redirect=msg_temp
./icgen_serial pf_${A1BASE22}.txt silo redirect=msg_temp
./icgen_serial pf_${A1BASE23}.txt silo redirect=msg_temp
./icgen_serial pf_${A1BASE24}.txt silo redirect=msg_temp
#
rm msg_temp*
##############################################
# run models with A1-dt05.                   #
##############################################
EXE=./main_serial
TSTEP=( [0]=dt00 [1]=dt01 [2]=dt02 [3]=dt03 [4]=dt04 [5]=dt05 [6]=dt06 [7]=dt07 [8]=dt08 [9]=dt09 [10]=dt10 [11]=dt11 [12]=dt12 )
# Only timestep index 5 (dt05) is currently enabled; the if-chain maps
# each index to its silo output frequency OPF, and indices >= 5 also pass
# limit_timestep=5.  Runs are batched three-at-a-time (one per density)
# with a `wait` barrier between batches.
for ii in 5
do
  OPF=1
  if [ $ii -eq 0 ]; then OPF=1; fi
  if [ $ii -eq 1 ]; then OPF=1; fi
  if [ $ii -eq 2 ]; then OPF=1; fi
  if [ $ii -eq 3 ]; then OPF=1; fi
  if [ $ii -eq 4 ]; then OPF=5; fi
  if [ $ii -eq 5 ]; then OPF=10; fi
  if [ $ii -eq 6 ]; then OPF=20; fi
  if [ $ii -eq 7 ]; then OPF=40; fi
  if [ $ii -eq 8 ]; then OPF=80; fi
  if [ $ii -eq 9 ]; then OPF=10; fi
  if [ $ii -eq 10 ]; then OPF=20; fi
  if [ $ii -eq 11 ]; then OPF=40; fi
  if [ $ii -eq 12 ]; then OPF=80; fi
  TS=''
  if [ $ii -ge 5 ]; then TS='limit_timestep=5'; fi
  #
  ${EXE} IC_${A1BASE01}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A1BASE01}_${TSTEP[ii]} redirect=${DATA}/msg_${A1BASE01}_${TSTEP[ii]}_ $TS optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A1BASE11}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A1BASE11}_${TSTEP[ii]} redirect=${DATA}/msg_${A1BASE11}_${TSTEP[ii]}_ $TS optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A1BASE21}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A1BASE21}_${TSTEP[ii]} redirect=${DATA}/msg_${A1BASE21}_${TSTEP[ii]}_ $TS optype=5 opfreq=$OPF checkpt_freq=100000 &
  wait
  #
  ${EXE} IC_${A1BASE02}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A1BASE02}_${TSTEP[ii]} redirect=${DATA}/msg_${A1BASE02}_${TSTEP[ii]}_ $TS optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A1BASE12}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A1BASE12}_${TSTEP[ii]} redirect=${DATA}/msg_${A1BASE12}_${TSTEP[ii]}_ $TS optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A1BASE22}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A1BASE22}_${TSTEP[ii]} redirect=${DATA}/msg_${A1BASE22}_${TSTEP[ii]}_ $TS optype=5 opfreq=$OPF checkpt_freq=100000 &
  wait
  #
  ${EXE} IC_${A1BASE03}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A1BASE03}_${TSTEP[ii]} redirect=${DATA}/msg_${A1BASE03}_${TSTEP[ii]}_ $TS optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A1BASE13}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A1BASE13}_${TSTEP[ii]} redirect=${DATA}/msg_${A1BASE13}_${TSTEP[ii]}_ $TS optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A1BASE23}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A1BASE23}_${TSTEP[ii]} redirect=${DATA}/msg_${A1BASE23}_${TSTEP[ii]}_ $TS optype=5 opfreq=$OPF checkpt_freq=100000 &
  wait
  #
  ${EXE} IC_${A1BASE04}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A1BASE04}_${TSTEP[ii]} redirect=${DATA}/msg_${A1BASE04}_${TSTEP[ii]}_ $TS optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A1BASE14}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A1BASE14}_${TSTEP[ii]} redirect=${DATA}/msg_${A1BASE14}_${TSTEP[ii]}_ $TS optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A1BASE24}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A1BASE24}_${TSTEP[ii]} redirect=${DATA}/msg_${A1BASE24}_${TSTEP[ii]}_ $TS optype=5 opfreq=$OPF checkpt_freq=100000 &
  wait
done
#exit
############################################
# calculate IF radius as function of time. #
############################################
#
# Post-process the A1 runs: extract the ionisation-front radius vs. time
# from each model's silo output series.  OPF must match the opfreq used
# when the corresponding model was run (only index 5 is enabled).
for ii in 5
do
  case $ii in
    0|1|2) OPF=1 ;;
    3) OPF=3 ;;
    4) OPF=30 ;;
    5|9) OPF=10 ;;
    6|10) OPF=20 ;;
    7|11) OPF=40 ;;
    8|12) OPF=80 ;;
    *) OPF=1 ;;
  esac
  # Same call for every model, in the same nh1/nh2/nh3 order as before.
  for base in ${A1BASE01} ${A1BASE02} ${A1BASE03} ${A1BASE04} \
              ${A1BASE11} ${A1BASE12} ${A1BASE13} ${A1BASE14} \
              ${A1BASE21} ${A1BASE22} ${A1BASE23} ${A1BASE24}
  do
    ../RT_1D_norec/plot_radius ${DATA}/rad_${base}_${TSTEP[ii]} ${DATA}/${base}_${TSTEP[ii]} 0 $OPF 5 silo
  done
done
##################################################################
############## -------------------------------- ##################
############## NOW DO THE SAME AGAIN FOR A3 ##################
############## -------------------------------- ##################
##################################################################
# Model names for the A3 (higher-order) runs, mirroring the A1 set.
A3BASE01=SS1D_nh1_dT01_A3
A3BASE02=SS1D_nh1_dT03_A3
A3BASE03=SS1D_nh1_dT10_A3
A3BASE04=SS1D_nh1_dT30_A3
A3BASE11=SS1D_nh2_dT01_A3
A3BASE12=SS1D_nh2_dT03_A3
A3BASE13=SS1D_nh2_dT10_A3
A3BASE14=SS1D_nh2_dT30_A3
A3BASE21=SS1D_nh3_dT01_A3
A3BASE22=SS1D_nh3_dT03_A3
A3BASE23=SS1D_nh3_dT10_A3
A3BASE24=SS1D_nh3_dT30_A3
TSTEP=( [0]=dt00 [1]=dt01 [2]=dt02 [3]=dt03 [4]=dt04 [5]=dt05 [6]=dt06 [7]=dt07 [8]=dt08 [9]=dt09 [10]=dt10 [11]=dt11 [12]=dt12 )
# Clear old A3/dt02 outputs before regenerating.
echo "rm ${DATA}/*A3*dt02*"
rm ${DATA}/*SS1D*A3*dt02*
# Generate initial conditions
./icgen_serial pf_${A3BASE01}.txt silo redirect=msg_temp
./icgen_serial pf_${A3BASE02}.txt silo redirect=msg_temp
./icgen_serial pf_${A3BASE03}.txt silo redirect=msg_temp
./icgen_serial pf_${A3BASE04}.txt silo redirect=msg_temp
./icgen_serial pf_${A3BASE11}.txt silo redirect=msg_temp
./icgen_serial pf_${A3BASE12}.txt silo redirect=msg_temp
./icgen_serial pf_${A3BASE13}.txt silo redirect=msg_temp
./icgen_serial pf_${A3BASE14}.txt silo redirect=msg_temp
./icgen_serial pf_${A3BASE21}.txt silo redirect=msg_temp
./icgen_serial pf_${A3BASE22}.txt silo redirect=msg_temp
./icgen_serial pf_${A3BASE23}.txt silo redirect=msg_temp
./icgen_serial pf_${A3BASE24}.txt silo redirect=msg_temp
#
rm msg_temp*
##############################################
# run models with A3-dt02.                   #
##############################################
EXE=./main_serial
TSTEP=( [0]=dt00 [1]=dt01 [2]=dt02 [3]=dt03 [4]=dt04 [5]=dt05 [6]=dt06 [7]=dt07 [8]=dt08 [9]=dt09 [10]=dt10 [11]=dt11 [12]=dt12 )
# Run the A3 models.  Only timestep index 2 (dt02) is currently enabled;
# the case below maps each index to its silo output frequency OPF.
# Runs are batched three-at-a-time with a `wait` barrier, as for A1.
for ii in 2
do
  case $ii in
    0) OPF=4 ;;
    1) OPF=8 ;;
    2) OPF=16 ;;
    3) OPF=32 ;;
    4) OPF=64 ;;
    5) OPF=20 ;;
    6) OPF=40 ;;
    7) OPF=80 ;;
    8) OPF=160 ;;
    9) OPF=20 ;;
    10) OPF=40 ;;
    11) OPF=80 ;;
    12) OPF=160 ;;
    *) OPF=1 ;;
  esac
  #
  # BUGFIX: this loop previously invoked '${EXE[ii]}', which indexes the
  # scalar EXE as an array; for ii=2 that expands to an empty string, so
  # the simulation binary was never actually executed.  Use ${EXE} as in
  # the A1 loop above.
  ${EXE} IC_${A3BASE01}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A3BASE01}_${TSTEP[ii]} redirect=${DATA}/msg_${A3BASE01}_${TSTEP[ii]}_ optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A3BASE11}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A3BASE11}_${TSTEP[ii]} redirect=${DATA}/msg_${A3BASE11}_${TSTEP[ii]}_ optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A3BASE21}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A3BASE21}_${TSTEP[ii]} redirect=${DATA}/msg_${A3BASE21}_${TSTEP[ii]}_ optype=5 opfreq=$OPF checkpt_freq=100000 &
  wait
  #
  ${EXE} IC_${A3BASE02}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A3BASE02}_${TSTEP[ii]} redirect=${DATA}/msg_${A3BASE02}_${TSTEP[ii]}_ optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A3BASE12}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A3BASE12}_${TSTEP[ii]} redirect=${DATA}/msg_${A3BASE12}_${TSTEP[ii]}_ optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A3BASE22}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A3BASE22}_${TSTEP[ii]} redirect=${DATA}/msg_${A3BASE22}_${TSTEP[ii]}_ optype=5 opfreq=$OPF checkpt_freq=100000 &
  wait
  #
  ${EXE} IC_${A3BASE03}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A3BASE03}_${TSTEP[ii]} redirect=${DATA}/msg_${A3BASE03}_${TSTEP[ii]}_ optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A3BASE13}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A3BASE13}_${TSTEP[ii]} redirect=${DATA}/msg_${A3BASE13}_${TSTEP[ii]}_ optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A3BASE23}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A3BASE23}_${TSTEP[ii]} redirect=${DATA}/msg_${A3BASE23}_${TSTEP[ii]}_ optype=5 opfreq=$OPF checkpt_freq=100000 &
  wait
  #
  ${EXE} IC_${A3BASE04}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A3BASE04}_${TSTEP[ii]} redirect=${DATA}/msg_${A3BASE04}_${TSTEP[ii]}_ optype=5 opfreq=$OPF checkpt_freq=100000 &
  ${EXE} IC_${A3BASE14}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A3BASE14}_${TSTEP[ii]} redirect=${DATA}/msg_${A3BASE14}_${TSTEP[ii]}_ optype=5 opfreq=$OPF checkpt_freq=100000 &
  # BUGFIX: this line previously used the undefined '${A3A3BASE24}'
  # (doubled prefix), so the nh3/dT30 model was silently skipped.
  ${EXE} IC_${A3BASE24}.silo 5 1 cfl=10000.0 outfile=${DATA}/${A3BASE24}_${TSTEP[ii]} redirect=${DATA}/msg_${A3BASE24}_${TSTEP[ii]}_ optype=5 opfreq=$OPF checkpt_freq=100000 &
  wait
done
#exit
############################################
# calculate IF radius as function of time. #
############################################
#
# Post-process the A3 runs; OPF must match the opfreq used when the
# corresponding model was run (only index 2 is enabled).
for ii in 2
do
  OPF=1
  if [ $ii -eq 0 ]; then OPF=4; fi
  if [ $ii -eq 1 ]; then OPF=8; fi
  if [ $ii -eq 2 ]; then OPF=16; fi
  if [ $ii -eq 3 ]; then OPF=32; fi
  if [ $ii -eq 4 ]; then OPF=64; fi
  if [ $ii -eq 5 ]; then OPF=20; fi
  if [ $ii -eq 6 ]; then OPF=40; fi
  if [ $ii -eq 7 ]; then OPF=80; fi
  if [ $ii -eq 8 ]; then OPF=160; fi
  if [ $ii -eq 9 ]; then OPF=20; fi
  if [ $ii -eq 10 ]; then OPF=40; fi
  if [ $ii -eq 11 ]; then OPF=80; fi
  if [ $ii -eq 12 ]; then OPF=160; fi
  #
  ../RT_1D_norec/plot_radius ${DATA}/rad_${A3BASE01}_${TSTEP[ii]} ${DATA}/${A3BASE01}_${TSTEP[ii]} 0 $OPF 5 silo
  ../RT_1D_norec/plot_radius ${DATA}/rad_${A3BASE02}_${TSTEP[ii]} ${DATA}/${A3BASE02}_${TSTEP[ii]} 0 $OPF 5 silo
  ../RT_1D_norec/plot_radius ${DATA}/rad_${A3BASE03}_${TSTEP[ii]} ${DATA}/${A3BASE03}_${TSTEP[ii]} 0 $OPF 5 silo
  ../RT_1D_norec/plot_radius ${DATA}/rad_${A3BASE04}_${TSTEP[ii]} ${DATA}/${A3BASE04}_${TSTEP[ii]} 0 $OPF 5 silo
  #
  ../RT_1D_norec/plot_radius ${DATA}/rad_${A3BASE11}_${TSTEP[ii]} ${DATA}/${A3BASE11}_${TSTEP[ii]} 0 $OPF 5 silo
  ../RT_1D_norec/plot_radius ${DATA}/rad_${A3BASE12}_${TSTEP[ii]} ${DATA}/${A3BASE12}_${TSTEP[ii]} 0 $OPF 5 silo
  ../RT_1D_norec/plot_radius ${DATA}/rad_${A3BASE13}_${TSTEP[ii]} ${DATA}/${A3BASE13}_${TSTEP[ii]} 0 $OPF 5 silo
  ../RT_1D_norec/plot_radius ${DATA}/rad_${A3BASE14}_${TSTEP[ii]} ${DATA}/${A3BASE14}_${TSTEP[ii]} 0 $OPF 5 silo
  #
  ../RT_1D_norec/plot_radius ${DATA}/rad_${A3BASE21}_${TSTEP[ii]} ${DATA}/${A3BASE21}_${TSTEP[ii]} 0 $OPF 5 silo
  ../RT_1D_norec/plot_radius ${DATA}/rad_${A3BASE22}_${TSTEP[ii]} ${DATA}/${A3BASE22}_${TSTEP[ii]} 0 $OPF 5 silo
  ../RT_1D_norec/plot_radius ${DATA}/rad_${A3BASE23}_${TSTEP[ii]} ${DATA}/${A3BASE23}_${TSTEP[ii]} 0 $OPF 5 silo
  ../RT_1D_norec/plot_radius ${DATA}/rad_${A3BASE24}_${TSTEP[ii]} ${DATA}/${A3BASE24}_${TSTEP[ii]} 0 $OPF 5 silo
  #
done
################
#  make plots  #
################
# Compare A3 vs A1 ionisation-front radii for each model.  Arguments to
# make_A1A3_fig.sh: A3 radius file, A1 radius file, then three numeric
# scalings per density/timestep combination (length scale, time scale,
# number of outputs — TODO confirm against make_A1A3_fig.sh).
echo "PLOTTING PLOTTING PLOTTING PLOTTING V1"
./make_A1A3_fig.sh ${DATA}/rad_${A3BASE01} ${DATA}/rad_${A1BASE01} 6.0e19 3.861e11 3840
./make_A1A3_fig.sh ${DATA}/rad_${A3BASE02} ${DATA}/rad_${A1BASE02} 6.0e19 3.861e11 1280
./make_A1A3_fig.sh ${DATA}/rad_${A3BASE03} ${DATA}/rad_${A1BASE03} 6.0e19 3.861e11 384
./make_A1A3_fig.sh ${DATA}/rad_${A3BASE04} ${DATA}/rad_${A1BASE04} 6.0e19 3.861e11 128
./make_A1A3_fig.sh ${DATA}/rad_${A3BASE11} ${DATA}/rad_${A1BASE11} 6.0e18 3.861e10 3840
./make_A1A3_fig.sh ${DATA}/rad_${A3BASE12} ${DATA}/rad_${A1BASE12} 6.0e18 3.861e10 1280
./make_A1A3_fig.sh ${DATA}/rad_${A3BASE13} ${DATA}/rad_${A1BASE13} 6.0e18 3.861e10 384
./make_A1A3_fig.sh ${DATA}/rad_${A3BASE14} ${DATA}/rad_${A1BASE14} 6.0e18 3.861e10 128
./make_A1A3_fig.sh ${DATA}/rad_${A3BASE21} ${DATA}/rad_${A1BASE21} 6.0e17 3.861e09 3840
./make_A1A3_fig.sh ${DATA}/rad_${A3BASE22} ${DATA}/rad_${A1BASE22} 6.0e17 3.861e09 1280
./make_A1A3_fig.sh ${DATA}/rad_${A3BASE23} ${DATA}/rad_${A1BASE23} 6.0e17 3.861e09 384
./make_A1A3_fig.sh ${DATA}/rad_${A3BASE24} ${DATA}/rad_${A1BASE24} 6.0e17 3.861e09 128
# Collect the generated figures one directory up.
mv *.jpeg *.eps ../
exit
| true |
e805ad729c6e8a3d6ee64258bd4f7b16688a40f5 | Shell | twendt/ccu-addon-hmq | /addon_files/update_script | UTF-8 | 509 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Installer/updater for the hmq CCU add-on.  Run from the unpacked
# add-on archive directory (expects ./hmq alongside this script).
ADDONS_DIR=/usr/local/addons
HMQ_DIR=$ADDONS_DIR/hmq
BIN_DIR=/usr/local/bin
LIB_DIR=/usr/local/lib
CONF_DIR=/usr/local/etc/config
# Make sure /usr/local is mounted before writing to it.
# BUGFIX: the original used `grep /usr/local 2>&1 >/dev/null`, which
# redirects stderr to the terminal (not to /dev/null) because the
# redirections are applied left-to-right; `grep -q` silences output
# correctly and avoids the explicit $? check.
if ! mount | grep -q /usr/local; then
  mount /usr/local
fi
mkdir -p "$ADDONS_DIR" && chmod 755 "$ADDONS_DIR"
# Stop a previously installed hmq service before replacing its files.
if [ -f "$CONF_DIR/rc.d/hmq" ]; then
  "$CONF_DIR/rc.d/hmq" stop
fi
# Install (or refresh) the add-on files and its rc/web entry points.
cp -af hmq "$ADDONS_DIR/"
ln -sf "$HMQ_DIR/rc.d/hmq" "$CONF_DIR/rc.d/hmq"
ln -sf "$HMQ_DIR/www" "$CONF_DIR/addons/www/hmq"
# Migration
/etc/config/rc.d/hmq restart
exit 0
| true |
4b9bcb0782dd8c8c67c0b0f58d7c9e3de19971eb | Shell | gavin0723/hdp-utils | /operations/hdp-nfs3 | UTF-8 | 3,160 | 3.296875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# chkconfig: 2345 20 80
# description: hdp-nfs3 startup script
# processname: hdp-nfs3
#/*
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
# Exit status reported by the final `exit $RETVAL` (only changed via
# explicit `exit` in the branches below).
RETVAL=0
# Pidfiles written by hadoop-daemon.sh for the two daemons this script
# manages; the portmapper runs as root, the NFS3 gateway as hdfs.
HDP_PORTMAP_PID=/var/run/hadoop/root/hadoop-root-portmap.pid
HDP_NFS3_PID=/var/run/hadoop/hdfs/hadoop-hdfs-nfs3.pid
HADOOP_DAEMON_BIN=/usr/hdp/2.2.0.0-2041/hadoop/sbin/hadoop-daemon.sh
# pid_alive NAME PIDFILE
# Print whether NAME (whose PID is recorded in PIDFILE) is running.
# Returns 0 if the process is alive, 1 if the pidfile is missing or the
# recorded process is gone.  Replaces the original duplicated
# `eval "ps -ef | grep \`cat pidfile\` | grep -v grep"` check, which
# could false-positive on any unrelated command line containing the PID.
pid_alive() {
  name="$1"
  pidfile="$2"
  if [ -f "$pidfile" ] && ps -p "$(cat "$pidfile")" >/dev/null 2>&1
  then
    echo "$name running with PID $(cat "$pidfile")"
    return 0
  fi
  echo "$name is not running."
  return 1
}
case "$1" in
  start)
    echo "============================="
    # BUGFIX: message previously read "hdp-hfs3" (typo).
    echo "Starting hdp-nfs3..."
    echo "============================="
    # Portmapper must be up before the NFS3 gateway registers with it.
    echo "Starting Portmapper:"
    $HADOOP_DAEMON_BIN start portmap
    sleep 2
    echo "Starting NFS3 Gateway:"
    su - hdfs $HADOOP_DAEMON_BIN start nfs3
    sleep 5
    ;;
  stop)
    echo "=================================="
    echo "Shutting down hdp-nfs3..."
    echo "=================================="
    # Stop in reverse order of start, then remove stale pidfiles.
    echo "Stopping NFS3 Gateway:"
    su - hdfs $HADOOP_DAEMON_BIN stop nfs3
    echo "Stopping Portmapper:"
    $HADOOP_DAEMON_BIN stop portmap
    rm -f ${HDP_PORTMAP_PID} ${HDP_NFS3_PID}
    ;;
  restart|reload)
    $0 stop
    $0 start
    ;;
  status)
    echo "======================================="
    echo "Checking status of hdp-nfs3..."
    echo "======================================="
    # Same order as the original: NFS3 gateway first, then portmapper;
    # exit 1 as soon as either is found not to be running.
    pid_alive "NFS3 Gateway" "$HDP_NFS3_PID" || exit 1
    pid_alive "Portmapper" "$HDP_PORTMAP_PID" || exit 1
    ;;
  *)
    echo "Usage: $0 {start|stop|restart|status}"
    exit 1
esac
exit $RETVAL
| true |
a82bac9a786be07c2ac6e8e90d0f2510ad8fc55c | Shell | JustForkin/site-deployer | /functions/proftpd.sh | UTF-8 | 475 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# Create a ProFTPD virtual-user account and record it for the client.
#   $1 = FTP username   $2 = plaintext password   $3 = FTP root directory
# Relies on variables sourced from vars.sh / set by the caller:
# MY_SCRIPT_PATH, CLIENT_NAME, PROFTPD_PASSWD_FILE, SECRET_FILE,
# GREEN/CLASSIC (colour codes).
# NOTE(review): `mkpasswd -s` normally means "read password from stdin";
# passing "$2" as an argument here may be treated as a positional
# password/salt instead — confirm against the installed mkpasswd.
# NOTE(review): the plaintext password is appended to SECRET_FILE;
# GID is hard-coded to 33 and the shell to /bin/sh.
function ftpasswd() {
  source ${MY_SCRIPT_PATH}/functions/vars.sh
  export FTP_PASSWORDHASH=$(mkpasswd --hash=md5 -s "$2")
  export UID_CLIENT=$(id -u ${CLIENT_NAME})
  echo "$1:$FTP_PASSWORDHASH:$UID_CLIENT:33::$3:/bin/sh" >> ${PROFTPD_PASSWD_FILE}
  echo "ftpuser=$1" >> ${SECRET_FILE}
  echo "ftproot=$3" >> ${SECRET_FILE}
  echo "ftppassword=$2" >> ${SECRET_FILE}
  echo -e "  -> FTP account for $1 ${GREEN}successfully created${CLASSIC}"
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.