blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9024bf46f6140d017c6ad88428bd45bd55b18032
|
Shell
|
Cognitip/artemis_dashboard
|
/bin/docker/config
|
UTF-8
| 270
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Render docker-compose.yml with all environment variables substituted.
# Useful for debugging environment-variable problems.
# NOTE: For application environmental config values, use `.env` in the
# application root directory.
. .env
docker-compose config
| true
|
9d7e0064c59f335ef754f5a7130fe0d776593acf
|
Shell
|
rust-lang/glacier
|
/ices/83961.sh
|
UTF-8
| 792
| 2.625
| 3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
#!/bin/bash
# ICE reproduction: build a small crate in several layouts, running
# `cargo test` after each change.
cat > Cargo.toml <<'EOF'
[package]
name = "abc"
version = "0.0.1"
edition = "2018"
EOF
mkdir -p src
cat > src/main.rs <<'EOF'
fn main() {
println!("Hello, world!");
}
EOF
cargo test
# Add a function with a doctest to the binary and test again.
cat >> src/main.rs <<'EOF'
/// Examples:
/// ```
/// assert_eq!(fun(), 5);
/// ```
fn fun() -> u8 {
5
}
EOF
cargo test
# Switch to an explicit [[bin]] + [lib] layout.
cat >> Cargo.toml <<'EOF'
[[bin]]
name = "icebin"
path = "src/bin.rs"
[lib]
name = "icelib"
path = "src/lib.rs"
EOF
cat > src/main.rs <<'EOF'
fn main() {
println!("Hello, world!");
}
EOF
cat > src/lib.rs <<'EOF'
/// Examples:
/// ```
/// assert_eq!(icelib::fun(), 5);
/// ```
pub fn fun() -> u8 {
5
}
EOF
mv src/main.rs src/bin.rs
cargo test
mkdir -p src/bin
mv src/bin.rs src/bin/main.rs
# BUG FIX: sed previously wrote to stdout only (no -i), so Cargo.toml
# kept pointing at the now-missing src/bin.rs after the move above.
sed -i 's|src/bin.rs|src/bin/main.rs|' Cargo.toml
cargo test
| true
|
cbc28e9eb7099e5bf100526a8f007c49366c400b
|
Shell
|
lattera/my3status
|
/util.zsh
|
UTF-8
| 179
| 2.8125
| 3
|
[] |
no_license
|
# Copyright (c) 2015 Shawn Webb
# License: 2-clause BSD
# Emit the status-bar protocol preamble: a JSON version object followed
# by the opening of the never-closed array of status lines.
# NOTE(review): looks like the i3bar JSON protocol (version 1) — confirm
# against the consumer.
function do_header() {
cat<<EOF
{
"version": 1
}
[
[]
EOF
}
# Register a status module: append its name ($1) to the global
# $mymodules array (zsh `+=` on an array appends one element).
function add_module() {
module="${1}"
mymodules+=${module}
}
| true
|
e59ee3483b0f993658ae6de3d42439cd48c07ee5
|
Shell
|
matthew-parlette/ubuntu-kickstart
|
/kickstart.sh
|
UTF-8
| 1,495
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Ubuntu workstation kickstart: install packages, fonts, and a custom
# bash prompt. Toggle each stage with the flags below.
#################
# Configuration #
#################
install_software=true
install_fonts=true
install_bashprompt=true
# Development Libraries
dev="build-essential git ruby lamp-server"
# IDE
ide="geany"
# Utilities
util="keepassx mirage gimp vym geary"
# Install software
##################
if $install_software ; then
echo "Installing software..."
sudo apt-get install -y $dev $ide $util > /dev/null
echo "...ok"
fi
# Install and configure fonts
#############################
if $install_fonts ; then
echo "Installing fonts..."
sudo unzip fonts.zip -d /usr/share/fonts > /dev/null
## Ubuntu doesn't have .fonts.conf in the user dir by default,
## so we can just do a straight copy
cp fonts.conf ~/.fonts.conf
echo "...ok"
fi
# Setup bash prompt
###################
if $install_bashprompt ; then
echo "Setting up bash prompt..."
cp bashprompt ~/.bashprompt
#use printf: from http://stackoverflow.com/questions/8467424/echo-new-line-in-bash-prints-literal-n
# BUG FIX: the appended snippet previously read
# "source ~/cat .bashprompt\nnano " (stray editor text), which wrote a
# broken line into ~/.bashrc. Append a clean source line instead.
printf "\n#Setup bash prompt\nsource ~/.bashprompt\n" >> ~/.bashrc
echo "...ok"
fi
# User post-install actions
###########################
if $install_fonts ; then
echo -e "\n\nTo configure fonts on chrome, set the following:"
echo "------------------------------------------------"
echo -e "\tStandard Font - Sans"
echo -e "\tSerif Font - Serif"
echo -e "\tSans-serif font - Sans"
echo -e "\tFixed-width font - monospace"
echo -e "\tMinimum font-size - 11px"
fi
| true
|
37155e48ac2f7c0433590cee3e77a91c7619b366
|
Shell
|
laomaiweng/scripts
|
/misc/dotview
|
UTF-8
| 2,278
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# dotview: render Graphviz .dot files (or directories of them) with `dot`
# into $DOTVIEW_FORMAT images and open them in a viewer program.
# Print a warning on stderr.
warn() {
printf "warning: %s\n" "$*"
} >&2
# Print an error on stderr and terminate the script.
die() {
printf "error: %s\n" "$*"
exit 1
} >&2
# Abort unless the named program is installed.
require() { # 1:prog
command -v "$1" &>/dev/null || die "missing dependency: $1"
}
# Map foo.dot -> foo.$DOTVIEW_FORMAT for --keep; prints nothing (and
# warns) when keeping is off or the name lacks a .dot suffix.
gen_output_name() { # 1:dot_filename
$keep || return
local out="${1%.dot}"
if [[ "$out" == "$1" ]]; then
warn "filename doesn't end in .dot, not keeping: $1"
return
fi
printf "%s.%s" "$out" "$DOTVIEW_FORMAT"
}
# Render one .dot file ($1, or stdin when $1 is empty) to stdout; when
# keeping, also tee a copy to the generated output name.
render() { # 1:input
# kept output
[[ -n "$1" ]] && out="$(gen_output_name "$1")"
if $keep && [[ -n "$out" ]]; then
dot -T"$DOTVIEW_FORMAT" "$1" | tee "$out"
else
dot -T"$DOTVIEW_FORMAT" ${1:+"$1"}
fi
}
# Render $1 into a temp file (named after the input) under $tmpdir.
render_tmp() { # 1:input
tmp="$tmpdir/${1##*/}.$DOTVIEW_FORMAT"
render "$1" >"$tmp"
}
# Remove the temp directory; installed as an EXIT trap below.
cleanup() {
[[ -d "$tmpdir" ]] && rm -rf "$tmpdir"
}
# defaults
: "${DOTVIEW_FORMAT:=png}"
: "${DOTVIEW_PROGRAM_STDIN:="$DOTVIEW_PROGRAM"}"
if [[ "$1" =~ ^(-h|--help)$ ]]; then
cat <<-EOF
usage: ${0##*/} [options] <file|dir> ...
variables:
DOTVIEW_FORMAT dot output format (current: $DOTVIEW_FORMAT)
DOTVIEW_PROGRAM viewer program (current: $DOTVIEW_PROGRAM)
DOTVIEW_PROGRAM_STDIN viewer program for single image (current: $DOTVIEW_PROGRAM_STDIN)
options:
-k, --keep keep generated images (.dot -> .\$DOTVIEW_FORMAT)
EOF
exit
fi
# check viewer
require "${DOTVIEW_PROGRAM_STDIN%% *}" # only require the single-image program, we can fall back to it if missing the multi-image program
require dot
require tee
require mktemp
require rm
[[ "$1" =~ ^(-k|--keep)$ ]] && { shift; keep=true; } || keep=false
# special case if only 1 file: don't touch the disk
if (( $# == 0 )) || { (( $# == 1 )) && [[ -f "$1" ]]; }; then
render "$1" | $DOTVIEW_PROGRAM_STDIN
exit
fi
tmpdir="$(mktemp --tmpdir -d dotview.XXXXXXXX)"
trap cleanup EXIT
# render all dots first
shopt -s nullglob
for f in "$@"; do
if [[ -d "$f" ]]; then
for ff in "$f"/*.dot; do
render_tmp "$ff"
done
else
render_tmp "$f"
fi
done
# now invoke the viewer
if [[ -n "$DOTVIEW_PROGRAM" ]]; then
$DOTVIEW_PROGRAM "$tmpdir"
else
for f in "$tmpdir"/*; do
$DOTVIEW_PROGRAM_STDIN <"$f"
done
fi
| true
|
93d385e59987276a8df2041d40558cd0d62b7aeb
|
Shell
|
varlink/libvarlink
|
/lib/test-symbols.sh
|
UTF-8
| 1,034
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
# Compare the varlink_* functions exported by libvarlink.a against the
# symbols declared in libvarlink.sym. Exits 77 (automake "skip") when the
# required tools or inputs are unavailable.
set -e
libvarlink_sym=${1:-lib/libvarlink.sym}
libvarlink_a=${2:-libvarlink.a}
# 77 skip test
which readelf >/dev/null 2>&1 || exit 77
test -e "${libvarlink_sym}" || exit 77
test -e "${libvarlink_a}" || exit 77
rm -f symbols.list symbols.lib
if readelf -s -W "${libvarlink_a}" | grep -q 'FUNC GLOBAL DEFAULT.*varlink_'; then
readelf -s -W "${libvarlink_a}" |
grep 'FUNC GLOBAL DEFAULT.*varlink_' |
awk '{ print $8 }' |
sort >symbols.list
elif readelf -s -W "${libvarlink_a}" | grep -q gnu_lto; then
# LTO objects have no regular symbol table; need readelf --lto-syms.
if ! readelf -s -W --lto-syms "${libvarlink_a}" &>/dev/null; then
echo "readelf is too old and does not understand \"--lto-syms\"" >&2
exit 77
fi
readelf -s -W --lto-syms "${libvarlink_a}" 2>/dev/null |
grep ' DEF.*DEFAULT.*FUNCTION.*varlink_' |
while read _ _ _ _ _ _ _ f; do
echo ${f#_}
done |
sort >symbols.list
else
exit 1
fi
grep varlink_ "${libvarlink_sym}" | sed 's/[ ;]//g' | sort >symbols.lib
# BUG FIX: under `set -e` a failing diff aborted the script before the
# cleanup and `exit $r` below could ever run; capture the status with
# `|| r=$?` so the temp files are always removed.
r=0
diff -u symbols.list symbols.lib || r=$?
rm -f symbols.list symbols.lib
exit $r
| true
|
173734505e3c8f235d599aa68227bd5e1de4f938
|
Shell
|
jiaxicheng/xc_python
|
/service_xc_python.sh
|
UTF-8
| 1,598
| 4.3125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Wrapper around docker-compose for the xc_python services: validates the
# requested action, exports build/run environment variables, then runs
# the matching docker-compose command.
# pre-defined Action OPTS and regex to match one of the OPTS
OPTS=(build build-run up down start stop)
regex=$(IFS=\|;echo "${OPTS[*]}")
# usage
USAGE="Usage:
$0 [-h|-d folder] <$regex> [SERVICE..]
"
# shared folder to save working codes
SHARED=~/my_code
# process the command options
while getopts "hd:" opt; do
case $opt in
h)
echo "$USAGE"; exit 0
;;
d)
# BUG FIX: was `SHARED=$2`, which grabs a fixed positional argument
# instead of this option's value; getopts supplies it in $OPTARG.
SHARED=$OPTARG
;;
esac
done
shift $((OPTIND-1))
# action must be one of the pre-defined OPTS
# export environments when build and run the applications
# FIX: exit non-zero on an invalid action (was a bare `exit`, i.e. 0).
[[ $1 =~ ^$regex$ ]] || { echo "$USAGE"; exit 1; }
if [[ $1 =~ ^(up|build) ]]; then
# SHARED folder supplied by -d argument or default to '$HOME/my_code'
[[ -d "$SHARED" ]] || { echo "the folder '$SHARED' does not exist"; exit 1; }
export SHARED
# set username, user_uid and user_gid the same as the owner of the SHARED folder
export USER=$(stat -c "%U" "$SHARED")
export USER_UID=$(stat -c "%u" "$SHARED")
export USER_GID=$(stat -c "%g" "$SHARED")
# setup container xauth cookie and DISPLAY based on login
./xauth.init.sh
fi
# wrapper for docker-compose command with required environments for the services
case $1 in
up)
docker-compose up -d "${@:2}"
;;
start)
docker-compose start "${@:2}"
;;
build-run)
docker-compose up -d --build "${@:2}"
;;
build)
docker-compose build --force-rm "${@:2}"
;;
down)
docker-compose down --remove-orphans
;;
stop)
docker-compose stop "${@:2}"
;;
*)
echo "$USAGE"
exit 1
;;
esac
| true
|
49ecab1fdccb18e191e6c97fb0db4ae9215f042d
|
Shell
|
y2q-actionman/cl-aws-custom-runtime-test
|
/build-bootstrap-out/build_bootstrap_in_vm.sh
|
UTF-8
| 1,073
| 3.546875
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# Build an AWS Lambda custom-runtime 'bootstrap' executable with SBCL
# and package it into /out/$ZIP_NAME.
# NOTE(review): $1 is not validated — an empty ZIP_NAME yields 'zip /out/'.
ZIP_NAME=$1
BIN_NAME="bootstrap"
# Makes a 'bootstrap' binary with SBCL.
# This code does following:
# - Load some small libraries and quicklisp libraries.
# - Load our custom runtime.
# - Prints list of installed systems. (for AWS-lambda function writers.)
# - Makes a single binary named $BIN_NAME. To start as a bootstrap,
# I specified :toplevel to our bootstrap function.
/usr/local/bin/sbcl \
--non-interactive \
--eval "(ql:quickload '#:aws-lambda-function-util)" \
--eval "(ql:quickload '#:aws-lambda-runtime-additional-libraries)" \
--eval "(ql:quickload '#:aws-lambda-runtime)" \
--eval "(with-open-file (stream \"/out/installed-libs.txt\" :direction :output :if-exists :supersede) \
(pprint (aws-lambda-function-util:list-installed-systems) stream))" \
--eval "(sb-ext:save-lisp-and-die \"$BIN_NAME\" :executable t :toplevel 'aws-lambda-runtime::bootstrap)"
# Make a zip file from the binary. This will be uploaded to AWS Lambda as a custom runtime.
zip /out/$ZIP_NAME $BIN_NAME
# cleanup
rm $BIN_NAME
| true
|
36339f1632df37e6c4b3098d46e750edefb6e22d
|
Shell
|
ianatol/minic
|
/minic/benchmark.sh
|
UTF-8
| 2,095
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Benchmark driver: compile a .mini benchmark with minic, lower the
# intermediate output with clang, run it against reference inputs, and
# append timing/size stats to the file literally named "TIMES" (distinct
# from the $TIMES variable used below).
function usage {
echo "syntax error : $1"
echo "Usage: $0 <benchmark_name> (compile_target) [optimizations]"
exit 1
}
[ $# -eq 2 -o $# -eq 3 ] || usage "You must provide two or three parameters"
BENCHMARK_NAME="$1"
echo "##################################"
echo "#### Benchmarking : $BENCHMARK_NAME ####"
echo "##################################"
TEMPFILE="./benchmark.temp.out"
CLANG="clang"
MINIC="./target/debug/minic"
BENCHMARK_DIR="benchmarks/$BENCHMARK_NAME"
BENCHMARK_SOURCE="$BENCHMARK_DIR/$BENCHMARK_NAME.mini"
[ -f "$BENCHMARK_SOURCE" ] || usage "Couldn't find benchmark at '$BENCHMARK_SOURCE'"
# The -a/-ar compile targets emit assembly instead of LLVM IR.
LLVM_FILE="intermediate.ll"
if [ "$2" == "-a" -o "$2" == "-ar" ]
then
LLVM_FILE="intermediate.s"
fi
if [ ! -f "$BENCHMARK_SOURCE" ]
then
echo "Couldn't find $BENCHMARK_SOURCE"
exit 999
fi
echo "Benchmarking : $BENCHMARK_NAME"
echo "Compiling : $BENCHMARK_SOURCE"
"$MINIC" "$BENCHMARK_SOURCE" "$2" $3 > /dev/null
if [ "$?" -ne 0 ]
then
echo "Minic compilation failed"
echo "${BENCHMARK_NAME},<minic>" >> "TIMES"
exit 1
fi
SOURCE_SIZE=`wc -l "$LLVM_FILE" | sed 's/^ *//;s/ .*//;'`
echo "Compiling LLVM..."
"$CLANG" "$LLVM_FILE" "src/helper.c" > /dev/null
if [ "$?" -ne 0 ]
then
echo "LLVM compilation failed"
echo "${BENCHMARK_NAME},<llvm>" >> "TIMES"
exit 2
fi
COMPILED_SIZE=`wc -c "a.out" | sed 's/^ *//;s/ .*//;'`
TIMES=""
# Run twice: the standard input set and the ".longer" input set.
for ENDING in "" ".longer"
do
INPUT="${BENCHMARK_DIR}/input${ENDING}"
OUTPUT="${BENCHMARK_DIR}/output${ENDING}"
echo "Executing a.out with input from '$INPUT'"
RUNTIME=`(time -p ./a.out < "$INPUT" > "$TEMPFILE" ) 2>&1 | grep user | sed 's/user[^0-9]*//'`
# NOTE(review): $? below is the status of the grep|sed pipeline above,
# not of ./a.out, so the <crash> branch may never trigger — confirm.
if [ $? -ne 0 ]
then
echo "Execution with input \"$INPUT\" failed"
echo "${BENCHMARK_NAME},<crash>" >> "TIMES"
exit 4
else
if (diff "$OUTPUT" "$TEMPFILE" > /dev/null)
then
echo "Success : $RUNTIME"
TIMES="$TIMES,$RUNTIME"
else
echo "Output didn't match reference version"
echo "${BENCHMARK_NAME},<diff>" >> "TIMES"
exit 3
fi
fi
rm "$TEMPFILE"
done
echo "${BENCHMARK_NAME}${TIMES},${SOURCE_SIZE},${COMPILED_SIZE}" >> "TIMES"
exit 0
| true
|
9d5544a77c3ef664e6a8cd8fefe53790e8483dd5
|
Shell
|
danvk/git-helpers
|
/admin/git-convert-links-to-aliases
|
UTF-8
| 1,960
| 4.1875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Convert symlinked git-<name> helper scripts found under each argument's
# 'aliases' directory into git aliases recorded in a per-directory
# .gitconfig, wiring that .gitconfig into the master config's [include]
# list, then removing the symlinks from the repository.
source "$GIT_HELPERS_HOME"/config/.helpers-rc
cd "$(git root)"
for arg in "$@"; do
if [ ! -d "$arg" ]; then
echo "Bad arg, not a dir: $arg" 1>&2
continue
fi
# NOTE(review): assumes exactly one 'aliases' dir per argument — a
# multi-line find result would break pushd; confirm.
aliases_dir="$(find "$arg" -type d -name 'aliases')"
pushd "$aliases_dir" &> /dev/null
config_file_path="../.gitconfig"
config_file_path_from_root="$(git relpath "$(git root)" "$config_file_path")"
master_config_file="$(git root)"/config/.gitconfig
config_file_path_from_master_config="$(git relpath "$master_config_file" "$config_file_path")"
# Ensure the per-directory .gitconfig exists and is included by the
# master config exactly once.
if ! git config -f "$master_config_file" --get-all include.path | grep -q "$config_file_path_from_master_config"; then
echo "Adding config file: $config_file_path_from_root"
touch "$config_file_path"
git add "$config_file_path"
echo "Adding $config_file_path_from_master_config to [include] list of $master_config_file"
git config -f "$master_config_file" --add include.path "$config_file_path_from_master_config"
echo "Done. Included paths from $master_config_file:"
git config -f "$master_config_file" --get-all include.path
echo ''
fi
# Turn each git-<name> symlink into an alias <name> -> <dest>.
for full_link in $(git links); do
base_link="$(basename "$full_link")"
if [[ ! "$base_link" =~ ^git- ]]; then
echo "Link $full_link in $PWD doesn't start with 'git-'" 1>&2
continue
fi
link="${base_link#git-}"
base_dest="$(basename "$(readlink "$base_link")")"
if [[ ! "$base_dest" =~ ^git- ]]; then
echo "Link destination $base_dest in $PWD doesn't start with 'git-'" 1>&2
continue
fi
dest="${base_dest#git-}"
if git config -f "$config_file_path" alias."$link" &> /dev/null; then
echo "Found alias.$link in $config_file_path_from_root already existing" 1>&2
else
git config -f "$config_file_path" alias."$link" "$dest"
echo "Set alias.$link to $dest in $config_file_path_from_root"
fi
git rm -f "$full_link"
done
popd &> /dev/null
done
| true
|
fa7d3a930f2108f089b71f10d0b0cdd3fed8a613
|
Shell
|
RTEMS/rtems-release
|
/rtems-source-packages
|
UTF-8
| 2,352
| 3.046875
| 3
|
[] |
no_license
|
#! /bin/sh
#
# RTEMS Tools Project (http://www.rtems.org/)
# Copyright 2015,2016,2019 Chris Johns (chrisj@rtems.org)
# All rights reserved.
#
# This file is part of the RTEMS Tools package in 'rtems-tools'.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# The script collects the sources packages for RTEMS for a version. If
# there are no packages set the variable to None.
#
# Expects ${version} to be set by the caller that sources this fragment.
rtems_pkgs="${version}/rtems-all"
bare_pkgs=None
# FIX: '==' is a bashism not supported by POSIX test under #!/bin/sh;
# use '=' and quote ${version} so an unset value cannot break the test.
if [ "${version}" = "4.11" ]; then
# RTEMS packages
rtems_pkgs="${rtems_pkgs} ${version}/graphics/libjpeg"
rtems_pkgs="${rtems_pkgs} ${version}/databases/sqlite"
rtems_pkgs="${rtems_pkgs} ${version}/net-mgmt/net-snmp"
rtems_pkgs="${rtems_pkgs} ${version}/net/ntp"
rtems_pkgs="${rtems_pkgs} ${version}/net/protobuf"
# Bare packages
bare_pkgs="devel/dtc"
bare_pkgs="${bare_pkgs} devel/qemu"
elif [ "${version}" -ge 5 ]; then
# RTEMS packages
rtems_pkgs="${rtems_pkgs} ${version}/rtems-packages"
# Bare packages
bare_pkgs="devel/qemu"
bare_pkgs="${bare_pkgs} devel/qemu4"
bare_pkgs="${bare_pkgs} devel/qemu-couverture"
bare_pkgs="${bare_pkgs} devel/sis"
bare_pkgs="${bare_pkgs} devel/spike"
fi
| true
|
d60b39ee4d6d3bc71cab258e94f9cfc695c6f648
|
Shell
|
RayHuo/GLoop
|
/readFiles/run.sh
|
UTF-8
| 146
| 2.875
| 3
|
[] |
no_license
|
#! /bin/bash
# Collect the absolute path of every entry in ./files/ and pass them all
# as arguments to ./a.out.
cd ./files/ || exit 1
# FIX: build the list with a glob and an array instead of parsing `ls`
# output — robust to filenames containing whitespace.
argus=()
for fileName in *
do
argus+=("$(realpath "$fileName")")
done
#echo "${argus[@]}"
cd ..
./a.out "${argus[@]}"
| true
|
c8476741b06f27d0b422f5629cdba72659387802
|
Shell
|
cp2017/docker-geth
|
/start.sh
|
UTF-8
| 472
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch geth in the background (optionally seeding a test database and
# running a bootstrap script first), then bring it to the foreground.
set -m
# Optionally replace the chain data with the bundled test database.
if [[ "${GETH_TEST_DATABASE}" == "true" ]]; then
    echo ">>>>>> Use GETH test database"
    rm -rf /data/db
    cp -r /opt/db /data/
fi
# Start the node as a background job so the bootstrap step can talk to it.
geth --datadir /data/db --nodiscover --port 30301 --rpc --rpcapi db,eth,miner,net,web3,personal,shh --shh --rpcaddr 0.0.0.0 --rpccorsdomain "*" --rpcport 8545 --ipcpath /root/.ethereum/geth.ipc --ipcapi "db,eth,net,web3,miner" &
if [[ "${GETH_BOOTSTRAP}" == "true" ]]; then
    sleep 3
    /opt/bootstrap.sh
fi
# Reattach geth as the foreground process (requires job control: set -m).
fg
| true
|
2d3c31eb77d9e7c289a79076606e4ae80f07461e
|
Shell
|
si-medbif/exomeseq
|
/run_analysis.sh
|
UTF-8
| 679
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#################################################
# Check if script is run from the exomeseq folder
# If yes, move to the parent (main project) folder
##################################################
folder=${PWD##*/}
# FIX: quote the expansion so a path with spaces cannot break the test.
if [ "${folder}" = 'exomeseq' ]
then
cd ..
fi
#################################################
# ANALYSIS:
# Run the analysis for each sample:
# alignment, variant calling, annotation,
# output-parsing (after all samples are analysed)
#################################################
# FIX: read sample names line by line instead of word-splitting the
# output of `cat` — robust to stray whitespace in the list files.
while IFS= read -r SAMPLE
do
[ -n "${SAMPLE}" ] || continue
"${SAMPLE}/Scripts/${SAMPLE}_GATK.sh"
done < <(cat samples.paired.list samples.single.list)
exomeseq/parsevcf.py -o full_report.txt
| true
|
f75242faf34d037a9e576d9fabd26a105b6938bb
|
Shell
|
graeme-hill/gbox
|
/init.sh
|
UTF-8
| 1,510
| 2.90625
| 3
|
[] |
no_license
|
# gbox init: bootstrap an Arch Linux workstation — shell/editor configs,
# yay (AUR helper), dev toolchain, and user environment tweaks.
# NOTE(review): the user name 'graeme' and theme paths are hard-coded below.
cp .zshrc ~/.zshrc
mkdir -p ~/.config
mkdir -p ~/.config/nvim
cp init.vim ~/.config/nvim/init.vim
# Reset and re-seed the pacman keyring before installing anything.
sudo rm -r /etc/pacman.d/gnupg
sudo pacman-key --init
sudo pacman-key --populate archlinux
sudo pacman -Sy --noconfirm --needed git base-devel
# Install yay itself
if [ ! -d "yay" ] ; then
git clone https://aur.archlinux.org/yay.git yay
pushd yay
makepkg -si --noconfirm
popd
fi
# Update package list and upgrade anything existing out of date packages
yay -Syu --needed
# Install a bunch of things
yay -S --noconfirm --needed \
docker docker-compose nvm-git neovim zsh oh-my-zsh-git yarn tree ripgrep python \
python-pip python2 python2-pip ruby rubygems clang dos2unix
# Make sure config files have correct line endings
dos2unix $HOME/.zshrc
dos2unix $HOME/.config/nvim/init.vim
# Allow neovim python plugins
pip3 install --user neovim
pip2 install --user neovim
# Allow neovim ruby plugins
gem install neovim
# Get a node
source /usr/share/nvm/init-nvm.sh
nvm install node
# Typescript and co + neovim plugin support
npm install -g typescript prettier neovim concurrently
# Install vim plugins so they are there on first run
nvim +'PlugInstall --sync' +UpdateRemotePlugins +qa
# golang things
go get -u github.com/kardianos/govendor
# change shell to zsh
sudo chsh -s "$(command -v zsh)" graeme
# allow docker without sudo
sudo usermod -aG docker $USER
# Install the custom oh-my-zsh theme (staged via ~/temp so sudo mv works).
mkdir -p ~/temp
cp graeme.zsh-theme ~/temp/graeme.zsh-theme
sudo mv /home/graeme/temp/graeme.zsh-theme /usr/share/oh-my-zsh/themes
| true
|
427ee04d8e60eaec758dd18dc78832d5dfdc4dad
|
Shell
|
ghassan-alaka/GPLOT
|
/ush/get_ECMWF4.sh
|
UTF-8
| 7,092
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash --login
# get_ECMWF4.sh
# Retrieves ECMWF open-data model output over HTTP. Uses 'wget' to
# download the GRIB2 data and 'wgrib2' to check the status of each GRIB2
# file once downloaded. Data will be kept on disk for one week.
# FIX: shebang was '/bin/sh --login', but the script relies on bash-only
# features (arrays, [[ ]]); invoke bash explicitly.
echo "MSG: get_ECMWF4.sh started at `date`"
# Load modules
module load intel/2022.1.2
module load wgrib2/2.0.8
# Get input arguments: output directory, and whether to repack GRIB2
# files from CCSDS to simple packing (default NO).
ODIR="${1}"
REPACK="${2:-NO}"
# Define important variables
HTTP_DIR="https://data.ecmwf.int/forecasts"
RESOL="0p4-beta"
STREAM00="oper"
STREAM06="scda"
TYPE="fc"
# Forecast hours: 00/12Z runs out to 240 h, 06/18Z runs out to 90 h.
FHR_LIST00=( $(seq 0 3 144) )
FHR_LIST00+=( $(seq 150 6 240) )
FHR_LIST06=( $(seq 0 3 90) )
# Create and enter the output directory
mkdir -p ${ODIR}
cd ${ODIR}
YMDH_NOW="`date +'%Y%m%d%H'`"
YMDH_OLD="`date --date="4 day ago" +'%Y%m%d%H'`"
YMDH_SCRUB="`date --date="7 day ago" +'%Y%m%d%H'`"
echo "MSG: Current date: ${YMDH_NOW}"
# If REPACK=YES, load ecCodes (provides grib_set) and set the size below
# which a file is assumed to still be CCSDS-packed.
if [ "${REPACK}" == "YES" ]; then
minsize=35000000
ECCODES_DIR="${3:-/lfs1/HFIP/hur-aoml/Ghassan.Alaka/software/eccodes/eccodes-2.30.2}"
export PATH="${ECCODES_DIR}/bin:${PATH}"
export LD_LIBRARY_PATH="${ECCODES_DIR}/lib64:${ECCODES_DIR}/lib:${LD_LIBRARY_PATH}"
export CPATH="${ECCODES_DIR}/include:${CPATH}"
fi
echo ""
echo "MSG: ***************************"
echo "MSG: DOWNLOAD SECTION"
# Cycles that ended up with at least one file on disk (kept from scrub).
HTTP_CYCLES=()
YMDH="${YMDH_NOW}"
N=0
# Walk backwards through the 6-hourly cycles of the last 4 days.
while [ "${YMDH}" -gt "${YMDH_OLD}" ]; do
# Get the current time and the expected offset
# to get the cycle of interest
YMDH_NOW="`date +'%Y%m%d%H'`"
HH_NOW="`echo "${YMDH_NOW}" | cut -c9-10`"
HH_OFF="$(( ${HH_NOW} % 6 + (N * 6) ))"
# Get the cycle of interest.
YMDH="`date --date="${HH_OFF} hour ago" +'%Y%m%d%H'`"
HH=`echo "${YMDH}" | cut -c9-10`
YMD="`date --date="${HH_OFF} hour ago" +'%Y%m%d'`"
# Create the cycle directory.
echo ""
echo "MSG: Current cycle --> ${YMDH}"
mkdir -p ${YMDH}
# Define variables based on the epoch:
# 00/12Z cycles use the long 'oper' stream, 06/18Z the short 'scda' one.
if [[ "00 12" == *"${HH}"* ]]; then
FHR_LIST=( "${FHR_LIST00[@]}" )
STREAM="${STREAM00}"
elif [[ "06 18" == *"${HH}"* ]]; then
FHR_LIST=( "${FHR_LIST06[@]}" )
STREAM="${STREAM06}"
fi
# Build the expected HTTP path
HTTP_PATH="${HTTP_DIR}/${YMD}/${HH}z/${RESOL}/${STREAM}"
for F in ${FHR_LIST[@]}; do
# 3-digit forecast hour (10# forces base-10 despite leading zeros)
FFF="$(printf "%03d\n" $((10#${F})))"
# Define HTTP and output file names
HTTP_FNAME="${YMDH}0000-${F}h-${STREAM}-${TYPE}.grib2"
TMP_FNAME="ecmwf.TMP.${YMDH}.f${FFF}.grib2"
O_FNAME="ecmwf.oper-fc.${YMDH}.f${FFF}.grib2"
echo ""
echo "MSG: Retrieving this file: ${HTTP_PATH}/${HTTP_FNAME}"
echo "MSG: Delivering file here: ${PWD}/${YMDH}/${O_FNAME}"
# Check if the file is corrupt. If so, remove it.
if [ -f ${YMDH}/${O_FNAME} ]; then
echo "MSG: The file already exists, so I will check it."
wgrib2 ${YMDH}/${O_FNAME} &> /dev/null
if [ $? -ne 0 ]; then
echo "MSG: File is corrupt, so I will remove it."
rm -f ${YMDH}/${O_FNAME}
else
echo "MSG: The file looks good. Moving on."
if [ "${REPACK}" == "YES" ]; then
echo "MSG: Repack requested (CCSDS-->Simple)"
filesize=$(stat -c%s "${YMDH}/${O_FNAME}")
if (( filesize < minsize )); then
echo "MSG: File size (${filesize}) is too small. Executing simple repack."
grib_set -r -w packingType=grid_ccsds -s packingType=grid_simple ${YMDH}/${O_FNAME} ${YMDH}/${TMP_FNAME}
if ! cmp --silent -- "${YMDH}/${O_FNAME}" "${YMDH}/${TMP_FNAME}"; then
cp -p ${YMDH}/${TMP_FNAME} ${YMDH}/${O_FNAME}
fi
rm -f ${YMDH}/${TMP_FNAME}
else
echo "MSG: File size (${filesize}) indicates simple packing style. No repack required."
fi
fi
continue
fi
fi
# If the file does not exist, download it.
if [ ! -f ${YMDH}/${O_FNAME} ]; then
echo "MSG: Downloading the file with this command:"
echo "MSG: [wget -T 5 -O ${YMDH}/${O_FNAME} ${HTTP_PATH}/${HTTP_FNAME}]"
#wget -T 5 -np -nH --cut-dirs=5 -O ${YMDH}/${O_FNAME} ${HTTP_PATH}/${HTTP_FNAME}
wget -q -T 5 -O ${YMDH}/${O_FNAME} ${HTTP_PATH}/${HTTP_FNAME}
fi
# If the file is empty, delete it.
if [ ! -s ${YMDH}/${O_FNAME} ]; then
echo "MSG: File is empty, so I will remove it."
rm -f ${YMDH}/${O_FNAME}
# Otherwise, check if the file is corrupt.
else
wgrib2 ${YMDH}/${O_FNAME} &> /dev/null
if [ $? -ne 0 ]; then
echo "MSG: File is corrupt, so I will remove it."
rm -f ${YMDH}/${O_FNAME}
else
echo "MSG: File downloaded successfully."
if [ "${REPACK}" == "YES" ]; then
echo "MSG: Repack requested (CCSDS-->Simple)"
filesize=$(stat -c%s "${YMDH}/${O_FNAME}")
if (( filesize < minsize )); then
echo "MSG: File size (${filesize}) is too small. Executing simple repack."
grib_set -r -w packingType=grid_ccsds -s packingType=grid_simple ${YMDH}/${O_FNAME} ${YMDH}/${TMP_FNAME}
if ! cmp --silent -- "${YMDH}/${O_FNAME}" "${YMDH}/${TMP_FNAME}"; then
cp -p ${YMDH}/${TMP_FNAME} ${YMDH}/${O_FNAME}
fi
rm -f ${YMDH}/${TMP_FNAME}
else
echo "MSG: File size (${filesize}) indicates simple packing style. No repack required."
fi
fi
fi
fi
done
# Download the BUFR file with TC tracker info (if available)
HTTP_FNAME="${YMDH}0000-${FHR_LIST[-1]}h-${STREAM}-tf.bufr"
if [ ! -f ${YMDH}/${HTTP_FNAME} ]; then
echo "MSG: Retrieving this file: ${HTTP_PATH}/${HTTP_FNAME}"
echo "MSG: Delivering file here: ${PWD}/${YMDH}/${HTTP_FNAME}"
echo "MSG: Downloading the file with this command:"
echo "MSG: [wget -q -T 5 -O ${YMDH}/${HTTP_FNAME} ${HTTP_PATH}/${HTTP_FNAME}]"
wget -q -T 5 -O ${YMDH}/${HTTP_FNAME} ${HTTP_PATH}/${HTTP_FNAME}
# If the file is empty, delete it.
if [ ! -s ${YMDH}/${HTTP_FNAME} ]; then
echo "MSG: File is empty, so I will remove it."
rm -f ${YMDH}/${HTTP_FNAME}
fi
fi
# Check if the cycle directory is empty
if [ -z "$(ls -A ${YMDH})" ]; then
rm -rf ${YMDH}
else
HTTP_CYCLES+=( "${YMDH}" )
fi
# Increase the counter (steps back one 6-hour cycle per iteration)
((N++))
done
# Scrub old or empty cycle directories
echo ""
echo "MSG: ***************************"
echo "MSG: SCRUB SECTION"
echo "MSG: I won't scrub these cycles because they are active --> ${HTTP_CYCLES[*]}"
DISK_CYCLES=( $(ls -d * 2>/dev/null) )
re='^[0-9]+$'
for CYCLE in ${DISK_CYCLES[@]}; do
echo "MSG: Found this cycle on disk: ${CYCLE}"
if ! [[ ${CYCLE} =~ ${re} ]]; then
echo "MSG: ${CYCLE} is not a date, so I will skip it."
continue
elif [ "${CYCLE}" -lt "${YMDH_SCRUB}" ]; then
echo "MSG: Removing ${CYCLE} because it is too old."
rm -rf ${CYCLE}
elif [ -z "$(ls -A ${CYCLE})" ]; then
echo "MSG: Removing ${CYCLE} because it is empty."
rm -rf ${CYCLE}
# BUG FIX: this read 'else:' (a Python-ism). 'else:' is not the shell
# keyword, so it ran as a bogus command inside the empty-dir branch and
# the keep-message below was never reached for active cycles.
else
echo "MSG: ${CYCLE} is still active, so I will keep it."
fi
done
echo "MSG: get_ECMWF4.sh completed at `date`"
| true
|
1d534243034f46ff3cb587c5d075b0a27042361f
|
Shell
|
kibook/1436chan
|
/newthread
|
UTF-8
| 2,024
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/sh
# newthread — create a new thread on this gopher board.
# Validates the title, enforces the posting cooldown and the MAX_THREADS
# cap (archiving or deleting the oldest live thread when full), then
# builds the thread directory from the template_* files.
# NOTE(review): pherror/phinfo/phitem/phline and the MAX_*/POST_LIMIT/
# CHAN_ROOT variables come from params.sh — assumed to emit gophermap
# lines; confirm there.
umask 002
cd $(dirname "$0")
. ./params.sh
# All command-line arguments joined together form the thread title.
comment="$@"
len=$(echo -n "$comment" | wc -c)
# 'threads' stores the highest thread number ever issued.
if [ -e threads ]
then
no=$(cat threads)
else
no=0
fi
no=$(($no + 1))
stamp=$(date +%s)
post=$no
if [ "$len" -gt "$MAX_TITLELEN" ]
then
pherror 'Thread creation failed.'
phline
phinfo "Title too long ($len/$MAX_TITLELEN)"
phitem 1 'Return' "$CHAN_ROOT"
exit 1
fi
if [ "$MAX_THREADS" -lt 1 ]
then
pherror 'New threads are disabled on this board.'
phitem 1 Return "$CHAN_ROOT"
exit 1
fi
if [ "$comment" = "" ]
then
pherror 'Thread creation failed: You must include a title'
phitem 1 Return "$CHAN_ROOT"
exit 1
fi
if ! sh cooldown.sh thread
then
pherror "Thread creation failed"
phline
phinfo "Please wait $POST_LIMIT seconds before creating a new thread."
phitem 1 "Return" "$CHAN_ROOT"
exit 1
fi
# Count live (non-archived) threads; when the board is full, find the
# least-recently-modified live thread and archive or delete it.
nthreads=$(ls -d [0-9]* 2>/dev/null | wc -l)
narchive=$(ls -d [0-9]*/archive 2>/dev/null | wc -l)
if [ $((nthreads - narchive)) -ge "$MAX_THREADS" ]
then
# ls -dt sorts newest first, so $old ends as the oldest live thread.
for thread in $(ls -dt [0-9]* 2>/dev/null)
do
if [ -e $thread/archive ]
then
continue
else
old=$thread
fi
done
if [ "$ENABLE_ARCHIVE" != n ]
then
sh archive.sh $old
if [ ! -e $old/archive ]
then
pherror 'Failed to archive thread!'
phitem 1 Return "$CHAN_ROOT"
exit
fi
else
rm -r $old
if [ -e $old ]
then
pherror 'Failed to remove thread!'
phitem 1 Return "$CHAN_ROOT"
exit
fi
fi
fi
# Create the thread directory and hard-link in the shared templates.
mkdir $post
if [ -e $post ]
then
echo "$comment" > $post/gophertag
ln template_gophermap $post/gophermap
ln template_post $post/post
ln template_postlink $post/postlink
ln template_postfile $post/postfile
ln template_postsplr $post/postsplr
ln template_postfileb64 $post/postfileb64
ln template_postb64 $post/postb64
touch $post/postcache
echo $stamp > $post/stamp
phinfo 'Thread created successfully!'
phitem 1 'View thread' "$CHAN_ROOT/$post"
echo $no > threads
sh updatethreadcache.sh > threadcache
else
pherror 'Could not create thread.'
phitem 1 Return "$CHAN_ROOT"
exit 1
fi
| true
|
fe9b02c2ac8e2b66c9640a586d1ef3dd2f5d47b5
|
Shell
|
saintbyte/electronL-lastimage-downloader
|
/get_by_bash.sh
|
UTF-8
| 540
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the latest Electro-L RGB image from the NTSOMZ FTP archive.
# Images are half-hourly, so the current minute is rounded down to :00
# or :30 before building the URL.
# NOTE(review): 'set +ui' disables nounset (+u), but '+i' is not a
# settable option of the set builtin — likely a typo; confirm intent.
set +ui
set -x
cd /root/tmp/electro_git/
#ftp://electro:electro@ftp.ntsomz.ru/2014/January/28/1030/140128_1030_original_RGB.jpg
date
# FIX: -p keeps re-runs quiet when ./images/ already exists.
mkdir -p ./images/
HOUR=`date +%H`
MIN=`date +%M`
DATE_PREFIX=`date +%Y/%B/%d`
DATE_PREFIX2=`date +%y%m%d`
echo $HOUR
# Round the minute down to the latest half-hour slot.
if [ $MIN -ge 30 ];
then
MIN=30
else
MIN='00'
fi
echo $MIN
URL="ftp://electro:electro@ftp.ntsomz.ru/${DATE_PREFIX}/${HOUR}${MIN}/${DATE_PREFIX2}_${HOUR}${MIN}_original_RGB.jpg"
LOCAL_FILENAME=`basename $URL`
wget --tries=10 -O ./images/${LOCAL_FILENAME} $URL
echo FILENAME:${LOCAL_FILENAME}
| true
|
1c8af3311a195132180053b61cddacb1ced39635
|
Shell
|
casturm/dotfiles
|
/.zshrc
|
UTF-8
| 9,448
| 2.984375
| 3
|
[] |
no_license
|
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
# Set umask
umask g-w,o-rwx
# If command is a path, cd into it
setopt auto_cd
# Colourful message helpers: bold header, green check mark, red cross.
e_header() { echo -e "\n\033[1m$@\033[0m"; }
e_success() { echo -e " \033[1;32m✔\033[0m $@"; }
e_error() { echo -e " \033[1;31m✖\033[0m $@"; }
# Load zgenom only if a user types a zgenom command
# Lazy stub: on first call, clone zgenom if it is missing, source the
# real implementation (which redefines this function), then forward the
# current invocation to it.
zgenom () {
if [[ ! -s ${ZDOTDIR:-${HOME}}/.zgenom/zgenom.zsh ]]; then
git clone https://github.com/jandamm/zgenom.git ${ZDOTDIR:-${HOME}}/.zgenom
fi
source ${ZDOTDIR:-${HOME}}/.zgenom/zgenom.zsh
zgenom "$@"
}
# Generate zgenom init script if needed
if ! zgenom saved; then
e_header "Creating zgenom save"
zgenom oh-my-zsh plugins/shrink-path
zgenom load zsh-users/zsh-autosuggestions
zgenom load zdharma-continuum/fast-syntax-highlighting
zgenom load zsh-users/zsh-history-substring-search
zgenom load zsh-users/zsh-completions
zgenom save
fi
# Load dircolors
if [ -s ${ZDOTDIR:-${HOME}}/.dircolors ]; then
if (( $+commands[gdircolors] )); then
eval $(command gdircolors -b ${ZDOTDIR:-${HOME}}/.dircolors)
elif (( $+commands[dircolors] )); then
eval $(command dircolors -b ${ZDOTDIR:-${HOME}}/.dircolors)
fi
fi
# Load settings
if [[ ! -s ${ZDOTDIR:-${HOME}}/.config/zsh/cache/settings.zsh ]]; then
source ${ZDOTDIR:-${HOME}}/.config/zsh/functions.zsh
recreateCachedSettingsFile
fi
# theme settings
# ZSH_THEME="juanghurtado"
ZSH_THEME="powerlevel10k/powerlevel10k"
# omz path
export ZSH="$HOME/.oh-my-zsh"
# source omz
source $ZSH/oh-my-zsh.sh
# source ${ZDOTDIR:-${HOME}}/.config/zsh/cache/settings.zsh
alias z='echo dood'
# Remove whitespace after the RPROMPT
ZLE_RPROMPT_INDENT=0
setopt no_beep
#
# alias.zsh:
#
if [ `uname` = Darwin ]; then
alias ls='/usr/local/bin/gls --color=auto'
else
alias ls='/bin/ls --color=auto'
fi
# Easier navigation: .., ..., ...., ....., ~ and -
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
alias .....="cd ../../../.."
alias -- -="cd -"
# Shortcuts
alias d="cd ~/Documents/Google\ Drive"
alias dl="cd ~/Downloads"
alias dt="cd ~/Desktop"
alias p="cd ~/dev"
alias g="git"
alias ga="git add"
alias gc="git commit -m"
alias gca="git commit -a -m"
alias gd="git diff"
alias gp="git push"
alias s="git status"
alias h="history"
alias j="jobs"
alias be="bundle exec"
# Vim shortcuts
alias vi=vim
alias :e="\$EDITOR"
alias :q="exit"
alias l="ls -A -F"
alias ll="ls -h -l "
alias la="ls -a"
# List only directories and symbolic links that point to directories
alias lsd='ls -ld *(-/DN)'
# List only file beginning with "."
alias lsa='ls -ld .*'
alias grep="grep --color=auto"
alias know="vim ${HOME}/.ssh/known_hosts"
alias mc="mc --nosubshell"
alias reload!=". ${HOME}/.zshrc"
alias takeover="tmux detach -a"
alias vu="vagrant up"
alias vh="vagrant halt"
alias vp="vagrant provision"
alias vr="vagrant reload"
alias vs="vagrant ssh"
alias vbu="vagrant box update"
for method in GET HEAD POST PUT DELETE TRACE OPTIONS; do
alias "$method"="lwp-request -m '$method'"
done
# Tmux
alias tmux="TERM=xterm-256color tmux"
alias tx="tmuxinator"
alias mx="tmuxinator start mars"
# Vericity
alias flex_db_prod='psql -h database.cisprod.vericity.net -d flex_event_service_v2 -U csturm --password'
alias flex_db_int='psql -h database.marsint.vericity.net -d flex_event_service_v2 -U flex --password'
alias flex_db_qa='psql -h database.marsqa.vericity.net -d flex_event_service_v2 -U flex --password'
alias nbx_db_prod='psql -h database.cisprod.vericity.net -d nbx -U csturm --password'
alias nbx_db_int='psql -h database.marsint.vericity.net -d nbx -U nbx --password'
alias nbx_db_qa='psql -h database.marsqa.vericity.net -d nbx -U nbx --password'
#
# completions.zsh:
#
# Completion
[ -d /usr/local/share/zsh-completions ] && fpath=(/usr/local/share/zsh-completions $fpath)
zstyle ':completion::complete:*' use-cache on # completion caching, use rehash to clear
zstyle ':completion:*' cache-path ${ZDOTDIR:-${HOME}}/.config/zsh/cache # cache path
# Ignore completion functions for commands you don’t have
zstyle ':completion:*:functions' ignored-patterns '_*'
# Zstyle show completion menu if 2 or more items to select
zstyle ':completion:*' menu select=2
zstyle ':completion:*:default' list-colors ${(s.:.)LS_COLORS}
# Format autocompletion style
zstyle ':completion:*:descriptions' format "%{$fg[green]%}%d%{$reset_color%}"
zstyle ':completion:*:corrections' format "%{$fg[orange]%}%d%{$reset_color%}"
zstyle ':completion:*:messages' format "%{$fg[red]%}%d%{$reset_color%}"
zstyle ':completion:*:warnings' format "%{$fg[red]%}%d%{$reset_color%}"
zstyle ':completion:*' format "--[ %B%F{074}%d%f%b ]--"
zstyle ':completion:*:*:*:*:*' menu select
zstyle ':completion:*:matches' group 'yes'
zstyle ':completion:*:options' description 'yes'
zstyle ':completion:*:options' auto-description '%d'
zstyle ':completion:*:default' list-prompt '%S%M matches%s'
zstyle ':completion:*' group-name ''
zstyle ':completion:*' verbose yes
zstyle ':auto-fu:highlight' input white
zstyle ':auto-fu:highlight' completion fg=black,bold
zstyle ':auto-fu:highlight' completion/one fg=black,bold
zstyle ':auto-fu:var' postdisplay $' -azfu-'
zstyle ':auto-fu:var' track-keymap-skip opp
#zstyle ':auto-fu:var' disable magic-space
# Zstyle kill menu
zstyle ':completion:*:*:kill:*' menu yes select
zstyle ':completion:*:kill:*' force-list always
zstyle ':completion:*:*:kill:*:processes' list-colors "=(#b) #([0-9]#)*=36=31"
# Zstyle ssh known hosts
zstyle -e ':completion::*:*:*:hosts' hosts 'reply=(${=${${(f)"$(cat {/etc/hosts,etc/ssh_,${HOME}/.ssh/known_}hosts(|2)(N) /dev/null)"}%%[# ]*}//,/ })'
# Zstyle autocompletion
zstyle ':auto-fu:highlight' input bold
zstyle ':auto-fu:highlight' completion fg=black,bold
zstyle ':auto-fu:highlight' completion/one fg=white,bold,underline
zstyle ':auto-fu:var' postdisplay $'\n-azfu-'
zstyle ':auto-fu:var' track-keymap-skip opp
# History
zstyle ':completion:*:history-words' stop yes
zstyle ':completion:*:history-words' remove-all-dups yes
zstyle ':completion:*:history-words' list false
zstyle ':completion:*:history-words' menu yes
#
# env.zsh:
#
HISTFILE=~/.zshhistory
HISTSIZE=3000
SAVEHIST=3000
# Share history between tmux windows
setopt SHARE_HISTORY
export GREP_OPTIONS='--color=auto'
export GREP_COLOR='38;5;202'
export LESS_TERMCAP_mb=$'\E[01;31m' # begin blinking
export LESS_TERMCAP_md=$'\E[01;38;5;67m' # begin bold
export LESS_TERMCAP_me=$'\E[0m' # end mode
export LESS_TERMCAP_se=$'\E[0m' # end standout-mode
export LESS_TERMCAP_so=$'\E[38;33;65m' # begin standout-mode - info box
export LESS_TERMCAP_ue=$'\E[0m' # end underline
export LESS_TERMCAP_us=$'\E[04;38;5;172m' # begin underline
export LESS=-r
[[ -z $TMUX ]] && export TERM="xterm-256color"
# Midnight commander wants this:
export COLORTERM=truecolor
export GOPATH=${HOME}/Projects/Go
if [[ -e /usr/libexec/java_home ]]; then
export JAVA_HOME=$(/usr/libexec/java_home)
fi
# Set GPG TTY
export GPG_TTY=$(tty)
# default postgres database
export PGDATABASE=postgres
path=(${HOME}/bin $path)
export PATH
#
# keybindings.zsh:
#
bindkey '^w' backward-kill-word
bindkey '^h' backward-delete-char
bindkey '^r' history-incremental-search-backward
bindkey '^s' history-incremental-search-forward
bindkey '^p' history-search-backward
bindkey '^n' history-search-forward
bindkey '^a' beginning-of-line
bindkey '^e' end-of-line
bindkey '^k' kill-line
bindkey "^f" forward-word
bindkey "^b" backward-word
bindkey "${terminfo[khome]}" beginning-of-line # Fn-Left, Home
bindkey "${terminfo[kend]}" end-of-line # Fn-Right, End
#
# style.zsh:
#
# PATTERNS
# rm -rf
ZSH_HIGHLIGHT_PATTERNS+=('rm -rf *' 'fg=white,bold,bg=214')
# Sudo
ZSH_HIGHLIGHT_PATTERNS+=('sudo ' 'fg=white,bold,bg=214')
# autosuggestion highlight
ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE="fg=4"
# Aliases and functions
FAST_HIGHLIGHT_STYLES[alias]='fg=068'
FAST_HIGHLIGHT_STYLES[function]='fg=028'
# Commands and builtins
FAST_HIGHLIGHT_STYLES[command]="fg=166"
FAST_HIGHLIGHT_STYLES[hashed-command]="fg=blue"
FAST_HIGHLIGHT_STYLES[builtin]="fg=202"
# Paths
FAST_HIGHLIGHT_STYLES[path]='fg=244'
# Globbing
FAST_HIGHLIGHT_STYLES[globbing]='fg=130,bold'
# Options and arguments
FAST_HIGHLIGHT_STYLES[single-hyphen-option]='fg=124'
FAST_HIGHLIGHT_STYLES[double-hyphen-option]='fg=124'
FAST_HIGHLIGHT_STYLES[back-quoted-argument]="fg=065"
FAST_HIGHLIGHT_STYLES[single-quoted-argument]="fg=065"
FAST_HIGHLIGHT_STYLES[double-quoted-argument]="fg=065"
FAST_HIGHLIGHT_STYLES[dollar-double-quoted-argument]="fg=065"
FAST_HIGHLIGHT_STYLES[back-double-quoted-argument]="fg=065"
FAST_HIGHLIGHT_STYLES[default]='none'
FAST_HIGHLIGHT_STYLES[unknown-token]='fg=red,bold'
FAST_HIGHLIGHT_STYLES[reserved-word]='fg=green'
FAST_HIGHLIGHT_STYLES[precommand]='none'
FAST_HIGHLIGHT_STYLES[commandseparator]='fg=214'
FAST_HIGHLIGHT_STYLES[history-expansion]='fg=blue'
FAST_HIGHLIGHT_STYLES[assign]='none'
eval "$(rbenv init -)"
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh
| true
|
79206498e23b6ef1d1050e2f549f71890712a877
|
Shell
|
MitchStevens/dotfiles
|
/dot_config/scripts/blocklets/executable_make_colors.sh
|
UTF-8
| 499
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Populate ~/.color/<name> with the terminal palette queried from xrdb,
# one file per colour, each holding that colour's value (e.g. "#rrggbb").

# Query the 16 palette entries (color0..color15) from the X resources.
colors=($(xrdb -query | grep '*color*' | awk '{print $NF}'))
names=(black red green yellow blue magenta cyan light_grey dark_grey light_red light_green light_yellow light_blue light_magenta light_cyan white)

# NOTE: "~" is not expanded inside quotes, so the original script tested
# and touched a literal "~/.color" path; use $HOME instead. mkdir -p also
# makes the separate existence check unnecessary.
mkdir -p "$HOME/.color"

# The original `for name in $names` iterated only the first array element
# in bash; index over all 16 entries explicitly.
for i in {0..15}; do
  echo "${colors[$i]}" > "$HOME/.color/${names[$i]}"
done
| true
|
cba5fe27812a836ff8180312ab8f5d20f8bfc7d2
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/suitesparse-mkl/PKGBUILD
|
UTF-8
| 1,670
| 2.671875
| 3
|
[] |
no_license
|
# Maintainer: Israel Herraiz <isra@herraiz.org>
pkgname=suitesparse-mkl
pkgver=4.5.5
pkgrel=1
pkgdesc="A collection of sparse matrix libraries (compiled with the Intel MKL lib)"
url="http://faculty.cse.tamu.edu/davis/suitesparse.html"
arch=('i686' 'x86_64')
# Bash arrays are whitespace-separated; the original comma-separated form
# produced literal elements such as "umfpack," (comma included).
conflicts=('umfpack' 'suitesparse')
# Double quotes so ${pkgver} actually expands (single quotes suppressed it).
provides=('umfpack' "suitesparse=${pkgver}")
replaces=('umfpack' 'suitesparse')
depends=('metis' 'intel-mkl' 'intel-tbb')
makedepends=('intel-compiler-base' 'intel-fortran-compiler' 'cmake' 'chrpath')
license=('GPL')
options=('staticlibs')
source=("http://faculty.cse.tamu.edu/davis/SuiteSparse/SuiteSparse-$pkgver.tar.gz" suitesparse-link-tbb.patch)
sha1sums=('60b7778e6d5e7304f1f7f337874de8a631b470b0'
          '4f0b3836e8c3c1ec5be01f988f136cee4a2cb936')
prepare() {
  # Fix linking with intel-tbb.
  # Guard the cd: patching from the wrong directory would silently
  # corrupt whatever happened to be there.
  cd SuiteSparse || return 1
  patch -p1 -i ../suitesparse-link-tbb.patch
}
build() {
  cd "$srcdir"/SuiteSparse
  # Put the Intel MKL and compiler toolchains into the environment;
  # both scripts must be sourced before setting BLAS below.
  source /opt/intel/mkl/bin/mklvars.sh intel64
  source /opt/intel/composerxe/linux/bin/compilervars.sh intel64
  # Use MKL's single dynamic library (mkl_rt) as the BLAS implementation.
  export BLAS="-L/opt/intel/mkl/lib/intel64 -lmkl_rt"
  # Enable TBB parallelism in SPQR and link against the system metis.
  TBB=-ltbb SPQR_CONFIG=-DHAVE_TBB MY_METIS_LIB=/usr/lib/libmetis.so make
}
package() {
  cd "${srcdir}"/SuiteSparse
  install -dm755 "${pkgdir}"/usr/{include,lib}
  # Intel environments are sourced again here, mirroring build() —
  # presumably because package() does not inherit build()'s shell
  # environment; confirm before removing.
  source /opt/intel/mkl/bin/mklvars.sh intel64
  source /opt/intel/composerxe/linux/bin/compilervars.sh intel64
  export BLAS="-L/opt/intel/mkl/lib/intel64 -lmkl_rt"
  export LAPACK="-L/opt/intel/mkl/lib/intel64 -lmkl_rt"
  # Install straight into the package staging dirs.
  TBB=-ltbb SPQR_CONFIG=-DHAVE_TBB MY_METIS_LIB=/usr/lib/libmetis.so \
  make INSTALL_LIB="${pkgdir}"/usr/lib INSTALL_INCLUDE="${pkgdir}"/usr/include install
  # fix RPATH: strip embedded build-time RPATH entries from the libraries.
  chrpath -d "$pkgdir"/usr/lib/*
}
| true
|
5854db0993c00a03bbf50add301dbda8e2adf3ca
|
Shell
|
IANBRUNE/config
|
/.zshrc
|
UTF-8
| 3,483
| 2.921875
| 3
|
[] |
no_license
|
# If you come from bash you might have to change your $PATH.
export ANDROID_HOME=/Users/ianbrune/Library/Android/sdk
export PATH=$HOME/bin:/usr/local/bin:$ANDROID_HOME/platform-tools:$PATH
export PATH=~/.local/bin:$PATH
# Path to your oh-my-zsh installation.
export idea=/usr/local/bin/idea
export ZSH=/Users/ianbrune/.oh-my-zsh
export REACT_EDITOR=code
export FIREFOX_HOME=/Applications/Firefox.app/Contents/MacOS/firefox
export JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk1.8.0_161.jdk/Contents/Home
eval $(thefuck --alias)
# Set name of the theme to load. Optionally, if you set this to "random"
# it'll load a random theme each time that oh-my-zsh is loaded.
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
ZSH_THEME="sunrise"
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git aws git-extras)
source $ZSH/oh-my-zsh.sh
####################
# WORKSPACE #
####################
alias ll="ls -l"
alias work='cd ~/Workspace'
alias zshconfig="code ~/.zshrc"
alias sourcez='source ~/.zshrc'
alias ohmyzsh="code ~/.oh-my-zsh"
alias firefox-debug='$FIREFOX_HOME --start-debugger-server'
####################
# GIT #
####################
# Create a new local branch and push it upstream with tracking enabled.
# Arguments: $1 - name of the branch to create
push-new-branch() {
  # Quote the name so unusual branch names survive word-splitting, and
  # bail out if checkout fails so we never push from the wrong branch.
  git checkout -b "$1" || return
  local branch
  branch=$(git rev-parse --abbrev-ref HEAD) || return
  git push --set-upstream origin "$branch"
}
# Merge the latest origin/master into the current branch: refresh the
# local master first, then return to the original branch and merge.
merge-from-master() {
  local branch
  branch=$(git rev-parse --abbrev-ref HEAD) || return
  # Stop at the first failure so we never merge from a stale master or
  # strand the user on the wrong branch.
  git checkout master || return
  git pull || return
  git checkout "$branch" || return
  git merge origin/master
}
alias diff='git diff | gitx'
alias mm='merge-from-master'
alias pn='push-new-branch'
alias delete-merged='git branch --merged | egrep -v "(^\*|master|dev)" | xargs git branch -d'
####################
# KCL #
####################
# Install dependencies and launch the krazydevs dev server.
start-krazydevs() {
  # Guard the cd: without it, yarn would run in whatever directory we
  # happened to be in when the project path is missing.
  cd ~/workspace/www.krazydevs.com || return
  yarn && yarn start
}
alias krazydevs="start-krazydevs"
| true
|
1b6884432460c76bc25bae7c217e38cf28c25236
|
Shell
|
morristech/dot-files-1
|
/dotprofile/theme/common/node.bash
|
UTF-8
| 284
| 3.515625
| 4
|
[
"ISC",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# Emit the active node version (colourised) when the current directory is
# inside an npm project; prints nothing otherwise.
___node_version() {
  local prefix version
  prefix=$(npm prefix 2>/dev/null) || return 1
  # Only report a version when a package.json sits at the npm prefix.
  if [[ -e "${prefix}/package.json" ]]; then
    version=$(node --version 2>/dev/null) || return 1
    echo -e "$COLOR_GREEN(${COLOR_DARK_BLUE}$version$COLOR_GREEN) "
  fi
}
| true
|
54bcf49da0e6c4e2fac642513d81a85efe0aa55d
|
Shell
|
xclarifyio/dataset-librispeech-corpus
|
/fill_template.sh
|
UTF-8
| 441
| 3.421875
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/bin/sh
# Generate a Dockerfile that downloads, verifies and unpacks a dataset.
#   $1 - output path for the generated Dockerfile
#   $2 - dataset name (directory the archive is unpacked into)
#   $3 - download URL
#   $4 - expected md5 checksum of the archive
TARGET="$1"
NAME="$2"
URL="$3"
MD5="$4"

# Quote the redirect target so paths with spaces work; the escaped \$VAR
# references below are expanded by Docker at build time, not here.
cat > "$TARGET" <<-EOD
FROM busybox:latest
RUN mkdir -p /dataset
WORKDIR /dataset
ENV URL=${URL}
ENV NAME=${NAME}
ENV MD5=${MD5}
RUN wget -O ./data.tar.gz "\$URL" \\
 && echo "\$MD5 data.tar.gz" > md5sum \\
 && md5sum -c md5sum \\
 && rm md5sum \\
 && gzip -d ./data.tar.gz \\
 && mkdir \$NAME \\
 && tar -x -f ./data.tar -C \$NAME \\
 && rm -rf ./data.tar
VOLUME /dataset/\$NAME
EOD
| true
|
662e9a22b181e040162206e27bee7c58e93b0ff3
|
Shell
|
PSC-PublicHealth/pyrhea
|
/src/tools/postprocessing/gen_counts_parallel.proto
|
UTF-8
| 938
| 2.859375
| 3
|
[] |
no_license
|
#! /bin/bash
#
# SLURM array-job template: %name% tokens are substituted by a generator
# before submission. Each array task post-processes one notes/bcz file.
#SBATCH -N 1
#SBATCH --constraint="intel"
#SBATCH --mem-per-cpu=20000
#xxxxSBATCH --ntasks-per-node=2
#xxxxSBATCH -p RM-shared
#SBATCH --time=1:00:00
#SBATCH --array=0-%maxfile%
# Index of this task within the job array.
ar=$SLURM_ARRAY_TASK_ID
scenario=%scenario%
echo $scenario
runyaml=%runyaml%
basedir=%basedir%
bundledir=%bundledir%
workdir=%workdir%
topdir=%topdir%
minNotesSz=%minNotesSz%
pyrheadir=%pyrheadir%
targetdir=%targetdir%
pythonsetup=%pythonsetup%
tooldir=$pyrheadir/src/tools
cd $tooldir
source $pythonsetup
# Collect the list of candidate files; every array task rebuilds the same
# list and then picks its own entry by index.
narr=()
#for fn in `$topdir/find_notes.py $targetdir $minNotesSz`
for fn in `$topdir/find_bcz.py $targetdir`
do
  narr+=($fn)
done
# Array subscripts are arithmetic in bash, so this selects the entry
# offset from the lowest array-task id.
myNote=${narr[$ar - ${SLURM_ARRAY_TASK_MIN}]}
outName="${workdir}/${myNote}_counts"
sto=$workdir/gather_counts_${ar}.out
runName=$bundledir/$runyaml
# Run the gather step; both stdout and stderr go to the per-task log.
python gather_counts.py -n $targetdir/$myNote -m 1 \
--nocsv --nostats --out $outName -x $workdir/xdro_facs.yaml --lowdate 1 \
$runName >& $sto
| true
|
7870957bcc541927dd08e2080546535eb2bb08de
|
Shell
|
barucho/service-installer
|
/php5.5/install/gd.sh
|
UTF-8
| 265
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/sh
# Build and install libgd into $LIB_PATH; paths come from conf.sh.
. ./conf.sh

# Fail fast: without these guards a failed cd or extract would run the
# remaining build steps against whatever directory we happened to be in.
cd "$SRC_ROOT_PATH" || exit 1
LIBGD_SRC=libgd-2.1.1
tar -zxvf "$LIBGD_SRC.tar.gz" || exit 1
cd "$LIBGD_SRC" || exit 1
./configure --prefix="$LIB_PATH" --with-freetype="$LIB_PATH" --with-jpeg="$LIB_PATH" --with-png="$LIB_PATH" --without-tiff || exit 1
make || exit 1
make install || exit 1
cd ..
rm -rf "$LIBGD_SRC"
| true
|
7ed1a4dd068e7a59167707d91d71423f4b84e2e3
|
Shell
|
gregnuj/docker-sshd
|
/rootfs/usr/local/bin/entrypoint.sh
|
UTF-8
| 126
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run every init script in /etc/entrypoint.d, then hand control to the
# container command.

# Glob instead of parsing `ls`: filenames with spaces survive, and the
# existence check skips the literal pattern when the directory is empty.
for script in /etc/entrypoint.d/*.sh; do
    [ -e "$script" ] || continue
    echo "$0: running $script"
    "$script"
    echo
done

# Quote "$@" so multi-word container arguments are preserved intact.
exec "$@"
| true
|
348e3f5cecbf9997a97821fb139b8121042f5d74
|
Shell
|
Crypto2099/cardano
|
/sendMyVote.sh
|
UTF-8
| 11,596
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Script is brought to you by ATADA_Stakepool, Telegram @atada_stakepool in
# cooperation with @TheRealAdamDean (BUFFY & SPIKE)
###
# Change the following variables to match your configuration
###
# Paths to the local cardano-node installation and its config files.
CPATH="/home/<user>/cardano-node"
socket="${CPATH}/db/node.socket"
genesisfile="${CPATH}/config/testnet-shelley-genesis.json"
genesisfile_byron="${CPATH}/config/testnet-byron-genesis.json"
cardanocli="/home/<user>/.cabal/bin/cardano-cli"
cardanonode="/home/<user>/.cabal/bin/cardano-node"
byronToShelleyEpochs=208
# NOTE(review): the genesis files above point at *testnet* configs while
# this network flag is --mainnet — confirm they are meant to match.
magicparam="--mainnet"
###
# STOP EDITING!!!
###
# cardano-cli locates the node through this env var; set it from the
# configured socket path unless the caller already exported one.
if [[ -z $CARDANO_NODE_SOCKET_PATH ]]; then
	export CARDANO_NODE_SOCKET_PATH=${socket}
fi
# Throwaway address used only for fee estimation on dummy transactions.
dummyShelleyAddr="addr1vyde3cg6cccdzxf4szzpswgz53p8m3r4hu76j3zw0tagyvgdy3s4p"
###
# Define utility functions
###
# Return success when the given command name resolves to a builtin,
# function or executable on PATH; all output is suppressed.
exists() {
  if command -v "$1" >/dev/null 2>&1; then
    return 0
  fi
  return 1
}
# Validate an address string with cardano-cli; aborts the whole script
# when the address cannot be parsed. Arguments: $1 - address to check.
check_address() {
  # Only the exit status matters here; the original captured stdout into
  # an unused variable, so discard all output instead.
  if ! ${cardanocli} shelley address info --address "$1" > /dev/null 2>&1; then
    echo -e "\e[35mERROR - Unknown address format for address: $1 !\e[0m"
    exit 1
  fi
}
#-------------------------------------------------------------
#Subroutine for user interaction
# Prompt the user with a yes/no question on the controlling terminal.
#   $1 - question text
#   $2 - optional default ("Y" or "N") applied when the user hits enter
# Returns 0 for yes, 1 for no; re-asks on any other input.
ask() {
    local prompt default reply

    case "${2:-}" in
        Y) prompt="Y/n"; default=Y ;;
        N) prompt="y/N"; default=N ;;
        *) prompt="y/n"; default= ;;
    esac

    while true; do
        # Print the question ourselves (not via "read -p", which writes
        # to stderr rather than stdout).
        echo -ne "$1 [$prompt] "

        # Read from /dev/tty in case stdin is redirected elsewhere.
        read reply </dev/tty

        # Empty answer falls back to the default.
        [ -z "$reply" ] && reply=$default

        case "$reply" in
            Y*|y*) return 0 ;;
            N*|n*) return 1 ;;
        esac
    done
}
#-------------------------------------------------------
#-------------------------------------------------------
#Subroutines to calculate current epoch from genesis.json offline
# Compute the current epoch offline from the genesis file: seconds
# elapsed since systemStart divided by the epoch length (integer math).
get_currentEpoch() {
    local genesis_start start_sec now_sec epoch_len
    genesis_start=$(jq -r .systemStart ${genesisfile})
    start_sec=$(date --date=${genesis_start} +%s)   # genesis start, UTC seconds
    now_sec=$(date -u +%s)                          # now, UTC seconds
    epoch_len=$(jq -r .epochLength ${genesisfile})
    echo $(( (now_sec - start_sec) / epoch_len ))
}
#-------------------------------------------------------
#-------------------------------------------------------
#Subroutines to calculate current slotHeight(tip)
# Ask the local node for the chain tip and print the slot number.
get_currentTip() {
    ${cardanocli} shelley query tip ${magicparam} | jq -r .slotNo
}
#-------------------------------------------------------
#-------------------------------------------------------
#Subroutines to calculate current TTL
# Transactions are given a validity window of 10000 slots past the tip.
get_currentTTL() {
    local tip
    tip=$(get_currentTip)
    echo $(( tip + 10000 ))
}
#-------------------------------------------------------
#-------------------------------------------------------
#Displays an Errormessage if parameter is not 0
# Displays an error message and aborts the whole script when the given
# status code is non-zero; a zero code is accepted silently.
checkError()
{
    [[ $1 -eq 0 ]] && return 0
    echo -e "\n\n\e[35mERROR (Code $1) !\e[0m"
    exit 1
}
#-------------------------------------------------------
if ! exists jq; then
echo -e "\nYou need the tool 'jq' !\n"
echo -e "Install it On Ubuntu/Debian like:\n\e[97msudo apt update && sudo apt -y install jq\e[0m\n"
echo -e "Thx! :-)\n"
exit 2
fi
tempDir=$(dirname $(mktemp tmp.XXXX -ut))
#load variables from common.sh
# socket Path to the node.socket (also exports socket to CARDANO_NODE_SOCKET_PATH)
# genesisfile Path to the genesis.json
# magicparam TestnetMagic parameter
# cardanocli Path to the cardano-cli executable
# . "$(dirname "$0")"/00_common.sh
case $# in
2 ) fromAddr="$1";
metafile="$2.json";;
* ) cat >&2 <<EOF
Usage: $(basename $0) <From AddressName> <VoteFileName>
Note: Do not include file suffixes (i.e. .addr or .json)
EOF
exit 1;; esac
#This is a simplified Version of the sendLovelaces.sh script so, it will always be a SendALLLovelaces transaction + Metafile
toAddr=${fromAddr}
lovelacesToSend="ALL"
#Throw an error if the voting.json file does not exist
if [ ! -f "${metafile}" ]; then
echo -e "The specified VoteFileName.json (${metafile} file does not exist. Please try again."
exit 1
fi
# Read one value from the vote metadata JSON file.
#   $1 - parameter name to look up
#   $2 - path to the JSON file
#   $3 - 1 when the parameter is required (defaults to 0)
# Prints the value on stdout; exits the whole script on errors.
function readMetaParam() {
    required="${3:-0}"
    # The file has a single top-level key; look the parameter up under it.
    key=$(jq 'keys[0]' $2)
    param=$(jq -r ".$key .$1" $2 2> /dev/null)
    if [[ $? -ne 0 ]]; then echo "ERROR - ${2} is not a valid JSON file" >&2; exit 1;
    # jq -r prints the literal string "null" for a missing key.
    elif [[ "${param}" == null && required -eq 1 ]]; then echo "ERROR - Parameter \"$1\" in ${2} does not exist" >&2; exit 1;
    # NOTE(review): "!required" is evaluated arithmetically by [[ ... -eq 1 ]],
    # so this branch fires only when required is 0 — i.e. empty values are
    # rejected for *optional* parameters only. That looks inverted relative
    # to the error text; confirm the intended logic against the callers.
    elif [[ "${param}" == "" && !required -eq 1 ]]; then echo "ERROR - Parameter \"$1\" in ${2} is empty" >&2; exit 1;
    fi
    echo "${param}"
}
objectType=$(readMetaParam "ObjectType" "${metafile}" 1); if [[ ! $? == 0 ]]; then exit 1; fi
objectVersion=$(readMetaParam "ObjectVersion" "${metafile}"); if [[ ! $? == 0 ]]; then exit 1; fi
if [[ $objectType == 'VoteBallot' ]]; then
# Check VoteBallot required fields
networkId=$(readMetaParam "NetworkId" "${metafile}" 1); if [[ ! $? == 0 ]]; then exit 1; fi
proposalId=$(readMetaParam "ProposalId" "${metafile}" 1); if [[ ! $? == 0 ]]; then exit 1; fi
voterId=$(readMetaParam "VoterId" "${metafile}" 1); if [[ ! $? == 0 ]]; then exit 1; fi
yesnovote=$(readMetaParam "Vote" "${metafile}"); if [[ ! $? == 0 ]]; then exit 1; fi
choicevote=$(readMetaParam "Choices" "${metafile}"); if [[ ! $? == 0 ]]; then exit 1; fi
if [[ $yesnovote == null && $choicevote == null ]]; then
echo "ERROR - No voting preferences found in ballot.";
exit 1;
fi
#else
# echo "ERROR - JSON is not of type VoteBallot.";
# exit 1;
fi
sendFromAddr=$(cat ${fromAddr}.addr)
sendToAddr=$(cat ${toAddr}.addr)
check_address "${sendFromAddr}"
# check_address "${sendToAddr}"
rxcnt="1"
#Choose between sending ALL funds or a given amount of lovelaces out
# if [[ ${lovelacesToSend^^} == "ALL" ]]; then
#Sending ALL lovelaces, so only 1 receiver address
# rxcnt="1"
# else
#Sending a free amount, so 2 receiver addresses
# rxcnt="2" #transmit to two addresses. 1. destination address, 2. change back to the source address
# fi
echo
echo -e "\e[0mUsing lovelaces from Address\e[32m ${fromAddr}.addr\e[0m to send the metafile\e[32m ${metafile}\e[0m:"
echo
#get live values
currentTip=$(get_currentTip)
ttl=$(get_currentTTL)
currentEPOCH=$(get_currentEpoch)
echo -e "\e[0mCurrent Slot-Height:\e[32m ${currentTip} \e[0m(setting TTL to ${ttl})"
echo
echo -e "\e[0mSource/Destination Address ${fromAddr}.addr:\e[32m ${sendFromAddr} \e[90m"
echo -e "\e[0mAttached Metafile:\e[32m ${metafile} \e[90m"
echo
#Get UTXO Data for the sendFromAddr
utx0=$(${cardanocli} shelley query utxo --address ${sendFromAddr} --cardano-mode ${magicparam})
utx0linecnt=$(echo "${utx0}" | wc -l)
txcnt=$((${utx0linecnt}-2))
if [[ ${txcnt} -lt 1 ]]; then echo -e "\e[35mNo funds on the source Addr!\e[0m"; exit; else echo -e "\e[32m${txcnt} UTXOs\e[0m found on the source Addr!"; fi
echo
#Calculating the total amount of lovelaces in all utxos on this address
totalLovelaces=0
txInString=""
while IFS= read -r utx0entry
do
fromHASH=$(echo ${utx0entry} | awk '{print $1}')
fromHASH=${fromHASH//\"/}
fromINDEX=$(echo ${utx0entry} | awk '{print $2}')
sourceLovelaces=$(echo ${utx0entry} | awk '{print $3}')
echo -e "HASH: ${fromHASH}\t INDEX: ${fromINDEX}\t LOVELACES: ${sourceLovelaces}"
totalLovelaces=$((${totalLovelaces}+${sourceLovelaces}))
txInString=$(echo -e "${txInString} --tx-in ${fromHASH}#${fromINDEX}")
done < <(printf "${utx0}\n" | tail -n ${txcnt})
echo -e "Total lovelaces in UTX0:\e[32m ${totalLovelaces} lovelaces \e[90m"
echo
#Getting protocol parameters from the blockchain, calculating fees
${cardanocli} shelley query protocol-parameters --cardano-mode ${magicparam} > protocol-parameters.json
#Get the current minUTxOvalue
minUTXO=$(jq -r .minUTxOValue protocol-parameters.json 2> /dev/null)
#Generate Dummy-TxBody file for fee calculation
txBodyFile="${tempDir}/dummy.txbody"
rm ${txBodyFile} 2> /dev/null
if [[ ${rxcnt} == 1 ]]; then #Sending ALL funds (rxcnt=1)
${cardanocli} shelley transaction build-raw ${txInString} --tx-out ${dummyShelleyAddr}+0 --ttl ${ttl} --fee 0 --metadata-json-file ${metafile} --out-file ${txBodyFile}
checkError "$?"
else #Sending chosen amount (rxcnt=2)
${cardanocli} shelley transaction build-raw ${txInString} --tx-out ${dummyShelleyAddr}+0 --tx-out ${dummyShelleyAddr}+0 --metadata-json-file ${metafile} --ttl ${ttl} --fee 0 --out-file ${txBodyFile}
checkError "$?"
fi
fee=$(${cardanocli} shelley transaction calculate-min-fee --tx-body-file ${txBodyFile} --protocol-params-file protocol-parameters.json --tx-in-count ${txcnt} --tx-out-count ${rxcnt} ${magicparam} --witness-count 1 --byron-witness-count 0 | awk '{ print $1 }')
checkError "$?"
echo -e "\e[0mMinimum Transaction Fee for ${txcnt}x TxIn & ${rxcnt}x TxOut: \e[32m ${fee} lovelaces \e[90m"
#If sending ALL funds
if [[ ${rxcnt} == 1 ]]; then lovelacesToSend=$(( ${totalLovelaces} - ${fee} )); fi
#calculate new balance for destination address
lovelacesToReturn=$(( ${totalLovelaces} - ${fee} - ${lovelacesToSend} ))
#Checking about minimum funds in the UTXO
if [[ ${lovelacesToReturn} -lt 0 || ${lovelacesToSend} -lt 0 ]]; then echo -e "\e[35mNot enough funds on the source Addr!\e[0m"; exit; fi
#Checking about the minimum UTXO that can be transfered according to the current set parameters
lovelacesMinCheck=$(( ${totalLovelaces} - ${fee} )) #hold the value of the lovelaces that will be transfered out no mather what type of transaction
if [[ ${lovelacesMinCheck} -lt ${minUTXO} ]]; then echo -e "\e[35mAt least ${minUTXO} lovelaces must be transfered (ParameterSetting)!\e[0m"; exit; fi
echo -e "\e[0mLovelaces to return to ${toAddr}.addr: \e[33m ${lovelacesToSend} lovelaces \e[90m"
echo
txBodyFile="${tempDir}/$(basename ${fromAddr}).txbody"
txFile="${tempDir}/$(basename ${fromAddr}).tx"
echo
echo -e "\e[0mBuilding the unsigned transaction body: \e[32m ${txBodyFile} \e[90m"
echo
#Building unsigned transaction body
rm ${txBodyFile} 2> /dev/null
if [[ ${rxcnt} == 1 ]]; then #Sending ALL funds (rxcnt=1)
${cardanocli} shelley transaction build-raw ${txInString} --tx-out ${sendToAddr}+${lovelacesToSend} --ttl ${ttl} --fee ${fee} --metadata-json-file ${metafile} --out-file ${txBodyFile}
checkError "$?"
else #Sending chosen amount (rxcnt=2)
${cardanocli} shelley transaction build-raw ${txInString} --tx-out ${sendToAddr}+${lovelacesToSend} --tx-out ${sendFromAddr}+${lovelacesToReturn} --metadata-json-file ${metafile} --ttl ${ttl} --fee ${fee} --out-file ${txBodyFile}
checkError "$?"
fi
cat ${txBodyFile}
echo
echo -e "\e[0mSign the unsigned transaction body with the \e[32m${fromAddr}.skey\e[0m: \e[32m ${txFile} \e[90m"
echo
#Sign the unsigned transaction body with the SecureKey
rm ${txFile} 2> /dev/null
${cardanocli} shelley transaction sign --tx-body-file ${txBodyFile} --signing-key-file ${fromAddr}.skey ${magicparam} --out-file ${txFile}
checkError "$?"
cat ${txFile}
echo
if ask "\e[33mDoes this look good for you, continue ?" N; then
echo
echo -ne "\e[0mSubmitting the transaction via the node..."
${cardanocli} shelley transaction submit --tx-file ${txFile} --cardano-mode ${magicparam}
checkError "$?"
echo -e "\e[32mDONE\n"
fi
echo -e "\e[0m\n"
| true
|
cbae63c7df725b710b928c662f2890fa98916313
|
Shell
|
madhuchary/bash-scripts
|
/break-continue.sh
|
UTF-8
| 241
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Repeatedly ask the user whether to continue: y/Y loops again, n/N
# stops, anything else prints a hint and asks once more.
while true; do
    echo "Do you want to Continue press y/n"
    read ans
    case "$ans" in
        y|Y) continue ;;
        n|N) break ;;
        *)   echo "please enter only y|Y or n|N" ;;
    esac
done
| true
|
ad3f07fd738a68259142b596ec94d01222c25866
|
Shell
|
majklovec/bivac
|
/test/integration/docker/tests/01_basic
|
UTF-8
| 6,894
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Prepare environment
echo "[*] Retrieving internal IP..."
export INTERNAL_IP=$(ip -o -4 addr show dev `ls /sys/class/net | grep -E "^eth|^en" | head -n 1` | cut -d' ' -f7 | cut -d'/' -f1)
test_valid=true
canary=8ft8HJ3teCg8S1WeH5bwhNBZEtBJNs
export AWS_ACCESS_KEY_ID=OBQZY3DV6VOEZ9PG6NIM
export AWS_SECRET_ACCESS_KEY=7e88XeX0j3YdB6b1o0zU2GhG0dX6tFMy3Haty
echo "[*] Creating test volumes..."
docker volume create canary
echo $canary | sudo tee /var/lib/docker/volumes/canary/_data/canary
sudo mkdir -p /var/lib/docker/volumes/canary/_data/foo/bar
echo $canary | sudo tee /var/lib/docker/volumes/canary/_data/foo/bar/canary
echo "[*] Starting test services..."
docker run -d --name mysql -v mysql:/var/lib/mysql -e MYSQL_DATABASE=foo -e MYSQL_USER=foo -e MYSQL_PASSWORD=bar -e MYSQL_ROOT_PASSWORD=root mysql:5.6
docker run -d --name postgres -v postgres:/var/lib/postgresql/data postgres:latest
sleep 10
# Start Bivac
echo "[*] Starting Bivac..."
docker run -d --hostname=testing --name testing --rm -v /var/run/docker.sock:/var/run/docker.sock:ro \
-e BIVAC_TARGET_URL=s3:http://${INTERNAL_IP}:9000/bivac-testing/docker \
-e BIVAC_LOG_LEVEL=$2 \
-e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
-e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
-e RESTIC_PASSWORD=toto \
-e BIVAC_SERVER_PSK=toto \
-e BIVAC_REFRESH_RATE=10s \
-e BIVAC_AGENT_IMAGE=$1 \
-p 8182:8182 \
$1 manager
sleep 10
echo "[*] Waiting for backups..."
canary_waiting=true
mysql_waiting=true
postgres_waiting=true
while $canary_waiting || $mysql_waiting || $postgres_waiting; do
docker logs testing
canary_volume=$(curl -s -H "Authorization: Bearer toto" http://127.0.0.1:8182/volumes | jq -r '.[] | select(.ID | contains("canary"))')
if [ "$(echo $canary_volume | jq -r '.LastBackupStatus')" = "Success" ]; then
canary_waiting=false
elif [ "$(echo $canary_volume | jq -r '.LastBackupStatus')" = "Failed" ]; then
echo $(echo $canary_volume | jq -r '.Logs')
canary_waiting=false
elif [ "$(echo $canary_volume | jq -r '.LastBackupStatus')" = "Unknown" ]; then
echo "Volume already backed up, the remote repository may not have been cleaned up."
canary_waiting=false
test_valid=false
fi
mysql_volume=$(curl -s -H "Authorization: Bearer toto" http://127.0.0.1:8182/volumes | jq -r '.[] | select(.ID | contains("mysql"))')
if [ "$(echo $mysql_volume | jq -r '.LastBackupStatus')" = "Success" ]; then
mysql_waiting=false
elif [ "$(echo $mysql_volume | jq -r '.LastBackupStatus')" = "Failed" ]; then
echo $(echo $mysql_volume | jq -r '.Logs')
mysql_waiting=false
elif [ "$(echo $mysql_volume | jq -r '.LastBackupStatus')" = "Unknown" ]; then
echo "Volume already backed up, the remote repository may not have been cleaned up."
mysql_waiting=false
test_valid=false
fi
postgres_volume=$(curl -s -H "Authorization: Bearer toto" http://127.0.0.1:8182/volumes | jq -r '.[] | select(.ID | contains("postgres"))')
if [ "$(echo $postgres_volume | jq -r '.LastBackupStatus')" = "Success" ]; then
postgres_waiting=false
elif [ "$(echo $postgres_volume | jq -r '.LastBackupStatus')" = "Failed" ]; then
echo $(echo $postgres_volume | jq -r '.Logs')
postgres_waiting=false
elif [ "$(echo $postgres_volume | jq -r '.LastBackupStatus')" = "Unknown" ]; then
echo "Volume already backed up, the remote repository may not have been cleaned up."
postgres_waiting=false
test_valid=false
fi
sleep 10
done
echo "[*] Checking backups..."
# Canaries
dump=$(docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/docker/testing/canary dump latest /var/lib/docker/volumes/canary/_data/canary)
dump=${dump%$'\r'}
if [[ $dump != $canary ]]; then
echo "Canary backup failed."
docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/docker/testing/canary ls latest
test_valid=false
fi
dump=$(docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/docker/testing/canary dump latest /var/lib/docker/volumes/canary/_data/foo/bar/canary)
dump=${dump%$'\r'}
if [[ $dump != $canary ]]; then
echo "Canary backup failed."
docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/docker/testing/canary ls latest
test_valid=false
fi
# MySQL
dump=$(docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/docker/testing/mysql dump latest /var/lib/docker/volumes/mysql/_data/backups/all.sql)
dump=${dump%$'\r'}
if [[ $dump != *"Dump completed"* ]]; then
echo "Mysql backup failed."
docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/docker/testing/mysql ls latest
test_valid=false
fi
# PostgreSQL
dump=$(docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/docker/testing/postgres dump latest /var/lib/docker/volumes/postgres/_data/backups/all.sql)
dump=${dump%$'\r'}
if [[ $dump != *"dump complete"* ]]; then
echo "Postgresql backup failed."
docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/docker/testing/postgres ls latest
test_valid=false
fi
# Clean up environment
echo "[*] Cleaning up environment..."
docker kill testing
docker volume rm canary
docker kill mysql
docker rm mysql
docker kill postgres
docker rm postgres
docker volume rm mysql
docker volume rm postgres
docker pull minio/mc
docker run --rm -e MC_HOST_minio=http://OBQZY3DV6VOEZ9PG6NIM:7e88XeX0j3YdB6b1o0zU2GhG0dX6tFMy3Haty@${INTERNAL_IP}:9000 minio/mc rb --force minio/bivac-testing
docker run --rm -e MC_HOST_minio=http://OBQZY3DV6VOEZ9PG6NIM:7e88XeX0j3YdB6b1o0zU2GhG0dX6tFMy3Haty@${INTERNAL_IP}:9000 minio/mc mb minio/bivac-testing
if [ "$test_valid" = true ]; then
echo -e "\e[32m[+] Basic : standard data dir + mysql + postgresql\e[39m"
else
echo -e "\e[31m[-] Basic : standard data dir + mysql + postgresql\e[39m"
exit 1
fi
| true
|
236ab54a9ac8ad914b950169b72a31d55b6d3f0a
|
Shell
|
sprice/pantheon
|
/bcfg2/Probes/varnish_vcl_hash
|
UTF-8
| 322
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# bcfg2 probe: report a custom vcl_hash for /etc/varnish/default.vcl.
# To override the default, set VARNISH_VCL_HASH in
# /etc/pantheon/server_tuneables.
if [[ -e /etc/pantheon/server_tuneables ]]; then
    source /etc/pantheon/server_tuneables
fi
if [[ -n "${VARNISH_VCL_HASH}" ]]; then
    echo "${VARNISH_VCL_HASH}"
fi
| true
|
a75350961ea649512d51092b375526c471e192d2
|
Shell
|
JohnOmernik/zetaextra
|
/drill/build.sh
|
UTF-8
| 2,981
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the Apache Drill package (APP_TGZ) for Zeta from the MapR RPM.
# Relies on helpers and variables sourced by the caller: checkdocker,
# check4package, reqdockerimg, @go.log, APP_* / BUILD / IUSER /
# FS_PROVIDER / UNATTEND -- TODO confirm against the framework.
checkdocker
check4package "$APP_TGZ" BUILD
reqdockerimg "${REQ_APP_IMG_NAME}"
# Drill Specific Directories in the shared staging location
mkdir -p ${APP_ROOT}/extrajars
mkdir -p ${APP_ROOT}/libjpam
echo "Place to store custom jars" > ${APP_ROOT}/extrajars/jars.txt
# Check for libjpam (Required)
# Empty `ls` output means no libjpam.so has been staged yet.
JPAM=$(ls ${APP_ROOT}/libjpam)
if [ "$JPAM" == "" ]; then
    if [ "$FS_PROVIDER" == "mapr" ]; then
        echo "No Lib JPAM found, should we grab one from a MapR container?"
        @go.log WARN "No libjpam found in APP_ROOT"
        # Interactive prompt unless UNATTEND=1, which defaults to pulling.
        if [ "$UNATTEND" != "1" ]; then
            read -e -p "Pull libjpam.so from maprdocker? " -i "Y" PULLJPAM
        else
            @go.log WARN "Since -u provided, we will automatically pull libjpam"
            PULLJPAM="Y"
        fi
        if [ "$PULLJPAM" == "Y" ]; then
            # Start a throwaway MapR container and copy libjpam.so out of it.
            IMG=$(sudo docker images --format "{{.Repository}}:{{.Tag}}"|grep \/maprdocker)
            CID=$(sudo docker run -d $IMG /bin/bash)
            sudo docker cp $CID:/opt/mapr/lib/libjpam.so $APP_ROOT/libjpam
        else
            @go.log FATAL "Cannot continue with Drill installation without libjpam - exiting"
        fi
    else
        @go.log WARN "The Zeta version of Apache Drill requires libjpam.so for authentication and security. Please provide a path to find libjpam.so to include in your package installation directory"
        read -e -p "Full path to libjpam.so: " -i "/path/to/libjpam.so" JPAM_PATH
        # Re-prompt until an existing file is supplied or the user gives up.
        while [ ! -f "$JPAM_PATH" ]; do
            @go.log WARN "No file found at $JPAM_PATH - Try Again?"
            read -e -p "Try again? (Y/N): " -i "Y" THERETRY
            if [ "$THERETRY" == "Y" ]; then
                read -e -p "Full path to libjpam.so: " -i "/path/to/libjpam.so" JPAM_PATH
            else
                @go.log FATAL "Please find a libjpam.so to include in the install directory, or the Apache Drill install will not work"
            fi
        done
    fi
fi
# If Build is Y let's do this
if [ "$BUILD" == "Y" ]; then
    rm -rf $BUILD_TMP
    mkdir -p $BUILD_TMP
    cd $BUILD_TMP
    TMP_IMG="zeta/drillbuild"
    # Generate the in-container packaging script. The heredoc delimiter is
    # UNQUOTED, so $APP_URL/$APP_VER/$IUSER/$APP_TGZ expand NOW, at
    # generation time, not inside the container.
    cat > ./pkg_drill.sh << EOF
wget $APP_URL
rpm2cpio $APP_URL_FILE | cpio -idmv
echo "Moving ./opt/mapr/drill/${APP_VER} to ./"
mv ./opt/mapr/drill/${APP_VER} ./
echo "cd into ${APP_VER}"
cd ${APP_VER}
mv ./conf ./conf_orig
cd ..
chown -R ${IUSER}:${IUSER} ${APP_VER}
tar zcf ${APP_TGZ} ${APP_VER}
rm -rf ./opt
rm -rf ${APP_VER}
rm ${APP_URL_FILE}
EOF
    chmod +x ./pkg_drill.sh
    cat > ./Dockerfile << EOL
FROM ${ZETA_DOCKER_REG_URL}/buildbase
ADD pkg_drill.sh ./
RUN ./pkg_drill.sh
CMD ["/bin/bash"]
EOL
    # Build in Docker, copy the produced tarball out, then discard the image.
    sudo docker build -t $TMP_IMG .
    sudo docker run --rm -v=`pwd`:/app/tmp $TMP_IMG cp $APP_TGZ /app/tmp/
    sudo docker rmi -f $TMP_IMG
    mv ${APP_TGZ} ${APP_PKG_DIR}/
    cd $MYDIR
    rm -rf $BUILD_TMP
    echo ""
    @go.log INFO "$APP_NAME package build with $APP_VERS_FILE"
    echo ""
else
    @go.log WARN "Not rebuilding $APP_NAME - $APP_VERS_FILE"
fi
| true
|
3c3703fcf4b78fdb9f8340721885c3b0725495b6
|
Shell
|
abhiaiyer91/kots
|
/kotsadm/operator/deploy/install-krew.sh
|
UTF-8
| 307
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Install the krew kubectl plugin manager.
# Everything runs inside a subshell in a throwaway temp dir, so the
# caller's working directory is untouched and `set -x` tracing is scoped
# to this one install.
(
set -x; cd "$(mktemp -d)" &&
curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/krew.tar.gz" &&
tar zxvf krew.tar.gz &&
# Pick the extracted binary matching this OS/arch, e.g. krew-linux_amd64.
KREW=./krew-"$(uname | tr '[:upper:]' '[:lower:]')_$(uname -m | sed -e 's/x86_64/amd64/' -e 's/arm.*$/arm/')" &&
"$KREW" install krew
)
| true
|
beee781a82cb129721f3749535511ea2ac463672
|
Shell
|
dernasherbrezon/r2cloud
|
/src/main/deb/etc/cron.daily/r2cloud
|
UTF-8
| 367
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Daily auto-update for r2cloud, run from cron.daily as root.
# The r2cloud service itself runs as user pi, thus cannot manage this job.

# Log a message to the journal under the r2cloud tag.
log() {
    printf "%s" "$1" | systemd-cat -t r2cloud
}

# Opt-out flag: create this file to pin the installed version.
if [ -f /home/pi/r2cloud/DO_NOT_UPDATE ]; then
    log "auto update is disabled"
    exit 0
fi

MAILTO=""
log "updating r2cloud"
systemd-cat -t r2cloud apt-get -y update
systemd-cat -t r2cloud apt-get -y install r2cloud r2cloud-ui
| true
|
c7d3c5b9bcabd1d3a76ec861bcb1c4d5b38b5fea
|
Shell
|
clementmiao/clementmiao-cs123
|
/compile.sh
|
UTF-8
| 532
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# Compile Java sources against the cluster's Hadoop classpath and bundle
# the result into <Name>.jar.
# Usage: compile.sh <Name> <Java file> [<Java file> ...]
if [ $# -lt 2 ]; then
    echo "usage $(basename $0) <Name> <Java file> [<Java file> ...]"
    exit 1
fi

# Abort on the first failing step from here on.
set -e

NAME=$1
shift
FILES=$@

CLASS_DIR=${NAME}-classes
JAR_FILE=${NAME}.jar

module load midway-hadoop
mkdir -p ${CLASS_DIR}

# Compile for Java 6 bytecode with the Hadoop/CDH jars on the classpath.
javac -target 1.6 -source 1.6 -Xlint:deprecation -cp "${HADOOP_HOME}/*:${HADOOP}/parcels/CDH/lib/hadoop-mapreduce/*:${HADOOP}/parcels/CDH/lib/hadoop-fs/*:${HADOOP_HOME}/lib/*" -d ${CLASS_DIR} $FILES

# Package every compiled class into the jar.
jar -cvf ${JAR_FILE} -C ${CLASS_DIR} .
echo "Saved ${JAR_FILE}"
| true
|
7b439c44023be25c8f6bc73f5313b86b07fd27bb
|
Shell
|
douglasnaphas/sg-getatt-v-ref
|
/itest/itest.sh
|
UTF-8
| 569
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Integration test: verify that the deployed stack's S3 bucket serves the
# expected content. Prints a mismatch report and exits 2 on failure.
set -e

# Resolve the deployed stack name, then its BucketName output value.
stack_name=$(npx @cdk-turnkey/stackname@1.1.0 --suffix app)
bucket_name=$(aws cloudformation describe-stacks --stack-name "${stack_name}" \
  | jq '.Stacks[0].Outputs | map(select(.OutputKey == "BucketName"))[0].OutputValue' \
  | tr -d \")

# Fetch content.json from the bucket and extract its Data field.
content_data="$(aws s3 cp "s3://${bucket_name}/content.json" - \
  | jq '.Data' \
  | tr -d \")"

expected_data="Something"
if [[ "${content_data}" != "${expected_data}" ]]; then
  echo "Integration test failed. Expected content data:"
  echo "${expected_data}"
  echo "Got:"
  echo "${content_data}"
  exit 2
fi
| true
|
f6150ac68c1e4b160af48d9f4f58d02a6f5e67f3
|
Shell
|
fabiojna02/OpenCellular
|
/firmware/utilities/jenkins/clang_patch
|
UTF-8
| 773
| 3.5625
| 4
|
[
"CC-BY-4.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.

# Lint only the .c/.h files under firmware/ec that differ from origin/master.
fileList=$(git --no-pager diff --name-only HEAD origin/master ./firmware/ec | grep ".\.c$\|.\.h$")

# Reformat each changed file in place using the project's .clang-format.
for f in $fileList; do
  clang-format -style=file -i "${f}"
done

echo "Linting the following files:"
echo $fileList

# Any resulting working-tree diff means formatting violations; keep it
# around as a patch the developer can apply.
git diff > clang_format.patch

# Non-empty patch => lint issues => fail the build; empty patch => clean.
if [ -s clang_format.patch ]; then
  exit 1
fi
exit 0
| true
|
e3970e1160dd22eb2df52a2810bca6fffeae1f4e
|
Shell
|
wddwzc/AutoNav_KIT
|
/script/install-code.sh
|
UTF-8
| 2,274
| 3.359375
| 3
|
[] |
no_license
|
#
#By Wang Zichen
#2020-01
#
# Create a ROS catkin workspace (~/robot_ws), copy the project's driver
# and algorithm packages into it, and build each package individually.
#define vars
workspace_name="robot_ws"
basepath=$(cd `dirname $0`; pwd)
# 1) create catkin workspace
echo "---->1. Create catkin space...\n"
#judge if workspace exists?
if [ -d ~/$workspace_name/src ]; then
    echo -n "workspace already exists, overlap ? [y/n]:"
    read res
    # Only an explicit "n" aborts; any other answer wipes and recreates.
    if [ $res == "n" ]; then
        exit 0
    else
        rm -fr ~/$workspace_name/
    fi
fi
#create workspace
mkdir -p ~/$workspace_name/src
cd ~/$workspace_name/src
catkin_init_workspace
cd ~/$workspace_name/
catkin_make
source ~/$workspace_name/devel/setup.bash
#check if success
# After sourcing devel/setup.bash the workspace should appear on
# ROS_PACKAGE_PATH; if it does not, the initial build failed.
if echo $ROS_PACKAGE_PATH |grep -a $workspace_name; then
    echo "Successfully create workspace!"
else
    echo "Create workspace failed!"
    exit 1
fi
#2) create pkgs and copy files
echo "---->2. Create pckgs and copy files...\n"
#copy files into the folders
echo ========================$basepath
cd $basepath
cp -rf ./gnss_driver/ ~/$workspace_name/src
cp -rf ./xsens_imu_driver/ ~/$workspace_name/src
cp -rf ./dut_mr_drv/ ~/$workspace_name/src
cp -rf ./gps2odometry/ ~/$workspace_name/src
cp -rf ./Insrob_server/ ~/$workspace_name/src
cp -rf ./local_map/ ~/$workspace_name/src
cp -rf ./path_planner/ ~/$workspace_name/src
cp -rf ./Base_control/ ~/$workspace_name/src
cp -rf ./velodyne-master/ ~/$workspace_name/src
cp -rf ./laser_slam_algorithm/ ~/$workspace_name/src
# Helper run/kill scripts live at the workspace root, not under src/.
cp -rf kill.sh ~/$workspace_name/
cp -rf run-all.sh ~/$workspace_name/
cp -rf run-data.sh ~/$workspace_name/
cp -rf run-gps.sh ~/$workspace_name/
mkdir -p ~/$workspace_name/log/
mkdir -p ~/$workspace_name/map/
mkdir -p ~/$workspace_name/data/
#3) catkin make
echo "---->3. Catkin make...\n"
cd ~/$workspace_name/
#catkin_make -DCATKIN_WHITELIST_PACKAGES="velodyne-master"
#catkin_make -DCATKIN_WHITELIST_PACKAGES="xsens_imu_driver"
#catkin_make -DCATKIN_WHITELIST_PACKAGES="laser_slam_algorithm"
#catkin_make -DCATKIN_WHITELIST_PACKAGES="server"
source devel/setup.bash
# Build packages one at a time so a failure is easy to attribute.
catkin_make --pkg Base_control
catkin_make --pkg xsens_imu_driver
catkin_make --pkg gnss_driver
catkin_make --pkg dut_mr_drv
catkin_make --pkg gps2odometry
catkin_make --pkg Insrob_server
catkin_make --pkg local_map
catkin_make --pkg path_planner
catkin_make --pkg velodyne-master
catkin_make --pkg laser_slam_algorithm
echo "---->OK! Install Completely.\n"
| true
|
40ae1a4c47a8264f4365dd9965f15db54186c812
|
Shell
|
fae75933/BNIF8940
|
/Chipseq.sh
|
UTF-8
| 3,138
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
#SBATCH --job-name=chipseq
#SBATCH --partition=batch
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=6
#SBATCH --mem=24gb
#SBATCH --time=24:00:00
#SBATCH --output=/scratch/fae75933/log.%j
#SBATCH --mail-user=fae75933@uga.edu
#SBATCH --mail-type=END,FAIL
#SBATCH --error=.%j.log.err
# Iterate over fastq files in a directory
# Step2 - map fastq files to N. crassa genome, output aligned reads in bam format, then sorted and indexed bam files
#Step 3 - create a bigwig file for viewing in a genome browser and for downstream analysis;
#User input needed!!! Add path to directory containing the fastq files. Include a wild card symbol at end so the script will analyze all files
# THREADS must match --cpus-per-task above so bwa/samtools use all cores.
THREADS=6
#Directory to iterate over with a * at the end
FILES="/scratch/fae75933/finalproject/Felicia/chipseq/*.fastq" #Don't forget the *
##manually create a directory to store output and then put the path to that output directory here for writing
OUTDIR="/scratch/fae75933/finalproject"
##make output directories that you need. These should be modified to match the software in your specific pipeline
mkdir "$OUTDIR/SortedBamFiles"
mkdir "$OUTDIR/Bigwigs"
mkdir "$OUTDIR/MACSout"
module load deepTools/3.3.1-intel-2019b-Python-3.7.4
ml BWA/0.7.17-GCC-8.3.0
# ml SAMtools/1.9-foss-2016b
ml SAMtools/1.9-GCC-8.3.0
#Iterate over the files in the fastq folder and perform desired analysis steps
# NOTE: FILES is an unquoted glob, so the loop expands to every matching
# .fastq path at run time.
for f in $FILES
do
    ##define the variable $file to extract just the filename from each input file. Note that the variable $f will contain the entire path. Here you will extract the name of the file from the path and use this to name files that descend from this original input file.
    file=${f##*/}
    #Examples to Get Different parts of the file name
    #See here for details: http://tldp.org/LDP/abs/html/refcards.html#AEN22664
    #if you need to extract the directory but not the file, use the syntax below for variable dir
    #dir=${f%/*} #
    #create file name variables to use in the downstream analysis
    #use sed to get the second read matching the input file
    #filename variable for the sorted bam file
    sorted="$OUTDIR/SortedBamFiles/$file"
    #filename variable for the deeptools big wig output
    bigwig="$OUTDIR/Bigwigs/$file.bw"
    ############# Map Reads and convert to sorted bam files #########################
    #http://bio-bwa.sourceforge.net/bwa.shtml
    #http://www.htslib.org/doc/1.2/samtools.html
    ####load modules just before use, because these modules load all dependencies
    ##map reads and convert to sorted bam file. This is a bwa command, then output is piped to "samtools view", them this output is piped to "samtools sort"
    bwa mem -M -v 3 -t $THREADS /scratch/fae75933/genomesfolder/GCA_000182925_neurospora.fna $f | samtools view -bhu - | samtools sort -@ $THREADS -T $file -o "$sorted.bam" -O bam -
    samtools index "$sorted.bam"
    #create bw
    bamCoverage -p $THREADS -bs 1 --smoothLength 25 -of bigwig -b "${sorted}.bam" -o "$bigwig"
    #######For CutandRun data, you need to analyze the bam using the --MNase option. This
    #bamCoverage -p 12 --MNase -bs 1 --smoothLength 25 -of bigwig -b "$sorted.bam" -o "$bigwig"
done
| true
|
d01e7f0a4844a37eb1e0ac4afad13f83476221df
|
Shell
|
hivesolutions/scudum
|
/scripts/build/tools/python3.sh
|
UTF-8
| 324
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Build and install CPython from source into $PREFIX.
# VERSION may be preset by the caller; otherwise default to 3.11.1.
VERSION=${VERSION-3.11.1}
set -e +h

src_dir="Python-$VERSION"
tarball="Python-$VERSION.tgz"

# Fetch the release tarball from python.org.
wget --no-check-certificate --content-disposition "https://www.python.org/ftp/python/$VERSION/$tarball"

# Unpack into a clean tree and drop the archive.
rm -rf "$src_dir" && tar -zxf "$tarball"
rm -f "$tarball"

# Configure with shared libpython and install under $PREFIX.
cd "$src_dir"
./configure --prefix=$PREFIX --enable-shared
make && make install
| true
|
69dec6bd84f2af96bb162d80020319ed22bb95c5
|
Shell
|
daveschaefer/psv-admin
|
/run_backup.sh
|
UTF-8
| 272
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Dump the Perspectives notary database, compress it, and push the result
# to the notary_backup git repository.
cd Perspectives-Server
python notary_util/db2file.py ../notary_backup/notary_dump.txt

cd ../notary_backup
# Replace the previous compressed dump with a fresh one.
rm notary_dump.txt.bz2
bzip2 notary_dump.txt

git add notary_dump.txt.bz2
git commit -a -m "binary backup at: $(date)"
git push origin master
| true
|
37d65bac24661d5494bf98963fc36fe089711eae
|
Shell
|
mono/msbuild
|
/mono/build/gen_msbuild_wrapper.sh
|
UTF-8
| 363
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Generate the msbuild wrapper script from msbuild-mono-deploy.in.
# $1 = mono prefix dir, $2 = output dir (receives "<outdir>/msbuild").
if [ $# -ne 2 ]; then
    echo "Usage: $0 <mono_prefix_dir> <out_dir>"
    exit 1
fi

REPO_ROOT="$PWD/../../"
tmp_wrapper=msbuild-mono-deploy.tmp

# Substitute the mono install locations into the template.
sed -e 's,@bindir@,'$1'/bin,' -e 's,@mono_instdir@,'$1/lib/mono',' $REPO_ROOT/msbuild-mono-deploy.in > "$tmp_wrapper"
chmod +x "$tmp_wrapper"

# Install the wrapper and clean up the intermediate file.
mkdir -p $2
cp "$tmp_wrapper" $2/msbuild
rm -f "$tmp_wrapper"
| true
|
e63a62c8f9bb53ad227099c74a0a596af1152f0f
|
Shell
|
vayizmir/gargoyle-plugins
|
/plugin-gargoyle-logs/files/www/utility/logs_download.sh
|
UTF-8
| 1,546
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/haserl
<?
# Copyright (c) 2013 Saski
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.

# Gargoyle CGI: serve the system log as a file download.
# Validate the session cookie first; on failure the validator redirects
# to login.sh.
eval $( gargoyle_session_validator -c "$COOKIE_hash" -e "$COOKIE_exp" -a "$HTTP_USER_AGENT" -i "$REMOTE_ADDR" -r "login.sh" -t $(uci get gargoyle.global.session_timeout) -b "$COOKIE_browser_time" )

# "file" log_type: serve the configured log file (plus the GET "plik"
# suffix); otherwise capture the ring buffer via logread to a temp file.
# NOTE(review): $GET_plik is client-supplied and concatenated into the
# path unchecked -- potential path traversal; confirm upstream filtering.
type=$(uci get system.@system[0].log_type -q)
if [ "$type" = "file" ]; then
	path=$(uci get system.@system[0].log_file)$GET_plik
else
	logread > /tmp/logi.txt;
	path="/tmp/logi.txt";
fi

# BUGFIX: the original `[ ! -e "$path"]` was missing the space before the
# closing ], so the test always errored and the download branch always ran.
if [ ! -e "$path" ] ; then
	echo "Content-type: text/plain"
	echo ""
	echo "Blad: Plik nie istnieje."
else
	# NOTE(review): $path is a file, so this cd always fails (errors are
	# discarded); it looks like a leftover -- confirm before removing.
	cd "$path" >/dev/null 2>&1
	echo "Content-type: application/octet-stream"
	echo "Content-Disposition: attachment; filename=$path"
	echo ""
	cat "$path"
	# Temp capture is only created in the non-"file" branch; remove it.
	if [ "$type" != "file" ]; then
		rm /tmp/logi.txt >/dev/null 2>&1
	fi
fi
?>
| true
|
8ccbf69f29dc7d45ea44f7f24835dfec599aae82
|
Shell
|
medined/accumulo_stackscript
|
/install_to_home_directory/install-packages.sh
|
UTF-8
| 748
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install build prerequisites for Accumulo, including maven3 from a PPA.
source ./setup.sh

# Register the natecarlson maven3 PPA (precise) as an apt source.
for repo_type in deb deb-src; do
    echo "$repo_type http://ppa.launchpad.net/natecarlson/maven3/ubuntu precise main" | sudo tee -a /etc/apt/sources.list
done
sudo apt-get update

sudo apt-get -y install curl maven2 openssh-server openssh-client terminator
sudo apt-get -y install openjdk-6-jdk subversion screen g++ make meld build-essential g++-multilib
sudo apt-get -y --force-yes install maven3

# Make `mvn` point at maven3. maven2 stays reachable via
# /usr/share/maven2/bin/mvn.
sudo rm /usr/bin/mvn
sudo ln -s /usr/share/maven3/bin/mvn /usr/bin/mvn
echo "Installed packages"
| true
|
385a2ccdd12a0ac84118632222beb35b375a638b
|
Shell
|
kaarelvent/skriptimine
|
/praks8/yl4
|
UTF-8
| 600
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Primality test: find the smallest divisor >= 2 of the entered number;
# the number is prime exactly when that divisor equals the number itself.
echo -n "Sisesta suvaline täisarv: "
read num

divisor=2
# 0 and 1 are not prime by definition.
if [ $num -eq 0 -o $num -eq 1 ]; then
    echo "Sisestatud arv ei ole algarv"
else
    # Walk the divisor upwards until it divides num evenly.
    while [ $((num % divisor)) -ne 0 ]; do
        divisor=$((divisor + 1))
    done
    # Smallest divisor == the number itself <=> prime.
    if [ $num -eq $divisor ]; then
        echo "$num on algarv"
    else
        echo "$num ei ole algarv"
    fi
fi
| true
|
85cb1f4e98e224dadb3cc18e73f2199e332a123a
|
Shell
|
jseun/ddcos
|
/scripts/run/0225all-rootfs-squash
|
UTF-8
| 163
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Squash each root filesystem tree under $SQUASHFSDIR into a sibling
# <name>.squashfs image using xz compression.
source ./functions   # provides log_begin_msg/log_end_msg and SQUASHFSDIR -- TODO confirm
log_begin_msg
for rootfs in ${SQUASHFSDIR}/*/; do
	# ${rootfs%%/} strips the trailing slash so the image lands beside the dir.
	mksquashfs $rootfs ${rootfs%%/}.squashfs -comp xz -noappend
done
log_end_msg
| true
|
13067409494118acc20c5172e44b2a1c4ae2665b
|
Shell
|
nichtich/minecraft-pi
|
/mcpi-message
|
UTF-8
| 360
| 3
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
set -e
# exit code of this script can be used to check of minecraft is running
# With an argument: connect to the local Minecraft Pi server and post $1
# to the in-game chat. Without one: just attempt the connection.
# python's stderr is suppressed; `set -e` turns a failed connect into a
# non-zero exit status for the caller.
if [ $# -gt 0 ]; then
# NOTE(review): "$1" is interpolated into Python source -- a double quote
# in the message would break/inject code; confirm input is trusted.
python <<PYTHON 2>/dev/null
from mcpi.minecraft import Minecraft
Minecraft.create().postToChat("$1")
PYTHON
else
# just connect without message
python <<PYTHON 2>/dev/null
from mcpi.minecraft import Minecraft
Minecraft.create()
PYTHON
fi
| true
|
13647b0f3900527132e65f05d32b8706932a456a
|
Shell
|
hargup/dotfiles
|
/zshrc
|
UTF-8
| 3,732
| 2.890625
| 3
|
[] |
no_license
|
# Hooks
typeset -ga chpwd_functions
typeset -ga precmd_functions
typeset -ga preexec_functions

# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh

# Soft-cap per-process virtual memory (kilobyte units for ulimit -v).
ulimit -Sv 5000000

# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
# ZSH_THEME="robbyrussell"
# ZSH_THEME="random"
ZSH_THEME="juanghurtado"

alias rm="rm -i"
alias youtube-dl="youtube-dl --restrict-filename --add-metadata --xattrs --ignore-errors"
alias youtube-dl-ea="youtube-dl --extract-audio"
alias xclip="xclip -selection clipboard"
alias asearch="sudo apt-cache search"
# BUGFIX: was "sudo apt-get install search" -- the stray trailing word
# "search" (pasted from the alias above) would be installed every time.
alias ainstall="sudo apt-get install"
alias halt="sudo shutdown -h"
alias reboot="sudo shutdown -r"
alias open="xdg-open"
alias du='du -ch --apparent-size'
alias pdftotext="pdftotext -layout"
alias sudo="sudo -E" # necessary to pass proxy enviornment variable to sudo

# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"

# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"

# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"

# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13

# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"

# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"

# Uncomment the following line to disable command auto-correction.
# DISABLE_CORRECTION="true"

# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"

# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"

# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# HIST_STAMPS="mm/dd/yyyy"

# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder

# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
plugins=(git)

source $ZSH/oh-my-zsh.sh

# User configuration

export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games"
# export MANPATH="/usr/local/man:$MANPATH"

# You may need to manually set your language environment
# export LANG=en_US.UTF-8

# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
#   export EDITOR='vim'
# else
#   export EDITOR='mvim'
# fi

# Compilation flags
# export ARCHFLAGS="-arch x86_64"

# Source every script in ~/.zsh; the *([1]) glob qualifier picks the first
# match, so the guard only checks that the directory is non-empty.
rc=$HOME/.zsh
if [ -f $rc/*([1]) ]; then
    for script in $rc/*; do
        source $script
    done
fi

# cd that also lists the new directory's contents.
function cd {
    builtin cd "$@" && ls -F
}

# Register personal autoloaded functions.
fpath=($HOME/.zsh/functions $fpath)
autoload -U $HOME/.zsh/functions/*(:t)

# ssh
# export SSH_KEY_PATH="~/.ssh/dsa_id"
export VMAIL_VIM=gvim

# added by Anaconda 2.0.1 installer
export PATH="/home/hargup/anaconda/bin:$PATH"
# export PATH="/home/hargup/install/bin:$PATH"
# source activate py3.4
# xrandr --output HDMI1 --mode 1920x1080 --left-of LVDS1
# xrandr --output LVDS1 --mode 1366x768 --right-of HDMI1
# source activate py3.4
unsetproxy
eval $(thefuck --alias)

# Add RVM to PATH for scripting. Make sure this is the last PATH variable change.
export PATH="$PATH:$HOME/.rvm/bin"
source ~/.rvm/scripts/rvm
| true
|
59c4f3743b7d827fbca884b401e0c971d77f7811
|
Shell
|
shaunwbell/FOCI_Analysis
|
/WorldViewRetrieval/worldview_modis_wget.sh
|
UTF-8
| 458
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Fetch VIIRS imagery for days-of-year 174-194, then stamp each downloaded
# jpeg with its own filename.
progdir="/Volumes/WDC_internal/Users/bell/Programs/Python/FOCI_Analysis/WorldViewRetrieval/"

for doy in {174..194}; do
    python ${progdir}EasternEqPacific.py ${doy} VIIRS jpeg large
done

# Annotate: white text on translucent black, top-right corner; ImageMagick
# expands %t to the image's base filename.
for img in /Volumes/WDC_internal/Users/bell/Programs/Python/FOCI_Analysis/WorldViewRetrieval/*.jpeg; do
    convert ${img} -fill white -undercolor '#00000080' -pointsize 50 -gravity NorthEast -annotate +10+10 %t ${img}.jpg
done
| true
|
61145b4897e13fd20678c789713c47efaa462543
|
Shell
|
Lohkrii/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/102-lets_parse_apache_logs
|
UTF-8
| 146
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Print the client address (field 1) and HTTP status code (field 9) for
# each line of an Apache access log in the current directory.
log_file="apache-access.log"
if [ -e "$log_file" ]; then
    awk '{ print $1 " " $9}' "$log_file"
fi
| true
|
1ae052c8de419234c3102736b2883db45bce687c
|
Shell
|
cunguyen-agilityio/oh-my-ops
|
/test/plugin/git/util-remote-test.sh
|
UTF-8
| 2,193
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
# Test suite for the git plugin's remote helpers, exercised against a
# local fake gitlab instance. Relies entirely on the _do_* framework
# loaded by the test runner.
_do_plugin 'git'
_do_plugin 'gitlab'

# Path of the throwaway repository created by test_setup.
fake_repo_dir=""

function test_setup() {
    # Makes an empty git repository
    {
        fake_repo_dir=$(_do_dir_random_tmp_dir) &&
        _do_git_util_init "${fake_repo_dir}" &&
        cd "$fake_repo_dir"
    } || _do_assert_fail

    _do_repo_dir_add "${fake_repo_dir}" "fakerepo"
    _do_gitlab 'fakerepo'
    # Make sure no fake gitlab server is left over from a previous run.
    do-fakerepo-gitlab-stop &> /dev/null
}

function test_teardown() {
    # Removes the temp repository
    do-fakerepo-gitlab-stop &> /dev/null
    [ -z "$fake_repo_dir" ] || rm -rfd "$fake_repo_dir"
}

# End-to-end: commit, create/query/remove remotes, then push and pull
# against a freshly started fake gitlab project.
function test_git_util_commit() {
    echo "Hello" > 'README.md'
    _do_git_util_commit "${fake_repo_dir}" 'initial commit' || _do_assert_fail
    ! _do_git_util_is_dirty "${fake_repo_dir}" || _do_assert_fail

    ! _do_git_util_remote_exists "${fake_repo_dir}" "gitlab" || _do_assert_fail
    local url
    url=$(_do_gitlab_util_root_user_git_repo_url 'fakerepo' 'fakerepo') || _do_assert_fail

    # Creates gitlab remote
    _do_git_util_create_remote "${fake_repo_dir}" "gitlab" "${url}" || _do_assert_fail
    _do_git_util_remote_exists "${fake_repo_dir}" "gitlab" || _do_assert_fail

    # Makes sure the url is stored correctly
    _do_assert_eq "${url}" "$(_do_git_util_get_remote_url "${fake_repo_dir}" "gitlab")"

    # Creates again, should be ignored
    _do_git_util_create_remote_if_missing "${fake_repo_dir}" "gitlab" "${url}" || _do_assert_fail

    # Creates another, should be executed
    _do_git_util_create_remote_if_missing "${fake_repo_dir}" "another" "${url}" || _do_assert_fail
    _do_git_util_remote_exists "${fake_repo_dir}" "another" || _do_assert_fail
    _do_git_assert_remote_list_size "${fake_repo_dir}" "2"

    _do_git_util_remove_remote "${fake_repo_dir}" "another"
    ! _do_git_util_remote_exists "${fake_repo_dir}" "another" || _do_assert_fail
    _do_git_assert_remote_list_size "${fake_repo_dir}" "1"

    # Runs gitlab server
    do-fakerepo-gitlab-start || _do_assert_fail

    # Create a gitlab project named fake repo
    _do_gitlab_util_create_project 'fakerepo' 'fakerepo' || _do_assert_fail

    # Push code
    _do_git_util_push "${fake_repo_dir}" 'gitlab' || _do_assert_fail

    # Pull code
    _do_git_util_pull "${fake_repo_dir}" 'gitlab' || _do_assert_fail
}
| true
|
23a00100e33eeb29fdab310d640cd8cbfb05dba7
|
Shell
|
Anirudh-C/dotfiles
|
/setup.sh
|
UTF-8
| 996
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# Install dotfiles: X11/keyboard configs, tmux config, shell config and the
# ~/.config tree. Each step runs in the background while loading() animates
# a progress spinner keyed to the job's PID.
# NOTE(review): `echo -ne` below is a bashism under #!/bin/sh -- on dash
# the flags print literally; confirm /bin/sh is bash on target systems.

# Copy X11/keyboard configs into $HOME, replacing any existing ones.
setosconfig() {
	rm ~/.xinitrc
	cp OS/xinitrc ~/.xinitrc
	rm ~/.Xresources
	cp OS/Xresources ~/.Xresources
	rm ~/.xmodmap
	cp OS/Keyboard/xmodmap ~/.xmodmap
}

# Install tmux configuration.
settmuxconfig() {
	rm ~/.tmux.conf
	cp Work/tmux.conf ~/.tmux.conf
}

# Placeholder for shell config.
# BUGFIX: the original empty body `{ }` is a shell syntax error that made
# the whole script fail to parse; ':' is a valid no-op body.
setshellconfig() {
	:
}

# Replace ~/.config wholesale with the repo's Config/ tree.
setconfigdir() {
	rm -rf ~/.config
	cp -r Config/ ~/.config
}

# Animate "$1..." while the most recently backgrounded job ($!) is alive,
# then print a Finished line.
loading() {
	mypid=$!
	loadingText=$1

	echo -ne "$loadingText\r"

	while kill -0 $mypid 2>/dev/null; do
		echo -ne "$mypid :: $loadingText.\r"
		sleep 0.5
		echo -ne "$mypid :: $loadingText..\r"
		sleep 0.5
		echo -ne "$mypid :: $loadingText...\r"
		sleep 0.5
		echo -ne "\r\033[K"
		echo -ne "$mypid :: $loadingText\r"
		sleep 0.5
	done
	echo "$loadingText...Finished"
}

setosconfig & loading "Getting OS config files"
settmuxconfig & loading "Getting tmux config"
setshellconfig & loading "Getting shell config"
setconfigdir & loading "Getting .config directory"
| true
|
38b21b61bee5f72d40cf24f5693b045bb348ac54
|
Shell
|
socc19-p10/vSMT-IO
|
/scripts/update_submodule.sh
|
UTF-8
| 469
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Commit and push each benchmark app repo, then record the updated
# submodule pointers in the parent repository.
APP_DIR="$HOME/workshop/vPair/apps"

# Stage, commit ($2 = message) and push everything in repo $1.
commit_and_push() {
	pushd "$1"
	git add -A
	git commit -m "$2"
	git push
	popd
}

commit_and_push "$APP_DIR/parsec" "update parsec app"
commit_and_push "$APP_DIR/phoenix" "update phoenix app"
commit_and_push "$APP_DIR/ImageMagick6" "update imagemagic app"

# Sync every submodule to its upstream master, then commit the new
# submodule revisions in the parent repo.
git pull
git submodule foreach git pull origin master
git add -A
git commit -m "update submodule"
git push
| true
|
0f22af826a42319943a073ba8d8fcac4539e8673
|
Shell
|
play-linux/shell-play
|
/shell-demo/demo5.sh
|
UTF-8
| 639
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
#------ arrays and maps ------

# 1. Indexed array definition; elements are addressed by 0-based subscript.
arr=(10 20 30 40 50)
echo "${arr[1]}" "${arr[4]}"

# 2. Iterate an array: first by value, then by index.
for val in "${arr[@]}"; do
    printf "%d," "$val"
done
echo
for idx in "${!arr[@]}"; do
    printf "%d->%d\n" "$idx" "${arr[idx]}"
done

# 3. A map (associative array, string subscripts) must be declared with
#    `declare -A`; plain indexed arrays need no declaration.
declare -A m
m["name"]="lyer"
m["age"]="18"
echo ${m["name"]} ${m["age"]}

# 4. Iterate a map: over its keys (printing key and value) ...
for key in "${!m[@]}"; do
    echo $key ${m[${key}]}
done
# ... and over its values alone.
for val in "${m[@]}"; do
    echo "$val"
done
| true
|
f405fc4d191efc535a238851ba1b08b1ac1aa53f
|
Shell
|
rctraining/rmacc_2018_container_tutorial
|
/run_container_w_udocker/make_udocker.sh
|
UTF-8
| 796
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install udocker with its state directory relocated from $HOME to
# /projects/$USER, leaving a ~/.udocker symlink pointing at it.

#set base dir where you want the software installed
BASE_DIR=/projects/$USER

#now remove any previous instances of .udocker from these directories
#(don't do this unless you want to get rid of everything previously installed for udocker)
rm -Rf $BASE_DIR/.udocker
rm -Rf $HOME/.udocker
echo "done"

#Now download and install udocker
# `udocker install` populates $HOME/.udocker with its runtime pieces.
curl https://raw.githubusercontent.com/indigo-dc/udocker/master/udocker.py > udocker
chmod u+rx ./udocker
./udocker install

#move the .udocker directory from $HOME to $BASE_DIR
mv $HOME/.udocker $BASE_DIR/

#move the udocker executable from $PWD to the $UDOCKER_DIR/bin and add to PATH
#mv $PWD/udocker $BASE_DIR/.udocker/bin/

#Now create a symbolic link in your home directory to the ./udocker directory
cd $HOME
ln -sf $BASE_DIR/.udocker .
| true
|
a6d4d18d295b8d39cd36407895e84981720172fe
|
Shell
|
grml/grml-live
|
/etc/grml/fai/config/scripts/GRMLBASE/91-update-pciids
|
UTF-8
| 1,540
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Filename: ${GRML_FAI_CONFIG}/config/scripts/GRMLBASE/91-update-pciids
# Purpose:  update pciids
# Authors:  (c) Michael Prokop <mika@grml.org>
# Bug-Reports:   see http://grml.org/bugs/
# License: This file is licensed under the GPL v2.
################################################################################

# FAI class hook: skip entirely for offline builds.
if ifclass NO_ONLINE ; then
  echo "Ignoring script 91-update-pciids as NO_ONLINE is set."
  exit 0
fi

set -u

# Translate update-pciids' exit code 4 (download/network failure inside
# the chroot) into a friendly explanation and a success exit so the build
# continues; every other code is passed straight through.
bailout() {
  if [ "${1:-}" = "4" ] ; then
    echo "Warning: update-pciids returned with exit code 4." >&2
    # be verbose in logs
    echo "Warning: update-pciids returned with exit code 4."
    echo "-> This indicates that networking inside the chroot did not work"
    echo " while GRMLBASE/91-update-pciids was running."
    echo " To address this issue you can either configure /etc/resolv.conf"
    echo " accordingly or just run dnsmasq on your host."
    exit 0
  fi

  exit "${1:-0}"
}

# Use timeout(1) if the chroot ($target) has it, so a hung download cannot
# stall the whole build.
[ -x $target/usr/bin/timeout ] && TIMEOUT="10" || TIMEOUT=""

if ! [ -x "${target}/usr/sbin/update-pciids" ] && ! [ -x "${target}/usr/bin/update-pciids" ] ; then
  echo "Warning: update-pciids not installed (neither /usr/sbin/update-pciids nor /usr/bin/update-pciids exists"
  exit 0
fi

echo "Updating PCI-IDs"
# $ROOTCMD runs the command inside the target chroot.
if [ -n "$TIMEOUT" ] ; then
  $ROOTCMD timeout $TIMEOUT update-pciids
  bailout $?
else
  $ROOTCMD update-pciids
  bailout $?
fi

## END OF FILE #################################################################
# vim:ft=sh expandtab ai tw=80 tabstop=4 shiftwidth=2
| true
|
5fa7d66a01512ca08c6d8895da3111b9dafdeb9d
|
Shell
|
TheusZer0/ctf-archives
|
/Balsn/2020/House_of_Cats/share/run.sh
|
UTF-8
| 1,816
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CTF challenge launcher: demand a proof-of-work, build a minimal chroot
# holding the challenge binary, run it, and re-run with the flag present
# only if the first run wrote the expected check token.

# Proof-of-work: the client must supply a suffix such that
# sha256(prefix+suffix) begins with $1 zero bits (capped at 60).
pow(){
	difficulty=$1
	if [ $difficulty -gt 60 ]; then
		echo 'too hard'
		exit
	fi
	# 8 random bytes rendered as 16 hex chars form the challenge prefix.
	chalprefix=$(hexdump -n 8 -e '2/4 "%08x" 1 "\n"' /dev/urandom)
	echo "sha256($chalprefix+???) == $(printf '0%.0s' $(seq 0 $difficulty))($difficulty)..."
	printf "> "
	read -t 600 answer
	# First 15 hex digits of the digest, uppercased so bc accepts base-16.
	res=$(printf "$chalprefix$answer"|sha256sum|awk '{print $1}'|cut -c1-15|tr [a-f] [A-F])
	rshift=$((60-$difficulty))
	res=$(echo "obase=10; ibase=16; $res" | bc)
	# Shifting out all but the top $difficulty bits: they must all be zero.
	if [ $(($res>>$rshift)) -ne 0 ]; then
		echo 'POW failed'
		exit
	else
		echo 'POW passed'
	fi
}

# Assemble a mostly read-only chroot with glibc 2.31, timeout(1) and the
# challenge binary, plus a single world-writable scratch directory.
prepare_env(){
	WORKDIR=`mktemp -d -p "/tmp/"`
	mkdir -p $WORKDIR/lib/x86_64-linux-gnu/
	mkdir -p $WORKDIR/lib64/
	mkdir -p $WORKDIR/usr/bin
	mkdir -p $WORKDIR/home/HouseofCats/writable/
	cp /lib/x86_64-linux-gnu/libc-2.31.so $WORKDIR/lib/x86_64-linux-gnu/libc.so.6
	cp /lib/x86_64-linux-gnu/ld-2.31.so $WORKDIR/lib64/ld-linux-x86-64.so.2
	cp /lib/x86_64-linux-gnu/libpthread-2.31.so $WORKDIR/lib/x86_64-linux-gnu/libpthread.so.0
	cp /lib/x86_64-linux-gnu/librt-2.31.so $WORKDIR/lib/x86_64-linux-gnu/librt.so.1
	cp /usr/bin/timeout $WORKDIR/usr/bin/timeout
	cp /home/HouseofCats/house_of_cats $WORKDIR/home/HouseofCats/house_of_cats
	chmod -R 555 $WORKDIR
	chmod 777 $WORKDIR/home/HouseofCats/writable/
}

# Hide all diagnostics from the player.
exec 2>/dev/null
pow 22
prepare_env
# First run: unprivileged, 15-minute cap, no flag present yet.
chroot --userspec=HouseofCats:HouseofCats $WORKDIR timeout 900 /home/HouseofCats/house_of_cats
# The binary must have left the magic token in writable/check to unlock
# the second, flag-bearing run.
RES=`head -c 10 $WORKDIR/home/HouseofCats/writable/check`
if [ $RES = "asimplechk" ];
then
	echo "Check passed"
	cp /home/HouseofCats/flag $WORKDIR/home/HouseofCats/flag
	chroot --userspec=HouseofCats:HouseofCats $WORKDIR timeout 900 /home/HouseofCats/house_of_cats
else
	echo "Check failed"
fi
rm -fr $WORKDIR
|
84768c31f188167e09ba4d47292092bf190432ed
|
Shell
|
termux/termux-packages
|
/packages/gitoxide/build.sh
|
UTF-8
| 1,778
| 2.921875
| 3
|
[
"Apache-2.0"
] |
permissive
|
TERMUX_PKG_HOMEPAGE=https://github.com/Byron/gitoxide
TERMUX_PKG_DESCRIPTION="Rust implementation of Git"
TERMUX_PKG_LICENSE="Apache-2.0, MIT"
TERMUX_PKG_LICENSE_FILE="LICENSE-APACHE, LICENSE-MIT"
TERMUX_PKG_MAINTAINER="@termux"
TERMUX_PKG_VERSION=0.1.0
TERMUX_PKG_REVISION=1
_RELEASE_PREFIX="git-hashtable"
TERMUX_PKG_SRCURL=https://github.com/Byron/gitoxide/archive/refs/tags/${_RELEASE_PREFIX}-v${TERMUX_PKG_VERSION}.tar.gz
TERMUX_PKG_SHA256=33f761b9e6bb268a2ad725bf88e85808e4a9c7e06cface2fd637ac14dc2382fc
TERMUX_PKG_AUTO_UPDATE=true
TERMUX_PKG_UPDATE_VERSION_REGEXP="\d+\.\d+\.\d+"
TERMUX_PKG_DEPENDS="resolv-conf"
TERMUX_PKG_BUILD_IN_SRC=true
termux_pkg_auto_update() {
# Get latest release tag:
local tag
tag="$(termux_github_api_get_tag "${TERMUX_PKG_SRCURL}")"
# check if this is not a ${_RELEASE_PREFIX} release:
if grep -qP "^${_RELEASE_PREFIX}-v${TERMUX_PKG_UPDATE_VERSION_REGEXP}\$" <<<"$tag"; then
termux_pkg_upgrade_version "$tag"
else
echo "WARNING: Skipping auto-update: Not a ${_RELEASE_PREFIX} release($tag)"
fi
}
termux_step_pre_configure() {
	termux_setup_cmake
	termux_setup_rust
	: "${CARGO_HOME:=$HOME/.cargo}"
	export CARGO_HOME
	# Pre-fetch crates so the vendored trust-dns-resolver sources exist on
	# disk, then patch every copy to use Termux's resolv.conf path.
	cargo fetch --target "${CARGO_TARGET_NAME}"
	for d in $CARGO_HOME/registry/src/*/trust-dns-resolver-*; do
		# "|| :" keeps the build going when a copy is already patched.
		sed -e "s|@TERMUX_PREFIX@|$TERMUX_PREFIX|" \
			$TERMUX_PKG_BUILDER_DIR/trust-dns-resolver.diff \
			| patch --silent -p1 -d ${d} || :
	done
}
termux_step_make() {
	# Release build with only the "max-pure" feature set: pure-Rust
	# implementations, avoiding C dependencies.
	cargo build \
		--jobs $TERMUX_MAKE_PROCESSES \
		--target $CARGO_TARGET_NAME \
		--release \
		--no-default-features \
		--features max-pure
}
termux_step_make_install() {
	# Install both workspace binaries from the release target directory
	# into the prefix bin directory with mode 0755.
	local _bin
	for _bin in gix ein; do
		install -Dm755 -t $TERMUX_PREFIX/bin target/${CARGO_TARGET_NAME}/release/${_bin}
	done
}
| true
|
ba20f48059a24d6df5d47a5a807481b41f2e415c
|
Shell
|
free2rhyme/lct_rpm_tool
|
/rpm/rpm_build.sh
|
UTF-8
| 4,524
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
set -u
#set -x
CUR_DIR=$( cd "$( dirname "$0" )" && pwd )
LCT_SVC_PRJ_ROOT=$CUR_DIR/../../..
declare -a so_array=""
declare -a so_dir_array=""
declare -a process_array=""
declare rpm_service=""
RPM_PROCESS_DIR=${LCT_SVC_PRJ_ROOT}/src/lct_rpm_tool/process
usage()
{
    # Print the accepted command line and abort; callers rely on the
    # non-zero exit status.
    printf 'usage: %s [-release | -debug ] [-pre] [-publish] [targets]\n' "$(basename $0)"
    exit 1
}
fatal()
{
    # Report a fatal error and terminate the whole script with status 1.
    local msg="[Error] $1"
    echo "$msg"
    exit 1
}
retrieve_svc_name()
{
    # Scan $RPM_PROCESS_DIR for *.name files and take the first non-empty
    # line found as the service name (stored in the global rpm_service).
    if [ -d $RPM_PROCESS_DIR ]; then
        for file in `ls $RPM_PROCESS_DIR`
        do
            file_path=$RPM_PROCESS_DIR/$file
            if [ -f $file_path ]; then
                if [ ${file##*.}x = "name"x ]; then
                    # `cat` in a for-loop iterates whitespace-separated
                    # tokens; break on the first non-empty one.
                    for i in `cat $file_path`
                    do
                        if [ -n "$i" ]; then
                            rpm_service=$i
                            break;
                        fi
                    done
                fi
            fi
        done
    fi
}
export_ctx()
{
    ## source environment variables and library functions
    # Pulls in the PREBUILD_* paths, MAKEFILES_LIST, RPM_TAR_NAME_PRE and
    # the prepare_* / handle_so_* helpers used by the steps below.
    . ${LCT_SVC_PRJ_ROOT}/src/lct_rpm_tool/rpm/rpm_build.env
    . ${LCT_SVC_PRJ_ROOT}/src/lct_rpm_tool/rpm/rpm_build_lib.sh
    . ${LCT_SVC_PRJ_ROOT}/src/lct_rpm_tool/rpm/rpm_build_bin.sh
    . ${LCT_SVC_PRJ_ROOT}/src/lct_rpm_tool/rpm/rpm_build_config.sh
    . ${LCT_SVC_PRJ_ROOT}/src/lct_rpm_tool/rpm/rpm_build_script.sh
}
root_disallowed()
{
    # Building as root is forbidden; abort via fatal() when we are root.
    case "$(whoami)" in
        root) fatal "This script ($0) should not be run by root" ;;
    esac
}
retrieve_svc()
{
    # Collect the process/service types from every *.type file under
    # $RPM_PROCESS_DIR into the global process_array (space-joined).
    # Note: list "$RPM_PROCESS_DIR" itself -- the previous
    # "ls $RPM_PROCESS_DIR*" glob also matched sibling entries such as
    # "process.bak" and then broke the per-file path construction below.
    if [ -d $RPM_PROCESS_DIR ]; then
        for file in `ls $RPM_PROCESS_DIR`
        do
            file_path=$RPM_PROCESS_DIR/$file
            if [ -f $file_path ]; then
                if [ ${file##*.}x = "type"x ]; then
                    process_array="$process_array `cat $file_path`"
                fi
            fi
        done
    fi
}
check_dir()
{
    # Start from a clean prebuild tree: remove any stale output, then
    # recreate every directory the packaging steps write into.
    # mkdir -p is idempotent, so the seven separate "[ ! -d ] && mkdir"
    # checks of the old version collapse into one loop.
    local d
    if [ -d "$PREBUILD_DIR" ]; then
        rm -fr "$PREBUILD_DIR"
    fi
    for d in "$PREBUILD_LIB_DIR" "$PREBUILD_BIN_DIR" "$PREBUILD_CONFIG_DIR" \
             "$PREBUILD_DATA_DIR" "$PREBUILD_DOC_DIR" "$PREBUILD_SCRIPT_DIR" \
             "$PREBUILD_LOG_DIR"; do
        mkdir -p "$d"
    done
}
handle_mk()
{
    # Inspect one makefile: if it builds a TARGET of one of the given rpm
    # types, collect its SYS_LIB / SYS_LIB_DIR lines and stage the
    # matching libraries and binary into the prebuild tree.
    # $1 - makefile path; $2 - NAME of an array variable holding rpm types.
    local mak_file=${1}
    local rpm_type_name=$2[@]
    # indirect expansion: dereference the caller-named array
    local rpm_type=("${!rpm_type_name}")
    for rt in ${rpm_type[@]}
    do
        if grep -wq TARGET "${mak_file}" && grep -wq $rt "${mak_file}" ; then
            readarray -t so_lines < <(grep -w SYS_LIB "${mak_file}" | grep -v '#')
            handle_so_define so_lines[@]
            #echo $so_array
            readarray -t so_dir_lines < <(grep -w SYS_LIB_DIR "${mak_file}" | grep -v '#')
            handle_so_dir_define so_dir_lines[@]
            # echo $so_dir_array
            # Only stage artifacts when at least one library dir was found.
            if [ 0 -ne ${#so_dir_array[@]} ]; then
                prepare_lib so_dir_array[@] so_array[@]
                prepare_bin $rt
            fi
        fi
    done
}
handle_mk_list()
{
    # Walk every makefile in MAKEFILES_LIST (set via rpm_build.env) and
    # process only those that build an "app" TARGET_TYPE.
    for mf in ${MAKEFILES_LIST}
    do
        if grep -wq TARGET_TYPE "$mf" && grep -wq app "$mf" ; then
            handle_mk ${mf} process_array[@]
        fi
    done
}
version=""
prepare_version()
{
    # Parse the four "constexpr" version components out of
    # $LCT_VERSON_FILE and join them as a.b.c.d (global: version).
    declare -a versions=($(grep constexpr ${LCT_VERSON_FILE} | sed 's/ \{2,80\}/ /g' | awk -F '[ ;]' '{print $6}'))
    version_size=${#versions[@]}
    if [ $version_size -eq 4 ]; then
        version=${versions[0]}.${versions[1]}.${versions[2]}.${versions[3]}
    else
        fatal "version file is invalid"
    fi
}
prepare_tar()
{
    # Pack the prebuild tree into <prefix>_<version>.tar.gz under
    # build/rpm, then remove the staging directory.
    local tar_name=${RPM_TAR_NAME_PRE}_${version}.tar.gz
    echo "tarfile: $(pwd)/${tar_name}"
    cd ${LCT_SVC_PRJ_ROOT}/build/rpm
    if [ -f ${tar_name} ]; then
        rm -f ${tar_name}
    fi
    # Archive relative to the prebuild dir's basename so the tarball
    # unpacks into a single top-level directory.
    tar -czpf ${LCT_SVC_PRJ_ROOT}/build/rpm/${tar_name} `(basename ${PREBUILD_DIR})`
    rm -fr ${PREBUILD_DIR}
    echo "rpm ${tar_name} is successful built"
}
# Main build sequence: discover the service, load helpers, stage files,
# then produce the tarball.
retrieve_svc_name
export_ctx
root_disallowed
retrieve_svc
check_dir
prepare_version
handle_mk_list
prepare_script
prepare_config
prepare_tar
| true
|
3ab0db0b93e2f64fc1602c5325e30f49155a6682
|
Shell
|
MoisesTapia/InfoSystem
|
/InfoSystem.sh
|
UTF-8
| 2,733
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
#fecha: 08.092019
#AUTOR: EQUINOKX
#Proposito: Informacion del sistema con las herramientas mas usadas
NOMBRE="Dart - Security"

# Run one of the wrapper scripts: clear the screen, announce the tool,
# optionally pause, then make the wrapper executable and run it.
# $1 - tool name to print, $2 - wrapper script, $3 - optional delay (s)
run_tool() {
clear
echo "$1"
[ -n "${3:-}" ] && sleep "$3"
chmod +x "$2"
./"$2"
}

# Placeholder branch for tools that are still under development.
in_development() {
clear
echo "$1"
echo "Desarrollo"
}

clear
echo " "
echo "           Herramientas de Informacion del sistema            "
echo "                       $NOMBRE                 "
echo " "
echo "              by:Equinockx  hc-security.com.mx             "
echo " "
echo " "
echo " ----------------------------------------------"
echo " | 1- uname    |   11- uptime(en Desarrollo)  |"
echo " | 2- fdisk    |   12- w     (en Desarrollo)  |"
echo " | 3- blkid    |   13- lsof  (en Desarrollo)  |"
echo " | 4- free     |   14- last  (en Desarrollo)  |"
echo " | 5- mount    |   15- lastb (en Desarrollo)  |"
echo " | 6- lsmod    |   16- dmesg (en Desarrollo)  |"
echo " | 7- lspci    |   17- ps    (en Desarrollo)  |"
echo " | 8- lsusb    |   18- Salir                  |"
echo " | 9- top      |                              |"
echo " | 10- df      |                              |"
echo " ----------------------------------------------"
echo "     El Script se seguira actualizando     "
read -p "> " opc
# case avoids the "integer expression expected" error that the old
# [ $opc -eq N ] chain raised on empty or non-numeric input.
case "$opc" in
1) run_tool "uname" uname.sh ;;
2) run_tool "fdisk" fdisk.sh ;;
3) run_tool "blkid" blkid.sh 3 ;;
4) run_tool "free" freecomand.sh ;;
5) run_tool "mount" mountcomand.sh ;;
6) run_tool "lsmod" lsmodcomand.sh ;;
7) run_tool "lspci" lspcicomand.sh 3 ;;
8) run_tool "lsusb" lsusbcomand.sh 3 ;;
9) run_tool "top" topcomand.sh ;;
10) run_tool "df" dfcomand.sh ;;
11) in_development "uptime" ;;
12) in_development "w" ;;
13) in_development "lsof" ;;
14) in_development "last" ;;
15) in_development "lastb" ;;
16) in_development "dmesg" ;;
17) in_development "ps" ;;
*) clear ;;   # 18 (Salir) and any other input just clear the screen
esac
| true
|
c73bd49b8ea42ec97217ca37f2f1a1fe5a137446
|
Shell
|
osen/openbsd_emscripten
|
/files/emcc
|
UTF-8
| 537
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
# Thin launcher: dispatch to the Emscripten tool that shares this
# script's basename, with the environment Emscripten expects.
# Find absolute path of Emscripten
PREFIX="$(cd "$(dirname "$(which "$0")")" && cd .. && pwd)"
EMDIR="$PREFIX/libexec/emscripten"
BASENAME="$(basename "$0")"
EMPROG="$EMDIR/$BASENAME"
# Emscripten now correctly finds python
#export PYTHON=python3.8
# Environmental variables for the site_emscripten script to use
export EMSCRIPTEN="$EMDIR"
export LLVM="$EMDIR/llvm/bin"
export BINARYEN="$EMDIR/binaryen"
export NODE=node
export JAVA=java
# Forward arguments to respective script in Emscripten directory
exec "$EMPROG" "$@"
| true
|
aecc5f88a8f76742a0c7865d81edab894ed305df
|
Shell
|
rgamici/proxy
|
/proxy.bash
|
UTF-8
| 1,540
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Functions to change proxy automatically to use browser and cloud storage
function ProxyOn() {
    # Point GNOME and the current shell at the campus proxy, then restart
    # the cloud-sync clients so they pick the setting up.
    local PROXY="proxy.noc.titech.ac.jp" # proxy host
    local PORT=3128 # proxy port
    gsettings set org.gnome.system.proxy mode 'manual'
    gsettings set org.gnome.system.proxy.http host $PROXY
    gsettings set org.gnome.system.proxy.http port $PORT
    gsettings set org.gnome.system.proxy.https host $PROXY
    gsettings set org.gnome.system.proxy.https port $PORT
    gsettings set org.gnome.system.proxy.ftp host $PROXY
    gsettings set org.gnome.system.proxy.ftp port $PORT
    gsettings set org.gnome.system.proxy.socks host $PROXY
    gsettings set org.gnome.system.proxy.socks port $PORT
    gsettings set org.gnome.system.proxy ignore-hosts '["localhost", "127.0.0.0/8", "wlanauth.noc.titech.ac.jp"]'
    # Both spellings are exported because different tools read different
    # variants of the proxy variables.
    export http_proxy=$PROXY:$PORT
    export https_proxy=$PROXY:$PORT
    export ftp_proxy=$PROXY:$PORT
    export socks_proxy=$PROXY:$PORT
    export HTTP_PROXY=$PROXY:$PORT
    export HTTPS_PROXY=$PROXY:$PORT
    export FTP_PROXY=$PROXY:$PORT
    export SOCKS_PROXY=$PROXY:$PORT
    RestartCloud
}
function KillProxy() {
    # Reset the GNOME proxy settings and clear every proxy variable that
    # ProxyOn exports (both upper- and lower-case spellings).
    gsettings reset-recursively org.gnome.system.proxy
    unset HTTP_PROXY
    unset http_proxy
    unset HTTPS_PROXY
    unset https_proxy
    unset FTP_PROXY   # was "FTP_proxy": wrong case, the exported var survived
    unset ftp_proxy
    unset SOCKS_PROXY # was "SOCKS_proxy": wrong case, the exported var survived
    unset socks_proxy
    RestartCloud
}
function RestartCloud() {
    # dropbox and megasync only read the proxy setting on startup, so they
    # must be restarted for a proxy change to take effect.
    killall dropbox
    dropbox start
    killall megasync
    megasync &
    # Was `$http.proxy`, which expanded the (unset) $http variable and
    # passed the literal ".proxy" to git; use the real env var instead.
    git config --global http.proxy "${http_proxy:-}"
}
| true
|
a7ecafa96b3507e97a2ed7e792b7a66e9ca1531d
|
Shell
|
kudumi/scripts
|
/nix/dd-progress
|
UTF-8
| 892
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Wrapper around dd that periodically prints transfer progress.
# $1: if (input file)
# $2: of (output file)
if [[ $# -ne 2 ]]; then
cat <<EOF
Usage: ${0##*/} if of

This is a wrapper for dd, that will display progress.
EOF
exit 1 # was missing: the script fell through and ran dd with no files
fi

# Source size in MiB; used for the percentage below.
fsize=`ls -s --block-size=1048576 $1 | cut -d' ' -f 1`
file="$$.dd"

# NOTE(review): dd's statistics go to stderr; confirm that '>' captures
# what the tail below expects to read.
dd if=$1 of=$2 bs=1K count=10000 > $file & # start the dd transfer
pid=`pgrep -l '^dd$' | head -n1 | cut -d' ' -f 1`
delay="2s"
# SIGUSR1 makes GNU dd print its current statistics.
ddprint="sudo kill -USR1 $pid"

function control_c() {
rm -f $file
}
trap control_c SIGINT

# Poke dd every $delay so the stats output refreshes.
# NOTE(review): [ $pid ] never becomes false once set, so these loops run
# until interrupted -- confirm intended.
( while [ $pid ]; do sleep $delay && $ddprint; done ) &

while [ $pid ]; do
while [ ! -e $file ]; do
utimer -c 500ms > /dev/null
done # wait for output
out=`tail -n3 $file`
out=`echo $out | tail -n1 | cut -d' ' -f 3`
out=${out#"("}
ratio=$((100*out/fsize))
echo -ne "$out/$fsize\t$ratio%\r" # was "$fzise" (typo)
done

sleep $delay # wait for last output
sleep $delay
rm $file > /dev/null # destroy all evidence
| true
|
946d2984b149df9d0db344a3263e0def06ea337f
|
Shell
|
SysSynBio/parPE
|
/ThirdParty/installGoogleTest.sh
|
UTF-8
| 632
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Download and build googletest
set -euo pipefail

here="$(dirname "$0")"
here="$(cd "${here}" && pwd)"
cd "${here}"

if [[ -d "googletest" ]]; then
    echo "googletest/ exists. nothing to do."
else
    echo "googletest/ does not exist"
    # Reuse a previously downloaded archive when present.
    if [[ ! -f "googletest.zip" ]]; then
        echo "Downloading googletest..."
        wget "https://github.com/google/googletest/archive/master.zip" -O "googletest.zip"
    fi
    echo "Unpacking and building googletest..."
    unzip "googletest"
    mv "googletest-master" "googletest"
    cd "googletest"
    mkdir -p build
    cd build
    cmake ..
    make -j 4
fi
| true
|
f1c4146daf19d9d2c5831929fcfe3cefbfa4dd61
|
Shell
|
mgijax/pgmgddbschema
|
/comments/DAG_Label_create.object
|
UTF-8
| 616
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh

# Move to this script's directory and load the shared DB configuration.
cd `dirname $0` && . ./Configuration

# Pipe the COMMENT statements below into the doisql helper, which runs
# them against the mgd schema to document the DAG_Label table.
cat - <<EOSQL | ${PG_DBUTILS}/bin/doisql.csh $0

COMMENT ON TABLE mgd.DAG_Label IS 'A record in this table represents a "label" for a Node or an Edge. A "label" is an attribute of the Note or Edge
which may be specific to a given DAG.  A "label" is not necessarily something which is printed.';
COMMENT ON COLUMN DAG_Label.creation_date IS 'date record was created';
COMMENT ON COLUMN DAG_Label.label IS 'the label';
COMMENT ON COLUMN DAG_Label._Label_key IS 'primary key';
COMMENT ON COLUMN DAG_Label.modification_date IS 'date record was last modified';

EOSQL
| true
|
707df1a9fcfbc826a71a6d8a8e3c3d161922fd99
|
Shell
|
PIPplware/es-pipplware
|
/etc/emulationstation/pipplware-menus/system/functions.inc
|
UTF-8
| 8,160
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
enable_joystick() {
    # Start joy2key so the first joystick can drive dialog menus.
    # Arguments: key codes to map axes/buttons to; defaults below map the
    # cursor keys plus enter (0x0a) and space (0x20).
    local params=("$@")
    if [[ "${#params[@]}" -eq 0 ]]; then
        params=(kcub1 kcuf1 kcuu1 kcud1 0x0a 0x20)
    fi
    # get the first joystick device (if not already set)
    # NOTE(review): "/dev/input/jsX" is a literal path, not a glob --
    # confirm it is intended as a placeholder fallback.
    [[ -c "$__joy2key_dev" ]] || __joy2key_dev="/dev/input/jsX"
    # if no joystick device, or joy2key is already running exit
    [[ -z "$__joy2key_dev" ]] || pgrep -f joy2key.py >/dev/null && return 1
    # if joy2key.py is installed run it with cursor keys for axis, and enter + space for buttons 0 and 1
    # NOTE(review): the "& 2>/dev/null" backgrounds the command inside the
    # condition; the redirect applies to an empty command -- confirm.
    if "/home/pi/RetroPie-Setup/scriptmodules/supplementary/runcommand/joy2key.py" "$__joy2key_dev" "${params[@]}" & 2>/dev/null; then
        __joy2key_pid=$!
        return 0
    fi
    return 1
}
kill_joystick() {
    # Stop the joy2key helper (if enable_joystick started one) and give
    # it a moment to shut down.
    [[ -z $__joy2key_pid ]] && return
    kill -INT $__joy2key_pid 2>/dev/null
    sleep 1
}
selectDrive() {
    # Show a dialog menu of mounted partitions and echo the chosen mount
    # point. Any value in $1 restricts the list to USB-attached disks.
    local usb_only=0
    local text=$(sudo lsblk --output KNAME,LABEL,TYPE,MOUNTPOINT,TRAN -p -P | grep -v -E "MOUNTPOINT=\".*/boot\"")
    if [ $1 ]; then
        usb_only=1
        declare -A usb_disks
        local SAVEDIFS=$IFS
        local IFS=$'\n'
        # First pass: remember every device reported as TRAN="usb".
        for l in $text
        do
            [[ -z $(echo $l | grep -E "TRAN=\"usb\"") ]] && continue
            local DEV=$(echo $l | sed -n 's/.*KNAME="\([^"]*\)".*/\1/p')
            usb_disks+=(["$DEV"]="$DEV")
        done
        IFS=$SAVEDIFS
    fi
    local current_mp=$(stat --printf "%m" .)
    local home_mp=$(stat --printf "%m" ~)
    declare -A drives
    local SAVEDIFS=$IFS
    local IFS=$'\n'
    # Second pass: collect mounted partitions (optionally USB only).
    for l in $text
    do
        # We are only interested in partitions
        [[ -z $(echo $l | grep -E "TYPE=\"part\"") ]] && continue
        local DEV=$(echo $l | sed -n 's/.*KNAME="\([^"]*\)".*/\1/p')
        local LABEL=$(echo $l | sed -n 's/.*LABEL="\([^"]*\)".*/\1/p')
        local MOUNTPOINT=$(echo $l | sed -n 's/.*MOUNTPOINT="\([^"]*\)".*/\1/p')
        # Only if mountpoint exist
        [[ -z $MOUNTPOINT ]] && continue
        # If usb only check if this partition belongs to a usb device.
        if [[ "$usb_only" == "1" ]]; then
            [[ ${#usb_disks[@]} -eq 0 ]] && break
            local continue_outer=0
            for u in ${!usb_disks[@]}
            do
                if [[ -z $(echo $DEV | grep -E "$u") ]]; then
                    continue_outer=1
                else
                    continue_outer=0
                    break
                fi
            done
            [[ $continue_outer -eq 1 ]] && continue
        fi
        local VOL="$DEV"
        if [ -n "$LABEL" ]; then
            VOL+=" ($LABEL)"
        fi
        # Mark the partition the current working directory lives on.
        if [ $MOUNTPOINT == $current_mp ]; then
            VOL+=" *"
        fi
        if [ $MOUNTPOINT == $home_mp ]; then
            MOUNTPOINT=`echo ~`
        fi
        drives+=(["$MOUNTPOINT"]="$VOL")
    done
    IFS=$SAVEDIFS
    local MENU_OPTIONS=()
    local COUNT=0
    for d in "${!drives[@]}"; do
        COUNT=$[COUNT+1]
        # NOTE(review): STATE is computed but never added to MENU_OPTIONS
        # -- possibly a leftover from a radiolist; confirm.
        local STATE="OFF"
        [ $COUNT == 1 ] && STATE="ON"
        MENU_OPTIONS+=("${d}" "${drives[$d]}")
    done
    [ "$COUNT" -eq "0" ] && return 0
    DRIVE=$(dialog --menu "Select drive:" 16 76 $COUNT "${MENU_OPTIONS[@]}" 3>&1 1>&2 2>&3)
    echo $DRIVE
}
backup() {
    # Archive home, /etc, emulator configs and boot config into
    # $1/PI_BACKUP.tar.gz, together with the list of installed packages.
    dpkg-query -f '${binary:Package}=${Version} ' -W > ~/installed_packages.txt
    sudo tar cpzvf "$1"/PI_BACKUP.tar.gz --exclude='PI_BACKUP.tar.gz' ~ /etc /opt/retropie/configs /boot/config.txt /boot/cmdline.txt
    if [ $? -eq 0 ]; then
        dialog --title "Backup Settings" --msgbox "BACKUP $1/PI_BACKUP.tar.gz COMPLETE!" 12 56
    else
        # tar failed (most likely out of space): drop the partial archive.
        sudo rm "$1"/PI_BACKUP.tar.gz &>/dev/null
        dialog --title "Backup Settings" --msgbox "INSUFFICIENT DISK SPACE!\n\nPlease free some space and try again." 12 56
    fi
    rm ~/installed_packages.txt &> /dev/null
}
restore() {
    # Restore a backup created by backup(): reinstall the recorded package
    # set first, then unpack the full archive over /.
    if [ ! -f "$1/PI_BACKUP.tar.gz" ]; then
        dialog --title "Restore Settings" --msgbox "No BACKUP found!\n\nPlease create a backup first, or choose a device with a Backup." 12 56
    else
        # Extract only the package list and apt sources first.
        sudo tar zxpvf "$1"/PI_BACKUP.tar.gz --wildcards --no-anchored -C / '*installed_packages.txt*' '*etc/apt/*'
        if [ $? -eq 0 ]; then
            if [ -f "$HOME/installed_packages.txt" ]; then
                sudo apt-get update && sudo apt-get install -y $(cat ~/installed_packages.txt)
            fi
            sudo tar zxpvf "$1"/PI_BACKUP.tar.gz -C /
        fi
        if [ $? -eq 0 ]; then
            rm "$HOME"/installed_packages.txt &> /dev/null
            dialog --title "Restore Settings" --msgbox "RESTORE COMPLETE!" 12 56
        else
            dialog --title "Restore Settings" --msgbox "INSUFFICIENT DISK SPACE!\n\nPlease free some space and try again." 12 56
        fi
    fi
}
export_roms() {
    # Copy ROMs, BIOS files, emulator state and configs from the SD card
    # onto the device mounted at $1, so games can be loaded from USB.
    mkdir -p "$1"/RetroPie/configs &>/dev/null
    cp -uv --preserve=timestamps /opt/retropie/emulators/gpsp/raspberrypi/gba_bios.bin "$1"/RetroPie/BIOS/gba_bios.bin &>/dev/null
    mkdir -p "$1"/RetroPie/emulators/gpsp/raspberrypi &>/dev/null
    cp -uv --preserve=timestamps /opt/retropie/emulators/gpsp/raspberrypi/*.s* "$1"/RetroPie/emulators/gpsp/raspberrypi &>/dev/null
    cp -uv --preserve=timestamps /opt/retropie/emulators/gpsp/raspberrypi/*.cfg "$1"/RetroPie/emulators/gpsp/raspberrypi &>/dev/null
    mkdir -p "$1"/RetroPie/emulators/pifba/saves &>/dev/null
    cp -uv --preserve=timestamps /opt/retropie/emulators/pifba/saves/* "$1"/RetroPie/emulators/pifba/saves &>/dev/null
    cp -uv --preserve=timestamps /opt/retropie/emulators/pifba/*.cfg "$1"/RetroPie/emulators/pifba/ &>/dev/null
    mkdir -p "$1"/RetroPie/emulators/mame4all-pi/hi &>/dev/null
    mkdir -p "$1"/RetroPie/emulators/mame4all-pi/sta &>/dev/null
    cp -uv --preserve=timestamps /opt/retropie/emulators/mame4all-pi/hi/* "$1"/RetroPie/emulators/mame4all-pi/hi &>/dev/null
    cp -uv --preserve=timestamps /opt/retropie/emulators/mame4all-pi/sta/* "$1"/RetroPie/emulators/mame4all-pi/sta &>/dev/null
    cp -uv --preserve=timestamps /opt/retropie/emulators/mame4all-pi/mame.cfg "$1"/RetroPie/emulators/mame4all-pi/mame.cfg &>/dev/null
    # rsync -u only copies files newer than the destination copies.
    rsync -ruv --times /opt/retropie/configs/ "$1"/RetroPie/configs
    rsync -ruv --times ~/RetroPie/ "$1"/RetroPie
    if [ $? -eq 0 ]; then
        dialog --title "Export ROMs to USB" --msgbox "ROMS EXPORTED TO $1 !\n\nTo load ROMs directly from this device, please go to Emulation Station Tools and select the apropriate option." 12 56 >/dev/tty
    else
        dialog --title "Export ROMs to USB" --msgbox "INSUFFICIENT DISK SPACE IN $1 !\n\nPlease free some space in $1 and try again." 12 56 >/dev/tty
    fi
}
import_roms() {
    # Inverse of export_roms: copy ROMs, BIOS files and emulator state
    # from the device mounted at $1 back onto the SD card.
    sudo cp -uv --preserve=timestamps "$1"/RetroPie/roms/emulators/gpsp/raspberrypi/* ~/RetroPie/emulators/gpsp/raspberrypi &>/dev/null
    sudo cp -uv --preserve=timestamps "$1"/RetroPie/BIOS/gba_bios.bin /opt/retropie/emulators/gpsp/raspberrypi/gba_bios.bin &>/dev/null
    cp -uv --preserve=timestamps "$1"/RetroPie/BIOS/neogeo.zip ~/RetroPie/roms/mame/ &>/dev/null
    cp -uv --preserve=timestamps "$1"/RetroPie/BIOS/neogeo.zip ~/RetroPie/roms/fba/ &>/dev/null
    cp -uv --preserve=timestamps "$1"/RetroPie/BIOS/neogeo.zip ~/RetroPie/roms/neogeo/ &>/dev/null
    sudo cp -uv --preserve=timestamps "$1"/RetroPie/emulators/pifba/* /opt/retropie/emulators/pifba &>/dev/null
    sudo cp -uv --preserve=timestamps "$1"/RetroPie/emulators/pifba/saves/* /opt/retropie/emulators/pifba/saves &>/dev/null
    sudo cp -uv --preserve=timestamps "$1"/RetroPie/emulators/mame4all-pi/* /opt/retropie/emulators/mame4all-pi/ &>/dev/null
    sudo cp -uv --preserve=timestamps "$1"/RetroPie/emulators/mame4all-pi/hi/* /opt/retropie/emulators/mame4all-pi/hi &>/dev/null
    sudo cp -uv --preserve=timestamps "$1"/RetroPie/emulators/mame4all-pi/sta/* /opt/retropie/emulators/mame4all-pi/sta &>/dev/null
    # Configs and ES state are synced separately below / intentionally excluded.
    rsync -ruv --times --exclude=emulators --exclude=configs --exclude=.emulationstation "$1"/RetroPie/ ~/RetroPie
    rsync -ruv --times "$1"/RetroPie/configs/ /opt/retropie/configs
    if [ $? -eq 0 ]; then
        dialog --title "Import ROMs to SD Card" --msgbox "ROMS IMPORTED TO SD CARD!\n\nTo load ROMs from the SD Card (original behaviour), please go to Emulation Station Tools and select the apropriate option." 12 56 >/dev/tty
    else
        dialog --title "Import ROMs to SD Card" --msgbox "No ROMs found in the USB device $1/RetroPie/ !\n\nPlease run Export ROMs to USB first, copy your ROMs to the apropriate folders and try again." 12 56 >/dev/tty
    fi
}
| true
|
d200787899bab24c7f7f6a2083b2d69aae5cf010
|
Shell
|
draschke/hana-python-securestore
|
/tools/bas_install_cfdefenv.sh
|
UTF-8
| 1,647
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Install the Cloud Foundry DefaultEnv CLI plugin: download the release
# binary, install it via `cf install-plugin`, then verify.
do_run=1 # Execute(evaluate) the commands
#do_run=0 # Don't evaluate any commands
#do_echo=1 # Echo the commands
do_echo=0 # Don't echo any commands
#
echo ""
#destdir="/home/user/projects/hana-python-securestore/tools"
#destdir="/home/user"
pluginver="1_0_0"
pluginmin="1.0.0"
rcfile="~/.bashrc" # NOTE(review): currently unused -- confirm before removing
#rcfile="bashrc"
if [ "$#" -ge 1 ]; then
    pluginver=$1
    if [ $pluginver = "1_0_0" ]; then
        echo "Version 1_0_0 cool."
        pluginmin="1.0.0"
    else
        if [ $pluginver = "1_0_1" ]; then
            echo "Version 1_0_1 cool."
            pluginmin="1.0.1"
        else
            echo "Version $pluginver is not supported, try again."
            exit 1
        fi
    fi
fi
if [ "$#" -ge 2 ]; then
    destdir=$2
else
    destdir="/home/user"
fi
echo ""
echo "Installing CF DefaultEnv Plugin Version $pluginver."
echo ""
echo "Changing to "$destdir
cmd='cd '$destdir
if [ $do_echo -eq 1 ]; then echo $cmd; fi
if [ $do_run -eq 1 ]; then eval $cmd; fi
echo ""
echo "Downloading CF DefaultEnv Plugin "$pluginmin".linux64"
cmd='curl -LO https://github.com/saphanaacademy/DefaultEnv/releases/download/v'$pluginmin'/DefaultEnv.linux64'
if [ $do_echo -eq 1 ]; then echo $cmd; fi
if [ $do_run -eq 1 ]; then eval $cmd; fi
# cf install-plugin DefaultEnv.linux64
# (the line above was written as "//cf install-plugin ..." -- "//" is not
# a shell comment, so the script tried to execute a program named "//cf")
echo ""
echo "Installing CF DefaultEnv Plugin "$pluginmin".linux64"
cmd='cf install-plugin DefaultEnv.linux64 -f'
if [ $do_echo -eq 1 ]; then echo $cmd; fi
if [ $do_run -eq 1 ]; then eval $cmd; fi
echo ""
echo "Check CF Plugins Version"
cmd='cf plugins'
if [ $do_echo -eq 1 ]; then echo $cmd; fi
if [ $do_run -eq 1 ]; then eval $cmd; fi
echo ""
echo "Install of CF DefaultEnv Plugin "$pluginmin" finished."
| true
|
690de5222f645758febdc0f1f68c8c634a561040
|
Shell
|
dujincan/rsync_script
|
/rsync.sh
|
UTF-8
| 1,839
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# rsync daemon service
# 2018 11 07
# jason
# v1.0
# Number of running "rsync --daemon" processes, sampled once at startup;
# the functions below treat 1 as "running" and 0 as "stopped".
status1=$(ps -ef | grep "rsync --daemon" | grep -v 'grep' | wc -l)
pidfile="/var/run/rsyncd.pid"
function rsyncstart() {
    # Start the daemon unless the startup scan found it running; remove a
    # stale pid file first, then re-scan to confirm the process appeared.
    if [ $status1 -eq 0 ];then
        rm -rf $pidfile
        /usr/bin/rsync --daemon
        status2=$(ps -ef | grep "rsync --daemon" | grep -v 'grep' | wc -l)
        if [ $status2 -eq 1 ];then
            echo "rsync service start.......OK"
        fi
    else
        echo "rsync service is running !"
    fi
}
function rsyncstop() {
    # Stop the daemon (if the startup scan found it running) via the PID
    # recorded in $pidfile, then re-check the process table.
    if [ $status1 -eq 1 ];then
        kill -9 $(cat $pidfile)
        # grep (not deprecated egrep) -- consistent with rsyncstart/rsyncstatus
        status2=$(ps -ef | grep "rsync --daemon" | grep -v 'grep' | wc -l)
        if [ $status2 -eq 0 ];then
            echo "rsync service stop.......OK"
        fi
    else
        echo "rsync service is not running !"
    fi
}
function rsyncstatus() {
    # Report whether the startup scan found the rsync daemon running.
    if [ $status1 -ne 1 ];then
        echo "rsync service is not running !"
    else
        echo "rsync service is running !"
    fi
}
function rsyncrestart() {
    # Restart = stop (when running) then start. status1 reflects the
    # state sampled at script startup, before any action was taken.
    if [ $status1 -eq 0 ];then
        echo "rsync service is not running..."
        rsyncstart
    else
        rsyncstop
        rsyncstart
    fi
}
# Dispatch on the first CLI argument.
case $1 in
    "start")
        rsyncstart
        ;;
    "stop")
        rsyncstop
        ;;
    "status")
        rsyncstatus
        ;;
    "restart")
        rsyncrestart
        ;;
    *)
        echo
        echo "Usage: $0 start|stop|restart|status"
        echo
esac
| true
|
968cad84b139e1f005e04d699f380de0721675e3
|
Shell
|
rocknsm/rock-dashboards
|
/ecs-configuration/kibana/import-saved-items.sh
|
UTF-8
| 2,070
| 3.65625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Import saved Kibana objects (dashboards, visualizations, searches,
# index patterns, ...) via the saved_objects API, then set the default
# index pattern. Optional $1 overrides the Kibana base URL.
_URL=$1
KIBANA_URL=${_URL:=http://127.0.0.1:5601}
# NOTE(review): KIBANA_VERSION is captured but never used below -- confirm.
KIBANA_VERSION=$(curl -sI ${KIBANA_URL} | awk '/kbn-version/ { print $2 }')
updated=0
created=0
failed=0
echo "Please be patient as we import 200+ custom dashboards, visualizations, and searches..."
# Each type directory holds an index.json listing object ids, plus one
# <id>.ndjson export file per object.
for item in config index-pattern search visualization dashboard url map canvas-workpad canvas-element timelion; do
    cd ${item} 2>/dev/null || continue
    for id in $(cat index.json | jq -r '.[]'); do
        file="${id}.ndjson"
        # A 404 on the object endpoint means it does not exist yet.
        if curl -sI "${KIBANA_URL}/api/saved_objects/${item}/${id}" | grep -q '^HTTP.*404'; then
            # object doesn't exist, create it
            echo "Creating ${item} with id ${id}" > /dev/stderr
            response=$(
                curl -s -XPOST \
                    "${KIBANA_URL}/api/saved_objects/_import" \
                    -H "kbn-xsrf: true" \
                    --form file=@"${file}"
            )
            result=$(echo "${response}" | jq -r '.success')
            if [[ "${result}" == "true" ]]; then
                created=$((created+1))
            else
                failed=$((failed+1))
                echo -e "Failed creating ${item} named ${file}: \n ${response}\n"
            fi
        else
            # object already exists, apply update
            echo "Overwriting ${item} named ${id}" > /dev/stderr
            response=$(
                curl -s -XPOST \
                    "${KIBANA_URL}/api/saved_objects/_import?overwrite=true" \
                    -H "kbn-xsrf: true" \
                    --form file=@"${file}"
            )
            result=$(echo "${response}" | jq -r '.success')
            if [[ ${result} == "true" ]]; then
                updated=$((updated+1))
            else
                failed=$((failed+1))
                echo -e "Failed updating ${item} named ${file}: \n ${response}\n"
            fi
        fi
    done
    cd ..
done
# Set default index
defaultIndex=$(jq -r '.userValue' index-pattern/default.json)
echo "Setting defaultIndex to ${defaultIndex}" > /dev/stderr
curl -s -XPOST -H"kbn-xsrf: true" -H"Content-Type: application/json" \
    "${KIBANA_URL}/api/kibana/settings/defaultIndex" -d"{\"value\": \"${defaultIndex}\"}" >/dev/null
echo "Created: ${created}"
echo "Updated: ${updated}"
echo "Failed:  ${failed}"
| true
|
c477262912986a196d1ad025275e2900fb9f2ef3
|
Shell
|
jmas/fetch-to-request
|
/make
|
UTF-8
| 339
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Dispatch the requested build task to the matching npm script.

if [ -z "$1" ]; then
  echo './make <default|minify|test>'
  exit
fi

case "$1" in
  default)
    npm run dist:default
    ;;
  minify)
    npm run dist:minify
    ;;
  test)
    npm run test
    npm run dist:default
    npm run dist:minify
    ;;
  *)
    echo './make <default|minify|test>'
    exit
    ;;
esac
| true
|
ee108b1ecfbb91bee97bd8aa07669ace915af3d5
|
Shell
|
szigyi/expose-to-the-light-web
|
/ettl-web
|
UTF-8
| 186
| 2.71875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# -help
# ettl-web INFO
# Launcher for the ettl web app: the first CLI argument becomes
# LOG_LEVEL; all remaining arguments are passed straight to the jar.
artif="expose-to-the-light-web_2.13-0.1.33.jar"
export LOG_LEVEL=$1
shift
echo "Starting ettl-web application..."
java -jar "$artif" "$@"
| true
|
ed6b51a4573e4f1426cc9f4b738caf760c082f47
|
Shell
|
DavidBorstner/FaksNalogePreverjanja
|
/1. letnik 17_18/Operacijski sistemi/Preverjanja/Naloga01/nadgradnja2.sh
|
UTF-8
| 276
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Create user "beton" with the password given as $1, then print the
# password one character per line.
name="Rudolf Eror"
geslo=""
if [ $# -eq 0 ]; then
    echo "Napaka!"
    exit 42
else
    geslo=$1
    sudo useradd beton -c "Rudolf Eror" -p $(mkpasswd -m sha-512 $1)
    echo "Uporabnik ustvarjen!"
fi
# NOTE(review): seq 0 ${#geslo} yields length+1 indices, so the last
# iteration prints an empty line -- confirm whether that is intended.
for i in $(seq 0 ${#geslo}); do
    echo ${geslo:$i:1}
done
echo "Debeli zajec."
| true
|
cf45f6955cd336a2c6775d6ccd8b0ba4ebe6f832
|
Shell
|
leftiness/ton-hub
|
/bin/clean-fast.sh
|
UTF-8
| 223
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Remove everything under ./dist except entries matching the ignore
# patterns below (vendor files, fonts, picnic, dialog assets).

IGNORE[0]='vendor*'
IGNORE[1]='font*'
IGNORE[2]='picnic*'
IGNORE[3]='dialog*'

# Build the find predicate as an argument array instead of a string fed
# through eval: the old version expanded each unquoted pattern in the
# current directory before find ever saw it.
FIND_ARGS=()
for each in "${IGNORE[@]}"
do
	FIND_ARGS+=(! -iname "$each")
done

find ./dist/* "${FIND_ARGS[@]}" | xargs rm -rf
| true
|
ac1ffcf4f449684b2fbe6055a9c954b3b3feedac
|
Shell
|
hansode/ifutils
|
/test/unit/ifcfg-setup/t.render_ifcfg_bonding_slave.sh
|
UTF-8
| 648
| 3.09375
| 3
|
[
"Beerware"
] |
permissive
|
#!/bin/bash
#
# requires:
#  bash
#
# shunit2 tests for render_ifcfg_bonding_slave (ifcfg-setup): the
# rendered ifcfg body must be the same with and without an explicit
# device argument.

## include files
. $(cd ${BASH_SOURCE[0]%/*} && pwd)/helper_shunit2.sh

## variables
declare device=eth0
# NOTE(review): ${master} is referenced in the expected bodies but never
# set here -- presumably provided by the sourced helper; confirm.

## functions

function setUp() {
  :
}

function tearDown() {
  :
}

# No arguments: the function should fall back to the global $device.
function test_render_ifcfg_bonding_slave_no_opts() {
  local body="DEVICE=${device}
BOOTPROTO=none
ONBOOT=yes
MASTER=${master}
SLAVE=yes"
  assertEquals "${body}" "$(render_ifcfg_bonding_slave)"
}

# Explicit device argument: output must match the no-argument case.
function test_render_ifcfg_bonding_slave_opts() {
  local body="DEVICE=${device}
BOOTPROTO=none
ONBOOT=yes
MASTER=${master}
SLAVE=yes"
  assertEquals "${body}" "$(render_ifcfg_bonding_slave ${device})"
}

## shunit2

. ${shunit2_file}
| true
|
0c38753e60081c8fe76b2b048eadf17542498968
|
Shell
|
dareni/shellscripts
|
/rcsCheck.sh
|
UTF-8
| 443
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# Find all rcs files with outstanding commits.
read -p "Run updatedb? y/[N]" UPDATE
if [ "y" = "$UPDATE" ]; then
  sudo updatedb
fi
# For each ,v archive under an RCS directory, derive the working-file
# path next to it and emit "archive workfile"; rcsdiff then compares each
# pair and all diffs are viewed read-only in vim.
locate RCS | grep ",v" | awk '{
segNum=split($0,segs,"/")
rcsfile=segs[segNum]
rcsfile=substr(rcsfile,1,length(rcsfile)-2)
filePathPOS=index($0,"RCS")
filePath=substr($0,1,filePathPOS-1)
absFile=filePath""rcsfile
print $0" "absFile
}' |xargs -n 2 rcsdiff 2>&1 |vim -R -
| true
|
a90ecaa6468ecdc9051662619b3881489b7ac021
|
Shell
|
keshava/cb
|
/scripts/setBasePath.sh
|
UTF-8
| 243
| 2.875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Replace every occurrence of $1 with $2, in place, in all regular files
# under spec/ and schema/.

toBeReplaced=$1
replacement=$2

# Run the substitution over every regular file below the given directory.
# find -exec handles filenames containing whitespace, which the previous
# `for i in $(find ...)` loop split apart; the sed expression is quoted
# for the same reason.
replace_in_tree()
{
	find "$1" -type f -exec sed -i "s,${toBeReplaced},${replacement},g" {} +
}

replace_in_tree spec
replace_in_tree schema
| true
|
3a699564f373d4de808903ebe9751fa93ccd8e13
|
Shell
|
m0gjr/unifi
|
/shell_tap/tap_setup.sh
|
UTF-8
| 250
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/ash
# Lab tap setup for a UniFi AP: register tsh as the login shell for every
# account, then keep forwarding a local command FIFO to a listener.
#host scp tsh ubnt@ap:/bin/tsh
chmod +x /bin/tsh
echo /bin/tsh >> /etc/shells
# Swap the default shell for tsh in every passwd entry.
sed -i -e 's:/bin/ash:/bin/tsh:g' /etc/passwd
mkfifo /tmp/cmd-fifo
# Stream FIFO contents to the listener, reconnecting forever.
# NOTE(review): "10.11" relies on inet_aton shorthand (10.0.0.11) --
# confirm the intended host.
while true
do
	< /tmp/cmd-fifo nc 10.11 5000
done
#host nc -lkp 5000
#host nc -lkp 5001
| true
|
2bad6ac345c449736bb9c7370bf0d47ccd8edc4e
|
Shell
|
angelszymczak/docker-lab
|
/lab/step01.sh
|
UTF-8
| 3,332
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Docker lab, step 1: create a user network, start a MariaDB container on
# it, and import a sample "users" database. Expected command output is
# kept inline as comments.
# NOTE(review): this reads as an interactive transcript -- the lines after
# "docker exec -it db01 bash" and the mysql logins (env/grep, SQL
# statements) are typed inside the container/client, not executed by this
# script; confirm before running it as a batch job.
docker network create lab
# 4faccf1ff03054d0c56daeeea9c6b1eb05e1904770d0841c0969d615ae963767

# We can set environment variables from container running
docker run --net lab --name db01 -e MYSQL_ROOT_PASSWORD=passw0rd -d mariadb:latest
# Unable to find image 'mariadb:latest' locally
# latest: Pulling from library/mariadb
# da7391352a9b: Pull complete
# 14428a6d4bcd: Pull complete
# 2c2d948710f2: Pull complete
# 22776aa82430: Pull complete
# 90e64230d63d: Pull complete
# f30861f14a10: Pull complete
# e8e9e6a3da24: Pull complete
# 420a23f08c41: Pull complete
# bd73f23de482: Pull complete
# a8690a3260b7: Pull complete
# 4202ba90333a: Pull complete
# a33f860b4aa6: Pull complete
# Digest: sha256:cdc553f0515a8d41264f0855120874e86761f7c69407b5cfbe49283dc195bea8
# Status: Downloaded newer image for mariadb:latest
# c23f81e96fa59390d6ca2d69030c4aeae02a11be88fce11150fb85df4d785be3

docker ps
# CONTAINER ID   IMAGE            COMMAND                  CREATED          STATUS          PORTS      NAMES
# c23f81e96fa5   mariadb:latest   "docker-entrypoint.s…"   18 seconds ago   Up 16 seconds   3306/tcp   db01

docker exec -it db01 bash

env | grep MYSQL_ROOT_PASSWORD
# MYSQL_ROOT_PASSWORD=passw0rd

env | grep MARIADB
# MARIADB_VERSION=1:10.5.8+maria~focal
# MARIADB_MAJOR=10.5

mysql -uroot -ppassw0rd
# Welcome to the MariaDB monitor.  Commands end with ; or \g.
# Your MariaDB connection id is 3
# Server version: 10.5.8-MariaDB-1:10.5.8+maria~focal mariadb.org binary distribution
#
# Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.
#
# Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
#
# Ctrl-C -- exit!
# Aborted

# We can connect to db01 mysql database from different ways
docker run -it --network lab --name test --rm mariadb mysql -hdb01 -uroot -ppassw0rd
# Welcome to the MariaDB monitor.  Commands end with ; or \g.
# Your MariaDB connection id is 3
# Server version: 10.5.8-MariaDB-1:10.5.8+maria~focal mariadb.org binary distribution
#
# Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.
#
# Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
#
# Ctrl-C -- exit!
# Aborted

# The MYSQL_ROOT_PASSWORD will be read from container env vars, and the command will import a single users db
docker exec -i db01 sh -c 'exec mysql -uroot -p"$MYSQL_ROOT_PASSWORD"' < dump/users.sql

docker run -it --network lab --name test --rm mariadb mysql -hdb01 -uroot -ppassw0rd
show databases;
# +--------------------+
# | Database           |
# +--------------------+
# | information_schema |
# | mysql              |
# | performance_schema |
# | users              |
# +--------------------+
# 4 rows in set (0.001 sec)
#
# MariaDB [(none)]> use users;
# Reading table information for completion of table and column names
# You can turn off this feature to get a quicker startup with -A
#
# Database changed
# MariaDB [users]> show tables;
# +-----------------+
# | Tables_in_users |
# +-----------------+
# | user            |
# +-----------------+
# 1 row in set (0.001 sec)
#
# MariaDB [users]> select * from user;
# +------+----------+
# | ID   | username |
# +------+----------+
# |    0 | miguel   |
# |    1 | Linus    |
# +------+----------+
# 2 rows in set (0.001 sec)
| true
|
c727b709a8afd5e87bbc0ba6ad9913fd847ceb63
|
Shell
|
fra87/OpenHR20
|
/tools/EnvironmentBuilder/FlashDevice.sh
|
UTF-8
| 4,418
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Flash an OpenHR20 unit or master: optionally back up the current
# firmware/fuses, set fuses, and write flash (plus EEPROM) via avrdude.
SCRIPTPATH="${0%/*}"
source ${SCRIPTPATH}/environment.config
SRC_FOLDER=$(realpath ${SCRIPTPATH}/../../${BIN_FOLDER})

# Flags set by the CLI parsing loop below.
TARGET_PREFIX=""
HARDWARE_PLATFORM=""
SETFUSES=0
DRYRUN=0
BACKUP=1
# Logic on EEPROM inverted with respect to original flash to avoid erasing the old EEPROM
EEPROM=0
FIRSTWRITE=0
printUsage() {
	# Print command-line help for this flashing script.
	echo "$0 [--list] [--unit <addr>] [--master] [--setFuses] [--dryRun] [--noBackup] [--writeEEPROM]"
	echo "  --list         Show the list of available targets"
	echo "  --unit <addr>  Flash the unit <addr>"
	echo "  --master       Flash the master"
	echo "  Other options:"
	echo "  --setFuses     Flash the fuses with the binary (needed only once)"
	echo "  --dryRun       Echo the AVRDUDE commands instead of executing them"
	echo "  --noBackup     Avoid reading the firmware on the board (and saving it) before flashing"
	echo "  --writeEEPROM  Write the EEPROM memory together with the firmware"
	echo "  --firstWrite   First writing of the firmware (forces backup, fuses and EEPROM)"
}
# Parse CLI options; --unit/--master pick the one target to flash.
while [ "$#" -ne 0 ]; do
	if [ "$1" == "--list" ]; then
		echo "Available targets:"
		[ -f "$SRC_FOLDER/$(GetBinaryFileName HEX MASTER)" -a -f "$SRC_FOLDER/$(GetBinaryFileName EEPROM MASTER)" ] && echo "  MASTER"
		for i in {1..29}; do
			# A unit is listed when both its HEX and EEPROM images exist
			# (previously the second test re-checked HEX, mirroring the
			# MASTER branch shows EEPROM was intended).
			[ -f "$SRC_FOLDER/$(GetBinaryFileName HEX UNIT $i)" -a -f "$SRC_FOLDER/$(GetBinaryFileName EEPROM UNIT $i)" ] && echo "  UNIT $i"
		done
		exit 0;
	elif [ "$1" == "--unit" ]; then
		[ -z "$TARGET_PREFIX" ] || { echo "Too many targets specified; can only flash one device"; exit 1; }
		TARGET_PREFIX="$(GetBinaryFilePrefix UNIT $2)" || { echo "Invalid address $2; aborting"; exit 1; }
		HARDWARE_PLATFORM=$UNIT_HW
		shift;shift;
	elif [ "$1" == "--master" ]; then
		[ -z "$TARGET_PREFIX" ] || { echo "Too many targets specified; can only flash one device"; exit 1; }
		TARGET_PREFIX="$(GetBinaryFilePrefix MASTER)"
		HARDWARE_PLATFORM=$MASTER_HW
		shift;
	elif [ "$1" == "--setFuses" ]; then
		SETFUSES=1; shift;
	elif [ "$1" == "--dryRun" ]; then
		DRYRUN=1; shift;
	elif [ "$1" == "--noBackup" ]; then
		BACKUP=0; shift;
	elif [ "$1" == "--writeEEPROM" ]; then
		EEPROM=1; shift;
	elif [ "$1" == "--firstWrite" ]; then
		FIRSTWRITE=1; shift;
	else
		echo "unknown options starting at $*"
		printUsage
		exit 1
	fi
done

# Done here to force the behavior (and avoid to ignore backup when, for instance, one calls ./FlashDevice.sh --firstWrite --noBackup)
if [ "$FIRSTWRITE" == "1" ]; then
	EEPROM=1
	BACKUP=1
	SETFUSES=1
fi
# Flashing script adapted from /src/flash_unit.sh
set -o nounset
set -o errexit
#set -o xtrace
# Get processor code
PROC="$(GetProcessorCode ${HARDWARE_PLATFORM})"
# Load fuses settings
[ -f ${SCRIPTPATH}/fuses/${PROC}.fuses ] || { echo "Cannot find fuses file for ${PROC}"; exit 1; }
source ${SCRIPTPATH}/fuses/${PROC}.fuses
# EEPROM settings
if [ "$EEPROM" == "1" ]; then
WRITE_EEPROM="-e -U eeprom:w:${SRC_FOLDER}/${TARGET_PREFIX}.eep"
else
WRITE_EEPROM=
fi
# Backup directory
BDIR="$(realpath ${SCRIPTPATH}/../../backup)/${TARGET_PREFIX}/$(date "+%F_%T")"
# AVRDUDE command
DUDE="avrdude -p ${PROC} -c ${PROGRAMMER} -P ${PROGRAMMER_PORT} ${PROGRAMMER_OPTS}"
if [ "$DRYRUN" != "0" ]; then
DUDE="echo DRY RUN: $DUDE"
fi
echo ""
if [ "$BACKUP" != "0" ]; then
# do a backup
echo "*** making backup to ${BDIR}"
if [ ! -x $BDIR ]; then
mkdir -p $BDIR
fi
echo "*** backing up fuses..."
$DUDE -U lfuse:r:${BDIR}/lfuse.hex:h -U hfuse:r:${BDIR}/hfuse.hex:h -U efuse:r:${BDIR}/efuse.hex:h || exit
sleep 3
echo "*** backing up flash and eeprom..."
$DUDE -U flash:r:${BDIR}/${TARGET_PREFIX}.hex:i -U eeprom:r:${BDIR}/${TARGET_PREFIX}.eep:i
sleep 3
echo ""
fi
#flash files from current dir
if [ "$SETFUSES" == "1" ]; then
echo "*** setting fuses..."
$DUDE -U hfuse:w:${HFUSE_PROTECTED_EEPROM}:m -U lfuse:w:${LFUSE}:m -U efuse:w:${EFUSE}:m
sleep 3
echo ""
fi
# Unprotect EEPROM if needed
[ "$EEPROM" == "1" ] && $DUDE -U hfuse:w:${HFUSE_UNPROTECTED_EEPROM}:m
echo "*** writing openhr20 flash (and possibly eeprom)"
$DUDE -U flash:w:${SRC_FOLDER}/${TARGET_PREFIX}.hex $WRITE_EEPROM
# if we wrote the eeprom, then protect the eeprom from erase next time
# so that we can just update the code without blowing away the eeprom
[ "$EEPROM" == "1" ] && $DUDE -U hfuse:w:${HFUSE_PROTECTED_EEPROM}:m
echo ""
echo "*** done!"
| true
|
e90679dc028d7b15bb53eac89a4f7880f19f62bd
|
Shell
|
kbarnhart/inverting_topography_postglacial
|
/calibration/sew/QUESO_DRAM/model_000/lowering_history_0.pg24f_ic5etch/start_dakota.sh
|
UTF-8
| 755
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh
#SBATCH --job-name sew_mcmc_model_000
#SBATCH --ntasks-per-node 24
#SBATCH --partition shas
#SBATCH --mem-per-cpu 4GB
#SBATCH --nodes 1
#SBATCH --time 24:00:00
#SBATCH --account ucb19_summit1
# load environment modules
module load intel/16.0.3
module load openmpi/1.10.2
module load cmake/3.5.2
#module load perl
module load mkl
module load gsl
# make sure environment variables are set correctly
# BUG FIX: "source" is a bashism; use the portable "." under #!/bin/sh.
. ~/.bash_profile
## run dakota using a restart file if it exists.
if [ -e dakota_mcmc.rst ]
then
    restart_opts="--read_restart dakota_mcmc.rst"
else
    restart_opts=""
fi
# $restart_opts is deliberately unquoted so it splits into two words.
# BUG FIX: "&>" is a bash-only redirection; a strict POSIX /bin/sh (e.g.
# dash) parses it as "background the command, redirect stdout only".
# Use the portable "> file 2>&1" form instead.
dakota -i dakota_queso_dram.in -o dakota_queso_dram.out $restart_opts --write_restart dakota_mcmc.rst > dakota.log 2>&1
| true
|
91716115a598ed03d9a84412c5b2c62472c4b483
|
Shell
|
iravul/iosched
|
/tools/setup.sh
|
UTF-8
| 1,186
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# ANSI color helpers for error output.
RED='\033[0;1;31m'
NC='\033[0m' # No Color
GIT_DIR=$(git rev-parse --git-dir 2> /dev/null)
GIT_ROOT=$(git rev-parse --show-toplevel 2> /dev/null)
# Refuse to run unless we are inside the cloned "iosched" repository
# (suffix match on the repo top-level directory).
if [[ "$GIT_ROOT" != */iosched ]]; then
  echo -e "${RED}ERROR:${NC} Please run this script from the cloned iosched directory."
  exit 1
fi
echo "Installing git commit-message hook"
echo
# Fetch Gerrit's commit-msg hook and mark it executable.
curl -sSLo "${GIT_DIR}/hooks/commit-msg" \
  "https://gerrit-review.googlesource.com/tools/hooks/commit-msg" \
  && chmod +x "${GIT_DIR}/hooks/commit-msg"
echo "Installing git pre-push hook"
echo
# Copy the repo-provided pre-push hook into place and mark it executable.
cp "${GIT_ROOT}/tools/pre-push" "${GIT_DIR}/hooks/pre-push" \
  && chmod +x "${GIT_DIR}/hooks/pre-push"
# Final manual-setup reminder for Android Studio users.
cat <<-EOF
Please import the code style settings in Android Studio:
* open Settings -> Editor -> Code Style
* click the gear icon and select "Import Scheme..."
* find the file ${GIT_ROOT}/tools/iosched-codestyle.xml
Additionally, checking the following settings helps avoid miscellaneous issues:
* Settings -> Editor -> General -> Strip trailing spaces on Save
* Settings -> Editor -> General -> Ensure line feed at end of file on Save
* Settings -> Editor -> General -> Auto Import -> Optimize imports on the fly
EOF
| true
|
cd96fc49b08eb055e15dabea7be13d0174f8d6cd
|
Shell
|
giulioq7/tesi
|
/Libraries/Grail/tests/unix/fmtest2
|
UTF-8
| 936
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
# This code copyright (c) by the Grail project.
# No commercial use permitted without written consent.
# May 1992
#######################################################################
# add valid path by ray, at UPEI, 2010/MAY/18
#######################################################################
# Absolute path to this script.
SCRIPT=$(readlink -f "$0")
# Absolute path this script is in.
SCRIPTPATH=$(dirname "$SCRIPT")
BIN=$SCRIPTPATH/../../bin
MACHINES=$SCRIPTPATH/../machines
ERRORS=$SCRIPTPATH/../errors
# Run filter $1 over each test machine d1..d8 and compare its sorted output
# against the expected results stored in ../<filter>/<machine>.
for i in d1 d2 d3 d4 d5 d6 d7 d8
do
# BUG FIX: "echo -n" is not portable under #!/bin/sh (dash prints "-n"
# literally); printf is the portable spelling.
printf 'Testing %s on %s...' "$1" "$i"
"$BIN/$1" "$MACHINES/$i" | sort >"$$.tmp" 2>/dev/null
sort "$SCRIPTPATH/../$1/$i" >out
if diff "$$.tmp" out
then
rm "$$.tmp"
echo "results match !"
# (was "echo ... | cat >> passed" -- the pipe through cat was redundant)
echo "Testing $1 on $i passed !" >> passed
else
# Keep the mismatching output for later inspection.
mv "$$.tmp" "${ERRORS}/$1.$i"
echo "fail to match, results recorded !"
echo "Testing $1 on $i failed !" >> failed
fi
rm out
done
| true
|
8b66a19124905bd77a2ca884dc15a30667b7cc0f
|
Shell
|
SpisTresci/shim
|
/installer.sh
|
UTF-8
| 3,819
| 3.921875
| 4
|
[] |
no_license
|
#! /bin/sh
# Userify Shim Installer
# Copyright (c) 2011-2014 Userify
# Must run as root: the shim creates user accounts and edits sudoers.
if [ "$(id -u)" != "0" ]; then
cat << EOF >&2
Unfortunately, the Userify Shim requires root permissions in order to
create user accounts and manage sudoers. The Shim is open source; please
feel free to audit the code for security.
EOF
exit 1
fi
# NOTE(review): this only warns on non-Linux and does not exit -- confirm
# whether installation should abort here.
if [ "$(uname -s)" != "Linux" ]; then
    echo "Currently, Userify supports only Linux systems." >&2
fi
# on older apt-get systems, attempt to install sudo
# (failures deliberately ignored: non-apt systems simply skip this)
set +e
apt-get update 2>/dev/null; apt-get -y install sudo 2>/dev/null
set -e
echo "Installing Userify and halting on errors."
set -e
echo "Creating Userify directory (/opt/userify/)"
# The "exit" below runs in a subshell, but its non-zero status still aborts
# the script because errexit (set -e) is active.  NOTE(review): "exit -1"
# is non-portable (wraps to 255) -- consider "exit 1".
[ -d /opt/userify ] && (
echo "Please remove /opt/userify before continuing." >&2; exit -1)
mkdir /opt/userify/ || (
echo "Unable to create directory /opt/userify." >&2; exit 1)
echo "Creating uninstall script (/opt/userify/uninstall.sh)"
# Unquoted EOF delimiter: backslash-newline pairs inside this heredoc act as
# line continuations, so each sed command is written to uninstall.sh joined
# onto a single line.
cat << EOF > /opt/userify/uninstall.sh
#! /bin/sh +e
# Debian, Ubuntu, RHEL:
sed -i "s/\/opt\/userify\/shim.sh \&//" \
/etc/rc.local 2>/dev/null
# SUSE:
sed -i "s/\/opt\/userify\/shim.sh \&//" \
/etc/init.d/after.local 2>/dev/null
# # Fedora:
# systemctl disable userify-shim.service 2>/dev/null
# rm -f /etc/systemd/system/userify-shim.service 2>/dev/null
# rm -Rf /opt/userify/
# killall shim.py shim.sh
EOF
# Only write credentials when the caller exported api_id into the environment.
if [ "x$api_id" != "x" ]; then
echo "Creating API login config (/opt/userify/creds.py)"
# Create the file empty first so the 0600 mode is set before secrets land.
echo -n > /opt/userify/creds.py
chmod 0600 /opt/userify/creds.py
# create creds configuration file
cat <<EOF >> /opt/userify/creds.py
api_id="$api_id"
api_key="$api_key"
EOF
else
echo "api_id variable not found, skipping creds.py creation."
fi
echo "Creating shim (/opt/userify/shim.{sh,py})"
# Quoted "EOF": no expansion, so $PYTHON/$output below are written literally
# into shim.sh and evaluated only when the shim itself runs.
cat << "EOF" > /opt/userify/shim.sh
#! /bin/bash +e
[ -z "$PYTHON" ] && PYTHON="$(which python)"
output=$(curl -k https://shim.userify.com/shim.py | $PYTHON 2>&1)
echo "$output" |tee /var/log/shim.log
# fix for thundering herd
sleep $(( ( RANDOM % 5 ) + 1 ))
/opt/userify/shim.sh &
EOF
# Comment out any "exit 0" so the line appended below is actually reached.
echo "Removing exit 0 from rc.local"
set +e
sed -i "s/^ *exit 0.*/# &/" /etc/rc.local 2>/dev/null
set -e
echo "Checking Shim Startup"
# most Linux versions can manage with a line added to rc.local:
if [ -f /etc/rc.local ]; then
distro="Linux"
fname=/etc/rc.local
elif [ -f /etc/init.d/after.local ]; then
distro="SUSE"
fname=/etc/init.d/after.local
# elif [ -f /etc/fedora-release ]; then
# distro="Fedora"
# cat << EOF > /etc/systemd/system/userify-shim.service
# [Unit]
# Description=Userify Shim (userify.com)
#
# [Service]
# Type=forking
# ExecStart=/opt/userify/shim.sh
#
# [Install]
# WantedBy=multi-user.target
# EOF
# systemctl enable userify-shim.service
else
cat << EOF >&2
Unable to set start at bootup -- no /etc/rc.local file?
You'll have to set shim to startup on it's own: create an
init script that launches /opt/userify/shim.sh on startup.
In most distributions, this would have been a single line
in /etc/rc.local, but you may need to do something more
exotic. Please contact us with Linux version information
and we may have more information for you.
EOF
exit 1
fi
# actually set up the startup
if [ "$distro" != "Fedora" ]; then
echo "Adding $distro Startup Script to $fname"
echo >> "$fname"
echo "/opt/userify/shim.sh &" >> "$fname"
# remove any existing lines:
set +e
sed -i "s/\/opt\/userify\/shim.sh \&//" "$fname" 2>/dev/null
set -e
fi
echo "Setting Permissions"
chmod 700 /opt/userify/ /opt/userify/uninstall.sh /opt/userify/shim.sh
echo "Launching shim.sh"
set +e;
killall shim.py shim.sh 2>/dev/null
set -e
/opt/userify/shim.sh &
echo
echo "Finished. Userify shim has been completely installed."
echo "To remove at any point in the future, run /opt/userify/uninstall.sh"
| true
|
845eaeacf3a9d1183617ac1ad6df9ee5ed06f3f6
|
Shell
|
id-tarzanych/site_builder
|
/build/update.sh
|
UTF-8
| 818
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Update an existing Drupal site built by site_builder: rebuilds from the
# .make file, re-enables dependencies, runs DB updates and clears caches.
path=$(dirname "$0")
source "$path/parse_yaml.sh"
source "$path/common.sh"
# Read settings YAML file.
notification "Reading settings file..."
# eval result intentionally unquoted: parse_yaml emits one assignment per word.
eval $(parse_yaml "$path/../cnf/config.yml")
# Get environment variables.
notification "Parsing variables..."
source "$path/connection.sh" "$1"
# Changing working folder.
cd "$path/.." || exit 1
# Check if Drupal was installed.
notification "Checking if Drupal is installed..."
DRUPAL_STATUS=$(drush status | grep "Drupal version")
# BUG FIX: the original tested the literal word DRUPAL_STATUS
# ("[ DRUPAL_STATUS == '' ]"), which is never empty, so the
# "not installed" guard could never fire.  Test the variable instead.
if [ -z "$DRUPAL_STATUS" ];
then
error "No existing Drupal site found.\n"
exit
fi
notification "Updating site..."
chmod -R 755 sites/default
drush make "$global_machine_name.make.yml" -y
# Restoring .gitignore (drush make may have clobbered it)
git checkout .gitignore
drush drux-enable-dependencies -y
drush updatedb -y
drush fra -y
drush cc all
success "Update complete."
| true
|
579164fe4e3b87066ad362adadb7e9251cdb029d
|
Shell
|
1500118C/LAB2B
|
/LAB2B/lab2remediation.sh
|
UTF-8
| 10,974
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# CIS-style hardening remediation, sections 4.x (boot loader) and 5.x
# (process hardening).  Must be run as root.
#4.1
#Set User/Group Owner on /boot/grub2/grub.cfg
#set the owner & group to the root user
chown root:root /boot/grub2/grub.cfg
#4.2
#Set Permissions on /boot/grub2/grub.cfg
#set permission to read+write for root only
chmod og-rwx /boot/grub2/grub.cfg
#4.3
#Set Boot Loader Password
#set boot loader pw for anyone rebooting the system
# NOTE(review): the PBKDF2 hash is written to test.md5 but never inserted
# into any grub config fragment before grub2-mkconfig runs, and the
# plaintext password files (test1.pwd) are left in the working directory.
# Confirm this actually sets a boot-loader password.
touch test1.pwd
echo "password" >> test1.pwd
echo "password" >> test1.pwd
grub2-mkpasswd-pbkdf2 < test1.pwd > test.md5
grub2-mkconfig -o /boot/grub2/grub.cfg
#5.1
#Restrict Core Dumps
#prevent users from overriding the soft variables
echo "* hard core 0" >> /etc/security/limits.conf
echo "fs.suid_dumpable = 0" >> /etc/sysctl.conf
#5.2
#Enable Randomized Virtual Memory Region Placement
#set the system flag to force randomized virtual memory region placement
echo "kernel.randomize_va_space = 2" >> /etc/sysctl.conf
#6.1.1
#Install the rsyslogpackage
# BUG FIX: the original ran "yum install rsyslog" without -y, which blocks on
# a confirmation prompt in this otherwise non-interactive script.
yum install -y rsyslog
systemctl enable rsyslog
systemctl start rsyslog
#6.1.2
#Activate the rsyslogService
#ensure rsyslog service is turned on
systemctl enable rsyslog
#6.1.3
#Configure /etc/rsyslog.conf
#ensure appropriate logging is set according to environment
# NOTE(review): these substitutions are very broad -- s/dev/var/g rewrites
# every occurrence of "dev" anywhere in rsyslog.conf, not just log paths.
# Kept as-is to preserve the lab's intended result; verify against the
# stock rsyslog.conf before reusing elsewhere.
sed -i 's/dev/var/g' /etc/rsyslog.conf
sed -i 's/console/log\/kern.log/g' /etc/rsyslog.conf
#6.1.4
#Create and Set Permissions on rsyslogLog Files
#ensure that log files exist & correct permissions are set
# Identical touch/chown/chmod triple for every managed log file.
for logfile in /var/log/kern.log /var/log/messages /var/log/secure \
    /var/log/maillog /var/log/cron /var/log/spooler /var/log/boot.log; do
    touch "$logfile"
    chown root:root "$logfile"
    chmod og-rwx "$logfile"
done
#6.1.5
#Configure rsyslogto Send Logs to a Remote Log Host
echo " *.* @@localhost" >> /etc/rsyslog.conf
pkill -HUP rsyslogd
#6.1.6
#Accept Remote rsyslogMessages Only onDesignated Log Hosts
pkill -HUP rsyslogd
#6.2.1.1 Configure Audit Log Storage Size
# NOTE: the /max_log_file/ pattern also matches the max_log_file_action line;
# 6.2.1.2 immediately rewrites that line, so keep these two in this order.
sed -i '/max_log_file/s/= .*/= 5/' /etc/audit/auditd.conf
#6.2.1.2 Keep All Auditing Information (add 'max_log...' into this file)
sed -i '/max_log_file_action/s/= .*/= keep_logs/' /etc/audit/auditd.conf
#6.2.1.3 Disable System on Audit Log Full (add following lines into this file)
sed -i '/space_left_action/s/= .*/= email/' /etc/audit/auditd.conf
sed -i '/action_mail_acct/s/= .*/= root/' /etc/audit/auditd.conf
sed -i '/admin_space_left_action/s/= .*/= halt/' /etc/audit/auditd.conf
#6.2.1.4 Enable auditdService (allows admin to determine if unauthorized access to their system is occurring.)
systemctl enable auditd
#6.2.1.5 Enable Auditing for Processes That Start Prior to auditd
#(Audit events need to be captured on processes that start up prior to auditd, so that potential malicious activity cannot go undetected.)
sed -i 's/crashkernel=auto rhgb quiet/audit=1/g' /etc/default/grub
grub2-mkconfig -o /boot/grub2/grub.cfg
#6.2.1.6 Record Events That Modify Date and Time Information
#(Unexpected changes in system date and/or time could be a sign of malicious activity on the system.)
# BUG FIX: the original wrote "-a always, exit" (space after the comma) on
# two rules below; auditctl rejects that form -- it must be "always,exit".
echo "-a always,exit -F arch=b64 -S adjtimex -S settimeofday -k time-change" >>/etc/audit/audit.rules
echo "-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b64 -S clock_settime -k time-change" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b32 -S clock_settime -k time-change" >> /etc/audit/audit.rules
echo "-w /etc/localtime -p wa -k time-change" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b64 -S adjtimex -S settimeofday -k time-change" >>/etc/audit/rules.d/audit.rules
echo "-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change" >> /etc/audit/rules.d/audit.rules
echo "-a always,exit -F arch=b64 -S clock_settime -k time-change" >> /etc/audit/rules.d/audit.rules
echo "-a always,exit -F arch=b32 -S clock_settime -k time-change" >> /etc/audit/rules.d/audit.rules
echo "-w /etc/localtime -p wa -k time-change" >> /etc/audit/rules.d/audit.rules
pkill -P 1 -HUP auditd
#6.2.1.7 Record Events That Modify User/Group Information
#(Unexpected changes to these files could be an indication that the system has been compromised and that an unauthorized user is attempting to hide their activities or compromise additional accounts.)
echo "-w /etc/group -p wa -k identity" >> /etc/audit/audit.rules
echo "-w /etc/passwd -p wa -k identity" >> /etc/audit/audit.rules
echo "-w /etc/gshadow -p wa -k identity" >> /etc/audit/audit.rules
echo "-w /etc/shadow -p wa -k identity" >> /etc/audit/audit.rules
echo "-w /etc/security/opasswd -p wa -k identity" >> /etc/audit/audit.rules
pkill -P 1 -HUP auditd
#6.2.1.8 Record Events That Modify the System's Network Environment
echo "-a always,exit -F arch=b64 -S sethostname -S setdomainname -k system-locale" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale" >> /etc/audit/audit.rules
echo "-w /etc/issue -p wa -k system-locale" >> /etc/audit/audit.rules
echo "-w /etc/issue.net -p wa -k system-locale" >> /etc/audit/audit.rules
echo "-w /etc/hosts -p wa -k system-locale" >> /etc/audit/audit.rules
echo "-w /etc/sysconfig/network -p wa -k system-locale" >> /etc/audit/audit.rules
pkill -P 1 -HUP auditd
#6.2.1.9 Record Events That Modify the System's Mandatory Access Controls
#(indicate that an unauthorized user is attempting to modify access controls and change security contexts, leading to a compromise of the system.)
echo "-w /etc/selinux/ -p wa -k MAC-policy" >> /etc/audit/audit.rules
pkill -P 1 -HUP auditd
#6.2.1.10 Collect Login and Logout Events
#(Monitoring login/logout events could provide a system administrator with information associated with brute force attacks against user logins)
echo "-w /var/log/faillog -p wa -k logins" >> /etc/audit/audit.rules
echo "-w /var/log/lastlog -p wa -k logins" >> /etc/audit/audit.rules
echo "-w /var/log/tallylog -p wa -k logins" >> /etc/audit/audit.rules
pkill -P 1 -HUP auditd
#6.2.1.11 Collect session initiation information
#Add the following lines to /etc/audit/audit.rules file
echo "-w /var/run/utmp -p wa -k session" >> /etc/audit/audit.rules
echo "-w /var/log/wtmp -p wa -k session" >> /etc/audit/audit.rules
echo "-w /var/log/btmp -p wa -k session" >> /etc/audit/audit.rules
#Execute following command to restart auditd
pkill -HUP -P 1 auditd
#6.2.1.12 Collect discretionary access control permission modification events
#Add the following lines to /etc/audit/audit.rules file
echo "-a always,exit -F arch=b64 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b64 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b64 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod" >> /etc/audit/audit.rules
# BUG FIX: the original rule below was missing its leading "-" ("a always,exit
# ..."), which auditctl cannot parse.
echo "-a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod" >> /etc/audit/audit.rules
#Execute the following command to restart auditd
pkill -HUP -P 1 auditd
#6.2.1.13 Collect unsuccessful unauthorized access attempts to files
#Add the following lines to /etc/audit/audit.rules file
echo "-a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access" >> /etc/audit/audit.rules
#Execute following command to restart auditd
pkill -HUP -P 1 auditd
#6.2.1.14 Collect use of privileged commands
# NOTE(review): no rule is emitted for 6.2.1.14 -- confirm whether this was
# intentionally skipped or lost.
#6.2.1.15 Collect successful file system mounts
#Add the following lines to /etc/audit/audit.rules file
echo "-a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts" >> /etc/audit/audit.rules
#Execute the following command to restart auditd
pkill -HUP -P 1 auditd
#6.2.1.16 Collect file deletion events by user
#Add the following lines to /etc/audit/audit.rules file
echo "-a always,exit -F arch=b64 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete" >> /etc/audit/audit.rules
#Execute the following command to restart auditd
pkill -HUP -P 1 auditd
#6.2.1.17 Collect changes to system administration scope
#Add the following lines to /etc/audit/audit.rules file
echo "-w /etc/sudoers -p wa -k scope" >> /etc/audit/audit.rules
#Execute the following command to restart auditd
pkill -HUP -P 1 auditd
#6.2.1.18 Collect system administrator actions (syslog)
#Add the following lines to /etc/audit/audit.rules file
echo "-w /var/log/sudo.log -p wa -k actions" >> /etc/audit/audit.rules
#Execute the following command to restart auditd
pkill -HUP -P 1 auditd
#6.2.1.19 Collect kernel module loading and unloading
#Add the following lines to /etc/audit/audit.rules file
echo "-w /sbin/insmod -p x -k modules" >> /etc/audit/audit.rules
echo "-w /sbin/rmmod -p x -k modules" >> /etc/audit/audit.rules
echo "-w /sbin/modprobe -p x -k modules" >> /etc/audit/audit.rules
echo "-a always,exit -F arch=b64 -S init_module -S delete_module -k modules" >> /etc/audit/audit.rules
#6.2.1.20 Make the audit configuration immutable
#Add the following lines to /etc/audit/audit.rules file
# "-e 2" locks the audit configuration until reboot (see auditctl(8)), so it
# must stay the last rule appended.
echo "-e 2" >> /etc/audit/audit.rules
#6.2.1.21 Configure logrotate
#Edit the /etc/logrotate.d/syslog file to include appropriate system logs
# NOTE(review): this appends an opening "{" with no rotation directives and
# no closing brace, leaving /etc/logrotate.d/syslog malformed -- TODO confirm
# the intended stanza body and close the brace.
echo "/var/log/messages /var/log/secure /var/log/maillog /var/log/spooler /var/log/boot.log /var/log/cron {" >> /etc/logrotate.d/syslog
| true
|
dd99dc159bd0ef6eddc354a6dac6e00558fe967b
|
Shell
|
xiaobingchan/WenNi
|
/环境部署/部署脚本/数据库/mongodb.sh
|
UTF-8
| 1,323
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Each RHEL release needs its matching MongoDB build; do not ship the wrong one.
wget http://downloads.mongodb.org/linux/mongodb-linux-x86_64-rhel70-v3.4-latest.tgz
# BUG FIX: the original untarred "...rhel70-latest.tgz", which is not the
# file downloaded above.
tar -xvzf mongodb-linux-x86_64-rhel70-v3.4-latest.tgz -C /data/soft/
# Use a glob for the extracted directory: the original hard-coded a 4.1.10
# directory name that cannot match the v3.4 tarball fetched above.
mv /data/soft/mongodb-linux-x86_64-rhel70-*/ /data/soft/mongodb/
mkdir /data/soft/mongodb/data
mkdir /data/soft/mongodb/log
mkdir /data/soft/mongodb/etc
touch /data/soft/mongodb/log/mongo.log
# BUG FIX: with an unquoted heredoc delimiter, $PATH was expanded at install
# time, baking this machine's PATH into /etc/profile.  Quote the delimiter so
# the literal "$PATH" reference is written instead.
cat >> /etc/profile << 'EOF'
export PATH=$PATH:/data/soft/mongodb/bin
EOF
source /etc/profile
# BUG FIX: these were "chown -R 777 ..." -- chown takes an owner, so that
# sets the files' owner to UID 777.  The intent (open permissions) is chmod.
# NOTE(review): 777 is very permissive; consider tightening.
chmod -R 777 /data/soft/mongodb/data
chmod -R 777 /data/soft/mongodb/log
chmod -R 777 /data/soft/mongodb/etc
cat >> /data/soft/mongodb/mongodb.conf << 'EOF'
dbpath=/data/soft/mongodb/data/
logpath=/data/soft/mongodb/log/mongo.log
logappend=true
quiet=true
port=20000
fork=true
bind_ip=0.0.0.0
EOF
/data/soft/mongodb/bin/mongod --config /data/soft/mongodb/mongodb.conf
# Open a shell with: /data/soft/mongodb/bin/mongo --port=20000
# mongodb admin user: useradmin  password: adminpassword
#use admin
#db.createUser({user:"useradmin",pwd:"adminpassword",roles:[{role:"userAdminAnyDatabase",db:"admin"}]})
#db.auth("useradmin","adminpassword")
# pdx database owner: pdx  password: 123456
#use pdx
#db.createUser({user:"pdx",pwd:"123456",roles:[{role:"dbOwner",db:"pdx"}]})
#db.auth("pdx","123456")
| true
|
0cc365c444122ea6898adfc73f35957da0e45b64
|
Shell
|
bruceyoung01/wrf-chem.r13172
|
/models/wrf_chem/run_scripts/RUN_PERT_CHEM/ICBC_PERT/runICBC_setN_rt_CR.ksh
|
UTF-8
| 1,910
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/ksh -x
unalias ls
# Date/ensemble parameters (YYYY, MM, DD, HH, NUM_MEMBERS, WRFCHEM_* dirs,
# RUN_DIR) are exported by the calling workflow; this script only reads them.
export YEAR=${YYYY}
export MONTH=${MM}
export DAY=${DD}
export HOUR=${HH}
export IC_METDIR=${WRFCHEM_MET_IC_DIR}
export BC_METDIR=${WRFCHEM_MET_BC_DIR}
export CHEMDIR=${RUN_DIR}/${YEAR}${MONTH}${DAY}${HOUR}/wrfchem_chem_icbc
# Loop over ensemble members 1..NUM_MEMBERS.
let MEM=1
while [[ ${MEM} -le ${NUM_MEMBERS} ]]; do
export IENS=${MEM}
# Zero-pad the member id to three digits (e.g. 7 -> 007).
if [[ ${MEM} -lt 100 ]]; then export IENS=0${MEM}; fi
if [[ ${MEM} -lt 10 ]]; then export IENS=00${MEM}; fi
export WRFINP=wrfinput_d01_${YEAR}-${MONTH}-${DAY}_${HOUR}:00:00.e${IENS}
export WRFBDY=wrfbdy_d01_${YEAR}-${MONTH}-${DAY}_${HOUR}:00:00.e${IENS}
echo 'get inp'
# Stage this member's met IC/BC files into the working directory.
cp ${IC_METDIR}/${WRFINP} ./.
cp ${BC_METDIR}/${WRFBDY} ./.
ls set${MEM}
#----------------------------------------------------------------------
# TRICKY PART:
# since the following original statement does not work on taki.rs.umbc.edu
#   rm -f mozbc.ic.inp.set${MEM}
#   cat mozbc.ic.inp set${MEM} > mozbc.ic.inp.set${MEM}
# So we add a tricky part to make a new file as below
#   rm -f mozbc.ic.inp.set${MEM} mozbc.ic.inp.set${MEM}a
#   cat mozbc.ic.inp set${MEM} > mozbc.ic.inp.set${MEM}a
#   echo > tmp.txt
#   cat mozbc.ic.inp.set${MEM}a tmp.txt > mozbc.ic.inp.set${MEM}
# then it works
#----------------------------------------------------------------------
# NOTE(review): concatenating tmp.txt (a single newline from "echo") appends
# a blank line -- presumably working around a missing trailing newline in
# set${MEM}; confirm against the mozbc input requirements.
rm -f mozbc.ic.inp.set${MEM} mozbc.ic.inp.set${MEM}a
cat mozbc.ic.inp set${MEM} > mozbc.ic.inp.set${MEM}a
echo > tmp.txt
cat mozbc.ic.inp.set${MEM}a tmp.txt > mozbc.ic.inp.set${MEM}
rm -f mozbc.bc.inp.set${MEM} mozbc.bc.inp.set${MEM}a
cat mozbc.bc.inp set${MEM} > mozbc.bc.inp.set${MEM}a
echo > tmp.txt
cat mozbc.bc.inp.set${MEM}a tmp.txt > mozbc.bc.inp.set${MEM}
echo 'run mozbc'
# Generate initial (ic) and boundary (bc) chemistry conditions for this member.
./run_mozbc_rt_CR.csh type=ic mozbc_inp=mozbc.ic.inp.set${MEM} ens=${IENS}
./run_mozbc_rt_CR.csh type=bc mozbc_inp=mozbc.bc.inp.set${MEM} ens=${IENS}
echo 'put files'
echo 'OK'
let MEM=MEM+1
done
| true
|
776f3b2a31d5b059f613d6bbad068feed9299f90
|
Shell
|
lintmars/opentsdb-examples
|
/bin/opentsdb_cli_install.sh
|
UTF-8
| 1,572
| 3.734375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Variables
#
# Pulls TSD_CLI_HOST_LIST, SSH_ARGS, TSD_HOME, TSD_LOG_DIR, JAVA_BIN,
# ZK_QUORUM and ZK_HBASE_ROOT from the shared config file.
. /tmp/opentsdb-examples/conf/opentsdb.conf
#
# Sanity Checks
#
if [ `id -un` != "root" ]; then
  echo "ERROR: Must be run as root"
  exit 1
fi
#
# Main
#
# Build OpenTSDB from source and install a CLI wrapper on every listed host,
# driving each step over SSH.  Each remote step aborts the script on failure.
for node in $TSD_CLI_HOST_LIST; do
  echo -e "\n##"
  echo -e "## Starting OpenTSDB TSD CLI wrapper install on $node"
  echo -e "##"
  # Install git, automake, and gnuplot
  echo -e "\n#### Installing git, gnuplot, and automake"
  ssh $SSH_ARGS $node "yum install -y git gnuplot automake" || exit 1
  # Build opentsdb
  echo -e "\n#### Cloning OpenTSDB git repo and switching to \"next\" branch"
  # Skip the clone if a previous run already left a checkout behind.
  if ssh $SSH_ARGS $node "test -d $TSD_HOME/.git"; then
    echo "WARNING: /tmp/opentsdb already exists... skipping"
  else
    ssh $SSH_ARGS $node "cd $(dirname $TSD_HOME) && git clone git://github.com/OpenTSDB/opentsdb.git && cd $TSD_HOME && git checkout next" || exit 1
  fi
  # Run the opentsdb build script
  echo -e "\n#### Running the OpenTSDB build script"
  ssh $SSH_ARGS $node "cd $TSD_HOME && PATH=$PATH:$JAVA_BIN ./build.sh" || exit 1
  # Create TSD log dir
  echo -e "\n#### Creating the TSD log dir $TSD_LOG_DIR"
  ssh $SSH_ARGS $node "mkdir -p $TSD_LOG_DIR" || exit 1
  # Creating the CLI wrapper
  echo -e "\n#### Creating the CLI wrapper script"
  # NOTE(review): the \$\"@\" escaping is intended to write $"@" into the
  # remote wrapper so CLI arguments pass through to tsdb -- verify the
  # generated tsdb.cli quotes its arguments as expected.
  ssh $SSH_ARGS $node "echo \"cd $TSD_HOME && ./build/tsdb \$\"@\" --zkquorum=$ZK_QUORUM --zkbasedir=$ZK_HBASE_ROOT\" >$TSD_HOME/tsdb.cli && chmod 755 $TSD_HOME/tsdb.cli" || exit 1
  echo -e "\n##"
  echo -e "## Successfully completed OpenTSDB TSD CLI install on $node"
  echo -e "##"
done
| true
|
8a0ae2de568621804811d45fbb4f09f2d564ca2a
|
Shell
|
lesin/awesome-tools
|
/dotfiles/.zshrc
|
UTF-8
| 2,484
| 2.96875
| 3
|
[] |
no_license
|
export PATH="$HOME/.bin:$PATH"
source /Users/lesin/.asdf/asdf.sh
source "$HOME/.slimzsh/slim.zsh"
source ~/.zsh-nvm/zsh-nvm.plugin.zsh
# Universal
alias c='clear'
# WARNING: aliasing "." shadows the POSIX source builtin in command position.
alias .='echo $PWD'
alias ..='cd ..'
alias ...='cd ../../../'
alias ....='cd ../../../../'
# BUG FIX: "....." was a duplicate of "...." (four levels); make it five.
alias .....='cd ../../../../../'
alias wget='wget -c'
alias tree="find . -print | sed -e 's;[^/]*/;|____;g;s;____|; |;g' |less"
alias showfiles='defaults write com.apple.finder AppleShowAllFiles YES; killall Finder /System/Library/CoreServices/Finder.app'
alias hidefiles='defaults write com.apple.finder AppleShowAllFiles NO; killall Finder /System/Library/CoreServices/Finder.app'
# Shortcuts
alias irbs="irb --simple-prompt"
alias srspec="spring rspec"
alias ngrok="~/Applications/ngrok"
alias subl="/Applications/Sublime\ Text.app/Contents/SharedSupport/bin/subl"
alias getsong='youtube-dl -f 140'
# The ubiquitous 'll': directories first, with alphanumeric sorting:
alias ll='ls -lvF'
alias lm='ll |more'        # Pipe through 'more'
alias lr='ll -R'           # Recursive ls.
alias la='ll -A'           # Show hidden files.
# Colorize the grep command output for ease of use (good for log files)##
alias grep='grep --color=auto'
alias egrep='egrep --color=auto'
alias fgrep='fgrep --color=auto'
# confirmation #
alias mv='mv -i'
alias cp='cp -i'
alias ln='ln -i'
# Get External IP / local IPs
alias ip="curl ipinfo.io/ip"
# BUG FIX: inside double quotes $1 expanded when the alias was *defined*
# (to nothing), so perl printed empty lines.  Escape it so the literal $1
# reaches perl at run time.
alias ips="ifconfig -a | perl -nle'/(\d+\.\d+\.\d+\.\d+)/ && print \$1'"
# Docker
alias dc="docker-compose"
alias dcr="docker-compose run --rm"
alias dps="docker ps --format \"table {{.ID}}\t{{.Names}}\t{{.Status}}\""
alias ddf="docker system df"
# Git
alias gs='git status'
alias gc='git commit'
alias gcm='git commit -m'
alias gcam='git commit --amend --no-edit'
alias ga='git add'
alias go='git checkout'
alias gb='git branch'
alias gd='git diff'
alias gdc='git diff --cached'
alias grsoft='git reset --soft HEAD^'
alias gl='git log --pretty=oneline'
alias gl5='git log --pretty=format:"%h - %s" -5'
alias gp='git push origin $(current_branch)'
alias gpf!='git push --force-with-lease'
alias gpr='git pull --rebase origin $(current_branch)'
alias gdel="git branch --merged master | grep -v '^[ *]*master$' | xargs git branch -d"
# use current branch
# Echo the current git branch name (falls back to the short commit hash when
# HEAD is detached; prints nothing outside a repository).
function current_branch() {
  if ref=$(git symbolic-ref HEAD 2> /dev/null); then
    :
  elif ref=$(git rev-parse --short HEAD 2> /dev/null); then
    :
  else
    return
  fi
  echo ${ref#refs/heads/}
}
#no autocorrection
unsetopt correct_all
setopt correct
export PS1='> '
| true
|
aadf8b273d2502471f61a8a409b98f4efc1b09da
|
Shell
|
quynhlab/KodeKloud_Tasks
|
/Tasks_31-40/TASK_36-SF_-_Scrapes_-_DB_Server.sh
|
UTF-8
| 15,526
| 2.625
| 3
|
[] |
no_license
|
-----------------------------------------------------------------------------------------------------------------
# SUPPORTING FILE
# This is a supporting file for the Task of the same name.
# This may contain configuration/config files.
# Basically, any file that is edited/created/modified through the 'vi' or 'nano' command will appear here.
# Note that there may be more than one supporting file for the Task at hand.
-----------------------------------------------------------------------------------------------------------------
TASK 36 - SupportFile - Install and Configure PostgreSQL
thor@jump_host /$ sshpass -p '******' ssh -o StrictHostKeyChecking=no peter@172.16.239.10Warning: Permanently added '172.16.239.10' (ECDSA) to the list of known hosts.
sudo su -******[peter@stdb01 ~]$ sudo su -
We trust you have received the usual lecture from the local System
Administrator. It usually boils down to these three things:
#1) Respect the privacy of others.
#2) Think before you type.
#3) With great power comes great responsibility.
[sudo] password for peter:
[root@stdb01 ~]#
[root@stdb01 ~]# yum install -y postgresql-server postgresql-contribLoaded plugins: fastestmirror, ovl
Determining fastest mirrors
* base: centosmirror.netcup.net
* extras: mirror.checkdomain.de
* updates: centosmirror.netcup.net
base | 3.6 kB 00:00:00
extras | 2.9 kB 00:00:00
updates | 2.9 kB 00:00:00
(1/4): base/7/x86_64/group_gz | 153 kB 00:00:00
(2/4): extras/7/x86_64/primary_db | 206 kB 00:00:00
(3/4): base/7/x86_64/primary_db | 6.1 MB 00:00:00
(4/4): updates/7/x86_64/primary_db | 4.5 MB 00:00:00
Resolving Dependencies
--> Running transaction check
---> Package postgresql-contrib.x86_64 0:9.2.24-4.el7_8 will be installed
--> Processing Dependency: postgresql-libs(x86-64) = 9.2.24-4.el7_8 for package: postgresql-contrib-9.2.24-4.el7_8.x86_64
--> Processing Dependency: postgresql(x86-64) = 9.2.24-4.el7_8 for package: postgresql-contrib-9.2.24-4.el7_8.x86_64
--> Processing Dependency: libxslt.so.1(LIBXML2_1.0.22)(64bit) for package: postgresql-contrib-9.2.24-4.el7_8.x86_64
--> Processing Dependency: libxslt.so.1(LIBXML2_1.0.18)(64bit) for package: postgresql-contrib-9.2.24-4.el7_8.x86_64
--> Processing Dependency: libxslt.so.1(LIBXML2_1.0.11)(64bit) for package: postgresql-contrib-9.2.24-4.el7_8.x86_64
--> Processing Dependency: libxslt.so.1()(64bit) for package: postgresql-contrib-9.2.24-4.el7_8.x86_64
--> Processing Dependency: libpq.so.5()(64bit) for package: postgresql-contrib-9.2.24-4.el7_8.x86_64
--> Processing Dependency: libossp-uuid.so.16()(64bit) for package: postgresql-contrib-9.2.24-4.el7_8.x86_64
---> Package postgresql-server.x86_64 0:9.2.24-4.el7_8 will be installed
--> Processing Dependency: systemd-sysv for package: postgresql-server-9.2.24-4.el7_8.x86_64
--> Running transaction check
---> Package libxslt.x86_64 0:1.1.28-5.el7 will be installed
---> Package postgresql.x86_64 0:9.2.24-4.el7_8 will be installed
---> Package postgresql-libs.x86_64 0:9.2.24-4.el7_8 will be installed
---> Package systemd-sysv.x86_64 0:219-73.el7_8.9 will be installed
--> Processing Dependency: systemd = 219-73.el7_8.9 for package: systemd-sysv-219-73.el7_8.9.x86_64
---> Package uuid.x86_64 0:1.6.2-26.el7 will be installed
--> Running transaction check
---> Package systemd.x86_64 0:219-62.el7_6.9 will be updated
---> Package systemd.x86_64 0:219-73.el7_8.9 will be an update
--> Processing Dependency: systemd-libs = 219-73.el7_8.9 for package: systemd-219-73.el7_8.9.x86_64
--> Running transaction check
---> Package systemd-libs.x86_64 0:219-62.el7_6.9 will be updated
---> Package systemd-libs.x86_64 0:219-73.el7_8.9 will be an update
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================================
Package Arch Version Repository Size
================================================================================================
Installing:
postgresql-contrib x86_64 9.2.24-4.el7_8 updates 552 k
postgresql-server x86_64 9.2.24-4.el7_8 updates 3.8 M
Installing for dependencies:
libxslt x86_64 1.1.28-5.el7 base 242 k
postgresql x86_64 9.2.24-4.el7_8 updates 3.0 M
postgresql-libs x86_64 9.2.24-4.el7_8 updates 234 k
systemd-sysv x86_64 219-73.el7_8.9 updates 94 k
uuid x86_64 1.6.2-26.el7 base 55 k
Updating for dependencies:
systemd x86_64 219-73.el7_8.9 updates 5.1 M
systemd-libs x86_64 219-73.el7_8.9 updates 416 k
Transaction Summary
================================================================================================
Install 2 Packages (+5 Dependent packages)
Upgrade ( 2 Dependent packages)
Total download size: 13 M
Downloading packages:
Delta RPMs disabled because /usr/bin/applydeltarpm not installed.
(1/9): libxslt-1.1.28-5.el7.x86_64.rpm | 242 kB 00:00:00
(2/9): postgresql-contrib-9.2.24-4.el7_8.x86_64.rpm | 552 kB 00:00:00
(3/9): postgresql-libs-9.2.24-4.el7_8.x86_64.rpm | 234 kB 00:00:00
(4/9): postgresql-9.2.24-4.el7_8.x86_64.rpm | 3.0 MB 00:00:00
(5/9): postgresql-server-9.2.24-4.el7_8.x86_64.rpm | 3.8 MB 00:00:00
(6/9): systemd-219-73.el7_8.9.x86_64.rpm | 5.1 MB 00:00:00
(7/9): systemd-sysv-219-73.el7_8.9.x86_64.rpm | 94 kB 00:00:00
(8/9): uuid-1.6.2-26.el7.x86_64.rpm | 55 kB 00:00:00
(9/9): systemd-libs-219-73.el7_8.9.x86_64.rpm | 416 kB 00:00:00
------------------------------------------------------------------------------------------------
Total 28 MB/s | 13 MB 00:00:00
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : postgresql-libs-9.2.24-4.el7_8.x86_64 1/11
Installing : postgresql-9.2.24-4.el7_8.x86_64 2/11
Installing : uuid-1.6.2-26.el7.x86_64 3/11
Updating : systemd-libs-219-73.el7_8.9.x86_64 4/11
Updating : systemd-219-73.el7_8.9.x86_64 5/11
Installing : systemd-sysv-219-73.el7_8.9.x86_64 6/11
Installing : libxslt-1.1.28-5.el7.x86_64 7/11
Installing : postgresql-contrib-9.2.24-4.el7_8.x86_64 8/11
Installing : postgresql-server-9.2.24-4.el7_8.x86_64 9/11
Cleanup : systemd-219-62.el7_6.9.x86_64 10/11
Cleanup : systemd-libs-219-62.el7_6.9.x86_64 11/11
Verifying : postgresql-9.2.24-4.el7_8.x86_64 1/11
Verifying : systemd-sysv-219-73.el7_8.9.x86_64 2/11
Verifying : postgresql-contrib-9.2.24-4.el7_8.x86_64 3/11
Verifying : libxslt-1.1.28-5.el7.x86_64 4/11
Verifying : systemd-libs-219-73.el7_8.9.x86_64 5/11
Verifying : uuid-1.6.2-26.el7.x86_64 6/11
Verifying : postgresql-server-9.2.24-4.el7_8.x86_64 7/11
Verifying : postgresql-libs-9.2.24-4.el7_8.x86_64 8/11
Verifying : systemd-219-73.el7_8.9.x86_64 9/11
Verifying : systemd-219-62.el7_6.9.x86_64 10/11
Verifying : systemd-libs-219-62.el7_6.9.x86_64 11/11
Installed:
postgresql-contrib.x86_64 0:9.2.24-4.el7_8 postgresql-server.x86_64 0:9.2.24-4.el7_8
Dependency Installed:
libxslt.x86_64 0:1.1.28-5.el7 postgresql.x86_64 0:9.2.24-4.el7_8
postgresql-libs.x86_64 0:9.2.24-4.el7_8 systemd-sysv.x86_64 0:219-73.el7_8.9
uuid.x86_64 0:1.6.2-26.el7
Dependency Updated:
systemd.x86_64 0:219-73.el7_8.9 systemd-libs.x86_64 0:219-73.el7_8.9
Complete!
[root@stdb01 ~]#
[root@stdb01 ~]# postgresql-setup initdb
Initializing database ... OK
[root@stdb01 ~]#
[root@stdb01 ~]# systemctl enable postgresql && systemctl start postgresql && systemctl status postgresql
Created symlink from /etc/systemd/system/multi-user.target.wants/postgresql.service to /usr/lib/systemd/system/postgresql.service.
● postgresql.service - PostgreSQL database server
Loaded: loaded (/usr/lib/systemd/system/postgresql.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2020-09-08 17:31:00 UTC; 6ms ago
Process: 334 ExecStart=/usr/bin/pg_ctl start -D ${PGDATA} -s -o -p ${PGPORT} -w -t 300 (code=exited, status=0/SUCCESS)
Process: 329 ExecStartPre=/usr/bin/postgresql-check-db-dir ${PGDATA} (code=exited, status=0/SUCCESS)
Main PID: 336 (postgres)
CGroup: /docker/425c146ad3e0723033f009565c1b6fa56e269d891e24fd05a958fc962b9a399e/system.slice/postgresql.service
├─336 /usr/bin/postgres -D /var/lib/pgsql/data -p 5432
├─337 postgres: logger process
├─339 postgres: checkpointer process
├─340 postgres: writer process
├─341 postgres: wal writer process
├─342 postgres: autovacuum launcher process
└─343 postgres: stats collector process
Sep 08 17:30:59 stdb01 systemd[1]: Starting PostgreSQL database server...
Sep 08 17:30:59 stdb01 pg_ctl[334]: LOG: could not bind IPv6 socket: Cannot assign reques...ess
Sep 08 17:30:59 stdb01 pg_ctl[334]: HINT: Is another postmaster already running on port 5...ry.
Sep 08 17:31:00 stdb01 systemd[1]: Started PostgreSQL database server.
Hint: Some lines were ellipsized, use -l to show in full.
[root@stdb01 ~]#
[root@stdb01 ~]# sudo -u postgres psql
could not change directory to "/root"
psql (9.2.24)
Type "help" for help.
postgres=#
postgres=# create user kodekloud_rin with encrypted password 'B4zNgHA7Ya';
CREATE ROLE
postgres=#
postgres=# create database kodekloud_db9 owner kodekloud_rin;
CREATE DATABASE
postgres=#
postgres=# grant all privileges on database kodekloud_db9 to kodekloud_rin ;
GRANT
postgres=# ^Z
[1]+ Stopped sudo -u postgres psql
[root@stdb01 ~]#
[root@stdb01 ~]# vi /var/lib/pgsql/data/postgresql.conf
[root@stdb01 ~]# vi /var/lib/pgsql/data/pg_hba.conf
[root@stdb01 ~]#
[root@stdb01 ~]#
[root@stdb01 ~]# sudo systemctl restart postgresql
[root@stdb01 ~]#
[root@stdb01 ~]# systemctl enable postgresql && systemctl start postgresql && systemctl status postgresql
● postgresql.service - PostgreSQL database server
Loaded: loaded (/usr/lib/systemd/system/postgresql.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2020-09-08 17:34:22 UTC; 53ms ago
Main PID: 366 (postgres)
CGroup: /docker/425c146ad3e0723033f009565c1b6fa56e269d891e24fd05a958fc962b9a399e/system.slice/postgresql.service
├─366 /usr/bin/postgres -D /var/lib/pgsql/data -p 5432
├─367 postgres: logger process
├─369 postgres: checkpointer process
├─370 postgres: writer process
├─371 postgres: wal writer process
├─372 postgres: autovacuum launcher process
└─373 postgres: stats collector process
Sep 08 17:34:20 stdb01 systemd[1]: Starting PostgreSQL database server...
Sep 08 17:34:21 stdb01 pg_ctl[364]: LOG: could not bind IPv6 socket: Cannot assign reques...ess
Sep 08 17:34:21 stdb01 pg_ctl[364]: HINT: Is another postmaster already running on port 5...ry.
Sep 08 17:34:22 stdb01 systemd[1]: Started PostgreSQL database server.
Hint: Some lines were ellipsized, use -l to show in full.
[root@stdb01 ~]#
[root@stdb01 ~]#
[root@stdb01 ~]# psql -U kodekloud_rin -d kodekloud_db9 -h 127.0.0.1 -W
Password for user kodekloud_rin:
psql (9.2.24)
Type "help" for help.
kodekloud_db9=> \l
List of databases
Name | Owner | Encoding | Collate | Ctype | Access privileges
---------------+---------------+----------+-------------+-------------+-------------------------
--------
kodekloud_db9 | kodekloud_rin | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =Tc/kodekloud_rin
+
| | | | | kodekloud_rin=CTc/kodekl
oud_rin
postgres | postgres | UTF8 | en_US.UTF-8 | en_US.UTF-8 |
template0 | postgres | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/postgres
+
| | | | | postgres=CTc/postgres
template1 | postgres | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/postgres
+
| | | | | postgres=CTc/postgres
(4 rows)
kodekloud_db9=> ^Z
[2]+ Stopped psql -U kodekloud_rin -d kodekloud_db9 -h 127.0.0.1 -W
[root@stdb01 ~]#
[root@stdb01 ~]#
[root@stdb01 ~]# psql -U kodekloud_rin -d kodekloud_db9 -h localhost -W
Password for user kodekloud_rin:
psql (9.2.24)
Type "help" for help.
kodekloud_db9=>
kodekloud_db9=> \l
List of databases
Name | Owner | Encoding | Collate | Ctype | Access privileges
---------------+---------------+----------+-------------+-------------+-------------------------
--------
kodekloud_db9 | kodekloud_rin | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =Tc/kodekloud_rin
+
| | | | | kodekloud_rin=CTc/kodekl
oud_rin
postgres | postgres | UTF8 | en_US.UTF-8 | en_US.UTF-8 |
template0 | postgres | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/postgres
+
| | | | | postgres=CTc/postgres
template1 | postgres | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/postgres
+
| | | | | postgres=CTc/postgres
(4 rows)
kodekloud_db9=>
kodekloud_db9=> Connection to host01 closed by remote host.
Connection to host01 closed.
The environment has expired.
Please refresh to get a new environment.
| true
|
4c765c465fd34e0bd12936097b01c7fee6a273a5
|
Shell
|
parkingvarsson/Recombination_rate_variation
|
/4-methylation/1_gunzip.sh
|
UTF-8
| 417
| 3
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash -l
#SBATCH -p core
#SBATCH -n 2
#SBATCH -t 01:00:00
##### This script contains a loop for unzipping the bismark cov -files #####
# For every sample folder, decompress each *.bismark.cov.gz into the
# current working directory, dropping the .gz suffix from the output name.
for folder in path/to/bismark/results/SwAsp*
do
  echo "$folder"
  for file in "$folder"/new/*bismark.cov.gz
  do
    # Skip the literal glob pattern when a folder has no matching files;
    # the original ran gunzip on the unexpanded pattern and left behind a
    # junk file literally named '*bismark.cov'.
    [ -e "$file" ] || continue
    echo "$file"
    unzipped=${file##*/}      # basename, e.g. sample.bismark.cov.gz
    named=${unzipped%%.gz}    # output name without the .gz suffix
    # Decompress straight to the final name (the original wrote to a
    # temporary name and mv'd it, an unnecessary extra step).
    gunzip -c "$file" > "$named"
  done
done
| true
|
9fee3e5c61a20ea5e8b5d14603df00e0f38fbf01
|
Shell
|
nodenstuff/http-auth
|
/gendocs
|
UTF-8
| 447
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Regenerate the dox API documentation in docs/ for the library and tests.
# Remove current documents (-f so an empty docs/ directory is not an error,
# where the original 'rm -r docs/*' failed on the unmatched glob).
rm -rf docs/*
# List of js files, newline-separated find output collapsed to one line.
# NB: relies on word splitting, so source paths must not contain whitespace.
FILES=$(find lib -name '*.js' | tr '\n' ' ')
TESTFILES=$(find tests -name '*.js' | tr '\n' ' ')
# Generating documents.
dox --title "http-auth" --desc "Node.js package for HTTP basic and digest access authentication." $FILES > docs/index.html
dox --title "http-auth tests" --desc "Node.js package for HTTP basic and digest access authentication." $TESTFILES > docs/tests.html
| true
|
f834550532cfbf16871174d719233d4a9d2c167d
|
Shell
|
resmo/voyage-linux
|
/voyage-live/config/hooks/031-gen_passwd.chroot
|
UTF-8
| 232
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/sh
# Live-image build hook: set a fixed root password by splicing a
# pre-computed MD5-crypt hash ($1$salt$hash) into /etc/shadow.  The two
# substitutions cover both initial states of the root entry: an empty
# password field ("root::") and a locked account ("root:*:").  The '$'
# characters are literal here — single-quoted sed replacements do not
# expand shell variables, and sed itself only treats '&' and '\N'
# specially in the replacement text.
echo "($0)"
echo -n "Generating root password ... "
sed -i -e 's/^root::/root:$1$hsP9n7K4$s3iE.gIlAKTmTykPH3Byt1:/' /etc/shadow
sed -i -e 's/^root:\*:/root:$1$hsP9n7K4$s3iE.gIlAKTmTykPH3Byt1:/' /etc/shadow
echo " Done"
| true
|
3b77f51d902820b0309209bd9b40ec3363700429
|
Shell
|
LeWaCode/lewa_vendor_lewa
|
/prebuilt/common/etc/init.d/geno
|
UTF-8
| 4,356
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/system/bin/sh
# By Genokolar 2011/02/07
#
# Android boot-time init.d script implementing "app2sd" (relocate /data/app
# onto a second SD-card partition) and "data2ext" (relocate /data/data and
# /data/system there as well).  Feature state is tracked with marker
# directories under /system/etc:
#   <feature>-on     user requested enable; activated on next boot
#   <feature>-run    feature is currently active
#   <feature>-false  activation failed (SD partition did not mount)
#   <feature>-off    user requested disable; reverted on next boot
#   <feature>-retry  re-attempt activation on next boot
# Progress messages are appended to /system/log.txt.
mount -o remount rw /system
# log.txt auto delete
# Rotate the log: remove it once it grows past 50 KiB.
logsize=`busybox du -k /system/log.txt |busybox cut -f1`
if [ $logsize -gt 50 ]
then
busybox rm -f /system/log.txt
fi
# start log
echo "============================================" >> /system/log.txt
echo `busybox date +%F" "%T` Boot phone ... >> /system/log.txt
# permission
chmod 777 /system/misc/1-app2sd.sh
chmod 777 /system/misc/2-data2ext.sh
# read conf
# SDEXT/SDSWAP name the SD-card block devices; enhanced.conf may override
# the defaults (second partition = ext, third = swap).
if [ -e /system/etc/enhanced.conf ]
then
SDEXT=`busybox grep SDEXT /system/etc/enhanced.conf |busybox cut -d= -f2 `
SDSWAP=`busybox grep SDSWAP /system/etc/enhanced.conf |busybox cut -d= -f2 `
else
SDEXT="mmcblk0p2"
SDSWAP="mmcblk0p3"
fi
#first mount app2sd
# Try to mount the ext partition (ext4 first); /system/etc/.nomount is an
# opt-out flag.  '-s /sd-ext' (non-empty dir) is used as the "mount
# succeeded" check throughout this script.
if [ -e /dev/block/$SDEXT -a ! -e /system/etc/.nomount ]
then
mount -t ext4 /dev/block/$SDEXT /sd-ext
if [ -d /system/etc/app2sd-on -a -s /sd-ext ]
then
busybox rm -rf /data/app
busybox ln -s /sd-ext/app /data/app
echo Open APP2SD ext4 success... >> /system/log.txt
busybox mv /system/etc/app2sd-on /system/etc/app2sd-run
fi
fi
# compatible with ext3, by george, 2011-11-01
# Fallback attempt with ext3 when the ext4 mount above did not succeed.
if [ -e /dev/block/$SDEXT -a ! -e /system/etc/.nomount ]
then
mount -t ext3 /dev/block/$SDEXT /sd-ext
if [ -d /system/etc/app2sd-on -a -s /sd-ext ]
then
busybox rm -rf /data/app
busybox ln -s /sd-ext/app /data/app
echo Open APP2SD ext3 success... >> /system/log.txt
busybox mv /system/etc/app2sd-on /system/etc/app2sd-run
fi
fi
# first mount data2ext
if [ -d /system/etc/data2ext-on -a -s /sd-ext ]
then
busybox rm -rf /data/data
busybox ln -s /sd-ext/data /data/data
busybox rm -rf /data/system
busybox ln -s /sd-ext/system /data/system
echo Open DATA2EXT success... >> /system/log.txt
busybox mv /system/etc/data2ext-on /system/etc/data2ext-run
fi
# app2sd run of false
# app2sd was active but the SD partition failed to mount this boot: fall
# back to local directories so the phone still boots, and mark the state
# "-false".  (The "flase" spelling below is runtime log text, kept as-is.)
if [ -d /system/etc/app2sd-run ]
then
if [ ! -s /sd-ext ]
then
busybox rm /data/app
busybox mkdir /data/app
if [ ! -d /system/etc/data2ext-run ]
then
busybox rm -rf /data/databak
busybox mv /data/data /data/databak
busybox mkdir /data/data
busybox rm -rf /data/systembak
busybox mv /data/system /data/systembak
busybox mkdir /data/system
echo SDcard \mount flase... >> /system/log.txt
else
busybox mkdir /data/databak
busybox mkdir /data/systembak
fi
busybox mv /system/etc/app2sd-run /system/etc/app2sd-false
else
echo APP2SD is OK... >> /system/log.txt
fi
fi
# data2ext run of false
# Same fallback for data2ext: replace the dangling symlinks with empty
# local directories when the SD partition is absent.
if [ -d /system/etc/data2ext-run ]
then
if [ ! -s /sd-ext ]
then
busybox rm -rf /data/data
busybox mkdir /data/data
busybox rm -rf /data/system
busybox mkdir /data/system
echo SDcard \mount flase... >> /system/log.txt
busybox mv /system/etc/data2ext-run /system/etc/data2ext-false
else
echo DATA2EXT is OK... >> /system/log.txt
fi
fi
# app2sd off
# User disabled app2sd: restore the local backup copy of /data/app.
if [ -d /system/etc/app2sd-off ]
then
if [ -e /dev/block/$SDEXT ]
then
busybox rm /data/app
busybox mv /data/appbak /data/app
echo Close APP2SD success... >> /system/log.txt
busybox rm -rf /system/etc/app2sd-off
fi
fi
# data2ext off
if [ -d /system/etc/data2ext-off ]
then
if [ -e /dev/block/$SDEXT ]
then
busybox rm /data/data
busybox mv /data/databak /data/data
busybox rm /data/system
busybox mv /data/systembak /data/system
echo Close DATA2EXT success... >> /system/log.txt
busybox rm -rf /system/etc/data2ext-off
fi
fi
# app2sd retry
# Previous activation failed; the partition mounted this boot, so restore
# the backups made by the "-false" path and re-link onto /sd-ext.
if [ -d /system/etc/app2sd-retry ]
then
if [ -s /sd-ext ]
then
if [ ! -d /system/etc/data2ext-run ]
then
busybox rm -rf /data/data
busybox mv /data/databak /data/data
busybox rm -rf /data/system
busybox mv /data/systembak /data/system
fi
busybox rm -rf /data/app
busybox ln -s /sd-ext/app /data/app
echo Retry APP2SD success... >> /system/log.txt
busybox mv /system/etc/app2sd-retry /system/etc/app2sd-run
fi
fi
# data2ext retry
if [ -d /system/etc/data2ext-retry -a -s /sd-ext ]
then
if [ -e /dev/block/$SDEXT ]
then
busybox rm -rf /data/data
busybox ln -s /sd-ext/data /data/data
busybox rm -rf /data/system
busybox ln -s /sd-ext/system /data/system
echo Retry Data2EXT success... >> /system/log.txt
busybox mv /system/etc/data2ext-retry /system/etc/data2ext-run
fi
fi
echo "============================================" >> /system/log.txt
mount -o remount,ro /system
exit
| true
|
a88c5195d92a55a78e0352039bd5b053e25c907e
|
Shell
|
Jerovich/bash-scripts
|
/upg.sh
|
UTF-8
| 803
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive APT upgrade helper: refresh the package index, list the
# upgradable packages, then ask for a y/n confirmation (three attempts)
# before running 'apt full-upgrade' and 'apt autoremove'.
clear
sudo apt update > /dev/null
# Split on newlines only so that package lines (which contain spaces)
# survive the for-loop word splitting as whole lines.
IFS=$'\n'
# Initialise $j so the emptiness test below is well-defined even when
# there are no upgradable packages (the original left it unset).
j=
for j in $(sudo apt list --upgradable | grep upgradable)
do
	echo "$j"
done
# Quoting "$j" is essential: each line contains spaces, and the original
# unquoted '[ -z $j ]' made test fail with "binary operator expected".
if [ -z "$j" ]; then
	echo 'All packages are up to date.'; echo
	exit 0
else
	echo; echo 'Do you want to proceed? (y/n)'; echo
fi
i=0
while [ "$i" -lt 3 ]; do
	read -s -n 1 option
	# Quote "$option": pressing Enter or space made the original
	# unquoted test a syntax error ('[ = y ]').
	if [ "$option" = y ]; then
		sudo apt full-upgrade -y
		sudo apt autoremove -y
		echo; echo 'Finished'; echo
		exit 0
	elif [ "$option" = n ]; then
		echo 'Cancelled'; echo
		exit 0
	elif [ "$i" -lt 2 ]; then
		i=$(( i + 1 ))
		echo "Please enter \"y\" or \"n\""; echo
	else
		echo "It's not that difficult to enter \"y\" or \"n\"..."; echo
		sleep 1.5
		echo "isn't it?"; echo
		sleep 1.5
		exit 0
	fi
done
| true
|
963198fa4d7a8ac4888018020d3a700932792608
|
Shell
|
KCP-quarantine-area/apps
|
/zinnia/PKGBUILD
|
UTF-8
| 584
| 2.8125
| 3
|
[] |
no_license
|
# PKGBUILD for zinnia (handwriting recognition library).
pkgname=zinnia
pkgver=0.06
pkgrel=1
pkgdesc="Simple and portable open source handwriting recognition system based on Support Vector Machines"
arch=('x86_64')
url="http://zinnia.sourceforge.net/"
license=('BSD')
makedepends=('libtool')
source=("http://sourceforge.net/projects/zinnia/files/zinnia/$pkgver/$pkgname-$pkgver.tar.gz")
md5sums=('5ed6213e2b879465783087a0cf6d5fa0')

build() {
  # Configure and compile inside the extracted source directory.
  # Quoting protects against build paths containing whitespace.
  cd "$pkgname-$pkgver"
  ./configure --prefix=/usr
  make
}

package() {
  cd "$pkgname-$pkgver"
  make DESTDIR="$pkgdir" install
  # BSD is not a common license in Arch packaging, so ship a copy.
  install -Dm644 COPYING "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
| true
|
de9c1301d9ef2ea27e0f128d7376d2083bc4990a
|
Shell
|
sebablasko/Test_DifferentAffinityThreadsBySocket
|
/run.sh
|
UTF-8
| 2,773
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Benchmark driver: measures throughput for several thread counts under
# different CPU-affinity schedulers and collects the results into one CSV
# file per scheduler, plus a combined summary at the end.

MAX_PACKS=1000000
repetitions=60
num_port=1820
threads="1 2 4 8 16 24 36 48 64 128"
num_clients=4

echo "Compilando..."
make all
echo "Done"

# run_suite NAME [SCHEDULER]
# Runs the full thread-count sweep, appending one CSV row per thread count
# to NAME.csv.  When SCHEDULER is given it is forwarded to runTest.sh via
# --scheduler; otherwise no affinity scheduler is used.  Each runTest.sh
# invocation writes its measurement to the file 'aux', which is consumed
# and removed here.  (This replaces six copy-pasted loops in the original
# that differed only in these two values.)
run_suite()
{
	local salida=$1
	local sched=$2
	local num_threads linea i
	for num_threads in $threads
	do
		echo "evaluando $num_threads threads, $salida"
		linea="$num_threads,"
		for ((i=1 ; i<=repetitions ; i++))
		do
			if [ -n "$sched" ]; then
				./runTest.sh $num_clients --packets $MAX_PACKS --port $num_port --threads $num_threads --scheduler "$sched"
			else
				./runTest.sh $num_clients --packets $MAX_PACKS --port $num_port --threads $num_threads
			fi
			linea="$linea$(cat aux)"
			rm aux
		done
		echo "$linea" >> "$salida".csv
	done
}

#Sin processor Affinity
run_suite SinProcessorAffinity
#Con processor Affinity equitative
run_suite EquitativeAffinity equitativeSched
#Con processor Affinity dummy
run_suite DummyAffinity dummySched
#Con processor Affinity pair
run_suite PairAffinity pairSched
#Con processor Affinity impair
run_suite ImpairAffinity impairSched
#Con processor Affinity Numa pair
run_suite NumaPairAffinity numaPairSched

make clean
echo "Done"

#Compilar los resultados en un sólo csv para simplicidad
echo "" > Resumen_afinidad.csv
for filename in *Affinity.csv; do
	echo "$filename" >> Resumen_afinidad.csv
	cat "$filename" >> Resumen_afinidad.csv
	echo "" >> Resumen_afinidad.csv
done
| true
|
af5297845dcc55e0fc4af49e296f770b13d2e8b5
|
Shell
|
derekbrokeit/dotfiles
|
/unity/profile.sh
|
UTF-8
| 3,480
| 2.953125
| 3
|
[] |
no_license
|
# Interactive-shell profile: editor/pager/browser selection, locale, man
# page colours, and tool-specific environment (LAMMPS, Dropbox, prompt).
# NOTE(review): relies on helper functions 'os_is_linux' and 'is_avail'
# and colour variables (CYAN/NC/YELLOW_BRIGHT) defined elsewhere in the
# dotfiles — confirm they are sourced before this file.
#export EDITOR="$HOME/bin/tmvim"
export EDITOR=nvim
if os_is_linux ; then
export BROWSER=google-chrome-stable
else
export BROWSER=v3m
fi
export PAGER="nvimpager"
export MANPAGER="$PAGER"
alias less=$PAGER
# Pretty-print a file between two horizontal rules using vimcat.
ccat() {
# local cols=$(tput cols)
local cols=20
printf "${CYAN}%${cols}s${NC}\n" | tr " " -
vimcat -c "hi Normal ctermbg=NONE" $@
printf "${CYAN}%${cols}s${NC}\n" | tr " " -
}
# export GNUTERM=dumb
# export VMAIL_HTML_PART_READER="w3m -dump -o display_link_number=1 "
# export VMAIL_VIM=mvim
# vim temporary directory for swap files
export EDITOR_TMP="${HOME}/.${EDITOR}-tmp"
# # tmux files
# export TMUX_CONF="$HOME/.tmux.conf"
# export TMUX_CONF_NEST="${TMUX_CONF}.nested"
# export TMUX_CONF_TMWIN="${TMUX_CONF}.tmwin"
# export TMUX_CONF_MINI="${TMUX_CONF}.mini"
# # virtualenvwrapper
# export VIRTUALENVWRAPPER_PYTHON=$(command -v python)
# python startup file
export PYTHONSTARTUP=$HOME/.pythonrc.py
# language variables
# some systems throw an error when using locale, so throw errors to null
# NOTE(review): if several en_US UTF-8 locales are installed, this capture
# contains multiple newline-separated entries — verify on each system.
export LANG="$(locale -a 2> /dev/null | egrep 'en_US.*(utf|UTF)')"
if os_is_linux ; then
export LOCALE=UTF-8
else
export LC_ALL=$LANG
fi
# # grep coloring
# if os_is_osx ; then
# export GREP_OPTIONS='--color=auto'
# export GREP_COLOR='1;32'
# fi
# colorful man-pages
# Less Colors for Man Pages
export LESS="-R"
export LESS_TERMCAP_mb=$'\E[01;31m' # begin blinking
export LESS_TERMCAP_md=$'\E[01;38;5;74m' # begin bold
export LESS_TERMCAP_me=$'\E[0m' # end mode
export LESS_TERMCAP_se=$'\E[0m' # end standout-mode
export LESS_TERMCAP_so=$'\E[38;5;246m' # begin standout-mode - info box
export LESS_TERMCAP_ue=$'\E[0m' # end underline
export LESS_TERMCAP_us=$'\E[04;38;5;146m' # begin underline
export LS_OPTIONS="--color=auto"
if [ ! -s ~/.dir_colors ]; then
#setup ~/.dir_colors for ls if one does not exist
# (gdircolors is the coreutils name installed by Homebrew on macOS)
if is_avail dircolors ; then
dircolors -p > ~/.dir_colors
dircolors ~/.dir_colors
elif is_avail gdircolors ; then
gdircolors -p > ~/.dir_colors
gdircolors ~/.dir_colors
fi
fi
if [[ -d $HOME/Dropbox ]] ; then
export DROPBOX="$HOME/Dropbox"
fi
# LAMMPS molecular-dynamics paths: prefer the Homebrew layout when brew is
# available, otherwise fall back to a source checkout under ~/repo.
if is_avail lammps ; then
export LAMMPS_COMMAND=$(command -v lammps)
if is_avail brew ; then
export LAMMPS_POTS=$(brew --prefix )/share/lammps/potentials
export LAMMPS_DOCS=$(brew --prefix )/share/lammps/doc
export LAMMPS_LIB=$(brew --prefix )/lib/liblammps.so
else
export LAMMPS_POTS=$HOME/repo/lammps/potentials
export LAMMPS_DOCS=$HOME/repo/lammps/doc
export LAMMPS_LIB=/usr/local/lib/liblammps.so
fi
fi
# Minimal prompt: first letter of the hostname.
export PS1="\[$(printf $YELLOW_BRIGHT)\]${HOSTNAME:0:1} > \[$(printf $NC)\]"
export SSHFS_DIR="$HOME/sshfs"
if os_is_linux ; then
# make sure that git doesn't throw errors on https:// sites
# export GIT_SSL_NO_VERIFY=true
export HOMEBREW_CACHE=$HOME/.hb_cache
fi
# export ParaView_DIR=/home/derek/OpenFOAM/ThirdParty-2.3.0/platforms/linux64Gcc/ParaView-4.1.0
# export PATH=$ParaView_DIR/bin:$PATH
# export PV_PLUGIN_PATH=$FOAM_LIBBIN/paraview-4.1
## Uncomment the following line to enable pretty prompt:
#export MOOSE_PROMPT=true
## Uncomment the following line to enable autojump:
#export MOOSE_JUMP=true
## Source the MOOSE profile if moose_profile exists:
# if [ -f /opt/moose/environments/moose_profile ]; then
# . $HOME/.moose_profile
# fi
# Use the pudb debugger for Python's breakpoint() builtin.
export PYTHONBREAKPOINT=pudb.set_trace
| true
|
3e982e80d14e9a2c5db8f7c7685e4913e9d8ad49
|
Shell
|
antonioribeiro/dev-box
|
/roles/composer/templates/createPackage.sh
|
UTF-8
| 6,120
| 4.0625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Export those variables in your .bashrc to ease the deployment of new packages
#
# export DEFAULT_VCS_USER=antonioribeiro
# export DEFAULT_VCS_SERVICE=github.com
# export DEFAULT_VENDOR_NAME=pragmarx
# export DEFAULT_SKELETON_NAME=skeleton
# export DEFAULT_SKELETON_REPOSITORY=https://github.com/antonioribeiro/skeleton.git
# Entry point: collect the user's answers, then generate the new package
# from the skeleton repository.
function main() {
	clear
	askForData
	createPackage
}
# Recursively replace every occurrence of $2 with $3 inside all regular
# files under directory $1.  Restricting find to '-type f' fixes the
# original, which also fed directories to sed (errors were silently
# discarded by the redirection); quoting "$1" makes spaced paths work.
# Errors (e.g. unreadable files) are still discarded, as before.
function searchAndReplace()
{
	find "$1" -type f -exec sed -i "s/$2/$3/g" {} \; &> /dev/null
}
# Clone the skeleton repository into the destination folder, rewrite all
# skeleton/vendor identifiers (file contents, file names, directory names)
# to the new package's identifiers, then start a fresh git history and,
# when a remote URL was supplied, push and print follow-up instructions.
# Relies on the globals populated by askForData.
function createPackage()
{
git clone $SKELETON_REPOSITORY $DESTINATION_FOLDER
# Rewrite identifiers inside file contents first...
searchAndReplace $DESTINATION_FOLDER $SKELETON_NAME $PACKAGE_NAME
searchAndReplace $DESTINATION_FOLDER $SKELETON_NAME_CAPITAL $PACKAGE_NAME_CAPITAL
searchAndReplace $DESTINATION_FOLDER $SKELETON_VENDOR_NAME $VENDOR_NAME
searchAndReplace $DESTINATION_FOLDER $SKELETON_VENDOR_NAME_CAPITAL $VENDOR_NAME_CAPITAL
# ...then in file and directory names.
renameAll $DESTINATION_FOLDER $SKELETON_NAME $PACKAGE_NAME
renameAll $DESTINATION_FOLDER $SKELETON_NAME_CAPITAL $PACKAGE_NAME_CAPITAL
renameAll $DESTINATION_FOLDER $SKELETON_VENDOR_NAME $VENDOR_NAME
renameAll $DESTINATION_FOLDER $SKELETON_VENDOR_NAME_CAPITAL $VENDOR_NAME_CAPITAL
# Rename the main class file to match the new package name.
mv $DESTINATION_FOLDER/src/$SKELETON_NAME_CAPITAL.php $DESTINATION_FOLDER/src/$PACKAGE_NAME_CAPITAL.php
# Drop the skeleton's history and start a fresh repository.
rm -rf $DESTINATION_FOLDER/.git/
cd $DESTINATION_FOLDER
git init
git add -A
git commit -m "first commit"
if [[ "$PACKAGE_REPOSITORY" != "" ]]; then
git remote add origin $PACKAGE_REPOSITORY
git push origin master
displayInstructions
fi
}
# Interactively collect all generation parameters into globals:
# DESTINATION_FOLDER, VENDOR_NAME(_CAPITAL), PACKAGE_NAME(_CAPITAL),
# PACKAGE_REPOSITORY, SKELETON_VENDOR_NAME(_CAPITAL),
# SKELETON_NAME(_CAPITAL) and SKELETON_REPOSITORY.  Defaults come from the
# DEFAULT_* environment variables documented at the top of this script;
# each prompt is shown via inquireText, which returns in $answer.
# Aborts with exit 1 when the destination folder already exists.
function askForData()
{
DESTINATION_FOLDER=/var/www/\<name\>
inquireText "Package destination folder:" $DESTINATION_FOLDER
DESTINATION_FOLDER=$answer
if [[ -d $DESTINATION_FOLDER ]]; then
message
message "Destination folder already exists. Aborting..."
message
exit 1
fi
message
# Vendor: lowercase and Capitalized variants (${VAR^} needs bash >= 4).
VENDOR_NAME=`echo $DEFAULT_VENDOR_NAME | awk '{print tolower($0)}'`
inquireText "Vendor name (lowercase):" $VENDOR_NAME
VENDOR_NAME=$answer
VENDOR_NAME_CAPITAL=${VENDOR_NAME^}
inquireText "Vendor name (Capitalized):" $VENDOR_NAME_CAPITAL
VENDOR_NAME_CAPITAL=$answer
message
# Package name defaults to the destination folder's basename.
PACKAGE_NAME=`echo $(basename $DESTINATION_FOLDER) | awk '{print tolower($0)}'`
inquireText "Your new package name (lowercase):" $PACKAGE_NAME
PACKAGE_NAME=$answer
PACKAGE_NAME_CAPITAL=${PACKAGE_NAME^}
inquireText "Your new package name (Capitalized):" $PACKAGE_NAME_CAPITAL
PACKAGE_NAME_CAPITAL=$answer
# Remote URL guess: DEFAULT_VCS_USER overrides the vendor as VCS account.
if [[ "$DEFAULT_VCS_USER" != "" ]]; then
VCS_USER=$DEFAULT_VCS_USER
else
VCS_USER=$VENDOR_NAME
fi
PACKAGE_REPOSITORY=https://$DEFAULT_VCS_SERVICE/$VCS_USER/$PACKAGE_NAME.git
inquireText "Your new package repository link (create a package first or leave it blank):" $PACKAGE_REPOSITORY
PACKAGE_REPOSITORY=$answer
message
# Skeleton identifiers: which names to search-and-replace in the clone.
SKELETON_VENDOR_NAME=`echo $VENDOR_NAME | awk '{print tolower($0)}'`
inquireText "Skeleton Vendor name (lowercase):" $SKELETON_VENDOR_NAME
SKELETON_VENDOR_NAME=$answer
SKELETON_VENDOR_NAME_CAPITAL=${SKELETON_VENDOR_NAME^}
inquireText "Skeleton Vendor name (Capitalized):" $SKELETON_VENDOR_NAME_CAPITAL
SKELETON_VENDOR_NAME_CAPITAL=$answer
message
SKELETON_NAME=`echo $(basename $DEFAULT_SKELETON_NAME) | awk '{print tolower($0)}'`
inquireText "Skeleton package name (lowercase):" $SKELETON_NAME
SKELETON_NAME=$answer
SKELETON_NAME_CAPITAL=${SKELETON_NAME^}
inquireText "Skeleton package name (Capitalized):" $SKELETON_NAME_CAPITAL
SKELETON_NAME_CAPITAL=$answer
if [[ "$DEFAULT_SKELETON_REPOSITORY" != "" ]]; then
SKELETON_REPOSITORY=$DEFAULT_SKELETON_REPOSITORY
else
SKELETON_REPOSITORY=https://$DEFAULT_VCS_SERVICE/$SKELETON_VENDOR_NAME/$SKELETON_NAME.git
fi
inquireText "Skeleton repository link:" $SKELETON_REPOSITORY
SKELETON_REPOSITORY=$answer
}
# Apply both file renames and directory renames of $2 -> $3 beneath
# directory $1.  Arguments are quoted so spaced paths propagate intact
# (the original passed them unquoted).
function renameAll()
{
	renameFiles "$1" "$2" "$3"
	renameDirectories "$1" "$2" "$3"
}
# Rename every regular file under $1 whose *name* contains $2, replacing
# all occurrences of $2 with $3 (directory components are left untouched).
# All expansions are quoted — the original's unquoted dirname/basename/mv
# broke on paths containing whitespace.
function renameFiles()
{
	local FOLDER=$1
	local OLD=$2
	local NEW=$3
	find "$FOLDER" -type f -print0 | while IFS= read -r -d $'\0' file; do
		local dir filename new
		dir=$(dirname "$file")
		filename=$(basename "$file")
		new=$dir/$(echo "$filename" | sed -e "s/$OLD/$NEW/g")
		if [[ "$file" != "$new" ]]; then
			if [ -f "$file" ]; then
				mv "$file" "$new"
			fi
		fi
	done
}
# Rename directories under $1, replacing every occurrence of $2 in the
# path with $3.  Expansions are quoted (the original's unquoted mv broke
# on spaced paths).
# NOTE(review): as in the original, nested directories that BOTH contain
# $2 are not fully handled — once a parent is renamed, the stale child
# path recorded by find no longer exists and is skipped by the '-d' check.
function renameDirectories()
{
	local FOLDER=$1
	local OLD=$2
	local NEW=$3
	find "$FOLDER" -type d -print0 | while IFS= read -r -d $'\0' file; do
		local new
		new=$(echo "$file" | sed -e "s/$OLD/$NEW/g")
		if [[ "$file" != "$new" ]]; then
			if [ -d "$file" ]; then
				mv "$file" "$new"
			fi
		fi
	done
}
# Prompt the user with $1, offering $2 as the default value; the entered
# (or accepted) value is returned in the global 'answer'.
function inquireText() {
answer=""
value=$2
# Project-specific branding: the vendor "Pragmarx" is spelled "PragmaRX".
if [[ "$value" == "Pragmarx" ]]; then
value=PragmaRX
fi
# 'read -i' (editable pre-filled default) needs bash >= 4; older shells
# fall back to showing the default inside the prompt text instead.
# NOTE(review): this is a lexicographic string comparison, so a
# hypothetical bash "10.x" would sort before '3.9' — fine for versions
# in current use.
if [[ $BASH_VERSION > '3.9' ]]; then
read -e -p "$1 " -i "$value" answer
else
read -e -p "$1 [hit enter for $value] " answer
fi
}
# Print follow-up instructions for wiring the freshly pushed package into
# an application's composer.json (require + repositories sections) and,
# for Laravel projects, its service-provider registration.  Reads the
# globals set by askForData.
function displayInstructions()
{
echo
echo
echo "Now open one of your applications composer.json and add those items to their proper sections:"
echo
echo "\"require\": {"
echo "    \"$VENDOR_NAME/$PACKAGE_NAME\": \"dev-master\","
echo "},"
echo
echo
echo "and"
echo
echo
echo "\"repositories\": ["
echo "    {"
echo "        \"type\": \"vcs\","
echo "        \"url\": \"$PACKAGE_REPOSITORY\""
echo "    },"
echo "],"
echo
echo
echo "If this is a Laravel project, don't forget to add a ServiceProvider in your app/config/app.php too, something like:"
echo
echo "    '$VENDOR_NAME_CAPITAL\\$PACKAGE_NAME_CAPITAL\\Vendor\\Laravel\\ServiceProvider'",
echo
echo
}
# Print the given arguments on one line, or a blank line when called with
# no arguments.  The original built a command string ("command=\"echo $@\"";
# ${command}) and re-executed it, which re-split the words and expanded
# globs in the message text; echoing the arguments directly avoids that.
function message() {
	if [ "$*" != "" ]; then
		echo "$@"
	else
		echo
	fi
}
# Quoted "$@" forwards each argument as-is; the unquoted original would
# re-split arguments containing whitespace.
main "$@"
| true
|
28691a700603c1e00e390abc5d8f3a5c9b88f776
|
Shell
|
weltonwe/hostinglimit
|
/bin/cpulimit
|
UTF-8
| 1,735
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Script to start CPU limit daemon
#
# Fixed: the original contained typographic ("curly") quote characters,
# which the shell treats as literal text — no case branch ever matched and
# the gawk programs were mangled, so the script did nothing.  All quotes
# below are plain ASCII.
set -e

case "$1" in
start)
	# Only start when no daemon instance is visible in the process table.
	if [ "$(ps -eo pid,args | gawk '$3=="/usr/bin/cpulimit_daemon.sh" {print $1}' | wc -l)" -eq 0 ]; then
		nohup /usr/bin/cpulimit_daemon.sh >/dev/null 2>&1 &
		ps -eo pid,args | gawk '$3=="/usr/bin/cpulimit_daemon.sh" {print}' | wc -l | gawk '{ if ($1 == 1) print " * cpulimit daemon started successfully"; else print " * cpulimit daemon can not be started" }'
	else
		echo " * cpulimit daemon can't be started, because it is already running"
	fi
	;;
stop)
	CPULIMIT_DAEMON=$(ps -eo pid,args | gawk '$3=="/usr/bin/cpulimit_daemon.sh" {print $1}' | wc -l)
	CPULIMIT_INSTANCE=$(ps -eo pid,args | gawk '$2=="cpulimit" {print $1}' | wc -l)
	CPULIMIT_ALL=$((CPULIMIT_DAEMON + CPULIMIT_INSTANCE))
	if [ "$CPULIMIT_ALL" -gt 0 ]; then
		if [ "$CPULIMIT_DAEMON" -gt 0 ]; then
			# kill cpulimit daemon
			ps -eo pid,args | gawk '$3=="/usr/bin/cpulimit_daemon.sh" {print $1}' | xargs kill -9
		fi
		if [ "$CPULIMIT_INSTANCE" -gt 0 ]; then
			# release cpulimited process to normal priority
			ps -eo pid,args | gawk '$2=="cpulimit" {print $1}' | xargs kill -9
		fi
		ps -eo pid,args | gawk '$3=="/usr/bin/cpulimit_daemon.sh" {print}' | wc -l | gawk '{ if ($1 == 1) print " * cpulimit daemon can not be stopped"; else print " * cpulimit daemon stopped successfully" }'
	else
		echo " * cpulimit daemon can't be stopped, because it is not running"
	fi
	;;
restart)
	$0 stop
	sleep 3
	$0 start
	;;
status)
	ps -eo pid,args | gawk '$3=="/usr/bin/cpulimit_daemon.sh" {print}' | wc -l | gawk '{ if ($1 == 1) print " * cpulimit daemon is running"; else print " * cpulimit daemon is not running" }'
	;;
esac
exit 0
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.