blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
1f0eb864ce430f0b308c40dd998ba6bf8c19efc0 | Shell | joshbarrass/docker-openttd-server | /start-server.sh | UTF-8 | 486 | 2.9375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
if [ ! -d "/home/openttd/.openttd/baseset" ]; then
mkdir -p /home/openttd/.openttd/baseset
fi
if [ ! "$(ls -A /home/openttd/.openttd/baseset)" ]; then
echo "Downloading OpenGFX $OPENGFX_VERSION ..."
mkdir -p ~/.openttd/baseset && wget -q -O /tmp/opengfx.zip https://cdn.openttd.org/opengfx-releases/$OPENGFX_VERSION/opengfx-$OPENGFX_VERSION-all.zip && unzip /tmp/opengfx.zip -d ~/.openttd/baseset && rm -f /tmp/opengfx.zip
fi
exec /usr/games/openttd -D0.0.0.0:3979
| true |
87c74352dcce32ba252b11729376c46aaf43be14 | Shell | quynguyen/shell-config | /zsh/zsh-antigen | UTF-8 | 275 | 3.234375 | 3 | [] | no_license | #!/bin/zsh
export ANTIGEN_HOME=$HOME/.antigen
export ANTIGEN_CMD=$ANTIGEN_HOME/antigen.zsh
[ -d $ANTIGEN_HOME ] || mkdir -v -p $ANTIGEN_HOME
[ -e $ANTIGEN_CMD ] || (curl -L git.io/antigen > $ANTIGEN_CMD \
&& echo "Antigen now installed $ANTIGEN_CMD")
source $ANTIGEN_CMD
| true |
e3e9929078858cfe2714c24c58f47d2b035f3536 | Shell | rhiller/pi-thermometer | /checkup | UTF-8 | 509 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# pick one if multiple default routes
defaultGateway=`ip route show | grep default | tail -1 | awk '{ print $3 }'`
ping -w 2 -i .5 -q $defaultGateway 2>&1 > /dev/null
res=$?
if [ $res -eq 0 ]
then
# logger "checkup: first try succeeded"
exit 0
fi
logger "checkup: first try failed...retrying"
ping -w 2 -i .5 -q $defaultGateway 2>&1 > /dev/null
res=$?
if [ $res -eq 0 ]
then
logger "checkup: second try succeeded"
exit 0
fi
logger "checkup: second try also failed...rebooting"
/sbin/reboot
| true |
522851055b9815fd5db91cb86d3d6fad85169a37 | Shell | brunotech/BotFramework-WebChat | /scripts/lerna_publish | UTF-8 | 630 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
echo //registry.npmjs.org/:_authToken=\${NPM_TOKEN} > ~/.npmrc
cp README.md packages/bundle
cp LICENSE packages/bundle
cp LICENSE packages/core
cp LICENSE packages/component
# If TRAVIS_TAG is present, it means this is going PRODUCTION
if [ -n "$TRAVIS_TAG" ]
then
cd packages/core
npm publish
cd ../component
npm publish
cd ../bundle
npm publish
cd ../..
fi
# If on "master" branch, deploy to "master" tag
if [ "$TRAVIS_BRANCH" = "master" ]
then
cd packages/core
npm publish --tag master
cd ../component
npm publish --tag master
cd ../bundle
npm publish --tag master
cd ../..
fi
| true |
e5f491a848ad60a9b13e29f241499cd31d94f95e | Shell | Fithos/tgp | /preprocesser/bin/preprocess.sh | UTF-8 | 1,321 | 3.421875 | 3 | [] | no_license | #!/bin/bash
source env-var.sh
if [ ! -d "$PREP_FILE_PATH" ]; then
mkdir $PREP_FILE_PATH
fi
if [ "$(ls -A $PREP_FILE_PATH)" ]; then
if [ ! -z ${DEBUG+x} ]; then
echo "Preprocessed types have already be computed. If you wish to run the preprocesser, delete all files from $PREP_FILE_PATH"
fi
exit 0
fi
rm -rf $PREPROCESSER_OUTPUT_PATH/*
echo "Preprocessing tasks (init)...."
$PREPROCESSER_SCRIPT_PATH/do_preprocess.sh task-init
sleep 2
echo "=================="
echo "Preprocessing runnable (init)...."
$PREPROCESSER_SCRIPT_PATH/do_preprocess.sh runnable-init
sleep 2
echo "=================="
echo "Preprocessing callable (init)...."
$PREPROCESSER_SCRIPT_PATH/do_preprocess.sh callable-init
sleep 2
echo "=================="
echo "Preprocessing forkjointask (init)...."
$PREPROCESSER_SCRIPT_PATH/do_preprocess.sh forkjointask-init
sleep 2
echo "=================="
echo "Preprocessing tasks (exec)...."
$PREPROCESSER_SCRIPT_PATH/do_preprocess.sh task-exec
sleep 2
echo "=================="
echo "Preprocessing executors (submit)...."
$PREPROCESSER_SCRIPT_PATH/do_preprocess.sh executor-submit
sleep 2
echo "=================="
if [ "$(ls -A $PREPROCESSER_OUTPUT_PATH)" ]; then
cp -r $PREPROCESSER_OUTPUT_PATH/* $PREP_FILE_PATH
echo "Preprocessed type hierarchy added to $PREP_FILE_PATH"
fi
| true |
6f7ebc57ecb0d115ac3632e961a50da317bf9225 | Shell | billxiong24/misc_scripts | /add_lvm_space.sh | UTF-8 | 528 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# RUN AS ROOT
# Sometimes run out of room in root partition.
# Shrinks space from fedora-home and adds that space to root partition
if [ $# -lt 1 ]; then
echo "Input amount of space to add to root from home partition. (e.g. 10G for 10 GB)"
exit
fi
echo "BEFORE"
lvscan
echo "-----------------------------------------------------------"
lvreduce -L -$1 /dev/mapper/fedora-home
lvextend -l +100%FREE /dev/fedora/root
echo "AFTER"
echo "-----------------------------------------------------------"
lvscan
| true |
37e6024e95d583f0f1c7174bcc414f10aeed16ab | Shell | dslm4515/BMLFS | /build-scripts/rclone-browser.build | UTF-8 | 1,467 | 3.328125 | 3 | [] | no_license | #! /bin/bash
# rclone-browser
# Source: https://github.com/kapitainsky/RcloneBrowser/archive/refs/tags/1.8.0.tar.gz
#
# $BUILD = Directory to temporarily install
# $PKGS = Directory to store built packages
#
# DEPS
# Required: rclone, qt5
# Recommended: NONE
# Optional: NONE
CFLAGS="-w" CXXFLAGS="-w" cmake -B OUT \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_BUILD_TYPE=MinSizeRel &&
read -p "Compile?" && make -j2 -C OUT &&
sudo -S make DESTDIR=$BUILD -C OUT install &&
cd $BUILD && sudo -S mkdir -v ${BUILD}/install &&
cat > /tmp/slack-desc << "EOF"
# HOW TO EDIT THIS FILE:
# The "handy ruler" below makes it easier to edit a package description. Line
# up the first '|' above the ':' following the base package name, and the '|'
# on the right side marks the last column you can put a character in. You must
# make exactly 11 lines for the formatting to be correct. It's also
# customary to leave one space after the ':' except on otherwise blank lines.
|-----handy-ruler------------------------------------------------------|
rclone-browser: rclone-browser
rclone-browser:
rclone-browser: Simple cross platform GUI for rclone. Supports macOS,
rclone-browser: GNU/Linux, BSD family and Windows.
rclone-browser:
rclone-browser: github.com/kapitainsky/RcloneBrowser
rclone-browser:
EOF
sudo -S mv -v /tmp/slack-desc install/ &&
sudo -S makepkg -l y -c n $PKGS/rclone-browser-1.8.0-$(uname -m)-mlfs.txz &&
sudo -S rm -rf ${BUILD}/*
| true |
0cfda79b8d5ac9967eb6d00a9357bab0517ae855 | Shell | petronny/aur3-mirror | /mate-menu-git/PKGBUILD | UTF-8 | 881 | 2.59375 | 3 | [] | no_license | # Maintainer: John Jenkins twodopeshaggy@gmail.com
pkgname=mate-menu-git
pkgver=r453.3de5f14
pkgrel=1
pkgdesc="Mate Menu fork of MintMenu"
arch=('any')
url="https://bitbucket.org/ubuntu-mate/mate-menu/"
license=('GPL2')
makedepends=('git')
depends=('python2' 'python2-xlib' 'mate-applets' 'python2-distutils-extra' 'python2-setuptools')
source=('git+https://bitbucket.org/ubuntu-mate/mate-menu.git')
sha256sums=('SKIP')
pkgver() {
cd "$srcdir/mate-menu"
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
prepare() {
cd "$srcdir/mate-menu"
sed 's/python/python2/g' -i lib/mate-menu.py
sed 's/python/python2/g' -i lib/mate-menu-config.py
}
package() {
cd "$srcdir/mate-menu"
python2 setup.py install --root="$pkgdir/" --optimize=1
mkdir -p $pkgdir/usr/share/licenses/$pkgname
install -m 0644 COPYING $pkgdir/usr/share/licenses/$pkgname/COPYING
}
| true |
40c569d5263fc518a1f8fca3147eeb55fa4d3f85 | Shell | naqushab/teton | /bin/makedirs | UTF-8 | 287 | 3.171875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
while read -r line; do
dname="${line%/*}"
lname=$(echo "$dname" | cut -d'/' -f6-)
mkdir -p "$lname" || {
printf "error: unable to create '%s'.\n", "$lname" >&2
continue
}
echo "$line"
done < <(ls ~/Documents/projects/bison/assessment/LWR/validation/)
| true |
e03ed13a2cc6b209ded07786c14ce85eda364c37 | Shell | lambdasawa/dotfiles | /script/install/watchexec.sh | UTF-8 | 392 | 3.171875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
if ! which watchexec >/dev/null 2>&1; then
[ "$(uname)" = Darwin ] && brew install watchexec
[ "$(uname)" = Linux ] &&
pushd "$PWD" &&
cd "$(mktemp -d)" &&
curl -sSLO https://github.com/watchexec/watchexec/releases/download/cli-v1.17.1/watchexec-1.17.1-x86_64-unknown-linux-gnu.deb &&
sudo dpkg -i watchexec-1.17.1-x86_64-unknown-linux-gnu.deb &&
popd
fi
| true |
a0fa6ddc10f146d5e5104ac80304d8c52daba749 | Shell | flatiron-labs/environmentalizer | /shared/chrome/check.sh | UTF-8 | 105 | 2.59375 | 3 | [] | no_license | #!/bin/bash
result=$(mdfind "kind:app chrome")
if [[ -n $result ]]; then
echo '1'
else
echo '0'
fi
| true |
0c5b06483f1feb05ff680bd3e7e878a19a1de800 | Shell | marmotcai/gather | /tools/docker-port-del.sh | UTF-8 | 350 | 3.46875 | 3 | [] | no_license | #!/bin/bash
if [ $# -ne 1 ];then
echo "The argument must be 1: xx host_port"
exit;
else
echo "begin..."
fi
host_port=${1}
num=$(iptables -t nat -nL DOCKER --line-number | grep dpt:${host_port} | awk '{print $1 }')
iptables_cmd='iptables -t nat -D DOCKER '${num}
echo ${iptables_cmd}
${iptables_cmd}
iptables -t nat -nL --line-number
| true |
378bca9092634024da1cf473d26451a1ab906eeb | Shell | naps62/unix | /files/shell/bash/ps1 | UTF-8 | 1,436 | 3.671875 | 4 | [] | no_license | #!/bin/bash
function _git_prompt() {
git branch &>/dev/null || return 1;
local git_status="`git status -unormal 2>&1`"
if ! [[ "$git_status" =~ Not\ a\ git\ repo ]]; then
if [[ "$git_status" =~ nothing\ to\ commit ]]; then
local ansi=32
elif [[ "$git_status" =~ Changes\ not\ staged\ for\ commit ]]; then
local ansi=33
elif [[ "$git_status" =~ nothing\ added\ to\ commit\ but\ untracked\ files\ present ]]; then
local ansi=33
elif [[ "$(__git_ps1)" =~ MERGING ]]; then # if there are conflicts
local ansi=31
else
local ansi=33
fi
echo -n "\[\033[01;"$ansi"m\]"
fi
}
PS1_COLOR_NAME="\[\e[01;34m\]"
PS1_COLOR_DIR="\[\e[01;34m\]"
PS1_COLOR_PROMPTUSER="\[\e[01;32m\]"
PS1_COLOR_PROMPTROOT="\[\e[01;31m\]"
PS1_COLOR_CMD="\[\e[00m\]"
PS1_COLOR_GIT="\[\033[01;33m\]"
PS1_NAME="\u"
PS1_DIR="\W"
PS1_GIT="\$(__git_ps1)"
PS1_PROMPT_USER="\$ "
PS1_PROMPT_ROOT="# "
PS1_COLOR_GIT="`_git_prompt`"
#
# bash prompt
#
if [ `whoami` == "root" ]; then
PS1_COLOR_PROMPT=$PS1_COLOR_PROMPTROOT
PS1_PROMPT=$PS1_PROMPT_ROOT
else
PS1_COLOR_PROMPT=$PS1_COLOR_PROMPTUSER
PS1_PROMPT=$PS1_PROMPT_USER
fi
# show change signs in prompt
export GIT_SHOWDIRTYSTATE=1
function _prompt_command() {
PS1='${debian_chroot:+($debian_chroot)}'
PS1="${PS1} ${PS1_COLOR_DIR}${PS1_DIR}`_git_prompt`${PS1_GIT} ${PS1_COLOR_PROMPT}${PS1_PROMPT}${PS1_COLOR_CMD}"
}
PROMPT_COMMAND=_prompt_command
| true |
824beaf9c66dea911ce903d7cf369ae3b9e76dbb | Shell | guimier/secure-enum-converter | /tools/run_cf.sh | UTF-8 | 247 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/bin/bash -e
# shellcheck disable=SC1111
declare -r desc="Compilation failure “$1”"
shift
declare -r out="$1"
shift
declare -ra cmd=("$@")
if "${cmd[@]}" 2> "$out"; then
echo "FAILED: $desc"
exit 1
else
echo "PASSED: $desc"
fi
| true |
b52306f20e1e7ecf8491cd49519107b231408e8c | Shell | RehmatFalcon/bash-helpers | /git-runner.sh | UTF-8 | 257 | 3.234375 | 3 | [] | no_license | #!/bin/bash
echo "Enter git command : "
read gitCmd
for i in ./*;
do
if [[ -d $i ]]; then
cd $i
echo "************************************"
echo "Running command for $i"
echo "************************************"
$gitCmd
cd ..
fi;
done
| true |
ab644e5bf91b729756d8ee33d2067d2b27a738ce | Shell | mitch000001/dotvim | /update | UTF-8 | 261 | 3.0625 | 3 | [] | no_license | #!/bin/bash
cd ~/.vim
git fetch -q
remote_latest_ref=$(git log --pretty=format:%H -n 1 origin)
local_latest_ref=$(git log --pretty=format:%H -n 1)
if [ "$remote_latest_ref" != "$local_latest_ref" ]
then
echo 'Updating repository...'
git pull --rebase
fi
| true |
1b282997db8bb8e55db2d59952385c079c767af7 | Shell | RadicalZephyr/sage-tools | /maplogs.sh | UTF-8 | 293 | 3.25 | 3 | [] | no_license | #!/usr/bin/env bash
while read -r
do
NAME=$(echo $(basename $REPLY) | cut -d '/' -f 7 | cut -d '.' -f 1)
DATE=$(echo $(basename $REPLY) | cut -d '/' -f 7 | cut -d '.' -f 2 | cut -d '-' -f1-3)
if [ $NAME = "repo-activity" ]
then
echo $NAME-$DATE$'\t'$REPLY
fi
done
| true |
45ad7b46214aa21def7e8b22c7db9383d296b924 | Shell | matix-io/django-heroicons | /release.sh | UTF-8 | 287 | 2.84375 | 3 | [] | no_license | #!/bin/bash
echo "Current version is $(cat VERSION.txt). What version did you want to release?"
read VERSION
echo $VERSION > VERSION.txt
python setup.py sdist
twine upload "dist/django_heroicons-$VERSION.tar.gz"
git add .
git commit -m "v$VERSION"
git tag -a "v$VERSION" -m "v$VERSION"
| true |
a1ce3eeaec169f9046b161eaa0753721093a424b | Shell | eSentire/qspm | /tools/setup.sh | UTF-8 | 2,632 | 3.921875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#
# This script sets up ones environment for rust development using
# Web Assembly.
#
set -e
# Utility Functions
function _info() {
printf '\x1b[34;1mINFO: %d: %s\x1b[0m\n' ${BASH_LINENO[0]} "$*"
}
function _err() {
printf '\x1b[31;1mERROR: %d: %s\x1b[0m\n' ${BASH_LINENO[0]} "$*"
exit 1
}
# Banner
printf '\x1b[34;1m'
cat <<EOF
# ========================================================================
#
# Setting up for rust Web Assembly Development.
#
# ========================================================================
EOF
printf '\x1b[0m'
# Install rust if it is not already installed.
if [ ! -f $HOME/.cargo/bin/rustup ] ; then
_info "installing rust"
# Avoid the prompt by specifying -y.
set -x
time curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
{ set +; } 2> /dev/null
else
_info "rust already installed"
fi
# Setup the environment.
source $HOME/.cargo/env
# Report the rust installation.
_info "rust installation"
cargo --version
rustc --version
rustup --version
# Install the wabt tools if they are not already installed.
# Note that cmake must exist.
if ! cmake --version 2>/dev/null ; then
_err "required tool not installed: cmake, cannot continue"
fi
# Building the tools requires cmake, make, a C compiler, etc.
if [ ! -f wabt/bin/wasm2wat ] ; then
_info "installing wabt"
set -x
git clone https://github.com/WebAssembly/wabt.git
cd wabt
git submodule update --init
mkdir build
cd build
cmake ..
cmake --build .
make
cd ../..
{ set +x; } 2> /dev/null
else
_info "wabt/bin/wasm2wat already installed"
fi
_info "verifying wabt setup"
export PATH="$PATH:$(pwd)/wabt/bin"
set -x
wasm2wat --version
{ set +x; } 2> /dev/null
# Update rust to include Web Assembly components.
_info "installing web assembly components"
set -x
time rustup update
time rustup component add rls rust-analysis rust-src
time rustup target add wasm32-unknown-unknown
time cargo install wasm-gc
time cargo install wasm-bindgen-cli
{ set +x; } 2> /dev/null
_info "done"
printf '\x1b[32;1m'
cat <<EOF
You can start to build Web Assembly into rust without npm.
Here is how you setup your environment:
\$ source \$HOME/.cargo/env
\$ export PATH="\$PATH:$(pwd)/wabt/bin"
Here is how you can test it:
\$ cargo --version
\$ wasm2wat --version
EOF
printf '\x1b[0m'
# Notes for ubuntu-18.04
# sudo apt-get update
# sudo apt-get install -y curl
# sudo apt-get install -y build-essentials
# sudo apt-get install -y libssl-dev pkg-config
# sudo apt-get install -y cmake make
| true |
49f79e8ad7643b0a5adcbe305f216c8251b97892 | Shell | shania3322/joeynmt | /github/preprocess/sockeye/code/cnn/fairseq/test.sh | UTF-8 | 566 | 2.765625 | 3 | [] | permissive | #!/bin/bash
. env.sh
# needed by fairseq to get the language pair
pytorch_data_dir=bin_data_$PAIR
test=${2:-$DATDIR/$PAIR/test.bpe.$SOURCE}
c=`cat early_stop.txt`
export CUDA_VISIBLE_DEVICES=0
python $FAIRSEQ/generate.py \
$pytorch_data_dir \
--path model/checkpoint$c.pt \
--batch-size 16 -i --beam 5 --gen-subset test \
| sed -u 's/\@\@ //g' | tee test/out \
| $MOSES/scripts/tokenizer/detokenizer.perl -q -l $TARGET > out.detok
# get BLEU
cat out.detok | sacrebleu -t wmt17 -l $PAIR | tee out.detok.bleu
| true |
89a21aa390f7757605f16a8cca9794625438cfbc | Shell | RonitNayak25/Shell-Scripts | /LabExperiments/3.sh | UTF-8 | 316 | 3.71875 | 4 | [] | no_license | # Write a shell script to which will accept a number & find out the summation of square of last 3 digits.
Num=123
g=$Num
s=0
while [ $Num -gt 0 ]
do
k=$(( $Num % 10 ))
p=`expr "$k" \* "$k" \* "$k" `
Num=$(( $Num / 10 ))
s=$(( $s + $p ))
done
echo "sum of cubes of digits of $g is : $s"
| true |
c494a5dbee973f644655b734b6ac3e7f3e9103d3 | Shell | idanjos/tciot | /run | UTF-8 | 433 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# run this script with sudo
# sudo ./run <path to your ssh keys>
if [ "$EUID" -ne 0 ]
then echo "Please run as root"
exit
fi
if [ -n "$1" ]; then
echo "Path to private key: $1"
else
echo "The key parameter was not supplied."
exit 1
fi
trap 'pkill ssh' INT TERM ERR
trap 'pkill ssh' EXIT
echo "Connecting to VPS Server"
ssh -i $1 -L 5672:85.217.171.67:5672 root@85.217.171.67 -fN
sleep 5
python3 scan.py
wait
| true |
80df21937a454c756b3317a394f61cafab5bbc87 | Shell | felipecwb/bipeline | /log.sh | UTF-8 | 671 | 3.4375 | 3 | [] | no_license |
declare -A LOG_COLORS
LOG_COLORS[DEFAULT]="\e[49m\e[39m"
LOG_COLORS[DEBUG]="\e[104m"
LOG_COLORS[INFO]="\e[36m"
LOG_COLORS[WARNING]="\e[91m"
LOG_COLORS[ERROR]="\e[41m"
LOG_COLORS[OK]="\e[32m"
LOG_COLORS[SUCCESS]="\e[42m"
logger() {
_type="${1^^}"
_color="${LOG_COLORS[DEFAULT]}"
if [[ -n "${LOG_COLORS[$_type]}" ]]; then
_color="${LOG_COLORS[$_type]}"
shift
else
_type=DEFAULT
fi
echo -e "$_color$@${LOG_COLORS[DEFAULT]}"
}
log() { logger DEFAULT $@; }
ldebug() { logger DEBUG $@; }
linfo() { logger INFO $@; }
lwarn() { logger WARNING $@; }
lerror() { logger ERROR $@; }
lok() { logger OK $@; }
lsuccess() { logger SUCCESS $@; }
| true |
0b33dd3d14a207aa918cf4290a5c2b414c6b962a | Shell | bjarteb/vagrant-kafka-ssl | /scripts/create-certs.sh | UTF-8 | 3,821 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# /bin/rm ca.key ca.crt server.truststore.jks client.truststore.jks server.keystore.jks cert-file cert-signed
# create the CA cert. The same CA will be used to sign all kafka certificates
# Note! you will be prompted for password to encrypt the ca.key
openssl req \
-new -x509 -keyout ca.key -out ca.crt -days 365 \
-subj "/C=NO/ST=Hordaland/L=Bergen/O=E/OU=EXAMPLE/CN=selfsigned.kafka.example.com"
# generate key and certificate for each machine in the cluster
keytool -genkey -keystore server.keystore.jks -keyalg RSA -keysize 2048 \
-validity 10000 -alias kafka -dname "cn=k1.kafka.example.com, ou=IT, o=EXAMPLE.COM, c=NO" \
-ext SAN=DNS:k1.kafka.example.com \
-storepass password -keypass password
# migrate to pkcs12
#keytool -importkeystore -srckeystore server.keystore.jks -destkeystore server.keystore.jks -deststoretype pkcs12 \
# -storepass password -keypass password
# If you configure the Kafka brokers to require client authentication by setting ssl.client.auth to be "requested" or "required" on the Kafka brokers config then you must provide a truststore for the Kafka brokers as well and it should have all the CA certificates that clients' keys were signed by.
# import CA cert to server truststore
keytool -keystore server.truststore.jks -alias CARoot -import -file ca.crt -storepass password -noprompt
# import CA cert to client truststore
keytool -keystore client.truststore.jks -alias CARoot -import -file ca.crt -storepass password -noprompt
# request certificate and write to file
keytool -keystore server.keystore.jks -alias kafka -certreq -file cert-file -storepass password -noprompt
# sign certificate
openssl x509 -req -CA ca.crt -CAkey ca.key -in cert-file -out cert-signed -days 1000 -CAcreateserial -passin pass:password
# import both certificates (ca + kafka) to server keystore
keytool -keystore server.keystore.jks -alias CARoot -import -file ca.crt -storepass password -noprompt
keytool -keystore server.keystore.jks -alias kafka -import -file cert-signed -storepass password -noprompt
# import both certificates (ca + kafka) to client keystore
keytool -keystore client.keystore.jks -alias CARoot -import -file ca.crt -storepass password -noprompt
keytool -keystore client.keystore.jks -alias kafka -import -file cert-signed -storepass password -noprompt
# list certificates in keystore
keytool -list -v -keystore server.keystore.jks -storepass password
keytool -list -v -keystore client.keystore.jks -storepass password
################################################################################
# script
################################################################################
# import CA cert to server truststore
keytool -keystore server.truststore.jks -alias CARoot -import -file ca.crt -storepass password -noprompt
# import CA cert to client truststore
keytool -keystore client.truststore.jks -alias CARoot -import -file ca.crt -storepass password -noprompt
for i in {1..3}; do
keytool -genkey -keystore k${i}.keystore.jks -keyalg RSA -keysize 2048 \
-validity 10000 -alias kafka -dname "cn=k${i}.kafka.example.com, ou=IT, o=EXAMPLE.COM, c=NO" \
-ext SAN=DNS:k${i}.kafka.example.com \
-storepass password -keypass password
# request certificate and write to file
keytool -keystore k${i}.keystore.jks -alias kafka -certreq -file cert-file -storepass password -noprompt
# sign certificate
openssl x509 -req -CA ca.crt -CAkey ca.key -in cert-file -out cert-signed -days 1000 -CAcreateserial -passin pass:password
# import both certificates (ca + kafka) to keystore
keytool -keystore k${i}.keystore.jks -alias CARoot -import -file ca.crt -storepass password -noprompt
keytool -keystore k${i}.keystore.jks -alias kafka -import -file cert-signed -storepass password -noprompt
done
# copy
| true |
7209d02dc4e24ae6063de0f980e34b6997f1a293 | Shell | Webprotekh/simple_shell | /generate_authos.sh | UTF-8 | 363 | 3.125 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/."
# see also ".mailmap" for how email addresses and names are deduplicate
{
cat <<'EOH'
#this file will list all contibutors to this repository.
#To know how it was generated, checkh 'generate_authors.sh'.
EOH
echo
git log --format='%aN <%aE>' | LC_ALL-C.UTF-8 sort -uf
} > AUTHORS
| true |
c283cf317d9815339aeb547e510550b85eda1d28 | Shell | OpenSourceConsulting/playce-meerkat | /agent/kill.sh | UTF-8 | 275 | 3.3125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
. ./env.sh
PID=`ps -ef | grep java | grep "$AGENT_PROCESS_NAME" | awk '{print $2}'`
if [[ -z ${PID} ]];then
logger -s "${AGENT_PROCESS_NAME} is not running."
exit;
fi
ps -ef | grep java | grep "$AGENT_PROCESS_NAME" | awk {'print "kill -9 " $2'} | sh -x
| true |
eb576c59269252625866e402bb39ef91df9c50d5 | Shell | KNMI/VERCE | /test-resources/testfiles/fullsimulation_PRACE_LRZ_WORK_patched_test_rtprov.sh | UTF-8 | 3,965 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/bin/bash -l
unzip vercepes
PROV=$(cat control)
mkdir -p $HOME/$PROV
export DISPLAY=localhost:38.0
echo -------------------------------------------------------
RUN_PATH=$(pwd)
export WORK_SHARED=$WORK/../di68gex/
echo "producing input files..."
python verce-hpc-pe/src/bulk_inputgen.py quakeml stations solverconf
echo "update provenance inputgen..."
python $RUN_PATH/verce-hpc-pe/src/transferMetadataProducer.py $RUN_PATH/solverconf gsiftp://supermuc.lrz.de/ provloc $RUN_PATH/provout_inputgen
echo -------------------------------------------------------
echo " decomposing mesh..."
python verce-hpc-pe/src/bulk_decompose.py jsonout_inputgen solverconf
echo "update provenance decompose..."
python $RUN_PATH/verce-hpc-pe/src/transferMetadataProducer.py $RUN_PATH/solverconf gsiftp://supermuc.lrz.de/ provloc $RUN_PATH/provout_decompose
RUN_BASE=$(basename "$PWD")
SIMULATION_PATHS=$(python verce-hpc-pe/src/PEDataExtractor.py jsonout_inputgen content path)
RUN_ID=$(python verce-hpc-pe/src/PEDataExtractor.py jsonout_inputgen metadata runId)
VELOCITY=$(python verce-hpc-pe/src/PEDataExtractor.py solverconf root velocity_model)
echo $RUN_ID
mkdir -p $HOME/$RUN_ID
echo "--- file copy for runtime metadata messaging -- "
TIME=$(date +%s)
cp $RUN_PATH/provout_inputgen $HOME/$PROV/provout_inputgen$RUN_ID$TIME
cp $RUN_PATH/provout_decompose $HOME/$PROV/provout_decompose$RUN_ID$TIME
arr=(`echo $SIMULATION_PATHS`);
COUNTER=0
for i in "${arr[@]}"
do
:
pwd
mkdir -p $WORK/$i/../OUTPUT_FILES
mkdir -p $WORK/$i/../bin
mkdir -p $WORK/$i/../velocity
cp $WORK_SHARED/specfem/velocity_$VELOCITY/* $WORK/$i/../velocity
cp -r $RUN_PATH/$RUN_ID/OUTPUT_FILES/DATABASES_MPI $WORK/$i/../OUTPUT_FILES
cp -r $i/ $WORK/$i/../
echo $WORK/$i -------------------------------------------------------
cd $WORK/$i/../
# runs database generation and simulation
echo
echo "database generation and simulation..."
echo
EVENT_PATH=`pwd`
cd bin/
echo python $RUN_PATH/verce-hpc-pe/src/bulk_run_specfem_full.py $RUN_PATH/jsonout_inputgen $RUN_PATH/jsonout_decompose $RUN_PATH/solverconf poe $COUNTER
python $RUN_PATH/verce-hpc-pe/src/bulk_run_specfem_full.py $RUN_PATH/jsonout_inputgen $RUN_PATH/jsonout_decompose $RUN_PATH/solverconf poe $COUNTER
echo "database generation and simulation... complete"
echo "see results in directory: OUTPUT_FILES/"
echo "update provenance simulation..."
python $RUN_PATH/verce-hpc-pe/src/transferMetadataProducer.py $RUN_PATH/solverconf gsiftp://supermuc.lrz.de/ provloc $EVENT_PATH/bin/provout_run_specfem
echo "--- file copy for runtime metadata messaging -- "
TIME=$(date +%s)
cp $EVENT_PATH/bin/provout_run_specfem $HOME/$PROV/provout_run_specfem$RUN_ID$TIME
echo "generation of seed and plot files..."
python $RUN_PATH/verce-hpc-pe/src/bulk_seed_vis.py jsonout_run_specfem $RUN_PATH/stations
echo "update provenance transform..."
python $RUN_PATH/verce-hpc-pe/src/transferMetadataProducer.py $RUN_PATH/solverconf gsiftp://supermuc.lrz.de/ provloc $EVENT_PATH/bin/provout_transformed
echo "--- file copy for runtime metadata messaging -- "
TIME=$(date +%s)
cp $EVENT_PATH/bin/provout_transformed $HOME/$PROV/provout_transformed$RUN_ID$TIME
let COUNTER=COUNTER+1
rm -rf $EVENT_PATH/OUTPUT_FILES/DATABASES_MPI
python $RUN_PATH/verce-hpc-pe/src/transferMetadataProducer.py $RUN_PATH/solverconf gsiftp://supermuc.lrz.de/ dataloc $i/
python $RUN_PATH/verce-hpc-pe/src/transferMetadataProducer.py $RUN_PATH/solverconf gsiftp://supermuc.lrz.de/ dataloc $EVENT_PATH/OUTPUT_FILES/
python $RUN_PATH/verce-hpc-pe/src/transferMetadataProducer.py $RUN_PATH/solverconf gsiftp://supermuc.lrz.de/ dataloc $EVENT_PATH/DATA/
python $RUN_PATH/verce-hpc-pe/src/transferMetadataProducer.py $RUN_PATH/solverconf gsiftp://supermuc.lrz.de/ cleanloc $EVENT_PATH/
done
python $RUN_PATH/verce-hpc-pe/src/transferMetadataProducer.py $RUN_PATH/solverconf gsiftp://supermuc.lrz.de/ hostname supermuc.lrz.de
echo "--- terminates PROV job -- "
touch $HOME/$PROV/exitf
| true |
1db0d002d6ed85ebc02094057d709326980d5299 | Shell | donatj/gobinsize | /buildall.sh | UTF-8 | 731 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
set -e -x
export DOCKER_DEFAULT_PLATFORM=linux/amd64
mkdir -p output
rm -f output/*
build() {
rm -rf gopath/bin gopath/pkg || echo "No bin or pkg directories to remove"
docker run --rm -e GOPATH=/usr/src/myapp/gopath -v "$PWD":/usr/src/myapp -w /usr/src/myapp/gopath/src/"$2" golang:"$1" /bin/bash -c "go get && go build -o /usr/src/myapp/output/$3.$1"
}
for VER in 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 1.10 1.11 1.12 1.13 1.14 1.15 1.16 1.17 1.18 1.19 1.20
do
echo "Building with golang:$VER"
build $VER hello hello
build $VER hello-nofmt hello-nofmt
build $VER github.com/donatj/imgavg imgavg
build $VER github.com/donatj/sqlread/cmd/sqlread sqlread
build $VER github.com/donatj/hookah/cmd/hookah hookah
done
| true |
d9337fb67a6b1f75cfe477603059d047aec910dc | Shell | aming/dotfiles | /sketchybar/plugins/front_app.sh | UTF-8 | 390 | 3.34375 | 3 | [] | no_license | #!/bin/sh
# Some events send additional information specific to the event in the $INFO
# variable. E.g. the front_app_switched event sends the name of the newly
# focused application in the $INFO variable:
# https://felixkratz.github.io/SketchyBar/config/events#events-and-scripting
echo "[$NAME] set label to $INFO"
if [[ $INFO != "" ]]; then
sketchybar --set $NAME label="$INFO"
fi
| true |
2dd60546118ec55f6c1dfc32284ba895863029ef | Shell | bkgood/ffs | /ffs.sh | UTF-8 | 4,094 | 2.515625 | 3 | [] | no_license | #!/bin/sh
function pexit {
if [ $1 -ne 0 ]; then echo failed to write ; exit 1 ; fi
return 0
}
name=$1
if [ -z "$name" ]; then echo "usage: $0 projectname"; exit 1 ; fi
mkdir -vp $name/src/main/java/$name $name/src/main/resources $name/src/test/java/$name $name/src/test/resources $name/project
pexit $? && echo creating $name/build.sbt && cat > $name/build.sbt <<EOF
name := "$name"
organization := "$name"
version := "2.1.5"
scalaVersion := "2.11.7"
parallelExecution in ThisBuild := false
publishMavenStyle := true
crossPaths := false
autoScalaLibrary := false
javacOptions ++= Seq(
"-source", "1.8",
"-target", "1.8",
"-Xlint:unchecked"
)
mainClass in (Compile, packageBin) := Some("$name.Main")
lazy val versions = new {
val finatra = "2.1.5"
val guice = "4.0"
val logback = "1.0.13"
}
resolvers ++= Seq(
Resolver.sonatypeRepo("releases"),
"Twitter Maven" at "https://maven.twttr.com"
)
libraryDependencies ++= Seq(
"com.twitter.finatra" %% "finatra-http" % versions.finatra,
"com.twitter.finatra" %% "finatra-httpclient" % versions.finatra,
"ch.qos.logback" % "logback-classic" % versions.logback,
"com.twitter.finatra" %% "finatra-http" % versions.finatra % "test",
"com.twitter.finatra" %% "finatra-jackson" % versions.finatra % "test",
"com.twitter.inject" %% "inject-server" % versions.finatra % "test",
"com.twitter.inject" %% "inject-app" % versions.finatra % "test",
"com.twitter.inject" %% "inject-core" % versions.finatra % "test",
"com.twitter.inject" %% "inject-modules" % versions.finatra % "test",
"com.google.inject.extensions" % "guice-testlib" % versions.guice % "test",
"com.twitter.finatra" %% "finatra-http" % versions.finatra % "test" classifier "tests",
"com.twitter.finatra" %% "finatra-jackson" % versions.finatra % "test" classifier "tests",
"com.twitter.inject" %% "inject-server" % versions.finatra % "test" classifier "tests",
"com.twitter.inject" %% "inject-app" % versions.finatra % "test" classifier "tests",
"com.twitter.inject" %% "inject-core" % versions.finatra % "test" classifier "tests",
"com.twitter.inject" %% "inject-modules" % versions.finatra % "test" classifier "tests",
"org.mockito" % "mockito-core" % "1.9.5" % "test",
"org.scalatest" %% "scalatest" % "2.2.3" % "test",
"org.specs2" %% "specs2" % "2.3.12" % "test",
"com.novocode" % "junit-interface" % "0.11" % Test)
EOF
pexit $? && echo creating $name/project/plugins.sbt && cat > $name/project/plugins.sbt <<EOF
addSbtPlugin("io.spray" % "sbt-revolver" % "0.8.0")
EOF
pexit $? && echo creating $name/src/main/java/$name/Main.java && cat > $name/src/main/java/$name/Main.java <<EOF
package $name;
public class Main {
public static void main(String[] args) {
new MainServer().main(args);
}
}
EOF
pexit $? && echo creating $name/src/main/java/$name/MainServer.java && cat > $name/src/main/java/$name/MainServer.java <<EOF
package $name;
import java.util.Collection;
import com.google.common.collect.ImmutableList;
import com.google.inject.Module;
import com.twitter.finatra.http.JavaHttpServer;
import com.twitter.finatra.http.filters.CommonFilters;
import com.twitter.finatra.http.routing.HttpRouter;
import javax.inject.Inject;
import com.twitter.finatra.http.JavaController;
public class MainServer extends JavaHttpServer {
public static class MainController extends JavaController {
public class GoodbyeResponse {
public final String name;
public final String message;
public final Integer code;
public GoodbyeResponse(String name, String message, Integer code) {
this.name = name;
this.message = message;
this.code = code;
}
}
public void configureRoutes() {
get("/goodbye", request ->
new GoodbyeResponse("guest", "cya", 123));
}
}
@Override
public void configureHttp(HttpRouter httpRouter) {
httpRouter
.filter(CommonFilters.class)
.add(MainController.class);
}
}
EOF
| true |
7bcbe7329285088887bb8ac2aa2bff7106e04869 | Shell | backbonecabal/infra | /run.sh | UTF-8 | 2,345 | 3.84375 | 4 | [] | no_license | #!/bin/bash -u
NO_LOCK_REQUIRED=true
# Pull in shared settings and helpers (BESU_VERSION, LOCK_FILE, bold/normal,
# ${me}, etc. are expected to come from these two files).
. ./.env
. ./.common.sh
PARAMS=""
# Print the help text and exit successfully.
displayUsage()
{
echo "This script creates and start a local private Backbone network using Docker."
echo "You can select the client mechanism to use.\n"
echo "Usage: ${me} [OPTIONS]"
echo " -c <miner|relay|monitor> : the client mechanism that you want to run
monitor provides stats"
echo " -e : setup ELK with the network."
echo " -s : test ethsigner with the rpcnode Note the -s option must be preceeded by the -c option"
exit 0
}
# options values and api values are not necessarily identical.
# value to use for miner option as required for Besu --rpc-http-api and --rpc-ws-api
miner='miner'
relay='relay' # value to use for relay option
# Base name of the compose file; suffixes (_poa, _elk, _signer) are appended
# or substituted below depending on the selected options.
composeFile="docker-compose"
# Option parsing: -h help, -c client mechanism, -e ELK stack, -s ethsigner
# (the -s branch requires that -c already selected a PoA compose file).
while getopts "hesc:" o; do
case "${o}" in
h)
displayUsage
;;
c)
algo=${OPTARG}
case "${algo}" in
miner|relay)
# ${!algo} is bash indirect expansion: the value of the variable
# whose name is stored in $algo ('miner' or 'relay' above).
export SAMPLE_POA_NAME="${algo}"
export SAMPLE_POA_API="${!algo}"
export SNAPSHOT_VERSION="${BESU_VERSION}"
composeFile="${composeFile}_poa"
;;
ethash)
# NOTE(review): intentionally empty — for ethash no POA variables are
# exported; SNAPSHOT_VERSION is presumably supplied by .env. Confirm.
;;
*)
echo "Error: Unsupported client value." >&2
displayUsage
esac
;;
e)
# Switch to the ELK-enabled compose file variant.
elk_compose="${composeFile/docker-compose/docker-compose_elk}"
composeFile="$elk_compose"
;;
s)
# ethsigner only applies to a PoA network, so -s must follow a valid -c.
if [[ $composeFile == *"poa"* ]]; then
signer_compose="${composeFile/poa/poa_signer}"
composeFile="$signer_compose"
else
echo "Error: Unsupported client value." >&2
displayUsage
fi
;;
*)
displayUsage
;;
esac
done
composeFile="-f ${composeFile}.yml"
# Build and run containers and network
# Record the chosen compose file and version so companion scripts
# (stop/remove) can act on the same network; LOCK_FILE comes from .env.
echo "${composeFile}" > ${LOCK_FILE}
echo "${SNAPSHOT_VERSION}" >> ${LOCK_FILE}
echo "${bold}*************************************"
echo "Client ${SNAPSHOT_VERSION}"
echo "*************************************${normal}"
echo "Start network"
echo "--------------------"
echo "Starting network..."
docker-compose ${composeFile} build --pull
docker-compose ${composeFile} up --detach
#list services and endpoints
./list.sh
# Copyright 2018 ConsenSys AG.
# http://www.apache.org/licenses/LICENSE-2.0
# Backbone Cabal
| true |
3c80e10684aed644608bd58cad4c01b1019e0f60 | Shell | delkyd/alfheim_linux-PKGBUILDS | /xplanet-svn/PKGBUILD | UTF-8 | 1,038 | 2.65625 | 3 | [] | no_license | # Maintainer: Gaetan Bisson <bisson@archlinux.org
# Contributor: Gilles CHAUVIN <gcnweb at gmail dot com>
# PKGBUILD for the SVN snapshot of xplanet; built/run by makepkg, which
# supplies $srcdir and $pkgdir and calls the functions below in order.
pkgname=xplanet-svn
_pkgname=xplanet
pkgver=20170111.209
pkgrel=1
pkgdesc='Renders an image of the earth into the X root window'
url='http://xplanet.sourceforge.net/'
arch=('i686' 'x86_64' 'armv7h')
license=('GPL2')
makedepends=('subversion')
depends=('pango' 'giflib' 'libtiff' 'libxss')
source=("${_pkgname}::svn://svn.code.sf.net/p/xplanet/code/trunk"
"giflib.patch")
sha256sums=('SKIP'
'c9abf31bf242d7c0940e8fbc5b64714c12edd4b995aba1ebe776ddc0c5bf019a')
provides=("${_pkgname}")
conflicts=("${_pkgname}")
# Derive the VCS version as <last-changed-date>.<revision> (e.g. 20170111.209)
# from `svn info`.
pkgver() {
cd "${srcdir}/${_pkgname}"
svn info | awk '/Revision/{r=$2}/Date/{gsub(/-/,"");d=$4}END{print d"."r}'
}
# Apply the giflib compatibility patch and regenerate the autotools files.
prepare() {
cd "${srcdir}/${_pkgname}"
patch -p1 <"${srcdir}/giflib.patch"
aclocal && autoconf && automake --add-missing
}
build() {
cd "${srcdir}/${_pkgname}"
./configure --prefix=/usr --with-freetype
make
}
# Install into the packaging root ($pkgdir) rather than the live system.
package() {
cd "${srcdir}/${_pkgname}"
make prefix="${pkgdir}"/usr install
}
| true |
a047dd62b94902406fccfe5cd1146302eec303f7 | Shell | yinjun111/Reilly2015Science | /PermutationTest/run_module_permutation.sh | UTF-8 | 2,208 | 2.78125 | 3 | [] | no_license | #For network permutation test
permutationnum=100
category=CS16-ac-hs_gain
#Final result for the permutation test is: results/cordev_all_3-wayOrtho_hs_enhancer_${category}_WC2_4_shuffled_qvalues.txt
mkdir shuffle intersect counts results
#shuffle the human gain features
for num in $(eval echo {1..$permutationnum})
do
perl shuffle_humangain_140225.pl cordev_all_3-wayOrtho_hs_enhancer.bed cortex_annotation_140226_forcount_enhancer.txt shuffle/cordev_all_3-wayOrtho_hs_enhancer_s$num.bed
done
#intersect with human genes
cut -f 1,2,3,4 gencode.v10.wholeGene.exonTranscript_regDom.bed | intersectBed -a stdin -b cordev_all_3-wayOrtho_hs_enhancer.bed -wb | cut -f 4,8 > intersect/cordev_all_3-wayOrtho_hs_enhancer_intersect.bed;
for num in $(eval echo {1..$permutationnum})
do
cut -f 1,2,3,4 gencode.v10.wholeGene.exonTranscript_regDom.bed | intersectBed -a stdin -b shuffle/cordev_all_3-wayOrtho_hs_enhancer_s$num.bed -wb | cut -f 4,8 > intersect/cordev_all_3-wayOrtho_hs_enhancer_s$num\_intersect.bed;
done
#count the shuffling
perl count_intersect_module_140227.pl intersect/cordev_all_3-wayOrtho_hs_enhancer_intersect.bed cortex_annotation_140226_forcount_enhancer.txt WC2_4_module_allinfo.txt counts/cordev_all_3-wayOrtho_hs_enhancer_intersect_${category}_WC2_4_count.txt ${category} all 1 1
for num in $(eval echo {1..$permutationnum})
do
perl count_intersect_module_140227.pl intersect/cordev_all_3-wayOrtho_hs_enhancer_s$num\_intersect.bed cortex_annotation_140226_forcount_enhancer.txt WC2_4_module_allinfo.txt counts/cordev_all_3-wayOrtho_hs_enhancer_s$num\_intersect_${category}_WC2_4_count.txt ${category} all 1 1;
done
#calculate enrichment test p-value and perform p-value correction
perl summarize_shuffle_results_module_131118.pl counts/cordev_all_3-wayOrtho_hs_enhancer_intersect_${category}_WC2_4_count.txt "counts/cordev_all_3-wayOrtho_hs_enhancer_s*_intersect_${category}_WC2_4_count.txt" results/cordev_all_3-wayOrtho_hs_enhancer_${category}_WC2_4_shuffled_results.txt;
Rscript cal_general_binom_permu_140312.R results/cordev_all_3-wayOrtho_hs_enhancer_${category}_WC2_4_shuffled_results.txt results/cordev_all_3-wayOrtho_hs_enhancer_${category}_WC2_4_shuffled_qvalues.txt
| true |
181787b60798460ca1a5be212819b4af0720150f | Shell | SixTrack/SixDesk | /boinc_software/monitor-boinc-server/submissions/retrieveData.sh | UTF-8 | 3,452 | 3.625 | 4 | [] | no_license | #!/bin/bash
boincServer=boincai11.cern.ch
sixtrackProjPath=/usr/local/boinc/project/sixtrack
spoolDirPath=/afs/cern.ch/work/b/boinc/boinc
spoolDirTestPath=/afs/cern.ch/work/b/boinc/boinctest
where=$PWD
lretrieve=true
lgetOwners=true
lclean=true
# date to treat
if [ -z "$1" ] ; then
echo "please specify a date in format: YYYY-MM-DD"
exit
else
tmpDate=$1
fi
#
echo " starting `basename $0` at `date` ..."
if ${lretrieve} ; then
echo " retrieving submitted WUs and time intervals from log files on ${boincServer} - date: ${tmpDate}"
# do not use grep -h: the script still needs the study name (from the submit.*.log file)!
ssh sixtadm@${boincServer} "cd ${sixtrackProjPath}/log_boincai11 ; grep -e $tmpDate submit*log | grep Submitted | awk '{sub(/:/,\"\ \",\$0); print (\$0)}' | sort -k2 > ${where}/submitAll_${tmpDate}.txt"
echo " reshuffling retrieve info in a more compact and plottable way in submitAll_${tmpDate}.dat ..."
# $1 is really the study name (from the submit.*.log file)
# awk '{if ($1!=lastStudy) {print (tStart,lastLine,Ntot); tStart=$2; Ntot=0;} Ntot=Ntot+1; lastLine=$0; lastStudy=$1;}END{print (tStart,$0,Ntot)}' submitAll_${tmpDate}.txt > submitAll_${tmpDate}.dat
awk '{if ($1!=lastStudy) {if (NR>1) {print (tStart,lastStudy,tStop,Ntot);} tStart=$2; Ntot=0;} Ntot=Ntot+1; lastStudy=$1; tStop=$2;}END{print (tStart,lastStudy,tStop,Ntot)}' submitAll_${tmpDate}.txt > submitAll_${tmpDate}.dat
# WUs being assimilated
echo " retrieving assimilated WUs - date: ${tmpDate}"
ssh sixtadm@${boincServer} "cd ${sixtrackProjPath}/log_boincai11 ; grep Assimilated sixtrack_assimilator.log | grep $tmpDate > ${where}/assimilateAll_${tmpDate}.dat"
fi
if ${lgetOwners} ; then
echo " getting owners..."
tmpLinesSubmit=`cat submitAll_${tmpDate}.dat`
Nlines=`echo "${tmpLinesSubmit}" | wc -l`
# get unique studies and owners
uniqueStudyNames=`echo "${tmpLinesSubmit}" | awk '{sub(/submit./,"",$2); sub(/\.log/,"",$2); print ($2)}' | sort -u`
uniqueStudyNames=( ${uniqueStudyNames} )
owners=""
dirOwners=""
for uniqueStudyName in ${uniqueStudyNames[@]} ; do
spoolDir=''
for tmpDir in ${spoolDirPath} ${spoolDirTestPath} ; do
if [ -d ${tmpDir}/${uniqueStudyName} ] ; then
spoolDir=${tmpDir}/${uniqueStudyName}
break
fi
done
if [ "${spoolDir}" == "" ] ; then
# the spooldir is not there
owner="-"
dirOwner="-"
else
dirOwner=`ls -ld ${spoolDir} | awk '{print ($3)}'`
ownerFile=${spoolDir}/owner
if [ -e ${ownerFile} ] ; then
owner=`cat ${ownerFile}`
else
owner="-"
fi
fi
owners="${owners} ${owner}"
dirOwners="${dirOwners} ${dirOwner}"
done
owners=( ${owners} )
dirOwners=( ${dirOwners} )
# paste everything
Nlines=`echo "${tmpLinesSubmit}" | wc -l`
rm -f temp.dat
for (( ii=1; ii<=${Nlines}; ii++ )) ; do
tmpLine=`echo "${tmpLinesSubmit}" | head -n ${ii} | tail -1`
# match study with owner
tmpStudyName=`echo "${tmpLine}" | awk '{sub(/submit./,"",$2); sub(/\.log/,"",$2); print ($2)}'`
for (( jj=0; jj<${#uniqueStudyNames[@]}; jj++ )) ; do
if [ "${tmpStudyName}" == "${uniqueStudyNames[$jj]}" ] ; then
echo "${tmpLine} ${tmpStudyName} ${dirOwners[$jj]} ${owners[$jj]}" >> temp.dat
break
fi
done
done
mv temp.dat submitAll_${tmpDate}.dat
fi
if ${lclean} ; then
rm submitAll_${tmpDate}.txt
fi
#
echo " ...done by `date`."
| true |
18df1f7abd1466ea123caf8396055b9f7654efd7 | Shell | starlingx/metal | /bsp-files/kickstarts/functions.sh | UTF-8 | 4,890 | 3.796875 | 4 | [
"Apache-2.0"
] | permissive | # This file defines functions that can be used in %pre and %post kickstart sections, by including:
# . /tmp/ks-functions.sh
#
# Everything between 'cat <<END_FUNCTIONS' and END_FUNCTIONS below is written
# (with $-expansion, e.g. xxxYEARxxx substitution hooks left intact and \$
# escapes surviving as literal $) into /tmp/ks-functions.sh. The heredoc body
# is therefore runtime DATA for the installer, not code executed by this
# script — edit it only with the generated file's behavior in mind.
cat <<END_FUNCTIONS >/tmp/ks-functions.sh
#
# Copyright (c) xxxYEARxxx Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
function wlog()
{
[ -z "\$stdout" ] && stdout=1
local dt="\$(date "+%Y-%m-%d %H:%M:%S.%3N")"
echo "\$dt - \$1" >&\${stdout}
}
function get_by_path()
{
local dev_name=\$(basename \$1)
if echo "\$dev_name" | grep -q mpath; then
exec_retry 30 1 "ls /dev/mapper/\$dev_name" > /dev/null
fi
for p in /dev/mapper/mpath*; do
if [ "\$p" = "\$1" -o "\$p" = "/dev/mapper/\$dev_name" ]; then
find -L /dev/disk/by-id/dm-uuid* -samefile /dev/mapper/\$dev_name
return
fi
done
local disk=\$(cd /dev ; readlink -f \$1)
for p in /dev/disk/by-path/*; do
if [ "\$disk" = "\$(readlink -f \$p)" ]; then
echo \$p
return
fi
done
}
function get_disk()
{
if echo \$1 | grep -q mpath; then
find -L /dev/mapper/ -samefile \$1
return
fi
echo \$(cd /dev ; readlink -f \$1)
}
function report_pre_failure_with_msg()
{
local msg=\$1
echo -e '\n\nInstallation failed.\n'
echo "\$msg"
exit 1
}
function report_prestaging_failure_with_msg()
{
local msg=\$1
echo -e '\n\nPrestaging failed.\n'
echo "\$msg"
exit 1
}
function report_post_failure_with_msg()
{
local msg=\$1
cat <<EOF >> /etc/motd
Installation failed.
\$msg
EOF
if [ -d /etc/platform ] ; then
echo "\$msg" >/etc/platform/installation_failed
fi
echo -e '\n\nInstallation failed.\n'
echo "\$msg"
exit 1
}
function report_post_failure_with_logfile()
{
local logfile=\$1
cat <<EOF >> /etc/motd
Installation failed.
Please see \$logfile for details of failure
EOF
if [ -d /etc/platform ] ; then
echo \$logfile >/etc/platform/installation_failed
fi
echo -e '\n\nInstallation failed.\n'
cat \$logfile
exit 1
}
function get_http_port()
{
echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
}
function get_disk_dev()
{
local disk
# Detect HDD
for blk_dev in vda vdb sda sdb dda ddb hda hdb; do
if [ -d /sys/block/\$blk_dev ]; then
disk=\$(ls -l /sys/block/\$blk_dev | grep -v usb | head -n1 | sed 's/^.*\([vsdh]d[a-z]\+\).*$/\1/');
if [ -n "\$disk" ]; then
exec_retry 3 0.5 "multipath -c /dev/\$disk" > /dev/null && continue
echo "\$disk"
return
fi
fi
done
for blk_dev in nvme0n1 nvme1n1; do
if [ -d /sys/block/\$blk_dev ]; then
disk=\$(ls -l /sys/block/\$blk_dev | grep -v usb | head -n1 | sed 's/^.*\(nvme[01]n1\).*$/\1/');
if [ -n "\$disk" ]; then
echo "\$disk"
return
fi
fi
done
for mpath_dev in mpatha mpathb; do
if [ -e /dev/mapper/\$mpath_dev ]; then
echo "/dev/mapper/\$mpath_dev"
return
fi
done
}
function exec_no_fds()
{
# Close open FDs when executing commands that complain about leaked FDs.
local fds=\$1
local cmd=\$2
local retries=\$3
local interval=\$4
local ret_code=0
local ret_stdout=""
for fd in \$fds
do
local cmd="\$cmd \$fd>&-"
done
if [ -z "\$retries" ]; then
#wlog "Running command: '\$cmd'."
eval "\$cmd"
else
ret_stdout=\$(exec_retry "\$retries" "\$interval" "\$cmd")
ret_code=\$?
echo "\${ret_stdout}"
return \${ret_code}
fi
}
function exec_retry()
{
local retries=\$1
local interval=\$2
local cmd=\$3
let -i retry_count=1
local ret_code=0
local ret_stdout=""
cmd="\$cmd" # 2>&\$stdout"
while [ \$retry_count -le \$retries ]; do
#wlog "Running command: '\$cmd'."
ret_stdout=\$(eval \$cmd)
ret_code=\$?
[ \$ret_code -eq 0 ] && break
wlog "Error running command '\${cmd}'. Try \${retry_count} of \${retries} at \${interval}s."
wlog "ret_code: \${ret_code}, stdout: '\${ret_stdout}'."
sleep \$interval
let retry_count++
done
echo "\${ret_stdout}"
return \${ret_code}
}
# This is a developer debug tool that can be line inserted in any kickstart.
# Code should not be committed with a call to this function.
# When inserted and hit, execution will stall until one of the 2 conditions:
# 1. /tmp/wait_for_go file is removed 'manually'
# 2. or after 10 minutes
function wait_for_go()
{
touch /tmp/wait_for_go
for loop in {1..60} ; do
sleep 10
if [ ! -e "/tmp/wait_for_go" ] ; then
break
fi
done
}
END_FUNCTIONS
| true |
b46f3ff36c18cb23e17515b4223e08dd26070180 | Shell | open-estuary/armor | /testing/test_scripts/test_lttng_kernel.sh | UTF-8 | 1,283 | 3.03125 | 3 | [] | no_license | ###############################################################################
#!/bin/bash
###############################################################################
# This script tests the LTTNG tools for kernel tracing end to end:
# list -> create session -> enable events -> start -> (trace) -> stop ->
# view -> destroy.
#Usage:
# $ ./test_lttng_kernel.sh
###############################################################################

# Print the given failure message and abort when the command executed just
# before this call exited non-zero. Must be the first statement after the
# command under test, since it reads $?.
# Arguments: $1 - message to print on failure
check_status()
{
    if [ $? -ne 0 ]
    then
        echo "$1"
        # Exit non-zero so callers/CI see the failure (the original script
        # used a bare 'exit', which reported success on every error path).
        exit 1
    fi
}

#check if installed
bash check_install.sh lttng
status=$?
if test $status -eq 1
then
    exit 1
fi

lttng list --kernel
check_status "command lttng list --kernel failed"

lttng create lttng_kernel_test
check_status "command lttng create [FAIL]"

lttng enable-event --kernel sched_switch,sched_process_fork
#lttng enable-event --kernel --all
check_status "command lttng enable-event [FAIL]"

lttng start
check_status "command lttng start [FAIL]"

# add any command here; sleep long enough for some scheduler events to land
sleep 2

lttng stop
check_status "command lttng stop [FAIL]"

lttng view
check_status "command lttng view [FAIL]"

lttng destroy
check_status "command lttng destroy [FAIL]"

echo "lttng test for kernel completed"
exit 0
| true |
bcef5d651a46e85141a3fa436d18755f24b79546 | Shell | agoldberglab/CaboVerde_Demographic_Analyses | /PreparingAncestryData/phasing_step2_findduplicatesnps.sh | UTF-8 | 279 | 2.625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#SBATCH --mem=10GB
#SBATCH -p noor

# For every merged PLINK .bed fileset in the phasing directory, run plink
# --list-duplicate-vars, writing the duplicate-variant report with the same
# fileset prefix (via --out).

# Abort instead of running plink against whatever directory we happen to be
# in when the data directory is unavailable.
cd /datacommons/noor/klk37/CV/NewReseqData/phasing || exit 1

# Iterate the glob directly (no unquoted intermediate variable needed).
for file in merged*.bed; do
    # Fileset prefix = everything before the first dot; same result as the
    # old `echo | awk -F'.' '{print $1}'` but without two subprocesses per file.
    name="${file%%.*}"
    /opt/apps/rhel7/plink-1.90/plink --bfile "$name" --list-duplicate-vars --out "$name"
done
| true |
98c74e2f37f156868b2fc6a303c13f5e3b35bd72 | Shell | gabrielpetry/commiter | /gcom | UTF-8 | 405 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Usage: gcom <commit message words...>
# Stages changes (via the companion gadd helper), lets the user pick a saved
# commit-type prefix with fzf, then commits with "<prefix> <message>".

# A commit message is required.
[[ -z "$1" ]] && echo "No message" && exit 1

COMMIT_MESSAGES="$HOME/.config/commiter/messages.txt"
# Held as an array so each option reaches fzf as a separate argument.
FZF_OPTIONS=(--height=10 --border --layout=reverse)

# Stage files first.
/usr/bin/gadd

# Make sure the saved-prefixes file exists before fzf reads it.
[[ -f "$COMMIT_MESSAGES" ]] || { mkdir -p "$(dirname "$COMMIT_MESSAGES")" && touch "$COMMIT_MESSAGES"; }

# Let the user pick the commit-type prefix (fzf reads the file directly;
# no 'cat | fzf' pipeline needed).
COMMIT_TYPE="$(fzf "${FZF_OPTIONS[@]}" < "$COMMIT_MESSAGES")"

# Abort when the selection was cancelled; previously an empty prefix was
# silently committed as part of the message.
[[ -n "$COMMIT_TYPE" ]] || exit 1

message="$COMMIT_TYPE $*"
git commit -m "$message"
| true |
d5abc3b4d7595b636b50725aec94cc995b42bd6f | Shell | encointer/encointer-node | /scripts/ci/init_env.sh | UTF-8 | 683 | 3.1875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -euo pipefail

# script that sets the correct environment variables to execute other scripts
# All paths are derived from this file's own location, so it works no matter
# where it is invoked from.

export CI_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export SCRIPT_DIR="$(dirname "$CI_DIR")"
export PROJ_ROOT="$(dirname "$SCRIPT_DIR")"
export CLIENT_DIR="$PROJ_ROOT/client"
export CLIENT_BIN="$PROJ_ROOT/target/release/encointer-client-notee"
export NODE_BIN="$PROJ_ROOT/target/release/encointer-node-notee"

echo "Set environment variables:"
echo " BASH_DIR: $CI_DIR"
echo " SCRIPT_DIR: $SCRIPT_DIR"
echo " PROJ_ROOT: $PROJ_ROOT"
echo " Client Directory: $CLIENT_DIR"
echo " Client Binary: $CLIENT_BIN"
echo " Node binary: $NODE_BIN"
| true |
e1706d30e3b518b257d47a8261aa133619abbc90 | Shell | HanfiNardine/Shell_Project | /menutext.sh | UTF-8 | 899 | 3.296875 | 3 | [] | no_license | graphique() #affichage d'un sous menu de chois de type de courbe
{
yad --center --width=300 --height=125 --title 'COURBE/CAMEMBER CPU%' --text="Veuillez choisir le type de graphique à voir :" \
--button="courbe CPU%":1 \
--button="camember CPU%":2
foo=$?
if [[ $foo -eq 1 ]]; then
./file.sh
elif [[ $foo -eq 2 ]]; then
./pie2.sh
else
cancel && exit 0
fi
}
#******************************************************************
while :
do
#affichge sous menu
echo "
----- MENU -----
(1) afficher et gerer les caractéristique harware de machine
(2) afficher les courbe & camember
(3) 0 = Quitter
"
read choix #lire le choix et discussion avec case..in
case $choix in
*1)
./caracteristiques.sh #execution de fichier caracterisqtiques
;;
*2)
graphique;
;;
3| 0 | q ) echo "L'utilisateur $USER à quitter le programme" ;
exit 0 ;;
*) echo "CHOIX INVALIDE ! RÉESSEYEZ ";;
esac
done
echo
exit 0
| true |
e56c5b59a6ff8bddab312f3d8f796f34de3d7b39 | Shell | Knas2121/cog-book | /scripts/ensure_image.sh | UTF-8 | 269 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Pull the Cog book toolchain image from Docker Hub unless a local copy of it
# already exists.
image=operable/cog-book-toolchain:sphinx

# Test the command directly instead of the '$? -gt 0' pattern; quoting the
# image name keeps the call safe even if the value ever contains spaces.
if ! docker inspect "$image" > /dev/null 2>&1; then
    echo "Retrieving ${image} from Docker Hub. Please wait..."
    docker pull "$image"
else
    echo "Found local instance of ${image}. Continuing build..."
fi
| true |
a39c3dd7b53adf17d64ffd6fc396a8f27c9034ae | Shell | glc12125/dev_setup | /ros-indigo/run.sh | UTF-8 | 953 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env bash
# Check args
if [ "$#" -ne 1 ]; then
#echo "usage: ./run.sh IMAGE_NAME"
#return 1
echo "Using default: robok/ros-indigo-vo:nvidia_N430_50"
IMAGE_NAME=robok/ros-indigo-vo:nvidia_N430_50
else
IMAGE_NAME=$1
fi
# Get this script's path
pushd `dirname $0` > /dev/null
popd > /dev/null
set -e
local_dev=$HOME/Development/
if [ ! -d $local_dev ]; then
mkdir -p $local_dev
fi
external_data_dir=/media/liangchuan/Samsung_T51/data/
#docker volume create --driver local --opt type=none --opt device=$local_dev --opt o=bind rosindigo_dev
xhost + # Allow any connections to X server
# Run the container with shared X11
docker run \
--privileged \
-e SHELL \
-e DISPLAY \
-e DOCKER=1 \
-w /Development/ \
-v $local_dev:/Development \
-v $external_data_dir:/Development/external_data \
-v "/tmp/.X11-unix:/tmp/.X11-unix:rw" \
-v /dev/bus/usb:/dev/bus/usb \
-v /dev/input:/dev/input \
-it $IMAGE_NAME $SHELL
| true |
b1fca1bf32b1a47d9c8db8aa4501830bfe1d9011 | Shell | RTHilton/minecraft_bedrock | /start.sh | UTF-8 | 981 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# Entrypoint for the Bedrock server container: extract (or upgrade) the
# server into the /data volume, preserving user configuration, then run it.

# Abort rather than extracting into an arbitrary directory when the data
# volume is missing.
cd /data || exit 1

# Check if we have never initialized this container
if [ ! -f /data/bedrock_server ]; then
    echo "Extracting server..."
    unzip /opt/bedrock_server.zip -d /data
else
    md5local=$(md5sum bedrock_server | cut -d' ' -f 1)
    md5zip=$(unzip -p /opt/bedrock_server.zip bedrock_server | md5sum | cut -d' ' -f 1)
    # Check if the zip file in the image is different than our local volume.
    # Quoting avoids a '[: unary operator expected' failure if either md5sum
    # pipeline produced an empty string.
    if [ "$md5local" != "$md5zip" ]; then
        # Server was updated
        # Backup configurations
        echo "Backing up configurations"
        mv server.properties server.properties.bak
        mv whitelist.json whitelist.json.bak
        mv permissions.json permissions.json.bak
        echo "Extracting server..."
        unzip -o /opt/bedrock_server.zip -d /data
        echo "Restoring configurations"
        mv server.properties.bak server.properties
        mv whitelist.json.bak whitelist.json
        mv permissions.json.bak permissions.json
    fi
fi

echo "Starting Server..."
LD_LIBRARY_PATH=. ./bedrock_server
| true |
c337cec1f04ece47ca66b7f085a8b5644f65acb4 | Shell | michalspondr/skripty | /lock.sh | UTF-8 | 1,169 | 3.484375 | 3 | [] | no_license | #!/bin/sh
#
# Put random graphics effect on screenshot and use this image as lock image for i3lock
# Effects are chosen so the readability is disabled
#
# Use it with e.g. xautolock. Add this to your ~/.profile:
# xautolock -time 10 -locker 'lock.sh' &
EFFECT_COUNT=10 # don't forget to increase it if you add new effects
FILENAME=/tmp/screenshot.png
import -window root $FILENAME
let "v = RANDOM % $EFFECT_COUNT"
case $v in
"0")
mogrify -scale 10% -scale 1000% $FILENAME
;;
"1")
mogrify -adaptive-blur 0x5 $FILENAME
;;
"2")
mogrify -adaptive-blur 0x6 $FILENAME
;;
"3")
mogrify -charcoal 11 -blur 0x3 $FILENAME
;;
"4")
mogrify -blur 0x3 -edge 15 $FILENAME
;;
"5")
mogrify -emboss 17 -blur 0x3 $FILENAME
;;
"6")
mogrify -implode 1 -motion-blur 0x8 -emboss 5 $FILENAME
;;
"7")
mogrify -posterize 8 -noise 5x5 $FILENAME
;;
"8")
mogrify -sketch 3x3 -emboss 7 -threshold 90 -blur 0x3 $FILENAME
;;
"9")
mogrify -monochrome -wave 8x8 $FILENAME
;;
*)
zenity --info --text "Selection out of range"
mogrify -scale 10% -scale 1000% $FILENAME
;;
esac
# Change this to use a different screen locker
i3lock -i $FILENAME
| true |
db38da9df1d3c54c4deb351389dd8e2000978a09 | Shell | dvst/wanna-build-canaima | /bin/sync.sh | UTF-8 | 3,109 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# -e: abort on any command failure; -E: make any ERR trap fire inside
# functions/subshells as well.
set -eE
# Byte-wise, locale-independent sorting/matching for the tools below.
LANG=C
# The archive name ($1) selects target dir, password file and sync source.
if [ -z "$1" ]
then
echo "Usage: $0 archive"
echo "e.g. $0 debian"
exit 1
fi
# START OF OPTIONS ######################################################
TARGET_BASE=/org/wanna-build/tmp/archive
TARGET="$TARGET_BASE/$1"
PASSWORD_BASE=/org/wanna-build/etc
PASSWORD_FILE="$PASSWORD_BASE/$1.rsync-password"
RSYNC_OPTIONS="--delete --delete-excluded -av"
MIRROR_EXCLUDES="--exclude=**/*.changes --exclude=**/installer-* --exclude=**/Packages.diff --exclude=**/Sources.diff --exclude=ChangeLog --exclude=**/Contents-* --exclude=**/Translation-* --exclude=**/*.bz2 --exclude=Packages --exclude=Sources --exclude=**/*.new" # the latter two because we only accept gziped files
MIRROR_OPTIONS="$MIRROR_EXCLUDES $RSYNC_OPTIONS"
# END OF OPTIONS ########################################################
mkdir -p "$TARGET"
# Locking may be bypassed by passing 'nolock' as the second argument.
if [ ! "$2" = "nolock" ]
then
# Do locking to avoid destroying other's views on an archive.
LOCKFILE="$TARGET/lock"
cleanup() {
rm -rf "$LOCKFILE"
}
# lockfile's -! flag inverts its exit status, so this branch is taken when
# the lock could NOT be acquired after 10 retries.
if lockfile -! -r 10 $LOCKFILE
then
echo "Sync failed: cannot lock $LOCKFILE, aborting."
exit 1
fi
# Remove the lock on any exit (signal 0 = shell exit).
trap cleanup 0
fi
# Handle the syncing.
# NOTE: the debian/debian-security branches reassign USER to the remote
# rsync account name used for authentication.
case $1 in
debian)
USER=cimarosa
BUILDD_QUEUE_OPTIONS="--include=Packages.gz --include=Sources.gz --include=**Release* --exclude=* $RSYNC_OPTIONS"
rsync --password-file "$PASSWORD_FILE" $MIRROR_OPTIONS $USER@ftp-master.debian.org::debian/dists/ "$TARGET/archive"
rsync --password-file "$PASSWORD_BASE/$1-buildd.rsync-password" $BUILDD_QUEUE_OPTIONS $USER@ftp-master.debian.org::buildd-sid/ "$TARGET/buildd-sid"
rsync --password-file "$PASSWORD_BASE/$1-buildd.rsync-password" $BUILDD_QUEUE_OPTIONS $USER@ftp-master.debian.org::buildd-experimental/ "$TARGET/buildd-experimental"
# Also sync the Maintainers and Uploaders files for consumption through the web interface.
rsync --password-file "$PASSWORD_FILE" $MIRROR_OPTIONS $USER@ftp-master.debian.org::debian/indices/Maintainers /org/buildd.debian.org/etc/Maintainers
rsync --password-file "$PASSWORD_FILE" $MIRROR_OPTIONS $USER@ftp-master.debian.org::debian/indices/Uploaders /org/buildd.debian.org/etc/Uploaders
;;
debian-security)
chmod 0700 "$TARGET"
USER=cimarosa
BUILDD_QUEUE_OPTIONS="--include=Packages.gz --include=Sources.gz --include=**Release* --exclude=* $RSYNC_OPTIONS"
rsync $MIRROR_OPTIONS $USER@security-master.debian.org::debian-security/dists/ "$TARGET/archive"
rsync --password-file "$PASSWORD_BASE/$1-buildd.rsync-password" $BUILDD_QUEUE_OPTIONS $USER@security-master.debian.org::buildd-wheezy/ "$TARGET/buildd-wheezy"
rsync --password-file "$PASSWORD_BASE/$1-buildd.rsync-password" $BUILDD_QUEUE_OPTIONS $USER@security-master.debian.org::buildd-squeeze/ "$TARGET/buildd-squeeze"
rsync --password-file "$PASSWORD_BASE/$1-buildd.rsync-password" $BUILDD_QUEUE_OPTIONS $USER@security-master.debian.org::buildd-lenny/ "$TARGET/buildd-lenny"
;;
debian-volatile)
rsync $MIRROR_OPTIONS volatile-master.debian.org::debian-volatile/dists/ "$TARGET/archive"
;;
backports)
rsync --password-file "$PASSWORD_FILE" $MIRROR_OPTIONS wbadm@backports-master.debian.org::debian-backports/dists/ "$TARGET/archive"
;;
debian-edu)
rsync $MIRROR_OPTIONS --exclude=woody/ ftp.skolelinux.no::skolelinux-dist/dists/ "$TARGET/archive"
;;
*)
echo "Sync target $1 not supported, aborting."
exit 1
;;
esac
| true |
479fce8713a956a8a1367b9979ce9a85691f7fbe | Shell | avijit1258/automate_the_painful_stuffs | /automate_running_deeplearning_model.sh | UTF-8 | 1,114 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Automates the per-week 2D deep-learning runs for the automated agricultural
# analysis course. The 2D model predicts crop production from a single week
# of field imagery (18 weeks total), and each week needs the same manual
# dance: copy that week's images from Field_1 and Nassar into the working
# folders, preprocess/train/test the model, file the results under the week's
# name, then clean everything up. Doing that 18 times by hand from the
# terminal is very painful; this loop does it unattended — start it, go to
# sleep, and all 18 weeks of results are waiting in the morning.
for week in {01..18}; do
	echo "$week"
	# Stage this week's imagery for both fields.
	cp -r "Fields_main/Field_1/$week/" Fields/Field_1
	cp -r "Fields_main/Nassar/$week/" Fields/Nassar
	# Fresh work directories for this run.
	mkdir results
	mkdir data
	# Preprocess, train, evaluate.
	python data_gen.py
	python training.py
	python results.py
	# Archive this week's results.
	mv results "result$week"
	mv "result$week" all_results
	# Tear down the staging so the next iteration starts clean.
	rm -r data
	rm -r "Fields/Field_1/$week/"
	rm -r "Fields/Nassar/$week/"
done
676502d86558719d4408c25c2f50c179599a6d47 | Shell | lemikegao/memoryDojo | /memoryDojo/Scripts/TP-update.sh | UTF-8 | 431 | 2.875 | 3 | [] | no_license | #! /bin/sh
TP=/usr/local/bin/TexturePacker
if [ "${ACTION}" = "clean" ]
then
# remove sheets - please add a matching expression here
rm -f ${PROJECT_DIR}/SpriteSheets/*.pvr.ccz
rm -f ${PROJECT_DIR}/SpriteSheets/*.pvr
rm -f ${PROJECT_DIR}/SpriteSheets/*.plist
rm -f ${PROJECT_DIR}/SpriteSheets/*.png
else
# create all assets from tps files
mkdir -p ${PROJECT_DIR}/SpriteSheets
${TP} *.tps
fi
exit 0 | true |
8a70e87ea8403d545995136486be9ec413f80a04 | Shell | AmeyKamat/dotFiles | /vim/vim-bootstrap.sh | UTF-8 | 564 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Script to install the vim environment (plug.vim + .vimrc + plugins).
PROMPT="[VIM]"

echo "${PROMPT} Setting up vim"
echo

echo "${PROMPT} Initialising vim directory..."
# -p keeps the script idempotent when the directories already exist
# (plain mkdir failed on reruns).
mkdir -p ~/.vim

echo "${PROMPT} Installing plug.vim..."
mkdir -p ~/.vim/autoload
curl -fLo ~/.vim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
echo "${PROMPT} plug.vim installation complete."

# The repo's .vimrc is copied (not symlinked) into place, so the message
# now matches what actually happens.
echo "${PROMPT} Copying .vimrc..."
cp ./vim/.vimrc ~/.vimrc

# Install all plugins declared in the .vimrc, then quit vim.
vim +PlugInstall +qall
echo "${PROMPT} Plugins installed."
echo
echo "${PROMPT} vim installation complete."
| true |
39ada469f24e4d838b809607b1433f4e675daeb9 | Shell | blinkfox/jpack-maven-plugin | /src/main/resources/linux/bin/start.sh | UTF-8 | 4,458 | 4.0625 | 4 | [
"Apache-2.0"
] | permissive | #! /bin/bash
#====================================================
# Description: shell script used to start the ${name} service.
# Files:
# 1. logs directory: runtime log directory of the project;
# 2. logs/nohup-xxx.out: records the background startup log;
#
# NOTE: this file is a Maven-filtered template — ${name}, ${jarName},
# ${vmOptions} and ${programArgs} are substituted at package time, while the
# escaped \$ sequences survive filtering and become shell variables.
#====================================================
# Project name
APPLICATION="${name}"
# Name of the bootable project jar
APPLICATION_JAR="${jarName}"
# Check whether the service is already running; if so, do not start it again.
# With a port argument ($1), only match the instance bound to that port.
if [[ -z "$1" ]]
then
pid=$(ps ax |grep -i '${jarName}' |grep java | grep -v grep | awk '{print $1}')
else
pid=$(ps ax |grep -i '${jarName}' |grep java | grep -i 'server.port='''${1}''''| grep -v grep | awk '{print $1}')
fi
if [[ "$pid" ]] ; then
echo "监测到 \${APPLICATION} 服务正在运行中,将不再重复启动... [running...]"
exit 1;
fi
# Absolute path of the bin directory
BIN_PATH=$(cd `dirname $0`; pwd)
# Enter the bin directory
cd `dirname $0`
# Go up one level to the project root directory
cd ..
# Capture the absolute path of the project root
# `pwd` runs the system command and captures its output
BASE_PATH=$(pwd)
# Absolute path of the project log directory
LOG_DIR=\${BASE_PATH}"/logs"
# Absolute path of the startup (nohup) log file
LOG_STARTUP_PATH="\${LOG_DIR}/nohup-${name}.out"
# Create the logs directory if it does not exist
if [[ ! -d "\${LOG_DIR}" ]]; then
mkdir "\${LOG_DIR}"
fi
# Current timestamp
# NOTE(review): the format '%Y-%m-%m' repeats the month and omits the day;
# '%Y-%m-%d' was probably intended — confirm before changing the template.
NOW_PRETTY=$(date +'%Y-%m-%m %H:%M:%S')
# Startup log buffer, accumulated below and printed once at the end
STARTUP_LOG="================================= \${NOW_PRETTY} =================================\n"
#==========================================================================================
# JVM Configuration
# -Xmx1024m: maximum JVM heap; tune per project, best kept equal to -Xms.
# -Xms1024m: initial JVM heap; equal to -Xmx so the JVM never re-allocates after GC.
# -Xmn512m: young-generation size. Total = young + old + permanent generation.
# The permanent generation is typically a fixed 64m, so a larger young generation
# shrinks the old generation; this strongly affects performance (Sun recommends 3/8 of the heap).
# -XX:MetaspaceSize=64m: class-metadata size; larger values delay Metaspace GC.
# -XX:MaxMetaspaceSize=320m: caps Metaspace growth so unbounded native-memory use
# cannot starve other processes.
# -XX:-OmitStackTraceInFastThrow: keep full stack traces for repeated exceptions.
#==========================================================================================
JAVA_OPT="${vmOptions}"
JAVA_OPT="\${JAVA_OPT} -XX:-OmitStackTraceInFastThrow"
PROGRAM_ARGS="${programArgs}"
#=======================================================
# Append the startup-related information to the log buffer
#=======================================================
# Service name
STARTUP_LOG="\${STARTUP_LOG}应用服务名称: \${APPLICATION}\n"
# Jar name
STARTUP_LOG="\${STARTUP_LOG}应用服务 jar 包名称: \${APPLICATION_JAR}\n"
# Project root directory
STARTUP_LOG="\${STARTUP_LOG}应用服务根目录: \${BASE_PATH}\n"
# bin directory
STARTUP_LOG="\${STARTUP_LOG}应用服务 bin 目录: \${BIN_PATH}\n"
# Log file path
STARTUP_LOG="\${STARTUP_LOG}应用服务 log 目录: \${LOG_STARTUP_PATH}\n"
# JVM configuration
STARTUP_LOG="\${STARTUP_LOG}应用服务 JVM 配置: \${JAVA_OPT}\n"
# Program arguments
STARTUP_LOG="\${STARTUP_LOG}应用服务 参数配置: ${programArgs}\n"
# Full startup command
STARTUP_LOG="\${STARTUP_LOG}应用服务的启动命令: nohup java \${JAVA_OPT} -jar \${BASE_PATH}/\${APPLICATION_JAR} \${PROGRAM_ARGS} > \${LOG_STARTUP_PATH} 2>&1 &\n"
#======================================================================
# Run the startup command: launch the project in the background and send
# its output to the logs folder under the project root
#======================================================================
nohup java \${JAVA_OPT} -jar \${BASE_PATH}/\${APPLICATION_JAR} \${PROGRAM_ARGS} > \${LOG_STARTUP_PATH} 2>&1 &
# Process ID of the launched service
PID=$(ps -ef | grep "\${APPLICATION_JAR}" | grep -v grep | awk '{ print $2 }')
STARTUP_LOG="\${STARTUP_LOG}应用服务(进程ID: \${PID})正在后台启动中,请稍后一段时间访问本服务.\n"
STARTUP_LOG="\${STARTUP_LOG}=======================================================================================\n"
# Print the accumulated startup log
echo -e \${STARTUP_LOG}
exit 0
2ee92d6a51f19eb8cc7ca640b41f0a0e4781f688 | Shell | ouwenpf/Bash-scripts | /work/complex.sh | UTF-8 | 1,749 | 2.921875 | 3 | [] | no_license | #!/bin/bash
#
logfile="/home/data/logs/nginx"
if [ -f $logfile/WR_pay.log ];then
>$logfile/log_tj/complex.log
echo '====支付请求IP============支付ID====================体现请求IP=================登录请求IP==================移动设备端==============='>> $logfile/log_tj/complex.log
a=(`egrep 'GET /pay/v1.alipay/index.html' $logfile/WR_pay.log|awk -F '["]' '{print $8}'|awk -F ',' '{print $1}'|awk 'OFS="-" {a[$1]++}END{for(i in a)print a[i],i}'|sort -rn|head -20`)
b=(`egrep 'GET /pay/v1.alipay/index.html' $logfile/WR_pay.log|egrep -o '(%2522uid%2522:.*7D)|(%22uid%22:.*})|(psub=1&uid=.*&version)'|sed 's/psub=/%/g'|sed 's/=/%/g'|awk -F '[%:}&]' '{print $4}'|awk 'OFS="-" {a[$1]++}END{for(i in a)print a[i],i}'|sort -rn|head -20`)
c=(`egrep -v 'GET /test.html' $logfile/WR_tx.log|awk -F '"' '{print $8}'|awk -F ',' '{print $1}'|awk 'OFS="-" {a[$1]++}END{for(i in a)print a[i],i}'|sort -rn|head -20`)
d=(`egrep 'GET /routertwo/serverlist2' $logfile/WR_game_api.log|awk -F '"' '{print $8}'|awk -F ',' '{print $1}'|awk 'OFS="-" {a[$1]++}END{for(i in a)print a[i],i}'|sort -rn|head -20`)
e=(`egrep 'GET /pay/v1.alipay/index.html' $logfile/WR_pay.log|grep -o '(.*)'|awk -F 'AppleWebKit' '{print $1}'|sed 's/ u;//gi'|sed 's/zh-cn;//g'|sed 's/cpu//gi'|sed 's/like//gi'|sed 's/;//g'|awk -F 'Build' '{print $1}'|awk -F '[ ]+' 'OFS="-" {print $2,$3,$4,$5}'|awk 'OFS="-" {a[$0]++}END{for(i in a)print a[i],i}'|sort -rn|head -20`)
for i in `seq 0 $[${#a[*]}-1]`
do
echo -e "${a[$i]} ${b[$i]} ${c[$i]} ${d[$i]} ${e[$i]}" |awk '{printf "%-25s%-25s%-25s%-25s%-25s\n",$1,$2,$3,$4,$5}' >> $logfile/log_tj/complex.log
done
less $logfile/log_tj/complex.log
else
echo "分析的文件不存在"
exit
fi
| true |
1afa8d673003bac9ae481334a42df293731d1367 | Shell | kdong2395/nanoNOMe | /analysis/data_parse/181108_gm12878_pool_mbed.sh | UTF-8 | 465 | 3 | 3 | [] | no_license | #!/bin/bash
# Pool GM12878 methylation-by-read calls: for each modification type (cpg,
# gpc), merge the old and new per-read bed files, coordinate-sort the result,
# then write a bgzipped bed with a tabix index.
root=/dilithium/Data/Nanopore/projects/nomeseq/analysis
outdir=$root/pooled/methylation/methbyread_all
old_dir=$root/pooled/methylation/methbyread_old
new_dir=$root/gm12878/ngmlr/mbed
samp=GM12878
for mod in cpg gpc; do
	echo $mod
	# Locate the two input files. These stay unquoted in the gunzip call
	# below on purpose: find may return more than one match.
	old_calls=$(find $old_dir -name "*$samp*$mod*gz")
	new_calls=$(find $new_dir -name "181102*$samp*$mod*gz")
	out=$outdir/$samp.$mod.pooled.meth.bed.gz
	echo $out
	gunzip -c $old_calls $new_calls |
		sort -k1,1 -k2,2n |
		bgzip > $out
	tabix -p bed $out
done
| true |
877d725010c5c6cec0dbf4e713eb0b2adb30c186 | Shell | ricleite/IceStreamingService | /src/streamer_ffmpeg_hls_dash.sh | UTF-8 | 566 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Streamer needs to start an ffmpeg instance;
# for the sake of flexibility, it executes this shell script, and
# this shell script starts ffmpeg with user arguments.
# It's somewhat ugly, but it beats having to parse ffmpeg options in code
# and it's better than passing everything as an environment variable.

# $1 = video file path
# $2 = HLS/DASH end point info in "transport://ip:port/path" format
#      (e.g rtmp://127.0.0.1:8080/hls_app/stream)

# Fail fast with a usage hint instead of a cryptic ffmpeg error.
if [ "$#" -ne 2 ]; then
    echo "usage: $0 <video-file> <endpoint-url>" >&2
    exit 1
fi

# Quotes protect file paths and URLs containing spaces or shell
# metacharacters (the originals were unquoted).
ffmpeg -re -i "$1" -codec:v libx264 -vprofile baseline -g 30 \
       -codec:a aac -strict -2 \
       -f flv "$2"
| true |
9af124159b8bce4601724e02a04ce1ea9b9955c1 | Shell | SamuelLorrain/ProgrammingNotes | /sh_tuto/test.sh | UTF-8 | 1,508 | 3.640625 | 4 | [] | no_license | #!/usr/bin
# Tutorial notes on shell test expressions.
# MIND THE SPACES in tests: `[` is a command, so every operand and the
# closing `]` must be separate words.
# See `man test` for the other test operators.

# String equality; expansions are quoted so an empty or unset variable
# cannot collapse the test into invalid syntax.
if [ "$foo" = "bar" ]
then
	echo "if"
elif [ "$foo" = "baz" ];then
	echo "elif"
else
	echo "else"
fi
if [ "$foo" = "toast" ]; then
	echo "then is on the same line. Need ;"
fi
# Integer comparisons
if [ "$X" -lt "0" ];then
	echo "X < 0"
elif [ "$X" -gt "0" ];then
	echo "X > 0"
elif [ "$X" -eq "0" ];then
	echo "X == 0"
elif [ "$X" -ge "10" ];then
	echo "X >= 10"
elif [ "$X" -le "10" ];then
	echo "X <= 10"
fi
# String comparisons
if [ "$Y" = "toast" ];then
	echo "Y == 'toast'"
elif [ "$Y" != "toast" ];then
	echo "Y != 'toast'"
fi
# Other comparisons. Note that file-test operators (-d, -r, -w, …) go
# BEFORE their operand; the original `[ "$Z" -d ]` form is a syntax error.
if [ -n "$Z" ];then
	echo "Z est un string de longueur > 0"
elif [ -f "$Z" ];then
	echo "Z est le path d'un fichier existant"
elif [ -x "$Z" ];then
	echo "Z est le path d'un executable"
elif [ "$Z" -nt "/etc/password" ]; then
	echo "Z est un fichier plus récent que /etc/password"
elif [ "$Z" -ot "/etc/password" ]; then
	echo "Z est un fichier plus ancien que /etc/password"
elif [ -d "$Z" ]; then
	echo "Z un path vers un directory"
elif [ -r "$Z" ]; then
	echo "Z un path vers un fichier readable"
elif [ -w "$Z" ]; then
	echo "Z un path vers un fichier writable"
fi
# Shortcut if
[ "$X" -ne 0 ] && echo "X != 0" || echo "X == 0"
# while test: prompt repeatedly, stop on an empty line. The original
# `while [ -n "$X"]` was missing the space before `]`, and its inner
# test was inverted relative to its own comment.
while true; do
	echo "Enter some text (RETURN to quit)"
	read X
	if [ -n "$X" ]; then # only echo when the string is non-empty
		echo "You said: $X"
	else
		break
	fi
done
| true |
1fff72473a4facc6d1cf55528f5b2c25df7d9bd9 | Shell | LLNL/scr | /examples/test_cleanup.sh | UTF-8 | 249 | 3.28125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# SCR cleanup script
# Usage:
#   ./cleanup.sh [file]
# Removes this user's SCR cache directories from /dev/shm and, when an
# argument is given, removes that path as well.
# (Note: despite the original usage text, the argument itself is removed;
# it is not parsed as a list of items.)
if [ $# -gt 1 ]; then
  echo "Usage: $0 [output]"
  exit 1
fi
# ${USER:?} aborts if USER is unset/empty so this can never expand to
# "rm -rf /dev/shm//scr.*"; the glob itself must remain unquoted.
rm -rf /dev/shm/"${USER:?}"/scr.*/
if [ $# -eq 1 ]; then
  # Quote the argument and use -- so paths containing spaces or a
  # leading dash are handled safely.
  rm -rf -- "$1"
fi
exit 0
| true |
a7261c53ef19f4a0d814b8b75ef440c2dde0a198 | Shell | jaspotsangbam/core-web | /docker/storage.sh | UTF-8 | 1,593 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# CI helper: upload the karma_html test report to Google Cloud Storage
# under both the job's commit folder and its branch folder.
# Required env vars: GOOGLE_CREDENTIALS_BASE64, GOOGLE_CREDENTIALS_FILE_PATH,
# GOOGLE_STORAGE_JOB_COMMIT_FOLDER, GOOGLE_STORAGE_JOB_BRANCH_FOLDER.
bash ./testing/printStoragePaths.sh
ignoring_return_value=$? # diagnostic script; its failure is deliberately ignored
outputFolder="/usr/src/app/karma_html"
buckedProtocol="gs://"
# Do we have service account permissions
if [ -z "${GOOGLE_CREDENTIALS_BASE64}" ]
then
    echo ""
    echo "======================================================================================"
    echo " >>> 'GOOGLE_CREDENTIALS_BASE64' environment variable NOT FOUND <<<"
    echo "======================================================================================"
    exit 1
fi
# Validating if we have something to copy
if [ -z "$(ls -A $outputFolder)" ]; then
   echo ""
   echo "================================================================"
   echo " >>> EMPTY [${outputFolder}] FOUND <<<"
   echo "================================================================"
   exit 1
fi
# Decode the base64 service-account key to disk and authenticate gcloud with it.
echo $GOOGLE_CREDENTIALS_BASE64 | base64 -d - > $GOOGLE_CREDENTIALS_FILE_PATH
echo ""
echo " >>> Pushing reports and logs to [${buckedProtocol}${GOOGLE_STORAGE_JOB_COMMIT_FOLDER}] <<<"
echo " >>> Pushing reports and logs to [${buckedProtocol}${GOOGLE_STORAGE_JOB_BRANCH_FOLDER}] <<<"
echo ""
gcloud auth activate-service-account --key-file="${GOOGLE_CREDENTIALS_FILE_PATH}"
gsutil -m -q cp -a public-read -r ${outputFolder} ${buckedProtocol}${GOOGLE_STORAGE_JOB_COMMIT_FOLDER}
# When the bucket has the branch name we need to clean up the bucket first
gsutil -q rm ${buckedProtocol}${GOOGLE_STORAGE_JOB_BRANCH_FOLDER}/**
gsutil -m -q cp -a public-read -r ${outputFolder} ${buckedProtocol}${GOOGLE_STORAGE_JOB_BRANCH_FOLDER}
| true |
6ef6a965927a2190a6d6dd74575b8f8fbdd01035 | Shell | allenbross-wf/Arelle-1 | /scripts/runESMAtests.sh | UTF-8 | 2,138 | 2.609375 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"MIT"
] | permissive | #!/bin/sh
# Run a set of ESEF sample filings published by ESMA through Arelle's
# ESMA validation plugin and capture the validation logs.
# NOTE: all paths below are user-specific; adjust before reuse.
rm -f /users/hermf/temp/ESMA-conf-*
OUTPUTLOGFILE=/users/hermf/temp/ESMA-conf-log.txt
OUTPUTERRFILE=/users/hermf/temp/ESMA-conf-err.txt
ARELLECMDLINESRC=/users/hermf/Documents/mvsl/projects/arelle/arelleproject/src/arelleCmdLine.py
PYTHON=python3.5
# One URL per sample filing.
for f in \
https://www.esma.europa.eu/sites/default/files/bzwbk_2016.zip \
https://www.esma.europa.eu/sites/default/files/bonetherapeutics_2016.zip \
https://www.esma.europa.eu/sites/default/files/comarch_2016.zip \
https://www.esma.europa.eu/sites/default/files/enel_2016.zip \
https://www.esma.europa.eu/sites/default/files/erstegroup_2016.zip \
https://www.esma.europa.eu/sites/default/files/ferrovial_2016.zip \
https://www.esma.europa.eu/sites/default/files/generali_2016.zip \
https://www.esma.europa.eu/sites/default/files/genomicvision_2016.zip \
https://www.esma.europa.eu/sites/default/files/imerys_2016.zip \
https://www.esma.europa.eu/sites/default/files/komercnibanka_2016.zip \
https://www.esma.europa.eu/sites/default/files/helaba_2016.zip \
https://www.esma.europa.eu/sites/default/files/leoexpress_2016.zip \
https://www.esma.europa.eu/sites/default/files/molgroup_2016_0.zip \
https://www.esma.europa.eu/sites/default/files/nelja_2016.zip \
https://www.esma.europa.eu/sites/default/files/nationalbankofgreece_2016.zip \
https://www.esma.europa.eu/sites/default/files/ontex_2016.zip \
https://www.esma.europa.eu/sites/default/files/orangepolska_2016.zip \
https://www.esma.europa.eu/sites/default/files/siemens_2016.zip \
https://www.esma.europa.eu/sites/default/files/ucb_2016.zip \
https://www.esma.europa.eu/sites/default/files/uniqa_2016.zip \
https://www.esma.europa.eu/sites/default/files/upm_2016.zip \
https://www.esma.europa.eu/sites/default/files/valmet_2016.zip
do
echo file: $f
# Validate with the esma disclosure system; the taxonomies are supplied
# as local packages. Note that "2>" truncates OUTPUTERRFILE on every
# iteration, so only the last filing's stderr survives the loop.
$PYTHON $ARELLECMDLINESRC --file $f --plugins validate/ESMA --disclosureSystem esma --validate --packages '/Users/hermf/downloads/IFRST_2017-03-09.zip|/Users/hermf/Documents/mvsl/projects/ESMA/ESMA_ESEF_Taxonomy_Draft.zip' --logFile "$OUTPUTLOGFILE" 2> "$OUTPUTERRFILE"
done
| true |
cc0a04563058e84f21bb4c9b39206d771966edd6 | Shell | hofmeist/schism-setups | /arctic/mistral/run_scaling.sh | UTF-8 | 2,510 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# SLURM batch script: run one SCHISM Arctic scaling experiment.
# Usage: sbatch run_scaling.sh <run-id>; results are staged under
#   /work/gg0877/hofmeist/arctic/scaling/<run-id>.
#SBATCH --job-name=arcticscale   # Specify job name
#SBATCH --comment="SCHISM compiled with intel17 and intelmpi"
#SBATCH --partition=compute2     # Specify partition name
### --ntasks=192
#SBATCH --ntasks=252
#SBATCH --ntasks-per-node=36
#SBATCH --time=00:50:00          # Set a limit on the total run time
#SBATCH --wait-all-nodes=1       # start job, when all nodes are available
#SBATCH --mail-type=FAIL         # Notify user by email in case of job failure
#SBATCH --mail-user=richard.hofmeister@hzg.de  # Set your e−mail address
#SBATCH --account=gg0877         # Charge resources on this project account
#SBATCH --output=log.o           # File name for standard output
#SBATCH --error=log.e            # File name for standard error output
# for n-tasks=36 and hourly output: 24h in 0.25 h
# ntasks=  36: 24h in 794s
# ntasks=  72: 24h in 413s
# ntasks= 144: 24h in 238s
# ntasks= 252: 24h in 145s
# ntasks= 540: 24h in  90s
# ntasks=1080: 24h in  69s
## use Intel MPI
module load intel/17.0.1
module load intelmpi/5.1.3.223
# NOTE(review): "/use/lib64" looks like a typo for "/usr/lib64" — confirm.
export I_MPI_PMI_LIBRARY=/use/lib64/libmpi.so
module load python/2.7-ve0
# First CLI argument names this run; its output directory is recreated
# from scratch and symlinked as ./outputs for SCHISM.
id="$1"
outpath=/work/gg0877/hofmeist/arctic/scaling/$id
mkdir -p $outpath
rm -rf $outpath/*
mkdir -p $outpath/outputs
rm -f outputs
ln -sf $outpath/outputs outputs
# set runtime and get prevyear
timestep=240
rnday=61 # (1 day in month 2012-03)
ihfskip=21960
nspool=360
# Instantiate param.in from the template by substituting the MY_* markers.
cp param.default param.in
sed -i -- "s/MY_RNDAY/$rnday/g" param.in
sed -i -- "s/MY_IHFSKIP/$ihfskip/g" param.in
sed -i -- "s/MY_NSPOOL/$nspool/g" param.in
sed -i -- "s/MY_HOTOUT_WRITE/$ihfskip/g" param.in
sed -i -- "s/MY_DT/$timestep/g" param.in
# run the model
# --distribution=block:cyclic bind tasks to physical cores
# Pre-create the station output files SCHISM appends to.
for i in {1..9} ; do
  touch outputs/staout_${i}
done
# disable ramps here
sed -i -- 's/MY_NRAMP_SS/0/g' param.in
sed -i -- 's/MY_NRAMPWIND/0/g' param.in
sed -i -- 's/MY_NRAMPBC/0/g' param.in
sed -i -- 's/MY_NRAMP_/0/g' param.in
sed -i -- 's/MY_ICELEV/0/g' param.in
sed -i -- 's/MY_IHOT/2/g' param.in
# copy parameter namelists
cp param.in $outpath
cp bctides.in $outpath
cp vgrid.in $outpath
cp fabm.nml $outpath 2> /dev/null
cp ice.nml $outpath 2> /dev/null
# Disable core dumps, then launch the model under srun.
ulimit -c 0
srun -l --propagate=STACK,CORE --cpu_bind=verbose,cores --distribution=block:cyclic ~/schism/svn-code/trunk/icebuild/bin/pschism
#srun -l --propagate=STACK --cpu_bind=verbose,cores --distribution=block:cyclic ~/schism/svn-code/trunk/pebuild/bin/pschism
# move log files
mv fort.* mirror.out $outpath
# wait until all nodes/file-actions are settled
wait
| true |
846fd24b3f45926c6f910f22f823a5212081bdf3 | Shell | amosbird/serverconfig | /scripts/showstalonetray.sh | UTF-8 | 918 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env bash
# Show/hide a stalonetray system-tray window under the bspwm window manager.
# Toggle the tray: hide it when it is currently focused, otherwise pull it
# onto the focused desktop as a floating node stacked above everything.
toggle() {
    workspace=$(bspc query -D -d focused --names)
    # /tmp/stalonetray is read as the tray window's node id (it is removed
    # below on restart; presumably written by an external hook — confirm).
    id=$(cat /tmp/stalonetray)
    if [[ -z $id ]]; then
        exit 0
    fi
    if bspc query -N -n focused | grep -q "$(bspc query -N -n "$id")"; then
        # The tray window is the focused node: hide it again.
        bspc node "$id".window -g hidden -f
        exit 0
    else
        # Bring the tray to the current desktop and reveal it.
        bspc node "$id" --to-desktop "$workspace"
        bspc node "$id" -t floating
        bspc node "$id".window -g hidden=off -f
        bspc node "$id" -l above
    fi
}
# If no stalonetray window exists yet, start one (48px icons on the
# abt480 host, 96px elsewhere); with any CLI argument, toggle it right
# after startup. If a tray already runs, just toggle it.
if ! xwininfo -name "stalonetray" >/dev/null 2>&1; then
    rm -f /tmp/stalonetray
    case $(hostname) in
        abt480)
            stalonetray --icon-size=48 --kludges=force_icons_size &>/tmp/stalonetray.log &
            ;;
        *)
            stalonetray --icon-size=96 --kludges=force_icons_size &>/tmp/stalonetray.log &
            ;;
    esac
    if (($#)); then
        sleep 0.2
        toggle
    fi
else
    toggle
fi
| true |
1644b5b958dc81cef96cab1a04b2f77ffe535d4b | Shell | pseudomind/neuik-go | /scripts/update_neuik_srcs.sh | UTF-8 | 1,335 | 3.796875 | 4 | [
"ISC"
] | permissive | #!/usr/bin/env bash
#------------------------------------------------------------------------------#
# This update script is included only to simplify the process of pulling the   #
# current libneuik sources and header files from the upstream repository. This #
# script should never need to be run by a developer using `neuik-go`.          #
#------------------------------------------------------------------------------#
# Set CLEAN=1 in the environment to only delete the generated files
# without re-fetching anything.
thisDir=`pwd`
dirBase=`basename $thisDir`
# Refuse to run anywhere but inside the `scripts` directory, since all
# paths below are relative to it.
if [ ! "$dirBase" = 'scripts' ]; then
	echo 'This script should only be run within the `scripts` directory;'\
		'Aborting.'
	exit
fi
if [ ! "$CLEAN" = '1' ]; then
	echo 'Pulling latest source/header files from upstream NEUIK repository...'
fi
echo 'Removing copies of old copies of C source & header files'
rm -f ../neuik/*.c
rm -f ../neuik/include/*.h
echo 'Deleting the existing subproject folder'
if [ -d 'subproject' ]; then
	rm -rf 'subproject'
fi
# Cleanup-only mode stops here.
if [ "$CLEAN" = '1' ]; then
	exit
fi
echo 'Creating a new subproject folder'
mkdir 'subproject'
echo 'Getting a copy of the latest NEUIK sources'
cd subproject
git clone https://github.com/pseudomind/neuik.git
echo 'Copying over the latest NEUIK C source & header files'
cp neuik/lib/*.c ../../neuik
cp neuik/include/*.h ../../neuik/include
cd ..
# Remove the scratch checkout again.
if [ -d 'subproject' ]; then
	rm -rf 'subproject'
fi
| true |
2a9a356277ee6b277af963eb7ca08bae3ca45c27 | Shell | DKrul/cloudstackOps | /xenserver_post_empty_script.sh | UTF-8 | 2,363 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env bash
echo "This is the post_empty script you could customise."
#echo "Downgrading openvswitch RPM to the XenServer default"
rpm -Uvh http://10.200.10.10/software/xenserver/openvswitch-1.4.6-143.9926.i386.rpm --force --nodeps

echo "Applying patches"
HOST_UUID=$(xe host-list name-label=${HOSTNAME} --minimal)

# Select the service pack matching the installed XenServer release.
if grep -q "6.5" /etc/redhat-release; then
    SERVICE_PACK_PATCH=XS65ESP1
fi
if grep -q "6.2" /etc/redhat-release; then
    SERVICE_PACK_PATCH=XS62ESP1
fi

# First apply the service pack, unless this host already has it.
if xe patch-list name-label=${SERVICE_PACK_PATCH} params=hosts --minimal | tr ',' '\n' | grep -q ${HOST_UUID}; then
    echo Service Pack ${SERVICE_PACK_PATCH} is already installed, skipping.
else
    echo Installing ${SERVICE_PACK_PATCH}...
    PATCH_UUID=$(xe patch-list name-label=${SERVICE_PACK_PATCH} | grep uuid | sed -e 's/^.*: //g')
    if [ ${PATCH_UUID} ]; then
        xe patch-apply uuid=${PATCH_UUID} host-uuid=${HOST_UUID}
    fi
fi

# Then apply every other available patch that is not yet on this host.
XEN_ALL_PATCHES=$(xe patch-list params=name-label --minimal | tr ',' '\n' )
XEN_INSTALLED_PATCHES=$(xe patch-list hosts:contains=${HOST_UUID} params=name-label --minimal | tr ',' '\n' )
for patch in ${XEN_ALL_PATCHES}; do
    echo "Checking patch " ${patch}
    # grep -q replaces the original "2>&1 >/dev/null", which silenced
    # stdout only after stderr had already been pointed at the terminal.
    if echo ${XEN_INSTALLED_PATCHES} | grep -q ${patch}; then
        echo Patch ${patch} is already installed, skipping.
    else
        echo Installing $patch...
        PATCH_UUID=$(xe patch-list name-label=${patch}| grep uuid | sed -e 's/^.*: //g')
        if [ ${PATCH_UUID} ]; then
            xe patch-apply uuid=${PATCH_UUID} host-uuid=${HOST_UUID}
        fi
    fi
done

echo "Upgrading drivers"
yum -y install bnx2x-* fnic* qla2* glnic* qlge* tg3* openvswitch-modules-xen*
# BUG FIX: the original tested `$? -eq 0` here (and twice below), which
# aborted the script when yum SUCCEEDED; failure is the non-zero case.
if [ $? -ne 0 ]; then
    echo "Yum command returned non-zero"
    exit 1
fi

SYSTEM_HARDWARE=$(dmidecode -s system-product-name | grep -v "#")
if [[ "${SYSTEM_HARDWARE}" == "ProLiant DL380 G7" ]]; then
    echo "Skip HPSA on HP ProLiant DL380 G7 or else the box won't boot"
else
    yum -y install hpsa*
    if [ $? -ne 0 ]; then
        echo "Yum command returned non-zero"
        exit 1
    fi
fi

yum -y upgrade nicira-ovs-hypervisor-node
if [ $? -ne 0 ]; then
    echo "Yum command returned non-zero"
    exit 1
fi | true |
b113117526e03b5ca0dd9518a7ee2887fbb1ead5 | Shell | fivestars/git-hooks | /included/hooks/pre-commit/jira-protect-branch | UTF-8 | 1,704 | 4.21875 | 4 | [] | no_license | #!/usr/bin/env bash
# Git pre-commit hook: refuse accidental direct commits to protected branches.
set -eu -o pipefail

: <<DESC
Prevent accidental commits to protected branches
DESC

# Get our useful functions (be sure to provide lib path as source argument)
# shellcheck source=included/lib/jira.sh
. "$(dirname "${BASH_SOURCE[@]}")/../../lib/jira.sh" "$(dirname "${BASH_SOURCE[@]}")/../../lib"

: <<HELP
Mark branches as protected by adding them to a ${c_value}.protected${c_reset} file in the top-level
repository directory. By default, only ${c_value}master${c_reset} is protected. This works well if
run as a precursor to ${c_value}pre-commit/jira-branch-name${c_reset}.
HELP

# NOTE: commit_in_progress, is_protected_branch, move_to_branch, the
# jira_* helpers and the c_* colour variables are not defined in this
# file; presumably they come from the sourced jira.sh/lib above — confirm.
if ! commit_in_progress; then
    printf "${c_action}%s${c_reset}\\n" "Not a new commit, nothing to be done"
    exit 0
fi

branch=$(git symbolic-ref --short HEAD)
if is_protected_branch "$branch"; then
    # Ask for explicit confirmation before allowing the commit through.
    printf "${c_prompt}%s${c_value}%s${c_prompt}%s${c_reset}" "Do you really want to commit directly to protected branch " "$branch" "? ([y]es/[n]o): "
    read -r response
    case $response in
        yes|y)
            printf "${c_action}%s${c_value}%s${c_reset}\\n" "Committing to protected branch " "$branch"
            ;;
        *)
            # Anything but yes: divert this commit onto a new JIRA branch.
            printf "${c_action}%s${c_reset}\\n" "Committing to new branch"
            if [[ "${branch}" == "master" ]] && ! git rev-parse --verify --quiet master &>/dev/null; then
                # This appears to be a brand new repository and we're commiting our first commit.
                # Go ahead and create the branch so we can have a base for our new feature branch.
                git checkout -b master &>/dev/null
            fi
            move_to_branch "$(jira_ensure_conforming_branch_name "$(jira_get_new_branch_name)")" ;;
    esac
fi
| true |
ff8dcd1f1d7244167da2af7711ec8f1f76e6fe4c | Shell | astroumd/astromake | /rc/casa.sh | UTF-8 | 2,036 | 3.390625 | 3 | [] | no_license |
# Although the official way is to untar a casa tar ball and either
# use the absolute path path_to_casa/casapy, or add path_to_casa to
# your $PATH, we do a little more here:
#    set CASAPATH to the root directory ($a_root) [note CASA has a quad-convention]
#    add $a_root to $PATH
#    add $a_root/lib/casapy/bin to $PATH (version 4.2.x and below)
#    add $a_root/lib/casa/bin to $PATH
#    add $a_root/lib to $LD_LIBRARY_PATH
#
# See also https://help.nrao.edu/index.php?/Tickets/Ticket/View/4777
#
dir=$ASTROMAKE/opt/casa
# a_version is an astromake variable
# a_root is used in this script
unset a_root
# Pick the CASA root: an explicit a_version wins, then the first line of
# the VERSIONS file, then the version recorded in the status file.
if [ ${#a_version[@]} -gt 0 ]; then
    a_root=$dir/${a_version}
elif [ -e $dir/VERSIONS ]; then
    version=`head -1 $dir/VERSIONS`
    a_root=$dir/${version}
elif [ -e $dir/`cat $ASTROMAKE/status/casa` ]; then
    a_root=$dir/`cat $ASTROMAKE/status/casa`
fi
if [ -e ${a_root} ]; then
    # this adds commands like carmafiller and ipython
    if [ -d ${a_root}/lib/casapy/bin ]; then
        PATH=${a_root}/lib/casapy/bin:$PATH
    elif [ -d ${a_root}/lib/casa/bin ]; then
        PATH=${a_root}/lib/casa/bin:$PATH
    fi
    # BUG FIX: the two branches below were swapped in the original, which
    # clobbered any pre-existing LD_LIBRARY_PATH and, when it was empty,
    # left a trailing ":" (an implicit "." search entry).
    if [ -z "${LD_LIBRARY_PATH}" ]; then
        export LD_LIBRARY_PATH=${a_root}/lib
    else
        export LD_LIBRARY_PATH=${a_root}/lib:${LD_LIBRARY_PATH}
    fi
    # add the main thing (skip a_root/bin for now, they are symlinks)
    PATH=${a_root}/bin:${a_root}:${PATH}
    if [ -d ${a_root}/etc/carta/notyet ]; then
        echo Warning: loading CARTA
        # NOTE(review): "path=(...)" is csh/zsh syntax; in bash this only
        # creates an array named "path" that nothing reads — confirm intent.
        path=(${a_root}/etc/carta/bin ${path})
        export LD_LIBRARY_PATH=$a_root/etc/carta/lib:$LD_LIBRARY_PATH
    fi
    hash -r
    # setenv CASADATA $a_root/data    # this appears useless, use CASAPATH
    # CASAPATH is a 4 word env var that is set when casapy starts up,
    #     root_dir xxx yyy zzz
    # CASAROOT is our astromake invention
    export CASAPATH="${a_root} linux socorro `hostname`"
    export CASAROOT=${a_root}
else
    echo BAD ${a_root}
fi

#unset dir
#unset a_root
| true |
0124c26e12cf2a03ba7a1220cdc6a8f92a0f6609 | Shell | TipsyPixie/dotfiles | /git/git-prune-branches | UTF-8 | 284 | 2.9375 | 3 | [] | no_license | #! /usr/bin/env sh
# shellcheck disable=SC1090
# Source the sibling check-git helper (presumably verifies we are inside
# a usable git checkout — see that script).
. "$(dirname "$0")/check-git"
# Refresh all remotes and drop remote-tracking refs deleted upstream.
git fetch --all --prune || exit 1
# For every local branch whose upstream tracking state is "[gone]" (its
# tracked remote branch no longer exists), force-delete the local branch.
git for-each-ref --format '%(refname:short) %(upstream:track)' | grep -E ' \[gone\]$' | cut -f 1 -d ' ' | while read -r GONE_BRANCH; do git branch -D "$GONE_BRANCH"; done
| true |
e4696ea04ed41d95f2619edeed931586d113125c | Shell | andreibaranouski/misc | /centos_x64--100_01--mobile_functional_android.conf | UTF-8 | 2,818 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# Jenkins build step: build sync_gateway and LiteServAndroid from source,
# start LiteServ in an Android emulator, then run the cblite-tests suite
# against it. Relies on Jenkins-provided variables: WORKSPACE,
# sync_gatewa_branch, LiteServ_branch.
#PLATFORM=CentOS_release_5.8
#export PATH=/usr/local/bin:$PATH
export ANDROID_HOME=/home/jenkins/android-studio/sdk/
export PATH=$PATH:$ANDROID_HOME/tools:$ANDROID_HOME/platform-tools
# A bare `export` prints all exported variables (diagnostic output only).
export
/sbin/ifconfig
export DISPLAY=:1
# Restart the emulator from a clean slate.
killall emulator64-arm
emulator64-arm -avd test -netspeed full -netdelay none &
AUT_DIR=${WORKSPACE}/app-under-test
#if [[ -e ${AUT_DIR} ]] ; then rm -rf ${AUT_DIR} ; fi
rm -rf ${WORKSPACE}/*
mkdir -p ${AUT_DIR}
echo "#-------------------------------------------- build sync_gateway"
export GOROOT=$HOME/go
export PATH=$PATH:$GOROOT/bin
cd ${AUT_DIR}
git clone https://github.com/couchbase/sync_gateway.git
cd ${AUT_DIR}/sync_gateway
# NOTE(review): the parameter is spelled "sync_gatewa_branch" (sic);
# confirm it matches the Jenkins job parameter name.
git checkout ${sync_gatewa_branch}
git submodule init
git submodule update
./build.sh
echo "#-------------------------------------------- build LiteServAndroid"
cd ${AUT_DIR}
git clone https://github.com/couchbaselabs/LiteServAndroid.git
cd ${AUT_DIR}/LiteServAndroid
git checkout ${LiteServ_branch}
echo "sdk.dir=/home/jenkins/android-studio/sdk/" > local.properties
git submodule init && git submodule update
git status
git log -3
./gradlew clean && ./gradlew build
echo "./run_android_liteserv.sh 8080"
./run_android_liteserv.sh 8080
env | sort
echo "#-------------------------------------------- build cblite-tests"
export SYNCGATE_PATH=${AUT_DIR}/sync_gateway/bin/sync_gateway
export LITESERV_PATH=${AUT_DIR}/LiteServAndroid
cd ${WORKSPACE}
if [[ ! -d cblite-tests ]] ; then git clone https://github.com/couchbaselabs/cblite-tests.git ; fi
cd cblite-tests
git pull
git show --stat
mkdir -p tmp/single
export CC="gcc44"
export CXX="g++44"
npm install 2>&1 > ${WORKSPACE}/npm_install.log
echo "===================================================================================== killing any hanging com.couchbase.liteservandroid apps"
#adb shell am force-stop com.couchbase.liteservandroid
# echo ===================================================================================== starting ${LITESERV_PATH}
# ${LITESERV_PATH} | tee ${WORKSPACE}/liteserv.log &
# echo ===================================================================================== starting ./node_modules/.bin/tap
export TAP_TIMEOUT=500
# ./node_modules/.bin/tap ./tests 1> ${WORKSPACE}/results.log 2> ${WORKSPACE}/gateway.log
echo ===================================================================================== starting npm
CONF_FILE=local_android npm test
#> ${WORKSPACE}/results.log 2> ${WORKSPACE}/gateway.log
echo ===================================================================================== killing any hanging com.couchbase.liteservandroid apps
adb shell am force-stop com.couchbase.liteservandroid || true
echo ===================================================================================== DONE
| true |
00b23c0b54e5d35738a37b503a1225c221186929 | Shell | Tritlo/GRACe | /docker_dist/build.sh | UTF-8 | 540 | 2.84375 | 3 | [] | no_license | #!/bin/sh
# Build the GRACe docker image: collect the stack-built executables into
# ./bins, fetch the MiniZinc and Gecode sources, build the image, then
# remove the intermediates.
mkdir bins
# Pick up executables, copy to bins
find "../$(stack path --dist-dir)/build" -type f -perm -u=x,g=x,o=x | \
    xargs -I % cp % bins
# Get dependencies
git clone https://github.com/MiniZinc/libminizinc.git
svn --username anonymous checkout https://svn.gecode.org/svn/gecode/tags/release-5.0.0
# Build image
## When building server image:
# docker build -t eugraceful/grace-server .
## When building examples
docker build -t eugraceful/grace-examples .
# Clean up
echo Removing
rm -rf bins libminizinc release-5.0.0
| true |
6e43b2d336c587598b3e157f6bcc5e5a41803530 | Shell | gtbai/PyFFM | /run_scripts/sync_repo.sh | UTF-8 | 306 | 2.921875 | 3 | [] | no_license | #!/bin/zsh
# copy repo to each worker
# Push a fresh copy of DistributedFFM to worker hosts cp-2 .. cp-4,
# removing any stale checkout on the worker first.
cd ~
for idx in {2..4}
do
    worker_name=cp-$idx
    echo "Sending repo to $worker_name..."
    ssh $worker_name "rm -rf /mnt/project/DistributedFFM"
    scp -r /mnt/project/DistributedFFM $worker_name:/mnt/project
    ## ssh $worker_name "source setup_node.sh"
done
| true |
34e13b2551b06c3a8b1f14c1b4915f5b7ed32938 | Shell | x2x4com/my-script | /lbt/jboss.sh | UTF-8 | 634 | 3.328125 | 3 | [] | no_license | #!/bin/sh
# Minimal JBoss start/stop control script.
# Usage: jboss.sh {start | stop}

# Export these so they are visible to the JBoss JVM started by run.sh
# (plain assignments would stay local to this script).
export JAVA_HOME=/usr/local/java
export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin
export CLASSPATH=$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export LANG=zh_CN.GB18030

jboss_home=/usr/local/jboss
jboss_log=/var/log/jboss_tty_out.log

case "$1" in
    "start")
    echo "Start Jboss..."
    # BUG FIX: the original ran `run.sh 2&>1 >> jboss_log=/var/log/...`,
    # which passed "2" as an argument and appended stdout to a file
    # literally named "jboss_log=/var/log/jboss_tty_out.log". Redirect
    # stdout to the log first, then duplicate stderr onto it.
    /usr/bin/sudo -u jboss "$jboss_home/jboss/bin/run.sh" >> "$jboss_log" 2>&1 &
    ;;
    "stop")
    echo "Stop Jboss..."
    # Forcefully kill every JBoss java process found by the ps scan.
    for i in `ps -ef | grep jboss | grep java | grep -v grep | awk '{print $2}'`
    do
        kill -9 $i
    done
    ;;
    *)
    echo "Usage : `basename $0` {start | stop}"
    exit 0
    ;;
esac
| true |
8a625ab909fd194cf20fdeec2b4d265c497a6ea6 | Shell | Stormbase/django-cookiecutter | /{{cookiecutter.project_slug}}/script/makemessages | UTF-8 | 398 | 2.859375 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #! /bin/sh
# script/makemessages: Generate new .po translation files
# Abort on the first failing command.
set -e
# ANSI colour escape codes used with `echo -e` below.
RED='\033[0;31m'
NC='\033[0m' # No Color
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
BLUE='\033[0;34m'
WHITE='\033[1;37m'
echo -e "\n${BLUE}==> Running django-admin makemessages…${NC}"
# NOTE(review): the next line passes "DJANGO_SETTINGS_MODULE=..." as a
# second argument to `cd` instead of as an environment assignment in
# front of django-admin; it likely should read
#   cd <dir> && DJANGO_SETTINGS_MODULE=... django-admin makemessages --all
# — confirm and fix upstream.
cd {{cookiecutter.project_slug}} DJANGO_SETTINGS_MODULE={{cookiecutter.project_slug}}.settings.develop && django-admin makemessages --all | true |
9cc9885912e7464e2e0da377dcbb703a3e47048c | Shell | mjgs/dotfiles | /bin/install/osx/installs/brew.sh | UTF-8 | 1,676 | 4.09375 | 4 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/bin/sh
#
# Description: installs brew packages
#
# DEBUG=<non-empty> turns on bash xtrace with a file:line:function prompt.
if [ -n "$DEBUG" ]; then
  echo "$0: Setting bash option -x for debug"
  PS4='+($(basename ${BASH_SOURCE}):${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
  set -x
fi
# Exit on error
set -e; set -o pipefail
# PFX prefixes every log line; the ":?" expansions abort immediately if
# HOME, DOTFILES_DIR or HOMEBREW_URL are unset or empty.
PFX=${PFX:-==>}
HOME=${HOME:?}
DOTFILES_DIR=${DOTFILES_DIR:?}
HOMEBREW_URL=${HOMEBREW_URL:?}
# Homebrew formulae to install, in order (mongodb-community and heroku
# require the taps registered by addHomebrewTaps).
PACKAGES=(
  ack
  awscli
  cmake
  git
  git-extras
  mongodb-community@4.0
  httpie
  heroku
  iperf
  jq
  neovim
  openssl
  redis
  tree
  watch
  wget
  doctl
)
function installHomebrew() {
  # Install Homebrew via its remote installer, unless the brew binary
  # is already present at the standard location.
  echo "$PFX Installing homebrew..."
  if [ -x /usr/local/bin/brew ]; then
    echo "$PFX Homebrew already installed, skipping..."
    return 0
  fi
  echo "$PFX Homebrew url: $HOMEBREW_URL"
  ruby -e "$(curl -fsSL $HOMEBREW_URL)"
}
function addHomebrewTaps() {
  # Register the third-party taps required by entries in PACKAGES
  # (mongodb-community@4.0 lives in mongodb/brew, heroku in heroku/brew).
  echo "$PFX Adding homebrew taps..."
  brew tap mongodb/brew
  brew tap heroku/brew
}
function installHomebrewPackages() {
  # Install every formula listed in PACKAGES, then run its optional
  # per-package configuration script via configurePackage.
  echo "$PFX Installing homebrew packages..."
  for PACKAGE in "${PACKAGES[@]}"; do
    echo "$PFX Installing package: $PACKAGE"
    brew install $PACKAGE
    configurePackage $PACKAGE
    echo "$PFX Install complete: $(date +"%Y-%m-%d-%H%M%S")"
  done
}
function configurePackage() {
  # Run the per-package configuration script, if one exists.
  # The script name is the package name with any "@version" suffix
  # stripped, e.g. "mongodb-community@4.0" -> "mongodb-community.sh".
  local pkg_name=$1
  local script_dir=$DOTFILES_DIR/bin/install/osx/configurations/cli
  local script_path=$script_dir/${pkg_name%%@*}.sh
  if [ ! -e $script_path ]; then
    echo "$PFX No configuration script..."
    return
  fi
  $script_path
}
#
# Main
#
# Entry point: bootstrap Homebrew itself, register the taps, then
# install and configure every package.
installHomebrew
addHomebrewTaps
installHomebrewPackages
exit 0
| true |
4ed78e3643beb43c9648a1c04e2b9b39e27bb344 | Shell | codazoda/joeldare-blog | /admin.sh | UTF-8 | 840 | 3.875 | 4 | [] | no_license | #!/bin/bash
# Interactive admin menu for the Hugo blog: serve locally, preview in a
# browser, regenerate, deploy, or pull on the production server.
COLS=$(tput cols)
clear
echo $COLS wide
echo ---[ Blog Admin ]---
echo
echo [S] Start Local Server
echo [T] Test in Browser
echo [G] Generate Site
echo [P] Pull on Server
echo [Q] Quit
echo [D] Deploy with Add and Commit
echo
printf "> "
read -n 1 CHAR # Read a single character (the keypress is echoed back)
echo
echo
# BUG FIX: the menu advertises upper-case keys but the original case
# statement only matched lower-case input; accept either case now.
case $CHAR in
    s|S)
    hugo serve &
    ;;
    t|T)
    open http://localhost:1313/blog/
    ;;
    g|G)
    hugo
    git status
    ;;
    p|P)
    echo Connecting to joeldare.com.
    ssh root@joeldare.com "cd /var/www/joeldare-blog;git pull"
    ;;
    d|D)
    echo Adding, committing, and deploying to joeldare.com.
    hugo
    git add .
    git commit -m 'add/edit a post and regenerate'
    git push
    ssh root@joeldare.com "cd /var/www/joeldare-blog;git pull"
    ;;
    q|Q)
    echo Then why even start?
    echo
    ;;
esac
| true |
e62bd743386242ae6c04ec4df052364c0ffca468 | Shell | dpb587/upstream-blob-mirror | /ci/tasks/update/execute.sh | UTF-8 | 445 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Concourse CI task: run the repository's update job against a local clone.
set -eu -o pipefail
# cd four directory levels above this script's own location.
cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/../../../.."
# Commit as the CI identity; author fields fall back to the committer
# unless the job overrides GIT_AUTHOR_NAME / GIT_AUTHOR_EMAIL.
export GIT_COMMITTER_NAME=Concourse
export GIT_COMMITTER_EMAIL=concourse.ci@localhost
export GIT_AUTHOR_NAME="${GIT_AUTHOR_NAME:-$GIT_COMMITTER_NAME}"
export GIT_AUTHOR_EMAIL="${GIT_AUTHOR_EMAIL:-$GIT_COMMITTER_EMAIL}"
# Work on a scratch clone so ./bin/all can commit freely; record the real
# upstream URI of the input repo for the scripts that need it.
git clone --quiet file://$PWD/repo repo-output
export GIT_REMOTE_URI=$( cd repo ; git remote get-url origin )
cd repo-output
./bin/all
| true |
6138ca769a5b4eea9661a4351e9daa2dc55a6c54 | Shell | 9sako6/dotfiles | /dist/.zshenv | UTF-8 | 2,652 | 2.734375 | 3 | [] | no_license | # /etc/profile を読み込まない設定
# If /etc/profile were sourced automatically, /usr/bin would be prepended
# to PATH and the *env shims installed via anyenv would be shadowed.
setopt no_global_rcs
# local bin
export PATH="/usr/local/bin:${PATH}"
# Settings for rbenv
export RBENV_ROOT="$HOME/.rbenv"
if [ -d "$RBENV_ROOT" ]; then
  export PATH="$RBENV_ROOT/bin:$PATH"
  if [ -d "${HOME}/.rbenv/bin/rbenv" ]; then
    eval "$(~/.rbenv/bin/rbenv init - zsh)"
  else
    eval "$(rbenv init - --no-rehash)"
  fi
fi
# Settings for pyenv
export PYENV_ROOT="$HOME/.pyenv"
if [ -d "$PYENV_ROOT" ]; then
  export PATH="$PYENV_ROOT/bin:$PATH"
  eval "$(pyenv init - --no-rehash)"
fi
eval "$(pyenv init --path)"
# Settings for nodebrew
export PATH="$PATH:$HOME/.nodebrew/current/bin"
# PATH general
export PATH="$PATH:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/Library/TeX/texbin"
# Setting for original commands
export PATH="$PATH:$HOME/mybin"
# Setting for fzf
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# Setting for nvm
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# Rust
export PATH="$HOME/.cargo/bin:$PATH"
# Cargo
. "$HOME/.cargo/env"
# go
export GOPATH="$HOME/go"
export PATH="$GOPATH/bin:$PATH"
export PATH="$PATH:/usr/local/go/bin"
# goenv
export GOENV_ROOT="$HOME/.goenv"
export PATH="$GOENV_ROOT/bin:$PATH"
if type goenv > /dev/null 2>&1; then
  eval "$(goenv init -)"
  export PATH="$PATH:$GOPATH/bin"
fi
# deno
export DENO_INSTALL="$HOME/.local"
export PATH="$DENO_INSTALL/bin:$PATH"
# gnu-getopt
export PATH="/usr/local/opt/gnu-getopt/bin:$PATH"
# opam configuration
test -r /Users/9sako6/.opam/opam-init/init.zsh && . /Users/9sako6/.opam/opam-init/init.zsh > /dev/null 2> /dev/null || true
# depot_tools for Chromium
# https://chromium.googlesource.com/chromium/src/+/main/docs/mac_build_instructions.md#install
export PATH="$PATH:$HOME/ghq/chromium.googlesource.com/chromium/tools/depot_tools"
# protobuf
export PATH="$PATH:/usr/local/protobuf/bin"
# K8s auto-complete
autoload -U +X compinit && compinit
source <(kubectl completion zsh)
# Dart
export PATH="$PATH":"$HOME/.pub-cache/bin"
# Set PATH, MANPATH, etc., for Homebrew.
[ -f '/home/linuxbrew/.linuxbrew/bin/brew' ] && eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
# minikube
eval $(minikube -p minikube docker-env)
# aqua
export PATH="${AQUA_ROOT_DIR:-${XDG_DATA_HOME:-$HOME/.local/share}/aquaproj-aqua}/bin:$PATH"
export AQUA_GLOBAL_CONFIG=${AQUA_GLOBAL_CONFIG:-}:${XDG_CONFIG_HOME:-$HOME/.config}/aquaproj-aqua/aqua.yaml
| true |
e4cf4c942f02629f27b8b2709cf50e18acab35ce | Shell | hrimfaxi/faketool | /usb_img.sh | UTF-8 | 271 | 2.65625 | 3 | [] | no_license | #!/bin/sh
# Log and copy a disk/USB image into the fake-root holding area under a
# randomized name, so repeated copies of the same source never collide.
#   $1 = path of the source device or image file
FAKE_ROOT=/home/hrimfaxi/.fake/
date >> "$FAKE_ROOT/usb.log"
# 32-character random alphanumeric suffix (POSIX-sh compatible).
UUID=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 32 | head -n 1)
# Build the destination once so the logged command and the executed
# command cannot drift apart; quote "$1" so paths with spaces survive.
DEST=$FAKE_ROOT/$(basename "$1")-$UUID
echo dd if="$1" of="$DEST" bs=32M >> "$FAKE_ROOT/usb.log"
dd if="$1" of="$DEST" bs=32M
| true |
3c85a4ffcea34003742c805e5de8359e36895475 | Shell | vstenvers/PRACTICE_DAY9 | /GITHUB_TEST/clean.sh | UTF-8 | 254 | 2.9375 | 3 | [] | no_license | #! /bin/bash
# BONUS
# Author: Vanessa Stenvers
# Date: 17-01-2019
# Purpose: Remove the DATA and RESULTS folders from the GITHUB_TEST subdirectories
for d in ANNOTATION_1 ALPHABET_2 NUMBERS_3; do
	# BUG FIX: guard the cd. The original ignored a failed cd, so the
	# later "cd ../" walked OUT of the tree and the next iteration
	# deleted DATA/RESULTS from the wrong directory.
	if ! cd "$d"; then
		echo "skipping missing directory: $d" >&2
		continue
	fi
	for i in DATA RESULTS; do
		rm -rf "$i"
	done
	cd ..
done
| true |
4f82005b4d15688ca54829159b47bbb6a0f29aa4 | Shell | pwcazenave/data-processing | /PhD/scratch/backups/access/data/diff_grids/transect/gebco_mike_transect.sh | UTF-8 | 4,723 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# script to plot difference between GEBCO, CMAP and ETOPO.
gmtset D_FORMAT=%g
gmtset PLOT_DEGREE_FORMAT=F
gmtset ANNOT_FONT_SIZE=12
gmtset LABEL_FONT_SIZE=12
gmtset HEADER_FONT_SIZE=14
gmtset ANNOT_FONT_SIZE_SECONDARY=12
area=-R-10/5/47.5/52.5
proj=-Jm1
gebco_1min=../../gebco/plot/gebco_bathy.grd
etopo_1min=../../etopo/etopo_bathy_resampled_1min.grd
cmap_1min=../../cmap/grids/cmap_bathy.grd
outfile=../images/gebco_vs_etopo_transect.ps
# make a transect (fairly long)
slat=49.3
slong=-6.5
elat=50.5
elong=1
echo -n "making the profile... "
project -C$slong/$slat -E$elong/$elat -G1 -Q > ./onemin_transect.trk
echo "done."
# get the values from the grid
echo -n "take the depth values from the grids... 1 "
grdtrack ./onemin_transect.trk -G$gebco_1min -S > ./g_onemin_transect.pfl
echo -n "2 "
grdtrack ./onemin_transect.trk -G$etopo_1min -S > ./e_onemin_transect.pfl
echo -n "3 "
grdtrack ./onemin_transect.trk -G$cmap_1min -S > ./c_onemin_transect.pfl
echo "done."
# plot the profiles
echo -n "plot the profiles... "
p_area=$(awk '{print $3, $4}' ./g_onemin_transect.pfl | minmax -I5)
p_proj=-JX15c/9c
awk '{print $3, $4}' ./g_onemin_transect.pfl | \
psxy $p_area $p_proj \
-Ba100f50g100:,-km::"Distance along line":/a20f10g20:,-m::"Depth":WSn \
-K -Xc -Y18 -W3/50/50/200 -P > $outfile
#awk '{print $3, $4}' ./e_onemin_transect.pfl | \
# psxy $p_area $p_proj -B0 -O -K -W3/50/200/50 >> $outfile
# add in the mike mesh output profile
echo -n "adding cmap... "
#psxy $p_area $p_proj -B0 -O -K -W3/0/0/0 ./MIKE_transect.txt >> $outfile
#psxy $p_area $p_proj -B0 -O -K -W3/100/100/100 ./MIKE_transect_3.txt >> $outfile
awk '{print $3, $4}' ./c_onemin_transect.pfl | \
psxy $p_area $p_proj -B0 -O -K -W3/200/150/150 >> $outfile
# add labels
end_x=$(echo "scale=4; $(echo $p_area | cut -f2 -d"/")-30" | bc -l)
pstext -N $p_area $p_proj -O -K -W255/255/255 << LABELS >> $outfile
10 -30 12 0 0 1 A
$end_x -30 12 0 0 1 B
LABELS
echo "done."
# add in the error using a subsampled version of the 1 minute profile
#echo -n "calculating errors... "
#awk '{print $3, $4}' ./g_onemin_transect.pfl > sub1.pfl
#echo -n "formatting... "
#awk '{print $3, $4}' ./e_onemin_transect.pfl > sub2.pfl
#paste sub1.pfl sub2.pfl | awk '{print $1, $2-$4}' > eg_error.pfl
#d_area=$(minmax ./eg_error.pfl -I3)
#echo -n "plotting... "
#psxy $d_area $p_proj -O -K -B0/a5f2.5E -W2/200/50/50 eg_error.pfl >> $outfile
## add in a red axis label
#pstext -N -O -K $p_area $p_proj -G200/50/50 << RED_LABEL >> $outfile
#625 -99 12 90 0 1 Difference in depth values (m)
#RED_LABEL
#echo "done."
# plot the profile on a map
echo -n "add the location... "
# Location map beneath the profile panel (-Y-13): shaded GEBCO bathymetry.
grdimage $area $proj -Ba1f0.5g1WeSn -O -K -C../../gebco/plot/shelf.cpt ../../gebco/plot/gebco_bathy.grd -Y-13 -I../../gebco/plot/gebco_grad.grd >> $outfile
# add a coastline for the area
pscoast $area $proj -Ba5f2.5g5WeSn -Df -G0/0/0 -O -K -N1/255/255/255 -W1/255/255/255 >> $outfile
# add the profile location
# The track is drawn twice -- thick white under thin black -- for a halo effect.
psxy ./onemin_transect.trk $area $proj -W5/255/255/255 -B0 -O -K \
>> $outfile
psxy ./onemin_transect.trk $area $proj -W3/0/0/0 -B0 -O -K \
>> $outfile
echo "done."
# label the profile start and end
pstext $area $proj -D0/-0.4 -B0 -O -K -W255/255/255O0.1/255/255/255 \
<< TRANS_LAB >> $outfile
$slong $slat 10 0 0 1 A
$elong $elat 10 0 0 1 B
TRANS_LAB
# add a scale bar
echo -n "add a scale bar... "
psscale -D7.5/-1.6/13/0.5h -P -C../../gebco/plot/shelf.cpt -O -K -B20:"Depth (m)": >> $outfile
echo "done."
# add in the key
echo -n "add the labels... "
# Switch to full-A4 page coordinates to place the legend line swatches and
# their text labels at absolute positions on the sheet.
page=-R0/23/0/34
a4=-JX23c/34c
psxy $page $a4 -X-3.1 -Y-5.5 -O -K -B0 -W5/50/50/200 << BLUE >> $outfile
6 15.8
6.5 15.8
BLUE
#psxy $page $a4 -O -K -B0 -W3/50/200/50 << GREEN >> $outfile
#7 15.4
#7.5 15.4
#GREEN
#psxy $page $a4 -O -K -B0 -W3/0/0/0 << BLACK >> $outfile
#11 15.6
#11.5 15.6
#BLACK
#psxy $page $a4 -O -K -B0 -W3/100/100/100 << GREY >> $outfile
#11 15.4
#11.5 15.4
#GREY
psxy $page $a4 -O -K -B0 -W5/200/150/150 << PINK >> $outfile
13 15.8
13.5 15.8
PINK
#psxy $page $a4 -O -K -B0 -W3/200/50/50 << RED >> $outfile
#15 15.4
#15.5 15.4
#RED
pstext $page $a4 -O -K << BLUE >> $outfile
6.7 15.67 12 0 0 1 GEBCO
BLUE
#pstext $page $a4 -O -K << GREEN >> $outfile
#7.7 15.27 12 0 0 1 ETOPO 1 minute
#GREEN
# NOTE: the final overlay keeps -K, so the PostScript trailer relies on the
# converters below tolerating an unfinished plot sequence.
pstext $page $a4 -O -K << BLACK >> $outfile
13.7 15.67 12 0 0 1 C-MAP
BLACK
#pstext $page $a4 -O << GREEN >> $outfile
#15.7 15.27 10 0 0 1 Depth difference
#GREEN
echo "done."
# convert the images
# Produce press-quality PDF and a 300 dpi JPEG next to the PostScript output.
echo -n "converting to pdf "
ps2pdf -sPAPERSIZE=a4 -dPDFSETTINGS=/prepress "$outfile" \
${outfile%.ps}.pdf
echo -n "and jpeg... "
gs -sDEVICE=jpeg -r300 -sPAPERSIZE=a4 -dBATCH -dNOPAUSE \
"-sOutputFile=${outfile%.ps}.jpg" \
"$outfile" > /dev/null
echo "done."
exit 0
| true |
34eb796f1a5ceac471f44f5c186553602f971358 | Shell | xmengnet/linux-ck-uksm | /make_kernel.sh | UTF-8 | 2,059 | 2.71875 | 3 | [
"MIT"
] | permissive | linux_ver=5.12.19
# Build a Debian package of the linux-ck-uksm kernel: fetch sources and
# patch sets, apply them, tweak the config, and run `make deb-pkg`.
# (${linux_ver} is set just above this block.)
linux_rel=3
name=linux-ck-uksm
_subarch=30
_gcc_more_v=20210818
_major=5.12
_ckpatchversion=1
_ckpatch=patch-${_major}-ck${_ckpatchversion}
_patches_url="https://gitlab.com/sirlucjan/kernel-patches/-/raw/master/${_major}"
wget -c https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-${linux_ver}.tar.xz
wget -c https://github.com/graysky2/kernel_compiler_patch/archive/${_gcc_more_v}.tar.gz
wget -c http://ck.kolivas.org/patches/5.0/${_major}/${_major}-ck${_ckpatchversion}/${_ckpatch}.xz
wget -c ${_patches_url}/uksm-patches/0001-UKSM-for-5.12.patch
wget -c ${_patches_url}/bbr2-patches-v3/0001-bbr2-patches.patch
wget -c ${_patches_url}/lru-patches-v4/0001-lru-patches.patch
wget -c ${_patches_url}/block-patches-v7/0001-block-patches.patch
tar -xpvf linux-${linux_ver}.tar.xz
tar -xpvf ${_gcc_more_v}.tar.gz
xz -d ${_ckpatch}.xz
# Guard the cd: without it a failed extraction would patch/build in the wrong dir.
cd linux-${linux_ver} || exit 1
scripts/setlocalversion --save-scmversion
echo "-$linux_rel" > localversion.10-pkgrel
echo "${name#linux}" > localversion.20-pkgname
# Apply every numbered patch downloaded next to the source tree.
# (glob instead of parsing `ls` output, which word-splits on whitespace;
# the glob also replaces the old 0*.patch name filter)
for patch_path in ../0*.patch; do
	[[ -e $patch_path ]] || continue   # no matches: the glob stays literal
	src="${patch_path##*/}"
	echo "Applying patch $src..."
	patch -Np1 < "$patch_path"
done
cp ../config.debian .config
scripts/config --disable CONFIG_DEBUG_INFO
scripts/config --disable CONFIG_CGROUP_BPF
scripts/config --disable CONFIG_BPF_LSM
scripts/config --disable CONFIG_BPF_PRELOAD
scripts/config --disable CONFIG_BPF_LIRC_MODE2
scripts/config --disable CONFIG_BPF_KPROBE_OVERRIDE
scripts/config --enable CONFIG_PSI_DEFAULT_DISABLED
scripts/config --disable CONFIG_LATENCYTOP
scripts/config --disable CONFIG_SCHED_DEBUG
scripts/config --disable CONFIG_KVM_WERROR
# Blank EXTRAVERSION inside the ck patch so it applies to this point release.
sed -i -re "s/^(.EXTRAVERSION).*$/\1 = /" "../${_ckpatch}"
patch -Np1 -i ../"${_ckpatch}"
make olddefconfig
patch -Np1 -i "../kernel_compiler_patch-${_gcc_more_v}/more-uarches-for-kernel-5.8+.patch"
# Answer the new "Processor family" prompt with the chosen subarch number.
if [[ -n "${_subarch}" ]]; then
yes "${_subarch}" | make oldconfig
else
make oldconfig
fi
make -s kernelrelease > version
make deb-pkg KDEB_PKGVERSION=$(make kernelversion)-${linux_rel} -j40
| true |
422db4adbeb5c070c05f8313368abb376517f586 | Shell | chmodxcafe/MyRep | /azurehelperbot/pyInstalled | UTF-8 | 666 | 2.71875 | 3 | [] | no_license | #! /bin/bash
# c.sh
# Build CPython 3.8.3 from source on CentOS/RHEL and link it as python3/pip3.
yum -y install gcc;
# NOTE(review): this second install runs without -y and will prompt interactively.
sudo yum install zlib-devel bzip2 bzip2-devel readline-devel sqlite sqlite-devel openssl-devel xz xz-devel libffi-devel;
yum -y install zlib-devel bzip2-devel openssl-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel db4-devel libpcap-devel xz-devel libffi-devel;
wget https://www.python.org/ftp/python/3.8.3/Python-3.8.3.tgz;
tar -zxvf Python-3.8.3.tgz;
mkdir /usr/local/python3;
# Abort if extraction failed instead of running configure in the wrong directory.
cd Python-3.8.3 || exit 1;
./configure --prefix=/usr/local/python3;
make && make install;
ln -sf /usr/local/python3/bin/python3.8 /usr/bin/python3;
ln -sf /usr/local/python3/bin/pip3.8 /usr/bin/pip3;
# Fix: the success message was missing `echo`, so the bare string was executed
# as a command ("command not found") instead of being printed.
echo "python3安装成功...";
python3 -V
| true |
30fe6de46f2ed7e76d8e3777423a90db3c740ccf | Shell | krishnasharma14u/Recon-Script | /Recon-phase-1.sh | UTF-8 | 2,663 | 2.578125 | 3 | [] | no_license | subdomain_scan()
{
# Enumerate subdomains for every apex domain in domains.txt using several
# sources (altdns permutations, assetfinder, subfinder, dnsgen, passive amass),
# de-duplicating with anew, then keep only names matching the target apexes.
echo "Running Subdomain Scanning"
altdns -i domains.txt -o altd.txt -w /usr/share/SecLists/Discovery/DNS/bitquark-subdomains-top100000.txt
cat domains.txt | assetfinder | tee subdomains.txt
cat domains.txt | subfinder | anew subdomains.txt
dnsgen domains.txt | anew subdomains.txt
cat altd.txt | anew subdomains.txt
# Comma-separated apex list, as expected by `amass enum -d`.
domains=$(cat domains.txt | tr '\n' ',' | sed 's/,$//')
amass enum -d $domains --passive -o amass-out.txt
cat amass-out.txt | anew subdomains.txt
# Alternation pattern ("a.com|b.com") used to filter out stray results.
var1=$(cat domains.txt | tr '\n' '|' | sed 's/|$//')
cat subdomains.txt | grep -E "$var1" | tee final-domains.txt && fndo=$(wc -l final-domains.txt | cut -d' ' -f1); echo "total $fndo domains found" | notify
}
resolve_domains()
{
# Resolve the candidate subdomains with massdns (A records) and reduce the
# "name A ip" output to a plain hostname list in live-domains.txt.
echo "Resolving Domains"
cat final-domains.txt | massdns -r /usr/share/massdns/lists/resolvers.txt -t A -o S -w resolved-domains.txt && rsdo=$(wc -l resolved-domains.txt | cut -d' ' -f1); echo "total $rsdo live domains found" | notify
# First field is the FQDN; sed strips massdns' trailing dot.
cut -d " " -f1 resolved-domains.txt | sed 's/.$//' | tee live-domains.txt
}
naabu_scan()
{
# Port-scan the resolved hosts with naabu, then HTTP-probe any hosts found
# listening on the alt ports 8080/8443 (field 4 of naabu's JSON is the host).
cat live-domains.txt | naabu -json | tee naabu-ports.json
cat naabu-ports.json | grep 8080 | cut -d'"' -f4 | httpx -title -status-code -ports 8080 | tee naabu-sites.txt
cat naabu-ports.json | grep 8443 | cut -d'"' -f4 | httpx -title -status-code -ports 8443 | tee -a naabu-sites.txt; echo "Naabu Port Scan Completed"
}
run_httpx()
{
# HTTP-probe every live domain, recording title/status per site, then build
# the URL list (ffuf-res.txt) that the later fuzzing phases consume.
cat live-domains.txt | httpx -title -status-code | tee live-sites.txt
# Fold in the alt-port findings from naabu_scan when that step produced any.
# Fix: was `ls -lha | grep naabu-sitess.txt` -- a misspelled filename grepped
# out of `ls` output, so this branch could never fire; test the file directly.
if [ -f naabu-sites.txt ]
then
cat naabu-sites.txt | tee -a live-sites.txt
fi
cat live-sites.txt | cut -d' ' -f1 | tee ffuf-res.txt && ffres=$(wc -l ffuf-res.txt | cut -d' ' -f1); echo "total $ffres live websites found" | notify
}
run_masscan()
{
# Extract the IPs from the massdns "name A ip" records and full-range masscan them.
cat resolved-domains.txt | cut -d'A' -f2 | sed '/\.$/d' | sed -r 's/\s+//g' | sort -u | tee ip.txt
# NOTE(review): only the trailing `echo` is backgrounded here; masscan itself
# runs in the foreground. Siblings wrap the pair in (...) & -- confirm intent.
nohup masscan -iL ip.txt -p0-65535 --rate 10000 -oJ masscan.json; echo "Masscan completed" &
}
run_jsfscan()
{
# Run JSFScan (JS secrets/endpoints scanner) over the live sites in the background.
(nohup bash /usr/share/JSFScan.sh/JSFScan.sh -l ffuf-res.txt -s -e -r; echo "JSFScan Completed") &
}
run_ffuf()
{
# Content-discovery fuzzing across all live sites (two FUZZ keywords: host
# list x wordlist), keeping only HTTP 200s, with an HTML report, in background.
(nohup ffuf -u FUZZDOMAINS/FUZZ -w ffuf-res.txt:FUZZDOMAINS -w /usr/share/SecLists/Discovery/Web-Content/dicc.txt:FUZZ -mc 200 -fl 0,1 -fr 'Not Found','Unauthoriza','Forbidden' -t 10000 -of html -o ffuf-report.html; echo "Fffuf Completed" | notify) &
}
run_nuclei()
{
# Run the full nuclei template suite against the live sites in the background
# and notify with the finding count when done.
(nohup cat ffuf-res.txt | nuclei -t cves -t exposed-panels -t fuzzing -t misconfiguration -t vulnerabilities -t default-logins -t exposed-tokens -t helpers -t takeovers -t workflows -t dns -t exposures -t miscellaneous -t technologies -o nucli-result.txt && nucl=$(wc -l nucli-result.txt | cut -d' ' -f1); echo "total $nucl issues found" | notify) &
}
# Dispatch: the first CLI argument names the recon phase to run, e.g.
# ./Recon-phase-1.sh subdomain_scan (a silent no-op when no argument is given).
$1
| true |
3981833ba7a55797639f7b3edb1f0dad08d04ff5 | Shell | PerthCharles/codeware | /network-simulation-scripts/pressure-test.sh | UTF-8 | 1,461 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Target host for netperf/http_load and per-run duration in seconds.
server="221.168.20.22"
time="60"
function netperf-cases()
{
# Run netperf request/response benchmarks (TCP_RR, TCP_CRR, UDP_RR) over a
# matrix of request/response sizes, appending one summary row per run to
# netperf.log (headers written first).
echo -e "\t\t\t\tSocket Size Request Resp. Elapsed Trans. CPU CPU S.dem S.dem" >> netperf.log
echo -e "\t\t\t\tSend Recv Size Size Time Rate local remote local remote" >> netperf.log
echo -e "\t\t\t\tbytes bytes bytes bytes secs. per sec % % us/Tr us/Tr\n" >> netperf.log
for class in TCP_RR TCP_CRR UDP_RR; do
# NOTE(review): the commas below are part of each word ("32," "1024," ...),
# so netperf is invoked as e.g. `-r 32, 32,` -- confirm whether plain
# "32 1024 10240" (or a single "req,rsp" token) was intended before changing.
for req in 32, 1024, 10240; do
for rsp in 32, 1024, 10240; do
for i in 1; do
echo -n -e "$class\t $req\t $rsp\t [$i]\t" >> netperf.log
# -c/-C request local/remote CPU accounting; keep only the data row.
netperf -H $server -l $time -t $class -c -C -- -r $req $rsp |tail -n 2 | head -n 1 >> netperf.log
done
done
done
done
}
function http_load-cases()
{
# Drive http_load two ways: a concurrency sweep (-p parallel users) against a
# list of small URLs, then a fixed-rate sweep (-r requests/sec) against a
# 1 MB file; three runs of each setting, results appended to http_load.log.
echo -e "\n\n##### Test random small files" >> http_load.log
for usr in 50 100 500 1000; do
for i in 1 2 3; do
echo -e "#usr=$usr [$i]" >> http_load.log
http_load -p $usr -s $time taobao.url >> http_load.log
sleep 3
done
done
echo -e "\n\n##### Test large file" >> http_load.log
for qps in 50 100 200; do
for i in 1 2 3; do
echo -e "#qps=$qps [$i]" >> http_load.log
http_load -r $qps -s $time 1MB.url >> http_load.log
sleep 3
done
done
}
# Run both benchmark suites back to back; results accumulate in
# netperf.log and http_load.log in the current directory.
netperf-cases
http_load-cases
| true |
584e4a744b11617bc08aae252e8b0daae8dd7a49 | Shell | cuplv/Java-Bytecode-Instrumenter | /CostModelTool/instrument/blocks/instrument.sh | UTF-8 | 449 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# Instrument the given jar ($1) at method or basic-block granularity ($2),
# repack the Soot output back into the jar, and count the methods.
mkdir sootOutput-Instrument/
mkdir log/
java -jar ../exploitr.jar $1 --printMethodCount -d 2
# Select the instrumentation pass from the second argument.
case "$2" in
methods)
java -jar ../exploitr.jar $1 -i expFunInstrument -x list_of_methods.txt
;;
blocks)
java -jar ../exploitr.jar $1 -i blockCodeInstrument -x list_of_blocks.txt
;;
esac
# Merge the instrumented classes into a copy of the original jar.
cd sootOutput-Instrument/
cp ../$1 .
jar uvf $1 *
cd ..
cp sootOutput-Instrument/$1 .
cp ../../other/* .
./getMethodCount.sh $1
| true |
a1a20eb82c1f4a05a8ad8c8c95af7a254010162b | Shell | krypton-byte/bash2c | /installer.sh | UTF-8 | 254 | 2.84375 | 3 | [] | no_license | #!/usr/bin/bash
# Termux helper: make sure clang is available, then compile and run the
# bundled installer source (install.sh.x.c).
# Fix: the original named the first function "test", shadowing the shell's
# `test` builtin; renamed (behavior unchanged, messages kept verbatim).
ensure_clang(){
if [[ -f $PREFIX/bin/clang ]]
then
    printf "[*] bahan terinstall\n"
    build_and_run
else
    printf "[*] sedang menginstall bahan \n"
    pkg install clang -y > /dev/null
    # Re-check after installing; recurses once clang appears.
    ensure_clang
fi
}
build_and_run(){
clang install.sh.x.c -o install
./install
}
ensure_clang
| true |
864dcc8df739472a0acfd2313245a020c1acafbb | Shell | adamhooper/code-from-adamhoopers-school-days | /mcgill-se/COMP206/ass3/q1/names.sh | UTF-8 | 124 | 2.84375 | 3 | [
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/sh
# The assignment asks us to use "set"...
# `set -- $(users)` loads each logged-in user into $1..$N; the `--` keeps
# `set` from dumping every shell variable when `users` prints nothing
# (which is exactly what the old bare `set `users`` did on an idle machine).
set -- $(users)
# -f: don't error when names.txt doesn't exist yet (e.g. first run).
rm -f names.txt
for name in "$@"; do
	./insert-name "$name"
done
| true |
519c4733479cfcf0b17436ee7063b5502bc5f388 | Shell | jfcamel/tp_scripts | /trackpoint.sh | UTF-8 | 1,730 | 3.6875 | 4 | [] | no_license | #!/bin/bash
load_params() {
    # Source the shared tunables (SENSITIVITY, SPEED, ...) from params.sh that
    # lives next to this script. Quoted + $() so a script path containing
    # spaces no longer breaks the old backtick/unquoted form.
    SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
    . "${SCRIPTDIR}/params.sh"
}
# Load tunables, then locate the TrackPoint's sysfs device directory: find the
# input node whose "name" attribute mentions "TrackPoint" and strip the
# trailing /input/inputN/name suffix to get the serio device path.
load_params
DEVFILE=`find /sys/devices/platform/i8042 -name name | xargs grep -Fl TrackPoint | sed 's/\/input\/input[0-9]*\/name$//'`
update_ss() {
    # Write each tunable from params.sh into the TrackPoint sysfs attribute of
    # the same (lower-case) name, echoing "name value" for every one via tee.
    local param var
    for param in sensitivity speed inertia thresh upthresh jenks resolution rate ztime skipback reach; do
        var=${param^^}            # e.g. sensitivity -> SENSITIVITY
        echo -n "$param "
        echo ${!var} | tee ${DEVFILE}/$param
    done
}
help() {
    # Usage summary for the option dispatcher at the bottom of the script.
    cat <<EOF
[Usage]: $0 [option]
 options:
 - find
 - udev
 - sys
EOF
}
# Dispatch on the single positional option.
OPTION=$1
if [ "${OPTION}" = "sys" ]; then
# Write the params.sh values straight into sysfs (not persisted over reboot).
update_ss
elif [ "${OPTION}" = "udev" ]; then
# Persist the settings as a udev rule so they are re-applied on boot, then
# reload udev and trigger the rule immediately.
tee /etc/udev/rules.d/52-trackpoint.rules <<EOF
SUBSYSTEM=="serio", DRIVERS=="psmouse", DEVPATH=="/sys/devices/platform/i8042/serio1/serio2", ATTR{sensitivity}="${SENSITIVITY}", ATTR{speed}="${SPEED}", ATTR{inertia}="${INERTIA}", ATTR{thresh}="${THRESH}", ATTR{upthresh}="${UPTHRESH}", ATTR{jenks}="${JENKS}", ATTR{resolution}="${RESOLUTION}", ATTR{rate}="${RATE}", ATTR{ztime}="${ZTIME}", ATTR{skipback}="${SKIPBACK}", ATTR{reach}="${REACH}"
EOF
udevadm control --reload-rules
udevadm trigger
elif [ "${OPTION}" = "find" ]; then
# Just print the discovered sysfs directory.
echo ${DEVFILE}
else
help
fi
| true |
587f0ab754a45a873744855d463e50ba9c73e2b0 | Shell | philcali/pits-api | /dev.make-zip.sh | UTF-8 | 1,082 | 3.90625 | 4 | [] | no_license | #!/bin/bash
function clean() {
# Drop everything (non-hidden entries) inside the virtualenv staging dir;
# lambda_env itself is kept and repopulated by setup_and_activate_venv.
rm -rf lambda_env/*
}
function clean_previous_builds() {
    # Delete every .zip directly in the current directory (previous bundles).
    # Fix: the old `for ... in $(find ...)` word-split filenames containing
    # spaces; iterate NUL-delimited find output instead.
    local previous_build
    while IFS= read -r -d '' previous_build; do
        echo "Removing $previous_build"
        rm -f -- "$previous_build"
    done < <(find . -maxdepth 1 -name "*.zip" -print0)
}
function setup_and_activate_venv() {
# Create (or refresh) the build virtualenv and activate it in THIS shell so
# the `pip` used by prepare_deps_and_deactive is the venv's pip.
python3 -m venv lambda_env
source lambda_env/bin/activate
}
function prepare_deps_and_deactive() {
# Install the bundle's dependencies into the active venv, then leave it.
# (function name kept as-is -- "deactive" -- since main() calls this spelling)
pip install -r requirements.txt
deactivate
}
function create_archive() {
    # Zip the venv's site-packages so they sit at the ROOT of the Lambda
    # bundle named $1 (created four levels up, i.e. in the project dir).
    local python_version zip_name
    # Split declaration from $() so a python3/sed failure isn't masked.
    python_version=$(python3 --version | sed -E 's|Python (\w+\.\w+)\.\w+$|\1|')
    zip_name=$1
    # Guard the cd: previously a failed cd made zip archive the wrong directory.
    cd "lambda_env/lib/python${python_version}/site-packages" || return 1
    zip -r "../../../../$zip_name" .
    cd ../../../../ || return 1
}
function add_current_code() {
    # Append (grow, -g) the pinthesky package itself to the existing archive.
    # "$1" quoted so an archive name containing spaces survives word-splitting.
    zip -r -g "$1" pinthesky
}
function main() {
# Build pipeline: wipe old artifacts, stage dependencies in a fresh venv,
# zip the site-packages, append the pinthesky package, clean up, and print
# the artifact name.
local zip_name="build_function.zip"
clean_previous_builds
clean
setup_and_activate_venv
prepare_deps_and_deactive
create_archive $zip_name
add_current_code $zip_name
clean
echo "Created application ready for deployment:"
echo $zip_name
}
main | true |
ab26e1be024616821e1f46b6e01cdf1f30c6e55d | Shell | alexyakovlev90/otus-highload-social-network | /load-balancing/tcp-haproxy/start.sh | UTF-8 | 2,944 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# --- Cluster bootstrap ----------------------------------------------------
# Tear down any previous stack and wipe all persisted state so replication
# starts from clean data directories.
docker-compose down
rm -rf ./master/data/*
rm -rf ./slave1/data/*
rm -rf ./slave2/data/*
rm -rf ./rabbitmq/data/*
rm -rf ./redis/data/*
docker-compose build
# Databases (and redis/rabbitmq) first; haproxy once they are coming up.
docker-compose up -d mysql_master mysql_slave1 mysql_slave2 redis rabbitmq
sleep 5
docker-compose up -d haproxy
# configure master
# wait to start
# Poll with a no-op query until mysqld accepts connections (root pw "111").
until docker exec mysql_master sh -c 'export MYSQL_PWD=111; mysql -u root -e ";"'
do
echo "Waiting for mysql_master database connection..."
sleep 4
done
# for semi-sync replication
# Load the semi-sync master plugin and require acks from both slaves.
master_plugin_cmd='INSTALL PLUGIN rpl_semi_sync_master SONAME "semisync_master.so";'
set_master_global_cmd='SET GLOBAL rpl_semi_sync_master_enabled = 1; SET GLOBAL rpl_semi_sync_master_wait_for_slave_count = 2;'
show_var_cmd='SHOW VARIABLES LIKE "rpl_semi_sync%";'
docker exec mysql_master sh -c "export MYSQL_PWD=111; mysql -u root -e '$master_plugin_cmd' -e '$set_master_global_cmd' -e '$show_var_cmd'"
# Configuring master node replication user and get the initial replication co-ordinates
grant_cmd='GRANT REPLICATION SLAVE ON *.* TO "mydb_slave_user"@"%" IDENTIFIED BY "mydb_slave_pwd"; FLUSH PRIVILEGES;'
status_cmd='SHOW MASTER STATUS;'
docker exec mysql_master sh -c "export MYSQL_PWD=111; mysql -u root -e '$grant_cmd' -e '$status_cmd'"
# Configuring slaves
# wait to start
until docker-compose exec mysql_slave1 sh -c 'export MYSQL_PWD=111; mysql -u root -e ";"'
do
echo "Waiting for mysql_slave database connection..."
sleep 4
done
until docker-compose exec mysql_slave2 sh -c 'export MYSQL_PWD=111; mysql -u root -e ";"'
do
echo "Waiting for mysql_slave database connection..."
sleep 4
done
# Slave nodes for semi-sync replication
slave_plugin_cmd='INSTALL PLUGIN rpl_semi_sync_slave SONAME "semisync_slave.so";'
set_slave_global_cmd='SET GLOBAL rpl_semi_sync_slave_enabled = 1;'
for N in 1 2
do docker exec mysql_slave$N sh -c "export MYSQL_PWD=111; mysql -u root -e '$slave_plugin_cmd' -e '$set_slave_global_cmd' -e '$show_var_cmd'"
done
docker-ip() {
    # Print the IP address(es) of the named container across its networks.
    local format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'
    docker inspect --format "$format" "$@"
}
# Read the master's current binlog file/position (fields 6 and 7 once the
# header+row output is flattened onto one line) and point both slaves at it.
MS_STATUS=`docker exec mysql_master sh -c 'export MYSQL_PWD=111; mysql -u root -e "SHOW MASTER STATUS"'`
CURRENT_LOG=`echo $MS_STATUS | awk '{print $6}'`
CURRENT_POS=`echo $MS_STATUS | awk '{print $7}'`
echo $MS_STATUS
echo $CURRENT_LOG
echo $CURRENT_POS
# Build the CHANGE MASTER statement, embedding the master container's IP.
start_slave_stmt="CHANGE MASTER TO MASTER_HOST='$(docker-ip mysql_master)',MASTER_USER='mydb_slave_user',MASTER_PASSWORD='mydb_slave_pwd',MASTER_LOG_FILE='$CURRENT_LOG',MASTER_LOG_POS=$CURRENT_POS; START SLAVE;"
start_slave_cmd='export MYSQL_PWD=111; mysql -u root -e "'
start_slave_cmd+="$start_slave_stmt"
start_slave_cmd+='"'
for N in 1 2
do
docker exec mysql_slave$N sh -c "$start_slave_cmd"
done
# Show replication status on both slaves for a visual sanity check.
docker exec mysql_slave1 sh -c "export MYSQL_PWD=111; mysql -u root -e 'SHOW SLAVE STATUS \G'"
docker exec mysql_slave2 sh -c "export MYSQL_PWD=111; mysql -u root -e 'SHOW SLAVE STATUS \G'"
| true |
8b7e37cc60544a6b93015a60754edce6eb0c2530 | Shell | anokata/cdiarogue | /crouge0/tools/mapedit | UTF-8 | 649 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# vim: filetype=sh
# debug #set -x
# Edit a game map: dump it via mapview into a temp file, open it in $EDITOR,
# write it back with `mapview setmap`, then rotate old/new map files.
MAPNAME=$1
MAPVIEW=/tmp/build/mapview
# TODO mapview -size
# store size and meta
# map data replace: mapreplace MAPFILE <input
MAPFILE=$(mktemp)
# Fix: remove the temp dump on any exit path (it was leaked before).
trap 'rm -f "$MAPFILE"' EXIT
# Default editor and default map, via the standard ${var:-default} idiom.
EDITOR=${EDITOR:-vim}
MAPNAME=${MAPNAME:-./maps/map_1_1}
if [ ! -x "$MAPVIEW" ]; then
(cd tools;make mapview)
fi
#cd ../ # TODO fix rel path problems
$MAPVIEW showmap "$MAPNAME" > "$MAPFILE"
#cd /tmp
# $EDITOR is intentionally unquoted so multi-word values (e.g. "emacs -nw") work.
$EDITOR "$MAPFILE"
#cat $MAPFILE | tr -d "\n"
$MAPVIEW setmap "$MAPNAME" "$MAPFILE"
#$EDITOR $MAPNAME.new
BASE_MAPNAME=$(basename "$MAPNAME")
# NOTE(review): assumes `mapview setmap` writes /tmp/<name>.new -- confirm.
mv "$MAPNAME" "/tmp/$BASE_MAPNAME.old"
mv "/tmp/$BASE_MAPNAME.new" "$MAPNAME"
| true |
328dd4265ca8e5ee82caf6090cf1dece38616d33 | Shell | Alan6584/PythonLearn | /tools/audio2m4a.sh | UTF-8 | 202 | 3.25 | 3 | [] | no_license | #!/bin/sh
# List every .ogg under the directory given as $1 and preview the .m4a names
# (the actual ffmpeg conversion is still commented out, as in the original).
find "$1" -name "*.ogg" > list
# Fix: read line-by-line instead of word-splitting `cat list`, so paths with
# spaces survive; ${file%.ogg} strips only the extension, where the old
# `cut -d'.' -f1` mangled any path containing another dot (e.g. "./x.ogg" -> "").
while IFS= read -r file; do
	echo "$file"
	wav_name=${file%.ogg}
	echo "$wav_name.m4a"
	# ffmpeg -i "$file" "$wav_name.m4a"
done < list
rm -f list | true |
e092dad69b817033bb908cec07ed5bb18e371423 | Shell | sohrablou/hiveos-linux | /hive/miners/ckb-miner/h-config.sh | UTF-8 | 1,731 | 3.515625 | 4 | [] | no_license | #!/usr/bin/env bash
function miner_ver() {
  # Print the configured CKB miner version, falling back to the latest known
  # version when CKB_MINER_VER is unset or empty.
  local ver=${CKB_MINER_VER:-$MINER_LATEST_VER}
  echo $ver
}
function miner_config_echo() {
# Print the resolved miner config file (helper from the hive framework).
local MINER_VER=`miner_ver`
miner_echo_config_file "/hive/miners/$MINER_NAME/$MINER_VER/$MINER_NAME.toml"
}
function miner_config_gen() {
# Generate the runtime ckb-miner TOML: start from the shipped global config,
# inject the pool URL, apply user overrides, and auto-fill gpu_ids from the
# rig's GPU detection JSON when the user did not set them.
local MINER_CONFIG="$MINER_DIR/$MINER_VER/$MINER_NAME.toml"
local MINER_CONFIG_F="/run/hive/miners/$MINER_NAME/$MINER_NAME.toml"
mkfile_from_symlink $MINER_CONFIG
#reading global config
cat $MINER_DIR/$MINER_VER/${MINER_NAME}_global.toml > $MINER_CONFIG
#[[ -z $CKB_MINER_TEMPLATE ]] && echo -e "${YELLOW}CKB_MINER_TEMPLATE is empty${NOCOLOR}" && return 1
[[ -z $CKB_MINER_URL ]] && echo -e "${YELLOW}CKB_MINER_URL is empty${NOCOLOR}" && return 1
# Only the first URL is used; prefix http:// when no scheme is given.
local rpc_url=`head -n 1 <<< "$CKB_MINER_URL"`
[[ $rpc_url != http* ]] && rpc_url="http://$rpc_url"
sed -i "s#.*rpc_url.*#rpc_url=\"$rpc_url\"#g" "$MINER_CONFIG_F"
# Each user-config line is "key=value"; replace the matching line wholesale.
local param_name=
for line in $CKB_MINER_USER_CONFIG; do
param_name=`echo $line | tr "=" " " | awk '{printf $1}'`
#replace param
sed -i "s/.*$param_name.*/$line/g" "$MINER_CONFIG_F"
done
if [[ `echo $CKB_MINER_USER_CONFIG | grep -c "gpus"` -eq 0 ]]; then
# With OpenCL enabled include AMD cards, otherwise NVIDIA only.
if [[ $CKB_MINER_OPENCL -eq 1 ]]; then
gpus="gpu_ids="`cat $GPU_DETECT_JSON | jq -c '[ . | to_entries[] | select(.value.brand == "amd" or .value.brand == "nvidia") | .key ]'`
else
gpus="gpu_ids="`cat $GPU_DETECT_JSON | jq -c '[ . | to_entries[] | select(.value.brand == "nvidia") | .key ]'`
fi
sed -i "s/.*gpu_ids.*/$gpus/g" "$MINER_CONFIG_F"
fi
# if [[ `echo $CKB_MINER_USER_CONFIG | grep -c "cpus"` -eq 0 ]]; then
# echo "cpus=0" >> $MINER_CONFIG
# fi
}
| true |
7f39c8af9e0c2773d77f3f37571473e51d914d75 | Shell | roguerouter/virl-utils | /lcVIRL.sh | UTF-8 | 2,517 | 4.125 | 4 | [
"ISC"
] | permissive | #! /bin/sh
#
# Live packet capture from VIRL
#
VERSION="0.1"
# Parse options: -p PORT is required; exactly one of -c (create a named pipe)
# or -w (stream straight into wireshark) selects the capture mode.
while getopts ":hvp:cw" opt; do
case $opt in
h)
echo "usage: $(basename "$0") [-h] [-v] [-c | -w] -p PORT [Virl_IP]"
echo "Options:"
echo " -h --- Show this help message"
echo " -v --- Show Version number"
echo " -c --- Create pipe file to be listen on"
echo " -w --- Capture packets with wireshark"
echo " -p --- Specify port to capture packets"
echo " Virl_IP VIRL mgmt ip address"
echo ""
echo "-----------"
echo " Virl_IP is only optional if VIRL_HOST env variable is set!"
echo ""
exit 0
;;
v)
echo "Version: $VERSION"
exit 0
;;
p)
PORT=$OPTARG
;;
c)
PIPE_USE=1
;;
w)
WIRESHARK_USE=1
;;
\?)
echo "Invalid option: -$OPTARG" >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument." >&2
exit 1
;;
*)
echo "Unimplemented option: -$OPTARG" >&2
exit 1
;;
esac
done
shift $((OPTIND-1))
# Check VIRL_HOST environment variable
# The capture host comes from $VIRL_HOST, else the first positional argument.
if [ -z "$VIRL_HOST" ]; then
if [ -z "$1" ]; then
echo "Virl mgmt ip address must be set" >&2
exit 1
else
HOST=$1
fi
else
HOST=$VIRL_HOST
fi
# PORT is mandatory!
if [ -z "$PORT" ]; then
echo "Port parameter must be set" >&2
exit 1
fi
# Verify that both WIRESHARK_USE and PIPE_USE aren't set at the same time
if [ -n "$WIRESHARK_USE" ] && [ -n "$PIPE_USE" ]; then
echo "Can't set both flags [-w and -c] at the same time."
exit 1
fi
# Verify that at least WIRESHARK_USE or PIPE_USE are set
if [ -z "$WIRESHARK_USE" ] && [ -z "$PIPE_USE" ]; then
echo "Need to set one of these flags -w or -c"
exit 1
fi
# Use wireshark
# Stream the raw capture from the VIRL host's TCP port into wireshark's stdin.
if [ -n "$WIRESHARK_USE" ] && [ "$WIRESHARK_USE" -eq 1 ]; then
nc $HOST $PORT | wireshark -ki -
fi
# Open pipe file
# NOTE(review): `printf -v`, `[[ ]]` and $RANDOM are bashisms although the
# shebang says /bin/sh -- this script effectively requires bash.
if [ -n "$PIPE_USE" ] && [ "$PIPE_USE" -eq 1 ]; then
PIPE=/tmp/lcvirl
printf -v PIPE_NAME "%s_%s_%s" $PIPE $PORT $RANDOM
if [[ ! -p $PIPE_NAME ]]; then
mkfifo $PIPE_NAME
fi
echo "Pipe: $PIPE_NAME"
# Capture sigTerm [Ctrl-C]
trap "echo -e '\n==> Removing Pipe'; rm $PIPE_NAME" SIGINT SIGTERM
# Copy the pipe path to the clipboard when xclip is available.
command -v xclip > /dev/null 2>&1
if [ "$?" -eq "0" ]; then
echo $PIPE_NAME | xclip -selection c
echo "==> Pipe filename copied to clipboard."
else
echo "==> warning: xclip not found. Please consider installing it."
fi
nc $HOST $PORT > $PIPE_NAME
fi
| true |
830c91685df4d6261adb507a46ae5887c65eb690 | Shell | masaki-furuta/fonts | /xorg/dotted-zero_pcf-font/7x14_iso10646-1/convert.sh | UTF-8 | 326 | 2.5625 | 3 | [] | no_license | #!/bin/bash -x
# Round-trip the stock X11 7x14 PCF font through BDF for hand editing in
# gbdfed, then rebuild/compress it and install it over the system copy
# (the previous system copy is kept as a date-stamped backup).
FNT=7x14
FDR=/usr/share/X11/fonts/misc
cp -v ${FDR}/${FNT}.pcf.gz .
gunzip ${FNT}.pcf.gz
pcf2bdf -o ${FNT}.bdf ${FNT}.pcf
# Interactive editing session.
gbdfed ${FNT}.bdf
bdftopcf -o ${FNT}.pcf ${FNT}.bdf
gzip ${FNT}.pcf
sudo mv -v ${FDR}/${FNT}.pcf.gz ${FDR}/${FNT}.pcf.gz-$(date +%F)
sudo cp -v ${FNT}.pcf.gz ${FDR}/${FNT}.pcf.gz
| true |
1f77272397e30dc6bfb5b7eb5a609cad3a788ddb | Shell | korkmazkadir/dandelion | /script/create-network.sh | UTF-8 | 300 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Create an Algorand private network from $1 (template) in $2 (folder),
# then zip each generated Node-* directory.
templateFile="$1"
networkFolderName="$2"
# Fix: ${var:?} aborts when $2 is missing/empty -- a bare "$networkFolderName"
# would have expanded `rm -r ./` and deleted the working directory.
rm -r ./"${networkFolderName:?usage: create-network.sh <template> <folder>}"
goal network create -r ./"$networkFolderName" -n private -t "$templateFile"
#nodeDirectories = ls ./"$networkFolderName" | grep "Node-*"
# Bail out if the network directory was not created.
cd ./"$networkFolderName"/ || exit 1
for f in ./Node-*
do
    zip -r "$f.zip" "$f/"
done | true |
2e0bccde7bdf75cb84a79720cc39c410438d1d3b | Shell | Sachitanand-parida/sherlock-and-squares | /bash-tutorials-lets-echo.sh | UTF-8 | 173 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env bash
#
# Introductory Bash script which simply prints "HELLO" to the terminal.
# https://www.hackerrank.com/challenges/bash-tutorials-lets-echo
printf '%s\n' 'HELLO'
| true |
20248ade3dd4ce0272084b04aacad57111b6f724 | Shell | ilventu/aur-mirror | /mingw32-assimp/PKGBUILD | UTF-8 | 1,723 | 2.984375 | 3 | [] | no_license | # Maintainer: Daniel Kirchner <daniel at ekpyron dot org>
# AUR package metadata: cross-compiled (i486-mingw32) build of Assimp 3.0.
pkgname=mingw32-assimp
_basename=assimp
pkgver=3.0.1270
pkgrel=1
pkgdesc="Portable Open Source library to import various well-known 3D model formats in an uniform manner (mingw32)"
arch=('i686' 'x86_64')
license=('BSD')
depends=('mingw32-runtime' 'mingw32-boost')
makedepends=('cmake' 'mingw32-gcc')
url=('http://assimp.sourceforge.net/index.html')
source=("http://downloads.sourceforge.net/project/assimp/assimp-3.0/assimp--${pkgver}-source-only.zip")
# !strip/!buildflags: keep host CFLAGS/LDFLAGS and binary stripping out of
# the cross build (they would target the wrong architecture).
options=(!strip !buildflags)
sha1sums=('e80a3a4326b649ed6585c0ce312ed6dd68942834')
# Cross-toolchain triplet used throughout build() and package().
_targetarch=i486-mingw32
build()
{
# Cross-compile assimp for Windows with the mingw32 toolchain via an
# on-the-fly CMake toolchain file.
unset LDFLAGS
cd ${srcdir}
rm -rf build
mkdir build
cd build
echo "SET(CMAKE_SYSTEM_NAME Windows)" > win32.cmake
echo "SET(CMAKE_C_COMPILER ${_targetarch}-gcc)" >> win32.cmake
echo "SET(CMAKE_CXX_COMPILER ${_targetarch}-g++)" >> win32.cmake
echo "SET(CMAKE_RC_COMPILER ${_targetarch}-windres)" >> win32.cmake
echo "SET(CMAKE_FIND_ROOT_PATH /usr/${_targetarch})" >> win32.cmake
echo "SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)" >> win32.cmake
echo "SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)" >> win32.cmake
# Fix: this MODE_INCLUDE line was accidentally emitted twice; once is enough.
echo "SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)" >> win32.cmake
cmake ../${_basename}--${pkgver}-source-only \
-DCMAKE_TOOLCHAIN_FILE=win32.cmake \
-DCMAKE_INSTALL_PREFIX="/usr/${_targetarch}" \
-DBUILD_ASSIMP_TOOLS=NO -DBUILD_STATIC_LIB=ON \
-DCMAKE_BUILD_TYPE=RELEASE
make
}
package()
{
# Install into $pkgdir via the generated Makefiles, then ship the BSD
# license under the cross-prefix's share/licenses tree.
cd ${srcdir}/build
make DESTDIR=$pkgdir install
cd ${srcdir}/${_basename}--${pkgver}-source-only
install -Dm644 LICENSE ${pkgdir}/usr/${_targetarch}/share/licenses/${_basename}/LICENSE
}
| true |
8b1023259afceba9687f26b1d11a9fab84e6469b | Shell | hihihirokane/extensive-reading-logger | /accum.sh | UTF-8 | 7,466 | 3.75 | 4 | [] | no_license | #!/bin/sh
# accum.sh: ./accum.sh [-h|-s <lin[class]|log[base]|ln>|-d days] [days] [lin[class]|log[base]|ln]
# arguments:
# $1: every n days
# $2: lin[0-9]+ or log[0-9]+ or ln
# ANSI escapes for emphasised text in error messages (bold on / attributes off).
BOLD="\033[1m" #'\e[1;31m'
OFF="\033[m"
# Print the command synopsis to stdout and abort the whole script.
function usage(){ printf '%s\n' "Usage: ./accum.sh [-h|-s <lin[class]|log[base]|ln>|-d days] [days] [lin[class]|log[base]|ln]"; exit 1; }
function argcheck_days(){
    # Validate the day-interval argument: anything that is empty or contains
    # a non-digit is rejected (usage exits with status 1); otherwise the
    # value is stored in the global EVERY.
    case "$1" in
        *[!0-9]*|'')
            echo "Invalid argument for days: use integer"
            usage
            ;;
        *)
            EVERY=$1
            ;;
    esac
}
# function argcheck_scale(){
# case "$1" in # 値の範囲を調べてlin[class]|log[base]|lnでなければexit 1
# \(lin|log\)[0-9][0-9]*|ln)
# echo "Invalid argument for days: use integer"
# usage
# ;;
# *)
# EVERY=$1
# ;;
# esac
# }
function init(){
# Parse -s/-d/-h, then let bare positionals fill in whatever is missing:
# $1 -> interval in days (default 100), $2 -> scale spec (default lin50000).
while getopts "s:d:h" OPTION; do # colon : means the option which requires arguments
case "$OPTION" in
h) # help
usage
;;
s) # arguments: "log[base]", "ln", "lin[class]", none
SCALE=$OPTARG
;;
d) # every n day
# EVERY=$OPTARG
argcheck_days $OPTARG
;;
\?) # unrecognized option - show help
echo "\\nOption -${BOLD}$OPTARG${OFF} not allowed." > /dev/stderr
# echo "OPTIND: $OPTIND"
usage
;;
esac
# echo "OPTIND = $OPTIND"
done
shift $((OPTIND-1))
# Resolve the scale: bare "lin"/"log" expand to the defaults lin50000/log10.
# NOTE(review): test's -a/-o operators are obsolescent; kept as-is here.
if [ -z "$SCALE" -a -z "$2" ]; then
SCALE="lin50000"
elif [ "$SCALE" = "lin" -o "$2" = "lin" ]; then
SCALE="lin50000"
elif [ "$SCALE" = "log" -o "$2" = "log" ]; then
SCALE="log10"
elif [ ! -z "$2" ]; then
SCALE=$2
fi
# else
# argcheck_scale $2
# if [ ! -z "$2" ]; then
# SCALE=$2
# elif [ -z "$SCALE" ]; then
# SCALE="lin50000"
# elif [ "$SCALE" = "lin" ]; then
# SCALE="lin50000"
# elif [ "$SCALE" = "log" ]; then
# SCALE="log10"
# else # parse lin[base] or log[base] or ln
# # echo "$SCALE" | awk
# echo "invalid argument for scale"; exit 1
# fi
# if [ $1 = "-h" ]; then
# echo "Usage: ./accum.sh [-h] days [lin[10000]|log10|ln]"
# exit
# fi
# Resolve the day interval: -d wins, else positional $1, else 100.
if [ -z "$EVERY" -a -z "$1" ]; then
EVERY=100
elif [ ! -z "$1" ]; then
argcheck_days $1
fi
}
function print_record(){
# Emit one histogram row: "<date>\t<cumulative count>\t<stars>".
# $1 = date label, $2 = cumulative word count, $3 = scale spec.
printf "%s\t%8d\t" $1 $2
# The awk program draws one "*" per SCALE units (lin<N>), or per power of
# the base (log<N> / ln); an unrecognised scale prints "no record".
awk -v DATA=$2 -v SCALE=$3 \
'
function print_linear(scale,data){
for(i=data/scale-1;i>=0;i--) printf "*"
}
function print_log(base,data){
for(i=log(data)/log(base)-1;i>=0;i--) printf "*"
}
BEGIN{
logbase["ln"]=exp(1)
if(SCALE~/lin/){ # linear-scale
sub(/lin/,"",SCALE)
print_linear(SCALE,DATA)
}
else if(SCALE~/ln/){ # log-scale
print_log(logbase[SCALE], DATA)
}
else if(SCALE~/log/){ # log-scale
sub(/log/,"",SCALE)
print_log(SCALE, DATA)
}
else{
printf "no record"
exit
}
# if(SCALE~/log10$/) # log-scale
# for(i=log('$2')/log(10)-1;i>0;i--) printf "*"
# else if(SCALE~/lin10000$/) # linear-scale
# for(i='$2'/10000-1;i>0;i--) printf "*"
# else if(SCALE~/lin25000$/) # linear-scale
# for(i='$2'/25000-1;i>0;i--) printf "*"
# else if(SCALE~/lin50000$/) # linear-scale
# for(i='$2'/50000-1;i>0;i--) printf "*"
# else # if(SCALE~/lin100000$/) # linear-scale
# for(i='$2'/100000-1;i>0;i--) printf "*"
printf"\n"}'
}
function print_scale(){
# Print the histogram's horizontal axis: a ruler line with "|" ticks every
# EVERY*TABW columns, then the tick values -- plain numbers for lin<N>
# scales, "base^n" / "e^n" for log/ln. $1 = scale spec as in print_record.
awk -v SCALE=$1 \
'
function print_scale_linear_old(scale, column, every, tabw){
for(i=0;i<column/tabw;i++) if(i % every == 0) printf "%d\t", i * tabw * scale; else printf "\t";
}
function print_scale_linear(scale, column, every, tabw){
for(i = 0; i < column/tabw/every; i++){
num = i * tabw * every * scale
digits = (i == 0) ? 1 : int(log(num)/log(10)) + 1
skipamount = (i == 0) ? 0 : tabw * every - digits
for(j = 0; j < skipamount; j++) printf " "
printf "%d", num
}
}
function print_scale_log(base, column, every, tabw){
for(i = 0; i < column/tabw/every; i++){
if (i == 1) excess = digits - 1; else excess = 0
# printf"digits: %d, excess: %d\n", digits, excess
num = i * tabw * every
numdigit = (num == 0) ? 1 : int(log(num)/log(10)) + 1
basedigit = int(log(base)/log(10)) + 1
if(base == logbase["ln"])
digits = numdigit + 2
else if(base == int(base))
digits = numdigit + basedigit + 1
# else
# digits = numdigit + basedigit + 3
if (i == 0) digits = 2 + basedigit
skipamount = (i == 0) ? 0 : tabw * every - digits - excess
for(j = 0; j < skipamount; j++) printf " "
if(base == logbase["ln"])
printf "e^%d", i * tabw * every
else if(base == int(base))
printf "%d^%d", base, i * tabw * every
# else
# printf "%.02f^%d\t", base, i * tabw * every
}
}
function print_scale_log_old(base, column, every, tabw){
if(base == logbase["ln"]){
for(i=0;i<column/tabw;i++) if(i % every == 0) printf "e^%d\t", i*tabw; else printf "\t";
}else if(base == int(base)){
for(i=0;i<column/tabw;i++) if(i % every == 0) printf "%d^%d\t", base, i*tabw; else printf "\t";
}else{
for(i=0;i<column/tabw;i++) if(i % every == 0) printf "%.02f^%d\t", base, i*tabw; else printf "\t";
}
}
BEGIN{
logbase["ln"]=exp(1)
COLUMN=121; EVERY=3; TABW=8;
printf "\t\t\t\t"
for(i = 0; i < COLUMN; i++) if(i % (EVERY*TABW) == 0) printf "|"; else printf "-";
printf "\n\t\t\t\t"
if(SCALE~/lin/){
sub(/lin/,"",SCALE)
# EVERY = int((EVERY * TABW - (log(SCALE)/log(10) + 1)) / TABW) + 1
# print_scale_linear_old(SCALE, COLUMN, EVERY, TABW)
print_scale_linear(SCALE, COLUMN, EVERY, TABW)
}
else if(SCALE~/ln/){
print_scale_log(logbase[SCALE], COLUMN, EVERY, TABW)
}
else if(SCALE~/log/){
sub(/log/,"",SCALE)
print_scale_log(SCALE, COLUMN, EVERY, TABW)
}
else{
printf "There is no such a scale\n"
exit
}
printf"\n"
}'
}
# --- main -----------------------------------------------------------------
# Walk day by day from the first record to today, accumulating word counts
# from dailycount.sh and printing a histogram row every $EVERY days.
# NOTE(review): `date -jf` is BSD/macOS syntax; this will not run on GNU date.
init $* # $1 $2
A_DAY=`head -1 read.done | awk -Ft '{print $6}'` # date in integer
ADAY=`date -jf %Y.%m.%d ${A_DAY} +%Y%m%d` # date with a format
TODAY=`date +%Y.%m.%d`
echo "SINCE $A_DAY as day 0 and then Every $EVERY Days UNTIL $TODAY"
NEXTDAY=`date -jf %Y%m%d ${ADAY} +%Y%m%d` # next date in integer
# NEXT_DAY=`date -jf %Y%m%d ${NEXTDAY} +%Y.%m.%d`
# echo "Every $EVERY Days"
# echo "UNTIL $TODAY"
echo "SCALE: $SCALE"
DAYS=0
WSUM=0
# print_scale $SCALE # for debugging
# exit # for debugging
./dailycount.sh > .dailycount.tmp
# Each record line is "<date>\t<count>"; the || [[ -n "$line" ]] keeps a
# final line without a trailing newline from being dropped.
while IFS='' read -r line || [[ -n "$line" ]]; do
D_RECORD=`echo "$line" | awk -Ft '{print$1}'`
W=`echo "$line" | awk -Ft '{print$2}'`
D=`date -jf %Y.%m.%d ${D_RECORD} +%Y%m%d`
WSUM=$((WSUM+W))
# printf "%s\t%s\n" "${ADAY}" "${D}"
# while [ "$ADAY" != "$D" ]; do
while [ $ADAY -le $D ]; do
# printf "%s\t%s\n" "${A_DAY}" "${D_RECORD}"
if [ $ADAY -eq $NEXTDAY ]; then
print_record $A_DAY $WSUM $SCALE # $BASE
NEXTDAY=`date -jf %Y%m%d -v+${EVERY}d ${ADAY} +%Y%m%d`
# NEXT_DAY=`date -jf %Y%m%d ${NEXTDAY} +%Y.%m.%d`
fi
# NOTE(review): $((DAY+1)) reads the undefined DAY, so DAYS is always 1 and
# ADAY advances one day per iteration -- which appears to be the intent;
# confirm before "fixing" the apparent DAYS typo (DAYS+1 would be wrong).
DAYS=$((DAY+1))
# printf "%s\t%s\n" "${A_DAY}" "${D_RECORD}"
# ADAY=`date -jf %Y.%m.%d -v+${DAYS}d ${ADAY} +%Y.%m.%d`
ADAY=`date -jf %Y%m%d -v+${DAYS}d ${ADAY} +%Y%m%d`
A_DAY=`date -jf %Y%m%d ${ADAY} +%Y.%m.%d`
done
done < "./.dailycount.tmp"
print_scale $SCALE
# print_scale_new $SCALE
| true |
ea7718f8d8cb5bb437a9ce460d9a2a7dc27c612f | Shell | Ezibenroc/cashew | /upload_zenodo.sh | UTF-8 | 704 | 3.484375 | 3 | [
"MIT"
] | permissive | # This script clones the two g5k_test repositories (the large one with all the data, the smaller one with the website).
# Then, it deletes the .git directories as we do not need the history.
# Finally, it makes a zip archives with both directories and upload it to zenodo.
set -u
set -e
echoerr() { echo "$@" 1>&2; }
# Positional arguments; set -u aborts with a clear error when one is missing.
data_repo=$1
website_repo=$2
zenodo_id=$3
zenodo_tokenpath=$4
cd /tmp
# Quote user-supplied values so URLs/paths containing spaces survive.
git clone "$data_repo" g5k_test
git clone "$website_repo" website
# Fix: {a,b}/.git relied on bash brace expansion; under /bin/sh the literal
# pattern matched nothing and both .git dirs were silently kept.
rm -rf g5k_test/.git website/.git
mv website g5k_test
zip -r g5k_test.zip g5k_test
git clone git@github.com:jhpoelen/zenodo-upload.git
token=$(cat "$zenodo_tokenpath")
export ZENODO_TOKEN=$token
bash zenodo-upload/zenodo_upload.sh "$zenodo_id" g5k_test.zip
| true |
1d2d6f4c24c78aa246838d78731ba716e03f62b0 | Shell | RobertYCXu/extreme-donkeys | /server/setup.sh | UTF-8 | 148 | 2.53125 | 3 | [] | no_license | #!/bin/sh
# Create (if needed) and activate the Python virtual environment in the
# current directory.  NOTE: activation only affects the calling shell, so
# this script must be sourced (". ./setup.sh"), not executed.

# pip-selfcheck.json is only written by some virtualenv/pip versions, so it is
# an unreliable marker; bin/activate reliably indicates an existing env.
if [ ! -f bin/activate ]; then
    virtualenv .
fi

activate () {
    . ./bin/activate
}

activate
| true |
78f1daad2d60aad91e5c1184fdd2e3689d9d081c | Shell | ustb-asc/ASC-USTB | /Script/initial.sh | UTF-8 | 4,171 | 2.875 | 3 | [] | no_license | #!/bin/bash -e
# ------parament define------------------
target=$1;
server_ip="192.168.1.1";
# complie env install
yum install -y mpich \
gfortran \
gcc-c++ \
python \
python-pip \
graphviz \
nfs-utils \
rpcbind \
tmux \
git
pip install gprof2dot # gprof2dot change gmon.out to png
# -----------------start service----------------
service rpcbind start
service nfs start
chkconfig rpcbind on
chkconfig nfs on
if [[ $target == "Master" ]]
then
mkdir /nfs
echo "/nfs ${server_ip}(rw,no_root_squash,no_subtree_check)" > /etc/exports
exportfs -a
fi
if [[ $target == "Slave" ]]
then
mkdir -p /nfs
mount -t nfs ${server_ip}:/nfs /nfs
echo "${server_ip}:/nfs /nfs nfs rw,tcp,intr 0 1" >> /etc/fstab
fi
# Intel VTune install process
# ----------------------------- requires vtune_amplifier_2018.tar.gz in $HOME
if [[ $target == "VTune" ]]
then
        cd ~
        tar -zxvf vtune_amplifier_2018.tar.gz
        cd vtune_amplifier_2018
        sh install.sh
        # Install Processing: Accept + Free
        source /opt/intel/vtune_amplifier_2018.0.2.525261/amplxe-vars.sh
        # Run command: amplxe-cl && amplxe-gui
        # sysctl.conf uses dotted keys; "kernel/yama/ptrace_scope" is the
        # /proc/sys path form and would be ignored by sysctl -p.
        # sudo tee keeps the append working when the script itself is unprivileged.
        echo "kernel.yama.ptrace_scope = 1" | sudo tee -a /etc/sysctl.conf
        sudo sysctl -p
fi
# atalas install (Stable 3.10.3)
# Builds ATLAS (BLAS implementation) from source when target is "atalas" or unset.
if [[ $target == "atalas" ]] || [[ -z "$target" ]]
then
        cd ~
        wget -v https://jaist.dl.sourceforge.net/project/math-atlas/Stable/3.10.3/atlas3.10.3.tar.bz2
        tar -jxvf atlas3.10.3.tar.bz2
        cd ATLAS
        # ATLAS requires an out-of-source build directory.
        mkdir build
        cd build
        ../configure
        make
fi

# hpl install (Lastest 2.2)
# Fetches HPL 2.2 and generates a generic makefile to be customized below.
if [[ $target == "hpl" ]] || [[ -z "$target" ]]
then
        cd ~
        wget -v http://www.netlib.org/benchmark/hpl/hpl-2.2.tar.gz
        tar -zxvf hpl-2.2.tar.gz
        cd hpl-2.2/setup
        # make_generic produces setup/Make.Linux_PII_CBLAS_gm as a template.
        sh make_generic
        cd ../
        cp setup/Make.Linux_PII_CBLAS_gm Make.test
fi
# -------------------------- Make.test configuration ----------------------------
# Patch the generated HPL makefile in place (line numbers match the 2.2 template).
# Use '|' as the sed delimiter because the replacement text contains '/'.
# Plain 'sed -i' (without -n): '-n' suppresses auto-print, so combined with
# '-i' it would truncate the file to (almost) nothing.
sed -i '64s|^.*$|ARCH = test|' Make.test
sed -i '70s|^.*$|TOPdir = $(HOME)/hpl-2.2|' Make.test
sed -i '84s|^.*$|MPdir = /usr/local/mpich|' Make.test
sed -i '85s|^.*$|MPinc = -I$(MPdir)/include|' Make.test
sed -i '86s|^.*$|MPlib = $(MPdir)/lib/libmpi.so|' Make.test
sed -i '95s|^.*$|LAdir = $(HOME)/ATLAS/build/lib|' Make.test
# ------set template-------
# ARCH = test
# TOPdir = $(HOME)/hpl-2.2
# MPdir = /usr/local/mpich
# MPinc = -I $(MPdir)/include
# MPlib = $(MPdir)/lib/libmpich.a
# LAdir = $(HOME)/ATLAS/build/lib
# LAlib = $(LAdir)/libf77blas.a $(LAdir)/libatlas.a
make arch=test
cd bin/test
# ------single node-------
./xhpl
# ------Analyses----------
#amplxe-cl -collect hotspots -r xhpl_hot ./xhpl
# ------multi node---------
# machinefile lists every node
# (fixed: missing space before ']]', unbalanced quote, and 'if' instead of 'fi')
if [[ $target == "mult" ]]
then
        mpiexec -f ~/mpi_testing/machinefile -n 32 ./xhpl
fi
# --------------------------test examples-----------------------
# HPLinpack benchmark input file
# Innovative Computing Laboratory, University of Tennessee
# HPL.out output file name (if any)
# 6 device out (6=stdout,7=stderr,file)
# 1 # of problems sizes (N)
# 5040 Ns
# 1 # of NBs
# 128 NBs
# 0 PMAP process mapping (0=Row-,1=Column-major)
# 1 # of process grids (P x Q)
# 1 Ps
# 1 Qs
# 16.0 threshold
# 1 # of panel fact
# 2 PFACTs (0=left, 1=Crout, 2=Right)
# 1 # of recursive stopping criterium
# 4 NBMINs (>= 1)
# 1 # of panels in recursion
# 2 NDIVs
# 1 # of recursive panel fact.
# 1 RFACTs (0=left, 1=Crout, 2=Right)
# 1 # of broadcast
# 1 BCASTs (0=1rg,1=1rM,2=2rg,3=2rM,4=Lng,5=LnM)
# 1 # of lookahead depth
# 1 DEPTHs (>=0)
# 2 SWAP (0=bin-exch,1=long,2=mix)
# 64 swapping threshold
# 0 L1 in (0=transposed,1=no-transposed) form
# 0 U in (0=transposed,1=no-transposed) form
# 1 Equilibration (0=no,1=yes)
# 8 memory alignment in double (> 0)
| true |
cc273560911a50d56aa1d3d07c3b16527c2d5c72 | Shell | hybridadmin/misc-scripts | /wsl/enable_systemd.sh | UTF-8 | 3,170 | 3.609375 | 4 | [] | no_license | #! /usr/bin/env bash
set -e

# Ubuntu release number (e.g. "20.04"), used to pick the Microsoft repo URL.
# NOTE(review): grep "_ID" may match more than one key in /etc/os-release
# (e.g. VERSION_ID, IMAGE_ID) — confirm it resolves to a single version string.
UBUNTU_VERSION=$(cat /etc/os-release | grep "_ID" | cut -d '"' -f2)
# Latest genie release tag from the GitHub API, with the leading 'v' stripped.
GENIE_VERSION=$( curl -s https://api.github.com/repos/arkane-systems/genie/releases/latest | grep -oP '"tag_name": "\K(.*)(?=")' | sed 's/^v//g')
GENIE_FILE="systemd-genie_${GENIE_VERSION}_amd64"
# Where the release .deb is downloaded to / installed from.
GENIE_FILE_PATH="/tmp/${GENIE_FILE}.deb"
# NOTE(review): GENIE_DIR_PATH is never used below — candidate for removal.
GENIE_DIR_PATH="/tmp/${GENIE_FILE}"
# Download the systemd-genie release .deb into ${GENIE_FILE_PATH}.
function download_deb_package() {
  rm -f "${GENIE_FILE_PATH}"
  # -O pins the output path so install_from_deb always finds the file at
  # ${GENIE_FILE_PATH}, and the URL reuses ${GENIE_FILE} instead of
  # re-assembling the filename by hand (kept consistent with the variables above).
  wget -O "${GENIE_FILE_PATH}" \
    "https://github.com/arkane-systems/genie/releases/download/v${GENIE_VERSION}/${GENIE_FILE}.deb"
}
# Fetch the systemd-genie release package, install it with dpkg, and
# remove the downloaded file afterwards.
function install_from_deb() {
  download_deb_package
  sudo dpkg -i "$GENIE_FILE_PATH"
  rm -rf "$GENIE_FILE_PATH"
}
# Install systemd-genie from the wsl-transdebian apt repository.
function install_from_repo() {
  # Trust the repository signing key.
  sudo curl -fsSL https://arkane-systems.github.io/wsl-transdebian/apt/wsl-transdebian.gpg -o /etc/apt/trusted.gpg.d/wsl-transdebian.gpg
  sudo chmod a+r /etc/apt/trusted.gpg.d/wsl-transdebian.gpg
  # Overwrite (tee without -a) so re-running the script does not keep
  # appending duplicate entries to the sources list.
  sudo tee /etc/apt/sources.list.d/wsl-transdebian.list > /dev/null << EOL
deb https://arkane-systems.github.io/wsl-transdebian/apt/ $(lsb_release -cs) main
deb-src https://arkane-systems.github.io/wsl-transdebian/apt/ $(lsb_release -cs) main
EOL

  sudo apt update && sudo apt install -y systemd-genie
}
# Install runtime prerequisites for genie: the Microsoft package feed,
# the .NET runtime, and the systemd/daemonize helpers.
function install_dependencies() {
  sudo apt-get update

  # Register the Microsoft package repository for this Ubuntu release.
  wget --content-disposition \
    "https://packages.microsoft.com/config/ubuntu/${UBUNTU_VERSION}/packages-microsoft-prod.deb"
  sudo dpkg -i packages-microsoft-prod.deb
  rm packages-microsoft-prod.deb

  # -y keeps the install non-interactive (it was missing here and could
  # hang the script waiting for confirmation).
  sudo apt-get install -y apt-transport-https
  sudo apt-get update
  sudo apt-get install -y \
    daemonize dbus policykit-1 systemd util-linux systemd-container dotnet-runtime-5.0 lsb-release

  # genie expects daemonize at /usr/sbin; replace any stale symlink.
  sudo rm -f /usr/sbin/daemonize
  sudo ln -s /usr/bin/daemonize /usr/sbin/daemonize
}
# Make login shells enter the genie bottle, and (when /mnt/c is present)
# drop Windows startup scripts that mount the WSL data disk at boot.
function configure_shell_profile(){
  if [[ "$SHELL" =~ (zsh) ]]; then
    PROFILE_FILE="/etc/zsh/zprofile"
  else
    PROFILE_FILE="/etc/profile"
  fi

  # Guard against appending the genie hook again on every run.
  if ! sudo grep -q 'INSIDE_GENIE' "$PROFILE_FILE" 2>/dev/null; then
    echo -e "if [[ ! -v INSIDE_GENIE ]]; then\n\t exec /usr/bin/genie -s\nfi" | sudo tee -a "$PROFILE_FILE" > /dev/null
  fi

  if [ -d "/mnt/c" ]; then
    # Quote the here-doc delimiter ('EOL') so backslashes are written
    # literally: unquoted, '\\.\PHYSICALDRIVE1' would collapse to
    # '\.\PHYSICALDRIVE1' in the generated .bat file.
    sudo tee "/mnt/c/ProgramData/Microsoft/Windows/Start Menu/Programs/Startup/start-wsl-genie2.bat" > /dev/null << 'EOL'
start /wait /min wsl genie -i
start /wait /min diskpart /s c:\ProgramData\mount-disk.bat
start /wait /min wsl --mount \\.\PHYSICALDRIVE1 --bare
start /wait /min wsl --user root mount /home/meister/data-vol
EOL

    # Overwrite (not append) so re-runs do not duplicate the diskpart commands.
    sudo tee /mnt/c/ProgramData/mount-disk.bat > /dev/null << 'EOL'
select vdisk file=C:\ProgramData\wsl-disks\wsl-data-dsk.vhdx
attach vdisk
EOL
  fi
}
# Entry point: install prerequisites, then install genie from the apt
# repository when reachable (falling back to the GitHub .deb), and finally
# hook genie into the login shell profile.
function main() {
  install_dependencies

  # Probe the repository; -s (not -v) keeps the output parseable, and the
  # last HTTP line carries the final status code.
  STATUS_CODE=$(curl -s -I https://arkane-systems.github.io/wsl-transdebian/apt | grep HTTP | tail -1 | awk '{print $2}')

  # Quote the variable: unquoted '[ -n $var ]' is always true when $var is empty.
  if [ -n "$STATUS_CODE" ] && [[ $STATUS_CODE == 301 || $STATUS_CODE == 200 ]]; then
    install_from_repo
  else
    install_from_deb
  fi

  configure_shell_profile
}

main
| true |
883cc1ede89e741dead846c2342768d2d817495e | Shell | dubanoze/actility_gw | /lrr/suplog/tests/dotest | UTF-8 | 1,884 | 3.765625 | 4 | [] | no_license | #!/bin/sh
# Directory of this script (trailing slash kept for later 'cd $DN').
DN="$(dirname $0)/"
# NOTE(review): ROOTACT must be set in the environment before running — confirm.
DIRTEST="$ROOTACT/lrr/suplog/tests"
# Scratch directory where each test writes its output files.
DIRTMP="/tmp/lrrnetconfig"
# Location of the command shells exercised by the tests.
CMDDIR="$ROOTACT/lrr/com/cmd_shells/"
# checkFiles <result_dir> <test_dir>
# Compares the files produced in <result_dir> against the expected files in
# <test_dir>/outfiles. Returns 0 when everything matches, 1 on any difference.
checkFiles()
{
	dirout="$1"
	dirtst="$2/outfiles"

	# First the two directories must contain exactly the same file names.
	lsres="$(ls $dirout)"
	lstst="$(ls $dirtst | sed 's?.*/??')"
	if [ "$lsres" != "$lstst" ]
	then
		echo "File list different !"
		echo "result="
		echo "'$lsres'"
		echo "test="
		echo "'$lstst'"
		return 1
	fi

	savdir=$(pwd)
	for f in $lsres
	do
		# Fresh scratch area for unpacking archives before diffing them.
		dirtmpcheck="$DIRTMP/check"
		[ ! -d "$dirtmpcheck" ] && mkdir -p "$dirtmpcheck"
		[ ! -d "$dirtmpcheck/res" ] && mkdir -p "$dirtmpcheck/res"
		[ ! -d "$dirtmpcheck/tst" ] && mkdir -p "$dirtmpcheck/tst"
		case $f in
		# Archives: unpack both sides and compare the extracted trees.
		*.tar.gz)
			cd $dirtmpcheck/tst
			tar xvf $DIRTEST/$dirtst/$f >/dev/null
			cd $dirtmpcheck/res
			tar xvf $dirout/$f >/dev/null
			cd $savdir
			resdiff=$(diff -r "$dirtmpcheck/tst" "$dirtmpcheck/res")
			;;
		# NOTE(review): .md5 files are only checked for existence (ls),
		# their content is never compared — confirm that is intended.
		*.md5)
			resdiff=$(ls "$dirout/$f" "$dirtst/$f")
			;;
		# Anything else: plain textual diff.
		*)
			resdiff=$(diff "$dirout/$f" "$dirtst/$f")
			;;
		esac
		# $? here is the status of the last command in the taken case branch
		# (the diff/ls inside the command substitution).
		if [ $? != 0 ]
		then
			echo "Difference in $f:"
			echo "$resdiff"
			rm -rf "$dirtmpcheck"
			cd $savdir
			return 1
		fi
	done
	rm -rf "$dirtmpcheck"
	cd $savdir
	return 0
}
# runTest <test_dir>
# Runs one test: sources <test_dir>/conf (which must define SYSTEM and CMD),
# executes the command into $DIRTMP/out and compares the results with checkFiles.
# Returns 0 on success, 1 on mismatch.
runTest()
{
	test="$1"

	# Start from an empty scratch directory.
	if [ -d "$DIRTMP" ]
	then
		rm -f $DIRTMP/*
	else
		mkdir -p $DIRTMP
	fi

	cd $test
	# conf provides SYSTEM and CMD for this test — confirm against conf files.
	. conf
	[ "$VERBOSE" = "1" ] && echo "$CMDDIR/$SYSTEM/$CMD > $DIRTMP/out"
	$CMDDIR/$SYSTEM/$CMD > $DIRTMP/out
	cd ..
	checkFiles $DIRTMP $test
	[ $? -ne 0 ] && return 1
	return 0
}
# doTest <test_dir>
# Runs one test and prints an OK/ERROR line including its description file.
doTest()
{
	if runTest "$1"
	then
		echo "TEST $1: OK ($(cat $1/desc))"
	else
		echo "TEST $1: ERROR ($(cat $1/desc))"
	fi
}
# Run from the script's own directory so test sub-directories resolve.
cd $DN

# Optional -v flag: echo the command line before running each test.
if [ "$1" = "-v" ]
then
	VERBOSE=1
	shift
fi

# With an argument: run that single test; without: run every sub-directory.
TEST="$1"
if [ ! -z "$TEST" ]
then
	if [ ! -d "$TEST" ]
	then
		echo "Test $TEST not found !"
		exit 1
	else
		doTest "$TEST"
	fi
else
	LST=$(ls)
	for d in $LST
	do
		# Skip plain files; only directories are tests.
		[ ! -d "$d" ] && continue
		doTest "$d"
	done
fi
| true |
c3b3995edb835157e5768199499c94ce86dd1756 | Shell | stiegerb/daq2val | /setenv-daq2.sh | UTF-8 | 895 | 2.625 | 3 | [] | no_license | # XDAQ
# --- XDAQ environment (this file is meant to be sourced) --------------------
export XDAQ_ROOT=/opt/xdaq
export XDAQ_LOCAL=/opt/xdaq
export XDAQ_SETUP_ROOT=${XDAQ_ROOT}/share

# Platform string is "<arch>_<os tag>"; any non-x86_64 arch collapses to x86.
export XDAQ_PLATFORM=$(uname -m)
[ "$XDAQ_PLATFORM" = "x86_64" ] || export XDAQ_PLATFORM=x86
checkos=$("$XDAQ_ROOT/config/checkos.sh")
export XDAQ_PLATFORM="${XDAQ_PLATFORM}_${checkos}"

export XDAQ_RUBUILDER=${XDAQ_ROOT}
export XDAQ_DOCUMENT_ROOT=${XDAQ_ROOT}/htdocs
export LD_LIBRARY_PATH=${XDAQ_RUBUILDER}/lib:${XDAQ_ROOT}/lib:${LD_LIBRARY_PATH}
export PATH=${PATH}:${XDAQ_RUBUILDER}/bin:${XDAQ_ROOT}/bin

# --- RU builder tester ------------------------------------------------------
# Directory containing this file (resolved even when sourced).
WORKINGDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
export RUB_TESTER_HOME=${WORKINGDIR}
export TESTS_SYMBOL_MAP=${RUB_TESTER_HOME}/daq2SymbolMap_Oct7.txt
export TEST_TYPE=daq2
export PATH=${PATH}:${RUB_TESTER_HOME}/daq2Control
export PATH=${PATH}:${RUB_TESTER_HOME}/daq2Control/scripts
export XDAQ_SHARED=/tmp
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.