blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
09f4a622cd652e381d337d66a74ba58d06c3b408
|
Shell
|
joshkennedy/dotfiles
|
/install/brew-cask.sh
|
UTF-8
| 538
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
# Sourced installer step: skip entirely unless we are on macOS with Homebrew present.
if ! is-macos -o ! is-executable brew; then
  echo "Skipped: Homebrew-Cask"
  return
fi

brew tap caskroom/cask

# GUI applications installed through Homebrew Cask.
casks=(
  alfred
  dropbox
  firefox
  glimmerblocker
  google-chrome
  macdown
  slack
  sourcetree
  spotify
  atom
  sublime-text
  virtualbox
)
brew cask install "${casks[@]}"

# Quick Look Plugins (https://github.com/sindresorhus/quick-look-plugins)
ql_plugins=(
  qlcolorcode
  qlstephen
  qlmarkdown
  quicklook-json
  qlprettypatch
  quicklook-csv
  betterzipql
  qlimagesize
  webpquicklook
  suspicious-package
)
brew cask install "${ql_plugins[@]}"
| true
|
33b3312812d994f537f48ba1114a886f9f194418
|
Shell
|
banzaicloud/pipeline-cluster-images
|
/scripts/bootstrap.sh
|
UTF-8
| 2,962
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Image bootstrap: installs a pinned Docker + Kubernetes toolchain on Ubuntu
# Xenial, pre-pulls the cluster container images, then scrubs per-instance
# state (SSH keys, machine-id, cloud-init data) so the VM can be baked into
# a reusable machine image.
#
# Required env vars: KUBERNETES_VERSION and every *_RELEASE_TAG referenced
# in the image list below.
set -o nounset
set -o pipefail
set -o errexit
# Suppress debconf prompts during the unattended build.
export DEBIAN_FRONTEND=noninteractive
# Register the Kubernetes and Docker apt repositories and their signing keys.
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
echo "deb https://download.docker.com/linux/ubuntu xenial stable" > /etc/apt/sources.list.d/docker-ce.list
apt-get update -y
# docker-ce and kubernetes-cni are hard-pinned; kubelet/kubeadm follow
# $KUBERNETES_VERSION. The remainder is general ops/debug tooling.
apt-get install -y \
apt-transport-https \
socat \
ebtables \
cloud-utils \
cloud-init \
cloud-initramfs-growroot \
docker-ce=17.12.0~ce-0~ubuntu \
kubelet="${KUBERNETES_VERSION}-00" \
kubeadm="${KUBERNETES_VERSION}-00" \
kubernetes-cni=0.6.0-00 \
sysstat \
iotop \
rsync \
ngrep \
tcpdump \
atop \
python-pip \
jq
# We don't want to upgrade them.
# NOTE(review): kubectl is held here but is not in the install list above —
# presumably it arrives as a dependency of kubeadm; confirm.
apt-mark hold kubeadm kubectl kubelet kubernetes-cni docker-ce
systemctl enable docker
systemctl start docker
# Upgrade everything else, keeping existing config files on conflict.
apt-get -o Dpkg::Options::="--force-confold" upgrade -q -y --force-yes
# Check this list. :)
# kubectl get pods --all-namespaces -o jsonpath="{..image}" |\
# tr -s '[[:space:]]' '\n' |\
# sort |\
# uniq -c
# Images pre-pulled into the image so first boot does not hit the registries.
images=(
"banzaicloud/spark-driver:${SPARK_RELEASE_TAG}"
"banzaicloud/spark-driver-py:${SPARK_RELEASE_TAG}"
"banzaicloud/spark-executor:${SPARK_RELEASE_TAG}"
"banzaicloud/spark-executor-py:${SPARK_RELEASE_TAG}"
"banzaicloud/spark-init:${SPARK_RELEASE_TAG}"
"banzaicloud/spark-resource-staging-server:${SPARK_RELEASE_TAG}"
"banzaicloud/spark-shuffle:${SPARK_RELEASE_TAG}"
"banzaicloud/zeppelin-server:${ZEPPELIN_RELEASE_TAG}"
"gcr.io/google_containers/etcd-amd64:${ETCD_RELEASE_TAG}"
"gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:${K8S_DNS_RELEASE_TAG}"
"gcr.io/google_containers/k8s-dns-kube-dns-amd64:${K8S_DNS_RELEASE_TAG}"
"gcr.io/google_containers/k8s-dns-sidecar-amd64:${K8S_DNS_RELEASE_TAG}"
"gcr.io/google_containers/kube-apiserver-amd64:${KUBERNETES_RELEASE_TAG}"
"gcr.io/google_containers/kube-controller-manager-amd64:${KUBERNETES_RELEASE_TAG}"
"gcr.io/google_containers/kube-proxy-amd64:${KUBERNETES_RELEASE_TAG}"
"gcr.io/google_containers/kube-scheduler-amd64:${KUBERNETES_RELEASE_TAG}"
"gcr.io/google_containers/kube-state-metrics:v1.2.0"
"gcr.io/google_containers/pause-amd64:3.0"
"gcr.io/kubernetes-helm/tiller:${HELM_RELEASE_TAG}"
"banzaicloud/pushgateway:${PUSHGATEWAY_RELEASE_TAG}"
"prom/prometheus:${PROMETHEUS_RELEASE_TAG}"
"jimmidyson/configmap-reload:v0.1"
"traefik:${TRAEFIK_RELEASE_TAG}"
"weaveworks/weave-npc:${WEAVE_RELEASE_TAG}"
"weaveworks/weave-kube:${WEAVE_RELEASE_TAG}"
)
for i in "${images[@]}" ; do docker pull "${i}" ; done
## Cleanup packer SSH key and machine ID generated for this boot
rm /root/.ssh/authorized_keys
rm /home/ubuntu/.ssh/authorized_keys
rm /etc/machine-id
rm -rf /var/lib/cloud/instances/*
# Recreate an empty machine-id so systemd regenerates one on first boot.
touch /etc/machine-id
| true
|
26a5b8a6804e229ca1846596ec09a2a90a484a92
|
Shell
|
wonmin82/dotfiles
|
/install-ubuntu-config.sh
|
UTF-8
| 3,240
| 3.375
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
# Install Ubuntu dotfiles/config for the invoking user and — when run as root
# via sudo — also for root's home and system-wide locations.
#
# Usage: install-ubuntu-config.sh [-f|--force]
#   -f, --force   overwrite ~/.gitconfig even when it already exists
#
# Fix over original: every path/uid expansion is now quoted so directories
# containing spaces (or an unset SUDO_USER) cannot break word-splitting.

scriptfile="$(readlink -f "$0")"
scriptpath="$(readlink -m "$(dirname "${scriptfile}")")"
configspath="${scriptpath}/ubuntu"

flag_superuser=false
flag_force=false

[ "${EUID}" == 0 ] && flag_superuser=true

# Target the invoking user when running under sudo, not root itself.
uid=${EUID}
[ "${flag_superuser}" == true ] && [ -n "${SUDO_USER:-}" ] && uid=${SUDO_UID}
gid=$(id -g "${uid}")
home="$(getent passwd "${uid}" | cut -f6 -d:)"

root_uid=$(id -u root)
root_gid=$(id -g "${root_uid}")
root_home="$(getent passwd "${root_uid}" | cut -f6 -d:)"

while [[ $# -gt 0 ]]; do
	case $1 in
	-f | --force)
		flag_force=true
		shift
		;;
	*)
		echo "Unknown option."
		exit 1
		;;
	esac
done

# Install one config file (mode 644) / directory (mode 755) for the target user.
user_file() { install -v -m 644 -o "${uid}" -g "${gid}" -D "$1" -t "$2"; }
user_dir() { install -v -m 755 -o "${uid}" -g "${gid}" -d "$1"; }
# Same, owned by root (root's home and system-wide paths).
root_file() { install -v -m 644 -o "${root_uid}" -g "${root_gid}" -D "$1" -t "$2"; }
root_dir() { install -v -m 755 -o "${root_uid}" -g "${root_gid}" -d "$1"; }

# Per-user dotfiles.
user_file "${configspath}/.bash_aliases" "${home}/"
user_file "${configspath}/.inputrc" "${home}/"
user_file "${configspath}/.tmux.conf" "${home}/"
# Never clobber an existing ~/.gitconfig unless --force was given.
if [ ! -f "${home}/.gitconfig" ] || [ "${flag_force}" == true ]; then
	user_file "${configspath}/.gitconfig" "${home}/"
fi
user_file "${configspath}/.clang-format" "${home}/"
user_file "${configspath}/.wgetrc" "${home}/"
user_file "${configspath}/.curlrc" "${home}/"
user_file "${configspath}/.axelrc" "${home}/"

# ~/.config tree.
user_dir "${home}/.config/"
user_file "${configspath}/flake8" "${home}/.config/"
user_dir "${home}/.config/pip/"
user_file "${configspath}/pip.conf" "${home}/.config/pip/"
user_dir "${home}/.config/python_keyring/"
user_file "${configspath}/keyringrc.cfg" "${home}/.config/python_keyring/"
user_dir "${home}/.config/fontconfig/"
user_file "${configspath}/fonts.conf" "${home}/.config/fontconfig/"

# Everything below requires root.
[ "${flag_superuser}" == false ] && exit 0

root_dir "${root_home}/.config/pip/"
root_file "${configspath}/pip.conf" "${root_home}/.config/pip/"
root_dir "${root_home}/.config/fontconfig/"
root_file "${configspath}/fonts.conf" "${root_home}/.config/fontconfig/"
root_file "${configspath}/local.conf" /etc/fonts/
root_file "${configspath}/preferences.d/ppa" /etc/apt/preferences.d/
root_file "${configspath}/preferences.d/runit" /etc/apt/preferences.d/
root_file "${configspath}/preferences.d/docker" /etc/apt/preferences.d/
root_file "${configspath}/preferences.d/llvm" /etc/apt/preferences.d/
root_file "${configspath}/apt.conf.d/99dpkg-options" /etc/apt/apt.conf.d/
root_file "${configspath}/apt.conf.d/99retries" /etc/apt/apt.conf.d/

exit 0
| true
|
bb2d29fcd7939e6923a5aeced3405d8692aa7e01
|
Shell
|
lived-tw/dokku-etcd
|
/commands
|
UTF-8
| 5,241
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Dokku plugin command dispatcher for etcd services: create/destroy/start/stop/
# restart/link/unlink/expose/unexpose/logs. Relies on helpers (dokku_log_*,
# verify_service_name, service_*) sourced from the plugin's config/functions.
set -eo pipefail; [[ $DOKKU_TRACE ]] && set -x
source "$PLUGIN_CORE_AVAILABLE_PATH/common/functions"
source "$(dirname "$0")/config"
source "$(dirname "$0")/functions"
# Every etcd:* subcommand requires the plugin's data root to exist.
if [[ $1 == etcd:* ]]; then
if [[ ! -d $ETCD_ROOT ]]; then
dokku_log_fail "etcd: Please run: sudo dokku plugin:install"
fi
fi
# NOTE(review): "$ETCD_ROOT/*" is quoted, so the glob never expands — this
# tests for, and removes, a literal directory named '*'. As written it is a
# no-op on normal installs; unquoting it would wipe ALL service data on every
# invocation, so the original intent must be confirmed before "fixing" it.
if [[ -d "$ETCD_ROOT/*" ]]; then
rm -rf "$ETCD_ROOT/*"
fi
case "$1" in

  etcd:create)
    # Create a new service directory and start a fresh etcd container.
[[ -z $2 ]] && dokku_log_fail "Please specify a name for the service"
[[ ! -d "$ETCD_ROOT/$2" ]] || dokku_log_fail "etcd service $2 already exists"
SERVICE="$2"; SERVICE_ROOT="$ETCD_ROOT/$SERVICE"; LINKS_FILE="$ETCD_ROOT/LINKS"
if ! docker images | grep -e "^$ETCD_IMAGE "; then
dokku_log_fail "etcd image not found"
fi
mkdir -p "$SERVICE_ROOT" || dokku_log_fail "Unable to create service directory"
touch "$LINKS_FILE"
dokku_log_info1 "Starting container"
echo "" > "$SERVICE_ROOT/ENV"
SERVICE_NAME=$(get_service_name "$SERVICE")
# ID=$(docker run --name "$SERVICE_NAME" -v "$SERVICE_ROOT/data:/var/lib/etcd" --env-file="$SERVICE_ROOT/ENV" -d --restart always --label dokku=service --label dokku.service=etcd "$ETCD_IMAGE":v2.3.8 -advertise-client-urls http://etcd.tld:2379,http://etcd.tld:4001 -listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 -listen-peer-urls http://0.0.0.0:2380)
ID=$(docker run --name "$SERVICE_NAME" -v "$SERVICE_ROOT/data:/var/lib/etcd" --env-file="$SERVICE_ROOT/ENV" -d --restart always --label dokku=service --label dokku.service=etcd "$ETCD_IMAGE":v2.3.8)
echo "$ID" > "$SERVICE_ROOT/ID"
dokku_log_info2 "etcd container created: $SERVICE"
;;

  etcd:destroy)
    # Interactive (unless "force") teardown: stop container, remove it and data.
[[ -z $2 ]] && dokku_log_fail "Please specify a name for the service"
verify_service_name "$2"
SERVICE="$2"; SERVICE_ROOT="$ETCD_ROOT/$SERVICE"; LINKS_FILE="$SERVICE_ROOT/LINKS"
SERVICE_NAME=$(get_service_name "$SERVICE")
# Refuse to destroy a service that still has linked apps.
[[ -s "$LINKS_FILE" ]] && dokku_log_fail "Cannot delete linked service"
[[ "$3" == "force" ]] && DOKKU_APPS_FORCE_DELETE=1
if [[ -z "$DOKKU_APPS_FORCE_DELETE" ]]; then
dokku_log_warn "WARNING: Potentially Destructive Action"
dokku_log_warn "This command will destroy $SERVICE etcd service."
dokku_log_warn "To proceed, type \"$SERVICE\""
echo ""
read -p "> " service_name
if [[ "$service_name" != "$SERVICE" ]]; then
dokku_log_warn "Confirmation did not match $SERVICE. Aborted."
exit 1
fi
fi
dokku_log_info1 "Deleting $SERVICE"
if [[ -n $(docker ps -aq -f name="$SERVICE_NAME") ]]; then
service_stop "$SERVICE"
sleep 1
dokku_log_verbose_quiet "Removing container"
docker rm -v "$SERVICE_NAME" > /dev/null
sleep 1
else
dokku_log_verbose_quiet "No container exists for $SERVICE"
fi
dokku_log_verbose_quiet "Removing data"
rm -rf "$SERVICE_ROOT"
dokku_log_info2 "etcd container deleted: $SERVICE"
;;

  # Lifecycle commands: thin wrappers over the sourced service_* helpers.
etcd:start)
[[ -z $2 ]] && dokku_log_fail "Please specify a name for the service"
verify_service_name "$2"
service_start "$2"
;;
etcd:stop)
[[ -z $2 ]] && dokku_log_fail "Please specify a name for the service"
verify_service_name "$2"
service_stop "$2"
;;
etcd:restart)
[[ -z $2 ]] && dokku_log_fail "Please specify a name for the service"
verify_service_name "$2"
service_stop "$2"
service_start "$2"
dokku_log_info1 "Please call dokku ps:restart on all linked apps"
;;

  # Link management: both a service name ($2) and an app name ($3) required.
etcd:link)
[[ -z $2 ]] && dokku_log_fail "Please specify a name for the service"
[[ -z $3 ]] && dokku_log_fail "Please specify an app to run the command on"
verify_app_name "$3"
verify_service_name "$2"
service_link "$2" "$3"
;;
etcd:unlink)
[[ -z $2 ]] && dokku_log_fail "Please specify a name for the service"
[[ -z $3 ]] && dokku_log_fail "Please specify an app to run the command on"
verify_app_name "$3"
verify_service_name "$2"
service_unlink "$2" "$3"
;;
etcd:expose)
[[ -z $2 ]] && dokku_log_fail "Please specify a name for the service"
verify_service_name "$2"
service_port_expose "$2" "${@:3}"
;;
etcd:unexpose)
[[ -z $2 ]] && dokku_log_fail "Please specify a name for the service"
verify_service_name "$2"
service_port_unexpose "$2"
;;
etcd:logs)
[[ -z $2 ]] && dokku_log_fail "Please specify a name for the service"
verify_service_name "$2"
service_logs "$2" "$3"
;;
help | etcd:help)
cat<<EOF
    etcd:create <name>, Create new ETCD container
    etcd:destroy <name>, Destroy ETCD container
    etcd:start <name>, Start a previously stopped etcd service
    etcd:stop <name>, Stop a running etcd service
    etcd:restart <name>, Graceful shutdown and restart of the etcd service container
    etcd:link <name> <app>, Link etcd service to the app
    etcd:unlink <name> <app>, Unlink etcd service from the app
    etcd:expose <name> [port], Expose on a custom port if provided (random port otherwise)
    etcd:unexpose <name>, Unexpose a previously exposed etcd service
    etcd:logs <name> [-t], Print the most recent log(s) for this service
EOF
;;
*)
    # Unknown command: let dokku fall through to other plugins.
exit $DOKKU_NOT_IMPLEMENTED_EXIT
;;
esac
| true
|
e9545890be9b5c06f8d3c22390dad2b22f977030
|
Shell
|
miscord/build-scripts
|
/make.sh
|
UTF-8
| 1,994
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# --- functions ---
# https://stackoverflow.com/questions/25288194/
# pushd () {
# command pushd "$@" &> /dev/null
# }
# popd () {
# command popd "$@" &> /dev/null
# }
# Fetch every build-time dependency: helper scripts and release assets from the
# miscord-build-scripts repo, plus the standalone `hub` CLI (extracted into
# ./scripts and the archive removed afterwards).
download_dependencies () {
mkdir "scripts" "assets"
get_script package-mac.sh
get_script snap.sh
get_script gh-upload.sh
get_script makeself.sh
get_asset Miscord.app.zip
get_asset snap.zip
get_asset makeself.zip
wget --quiet "https://github.com/github/hub/releases/download/v2.4.0/hub-linux-amd64-2.4.0.tgz"
tar -xzf hub-linux-amd64-2.4.0.tgz
mv hub-linux-amd64-2.4.0/bin/hub scripts/hub
rm -r hub-linux-amd64-2.4.0.tgz hub-linux-amd64-2.4.0
}
# Download one helper script from the build-scripts repo into ./scripts
# and make it executable.
get_script() {
  local name="$1"
  wget --quiet "https://raw.githubusercontent.com/Bjornskjald/miscord-build-scripts/master/${name}" -O "scripts/${name}"
  chmod +x "scripts/${name}"
}
# Download one release asset into ./assets and unzip it there.
# Fix over original: if pushd fails (e.g. ./assets missing) we bail out
# instead of silently unzipping into the current directory.
get_asset () {
  local name="$1"
  wget --quiet "https://github.com/Bjornskjald/miscord-build-scripts/releases/download/assets/${name}" -O "assets/${name}"
  pushd "assets" || return 1
  unzip -qq "${name}"
  popd
}
# --- functions end ---
# Read the version out of package.json (node evaluates the expression).
VERSION=$(node -pe "require('./package.json').version")
echo "Building Miscord v$VERSION..."
mkdir -p build
echo "Downloading dependencies..."
download_dependencies
npm install
# Build all platform binaries concurrently, then wait for every job.
npm run build:linux -- -o build/miscord-linux &
# npm run build:linux32 -- -o build/miscord-$VERSION-linux-x86 &
npm run build:win -- -o build/miscord-win.exe &
# npm run build:win32 -- -o build/miscord-$VERSION-win-x86.exe &
npm run build:mac -- -o build/miscord-macos &
wait
# makeself.sh defines the makeself_* packaging functions used below.
source scripts/makeself.sh
makeself_linux &
# makeself_linux_x86 &
makeself_mac &
wait
source scripts/package-mac.sh
# Collect each produced file in build/ as a `-a <file>` attachment for hub.
assets=()
for f in build/*; do
if ! [ -f "$f" ]; then continue; fi
assets+=(-a "$f")
done
MESSAGE="Release generated automatically with [\`miscord-build-scripts\`](https://github.com/Bjornskjald/miscord-build-scripts/) via [\`hub\`](https://github.com/github/hub/)"
# hub takes the release title and body as one message separated by a newline.
FULL="$VERSION
$MESSAGE"
scripts/hub release create "${assets[@]}" -m "$FULL" "v$VERSION"
source scripts/snap.sh
echo "Finished."
| true
|
c05202eae9cdbe9fd809b5ec32f5cbfcaae27ff4
|
Shell
|
Killasnipawoof/CSCI230Files
|
/Midterm Question Answers/Q5.sh
|
UTF-8
| 245
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Ask for name and age; report whether the user may watch "Defender"
# (allowed from age 18) and otherwise how many years remain.
#
# Fixes over original:
#  - `[ $age >= 18 ]` redirected output to a file named "=" instead of
#    comparing; use the numeric -ge operator (quoted operand).
#  - `years = $[18 - age]` is not a valid assignment (spaces) and $[...] is
#    deprecated; use years=$((...)).
#  - the trailing `fi` was missing, so the script did not even parse.
read -rp "Enter your name: " name
read -rp "Enter your age: " age
if [ "$age" -ge 18 ]; then
  echo "You are allowed to watch Defender."
else
  years=$((18 - age))
  echo "You are not permitted to watch Defender, you must wait $years years."
fi
| true
|
aa8991794a767aa0cb2d1fa7c8b5db5bb0863a86
|
Shell
|
kinglarce/jenkins-understanding
|
/Jenkins 6/git/post-receive
|
UTF-8
| 1,761
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Get branch name from ref head
# this file is for gitlab hooks(CI), if ever somebody pushes to master then it will generate a crumb and that crumb
# will be use for POST request to jenkins that will trigger the "Build"
# To actually trigger the hooks to a specific repositories(only in gitlab) or in this case nodejs repor, we need to go to
# "/var/opt/gitlab/git-data/repositories/jenkins/nodejs.git" which is located in the "git-server" container then create
# a directory called "custom_hooks", and in that dir, add this file "post-receive" and gitlab would already know this hooks.
# NOTE: make sure this "custom_hooks" and "post-receive" file mode is executable and the user is not "root"
# If everything is right, then this will trigger the "node-express-realworld-example-app" job automatically using crumb POST req
# The condition here is to check whenever someone pushes to branch
# git feeds "<old-sha> <new-sha> <refname>" on stdin; only read it when stdin
# is not a terminal. NOTE(review): `read` without -r mangles backslashes —
# harmless for SHAs/refnames, but worth confirming.
if ! [ -t 0 ]; then
read -a ref
fi
# ref[2] is the refname (e.g. refs/heads/master); split on '/' and take the
# third field as the branch name.
IFS='/' read -ra REF <<< "${ref[2]}"
branch="${REF[2]}"
# if the branch is master
if [ "$branch" == "master" ]; then
# then get the crumb id and assign it to crumb
crumb=$(curl -u "jenkins_user:jenkins_user" -s 'http://192.168.1.22:8080/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)')
# and then trigger the job "node-express-realworld-example-app" using POST request with header of the crumb id
curl -u "jenkins_user:jenkins_user" -H "$crumb" -X POST http://192.168.1.22:8080/job/node-express-realworld-example-app/build?delay=0sec
# Below is for trigger job with parameters
# curl -u "jenkins_user:jenkins_user" -H "$crumb" -X POST http://192.168.1.22:8080/job/pipeline-docker-maven/buildWithParameters?BUILD_TAG=10
# $? here is the exit status of the triggering curl above (comment lines do
# not affect it).
if [ $? -eq 0 ] ; then
echo "*** Ok"
else
echo "*** Error"
fi
fi
| true
|
91b8b3a6ad2995aa7d8280ca79dd1a59424525f1
|
Shell
|
TripleTrable/Opportunity_rice
|
/.zprofile
|
UTF-8
| 242
| 2.546875
| 3
|
[] |
no_license
|
# CLEANUP:
# Load the user's zsh environment file if it exists.
[ -f ~/.config/zsh/zenvironment ] && source ~/.config/zsh/zenvironment

# Autostart X only on the first virtual terminal.
# Fixes over original: a separator (';') was missing between ']]' and 'then',
# and the commented-out alternative condition was missing a space before ']]'.
#if [[ ! $DISPLAY && $XDG_VTNR -le 2 ]]; then
if [[ "$(tty)" = "/dev/tty1" ]]; then
  # NOTE(review): this starts X (blocking) when dwm isn't running, then loads
  # Xresources and exec's startx again once it exits — looks intentional as a
  # "restart X on WM exit" loop, but confirm.
  pgrep dwm || startx
  xrdb ~/.Xresources
  exec startx "$XINITRC" vt1
fi
| true
|
985eeeb341f7e31bfff998691f45e77455b0bb26
|
Shell
|
HackTrinity/challenges
|
/networking/smell_3/build.sh
|
UTF-8
| 1,808
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
# Build an old OpenSSL (1.0.1) patched for musl, then build nginx against it,
# inside an Alpine container; build-only packages are removed at the end.
set -e
OPENSSL_VERSION="1.0.1"
NGINX_VERSION="1.16.1"
# Build-time packages, installed up front and deleted after the build.
DEPS="musl-dev gcc make pkgconfig perl zlib-dev pcre-dev"
apk --no-cache add $DEPS
mkdir /tmp/build
cd /tmp/build
# OpenSSL
# Patch (applied below with `patch -p1`): musl only ships termios.h, so strip
# the TERMIO/SGTTY platform detection and force TERMIOS. The heredoc content
# is a unified diff and must remain byte-exact.
cat > openssl_termios.patch <<EOF
diff --git a/crypto/ui/ui_openssl.c b/crypto/ui/ui_openssl.c
index 5832a73cf5..511a1d2baf 100644
--- a/crypto/ui/ui_openssl.c
+++ b/crypto/ui/ui_openssl.c
@@ -178,41 +178,9 @@
* TERMIO, TERMIOS, VMS, MSDOS and SGTTY
*/
-#if defined(__sgi) && !defined(TERMIOS)
-# define TERMIOS
-# undef TERMIO
-# undef SGTTY
-#endif
-
-#if defined(linux) && !defined(TERMIO)
-# undef TERMIOS
-# define TERMIO
-# undef SGTTY
-#endif
-
-#ifdef _LIBC
-# undef TERMIOS
-# define TERMIO
-# undef SGTTY
-#endif
-
-#if !defined(TERMIO) && !defined(TERMIOS) && !defined(OPENSSL_SYS_VMS) && !defined(OPENSSL_SYS_MSDOS) && !defined(OPENSSL_SYS_MACINTOSH_CLASSIC) && !defined(MAC_OS_GUSI_SOURCE)
-# undef TERMIOS
-# undef TERMIO
-# define SGTTY
-#endif
-
-#if defined(OPENSSL_SYS_VXWORKS)
-#undef TERMIOS
-#undef TERMIO
-#undef SGTTY
-#endif
-
-#if defined(OPENSSL_SYS_NETWARE)
-#undef TERMIOS
+// HACK musl has only termios.h...
#undef TERMIO
-#undef SGTTY
-#endif
+#define TERMIOS
#ifdef TERMIOS
# include <termios.h>
EOF
wget "https://www.openssl.org/source/old/$OPENSSL_VERSION/openssl-$OPENSSL_VERSION.tar.gz"
tar zxf "openssl-$OPENSSL_VERSION.tar.gz"
cd "openssl-$OPENSSL_VERSION/"
patch -p1 < ../openssl_termios.patch
./config --prefix=/usr/local --openssldir=/etc/ssl shared
# The parallel build of this old OpenSSL tree is flaky; ignore its failure and
# let the serial `make` that follows finish the remainder.
make -j$(nproc) || true
make
# install_sw: install libraries/headers only (no docs).
make install_sw
cd ../
# nginx
wget "https://nginx.org/download/nginx-$NGINX_VERSION.tar.gz"
tar zxf "nginx-$NGINX_VERSION.tar.gz"
cd "nginx-$NGINX_VERSION/"
./configure --with-http_ssl_module
make -j$(nproc)
make install
cd ../
# Remove the build-only packages and the work tree.
apk --no-cache del $DEPS
cd /
rm -r /tmp/build
| true
|
5364642b15a5b225dc752fbcc76e8b22ae3fab66
|
Shell
|
LeeGeunHAHAHA/chamber_spo
|
/scripts/func/admin_get_log/4.smart_log.4096
|
UTF-8
| 4,375
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# SMART-log counter test: after known amounts of namespace I/O, verify that
# media_errors, data_units_read/written and host_read/write_commands advance
# by the expected deltas. Expects $FQA_DEVICE, $CTRL_ID and `rescan-ns` to be
# provided by ../common.
#
# Refactor over original: the repeated `grep | sed | awk` backtick pipelines
# are collected into one helper, and backticks are replaced with $(...).
source ../common > /dev/null 2>&1

# Print the numeric value of the named counter from `nvme smart-log` text on
# stdin, thousands separators stripped (e.g. "media_errors : 1,234" -> 1234).
smart_field() {
  grep "$1" | sed 's/,//g' | awk '{ print $3 }'
}

# --- Media and Data Integrity Errors ---
nvme delete-ns $FQA_DEVICE -n 0xFFFFFFFF
nvme create-ns $FQA_DEVICE -c 0x100000 -s 0x100000 -f 1 -d 1 # Data Protection Type 1
nvme attach-ns $FQA_DEVICE -n 1 -c $CTRL_ID
rescan-ns

MEDIA_ERRORS_OLD=$(nvme smart-log $FQA_DEVICE | smart_field media_errors)
echo "abcd" | nvme write $FQA_DEVICE -n 1 -z 4 -p 0xf -a 0xdead -m 0xffff
# success: matching protection info, so media_errors must stay unchanged
nvme read $FQA_DEVICE -n 1 -z 4 -y 1 -p 0xf -a 0xdead -m 0xffff | xxd -p -l 4
MEDIA_ERRORS_NEW=$(nvme smart-log $FQA_DEVICE | smart_field media_errors)
if [[ $MEDIA_ERRORS_OLD != $MEDIA_ERRORS_NEW ]]; then
  echo "expected <$MEDIA_ERRORS_OLD>, but was <$MEDIA_ERRORS_NEW>"
fi

# apptag error (disabled): a mismatching app tag should bump media_errors by 1
# nvme read $FQA_DEVICE -n 1 -z 4 -y 1 -p 0xf -a 0x1111 -m 0xffff | xxd -p -l 4
# MEDIA_ERRORS_NEW=$(nvme smart-log $FQA_DEVICE | smart_field media_errors)
# if [[ $((MEDIA_ERRORS_OLD+1)) != $MEDIA_ERRORS_NEW ]]; then
#   echo "expected <$((MEDIA_ERRORS_OLD+1))>, but was <$MEDIA_ERRORS_NEW>"
# fi

# --- Data Units Read/Written, Host Read/Write Commands ---
# Recreate the namespace without protection info.
nvme delete-ns $FQA_DEVICE -n 0xFFFFFFFF
nvme create-ns $FQA_DEVICE -c 0x100000 -s 0x100000
nvme attach-ns $FQA_DEVICE -n 1 -c $CTRL_ID
rescan-ns

nvme smart-log $FQA_DEVICE > smart.tmp
DATA_UNIT_RD_OLD=$(smart_field data_units_read < smart.tmp)
DATA_UNIT_WR_OLD=$(smart_field data_units_written < smart.tmp)
HOST_RD_CMDS_OLD=$(smart_field host_read_commands < smart.tmp)
HOST_WR_CMDS_OLD=$(smart_field host_write_commands < smart.tmp)

# Three writes of 1000 blocks each (-c is 0-based); the test expects the
# write-side counters to advance by exactly 32 and the read side not at all.
echo "abcd" | nvme write $FQA_DEVICE -n 1 -z 4 -c 999
echo "abcd" | nvme write $FQA_DEVICE -n 1 -z 4 -c 1998
echo "abcd" | nvme write $FQA_DEVICE -n 1 -z 4 -c 999

nvme smart-log $FQA_DEVICE > smart.tmp
DATA_UNIT_RD_NEW=$(smart_field data_units_read < smart.tmp)
DATA_UNIT_WR_NEW=$(smart_field data_units_written < smart.tmp)
HOST_RD_CMDS_NEW=$(smart_field host_read_commands < smart.tmp)
HOST_WR_CMDS_NEW=$(smart_field host_write_commands < smart.tmp)

if [[ $((DATA_UNIT_RD_OLD)) != $DATA_UNIT_RD_NEW ]]; then
  echo "expected <$((DATA_UNIT_RD_OLD))>, but was <$DATA_UNIT_RD_NEW>"
fi
if [[ $((DATA_UNIT_WR_OLD+32)) != $DATA_UNIT_WR_NEW ]]; then
  echo "expected <$((DATA_UNIT_WR_OLD+32))>, but was <$DATA_UNIT_WR_NEW>"
fi
if [[ $((HOST_RD_CMDS_OLD)) != $HOST_RD_CMDS_NEW ]]; then
  echo "expected <$((HOST_RD_CMDS_OLD))>, but was <$HOST_RD_CMDS_NEW>"
fi
if [[ $((HOST_WR_CMDS_OLD+32)) != $HOST_WR_CMDS_NEW ]]; then
  echo "expected <$((HOST_WR_CMDS_OLD+32))>, but was <$HOST_WR_CMDS_NEW>"
fi

# Same check in the read direction: three 1000-block reads, read-side
# counters expected to advance by 32, write side unchanged.
nvme smart-log $FQA_DEVICE > smart.tmp
DATA_UNIT_RD_OLD=$(smart_field data_units_read < smart.tmp)
DATA_UNIT_WR_OLD=$(smart_field data_units_written < smart.tmp)
HOST_RD_CMDS_OLD=$(smart_field host_read_commands < smart.tmp)
HOST_WR_CMDS_OLD=$(smart_field host_write_commands < smart.tmp)

nvme read $FQA_DEVICE -n 1 -z 4 -c 999
nvme read $FQA_DEVICE -n 1 -z 4 -c 1998
nvme read $FQA_DEVICE -n 1 -z 4 -c 999

nvme smart-log $FQA_DEVICE > smart.tmp
DATA_UNIT_RD_NEW=$(smart_field data_units_read < smart.tmp)
DATA_UNIT_WR_NEW=$(smart_field data_units_written < smart.tmp)
HOST_RD_CMDS_NEW=$(smart_field host_read_commands < smart.tmp)
HOST_WR_CMDS_NEW=$(smart_field host_write_commands < smart.tmp)

if [[ $((DATA_UNIT_RD_OLD+32)) != $DATA_UNIT_RD_NEW ]]; then
  echo "expected <$((DATA_UNIT_RD_OLD+32))>, but was <$DATA_UNIT_RD_NEW>"
fi
if [[ $((DATA_UNIT_WR_OLD)) != $DATA_UNIT_WR_NEW ]]; then
  echo "expected <$((DATA_UNIT_WR_OLD))>, but was <$DATA_UNIT_WR_NEW>"
fi
if [[ $((HOST_RD_CMDS_OLD+32)) != $HOST_RD_CMDS_NEW ]]; then
  echo "expected <$((HOST_RD_CMDS_OLD+32))>, but was <$HOST_RD_CMDS_NEW>"
fi
if [[ $((HOST_WR_CMDS_OLD)) != $HOST_WR_CMDS_NEW ]]; then
  echo "expected <$((HOST_WR_CMDS_OLD))>, but was <$HOST_WR_CMDS_NEW>"
fi

rm -f smart.tmp
| true
|
9b77a61e18cc4a7ca5786b549e2bcb75dc172775
|
Shell
|
satoshi-hirayama/techtalk4
|
/provisioning/roles/ecs-agent/templates/ecs-agent
|
UTF-8
| 1,292
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Init-style start/stop wrapper for the Amazon ECS agent container.
# This is an Ansible/Jinja2 template: {{ ... }} placeholders are filled in at
# provisioning time and must not be edited here.
start() {
# Restart the existing agent container if one exists; otherwise run a new one.
docker ps -a | grep ecs-agent
if [ $? = 0 ]; then
docker restart ecs-agent
else
# Derive the region by trimming the AZ suffix letter (e.g. us-east-1a -> us-east-1).
export AWS_DEFAULT_REGION=$(ec2metadata --availability-zone | sed -e 's/.$//')
export ECS_CLUSTER={{ cluster_name }}
# NOTE(review): "netowrk_mode" below is the template variable's spelling;
# it must match the Ansible vars, so it cannot be "fixed" here alone.
docker run --name {{ ecs_agent_container_name }} \
--detach={{ docker_detach }} \
--restart={{ docker_restart_policy }}{{ docker_restart_times }} \
--volume={{ volume_docker_sock }} \
--volume={{ volume_ecs_log }} \
--volume={{ volume_ecs_data }} \
--volume={{ volume_cgroup }} \
--volume={{ volume_docker_lib }} \
--net={{ netowrk_mode }} \
--env=ECS_LOGFILE={{ ecs_logfile }} \
--env=ECS_LOGLEVEL={{ ecs_loglevel }} \
--env=ECS_DATADIR={{ ecs_datadir }} \
--env=ECS_CLUSTER=$ECS_CLUSTER \
--env=ECS_ENABLE_TASK_IAM_ROLE={{ ecs_enable_task_iam_role }} \
--env=ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST={{ ecs_enable_task_iam_role_netowrk_host }} \
--env=ECS_RESERVED_PORTS={{ ecs_reserved_ports }} \
--env=ECS_CONTAINER_STOP_TIMEOUT={{ ecs_container_stop_timeout }} \
amazon/amazon-ecs-agent:latest
fi
}
stop() {
docker stop ecs-agent
}
# Dispatch on the single action argument.
case "$1" in
start)
start
;;
stop)
stop
;;
*)
echo "Usage: ecs-agent (start|stop)"
exit 1
;;
esac
# Propagate the status of the action that ran.
exit $?
| true
|
a41b2ead8dd20223fd2cd15aee0d19aadc64f169
|
Shell
|
pkolachi/corpus-preprocessing
|
/beda/stanford_corenlp-on_wiki_data.sh
|
UTF-8
| 502
| 2.59375
| 3
|
[] |
no_license
|
# Run the Stanford CoreNLP annotation pipeline over a list of files.
# Usage: stanford_corenlp-on_wiki_data.sh <filelist> <output-directory>
# Fix over original: "$1" and "$2" are quoted so paths with spaces survive.
export STANFORD_CORENLP_DIR="$HOME/Documents/softwares/nlp-tools/language-specific/english/stanford-tools/stanford-corenlp-full-2014-01-04"
# Absolute path to this script, e.g. /home/user/bin/foo.sh
SCRIPT=$(readlink -f "$0")
# Absolute path this script is in, thus /home/user/bin
SCRIPTPATH=$(dirname "$SCRIPT")
java -Xmx30G -cp "$STANFORD_CORENLP_DIR/*" edu.stanford.nlp.pipeline.StanfordCoreNLP \
  -annotators tokenize,ssplit,pos,lemma,ner,parse \
  -threads 5 \
  -filelist "$1" \
  -outputDirectory "$2"
| true
|
f0b531bddd5e736777d1f382292977fa38deda20
|
Shell
|
Amorymeltzer/dotfiles
|
/bin/utc
|
UTF-8
| 183
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Usage: utc [<seconds-since-epoch>]
# Print a UTC-formatted timestamp: the current time with no argument,
# or the given epoch seconds via `date -r` (BSD/macOS semantics).
set -e

fmt="%Y-%m-%d %H:%M:%SZ"
if [[ -n "$1" ]]; then
  date -r "$1" +"$fmt"
else
  date -u +"$fmt"
fi
| true
|
a7dee33d2e1f8363236500d689aa5ef5f4a27db4
|
Shell
|
dugrema/millegrilles.consignation
|
/install_scripts/bin/setup-certs-signernoeud.sh
|
UTF-8
| 959
| 3.65625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Sign a node certificate for a MilleGrille from an existing CSR ($REQ),
# using the helper `signer_cert` sourced from setup-certs-fonctions.sh.
#
# Fixes over original:
#  - after `signer_cert`, the code tested `[ $? != 0 ]` and then ran
#    `exit $?`, which exits with the status of the *test* (always 0);
#    the status is now captured first.
#  - backticks replaced with $(...); expansions quoted; the sed script is
#    quoted so the dots are escaped as written instead of being stripped
#    by the shell.

# Required parameters
if [ -z "$NOM_MILLEGRILLE" ] || [ -z "$DOMAIN_SUFFIX" ] || [ -z "$REQ" ]; then
  echo "Les parametres NOM_MILLEGRILLE, DOMAIN_SUFFIX et REQ doivent etre definis globalement"
  exit 1
fi

# Optional parameter: timestamp used by the cert helpers.
if [ -z "$CURDATE" ]; then
  CURDATE=$(date +%Y%m%d%H%M)
fi

# Import shared variables and helper functions (signer_cert, MG_KEY, ...).
source /opt/millegrilles/etc/variables.txt
source setup-certs-fonctions.sh

# Signing sequence
sequence_chargement() {
  # Sign the node certificate.
  echo -e "\n*****\nSigner un certificat de noeud"

  # Derive the certificate path from the request path (.csr -> .cert).
  CERT=$(echo "$REQ" | sed 's/\.csr/\.cert/g')

  # Per-invocation environment for signer_cert.
  SUFFIX_NOMCLE=middleware \
  CNF_FILE="$ETC_FOLDER/openssl-millegrille.cnf" \
  KEYFILE="$MG_KEY" \
  PASSWD_FILE="$MILLEGRILLE_PASSWD_FILE" \
  REQ="$REQ" \
  CERT="$CERT" \
  signer_cert
  rv=$?
  if [ "$rv" != 0 ]; then
    exit "$rv"
  fi

  echo -e "\nCertificat cree, copier vers noeud\n"
  cat "$CERT"
}

# Run
sequence_chargement
| true
|
c98565cb175aad7459c62f24ebe18699bcb9b032
|
Shell
|
brechtvdv/realtime-open-data-benchmark
|
/fill-in-master-ip-address.sh
|
UTF-8
| 539
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Replace every EXTERNAL_IP placeholder in the *.y* files with the Kubernetes
# master's IP address, as reported by `kubectl cluster-info`.
master_ip=$(kubectl cluster-info | head -n 1 | egrep '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' -o --color=never)

if [ -n "$master_ip" ]; then
  echo "Kubernetes master's IP address is ${master_ip}"
  sed --in-place "s/EXTERNAL_IP/${master_ip}/g" *.y*
else
  # No reachable cluster: drop the externalIP entries and point the
  # Grafana/InfluxDB datasource at localhost instead.
  sed --in-place "s/- EXTERNAL_IP//g" *.y*
  sed --in-place "s/EXTERNAL_IP/localhost/g" influxdb-datasource.yml
fi
| true
|
e037f2ead79db1cb2e6fe1870e22326e930224f7
|
Shell
|
bin2000/joshua
|
/test/scripts/support/moses_grammar/test.sh
|
UTF-8
| 193
| 2.578125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Regression test: convert `input` with moses2joshua_grammar.pl and compare
# against output.expected; exit 0 (and clean up) on a match, 1 otherwise.
#
# Fixes over original: $JOSHUA is quoted, and the diff result is tested
# directly instead of via a separate $? check.
cat input | "$JOSHUA"/scripts/support/moses2joshua_grammar.pl > output
if diff -u output output.expected > diff; then
  rm -f diff output
  exit 0
else
  exit 1
fi
| true
|
58025224b3d7e9e9427d92b2fd0786b19ccad2a3
|
Shell
|
CloudSide/imgx
|
/imgx/cl.sh
|
UTF-8
| 139
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Clear the on-disk cache for one bucket, then restart the service.
# Usage: cl.sh {bucket}
#
# Fix over original: the bucket name is quoted so a name containing spaces
# or glob characters cannot expand the rm target beyond cache/<bucket>/.
if [ -z "$1" ]
then
  echo $"Usage: $0 {bucket}"
else
  cd cache && rm -rf "./$1"/* && cd ..
  ./service.sh restart
fi
| true
|
062feda5191352d0a20d27bfa08d50e98ac40817
|
Shell
|
elhalag93/bash_scripting_database
|
/bash_project/dropdb.sh~
|
UTF-8
| 175
| 2.84375
| 3
|
[] |
no_license
|
#! /bin/bash
# Drop the database directory named $1 if it exists under the database root.
#
# Fixes over original: expansions are quoted, and the removal now targets the
# same path that the existence check walked (../bash/database/$1); the
# original checked ../bash/database/$1 but removed ./RDBMS/database/$1,
# which could never correspond to the entry just matched.
for i in ../bash/database/*
do
  if [ "$i" == "../bash/database/$1" ]
  then
    rm -r "../bash/database/$1"
    echo "Database Successfully Droped"
    break
  fi
done
| true
|
299a52f1b60e725bfc76c45a3145921965652c58
|
Shell
|
florent-engineering/anemomind
|
/src/compile_and_cp_bin.sh
|
UTF-8
| 625
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Configure and build the nautical tools, then stage the binaries and every
# shared library they link against into ../bin and ../lib.
set -e

cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo -DWITH_SAILROOT=OFF

# Build the external-dependency targets ("...ext..." in `make help`),
# excluding gflags and gtest.
make help | grep ext | sed 's/\.\.\. //' \
  | grep -v gflags | grep -v gtest \
  | xargs make -j$(nproc)

make -j$(nproc) \
  nautical_processBoatLogs logimport_summary \
  anemobox_logcat logimport_try_load nautical_catTargetSpeed

# Binaries to ship. Kept as an array (instead of a space-separated string)
# so the list is never re-split by the shell.
targets=(
  ./src/server/nautical/nautical_catTargetSpeed
  ./src/server/nautical/nautical_processBoatLogs
  ./src/server/nautical/logimport/logimport_try_load
)

mkdir -p ../bin
mkdir -p ../lib

# Copy each linked .so once (ldd output deduplicated). The command
# substitution is intentionally unsplit-quoted: ldd yields one path per word
# (assumes library paths contain no spaces).
cp $(ldd "${targets[@]}" | grep -o '/.\+\.so[^ ]*' | sort | uniq) ../lib
cp "${targets[@]}" ../bin
| true
|
fad40b189b853d461979308d534f4b0a0cadafb0
|
Shell
|
Kryuk/pandora
|
/scripts/ensure-bucket.sh
|
UTF-8
| 644
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Wait for the local MinIO endpoint to accept connections, then create the
# bucket named by $MINIO_BUCKET with s3cmd.
# export the vars in .env into your shell:
# NOTE(review): this export-via-xargs pattern breaks if any .env value
# contains spaces or quotes — fine for simple KEY=VALUE files only.
export $(egrep -v '^#' ../.env | xargs)
MINIO_NAME=localhost
# Poll port 9000 until MinIO answers, then give it a few extra seconds to
# finish starting before creating the bucket.
while ! nc -z ${MINIO_NAME} 9000; do echo 'wait minio...' && sleep 0.1; done; \
sleep 5 && \
s3cmd mb s3://${MINIO_BUCKET}
# Earlier docker-compose/mc variant of the same bootstrap, kept for reference:
# docker-compose run minio --entrypoint sh minio/mc -c "\
# while ! nc -z minio 9000; do echo 'wait minio...' && sleep 0.1; done; \
# sleep 5 && \
# mc config host add myminio http://minio:9000 \$MINIO_ENV_MINIO_ACCESS_KEY \$MINIO_ENV_MINIO_SECRET_KEY && \
# mc rm -r --force myminio/\$MINIO_BUCKET || true && \
# mc mb myminio/\$MINIO_BUCKET && \
# mc policy download myminio/\$MINIO_BUCKET \
# "
| true
|
4f50b56bc76b61014560d1775a906c8b23f568ad
|
Shell
|
MariuszWisniewski/todo-app-syncano
|
/Build/Intermediates/ArchiveIntermediates/todo-app-syncano/IntermediateBuildFilesPath/todo-app-syncano.build/Release-iphoneos/todo-app-syncano.build/Script-1575F0981B7D525700D0E5EA.sh
|
UTF-8
| 275
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Xcode build phase: in Release configuration, set CFBundleVersion to the
# number of git commits on HEAD.
#
# Fixes over original: under #!/bin/sh the `==` operator is a bashism and an
# unset/empty ${CONFIGURATION} made the test malformed — use `=` with the
# variable quoted; the stray `;` after `fi` is dropped.
if [ "${CONFIGURATION}" = "Release" ]; then
  buildNumber=$(git rev-list HEAD | wc -l | tr -d ' ')
  /usr/libexec/PlistBuddy -c "Set :CFBundleVersion $buildNumber" "${PROJECT_DIR}/${INFOPLIST_FILE}"
fi
| true
|
1df66d08c00971d84e5b6d6bcac35382ee54b06d
|
Shell
|
bootandy/.i3
|
/i3exit
|
UTF-8
| 253
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
# i3 session helper: dispatch the requested end-of-session action.
action="$1"
case "$action" in
    reboot)
        sudo /sbin/reboot -i
        ;;
    shutdown)
        sudo /sbin/poweroff -i
        ;;
    logout)
        i3-msg exit
        ;;
    *)
        # Unknown or missing action: print usage and signal misuse.
        echo "Usage: $0 {reboot|shutdown|logout}"
        exit 2
        ;;
esac
exit 0
| true
|
92fc70704a06f667536ebd1f44468fc893f1c99f
|
Shell
|
Ananac0/AnanacScript
|
/ansc.sh
|
UTF-8
| 4,430
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# AnanacScript: interactive menu wrapping airmon-ng to switch a wireless
# adapter into/out of monitor mode (user-facing strings are in Russian).
Version="0.2"
# Command-line flags: -s/--setup installs the script system-wide,
# -v prints the version. Anything else falls through to the menu.
case "$1" in
"-S" | "-s" | "--Setup" | "--setup" ) mv ./ansc.sh /usr/local/bin;
echo "Установлено";
exit;;
"-V" | "-v" | "-Version" | "-version" ) echo "Version = $Version";
exit;;
esac
clear
echo "AnanacScript v0.2"
echo ""
echo ""
echo ""
# ASCII-art banner, printed in red via ANSI escapes.
echo -e "\e[31m ▄▄▄ ███▄ █ ▄▄▄ ███▄ █ ▄▄▄ ▄████▄ ██████ ▄████▄ ██▀███ ██▓ ██▓███ ▄▄▄█████▓ \e[0m"
echo -e "\e[31m ▒████▄ ██ ▀█ █ ▒████▄ ██ ▀█ █ ▒████▄ ▒██▀ ▀█ ▒██ ▒ ▒██▀ ▀█ ▓██ ▒ ██▒▓██▒▓██░ ██▒▓ ██▒ ▓▒ \e[0m"
echo -e "\e[31m ▒██ ▀█▄ ▓██ ▀█ ██▒▒██ ▀█▄ ▓██ ▀█ ██▒▒██ ▀█▄ ▒▓█ ▄ ░ ▓██▄ ▒▓█ ▄ ▓██ ░▄█ ▒▒██▒▓██░ ██▓▒▒ ▓██░ ▒░ \e[0m"
echo -e "\e[31m ░██▄▄▄▄██ ▓██▒ ▐▌██▒░██▄▄▄▄██ ▓██▒ ▐▌██▒░██▄▄▄▄██ ▒▓▓▄ ▄██▒ ▒ ██▒▒▓▓▄ ▄██▒▒██▀▀█▄ ░██░▒██▄█▓▒ ▒░ ▓██▓ ░ \e[0m"
echo -e "\e[31m ▓█ ▓██▒▒██░ ▓██░ ▓█ ▓██▒▒██░ ▓██░ ▓█ ▓██▒▒ ▓███▀ ░▒██████▒▒▒ ▓███▀ ░░██▓ ▒██▒░██░▒██▒ ░ ░ ▒██▒ ░ \e[0m"
echo -e "\e[31m ▒▒ ▓▒█░░ ▒░ ▒ ▒ ▒▒ ▓▒█░░ ▒░ ▒ ▒ ▒▒ ▓▒█░░ ░▒ ▒ ░▒ ▒▓▒ ▒ ░░ ░▒ ▒ ░░ ▒▓ ░▒▓░░▓ ▒▓▒░ ░ ░ ▒ ░░ \e[0m"
echo -e "\e[31m ▒ ▒▒ ░░ ░░ ░ ▒░ ▒ ▒▒ ░░ ░░ ░ ▒░ ▒ ▒▒ ░ ░ ▒ ░ ░▒ ░ ░ ░ ▒ ░▒ ░ ▒░ ▒ ░░▒ ░ ░ \e[0m"
echo -e "\e[31m ░ ▒ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░░ ░ ▒ ░░░ ░ \e[0m"
echo -e "\e[31m ░ ░ ░ ░ ░ ░ ░ ░░ ░ ░ ░ ░ ░ ░ \e[0m"
echo ""
echo ""
echo ""
# Menu (green): 1 = enable monitor mode, 2 = disable monitor mode,
# 3 = restart Network-Manager, 4-9 = placeholders, not implemented yet.
echo -e " \e[32m[1] Перевести адаптер в режим мониторинга\e[0m"
echo -e " \e[32m[2] Перевести адаптер с режима мониторинга\e[0m"
echo -e " \e[32m[3] Перезапустить Network-Manager\e[0m"
echo -e " \e[32m[4] *Создать фейковую ТД\e[0m"
echo -e " \e[32m[5] *[]\e[0m"
echo -e " \e[32m[6] *[]\e[0m"
echo -e " \e[32m[7] *[]\e[0m"
echo -e " \e[32m[8] *[]\e[0m"
echo -e " \e[32m[9] *[]\e[0m"
echo ""
echo -e " \e[41mВведите номер команды: \e[0m"
read Event
# Dispatch the chosen menu item.
case "$Event" in
"1" ) echo -e " \e[41mВведите названия адаптера: \e[0m";
read Adapter;
echo -e " \e[41mОтключаем ненужные процесы...\e[0m";
airmon-ng check kill;
echo -e " \e[41mПереводим адаптер в режим мониторинга...\e[0m";
airmon-ng start $Adapter;
echo -e " \e[41mГотово!\e[0m";;
"2" ) echo -e " \e[41mВведите название адаптера: \e[0m";
read Adapter;
echo -e " \e[41mПереводим адаптер с режима мониторинга...\e[0m";
airmon-ng stop $Adapter;
echo -e " \e[41mПерезапускаем Network-Manager...\e[0m";
service network-manager restart;
echo -e " \e[41mГотово!\e[0m";;
"3" ) echo -e " \e[41mПерезапускаем Network-Manager...\e[0m";
service network-manager restart;
echo -e " \e[41mГотово!\e[0m";;
"4" ) ;;
"5" ) ;;
"6" ) ;;
"7" ) ;;
"8" ) ;;
"9" ) ;;
esac
| true
|
71b126e1f428f6fe0bcd5b69808da7e992782e79
|
Shell
|
zdhuangelephant/xd_pro
|
/xiaodou-aos-client/src/main/python/api/extend/1/deploy.sh
|
UTF-8
| 722
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy a service build fetched over FTP into $BASEDIR/$SER_NAME.
# Args: $1=base dir  $2=service name  $3=conf dir on FTP  $4=war dir on FTP
#       $5=version   $6=FTP host      $7=FTP user         $8=FTP password
# Fix: the original first line was "#/bin/bash" (missing '!'), which is not
# a shebang, so the script ran under whatever shell happened to invoke it.
# NOTE(review): the FTP password is visible on the command line (ps) — fine
# only on trusted hosts; consider a .netrc file instead.
#####SET_VER#####
BASEDIR=$1
REMOTEHOST=$6
USERNAME=$7
PASSWORD=$8
SER_NAME=$2
FILE_ADRESS=$3
SER_BASE=$BASEDIR/$SER_NAME
WARADRESS=$4
VERSION=$5
#####START#####
echo start
echo mkdir $BASEDIR
mkdir -p "$BASEDIR"
cd "$BASEDIR"
echo mkdir $BASEDIR/$SER_NAME
mkdir "$SER_NAME"
cd "$SER_NAME"
echo delFile
# ":?" aborts instead of expanding to "/*" if SER_BASE is somehow empty.
rm -rf "${SER_BASE:?}"/*
echo ftp down start
wget -t 1 --timeout=8 -nH -m --ftp-user="$USERNAME" --ftp-password="$PASSWORD" "ftp://$REMOTEHOST/$FILE_ADRESS/"
cp -r "$FILE_ADRESS/conf/" "$SER_BASE/"
rm -rf "$FILE_ADRESS/"
wget -t 1 --timeout=8 -nH -m --ftp-user="$USERNAME" --ftp-password="$PASSWORD" "ftp://$REMOTEHOST/$WARADRESS/$VERSION/"
cp -r "$WARADRESS/$VERSION/"* "$SER_BASE/"
rm -rf "$WARADRESS/"
echo ftp down success
echo end
| true
|
26cb25f2a7f50ae7c80d5eecd7b07536c390c1a8
|
Shell
|
sudheerbd/cicd-modernization
|
/CICD/BuildWar/build/reporting/sonar_analysis.sh
|
UTF-8
| 1,007
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Prepare the reporting webapp's Sencha/ExtJS workspace and run the Maven
# Sonar analysis. Relies on variables sourced from the two files below.
. ./CICD/BuildWar/variables.sh
. $main/scripts/env_variables.sh
extjsVersion=${EXTJS_VERSION}
# Select the Sencha source/packages paths for the configured ExtJS version.
# NOTE(review): the 6.7 branch reuses the 6.5.3 paths — appears intentional
# (no dedicated 6.7 tree defined), but worth confirming.
if [ "$extjsVersion" = "Extjs_6.5.3" ]
then
export senchasource=$senchasource53
export senchapackages=$senchapackages53
elif [ "$extjsVersion" = "Extjs_6.7" ]
then
export senchasource=$senchasource53
export senchapackages=$senchapackages53
elif [ "$extjsVersion" = "Extjs_7.2" ]
then
export senchasource=$senchasource72
export senchapackages=$senchapackages72
else
echo "sencha source path not set properly"
fi
cp $config/build/orbit_env.properties $reportingSourceLocation
cd $reportingSourceLocation/src/main/webapp/orbitui
sencha app upgrade $senchasource
# Fix: ">nul" is the Windows (cmd.exe) null device; in a POSIX shell it
# creates a literal file named "nul". Redirect to /dev/null instead.
cp -r $senchapackages ./ext/packages >/dev/null 2>&1
rm -rf ./ext/packages/d3/
cd $reportingSourceLocation/src/main/webapp/orbitui/ext/classic
# Restore any files the upgrade deleted from the checked-in ext/classic tree.
git ls-files -d | xargs git checkout
cd ../..
cp -r $senchapackages/d3 ./packages/d3 >/dev/null 2>&1
sencha app clean
cd $reportingSourceLocation
mvn clean verify sonar:sonar -Dmaven.test.skip=true
| true
|
856d03563820569f394cf2b3340236e52a1a9c32
|
Shell
|
foobarjimmy/productionScripts
|
/readoptions.sh
|
UTF-8
| 195
| 3.03125
| 3
|
[] |
no_license
|
#! /bin/bash
# Demo of read(1) options: -p shows an inline prompt, -nN stops after N
# characters, so the username is capped at 4 characters and the password at 1.
# NOTE(review): the password is echoed back in clear text — demo use only.
echo "Username and password"
read -n4 -p "Username : " username
read -n1 -p "Password : " password
# read -n does not emit a newline after the last character; add one.
echo
echo "Your username is : $username , your password is : $password"
exit 0
| true
|
81b8a215c3289aeab3f607cf4b6ad763cb69d69d
|
Shell
|
Innoactive/docker-postgres-backup-local
|
/build_and_deploy.sh
|
UTF-8
| 351
| 2.84375
| 3
|
[] |
no_license
|
#! /bin/sh
# Helper file to build and deploy to the innoactive registry.
# Requires DOCKER_REGISTRY_USERNAME / DOCKER_REGISTRY_PASSWORD in the env.
# NOTE(review): --password on the command line is visible via ps; consider
# `docker login --password-stdin` instead.
version='0.1.1'
REGISTRY_HOST='registry.docker.innoactive.de'
docker login --username=${DOCKER_REGISTRY_USERNAME} --password=${DOCKER_REGISTRY_PASSWORD} ${REGISTRY_HOST}
# Full image tag: <registry>/docker-postgres-backup-local:<version>
tag=${REGISTRY_HOST}'/docker-postgres-backup-local:'${version}
docker build -t ${tag} .
docker push ${tag}
| true
|
48518d4eadab8bb88d8113e1fa741535658c7a06
|
Shell
|
haaruhito/ShellScripting
|
/FunctionAndLocalVariable.sh
|
UTF-8
| 829
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Teaching script: demonstrates that shell variables are global by default,
# and that 'local' confines a variable to its function.
# All variables are global in shell script.
function Hello(){
# No 'local' here, so this assignment overwrites the global firstname.
firstname=$1
echo "Hello I am $firstname."
}
firstname="Rojna"
echo "Hello, I am $firstname." # It prints Rojna
Hello Shree # It prints Shree
echo "$firstname" # It prints Shree
echo "------------------------------------------"
# Lets create another function where I create local variable
function HelloAgain(){
local lastname=$1 # This lastname is a local variable.
echo "$lastname is my last name."
}
lastname="Sapkota" # This lastname is a global variable
echo "$lastname is my last name." # It prints Sapkota
HelloAgain Humagain # It prints Humagain
echo "$lastname is my last name. " # It prints Sapkota
| true
|
f1a1d143c19467ce3eaecf7f8488f695ebad966d
|
Shell
|
hopehpc/hope-singularity
|
/definition-files/amber18/scripts/BKtest_pmemd_serial_x5650.sh
|
UTF-8
| 1,307
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# This job will run one set of amber tests (serial AmberTools)
# User needs to be root to run this because it needs to write to /usr/local/amber18
# Runs the serial pmemd test suites and summarizes pass/fail/error counts
# into a timestamped log under $AMBERHOME/logs/test_pmemd_serial.
date_string=$(date +%Y-%m-%d_%H-%M-%S)
logdir="${AMBERHOME}/logs/test_pmemd_serial"
#logdir="/home/krieg/testing/logs/test_pmemd_serial"
logprefix="${logdir}/${date_string}"
logfile="${logprefix}.log"
difffile="${logprefix}.diff"
mkdir -p "${logdir}"
#Run the test programs
cd "$AMBERHOME/test"
(make -k -f Makefile test.serial.pmemd 2>&1) | tee "${logfile}"
(make -k -f Makefile test.serial.pmemd.gem 2>&1) | tee -a "${logfile}"
(make -k -f Makefile finished.serial 2>&1) | tee -a "${logfile}"
#Parse the output
# grep -c replaces the original "grep | wc -l" pipelines: same counts, one
# process fewer, and no platform-dependent wc whitespace padding.
passed_count=$(grep -c PASS "${logfile}")
questionable_count=$(grep -c -e "FAILURE:" -e "FAILED:" "${logfile}")
error_count=$(grep -c "Program error" "${logfile}")
echo "${passed_count} file comparisons passed" | tee -a "${logfile}"
echo "${questionable_count} file comparisons failed" | tee -a "${logfile}"
echo "${error_count} tests experienced errors" | tee -a "${logfile}"
echo "Test log file saved as ${logfile}" | tee -a "${logfile}"
if [ -f TEST_FAILURES.diff ]; then
    mv TEST_FAILURES.diff "${difffile}"
    echo "Test diffs file saved as ${difffile}" | tee -a "${logfile}"
else
    echo "No test diffs to save!" | tee -a "${logfile}"
fi
| true
|
ef7c4f35c3ee187b914ff1fe0dd924068db94465
|
Shell
|
tied/osx-bootstrapping
|
/dotfiles/roles/osx/files/aliases
|
UTF-8
| 2,112
| 2.953125
| 3
|
[] |
no_license
|
# vim: set filetype=sh :
# macOS shell aliases (sourced by the interactive shell).
# Shortcuts
alias d="cd ~/Documents/Dropbox"
alias dl="cd ~/Downloads"
alias dt="cd ~/Desktop"
alias p="cd ~/projects"
# Get OS X Software Updates, and update installed Ruby gems, Homebrew, npm, and their installed packages
alias update='sudo softwareupdate -i -a; brew update; brew upgrade; brew cleanup; gem update'
# Flush Directory Service cache
alias flush="dscacheutil -flushcache && sudo killall -HUP mDNSResponder"
# Recursively delete `.DS_Store` files
alias cleanup="find . -type f -name '*.DS_Store' -ls -delete"
# Empty the Trash on all mounted volumes and the main HDD
# Also, clear Apple’s System Logs to improve shell startup speed
alias emptytrash="sudo rm -rfv /Volumes/*/.Trashes; sudo rm -rfv ~/.Trash; sudo rm -rfv /private/var/log/asl/*.asl"
# Kill all the tabs in Chrome to free up memory
# [C] explained: http://www.commandlinefu.com/commands/view/402/exclude-grep-from-your-grepped-output-of-ps-alias-included-in-description
alias chromekill="ps ux | grep '[C]hrome Helper --type=renderer' | grep -v extension-process | tr -s ' ' | cut -d ' ' -f2 | xargs kill"
# Alias for colorssh
alias cs="${HOME}/bin/colorssh"
# APG alias for friendly passwords
alias apgf='apg -a 0 -n 10 -m 10 -t -E lI10O -M NCSL'
# Use the vim that comes with homebrew
alias vi="/usr/local/bin/vim"
# Run the ansible playbook that puts all the dotfiles in place locally
alias dflocal='ANSIBLE_CONFIG=~/projects/osx-bootstrapping/ansible.cfg ansible-playbook --vault-password-file ~/projects/osx-bootstrapping/vault_from_gpg_agent.py -i ~/projects/osx-bootstrapping/dotfiles/inventory ~/projects/osx-bootstrapping/dotfiles/osx.yml | ~/projects/osx-bootstrapping/sanitize_output.awk'
# Run the ansible playbook that install all osx apps w/settings (locally)
alias dfosx="ANSIBLE_CONFIG=~/projects/osx-bootstrapping/ansible.cfg ansible-playbook --vault-password-file ~/projects/osx-bootstrapping/vault_from_gpg_agent.py -i ~/projects/osx-bootstrapping/osx_apps/inventory ~/projects/osx-bootstrapping/osx_apps/osx.yml --extra-vars 'computername=${HOSTNAME}' --limit '${HOSTNAME}'"
| true
|
f9a08a0d0eb2e8edd4937ef16975fd2cdcaacab6
|
Shell
|
vwheezy22/dots
|
/.config/shell/profile
|
UTF-8
| 1,931
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
# Login-shell profile: default programs, XDG base directories, home-dir
# clean-up variables, and X session auto-start.
# Default programs
export EDITOR="nvim"
export VISUAL="nvim"
export TERMINAL="st"
export BROWSER="firefox"
export PAGER="less"
export HISTSIZE="INFINITE"
export WM="dwm"
export COLORTERM="truecolor"
export SHELL="zsh"
# ~/ Clean-up: point programs at XDG locations instead of dotfiles in $HOME.
export XDG_CONFIG_HOME="$HOME/.config"
export XDG_DATA_HOME="$HOME/.local/share"
export XDG_CACHE_HOME="$HOME/.cache"
export XAUTHORITY="$XDG_RUNTIME_DIR/Xauthority"
export HISTFILE="$XDG_DATA_HOME/history"
export GNUPGHOME="$XDG_DATA_HOME/gnupg"
export GOPATH="$XDG_DATA_HOME/go"
export NODE_REPL_HISTORY="$XDG_DATA_HOME/node_repl_history"
export XORG_LOG="$XDG_DATA_HOME/xorg/xorg.log"
export RUSTUP_HOME="$XDG_DATA_HOME/rustup"
export TEXMFHOME="$XDG_DATA_HOME/texmf"
export CARGO_HOME="$XDG_DATA_HOME/cargo"
export TASKDATA="$XDG_DATA_HOME/task"
export TIMEWARRIORDB="$XDG_DATA_HOME/timew"
export NPM_CONFIG_USERCONFIG="$XDG_CONFIG_HOME/npm/npmrc"
export XINITRC="$XDG_CONFIG_HOME/X11/xinitrc"
export INPUTRC="$XDG_CONFIG_HOME/shell/inputrc"
export ZDOTDIR="$XDG_CONFIG_HOME/shell/zsh"
export GTK2_RC_FILES="$XDG_CONFIG_HOME/gtk-2.0/gtkrc-2.0"
export TEXMFCONFIG="$XDG_CONFIG_HOME/texlive/texmf-config"
export WGETRC="$XDG_CONFIG_HOME/wget/wgetrc"
export _JAVA_OPTIONS="-Djava.util.prefs.userRoot="$XDG_CONFIG_HOME/java""
export TASKRC="$XDG_CONFIG_HOME/task/taskrc"
export TASKOPENRC="$XDG_CONFIG_HOME/taskopen/taskopenrc"
export TEXMFVAR="$XDG_CACHE_HOME/texlive/texmf-var"
export LESSHISTFILE="-"
export _JAVA_AWT_WM_NONREPARENTING=1 # Fix for Java applications in dwm
export QT_QPA_PLATFORMTHEME="qt5ct" # Have QT use gtk2 theme.
export SUDO_ASKPASS="/usr/lib/ssh/ssh-askpass"
# Adds `~/.local/bin` to $PATH
export PATH="$XDG_DATA_HOME/npm-global/bin:$HOME/.local/bin:$CARGO_HOME/bin:$PATH"
# AppArmor desktop notifier — assumes auditd writes to this log path.
aa-notify -p -s 1 -w 60 -f /var/log/audit/audit.log
# Auto-start X on login unless already running; send Xorg output to $XORG_LOG.
pgrep startx >/dev/null || startx "$XINITRC" -- -keeptty &> "$XORG_LOG"
# When the login shell is bash, also load ~/.bashrc.
[ -n "$BASH_VERSION" ] && [ -f "$HOME/.bashrc" ] && . "$HOME/.bashrc"
| true
|
beb8e1846d9b1b1e531a937e6978f11edfbdf793
|
Shell
|
aravinds0217/bash-easy-callenges
|
/easy7.4
|
UTF-8
| 106
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Read a string from stdin and print its length.
length(){
echo enter a string:
# -r keeps backslashes literal in the input
read -r str
# ${#str} replaces `expr length $str`, which broke on input containing
# spaces or shell metacharacters because of the unquoted expansion.
l=${#str}
echo length $str is :$l
}
length
| true
|
4269c6dfa277c8f038307afca80bb40e42728550
|
Shell
|
openxpki/openxpki
|
/tools/testdata/_create-certs.sh
|
UTF-8
| 9,031
| 4.03125
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/bin/bash
#
# Create certificates and store them in $OXI_CONF/etc/openxpki/ca/xxx/
# Import the certificates into MySQL using "openxpkiadm".
#
# This script assumes that there is a valid OpenXPKI configuration below
# $OXI_CONF so that "openxpkiadm" can run and access the database.
#
# Please note that the PKI realms used in this script must correspond to the
# ones found in $OXI_CONF/realms.yaml.
#
# Where to store the certificates
# Where to store the certificates; must be an existing test config dir.
OXI_CONF="$1"
if [[ -z "$OXI_CONF" ]]; then
echo "$(basename $0): test config directory must be specified as first parameter";
exit 1
fi
if [[ ! -d "$OXI_CONF" ]]; then
echo "$(basename $0): given test config directory $OXI_CONF does not exist";
exit 1
fi
# Only variable that is globally used
TEMPDIR=`mktemp -d`
# Exit handler: on failure, dump the captured command output to stderr;
# always remove the scratch directory and propagate the exit code.
function _exit () {
if [ $1 -ne 0 ]; then
echo "ERROR - last command exited with code $1, output:" >&2 && cat $TEMPDIR/log >&2
fi
rm -Rf $TEMPDIR
exit $1
}
trap '_exit $?' EXIT
# Write a minimal openssl.cnf into $TEMPDIR, plus the CA bookkeeping files
# (index, serial, crlnumber) that "openssl ca" requires.
write_openssl_config() {
cat <<EOF > $TEMPDIR/openssl.cnf
HOME = .
RANDFILE = \$ENV::HOME/.rnd
[ ca ]
default_ca = CA_default
[ CA_default ]
dir = $TEMPDIR
certs = $TEMPDIR/certs
crl_dir = $TEMPDIR/crl
database = $TEMPDIR/index.txt
new_certs_dir = $TEMPDIR/
serial = $TEMPDIR/serial
crlnumber = $TEMPDIR/crlnumber
crl = $TEMPDIR/crl.pem
private_key = $TEMPDIR/cakey.pem
RANDFILE = $TEMPDIR/.rand
default_md = sha256
preserve = no
policy = policy_none
default_days = 365
x509_extensions = usr_cert
[policy_none]
domainComponent = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
default_bits = 2048
distinguished_name = req_distinguished_name
x509_extensions = v3_ca
[ req_distinguished_name ]
domainComponent = Domain Component
commonName = Common Name
[ usr_cert ]
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
[ vault_cert ]
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
keyUsage = keyEncipherment
extendedKeyUsage = emailProtection
[ v3_ca ]
basicConstraints = critical,CA:true
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
keyUsage = cRLSign, keyCertSign
EOF
# State files "openssl ca" expects to exist before the first issuance.
touch $TEMPDIR/index.txt
touch $TEMPDIR/index.txt.attr
echo 01 > $TEMPDIR/serial
echo 00 > $TEMPDIR/crlnumber
}
# Generate a key + CSR and issue the certificate.
#   $1 extra "openssl ca" options (e.g. "-extensions v3_ca"), may be empty
#   $2 output file stem          $3 subject DN
#   $4 signer file stem or SELF  $5/$6 validity start/end (YYYYMMDDHHMMSSZ)
# If SIGNER_NAMESTEM == "SELF" we assume the cert is self signed
issue_cert() {
local ADD_PARAMS=$1
local CERT_NAMESTEM=$2
local LABEL=$3
local SIGNER_NAMESTEM=$4
local VALID_FROM=$5
local VALID_TO=$6
# Certificate Signing Request
openssl req -config $TEMPDIR/openssl.cnf -batch \
-verbose \
-newkey rsa:2048 \
-subj "$LABEL" \
-keyout $CERT_NAMESTEM.pem -passout pass:root \
-out $CERT_NAMESTEM.csr \
> $TEMPDIR/log 2>&1
# Issuance of Certificate
local signerRef="-keyfile $SIGNER_NAMESTEM.pem -cert $SIGNER_NAMESTEM.crt"
[ $SIGNER_NAMESTEM == "SELF" ] && local signerRef="-keyfile $CERT_NAMESTEM.pem -selfsign"
openssl ca -config $TEMPDIR/openssl.cnf -batch \
$ADD_PARAMS \
-in $CERT_NAMESTEM.csr \
$signerRef -passin pass:root \
-startdate $VALID_FROM -enddate $VALID_TO \
-notext \
-out $CERT_NAMESTEM.crt \
> $TEMPDIR/log 2>&1
}
# Create one certificate generation for a realm and import it into OpenXPKI.
#   $1 target config dir   $2 realm          $3 generation number
#   $4/$5 validity start/end (YYYYMMDDHHMMSSZ)
#   $6 optional special mode: REVOKE (revoke two extra clients + issue CRL)
#      or ORPHAN (import only client "bob", without its chain)
make_certs() {
local TARGET_DIR=$1
local REALM=$2
local GEN=$3
local VALID_FROM=$4
local VALID_TO=$5
local SPECIAL=$6
local SUBJECT_BASE="/DC=ORG/DC=OpenXPKI/OU=ACME/CN=$(echo "$REALM" | tr [:lower:] [:upper:])"
local BASEPATH="$TEMPDIR/$REALM"
# Remove OpenSSL info of previous certs (otherwise it mixes our generations)
rm -f $TEMPDIR/index.txt*
touch $TEMPDIR/index.txt
touch $TEMPDIR/index.txt.attr
echo "Certificates for CA $2 (generation $GEN)"
echo " - create via OpenSSL"
# Self signed DataVault cert
issue_cert "-extensions vault_cert" \
$BASEPATH-datavault-$GEN "$SUBJECT_BASE DataVault $GEN" \
SELF \
$VALID_FROM $VALID_TO
# Self signed Root CA cert
issue_cert "-extensions v3_ca" \
$BASEPATH-root-$GEN "$SUBJECT_BASE Root CA $GEN" \
SELF \
$VALID_FROM $VALID_TO
# Signing CA cert (signed by Root CA)
issue_cert "-extensions v3_ca" \
$BASEPATH-signer-$GEN "$SUBJECT_BASE Signing CA $GEN" \
$BASEPATH-root-$GEN \
$VALID_FROM $VALID_TO
# SCEP cert (signed by Root CA)
issue_cert "" \
$BASEPATH-scep-$GEN "$SUBJECT_BASE SCEP $GEN" \
$BASEPATH-root-$GEN \
$VALID_FROM $VALID_TO
# Client cert #1 (signed by Signing CA)
issue_cert "" \
$BASEPATH-alice-$GEN "$SUBJECT_BASE Client Alice $GEN" \
$BASEPATH-signer-$GEN $VALID_FROM $VALID_TO
# Client cert #2 (signed by Signing CA)
issue_cert "" \
$BASEPATH-bob-$GEN "$SUBJECT_BASE Client Bob $GEN" \
$BASEPATH-signer-$GEN $VALID_FROM $VALID_TO
# Create two more client certs that will be revoked
if [ "$SPECIAL" == "REVOKE" ]; then
# Client cert #3 (signed by Signing CA)
issue_cert "" \
$BASEPATH-christine-$GEN "$SUBJECT_BASE Client Christine $GEN" \
$BASEPATH-signer-$GEN $VALID_FROM $VALID_TO
# Client cert #4 (signed by Signing CA)
issue_cert "" \
$BASEPATH-don-$GEN "$SUBJECT_BASE Client Don $GEN" \
$BASEPATH-signer-$GEN $VALID_FROM $VALID_TO
echo " - revoke certificates christine and don"
param=(-config $TEMPDIR/openssl.cnf -batch -verbose -keyfile $BASEPATH-signer-$GEN.pem -cert $BASEPATH-signer-$GEN.crt -passin pass:root)
openssl ca ${param[@]} -revoke $BASEPATH-christine-$GEN.crt -crl_compromise 20100304070830Z > $TEMPDIR/log 2>&1
openssl ca ${param[@]} -revoke $BASEPATH-don-$GEN.crt -crl_reason cessationOfOperation > $TEMPDIR/log 2>&1
echo " - create CRL"
openssl ca ${param[@]} -gencrl -crldays 18250 -out $TARGET_DIR/ca/$REALM/$REALM-$GEN.crl > $TEMPDIR/log 2>&1
fi
# PKCS7 for client alice
openssl crl2pkcs7 -nocrl \
-certfile $BASEPATH-root-$GEN.crt \
-certfile $BASEPATH-signer-$GEN.crt \
-certfile $BASEPATH-alice-$GEN.crt \
-out $BASEPATH-alice-$GEN.p7b > $TEMPDIR/log 2>&1
echo " - import into OpenXPKI"
local OXI_IMPORT="openxpkiadm certificate import --force-no-verify --gen $GEN --realm $REALM"
if [ "$SPECIAL" != "ORPHAN" ]; then
$OXI_IMPORT --file $BASEPATH-root-$GEN.crt --token root > $TEMPDIR/log 2>&1
$OXI_IMPORT --file $BASEPATH-signer-$GEN.crt --token certsign > $TEMPDIR/log 2>&1
$OXI_IMPORT --file $BASEPATH-datavault-$GEN.crt --token datasafe > $TEMPDIR/log 2>&1
$OXI_IMPORT --file $BASEPATH-scep-$GEN.crt --token scep > $TEMPDIR/log 2>&1
$OXI_IMPORT --file $BASEPATH-alice-$GEN.crt --alias "$REALM-alice-${GEN}" > $TEMPDIR/log 2>&1
$OXI_IMPORT --file $BASEPATH-bob-$GEN.crt --alias "$REALM-bob-${GEN}" > $TEMPDIR/log 2>&1
if [ "$SPECIAL" == "REVOKE" ]; then
$OXI_IMPORT --file $BASEPATH-christine-$GEN.crt --alias "$REALM-christine-${GEN}" > $TEMPDIR/log 2>&1
$OXI_IMPORT --file $BASEPATH-don-$GEN.crt --alias "$REALM-don-${GEN}" > $TEMPDIR/log 2>&1
fi
else
$OXI_IMPORT --file $BASEPATH-bob-$GEN.crt --alias "$REALM-bob-${GEN}" --force-no-chain > $TEMPDIR/log 2>&1
fi
mkdir -p $TARGET_DIR/ca/$REALM
# Stage the generated files into the realm's ca directory.
if [ "$SPECIAL" != "ORPHAN" ]; then
mv $BASEPATH*.crt $TARGET_DIR/ca/$REALM || true
mv $BASEPATH*.pem $TARGET_DIR/ca/$REALM || true
mv $BASEPATH*.p7b $TARGET_DIR/ca/$REALM || true
else
mv $BASEPATH-bob-$GEN.* $TARGET_DIR/ca/$REALM || true
fi
}
set -e
write_openssl_config
# Needed for openxpkiadm to work
export OPENXPKI_CONF_PATH=$OXI_CONF/config.d
# Generate the certificate generations per realm:
#   alpha: three generations (gen 2 carries revoked certs + a CRL)
#   beta:  one generation
#   gamma: one generation imported without its chain (ORPHAN)
make_certs $OXI_CONF alpha 1 20060101000000Z 20070131235959Z
make_certs $OXI_CONF alpha 2 20070101000000Z 21000131235959Z REVOKE
make_certs $OXI_CONF alpha 3 21000101000000Z 21050131235959Z
make_certs $OXI_CONF beta 1 20170101000000Z 21050131235959Z
make_certs $OXI_CONF gamma 1 20170101000000Z 21050131235959Z ORPHAN
# democa 1 already exists in sample installation (whose database we currently use)
make_certs $OXI_CONF democa 2 20170101000000Z 21050131235959Z
rm -Rf $TEMPDIR
# openxpkiadm alias list --realm alpha
# openxpkiadm alias list --realm alpha
| true
|
e7361b9b770fda197dc387340cc8468c9e522b4e
|
Shell
|
thotakabsp/Morty
|
/sources/meta-thermo-mx6/recipes-qt/tfsapp/files/Scripts/13_TEMP_test.sh
|
UTF-8
| 1,979
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Interactive temperature / relative-humidity sensor test: read the IIO
# sysfs attributes of iio:device0, print temperature in Celsius and
# Fahrenheit plus the humidity, and repeat until the operator answers "n".
echo "**********<<<<<<<<<< Temparature Sensor TEST START>>>>>>>>>>**********"
i=3
while [ $i -lt 10 ];
do
# Raw value, offset and scale as exposed by the temperature channel.
in_temp_raw=$(cat /sys/bus/iio/devices/iio:device0/in_temp_raw || true)
in_temp_offset=$(cat /sys/bus/iio/devices/iio:device0/in_temp_offset || true)
in_temp_scale=$(cat /sys/bus/iio/devices/iio:device0/in_temp_scale || true)
if [ ! "x${in_temp_raw}" = "x" ] && [ ! "x${in_temp_offset}" = "x" ] && [ ! "x${in_temp_scale}" = "x" ] ; then
# temp(C) = (raw + offset) * scale / 1000, computed with bc for decimals
temp=$(echo "${in_temp_raw} + ${in_temp_offset}" | bc)
temp=$(echo "scale=1; ${temp} * ${in_temp_scale}" | bc)
temp=$(echo "scale=1; ${temp} / 1000" | bc)
echo "Current Temp is : ${temp} °C"
# Celsius -> Fahrenheit: F = C * 9 / 5 + 32
temp=$(echo "scale=1; ${temp} * 9" | bc)
temp=$(echo "scale=1; ${temp} / 5" | bc)
temp=$(echo "scale=1; ${temp} + 32" | bc)
echo "Current Temp is : ${temp} °F"
fi
in_humidityrelative_raw=$(cat /sys/bus/iio/devices/iio:device0/in_humidityrelative_raw || true)
in_humidityrelative_offset=$(cat /sys/bus/iio/devices/iio:device0/in_humidityrelative_offset || true)
in_humidityrelative_scale=$(cat /sys/bus/iio/devices/iio:device0/in_humidityrelative_scale || true)
if [ ! "x${in_humidityrelative_raw}" = "x" ] && [ ! "x${in_humidityrelative_offset}" = "x" ] && [ ! "x${in_humidityrelative_scale}" = "x" ] ; then
humidityrelative=$(echo "${in_humidityrelative_raw} + ${in_humidityrelative_offset}" | bc)
humidityrelative=$(echo "scale=1; ${humidityrelative} * ${in_humidityrelative_scale}" | bc)
humidityrelative=$(echo "scale=1; ${humidityrelative} / 1000" | bc)
echo "Current humidityrelative is : ${humidityrelative} %"
fi
echo "do you want to continue (y/n): "
read ans
# Fix: quote "$ans" and use POSIX '='. The original unquoted bash-style
# `==` comparison errors out under /bin/sh when the operator just presses
# Enter (empty $ans).
if [ "$ans" = "n" ];
then
i=10
fi
done
echo "**********<<<<<<<<<< Temparature Sensor TEST END>>>>>>>>>>**********"
sleep 1
exit 0
| true
|
ecf8515d9643a63a3fe4d84a3bb35b7d8bc65a61
|
Shell
|
AlexeiKharchev/bash_functions_library
|
/test/terminal.bats
|
UTF-8
| 3,572
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bats
#shellcheck disable
# Unittests for the functions in lib/terminal
#
# The unit tests in this script are written using the BATS framework.
# See: https://github.com/sstephenson/bats
# **************************************************************************** #
# Imports #
# **************************************************************************** #
[[ ${_GUARD_BFL_autoload} -eq 1 ]] || { . ${HOME}/getConsts; . "$BASH_FUNCTION_LIBRARY"; }
# **************************************************************************** #
# Init #
# **************************************************************************** #
load 'test_helper/bats-support/load'
load 'test_helper/bats-file/load'
load 'test_helper/bats-assert/load'
#ROOTDIR="$(git rev-parse --show-toplevel)"
# **************************************************************************** #
# Setup tests #
# **************************************************************************** #
# Per-test fixture: create a scratch dir, cd into it, and establish the
# library's default flag variables before enabling strict-mode options.
setup() {
TESTDIR="$(temp_make)"
curPath="${PWD}"
BATSLIB_FILE_PATH_REM="#${TEST_TEMP_DIR}"
BATSLIB_FILE_PATH_ADD='<temp>'
pushd "${TESTDIR}" >&2
######## DEFAULT FLAGS ########
LOGFILE="${TESTDIR}/logs/log.txt"
BASH_INTERACTIVE=true
LOGLEVEL=ERROR
VERBOSE=false
FORCE=false
DRYRUN=false
# _setColors_ # Set Color Constants
set -o errtrace
set -o nounset
set -o pipefail
}
# Undo the strict-mode options, leave the scratch dir, and delete it.
teardown() {
set +o nounset
set +o errtrace
set +o pipefail
popd >&2
temp_del "${TESTDIR}"
}
# **************************************************************************** #
# Test Casses #
# **************************************************************************** #
# Smoke test: confirms the BATS harness itself works.
@test "Sanity..." {
run true
assert_success
assert_output ""
}
# ---------------------------------------------------------------------------- #
# bfl::terminal_print_2columns #
# ---------------------------------------------------------------------------- #
@test "bfl::terminal_print_2columns: key/value" {
run bfl::terminal_print_2columns "key" "value"
assert_output --regexp "^key.*value"
}
@test "bfl::terminal_print_2columns: indented key/value" {
run bfl::terminal_print_2columns "key" "value" 1
assert_output --regexp "^ key.*value"
}
# ---------------------------------------------------------------------------- #
# bfl::terminal_spinner #
# ---------------------------------------------------------------------------- #
# NOTE(review): setup() defines uppercase VERBOSE; these tests set lowercase
# 'verbose'/'quiet' — confirm which spelling the library actually reads.
@test "bfl::terminal_spinner: verbose" {
verbose=true
run bfl::terminal_spinner
assert_success
assert_output ""
}
@test "bfl::terminal_spinner: quiet" {
quiet=true
run bfl::terminal_spinner
assert_success
assert_output ""
}
# ---------------------------------------------------------------------------- #
# bfl::terminal_progressbar #
# ---------------------------------------------------------------------------- #
@test "bfl::terminal_progressbar: verbose" {
verbose=true
run bfl::terminal_progressbar 100
assert_success
assert_output ""
}
@test "bfl::terminal_progressbar: quiet" {
quiet=true
run bfl::terminal_progressbar 100
assert_success
assert_output ""
}
| true
|
0bf5d6bc6fba580df5443c3cf06bf0c190c05e64
|
Shell
|
m2-local/bin
|
/dkr-env
|
UTF-8
| 616
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
#-------------------------------------------------------------------------
# Copyright (c) 2017 - Moxune LLC - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by Nathan Nobbe<nathan@moxune.com>
#-------------------------------------------------------------------------
# Execute an "env" file inside the environment
# (scripts in the ~/home/env directory of the docker environment)
# Load the ini parser, read dkr-config.ini, and export its [docker] section.
. $(dirname "$0")/ini-parser.sh
cfg.parser $(dirname "$0")/../dkr-config.ini
cfg.section.docker
# Run the named env script inside the container via dkr-exec.
# NOTE(review): "$@" still contains $1, so the script name is passed again
# as the first argument to the env script — confirm this is intentional.
$(dirname "$0")/dkr-exec /var/www/bin/env/"$1" "$@"
| true
|
a141f56b86553aec33034db20bbe8640ceaa1756
|
Shell
|
iamanatolich/centos7
|
/scripts/remove-vhost
|
UTF-8
| 314
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Remove an Apache virtual host: drop its sites-available/enabled config
# and the shared html/log directories, then restart the server.
# Usage: remove-vhost <hostname>
# Fix: quote "${1}" — the original bare `[ -z ${1} ]` only worked by accident
# when $1 was unset and broke on values containing spaces.
if [ -z "${1}" ]
then
echo "Set virtual host"
exit
fi
httpdPath=/etc/httpd
sharePath=/vagrant
config=$1.conf
sudo service httpd stop
sudo rm "$httpdPath/sites-enabled/$config"
sudo rm "$httpdPath/sites-available/$config"
sudo rm -rf "$sharePath/html/$1"
sudo rm -rf "$sharePath/log/$1"
sudo server-restart
| true
|
5735bb14a5eb3c24bc46b0964ed361efa0b718f8
|
Shell
|
Navyashree211/Batch349
|
/ForLoop/PowerOfTwo.sh
|
UTF-8
| 188
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Print b^2 (b = first argument), then every power of two smaller than it.
# awk does the arithmetic, so values are not limited to shell integers.
a=("$@")
b=${a[0]}
p=2
# pw = b ^ p
pw=$(awk -v x=$b -v y=$p 'BEGIN {print(x^y)}')
echo "$pw"
k=2
# Emit 2, 4, 8, ... while still below pw.
while [ "$k" -lt "$pw" ]
do
echo "$k"
k=$(awk -v x=$k -v y=2 'BEGIN {print(x*y)}')
done
| true
|
31cf988c6ece9914d7d52cbd9750d8fe99ab8e33
|
Shell
|
cd-yangling/libaddns
|
/libfnaddns
|
UTF-8
| 5,549
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Aliyun (Alibaba Cloud) DNS API helper library.
NR_keys=0
hostname="alidns.aliyuncs.com"
hostpath="/?"
#
# href: https://stackoverflow.com/questions/296536/how-to-urlencode-data-for-curl-command
# href: https://gist.github.com/cdown/1163649
#
# @param1: the string to URI-encode
#
# Return: the encoded string (written to stdout, no trailing newline)
#
urlencode() {
local old_lc_collate=$LC_COLLATE
LC_COLLATE=C
local length="${#1}"
for (( i = 0; i < length; i++ )); do
local c="${1:i:1}"
case $c in
[a-zA-Z0-9.~_-]) printf "$c" ;;
%) printf "%%25";;
# any other byte: hex-dump it and emit %XX escapes
*) printf "$c" | xxd -u -p -c1 | while read x;do printf "%%%s" "$x";done
esac
done
LC_COLLATE=$old_lc_collate
}
#
# @param1: key
# @param2: value
#
# Stores the pair in the pseudo-arrays shrt_key_<n> / shrt_val_<key>
# (plain sh has no real arrays) and increments NR_keys.
set_param()
{
eval shrt_key_$NR_keys=$1
eval shrt_val_$1=$2
NR_keys=$((++NR_keys))
}
#
#
# href: https://help.aliyun.com/document_detail/29747.html?spm=a2c4g.11186623.4.3.6ef97ebbHSzvzy
#
# Sort the collected parameters and sign them as required by the
# Alibaba Cloud API documentation (HMAC-SHA1 over the canonical query).
#
# Return: the Base64-encoded signature string
#
calc_signature()
{
# sort the keys into the sorted_keys list
local sorted_keys=$(
i=0
while [ $i -lt $NR_keys ]
do
eval key=\$shrt_key_$i
echo $key
i=$((++i))
done | LC_COLLATE=C sort
)
# assemble the string-to-sign
local src_string=""
for key in $sorted_keys
do
eval val="\$shrt_val_$key"
val=$(urlencode $val)
src_string="$src_string$key=$val&"
done
# strip the trailing &
src_string=$(echo $src_string | sed 's/&$//g')
# percent-encode the whole query once more
src_string=$(urlencode $src_string)
# prepend the HTTP method and path
src_string="GET&%2F&"$src_string
echo -n $src_string | openssl dgst -binary -sha1 -hmac "$AccessKeySecret&" | openssl enc | base64
}
#
# Build the final request URL string used by the curl command
#
build_final_url()
{
local i=0
local url_string="https://$hostname$hostpath"
while [ $i -lt $NR_keys ]
do
eval key=\$shrt_key_$i
eval val=\$shrt_val_$key
key_enc=$(urlencode $key)
val_enc=$(urlencode $val)
# key_enc=`echo -n $key | perl -MURI::Escape -le 'print uri_escape <STDIN>'`
# val_enc=`echo -n $val | perl -MURI::Escape -le 'print uri_escape <STDIN>'`
url_string="$url_string$key_enc=$val_enc&"
i=$((++i))
done
# strip the trailing &
url_string=$(echo $url_string | sed 's/&$//g')
echo $url_string
}
#
# Set the common request parameters shared by every API call
#
set_pub_param()
{
set_param "Format" "JSON"
set_param "Version" "2015-01-09"
set_param "AccessKeyId" $AccessKeyId
set_param "SignatureMethod" "HMAC-SHA1"
set_param "SignatureVersion" "1.0"
# timestamp must be UTC in ISO-8601 form
local time_utc=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
set_param "Timestamp" $time_utc
# random nonce to prevent request replay
local rand_num=$(openssl rand -hex 16)
set_param "SignatureNonce" $rand_num
}
#
# List the DNS records of a domain ( DescribeDomainRecords )
#
# @param1: domain name
#
# Note1: requires AccessKeySecret & AccessKeyId to be exported
#
# Return: a string ready to be used as a GET request with curl or wget
#
DescribeDomainRecords()
{
set_pub_param
set_param "Action" "DescribeDomainRecords"
set_param "DomainName" $1
set_param "Signature" $(calc_signature)
build_final_url
}
#
# Modify a DNS record ( UpdateDomainRecord )
#
# @param1: RR
# @param2: RecordId
# @param3: Type
# @param4: Value
#
# Note1: requires AccessKeySecret & AccessKeyId to be exported
#
# Return: a string ready to be used as a GET request with curl or wget
#
UpdateDomainRecord()
{
set_pub_param
set_param "Action" "UpdateDomainRecord"
set_param "RR" $1
set_param "RecordId" $2
set_param "Type" $3
set_param "Value" $4
set_param "Signature" $(calc_signature)
build_final_url
}
#
# Add a DNS record ( AddDomainRecord )
#
# @param1: DomainName
# @param2: RR
# @param3: Type
# @param4: Value
#
# Note1: requires AccessKeySecret & AccessKeyId to be exported
#
# Return: a string ready to be used as a GET request with curl or wget
#
AddDomainRecord()
{
set_pub_param
set_param "Action" "AddDomainRecord"
set_param "DomainName" $1
set_param "RR" $2
set_param "Type" $3
set_param "Value" $4
set_param "Signature" $(calc_signature)
build_final_url
}
#
# Delete a DNS record ( DeleteDomainRecord )
#
# @param1: RecordId
#
# Note1: requires AccessKeySecret & AccessKeyId to be exported
#
# Return: a string ready to be used as a GET request with curl or wget
#
DeleteDomainRecord()
{
set_pub_param
set_param "Action" "DeleteDomainRecord"
set_param "RecordId" $1
set_param "Signature" $(calc_signature)
build_final_url
}
#
# Get the details of a DNS record ( DescribeDomainRecordInfo )
#
# @param1: RecordId
#
# Note1: requires AccessKeySecret & AccessKeyId to be exported
#
# Return: a string ready to be used as a GET request with curl or wget
#
DescribeDomainRecordInfo()
{
set_pub_param
set_param "Action" "DescribeDomainRecordInfo"
set_param "RecordId" $1
set_param "Signature" $(calc_signature)
build_final_url
}
#
# Set the status of a DNS record ( SetDomainRecordStatus )
#
# @param1: RecordId
# @param2: Status
#
# Note1: requires AccessKeySecret & AccessKeyId to be exported
#
# Return: a string ready to be used as a GET request with curl or wget
#
SetDomainRecordStatus()
{
set_pub_param
set_param "Action" "SetDomainRecordStatus"
set_param "RecordId" $1
set_param "Status" $2
set_param "Signature" $(calc_signature)
build_final_url
}
| true
|
7c33bd4b065bcb7069c0273e4d8ee266218ccf83
|
Shell
|
ThomasDickey/cm_tools-snapshots
|
/src/checkup/test/run_test.sh
|
UTF-8
| 1,147
| 3.34375
| 3
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
#!/bin/sh
# $Id: run_test.sh,v 11.4 2019/12/02 22:11:31 tom Exp $
# Exercise the "checkup" utility against a scratch RCS working tree:
# three cases covering unregistered files, locally modified files, and
# recursive traversal with -x suffix exclusions.
date
#
# run from test-versions:
# Put the test builds of checkin/checkout/checkup ahead of the PATH.
for p in .. ../bin ../.. ../../bin
do
for q in . checkin checkout
do
[ -d $p/$q ] && PATH=`unset CDPATH;cd $p/$q && pwd`:$PATH
done
done
PATH=:`pwd`:$PATH
export PATH
#
TTY=/tmp/test$$
rm -rf junk
trap "rm -rf junk; rm -f $TTY" 0
#
# RCS_DEBUG=1 enables shell tracing and verbose listings after each case.
if test -z "$RCS_DEBUG"
then
RCS_DEBUG=0
export RCS_DEBUG
fi
if test $RCS_DEBUG != 0
then
set -x
Q=""
S="ls -lR; cat $TTY"
else
Q="-q"
S=""
fi
#
mkdir junk
cd junk
rm -f junk.* RCS/junk.*
#
cp ../makefile.in junk.txt
echo 'test file'>>junk.desc
#
cat <<eof/
**
**
** Case 1. Shows junk.desc (which is not checked-in).
eof/
checkin $Q -u -tjunk.desc junk.txt
checkup junk.* >>$TTY
eval $S
#
cat <<eof/
**
**
** Case 2. Shows junk.txt (which is assumed to have changes), and suppress
** junk.desc using the -x option.
eof/
checkout $Q -l junk.txt
touch junk.txt
checkup -x.desc junk.* >>$TTY
eval $S
#
#
cat <<eof/
**
**
** Case 3. Traverses the local tree, suppressing ".out" and ".desc" files.
** Again, junk.txt is reported.
eof/
checkup -x.out -x.desc .. >>$TTY
eval $S
rm -f junk.* RCS/junk.*
cd ..
|
13a9af2d6f63d1cfc8bceb8a6146d9fa6ab06d27
|
Shell
|
kenzo0107/FacialRecognitionSystem
|
/step3_classify.sh
|
UTF-8
| 1,037
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# step3_classify.sh -- split detected-face images into train/test sets and
# generate labelled data files for deep learning.
#
# Reads config.txt ("label,search_word,number" per line), then for each
# entry: classify.sh splits the label's images 70/30 into out/train and
# out/test, and gen_testdata.py appends labelled records to train.txt /
# test.txt.
#
# NOTE: the shebang is bash (not sh) because arrays are used below.
SETTING="config.txt"

# initialize: truncate any output from a previous run.
cp /dev/null deeplearning/train.txt
cp /dev/null deeplearning/test.txt

while IFS= read -r line
do
    # Split the CSV line into label / search word / sample count.
    arr=( $(echo "$line" | tr -s "," ' ') )
    LABEL="${arr[0]}"
    SEARCH_WORD="${arr[1]}"
    NUMBER="${arr[2]}"
    echo "LABEL:${LABEL} - SEARCH_WORD:${SEARCH_WORD}"

    # The original used `echo << EOV`, which silently discards the
    # here-document (echo ignores stdin); `cat` actually prints the banner.
    cat << EOV
==============================
顔検出画像を train, test に分類
==============================
EOV
    sh classify.sh "${LABEL}" 70

    cat << EOW
============================
訓練画像に分類
============================
EOW
    if [ -e "out/train/${LABEL}" ];
    then
        python deeplearning/gen_testdata.py out/train "${LABEL}" "${NUMBER}" >> deeplearning/train.txt
        echo "---> out/train から label:${LABEL} 付けしたデータを train.txt へ保存\n"
    fi
    if [ -e "out/test/${LABEL}" ];
    then
        python deeplearning/gen_testdata.py out/test "${LABEL}" "${NUMBER}" >> deeplearning/test.txt
        echo "---> out/test から label:${LABEL} 付けしたデータを test.txt へ保存\n"
    fi
done < "$SETTING"
| true
|
73e67bf1f1e0c812c195475e8484d801331c529c
|
Shell
|
whaxkl/test
|
/HTTP/shell自动搭建iis网站.sh
|
GB18030
| 4,475
| 3.21875
| 3
|
[] |
no_license
|
# (disabled) earlier bash wrapper that copied a wordpress template per site:
#for var in $*
#do
#cp -r D:\/wordpress\/wordpress D:\/wordpress\/${var}
#done
# ************************************************************
# *  PowerShell script: automated IIS web-site provisioning   *
# *  (the original header and inline comments were GB18030-   *
# *   encoded and arrived garbled; rewritten in English)      *
# ************************************************************
# Author: Sanddy  QQ:442405
# Date:   2014-06-02
#set-executionpolicy remotesigned
Import-Module WebAdministration # IIS administration module
# Per-site IIS settings -- adjust before running against a new site.
#################################################################################################
$sitePort = 80 # listen port
$SiteName = $1 # site name; NOTE(review): `$1` is bash syntax, not a PowerShell automatic variable -- likely needs $args[0] or a param() block; confirm how this script is invoked
$SiteAppPools = "DefaultAppPool" # application pool name
$SiteAppPoolsModel = "" # managed pipeline mode (empty -- presumably the IIS default; confirm)
$AppPoolType = "LocalSystem" # pool identity: 0 LocalService, 1 LocalSystem, 2 NetworkService, 3 User, 4 ApplicationPoolIdentity; NOTE(review): declared but BuildAppPool hardcodes NetworkService
$managedRuntimeVersion = "v4.0" # .NET runtime version for the pool
$WebSitePath = "D:\/wordpress\/$1" # site document root
$HostHeader1 = $1 # host-header binding 1
$HostHeader2 = $1 # host-header binding 2
$defaultDocument = "index.php"
$IISLogFile = "D:\/wordpress\/wordpress\/$SiteName" # IIS log directory
$net32Or64 = $true # run the pool in 32-bit mode on 64-bit Windows
#################################################################################################
# Create and configure the IIS application pool (skipped when it exists).
function BuildAppPool(){
$AppPool = "iis:\AppPools\" + $SiteAppPools
$existAppPool = Test-Path $AppPool
if($existAppPool -eq $false){
# create the application pool
.$Env:windir\system32\inetsrv\appcmd.exe add apppool /name:$SiteAppPools /managedRuntimeVersion:$managedRuntimeVersion /managedPipelineMode:$SiteAppPoolsModel
# set the identity the pool runs under
# NOTE(review): hardcodes NetworkService and ignores $AppPoolType -- confirm
.$Env:windir\system32\inetsrv\appcmd.exe set config /section:applicationPools /[name="'$SiteAppPools'"].processModel.identityType:NetworkService
# set the .NET runtime / pipeline mode (repeats the `add apppool` above)
.$Env:windir\system32\inetsrv\appcmd.exe add apppool /name:$SiteAppPools /managedRuntimeVersion:$managedRuntimeVersion /managedPipelineMode:$SiteAppPoolsModel
# recycle when private memory exceeds 1.5 GB (value is in KB)
.$Env:windir\system32\inetsrv\appcmd.exe set config /section:applicationPools /[name="'$SiteAppPools'"].recycling.periodicRestart.privateMemory:1572864
# fixed periodic recycle interval: 1 day
.$Env:windir\system32\inetsrv\appcmd.exe set apppool /apppool.name: $SiteAppPools /recycling.periodicRestart.time:1.00:00:00
# enable 32-bit mode on 64-bit Windows according to $net32Or64
.$Env:windir\system32\inetsrv\appcmd.exe set config /section:applicationPools /[name="'$SiteAppPools'"].enable32BitAppOnWin64:$net32Or64
# start the pool automatically
.$Env:windir\system32\inetsrv\appcmd.exe set config /section:applicationPools /[name="'$SiteAppPools'"].autoStart:$true
}
}
# Create the IIS web site (if it does not already exist), bind both host
# headers on port 80 and disable directory browsing.
function BuildSite(){
$appSitePath = "iis:\sites\"+$SiteName
$existweb = Test-Path $appSitePath
if(!$existweb)
{
New-Website -name $SiteName -port $sitePort -ApplicationPool $SiteAppPools -PhysicalPath $WebSitePath
.$Env:windir\system32\inetsrv\appcmd.exe set site $SiteName /bindings:"http/*:80:$HostHeader1,http/*:80:$HostHeader2"
# disable directory browsing
.$Env:windir\system32\inetsrv\appcmd.exe set config /section:directoryBrowse /enabled:false
}
else{
echo "'$SiteName' is Already exists"
}
}
# Point the site's IIS log files at the $IISLogFile directory.
function CreatIISLogFile(){
.$Env:windir\system32\inetsrv\appcmd.exe set site $SiteName "-logfile.directory:$IISLogFile"
}
#ΪF5豸ISPAIɸѡ
function CreatISP(){
$x = [string](.$Env:windir\system32\inetsrv\appcmd.exe list config $SiteName /section:isapiFilters)
if ($x -like "*F5XForwardedFor*"){
echo "isapiFilters is Already exists"
}
else{
.$Env:windir\system32\inetsrv\appcmd.exe unlock config $SiteName "-section:system.webServer/isapiFilters"
.$Env:windir\system32\inetsrv\appcmd.exe set config $SiteName /section:isapiFilters /"+[name='F5XForwardedFor',path='$Env:windir\System32\F5XForwardedFor.dll',enabled='true']"
}
}
# Entry point: create the site, configure logging and the ISAPI filter,
# then start the site.  (BuildAppPool is intentionally left disabled.)
function RunBuild(){
#BuildAppPool
BuildSite
CreatIISLogFile
CreatISP
.$Env:windir\system32\inetsrv\appcmd.exe start site $SiteName
}
RunBuild
| true
|
a0df7ea9e4ec90a7c68d9e8f0e724d34a2ea76ca
|
Shell
|
csfreebird/dotfiles
|
/bin/zarchive/ccmtail
|
UTF-8
| 1,023
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# ccmtail -- attach a tail to every node log in a cluster directory and
# merge the output (node-prefixed) into a single system.log stream.

# SIGINT handler: kill every background `tail -f` we started for this
# cluster.  Uses pgrep -f (full command-line match) instead of the fragile
# `ps | grep | grep -v grep` pipeline.
function killtails {
    echo ""
    echo "Cleaning up..."
    for pid in $(pgrep -f "tail -f ${cluster_dir}/node")
    do
        echo "Killing pid: ${pid}"
        kill "${pid}"
    done
    #rm -vf "${cluster_dir}/system.log"
}
trap "killtails" 2

cluster_dir=$1
cluster_name=$(basename "${cluster_dir}")
# NOTE(review): parses `ls` output; node names with whitespace would break.
nodes=$(ls -A1 -I cluster.conf -I system.log "${cluster_dir}")
echo "Nodes in cluster '${cluster_name}':"
echo "${nodes}"
#touch "${cluster_dir}/system.log"
for node in ${nodes}
do
    echo "Attaching tail to node: ${node}"
    # Prefix each log line with its node name.  The original wrote
    # awk '{print "${node}:" $0}' inside single quotes, which emitted the
    # literal text `${node}:`; pass the shell variable in with -v instead.
    # Also, `echo "${node}" &` backgrounded only the echo -- the tail|awk
    # pipeline itself is what must run in the background.
    tail -f "${cluster_dir}/${node}/logs/system.log" | awk -v node="${node}" '{print node ":" $0}' >> "${cluster_dir}/system.log" &
done

tail -f "${cluster_dir}/system.log"

#for node in ${nodes}
#do
#    ps ax | grep "tail -f \"${cluster_dir}/${node}/logs/system.log\"" | grep -v grep | awk '{print $1}'
#    pid=$(ps ax | grep "tail -f \"${cluster_dir}/${node}/logs/system.log\"" | grep -v grep | awk '{print $1}')
#    echo "Pid: ${pid}"
#done
| true
|
fcc37bbd37aa4bf365099a5a88a5eb3b34dfa7eb
|
Shell
|
tunagohan/ruby_install_developer_settings
|
/settings/github_setting.sh
|
UTF-8
| 505
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh
set -e

# log MESSAGE... -- print MESSAGE prefixed with a "Y/m/d H:M:S: " timestamp.
# Defined with POSIX function syntax; the `function` keyword used before is
# a bashism that is not guaranteed to work under #!/bin/sh.
log() {
    message="$*"
    echo "$(date '+%Y/%m/%d %H:%M:%S'): ${message}"
}
# Github setting: interactively collect the user's name and e-mail and
# write them to the global git configuration.
log "githubの設定を行います"

printf '%s' "[Global settting] Github Username : "
# -r keeps backslashes in the typed value intact.
read -r github_username
git config --global user.name "${github_username}"

printf '%s' "[Global settting] Github Email : "
read -r github_email
git config --global user.email "${github_email}"

# Cache credentials in the macOS keychain; push only the current branch.
git config --global credential.helper osxkeychain
git config --global push.default current

log "完了"
| true
|
f3fdaffcd97eea37c53c78f11b44e18ef8efa011
|
Shell
|
Yakoua/resourcepacks
|
/fetch.sh
|
UTF-8
| 314
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch <branch> from origin and restore each resource pack's zip file
# from that branch into the working tree.
set -eu
# $1 is required; the ${1?} expansion aborts with an error if it is missing.
branch=${1?}
# Update (or create) the local ref for the branch; `|| :` keeps going if
# the fetch fails (e.g. the ref is already up to date or non-fast-forward).
git fetch -fv origin "$branch":"$branch" || :
# Every directory two levels down containing pack.mcmeta is a resource
# pack; the list is NUL-delimited so paths with spaces survive.
while IFS= read -r -d '' pack_dir; do
pack=$(basename "$pack_dir").zip
# --overlay keeps local-only files; `|| :` tolerates a zip that does not
# exist on the source branch.
git restore --source "$branch" --overlay -- "$pack" || :
done < <(find . -mindepth 2 -maxdepth 2 -type f -iname pack.mcmeta -print0 | xargs -0 dirname -z)
| true
|
cc23e2f961e2fc52daf3257a2a893ca79bba2a48
|
Shell
|
gentoo-perl/gentoo-perl-helpers
|
/libexec/list-blacklisted-for
|
UTF-8
| 934
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# ABSTRACT: Lists blacklisted atoms for installing targets
#
# Looks the given atom up in blacklists.map.tsv and prints the contents of
# every blacklist file mapped to it.  Expects LIBDIR and cmdname to be set
# by the gentoo-perl wrapper that invokes this helper.
source "${LIBDIR}/core-functions.sh" || exit 1
BLACKLIST_FILE="${LIBDIR}/blacklists.map.tsv"
BLACKLIST_ROOT="${LIBDIR}/blacklists"
set -euo pipefail
require_bin grep sys-apps/grep
# Exactly one argument (the atom) is required.
[[ $# -gt 0 ]] || die "Expected [atom] in ${cmdname} [atom]"
[[ $# -lt 2 ]] || die "Too many arguments, expected 1 at most."
atom="$1"
shift;
# Column 1 of the (comment-stripped) map must contain the atom verbatim
# (-x whole-line, -F fixed-string), otherwise there is nothing to list.
cut -f 1 "${BLACKLIST_FILE}" | grep -v "^#" | grep -xqF "${atom}" || die "No blacklist entries for ${atom}"
# Column 2 names one or more blacklist files under $BLACKLIST_ROOT; dump
# each of them to stdout.
awk -v "blacklist_key=${atom}" '$1 == blacklist_key { print $2 }' "${BLACKLIST_FILE}" | while read -r i; do
cat "${BLACKLIST_ROOT}/${i}"
done
# help: Invocation
# help:   gentoo-perl list-blacklisted-for [target-package]
# help:
# help: Example:
# help:   gentoo-perl list-blacklisted-for dev-lang/perl:0/5.20=
# help:
# help: Outputs:
# help:   A list of dependency strings used by subslots other than the desired one.
| true
|
76abe6dc84c3a2372b16c1c200a98f06c403d04c
|
Shell
|
COSMOS-ASMC/ShowerMC
|
/Cosmos9.00/UserHook/D-Skel/Assemble/classifyBylayer.sh
|
UTF-8
| 1,215
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Move per-host assembled shower data files into $GOUTDIR and register
# them in the web DB via md2WebSysDat.
# give arg1 /FDDbku/...
GOUTDIR=/Work1/kasahara/ForFDD
MCPU=50
# Save the original stdin on fd 3 and read host numbers from hostnumfile.
exec 3<&0 <hostnumfile
nc=0
while read -r host_num
do
    echo "host_num is" "$host_num"
    # The for loop exists to expand the glob; no more than one candidate
    # should match.  (from=$GOUTDIR/*$host_num".dat-r" would not expand *.)
    for to in $GOUTDIR/*$host_num".dat" ; do
        echo "to is " "$to"
        # to=/XXX/yyyy/qgsjet2.tasim509.00041.dat
        basename=${to##*/}
        # NOTE(review): the next line overwrites the name just computed with
        # a fixed value -- looks like leftover one-off/debug code; confirm.
        basename=qgsjet2p19cos0.750.dat
        echo "basename is " "$basename"
        # basename=qgsjet2.tasim509.00041.dat
        filen=${basename%.*}
        echo "filen is " "$filen"
        # filen=qgsjet2.tasim509.00041
        # mv $GOUTDIR/*$host_num".dat-r" $GOUTDIR/*$host_num".dat"
        # mv $from $to
        # $TAMCDB/bin/md2WebSysDat $GOUTDIR/ *$host_num $1/ 1
        mv "$to" "$GOUTDIR/$basename"
        $TAMCDB/bin/md2WebSysDat $GOUTDIR/ "$filen" $1/ 1
    done
    # Shell arithmetic instead of spawning the external `expr`.
    nc=$((nc + 1))
    # Numeric comparison (-eq) instead of string `=`.
    if [ "$nc" -eq "$MCPU" ]; then
        break
    fi
done
# restore stdin and close 3
exec 0<&3 3<&-
if [ "$MCPU" -ne "$nc" ]; then
    echo "# of cpu's not enough; there might be running jobs"
    echo "$MCPU cpu's should exist but $nc is counted"
else
    echo "All events have been successfully assmebled to $1/"
fi
| true
|
4ad5704ce32ca63a09bc50d08af192f8b9de4b36
|
Shell
|
nick-dai/Linux-Env-Setup
|
/ec2-lamp-drupal.sh
|
UTF-8
| 1,525
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Provision a LAMP stack (Apache 2.4 / MariaDB / PHP 7.0) on Amazon Linux
# and install Drupal 7.56 with the Traditional Chinese translation.

# Remove installed Apache or PHP
sudo yum remove httpd* php* -y

# Install Apache 2.4, MariaDB and PHP 7.0
sudo yum install -y httpd24 mariadb-server php70 php70-gd php70-imap php70-mbstring php70-mysqlnd php70-opcache php70-pdo php70-pecl-apcu

# Start Apache, MariaDB now, and enable Apache, MariaDB on startup
sudo service httpd start && sudo chkconfig httpd on
# NOTE(review): the mariadb-server package's service is usually "mysqld"
# (or "mariadb") rather than "mysql" -- confirm the service name.
sudo service mysql start && sudo chkconfig mysql on

# Change the group of "ec2-user"
sudo usermod -a -G apache ec2-user

# Apache directory permission settings (group-writable, setgid directories)
sudo chown -R ec2-user:apache /var/www
sudo chmod 2775 /var/www
find /var/www -type d -exec sudo chmod 2775 {} \;
find /var/www -type f -exec sudo chmod 0664 {} \;

# Secure your MariaDB
sudo mysql_secure_installation

# In your home directory, download drupal and move it to Apache www directory
cd
wget https://ftp.drupal.org/files/projects/drupal-7.56.tar.gz
tar xvzf drupal-7.56.tar.gz
sudo mv drupal-7.56 /var/www/html/drupal

# Set the right group
sudo chown -R ec2-user:apache /var/www/html/drupal

# Lang files are here.
cd /var/www/html/drupal/profiles/standard/translations
wget http://ftp.drupal.org/files/translations/7.x/drupal/drupal-7.56.zh-hant.po
# Don't forget to change group
sudo chown ec2-user:apache drupal-7.56.zh-hant.po

# Fix "file system" errors reported by the Drupal installer.
# (Bug fix: the original spelled the directory "defualt", which does not
# exist -- Drupal's per-site directory is sites/default.)
sudo chmod 775 -R /var/www/html/drupal/sites/default

# If no settings.php exist
cd /var/www/html/drupal/sites/default
sudo cp default.settings.php settings.php
sudo chown ec2-user:apache settings.php
sudo chmod 775 settings.php
| true
|
db66654ced344a1e5c3bcb07ff036304b70c5d97
|
Shell
|
yancongcong-com/GUI
|
/serverA.sh
|
UTF-8
| 8,488
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/bash
#serverA
# Provisioning script for serverA (www.rj.com): disables firewalld/SELinux,
# configures two NICs, an LVM data volume, NFS, DNS (bind), nginx+php-fpm
# with a self-signed certificate, WordPress and a reverse-proxy config.
systemctl stop firewalld.service >/dev/null
systemctl disable firewalld.service &>/dev/null
# Disable SELinux persistently and for the current boot.
sed -ri '/^SELINUX=/cSELINUX=disabled' /etc/selinux/config >/dev/null
setenforce 0
# Address components: subnet index and host octet shared by both NICs.
x=1
A=22
# External-facing NIC (172.16.x.A on ens33).
network=33
cat > /etc/sysconfig/network-scripts/ifcfg-ens$network <<-EOF
TYPE=Ethernet
BOOTPROTO=none
NAME=ens$network
DEVICE=ens$network
IPADDR=172.16.$x.$A
PREFIX=24
GATEWAY=172.16.$x.2
DNS1=114.114.114.114
ONBOOT=yes
EOF
#
# Internal NIC (192.168.x.A on ens37).
network=37
cat > /etc/sysconfig/network-scripts/ifcfg-ens$network <<-EOF
TYPE=Ethernet
BOOTPROTO=none
NAME=ens$network
DEVICE=ens$network
IPADDR=192.168.$x.$A
PREFIX=24
GATEWAY=192.168.$x.2
DNS1=114.114.114.114
ONBOOT=yes
EOF
##disk: build an LVM volume on /dev/sdc, mount it at /data/web_data via
## fstab (by UUID), set the hostname and export the directory over NFS.
disk=/dev/sdc
# NOTE(review): "datasotre" looks like a typo for "datastore", but it is
# used consistently, and renaming would change the on-disk VG name.
vg_name=datasotre
pe_size=8
lv_name=database
lv_size=8G
file_system=ext4
# Bug fix: output was redirected to /dev/unll (a typo that creates a
# regular file); all redirects below now go to /dev/null.
pvcreate $disk &>/dev/null
vgcreate $vg_name $disk -s $pe_size &>/dev/null
lvcreate -L $lv_size -n $lv_name $vg_name &>/dev/null
if [ "$file_system" = "xfs" ];then
    mkfs.xfs /dev/$vg_name/$lv_name &>/dev/null
fi
if [ "$file_system" = "ext4" ];then
    mkfs.ext4 /dev/$vg_name/$lv_name &>/dev/null
fi
lvm_mount=/data/web_data
[ -d "$lvm_mount" ] || mkdir -p "$lvm_mount"
# Persist the mount by UUID so device renaming cannot break it.
uuid=$(blkid | grep $vg_name-$lv_name | awk '{print $2}')
grep "$uuid" /etc/fstab >/dev/null || echo "$uuid $lvm_mount $file_system defaults 0 0 " >> /etc/fstab
mount -a
[ $? -eq 0 ] && echo "lvm成功" || echo "lvm失败"
serverA_name=serverA.rj.com
echo "$serverA_name" > /etc/hostname
#hostnamectl set-hostname $serverA_name
[ $? -eq 0 ] && echo "主机名修改成功" || echo "主机名修改失败"
# Install and configure the NFS export of the web data directory.
nfs_yum=$(yum -y install nfs-utils &> /dev/null )
nfs_dir=/data/web_data
nfs_net=192.168.1.0/24
$nfs_yum && echo "$nfs_dir $nfs_net(rw,no_root_squash)" > /etc/exports
systemctl restart nfs &>/dev/null
[ $? -eq 0 ] && echo "nfs服务启动成功" || echo "nfs服务启动失败"
systemctl enable nfs &>/dev/null
#DNS: install bind and generate forward and reverse zones for rj.com.
dns_yum=$(yum -y install bind bind-utils &>/dev/null )
dns_zone=rj.com
dns_www_ip=172.16.1.22
dns_dns_ip=172.16.1.33
# Forward zone: listen on any interface, add the zone stanza (once) and
# derive the zone file from the named.localhost template.
$dns_yum && sed -ri 's/127.0.0.1|localhost|::1/any/' /etc/named.conf
zone="zone \"$dns_zone\" IN { type master; file \"$dns_zone.zone\"; };"
# Bug fix: redirect was /dev/unll (typo creating a regular file).
grep $dns_zone /etc/named.conf &>/dev/null || echo $zone >> /etc/named.conf
[ -f $dns_zone.zone ] && rm -rf $dns_zone.zone
cp -rf /var/named/{named.localhost,$dns_zone.zone}
sed -ri 's/@/dns/g' /var/named/$dns_zone.zone
sed -ri 's/^dns/@/g' /var/named/$dns_zone.zone
sed -ri 's/dns$/ftp/g' /var/named/$dns_zone.zone
sed -ri "/127.0.0.1/c ftp IN A $dns_dns_ip" /var/named/$dns_zone.zone
sed -ri "/AAAA/c www IN A $dns_www_ip" /var/named/$dns_zone.zone
# Reverse zone for 172.16.1.0/24 derived from named.loopback.
dns_zone=172.16.1
fan_zone=$(echo $dns_zone | awk -F"." '{print $3,$2,$1} BEGIN{OFS="."}')
zone="zone \"$fan_zone.in-addr.arpa\" IN { type master; file \"$dns_zone.zone\"; };"
# Bug fix: same /dev/unll typo.
grep $dns_zone /etc/named.conf &>/dev/null || echo $zone >> /etc/named.conf
[ -f $dns_zone.zone ] && rm -rf $dns_zone.zone
cp -rf /var/named/{named.loopback,$dns_zone.zone}
sed -ri 's/@/dns/g' /var/named/$dns_zone.zone
sed -ri 's/^dns/@/g' /var/named/$dns_zone.zone
sed -ri "/127.0.0.1/c dns IN A $dns_dns_ip" /var/named/$dns_zone.zone
sed -ri "/AAAA/d" /var/named/$dns_zone.zone
serverA_ip=$(echo $dns_www_ip | awk -F"." '{print $NF}')
serverB_ip=$(echo $dns_dns_ip | awk -F"." '{print $NF}')
sed -ri "/PTR/c $serverA_ip IN PTR www.rj.com" /var/named/$dns_zone.zone
echo "$serverB_ip IN PTR ftp.rj.com" >> /var/named/$dns_zone.zone
# Zone files must be readable by the named group.
chgrp -R named /var/named/
chmod g+s /var/named/
systemctl restart named
[ $? -eq 0 ] && echo "dns服务启动成功" || echo "dns服务启动失败"
systemctl enable named &>/dev/null
#http  (disabled Apache variant, kept for reference)
#yum -y install httpd mod_ssl php &>/dev/unll
#[ $? -eq 0 ] || echo " http服务安装失败"
#[ -f /etc/httpd/conf.d/ssl.conf ] && mv /etc/httpd/conf.d/{ssl.conf,ssl.conf.bak}
#http_doc=/etc/httpd/conf.d/virthost.conf
#echo "<VirtualHost $dns_www_ip:80>" > $http_doc
#echo "    ServerName www.rj.com " >> $http_doc
#echo "    DocumentRoot $lvm_mount " >> $http_doc
#echo "</VirtualHost>" >> $http_doc
#echo "<Directory \"$lvm_mount\" > " >> $http_doc
#echo "    Require all granted" >> $http_doc
#echo "</Directory>" >> $http_doc
#echo Welcome to 2019 Computer Network Application contest! > /data/web_data/index.html
#chown -R apache:apache $lvm_mount
#systemctl restart httpd &>/dev/unll
#[ $? -eq 0 ] && echo "http服务启动成功" || echo "http服务启动失败"
#systemctl enable httpd &>/dev/null
#nginx: install nginx + php-fpm, run php-fpm as the nginx user.
yum -y install nginx php php-fpm php-mysql php-gd gd &>/dev/null
chown -R nginx:nginx /data/web_data
sed -ri '/^user/cuser = nginx' /etc/php-fpm.d/www.conf
sed -ri '/^group/cgroup = nginx' /etc/php-fpm.d/www.conf
# Self-signed certificate for the HTTPS vhost.
ssl_dir=/etc/nginx/ssl
[ -d $ssl_dir ] || mkdir -p $ssl_dir
cd $ssl_dir
openssl genrsa -out nginx.key 1024 &>/dev/null
openssl req -new -key nginx.key -out nginx.csr -days 365 -subj /C=CN/ST=Shanxi/L=Shanxi/O=ca/OU=ca/CN=www.rj.com/emaliAddress=ca@rj.com &>/dev/null
openssl x509 -req -days 365 -in nginx.csr -signkey nginx.key -out nginx.crt &>/dev/null
# HTTPS vhost plus an internal HTTP vhost on 192.168.1.22.  The fastcgi
# SCRIPT_FILENAME line and the HTTP->HTTPS rewrite are injected by the
# two sed commands below because they contain nginx $variables.
cat > /etc/nginx/conf.d/wordpress.conf <<-EOF
server {
listen 443 ssl;
server_name www.rj.com;
ssl_certificate /etc/nginx/ssl/nginx.crt;
ssl_certificate_key /etc/nginx/ssl/nginx.key;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;
location / {
root /data/web_data;
index index.php index.html index.htm;
}
location ~ \.php$ {
root /data/web_data;
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
include fastcgi_params;
}
}
server {
listen 192.168.1.22:80;
server_name www.rj.com;
#charset koi8-r;
#access_log /var/log/nginx/host.access.log main;
location / {
root /data/web_data;
index index.php index.html index.htm;
}
location ~ \.php$ {
root /data/web_data;
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
include fastcgi_params;
}
}
EOF
sed -ri '/fastcgi_index index.php;/afastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;' /etc/nginx/conf.d/wordpress.conf
sed -ri '/#access_log \/var\/log\/nginx\/host.access.log main;/arewrite ^(.*)$ https:\/\/$server_name$1 permanent;' /etc/nginx/conf.d/wordpress.conf
systemctl restart nginx php-fpm &>/dev/null
[ $? -eq 0 ] && echo "nginx服务启动成功" || echo "nginx服务启动失败"
systemctl enable nginx php-fpm &>/dev/null
echo Welcome to 2019 Computer Network Application contest! > /data/web_data/index.html
#haproxy (disabled)
#yum -y install haproxy &>/dev/null
#
#systemctl restart haproxy &>/dev/unll
#[ $? -eq 0 ] && echo "http服务启动成功" || echo "http服务启动失败"
#systemctl enable haproxy &>/dev/nul
#ssl+http
systemctl restart nginx &>/dev/null
[ $? -eq 0 ] && echo "https服务启动成功" || echo "https服务启动失败"
systemctl enable nginx &>/dev/null
# Download and unpack WordPress into the web root.
cd /data/web_data
yum -y install wget &>/dev/null
wget https://wordpress.org/wordpress-4.3.20.tar.gz &>/dev/null
tar -xf wordpress-4.3.20.tar.gz -C /data/web_data
chown -R nginx:nginx /data/web_data/
# Bug fix: the original read `ystemctl` (missing leading "s").
systemctl restart nginx &>/dev/null
#[ $? -eq 0 ] && echo "wordpress服务启动成功" || echo "wordpress服务启动失败"
# Reverse-proxy / load-balancer vhosts in front of the two web nodes.
# Bug fix: the here-doc delimiter is now quoted ('EOF') so nginx runtime
# variables such as $host, $remote_addr and $proxy_add_x_forwarded_for are
# written literally instead of being expanded (to empty strings) by the
# shell, which produced a broken nginx configuration.
cat > /etc/nginx/conf.d/proxy.conf <<'EOF'
upstream web{
server 192.168.1.22:80;
server 192.168.1.33:80;
}
server {
listen 172.16.1.22:80;
server_name www.rj.com;
location / {
proxy_pass http://web;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
root /data/web_data;
index index.php index.html index.htm;
}
}
server {
listen 172.16.1.22:443;
server_name www.rj.com;
location / {
proxy_pass http://web;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
root /data/web_data;
index index.php index.html index.htm;
}
}
EOF
#
systemctl restart nginx php-fpm &>/dev/null
[ $? -eq 0 ] && echo "负载均衡服务启动成功" || echo "负载均衡服务启动失败"
systemctl enable nginx php-fpm &>/dev/null
| true
|
69f30aa9591a17806ee627f07c7bde62c06631c5
|
Shell
|
stam/finance-vagrant
|
/scripts/app_repo.sh
|
UTF-8
| 483
| 3.453125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
#
# Clone (or reuse) the finance application repository under /vagrant.
set -e

if [[ $EUID -eq 0 ]]; then
    echo "This script can not be run as root" 1>&2
    exit 1
fi

echo ">>> Cloning repository."

# Download application.
cd /vagrant

if [ -d "finance" ]; then
    cd finance
    # git pull
    # git checkout master
    cd /vagrant
else
    # Bug fix: under `set -e` a failing `git clone` aborted the script
    # before the old `if [ $? -eq 128 ]` check could ever run.  Handle the
    # failure in the same command list so the status is actually inspected.
    git clone git@bitbucket.org:JasperStam/finance.git || {
        status=$?
        if [ "$status" -eq 128 ]; then
            echo "git returned exit code 128. Make sure you have access to GitHub with your keys."
        fi
        exit 1
    }
fi
| true
|
093e7290b75f76ba28d8636cb50731a7bc1959dd
|
Shell
|
quanrd/SV_Discovery
|
/mergesample.sh
|
UTF-8
| 2,906
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Merge per-sample structural-variant calls into one BED stream and cluster
# overlapping intervals.  Run first with runtype=MERGE (collect + initial
# bedtools clustering), then runtype=CLUSTER (after R hierarchical
# re-clustering) to attach final cluster ids.
svtype="INV"
svtype_full="inversion"
outname="NB_${svtype}"
destpth="/scratch1/irri-bioinformatics/StructuralVariants/MergedSamples"
sourcepth="/scratch1/irri-bioinformatics/StructuralVariants/MergedCallers/${svtype}"
#Change run type
runtype=CLUSTER #CLUSTER or MERGE
if [[ $runtype == "MERGE" ]]; then
mergefile=$destpth/$outname"_mergesam.bed"
# Start from an empty merge file each run.
if [ -f $mergefile ]; then
rm $mergefile
fi
touch $mergefile
module load bedtools
#MERGE all samples into one file, appending the sample name as an extra
#column (derived from the *.inversion.txt filename).
for i in $(find $sourcepth -name "*inversion.txt" -printf "%f\n");
do
sample=${i/.${svtype_full}.txt/}
echo ${sample}
awk -v sam="${sample}" '{ print $0"\t"sam;}' $sourcepth/$i >> $mergefile
done
#sort the mergefile and cluster intervals using bedtools (initial cluster)
sort -k1,1 -k2,2n $mergefile | bedtools cluster -i - > $destpth/$outname"_mergesam_intersect.txt"
#count number of intervals per bedtools cluster;
#wk '{cluster[$6]++; if(!($6 in name)){ name[$6]=$1"\t"$2"\t"$3;}} END{for(i=1;i<=$6;i++) print i"\t"name[i]"\t"cluster[i];}' $destpth/${outname}"_mergesam_intersect.txt" > $destpth/$outname"_mergesam_intersect_sup.txt"
#compute distribution of support for all bedtools cluster
#wk '{tmp=sprintf("%d",$4/10); bin[tmp]++;} END{for(i=0;i<303;i++) if(bin[i]>0) print i*10"\t"bin[i]; else print i*10"\t0";}' $destpth/${chrom}"_mergesam_intersect_sup.txt" > $destpth/${chrom}"_mergesam_sup_dist.txt"
#Prepare input for hierarchical clustering: unique (chrom,start,end,cluster)
#rows plus the interval length as a fifth column.
awk -F'\t' '{print $1"\t"$2"\t"$3"\t"$6;}' $destpth/${outname}"_mergesam_intersect.txt" | uniq -c - | awk -v OFS='\t' '{print $2,$3,$4,$5,$4-$3}' - > $destpth/$outname"_mergesam_intersect_uniq.txt"
#count number of unique intervals per bedtools cluster; useful on estiamting reclustering runtime in R especially for clusters with many unique intervals
awk '{cluster[$4]++; if(!($4 in name)){ name[$4]=$1"\t"$2"\t"$3;}} END{for(i=1;i<=$4;i++) print i"\t"name[i]"\t"cluster[i];}' $destpth/${outname}"_mergesam_intersect_uniq.txt" > $destpth/$outname"_mergesam_intersect_sup.txt"
fi
if [[ $runtype == "CLUSTER" ]]; then
#Adjust cluster ID after Hclustering: build a composite id from columns 5-6
#(the -1 in the substr length strips a trailing ^M carriage return) and
#renumber clusters sequentially.
awk 'BEGIN{num=0;id="x";} $1~/^c/{tmp=$5"_"substr($6,0,length($6)-1); if(id!=tmp){id=tmp;num++;} print $1"\t"$2"\t"$3"\t"$4"\t"tmp"\t"num;}' $destpth/NB_${svtype}_cluster.txt | sort -k6,6n -k1,1 -k2,2n > $destpth/$outname"_mergesam_hclust.txt" #-1 in length remove ^M char at the end
#Join the new cluster ids back onto the full intersect table by
#(chrom,start,end) key.
awk 'NR==FNR{tmp=$1"\t"$2"\t"$3; map[tmp]=$6;} NR!=FNR{x=$1"\t"$2"\t"$3; if(x in map){print x"\t"$4"\t"$5"\t"map[x];} }' $destpth/${outname}"_mergesam_hclust.txt" $destpth/${outname}"_mergesam_intersect.txt" > $destpth/${outname}"_mergesam_clustered.txt"
#stat_brkpt.sh and stat_spread.sh for stat
#get length distribution
#awk '{tmp=sprintf("%d",$3-$2); bin[tmp]++;} END{for(i=0;i<=1000;i++) if(bin[i]==0) print i"\t0"; else print i"\t"bin[i];}' NB_mergesam_clustered.txt > length_dist.txt
fi
| true
|
9662c9f310f2232ec755234edc514d961cf03950
|
Shell
|
dirk/heroku-buildpack-rust
|
/bin/compile
|
UTF-8
| 2,541
| 3.84375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# bin/compile BUILD_DIR CACHE_DIR ENV_DIR
# Heroku buildpack "compile" step for Rust applications.
# This an amalgamation of:
#  https://github.com/emk/heroku-buildpack-rust
#  https://github.com/Hoverbear/heroku-buildpack-rust
# Fail fast: any error, failed pipeline stage or unset variable aborts.
set -e
set -o pipefail
set -u
# Print a build-log line in the Heroku "----->" header style.
log() {
  printf -- '-----> %s\n' "$1"
}
# Indent stdin by a fixed left margin, used to nest subcommand output
# under a "----->" header (Heroku buildpack convention).
indent() {
  sed 's/^/ /'
}
# Build related variables (positional arguments from the slug compiler).
BUILD_DIR=$1
CACHE_DIR=$2
ENV_DIR=$3
# Load our configuration variables (VERSION, optional RUSTUP_URL,
# CARGO_BUILD_ARGS) from the app's RustConfig file.
log "Loading configuration variables..."
. "$BUILD_DIR/RustConfig"
CARGO_BUILD_ARGS="${CARGO_BUILD_ARGS:---release}"
# Check our configuration options.
if [ -z ${VERSION+x} ]; then
  echo "failed: RustConfig must set VERSION to indicate the Rust version."
  exit 1
fi
if [ -z ${RUSTUP_URL+x} ]; then
  RUSTUP_URL="https://static.rust-lang.org/rustup.sh"
fi
# Notify users running old, unstable versions of Rust about how to deploy
# successfully.
if [ -n "${CARGO_URL-}" ] || [ ! -f "$BUILD_DIR/Cargo.toml" ]; then
    cat <<EOF
To deploy a modern Rust app, make sure you have a Cargo.toml file, and that
you do not define CARGO_URL or CARGO_VERSION in RustConfig.
failed: Outdated configuration or missing Cargo.toml.
EOF
    exit 1
fi
# Switch to our cache directory (persists between builds).
mkdir -p "$CACHE_DIR"
cd "$CACHE_DIR"
RUST_CACHE_NAME="rust-cache-$VERSION"
# Make sure we have the correct Rust binaries and set up PATH.  A cache
# hit means this exact VERSION was installed by a previous build;
# otherwise purge older caches and install via rustup.sh.
if [ -d $RUST_CACHE_NAME ]; then
  log "Using Rust version $VERSION."
else
  log "Downloading Rust install script for $VERSION from $RUSTUP_URL..."
  rm -f rust.tar.gz
  rm -rf rust-cache-*
  curl -o rustup.sh "$RUSTUP_URL"
  chmod +x rustup.sh
  log "Installing Rust binaries..."
  mkdir rust-cache-$VERSION
  ./rustup.sh --prefix=$RUST_CACHE_NAME -y --revision=$VERSION \
    --disable-sudo --disable-ldconfig
fi
RUST_PATH=`ls -1d "$CACHE_DIR/$RUST_CACHE_NAME"`
if [ ! -x "$RUST_PATH/bin/rustc" ]; then
  echo "failed: Cannot find Rust binaries at $RUST_PATH/bin."
  exit 1
fi
# Expose the cached toolchain to this build.
PATH="$RUST_PATH/bin:$RUST_PATH/cargo/bin:$PATH"
LD_LIBRARY_PATH="$RUST_PATH/lib${LD_LIBRARY_PATH+:$LD_LIBRARY_PATH}"
export LD_LIBRARY_PATH
# Switch back into our main build area
cd "$BUILD_DIR"
# Restore the cargo target directory from the cache so incremental
# artifacts survive between builds.
if [[ ! -d "$CACHE_DIR/target" ]]; then
  log "No cached crates detected."
else
  log "Detected cached crates. Restoring..."
  mv "$CACHE_DIR/target" "$BUILD_DIR/target"
fi
# To debug git issues:
# export RUST_LOG="cargo::sources::git=debug"
# Build the Rust app (cargo registry/git caches also live in CACHE_DIR).
log "Compiling application..."
CARGO_HOME="$CACHE_DIR/cargo" cargo build $CARGO_BUILD_ARGS
log "Caching build artifacts..."
cp -r "$BUILD_DIR/target" "$CACHE_DIR/target"
| true
|
604ef0be5fc3be76eeacefce8626b44f943d6f78
|
Shell
|
bitprophet/dotfiles
|
/.zshrc
|
UTF-8
| 800
| 3.265625
| 3
|
[] |
no_license
|
# Zsh dotdir folder (not actually setting ZDOTDIR tho, no point?)
export ZSH=~/.zsh
# Data directory (created on demand for history files etc.)
[[ -d $ZSH/data ]] || mkdir $ZSH/data
# Source some third party stuff: put bundled completion/functions first.
fpath=($ZSH/contrib $fpath)
# Helper for various 'is X on my path? then Y' tests in the below dotfiles.
# Returns success if $1 resolves to a command (binary, builtin, function
# or alias).  Uses the `command -v` builtin instead of external `which`:
# it is POSIX, faster (no fork) and not confused by which wrappers.
function have() {
    command -v "$1" &>/dev/null
}
# Source my dotfiles (in explicit order -- later files may depend on
# exports/aliases defined by earlier ones).
typeset -a DOTFILES
DOTFILES=(
pre-local
options
exports
mid-local
completion
aliases
platform
history
python
ruby
perl
wk
prompt
zmv
fzf
local
)
# Resolve each name relative to $ZSH and source it only if it exists, so
# machine-specific files (pre-local/mid-local/local) are optional.
for file in $DOTFILES; do
file=$ZSH/$file
[[ -f $file ]] && source $file
done
# Sanity cleanup of PATH, which otherwise can grow duplicate entries (making
# troubleshooting harder than it needs to be).  -U keeps only the first
# occurrence of each element.
typeset -U PATH
| true
|
11b35e89c024ccc95e35febba46a003e48a0c2b4
|
Shell
|
jsquyres/ompi-sphinx-dist
|
/configure.ac
|
UTF-8
| 3,838
| 3
| 3
|
[] |
no_license
|
# -*- shell-script -*-
#
# Autoconf input for ompi-sphinx-dist: standard init/libtool boilerplate,
# then logic deciding whether the Sphinx docs can be built and/or installed.
AC_INIT([ompi-sphinx-dist], [0.5.0], [https://example.com])
AC_PREREQ(2.60)
AC_CONFIG_AUX_DIR(config)
AC_CONFIG_MACRO_DIR(config)
AC_CANONICAL_HOST
AC_CANONICAL_TARGET
AM_INIT_AUTOMAKE([foreign subdir-objects no-define 1.13.4 tar-ustar])
AM_SILENT_RULES([yes])
AC_PROG_CC_C99
AC_ENABLE_SHARED
AC_DISABLE_STATIC
LT_INIT
LT_OUTPUT
# Record absolute build and source directories (used by the docs check
# below to locate docs/sphinx-requirements.txt).
abs_builddir=`pwd`
cd $srcdir
abs_srcdir=`pwd`
cd "$abs_builddir"
# Can we build the docs?
#
# - If $with_docs_venv==yes, then assume yes
# - If we can find sphinx-build in our path and find all of the
#   required pip modules, then yes
# - Otherwise, no
# This result is used below (in some cases)
AC_PATH_PROG([SPHINX_BUILD], [sphinx-build])
AC_MSG_CHECKING([if we can build the docs])
DOCS_CAN_BUILD=0
AS_IF([test "$with_docs_venv" = "yes"],
      [DOCS_CAN_BUILD=1],
      dnl Bug fix: this previously tested the literal string "SPHINX_BUILD"
      dnl (missing $), which is never empty, so a missing sphinx-build
      dnl executable was never detected.
      [AS_IF([test -z "$SPHINX_BUILD"],
             [AC_MSG_RESULT([no (cannot find sphinx-build executable)])],
             [ # Check for the Python modules we need
              missing_modules=
              for line in `egrep -v '^#' $abs_srcdir/docs/sphinx-requirements.txt`; do
                  # Strip any ==/>=/<= version constraint; pip names use
                  # '-', the importable module name uses '_'.
                  pip_module=`echo $line | cut -d= -f1 | cut -d\> -f1 | cut -d\< -f1`
                  python_module=`echo $pip_module | sed 's/-/_/g'`
                  echo "Testing python import of: $python_module" >&5
                  python3 -c "import $python_module" >&5 2>&5
                  st=$?
                  AS_IF([test $st -ne 0],
                        [missing_modules="$pip_module $missing_modules"])
              done
              AS_IF([test -n "$missing_modules"],
                    [msg=`echo $missing_modules`
                     AC_MSG_RESULT([no (missing pip modules: $msg)])],
                    [DOCS_CAN_BUILD=1])
             ])
      ])
AM_CONDITIONAL(DOCS_CAN_BUILD, [test $DOCS_CAN_BUILD -eq 1])

# We already printed the different "no" results, but we didn't print
# "yes".
AS_IF([test $DOCS_CAN_BUILD -eq 1], [AC_MSG_RESULT([yes])])
# Are we *installing* the docs?
#
# - If we're able to build the docs, yes
# - If the docs/html and docs/man directories already exist (i.e., a
#   distribution tarball), yes
# - Otherwise, no
AC_MSG_CHECKING([if docs are already present])
DOCS_ALREADY_PRESENT=0
AS_IF([test -d $abs_srcdir/docs/build-html && test -d $abs_srcdir/docs/build-man],
      [DOCS_ALREADY_PRESENT=1])
AS_IF([test $DOCS_ALREADY_PRESENT -eq 1],
      [AC_MSG_RESULT([yes])],
      [AC_MSG_RESULT([no])])

AC_MSG_CHECKING([if we will install the docs])
DOCS_CAN_INSTALL=0
AS_IF([test $DOCS_CAN_BUILD -eq 1 || test $DOCS_ALREADY_PRESENT -eq 1],
      [DOCS_CAN_INSTALL=1])
AM_CONDITIONAL(DOCS_CAN_INSTALL, [test $DOCS_CAN_INSTALL -eq 1])
AS_IF([test $DOCS_CAN_INSTALL -eq 1],
      [AC_MSG_RESULT([yes])],
      [AC_MSG_RESULT([no])])

# Provide --enable-install-docs CLI option.  This option will cause
# configure to fail/abort if we cannot install the docs.  This is
# expected to be useful for people who cannot use an
# automatically-created Python virtual environment (e.g., if you're on
# a machine not connected to the internet, and "pip install ..." will
# fail).
AC_ARG_ENABLE([install-docs],
    [AS_HELP_STRING([--enable-install-docs],
        [Cause configure to fail/abort if we cannot install the docs.  This option is typically only useful in builds from git clones (where the docs are not already built).])])
AS_IF([test $DOCS_CAN_INSTALL -eq 0 && test "$enable_install_docs" = "yes"],
      [AC_MSG_WARN([--enable-install-docs was specified,])
       AC_MSG_WARN([but configure cannot figure out how to build or install the docs])
       AC_MSG_ERROR([Cannot continue])])

#########################
# Generated files.
AC_CONFIG_FILES([
Makefile
config/Makefile
src/Makefile
docs/Makefile
])
AC_OUTPUT
| true
|
ee1cc0a8d87c8ab27edd852b4f33186249713f63
|
Shell
|
B1Sandmann/helm-charts
|
/common/rabbitmq/templates/bin/_rabbitmq-start.tpl
|
UTF-8
| 1,867
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Helm-templated RabbitMQ container entrypoint: one-shot user bootstrap,
# then start the broker under a file lock.
set -e

# One-time bootstrap: start the broker via the init script, enable the
# tracing plugin, create/refresh the users rendered from the chart values
# (passwords fall back to the chart's password helper), then stop the
# broker again so it can be started properly in the foreground below.
# The `|| true` suffixes keep re-runs idempotent when users already exist.
function bootstrap {
  #Not especially proud of this, but it works (unlike the environment variable approach in the docs)
  chown -R rabbitmq:rabbitmq /var/lib/rabbitmq
  /etc/init.d/rabbitmq-server start
  rabbitmq-plugins enable rabbitmq_tracing
  rabbitmqctl trace_on
  rabbitmqctl add_user {{ .Values.users.default.user }} {{ .Values.users.default.password | default (tuple . .Values.users.default.user | include "rabbitmq.password_for_user") | quote }} || true
  rabbitmqctl set_permissions {{ .Values.users.default.user }} ".*" ".*" ".*" || true
  rabbitmqctl add_user {{ .Values.users.admin.user }} {{ .Values.users.admin.password | default (tuple . .Values.users.admin.user | include "rabbitmq.password_for_user") | quote }} || true
  rabbitmqctl set_permissions {{ .Values.users.admin.user }} ".*" ".*" ".*" || true
  rabbitmqctl set_user_tags {{ .Values.users.admin.user }} administrator || true
  {{- if .Values.metrics.enabled }}
  rabbitmqctl add_user {{ .Values.metrics.user }} {{ .Values.metrics.password | default (tuple . .Values.metrics.user | include "rabbitmq.password_for_user") | quote }} || true
  rabbitmqctl set_permissions {{ .Values.metrics.user }} ".*" ".*" ".*" || true
  rabbitmqctl set_user_tags {{ .Values.metrics.user }} monitoring || true
  {{- end }}
  rabbitmqctl change_password guest {{ .Values.users.default.password | default (tuple . .Values.users.default.user | include "rabbitmq.password_for_user") | quote }} || true
  rabbitmqctl set_user_tags guest monitoring || true
  /etc/init.d/rabbitmq-server stop
}
function start_application {
echo "Starting RabbitMQ with lock /var/lib/rabbitmq/rabbitmq-server.lock"
LOCKFILE=/var/lib/rabbitmq/rabbitmq-server.lock
exec 9>${LOCKFILE}
/usr/bin/flock -n 9
exec gosu rabbitmq rabbitmq-server
}
bootstrap
start_application
| true
|
18905d7b7a288551e674808decf57681a8a1bb23
|
Shell
|
quodlibetor/pingmonitor
|
/pingmonitor
|
UTF-8
| 935
| 3.828125
| 4
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Copyright Brandon W Maister 2012, rights reserved in accord with the BSD
# license: Keep my name, and this message, with the file. If you decide to
# share it. I'm not responsible if your computer blows up.
#
# Launch a timestamped background ping of 8.8.8.8 whose output is appended
# to a log file, for later network-uptime analysis.

default_logfile="/var/log/network_uptime"
usage="pingmonitor [logfile]
Run this script and it will start ping appending to logfile,
default '${default_logfile}'. Ping will be in the background and
disowned, so you should be able to log out of an ssh session without
worrying about it.
I'm not being very careful, though, so Don't Trust Me.
The ping interval is 2 seconds, so you should probably run
pinglogcompress every day or two, as it tends to generate about 15MB/day."

if [ "$1" = "-h" ] || [ "$1" = "--help" ] ; then
    echo "$usage"
    exit
fi

# Optional first argument overrides the default log location.
if [ -n "$1" ] ; then
    logfile=$1
else
    logfile=$default_logfile
fi

# -D: per-reply timestamps; -i2: 2s interval; -W1: 1s reply timeout.
# Quoting the redirection targets keeps paths with spaces intact
# (the original unquoted >>$logfile would word-split, SC2086).
nohup ping -D -i2 -W1 8.8.8.8 >>"$logfile" 2>>"${logfile}.error" &
| true
|
fb60e5d6817ebfeda479c4fd5f2e97c4432d09b5
|
Shell
|
bygui86/sso
|
/ocp-run.sh
|
UTF-8
| 2,359
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
# Print a banner: blank line, a rule of '#', the given text, another rule.
# $1 - header text.
function echo_header() {
    echo
    echo "########################################################################"
    # Quoted so the text is printed verbatim; the original bare $1 would
    # word-split and glob-expand the argument (SC2086).
    echo "$1"
    echo "########################################################################"
}
# Poll until a shell condition produces output, or give up after a timeout.
# $1 - human-readable name used in progress messages
# $2 - timeout in seconds (polled every 5s, hence the /5)
# $3 - a shell command string; "ready" means it prints something.
# Exits the whole script with 255 on timeout.
function wait_while_empty() {
    local _NAME=$1
    local _TIMEOUT=$(($2/5))
    local _CONDITION=$3
    echo "Waiting for $_NAME to be ready..."
    local x=1
    # NOTE: the condition is a caller-supplied string run via eval —
    # intentional here, but it means $3 must come from trusted code only.
    while [ -z "$(eval ${_CONDITION})" ]
    do
    echo "."
    sleep 5
    x=$(( $x + 1 ))
    if [ $x -gt $_TIMEOUT ]
    then
    echo "$_NAME still not ready, I GIVE UP!"
    exit 255
    fi
    done
    echo "$_NAME is ready."
}
# --- main flow: preflight checks, then build the 'sso' image on OpenShift ---
# Resolve the script's own directory so relative paths (s2i/) work from anywhere.
pushd $(cd $(dirname $0) && pwd) > /dev/null
APP_DIR=$(pwd)
APP_NAME=$(basename $0)
echo_header "Running pre-checks...."
# Each check exits with a distinct code so callers can tell failures apart.
oc status >/dev/null 2>&1 || { echo >&2 "You are not connected to OCP cluster, please login using oc login ... before running $APP_NAME!"; exit 1; }
mvn -v -q >/dev/null 2>&1 || { echo >&2 "Maven is required but not installed yet... aborting."; exit 2; }
oc version | grep openshift | grep -q "v3\.[4-9]" || { echo >&2 "Only OpenShift Container Platfrom 3.4 or later is supported"; exit 3; }
echo "[OK]"
echo
# Display project and server for user and sleep 2 seconds to allow user to abort if wrong user
oc project
echo "Press CTRL-C NOW if this is not correct!"
sleep 4
# Service specific commands
echo_header "Creating the build configuration and image stream"
# Idempotent: skip creation when a bc/build named 'sso' already exists.
oc get bc/sso 2>/dev/null | grep -q "^sso" && echo "A build config for sso already exists, skipping" || { oc new-build --strategy=docker --binary --name=sso > /dev/null; }
echo_header "Starting build"
oc get builds 2>/dev/null | grep "^sso" | grep -q "Running" && echo "There is already a running build, skipping" || { oc start-build sso --from-dir=s2i > /dev/null; }
# First wait for the build to appear as Running, then for it to leave Running.
wait_while_empty "sso starting build" 600 "oc get builds 2>/dev/null| grep \"^sso\" | grep Running"
wait_while_empty "sso build" 600 "oc get builds 2>/dev/null| grep \"^sso\" | tail -1 | grep -v Running"
# echo_header "Creating application"
# oc get svc/sso 2>/dev/null | grep -q "^sso" && echo "A service named sso already exists, skipping" || { oc new-app sso > /dev/null; }
# echo_header "Exposing the route"
# oc get route/sso 2>/dev/null | grep -q "^sso" && echo "A route named sso already exists, skipping" || { oc expose service sso > /dev/null; }
| true
|
fb0149fe5aac9f9b4bb3f601fb15b60e667f63ec
|
Shell
|
illepic/downfall-guild
|
/project/scripts/vm/restore_d6.sh
|
UTF-8
| 456
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Restore a Drupal 6 site archive inside the Vagrant box using drush,
# then drop the dev settings into the extracted site.
# RUN FROM WITHIN VAGRANT
cd /var/www/df
# `drush archive-restore` the entire site from within the Vagrant box. This will take a long time
echo "Extracting D6 site archive. Go make a sandwich, this is going to take awhile."
# --db-prefix=demo_ matches the prefix baked into the archived dump;
# --overwrite clobbers any previous extraction at web/d6.
drush -v archive-restore dfmigrate/df.tar.gz --destination=web/d6 --db-url=mysql://dfdbuser:dfdbpass@localhost/downfall_d6 --db-prefix=demo_ --overwrite
# Copy over d6 settings
cp -R build/dev/d6 web/d6
exit
| true
|
f5a43ddb84d47e62806448eefd0a4e7d5789a0ea
|
Shell
|
arturh85/medienverwaltung
|
/Medienverwaltung/scripts/e2e-test-browser.sh
|
UTF-8
| 430
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the JsTestDriver end-to-end (scenario) suite in Iceweasel.
# Exits 1 when Iceweasel is missing, otherwise propagates JsTestDriver's
# exit status.

# 'command -v' is the portable replacement for 'which' (SC2230), and testing
# its exit status directly covers every failure code — the original checked
# only for '$? -eq 1', missing e.g. 127 from a broken 'which'.
if ! ICEWEASEL=$(command -v iceweasel); then
    echo "Iceweasel not found." >&2
    exit 1
fi

BASE_DIR=$(dirname "$0")

# All paths quoted so a checkout under a directory with spaces still works.
java -jar "$BASE_DIR/../frontend/test/lib/jstestdriver/JsTestDriver.jar" \
    --port 9876 \
    --browser "$ICEWEASEL" \
    --config "$BASE_DIR/../jsTestDriver-scenario.conf" \
    --basePath "$BASE_DIR/.." \
    --testOutput "js-test-reports" \
    --tests all --reset
RESULT=$?
exit $RESULT
| true
|
fe3f6741884d4326b9e032e3b6a28f0d7a9afe83
|
Shell
|
hdanak/xdg_config
|
/bash/bashrc
|
UTF-8
| 2,134
| 2.8125
| 3
|
[] |
no_license
|
# Interactive-shell environment setup: paths, pager/editor, colors,
# a perl-generated colored prompt, and XDG/compiler defaults.
# Don't run anything here unless we have an interactive shell.
if [[ $- != *i* ]] ; then
return
fi
[[ -f /etc/bashrc ]] && source /etc/bashrc
#set -o vi
export LD_LIBRARY_PATH="/usr/local/lib:$LD_LIBRARY_PATH"
export PATH=$HOME/.local/bin:$PATH
export PAGER="less"
export EDITOR="vim"
export TRASH_DIR=~/".trash"
export LESS="-RSM~gIsw"
# Colorized ls only on capable terminals; dircolors emits the LS_COLORS setup.
[ "$TERM" != "dumb" ] && eval "`dircolors -b`" && alias ls='ls --color=auto'
alias redshift='redshift -l 37.788081:-122.277832'
# prevent Java grey screen
export _JAVA_AWT_WM_NONREPARENTING=1
# Set prompt PS1
# The heredoc perl script builds PS1 with ANSI color escapes wrapped in
# \[...\] so readline computes the prompt width correctly.
export PS1=`perl -w <<'EOF'
use strict;
use Term::ANSIColor qw/color/;
sub esc { '\[' . join('', @_) . '\]' };
sub colored { esc(color(shift)) . join('', @_) . esc(color('reset')) };
my $user_at_host = colored 'bright_green', '\u@\h';
my $time = colored 'white', '[\t]';
my $jobs = colored 'yellow', '[\j]';
my $path = colored 'bright_blue', '\W';
my $prompt = colored 'bright_blue', '\$';
print join(' ', $time, $user_at_host, $path, $jobs, $prompt, '');
EOF
`
# XDG Base Directory Specifications
export XDG_CONFIG_HOME=~/.config
export XDG_CACHE_HOME=~/.cache
# Parallel make by default; tune native optimized builds.
export MAKEFLAGS="-j"
export CFLAGS="-march=native -O2 -pipe"
export CXXFLAGS="${CFLAGS}"
# Replace man(1) with vim's man-page viewer: read-only, 'q' quits,
# single window. Defaults to the 'man' page itself when no topic is given.
function man
{
    vim -R -c 'runtime ftplugin/man.vim' \
        -c 'map q :q<CR>' \
        -c "Man ${*-man}" +"wincmd o"
}
#function info
#{
#	vim -c "Info $1" -c "bdelete 1"
#}
# Jump to the development workspace.
function develop
{
    cd ~/develop
}
# Enable perlbrew-managed Perl in this shell.
function perldev
{
    source ~/perl5/perlbrew/etc/bashrc
}
# Generate a random password from a fixed character set.
# $1 - desired length (default 16).
function genpasswd
{
    local len=${1:-16}
    # Filter urandom down to the allowed characters and take the first
    # $len of them. The original piped through 'xargs' merely to add a
    # trailing newline; printf does that without re-parsing the output.
    tr -dc 'A-Za-z0-9_!@#$%()=+?.>,<~' < /dev/urandom | head -c "$len"
    printf '\n'
}
# Send SIGWINCH (signal 28) to the current shell, forcing it to re-read
# the terminal size — useful after a resize that went unnoticed.
function winch
{
    kill -28 $$
}
# Run the given command; if it succeeds, exit the shell.
# Usage: exit_after CMD [ARGS...]
function exit_after
{
    # "$@" preserves each argument as one word; the original's bare $*
    # re-split arguments containing spaces (SC2048).
    "$@" && exit
}
# Create a directory (with parents) and change into it.
# $1 - directory path.
function mkcd
{
    # Quoted so paths with spaces work; '--' guards against names
    # beginning with '-'. The original unquoted $1 word-split (SC2086).
    mkdir -p -- "$1" && cd -- "$1"
}
# Human-readable disk usage of the given paths, sorted by size.
function dus
{
    # "$@" keeps each path intact; the original's bare $* broke paths
    # containing spaces (SC2048).
    du -hs "$@" | sort -h
}
# Activate perlbrew for every new shell.
perldev
# Prefer keychain for a persistent ssh-agent; the gnome-keyring fallback
# below is kept for reference but disabled.
which keychain &>/dev/null
if [ $? == 0 ]; then
eval `keychain --eval --agents ssh id_rsa`
#else
#	which gnome-keyring-daemon &>/dev/null
#	if [ $? == 0 ]; then eval `gnome-keyring-daemon -s`; fi
fi
# Outside of screen/tmux, attach to session 0 and open a fresh window.
if [ $TERM != 'screen' ]; then
tmux new-session -t 0 \; new-window
fi
# Force an initial terminal-size refresh (see winch above).
winch
| true
|
63e6af4b7c6d0f8b34ba4e11c651ef688af18394
|
Shell
|
RenAndrew/pe_scrapy
|
/pe/gather_result.sh
|
UTF-8
| 689
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Move scraped result files into per-timestamp backup folders.
#
# Each source file is named "<prefix>_<timestamp>.<ext>"; the timestamp is
# taken from the part after the last '_' and the file is moved to
# $RESULT_DST_PATH/<timestamp>/.

RESULT_SRC_PATH='/home/ren/work/git_repos/pe_scrapy/pe/result'
RESULT_DST_PATH='/home/ren/boxing/result_bak'

date "+%Y-%m-%d %H:%M:%S"
echo "Moving data files from $RESULT_SRC_PATH to $RESULT_DST_PATH"

# All expansions quoted — the original left every $folder/$file/$timestamp
# unquoted (SC2086), which breaks on any path containing whitespace.
for folder in "$RESULT_SRC_PATH"/*; do
    # Skip empty folders (ls prints nothing for them).
    if [ "$(ls "$folder" | wc -l)" -eq 0 ]; then
        continue
    fi
    for file in "$folder"/*; do
        # ${file##*_} keeps everything after the last '_'; cut drops the
        # file extension, leaving the bare timestamp.
        timestamp=$(echo "${file##*_}" | cut -d '.' -f1)
        if [ ! -d "${RESULT_DST_PATH}/${timestamp}" ]; then
            mkdir -p "${RESULT_DST_PATH}/${timestamp}"
        fi
        mv "$file" "${RESULT_DST_PATH}/${timestamp}/"
    done
    echo "Moving $folder"
    echo '----------------------'
done
| true
|
ad05f2067b9ded67023e48e6ff412ce186b51f6d
|
Shell
|
eclipse/cloe
|
/setup.sh.example
|
UTF-8
| 5,429
| 3.953125
| 4
|
[
"JSON",
"BSD-3-Clause",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"Unlicense",
"MPL-2.0"
] |
permissive
|
#!/bin/bash
#
# This script can be used to safely inject secrets into the Docker
# build process.
#
# Configure each `setup_` function defined as required and uncomment
# them at the bottom. Then save the file as `setup.sh`, which `Makefile.docker`
# will automatically add as a secret to the Docker image build process.
#
# If any part of this script fails, the build process will too.
set -e
setup_hosts() {
    # If you need to update /etc/hosts, you can use `--add-host` on the command line
    # or you can add them here. A pristine copy is saved to /tmp so
    # cleanup_hosts can restore it after the Docker RUN step.
    local HOSTS=(
        "93.184.216.34 example.com"
    )

    echo "Extending /etc/hosts file."
    cp /etc/hosts /tmp/hosts.bak
    # "${HOSTS[@]}" visits each array entry intact; the original's bare
    # $HOSTS expanded only the first element and then word-split it,
    # appending "93.184.216.34" and "example.com" as separate lines (SC2128).
    for line in "${HOSTS[@]}"; do
        echo "$line" >> /etc/hosts
    done

    CLEANUP_FUNCTIONS+=(cleanup_hosts)
}
cleanup_hosts() {
    # Undo setup_hosts: put back the pristine copy it stashed in /tmp.
    printf '%s\n' "Restoring /etc/hosts file."
    mv /tmp/hosts.bak /etc/hosts
}
setup_ssh() {
    # If you need SSH for any reason, you can make your SSH agent available to Docker
    # by passing the arguments `--ssh default=$SSH_AUTH_SOCK`.
    # You can then use ssh-keyscan to add known hosts or hosts you want to fetch
    # things from.
    #
    # Using known-hosts is a security feature, and this function effectively
    # circumvents these protections. Consider using a bind mount instead if
    # protection against man-in-the-middle attacks are important!
    local HOSTS=(
        "-p 80 github.com"
    )

    echo "Performing keyscan for expected hosts."
    # "${HOSTS[@]}" iterates whole entries; the original's bare $HOSTS
    # word-split only the first element (SC2128).
    for host in "${HOSTS[@]}"; do
        # "Already present" is 'grep -q'; the original 'grep -vq' was true
        # whenever ANY other line existed, so new hosts were never scanned.
        # 2>/dev/null tolerates a not-yet-created known_hosts file.
        if ! grep -qF -- "$host" ~/.ssh/known_hosts 2>/dev/null; then
            # $host intentionally unquoted: it carries ssh-keyscan options
            # (-p 80) plus the hostname and must word-split here.
            # shellcheck disable=SC2086
            ssh-keyscan $host >> ~/.ssh/known_hosts
        fi
    done

    CLEANUP_FUNCTIONS+=(cleanup_ssh)
}
cleanup_ssh() {
    # Drop the whole ~/.ssh directory so scanned keys never leak into the image.
    rm -rf -- "$HOME/.ssh"
}
network_available() {
    # If you need to check whether network is available, i.e. Docker network is
    # not "none", then you can use something like this, which checks that there is
    # a network interface that is not "lo".
    # (sed extracts interface names from 'ip link'; grep -vq lo succeeds
    # when at least one non-loopback interface exists.)
    ip link | sed -nr 's/^[0-9]+: ([^:]+):.*/\1/p' | grep -vq lo
}
setup_conan() {
    # Authenticate with the default remote using the correct username and password.
    # This should run without any user interaction.
    # Fill in CONAN_LOGIN_USERNAME / CONAN_PASSWORD before use; leaving a
    # variable empty skips the corresponding step below.
    local CONAN_REMOTE="https://artifactory.example.com/artifactory/api/conan/cloe-conan-local"
    local CONAN_REMOTE_VERIFY_SSL="True"
    local CONAN_LOGIN_USERNAME=
    local CONAN_PASSWORD=

    # Set the request timeout to 360 seconds to work-around slow servers.
    conan config set general.request_timeout=360

    if [ "${CONAN_REMOTE}" != "" ]; then
        echo "Adding Conan 'default' remote."
        conan remote add default "${CONAN_REMOTE}" "${CONAN_REMOTE_VERIFY_SSL}"
    fi
    if [ "${CONAN_LOGIN_USERNAME}" != "" ]; then
        echo "Authenticating with 'default' remote."
        # Exported only for the duration of the login call, then unset so
        # the credentials do not linger in the environment.
        export CONAN_LOGIN_USERNAME CONAN_PASSWORD
        conan user --remote=default -p
        unset CONAN_LOGIN_USERNAME CONAN_PASSWORD
    fi
    CLEANUP_FUNCTIONS+=(cleanup_conan)
}
cleanup_conan() {
    # Deauthenticate so that we don't leak credentials.
    conan user --clean
}
setup_vtd() {
    # Tell VTD which host serves its licenses.
    VI_LIC_SERVER="vtd-licenses.example.com"
    export VI_LIC_SERVER
}
indent_lines() {
    # Strip control characters from stdin, then prefix every line with a tab.
    sed -r 's/[[:cntrl:]]//g; s/^/\t/'
}
# Upload one Conan package (all binaries) to the 'default' remote, with a
# work-around for missing recipes: re-download the recipe from conancenter
# and retry once. Failures are appended to $2 instead of aborting.
# $1 - package reference "name/version@user/channel#revision"
# $2 - path of the failure log
upload_package() {
    pkgref="$1"
    # Strip the "#revision" suffix; upload takes the bare reference.
    pkgname="$(echo "$pkgref" | sed -r 's/#.*$//')"
    faillog="$2"
    conan upload -r default --all --force -c "$pkgname"
    if [ $? -ne 0 ]; then
        echo "INFO: attempting recipe-download work-around"
        conan download -r conancenter -re "$pkgref"
        if [ $? -ne 0 ]; then
            echo "CRITICAL: recipe download failed, skipping"
            echo "$pkgref" >> "$faillog"
            return
        fi
        echo "INFO: attempting upload for second time"
        conan upload -r default --all --force -c "$pkgname"
        if [ $? -ne 0 ]; then
            echo "CRITICAL: package upload failed, skipping"
            echo "$pkgref" >> "$faillog"
            return
        fi
    fi
    echo "INFO: upload successful"
}
# Upload every locally cached Conan package via upload_package, collecting
# failures in a log that is summarized at the end.
release_packages() {
    echo "Uploading packages..."
    faillog=/tmp/conan-upload-failures.log
    # The sed rewrites revision-only references ("name/ver#rev") into the
    # "name/ver@_/_#rev" form that upload/download commands accept.
    conan search --raw --rev | sed -r '/^[^@]+$/ { s!#!@_/_#! }' | while read line; do
        echo "Upload: $line"
        upload_package "$line" $faillog 2>&1 | indent_lines
    done
    if [ $(cat $faillog 2>/dev/null | wc -l) -gt 0 ]; then
        echo "Failure uploading:"
        cat $faillog | indent_lines
    fi
}
# Simpler bulk upload of everything in the local cache (no retry logic).
upload_conan_packages() {
    # Prequisites:
    # 1. You need to add a 'default' remote and authenticate with it.
    # 2. You need to keep the original 'conancenter' remote, so
    #    that Conan can fetch missing export_sources files.
    conan upload -r default --all --force -c "*"
}
# This array with cleanup functions will be extended by each `setup_` function
# that is called that needs cleanup after the Docker RUN step is finished.
# This cleanup is ensured by the call to `trap` below.
CLEANUP_FUNCTIONS=()

cleanup_all() {
    # Run every registered cleanup hook, in registration order.
    # "${CLEANUP_FUNCTIONS[@]}" iterates the whole array; the original's
    # bare $CLEANUP_FUNCTIONS expanded only the first element (SC2128).
    local func
    for func in "${CLEANUP_FUNCTIONS[@]}"; do
        $func
    done
}
trap cleanup_all EXIT

# Now uncomment the setups you want to happen in a Docker environment:
#
# In a Docker RUN step, it's possible to have `--network=none`, in which case
# we probably don't need to do anything in this script.
if [ -f /.dockerenv ] && [ "$(ls /sys/class/net)" != "lo" ]; then
    # The ':' no-op keeps this body non-empty: bash rejects an 'if' whose
    # body consists only of comments ("syntax error near unexpected token fi"),
    # which is exactly what the original became with everything commented out.
    :
    #setup_hosts
    #setup_ssh
    #setup_conan
    #setup_vtd
fi

set +e
| true
|
f10a65c742563be6c2a20fadbd71024a78afbd8d
|
Shell
|
phpmyadmin/scripts
|
/monitoring/check_phpmyadmin_composer.sh
|
UTF-8
| 673
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Monitoring check: for every supported phpMyAdmin release, verify that the
# composer-created project's ChangeLog actually mentions that version.

# Query the official version feed and extract the version strings with jq.
supported_versions=$(curl -fsSL 'https://www.phpmyadmin.net/home_page/version.json'|jq -r '.releases|.[]|.version')
working_dir="$HOME/tmp/composer-test"
mkdir -p "$working_dir"
cd "$working_dir" || exit 1
# Create the project for one version, check its ChangeLog, then remove it.
# $1 - version string (e.g. 5.2.1)
test_version() {
    ver=$1
    echo "Processing $ver"
    composer create-project -q phpmyadmin/phpmyadmin "$ver" "$ver" > /dev/null
    grep "$ver" "$ver/ChangeLog" > /dev/null || echo "Failed to find version $ver in $ver/ChangeLog"
    rm -rf "$ver"
}
while read -r full_version
# https://unix.stackexchange.com/questions/9784/how-can-i-read-line-by-line-from-a-variable-in-bash
do
    test_version "$full_version"
done < <(printf '%s\n' "$supported_versions")
| true
|
045108127e2bf5ce7b6885005a8e91431e69179d
|
Shell
|
jeremyary/load-balancing-poc
|
/scripts/call_nginx.sh
|
UTF-8
| 174
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Poll the nginx load balancer once per second forever, printing each
# response body so backend rotation (round-robin) is visible.

echo 'STARTING CALLS TO NGINX LOAD BALANCER AT 192.168.1.99'

url="http://192.168.1.99"

# 'while true' runs the true builtin; the original '[ true ]' merely tested
# that the literal string "true" is non-empty — same result, wrong idiom.
while true
do
    content=$(curl -s "$url")
    echo "$content"
    sleep 1
done
| true
|
55e7e6db7747c2ed9de559e819753dbfdab02d22
|
Shell
|
PercussiveRepair/scripts
|
/cch/hive_get_users.sh
|
UTF-8
| 790
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Page through a user-search API with curl, extracting usernames from each
# JSON page into "allUsers<offset>" files until an empty page is returned.
# $1 - API endpoint host, $2 - ApiToken cookie value, $3 - search query.
endPoint=$1; shift
cookie=$1; shift
cookie="Cookie: ApiToken=${cookie}"
baseUrl="https://${endPoint}/v5/users/all/search?fieldSet=user&"
first=1
quantity=1000
query=$1; shift
rm -f users "allUsers"
while (true); do
    echo "Fetching first $quantity from $first"
    # -w prints status code + effective URL for visibility; the body goes
    # to the scratch file 'users'.
    curl -sL -w "%{http_code} %{url_effective}\\n" -o users --header "$cookie" "${baseUrl}query=$query&first=$first&quantity=$quantity"
    echo "Fetched first $quantity from $first"
    # Crude JSON field extraction: split on commas, keep "username" fields,
    # take the quoted value (4th double-quote-delimited token).
    cat users | awk -F',' '{for (i=1; i<NF; i++) print $i}' | grep username | cut -d '"' -f 4 >> "allUsers$first"
    users=$(cat users | awk -F',' '{for (i=1; i<NF; i++) print $i}' | grep username | wc -l)
    # An empty page means we have paged past the last user.
    if [[ $users -eq 0 ]]; then
        break
    fi
    first=$(($first+$quantity))
done
rm -f users
wc -l "allUsers$first"
exit 0
| true
|
bb00cbeca9864822187de6ff930aded438f5a646
|
Shell
|
jaaouane/ms-sample
|
/purgeImages.sh
|
UTF-8
| 437
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Remove every docker container and image whose name matches 'ms-sample'.
# Each phase first checks that matches exist so xargs is never run empty.
if [[ $(docker ps -a| grep ms-sample | awk '{print $1}' | uniq) ]]; then
    # Force-remove matching containers by ID.
    docker ps -a| grep ms-sample | awk '{print $1}' | uniq | xargs docker rm -f
else
    echo "no containers to remove found"
fi
if [[ $(docker images| grep ms-sample | awk '{print $1":" $2}' | uniq) ]]; then
    # Force-remove matching images by repo:tag.
    docker images| grep ms-sample | awk '{print $1":" $2}' | uniq | xargs docker rmi -f
else
    echo "no images to remove found"
fi
exit 0
| true
|
9d7e6b31f532904bfc751be08184e2e2d6e61f6d
|
Shell
|
mado89/netlogo-mapreduce
|
/cluster/hadoop/createnodenew.sh
|
UTF-8
| 2,148
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Clone the VirtualBox "Master" VM into a Hadoop worker node and rename it.
# Cluster layout constants:
MASTERIP=192.168.56.101
MASTER="Master"
NODE="Node0"
NODENAME="node0"
NODEIP=192.168.56.102
NEWMAC=0800279B4ED7
GATEWAY=192.168.56.255
NETMASK=255.255.255.0

# Prepare the master VM for cloning: boot it if needed, wipe its persistent
# udev net rules (so the clone's new MAC gets eth0), and shut it down.
master_preclone () {
    #Check if machine is running
    VMR=`VBoxManage list runningvms | grep $MASTER | wc -l`
    if [ $VMR -eq "0" ]
    then
        #start machine
        VBoxManage startvm $MASTER --type headless > /dev/null;
        echo "Waiting for startup of $MASTER";
        # GuestAdditionsRunLevel >= 2 means the guest OS is up far enough
        # to accept ssh.
        while [[ `VBoxManage showvminfo $MASTER --details --machinereadable | grep GuestAdditionsRunLevel | grep -o "[0-9]*"` -lt "2" ]]; do
            # echo "Sleeping"
            sleep 1;
        done
        #to be sure its started
        sleep 1;
    fi
    ssh root@$MASTERIP "rm /etc/udev/rules.d/70-persistent-net.rules; shutdown -h now"
    #Waiting for shutdown
    while [[ `VBoxManage list runningvms | grep $MASTER | wc -l` -gt "0" ]]; do
        # echo "Sleeping"
        sleep 1;
    done
}
# master_preclone
# Clone the prepared master, give the clone a fresh MAC, boot it, and
# rename the guest once it is reachable.
echo "Cloning VM"
VBoxManage clonevm $MASTER --name $NODE --register > /dev/null
VBoxManage modifyvm $NODE --macaddress1 $NEWMAC
sleep 1
echo "Starting VM"
VBoxManage startvm $NODE > /dev/null &
# echo "Sleeping for 30 seconds"
# sleep 30
echo "Waiting for startup";
# AAA=`VBoxManage showvminfo $NODE --details --machinereadable | grep GuestAdditionsRunLevel | grep -o "[0-9]*"`
# echo $AAA
sleep 2
# Same readiness probe as in master_preclone: wait for guest additions.
while [[ `VBoxManage showvminfo $NODE --details --machinereadable | grep GuestAdditionsRunLevel | grep -o "[0-9]*"` -lt "2" ]]; do
    # echo "Sleeping"
    sleep 1;
done
#to be sure its started
sleep 1;
#ping -b -c1 $GATEWAY > /dev/null
#NEWIP=`arp -a | grep $NEWMAC | sed 's/^.*[^0-9]\([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*\).*$/\1/'`
echo "Machine is running on $MASTERIP"
# NOTE(review): the clone still answers on the master's DHCP address here;
# the ssh below renames it (hostname + /etc/hosts) and powers it off. The
# commented variant also switched it to the static $NODEIP.
# ssh root@$MASTERIP "sed -i 's/master/$NODENAME/g' /etc/hostname;sed -i 's/master/$NODENAME/g' /etc/hosts;sed -i 's/iface eth0 inet dhcp/iface eth0 inet static/g' /etc/network/interfaces;echo '	address $NODEIP' >> /etc/network/interfaces;echo '	netmask $NETMASK' >> /etc/network/interfaces;echo '	broadcast $GATEWAY' >> /etc/network/interfaces;shutdown -h now"
ssh root@$MASTERIP "sed -i 's/master/$NODENAME/g' /etc/hostname;sed -i 's/master/$NODENAME/g' /etc/hosts;shutdown -h now"
| true
|
7fab3c17a2d0865cabacb6db147f8a70ef3dc91b
|
Shell
|
resmo/voyage-linux
|
/voyage-live/config/hooks/001-rmpkg.chroot
|
UTF-8
| 1,505
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/sh
# live-build chroot hook: strip optional packages from the Voyage live image.
# Skipped entirely for the Voyage ONE (asterisk) and Voyage MPD (mpd) builds,
# which keep the full package set.
echo "($0)"
if [ $(dpkg-query --show | cut -f1 | grep "^asterisk$" | wc -l) -gt 0 ] ; then
    echo "($0) Building Voyage ONE. No remove package"
    exit 0
fi
if [ $(dpkg-query --show | cut -f1 | grep "^mpd$" | wc -l) -gt 0 ] ; then
    echo "($0) Building Voyage MPD. No remove package"
    exit 0
fi
# Whitespace-separated removal list; the trailing backslashes inside the
# quoted string are line continuations.
RMPKGS="
libtext-charwidth-perl \
libtext-iconv-perl \
manpages \
aptitude \
liblocale-gettext-perl \
console-data \
console-common \
gcc-4.2-base \
netcat-traditional \
bsdmainutils \
dmidecode \
ed \
groff-base \
info \
libcwidget3 \
libept0 \
libnewt0.52 \
libsigc++-2.0-0c2a \
libxapian15 \
man-db \
nano \
rsyslog \
vim-common \
vim-tiny \
whiptail \
\
"
# Purge one package at a time so a single failure doesn't abort the rest.
for PKG in $RMPKGS
do
    echo "*** RMPKG Removing $PKG"
    #apt-get -f -y -q=2 --purge remove $PKG
    apt-get -f -y --purge remove $PKG
done
exit
# libgdbm3 is removed because of voyage-sdk.
# below are harmful
libbz2-1.0 \
libtasn1-3 \
libgcrypt11 \
libopencdk10 \
libgpg-error0 \
libusb-0.1-4 \
hostname \
libldap-2.4-2 \
dhcp3-client \
dhcp3-common \
libgdbm3 \
# below are already removed
apt-utils \
at \
bsdmainutils \
ed \
fdutils \
groff-base \
info \
libtext-wrapi18n-perl \
nano \
tasksel \
tasksel-data \
libcwidget3 \
libsigc++-2.0-0c2a \
kbd \
libconsole \
libdb4.4 \
dmidecode \
libnewt0.52 \
gettext-base \
dialog \
libncursesw5 \
libgpmg1 \
libept0 \
libxapian15 \
vim-tiny \
vim-common \
locales \
liblzo2-2 \
live-initramfs \
live-config \
live-config-sysvinit \
live-boot \
live-boot-initramfs-tools \
| true
|
b072eb4767518328796100aee1d018c9f8fe39b3
|
Shell
|
tillawy/harrek-webservice
|
/pango/pango-view3.sh
|
UTF-8
| 814
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# Render the "Last" form of each letter in letters3.xml to a PNG via
# pango-view. NOTE(review): the second half of this script (after 'exit 0')
# is unreachable work-in-progress — it references $c before any assignment
# in that scope and its inner loop body is fully commented out.
rm -f ./p2/*
mkdir -p ./p2/
r=0;
NUMOFLETTERS=$(cat letters3.xml | grep '</Letter>'| wc -l);
a=5;
for (( l=1; l<=${NUMOFLETTERS}; l++ )); do
    # Pull the l-th <Letter> element, then its <Last> text content.
    letterXML=$(xmllint --encode utf8 --xpath "/Letters/Letter[position() = ${l}] " letters3.xml)
    echo ${letterXML}
    t=$(echo ${letterXML} | xmllint --xpath "//Last/text()" -)
    echo $t;
    # Render right-to-left on a transparent background.
    pango-view --text "${t}" \
    --font="KufiStandardGK 18" \
    --background=transparent --rtl -q \
    -o ./p2/${l}.Last.png;
    c=$(($c+1)) ;
done
exit 0;
# ---- unreachable draft below this point ----
cat letters4.txt | while read line; do
    #echo ${line} | cut -f ${c} -d " ";
    c=1;
    for LETTER in $(echo ${line} | cut -f ${c})
    do
        #echo ${LETTER};
        #ls ./p2/${r}.png 2> /dev/null > /dev/null && echo "${r}.png done"
    done
    r=$(($r+1))
done
| true
|
b83a405e285c1da4ab599d31bb07d228b73a2705
|
Shell
|
tylerwolf35/dotfiles
|
/xmonad/xmobar/spotify.sh
|
UTF-8
| 191
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# xmobar status field: print "Artist · Title" for the current Spotify track,
# or nothing at all when Spotify is not running.

running=$(pidof spotify)
if [ -n "$running" ]; then
    artist=$(playerctl metadata artist)
    # Truncate long titles so the bar entry stays a bounded width.
    song=$(playerctl metadata title | cut -c 1-60)
    # printf instead of 'echo -n': echo's -n flag is not portable under
    # #!/bin/sh, while printf without \n reliably omits the newline.
    printf '%s · %s' "$artist" "$song"
fi
| true
|
b4842631c484428b41b6c5e93ea758da70cf02bd
|
Shell
|
petronny/aur3-mirror
|
/dictcn/dictcn
|
UTF-8
| 2,643
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# dict.cn command-line dictionary client — configuration + usage text.
############## customization #################
### color theme (see terminal colour convention)
# Each value is an ANSI SGR code string used in \033[<code>m sequences below.
# key word
keycolr="7"
# pronunciation
proncolr="1"
# definition
defcolr="1;32"
# related translation
relcolr="0;32"
# sentences
sentcolr="0"
# emphasis
emphcolr="3"
### media player
# used by -p to play the pronunciation audio
player="mplayer"
################################################

# Print usage and dependency information to stdout.
function help() {
echo 'dictcn script'
echo 'Requires: wget xml2 cut sed'
echo '	xsel: for -s support'
echo '	libnotify: for -n support'
echo '	mplayer: for -p support'
echo 'Usage: dictcn [-w WORD|-s] [-n] [-p] [-c]'
echo '-w WORD get word to look up from WORD'
echo '-s	get word to look up from selection'
echo '-n	send to notification'
echo '-p	pronounce the word if available'
echo '-c	colored output for terminal (you can customize it in the script)'
echo '-h	print help'
}
# --- main flow: parse flags, query dict.cn, format, and emit the result ---
word=""
sendp=""
pronp=""
colrp=""
while getopts w:nshpc opt; do
    case "$opt" in
    w) word="$OPTARG" ;;
    s) word="`xsel -o`" ;;
    n) sendp=1;;
    p) pronp=1;;
    c) colrp=1;;
    h) help; exit 0;;
    ?) help; exit 1;;
    esac
done
if [ -z "$word" ]; then help; exit 1; fi
# Fetch the dictionary entry and flatten the XML with xml2 into
# "path=value" lines, then pick fields out of it.
xml="$(wget -q -O - "http://dict.cn/ws.php?utf8=true&q=$word" | xml2)"
key="$(echo "$xml" | grep key | cut -d = -f 2-)"
audio="$(echo "$xml" | grep audio | cut -d = -f 2-)"
pron="$(echo "$xml" | grep pron | cut -d = -f 2- | sed 's/^ //g')"
def="$(echo "$xml" | grep def | cut -d = -f 2-)"
rel="$(echo "$xml" | grep rel | cut -d = -f 2- -s)"
sent="$(echo "$xml" | grep sent | cut -d = -f 2- -s)"
[ ! -z "$pron" ] && pron="[$pron]";
# Build both a plain and a colored rendering; the sed calls re-enter the
# section color after each </em> so emphasis doesn't leak.
if [ ! -z "$rel" ]; then
    nrel="\n$rel\n";
    crel=$(echo "\033[${relcolr}m\n$rel\033[0m\n" \
    | sed "s/<em>/\\\\033\\[${emphcolr}m/g;s/<\\/em>/\\\\033\\[${relcolr}m/g");
fi
if [ ! -z "$sent" ]; then
    nsent="例句:\n$sent";
    csent=$(echo "\033[${sentcolr}m例句:\n$sent\033[0m" \
    | sed "s/<em>/\\\\033\\[${emphcolr}m/g;s/<\\/em>/\\\\033\\[${sentcolr}m/g");
fi
title="$key $pron"
body=$(echo "\n$def\n$nrel\n$nsent" | sed 's/<em>//g;s/<\/em>//g;s/</[/g;s/>/]/g')
chromatic=$(echo "\033[${keycolr}m$key\033[0m \033[${proncolr}m$pron\033[0m\n
\033[${defcolr}m$def\033[0m\n$crel\n$csent" \
| sed 's/<em>//g;s/<\/em>//g;s/</[/g;s/>/]/g')
# Emit (colored or plain), optionally notify, optionally play pronunciation.
if [ ! -z $colrp ]; then echo -e "$chromatic"; else echo -e "$title" "\n$body"; fi
if [ ! -z $sendp ]; then notify-send "$title" "$body"; fi
if [ ! -z $pronp ] && [ ! -z $audio ]; then
    tempfile=`mktemp`
    wget -qO "$tempfile" "$audio"
    $player "$tempfile" &> /dev/null;
fi
| true
|
a1803cbc7dab53b7d8304e4e5c87f4f7fe56f63f
|
Shell
|
gwendolynbrook/ci-scripts
|
/lmp/bb-config.sh
|
UTF-8
| 2,346
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Generate the BitBake local.conf/auto.conf for an LMP (Linux microPlatform)
# CI build: SOTA/OSTree publishing parameters, build identifiers, and
# source-archiving settings. Environment inputs: MACHINE, IMAGE (required),
# plus the optional H_BUILD/FACTORY/OTA_LITE_TAG/... variables defaulted below.
HERE=$(dirname $(readlink -f $0))
source $HERE/../helpers.sh
require_params MACHINE IMAGE

# ${VAR-default} keeps an explicitly exported value, otherwise falls back.
OSTREE_BRANCHNAME="${OSTREE_BRANCHNAME-lmp-localdev}"
SOTA_CLIENT="${SOTA_CLIENT-aktualizr}"
AKLITE_TAG="${AKLITE_TAG-promoted}"
H_BUILD="${H_BUILD-lmp-localdev}"
LMP_VERSION=$(git --git-dir=.repo/manifests/.git describe --tags)
FACTORY="${FACTORY-lmp}"

# Creates/enters the 'build' directory and sets up the BitBake environment.
source setup-environment build

# NOTE: this heredoc is unquoted on purpose — the ${...} references are
# expanded NOW, baking concrete values into conf/local.conf.
cat << EOFEOF >> conf/local.conf
ACCEPT_FSL_EULA = "1"
BB_GENERATE_MIRROR_TARBALLS = "1"
# SOTA params
SOTA_PACKED_CREDENTIALS = "${SOTA_PACKED_CREDENTIALS}"
OSTREE_BRANCHNAME = "${MACHINE}-${OSTREE_BRANCHNAME}"
GARAGE_SIGN_REPO = "/tmp/garage_sign_repo"
GARAGE_TARGET_VERSION = "${H_BUILD}"
GARAGE_TARGET_URL = "https://ci.foundries.io/projects/${H_PROJECT}/builds/${H_BUILD}"
GARAGE_CUSTOMIZE_TARGET = "${HERE}/customize-target"
# Default SOTA client
SOTA_CLIENT = "${SOTA_CLIENT}"
# git-describe version of LMP
LMP_VERSION = "${LMP_VERSION}"
# Default AKLITE tag
AKLITE_TAG = "${AKLITE_TAG}"
# Who's factory is this?
FOUNDRIES_FACTORY = "${FACTORY}"
# Additional packages based on the CI job used
IMAGE_INSTALL_append = " ${EXTRA_IMAGE_INSTALL}"
EOFEOF

if [ -n "$OTA_LITE_TAG" ] ; then
cat << EOFEOF >> conf/local.conf
export OTA_LITE_TAG = "${OTA_LITE_TAG}"
EOFEOF
fi

# Without credentials the OSTree publishing steps are disabled entirely.
if [ -z "$SOTA_PACKED_CREDENTIALS" ] || [ ! -f $SOTA_PACKED_CREDENTIALS ] ; then
    status "SOTA_PACKED_CREDENTIALS not found, disabling OSTree publishing logic"
cat << EOFEOF >> conf/local.conf
SOTA_PACKED_CREDENTIALS = ""
EOFEOF
fi

# Add build id H_BUILD to output files names
cat << EOFEOF >> conf/auto.conf
IMAGE_NAME_append = "-${H_BUILD}"
KERNEL_IMAGE_BASE_NAME_append = "-${H_BUILD}"
MODULE_IMAGE_BASE_NAME_append = "-${H_BUILD}"
DT_IMAGE_BASE_NAME_append = "-${H_BUILD}"
BOOT_IMAGE_BASE_NAME_append = "-${H_BUILD}"
DISTRO_VERSION_append = "-${H_BUILD}"
# get build stats to make sure that we use sstate properly
INHERIT += "buildstats buildstats-summary"
# archive sources for target recipes (for license compliance)
INHERIT += "archiver"
COPYLEFT_RECIPE_TYPES = "target"
ARCHIVER_MODE[src] = "original"
ARCHIVER_MODE[diff] = "1"
EOFEOF

# Prefer a locally seeded sstate cache over remote mirrors when present.
if [ $(ls ../sstate-cache | wc -l) -ne 0 ] ; then
    status "Found existing sstate cache, using local copy"
    echo 'SSTATE_MIRRORS = ""' >> conf/auto.conf
fi

# Dump the generated configs into the CI log for debugging.
for x in $(ls conf/*.conf) ; do
    status "$x"
    cat $x | indent
done
| true
|
16aec0454ffc9f78e15c03f348b3145bfc4014ce
|
Shell
|
criteo/kerberos-docker
|
/network-analyser/analyse-network.sh
|
UTF-8
| 295
| 2.859375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# analyse-network.sh
#
# Start docker container from kerberos cluster to analyse network.
# Extra arguments are forwarded verbatim to the container entrypoint.

# Quote the dirname result so a path with spaces still works (SC2086).
cd "$(dirname "$0")"

# --net=host/--privileged: the analyser needs raw access to host interfaces.
# "$@" forwards each script argument as its own word; the original's bare
# $@ re-split arguments containing spaces (SC2068). $HOME is quoted for the
# same reason; DISPLAY/XAUTHORITY let X11 tools (e.g. wireshark) open a GUI.
docker run -it \
    --net=host --privileged \
    -v "$HOME":/root/:rw \
    -u root \
    -e XAUTHORITY=/root/.Xauthority -e DISPLAY="${DISPLAY}" \
    network-analyser "$@"
| true
|
940cc01260074c83c9586d6c7a61b9749935c1b0
|
Shell
|
RaviKim/Oddtemper
|
/odd/tempScan.sh
|
UTF-8
| 985
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# author = 'Kim Hyung Seok'
# Date = 190612
# version = 0.0.1
# Packages = python3 bluepy
# Library = libglib2.0-dev , mitemp.git, btlewrap.git
# Command = python3 ./demo.py --backend bluepy poll {YOUR_MACADDRESS}
#
# [Comment]
# This Script using for scanning temprature and humidify in serverroom.
# Just Testing in Raspberry_Pi3 and, just monitor terminor working
#
# [Target]
# WatchDog After something recieved signal, alert to slack-bot using by zabbix
# This shell script operate terminal monitoring
# Added Logfile
# Added system time. using date.

name="ServerRoom Temperature Checker"
logpath=/home/pi/Oddtemper/odd/log/

echo "$name"
#date
#Operate
#python3 /home/pi/mitemp/demo.py --backend bluepy poll 4c:65:a8:da:ea:de
sleep 1

# Poll the Xiaomi sensor over BLE and append the reading to the log.
python3 /home/pi/mitemp/demo.py --backend bluepy poll 4c:65:a8:da:ea:de >> "$logpath/tempMonitor.txt"
sleep 2

# Append a timestamp and separator after each reading.
# The original wrote '$echo date' / '$echo "----"': with $echo empty that
# executed 'date' by accident and then tried to run the dash string as a
# command ("command not found"). These are the intended commands.
date >> "$logpath/tempMonitor.txt"
echo "-----------------------------------" >> "$logpath/tempMonitor.txt"
| true
|
8d0034b445137aec21055f67e0c880b76510ff07
|
Shell
|
danriti/configs
|
/bashrc
|
UTF-8
| 1,708
| 2.734375
| 3
|
[] |
no_license
|
#
# ~/.bashrc
#
# Interactive-shell setup: color constants, prompt, aliases, and lazy
# initialization of the various language/version managers that exist.

# If not running interactively, don't do anything
[[ $- != *i* ]] && return

Color_Off='\e[0m'       # Text Reset

# Regular Colors
Black='\e[0;30m'        # Black
Red='\e[0;31m'          # Red
Green='\e[0;32m'        # Green
Yellow='\e[0;33m'       # Yellow
Blue='\e[0;34m'         # Blue
Purple='\e[0;35m'       # Purple
Cyan='\e[0;36m'         # Cyan
White='\e[0;37m'        # White

# Bold
BBlack='\e[1;30m'       # Black
BRed='\e[1;31m'         # Red
BGreen='\e[1;32m'       # Green
BYellow='\e[1;33m'      # Yellow
BBlue='\e[1;34m'        # Blue
BPurple='\e[1;35m'      # Purple
BCyan='\e[1;36m'        # Cyan
BWhite='\e[1;37m'       # White

#PS1='[\u@\h \W]\$ '
# Colored prompt; \[...\] wrappers keep readline's width accounting correct.
PS1="[\[$Color_Off\]\u\[$BPurple\]@\[$Color_Off\]\h \[$BBlue\]\W\[$Color_Off\]]\$ "

# Aliases
alias ls='ls --color=auto'
alias ll='ls -l'
alias la='ls -al'
alias term='urxvt -fn -*-terminus-*-*-*-*-12-* +sb'
alias termb='urxvt -fn -*-terminus-*-*-*-*-14-* +sb'
alias termr='urxvt -fn -*-terminus-*-*-*-*-14-* +sb -rv'
alias clip="xclip -i -selection clipboard"

export TERM=screen-256color
export TERMINAL=urxvt

# Set unlimited history file size
export HISTSIZE= HISTFILESIZE=

# Each manager is only initialized when its install location exists.
# Initialize NVM
[ -s "$HOME/.nvm/nvm.sh" ] && source $HOME/.nvm/nvm.sh

# Initialize RVM
[ -d "$HOME/.rvm/bin" ] && export PATH=$PATH:$HOME/.rvm/bin

# Initialize GVM
[ -s "$HOME/.gvm/gvm/scripts/gvm" ] && source "$HOME/.gvm/gvm/scripts/gvm"

# Initialize Heroku Toolbelt
[ -d "/usr/local/heroku/bin" ] && export PATH="/usr/local/heroku/bin:$PATH"

# Go
# export GOROOT=/usr/local/go
# export GOPATH=/home/driti/go
# export PATH=$PATH:/usr/local/go/bin
# export PATH=$PATH:$GOPATH/bin

# Add py3k to path
# export PATH=$PATH:$HOME/opt/bin
|
9033d66b7411857121e2da986fb6b33fe03ef295
|
Shell
|
lamiru/reactor
|
/deploy/_start
|
UTF-8
| 715
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy step: install requirements, run Django migrations/fixtures, collect
# static files, and bounce uwsgi+nginx — all inside a root login shell.
set -e
# .common is expected to define PROJECT_DIR and SETTINGS_NAMESPACE.
source ./.common

# Unquoted 'HERE' delimiter: $PROJECT_DIR/$SETTINGS_NAMESPACE are expanded
# by THIS shell before the heredoc is handed to the root shell.
sudo su -p <<HERE
set -e
source ~/.bash_profile

# pip
pip install -r $PROJECT_DIR/requirements.txt
pip install -r $PROJECT_DIR/requirements-aws.txt

# migrations
cd $PROJECT_DIR
python manage.py makemigrations --settings=$SETTINGS_NAMESPACE
python manage.py makemigrations thumbnail --settings=$SETTINGS_NAMESPACE
python manage.py migrate --settings=$SETTINGS_NAMESPACE
python manage.py loaddata fixtures/dev/*.yaml
echo "from helpers.score import *; calculate_all_score()" | python manage.py shell
python manage.py collectstatic --noinput --settings=$SETTINGS_NAMESPACE

# service restart
service uwsgi restart
service nginx restart
echo 'sleep for 5 seconds...'
sleep 5
HERE
| true
|
70876a8514c3ed92eb82f3cc7c247772291561ab
|
Shell
|
rovinbhandari/Programming_Scripting
|
/Shell/CS240/2/2_1/10.sh
|
UTF-8
| 224
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Author : ROVIN BHANDARI
# Date   : 23/08/2010
# Script to create a file for authorized users
#
# Usage: $0 FILENAME
# Creates (truncates) FILENAME, but only when run by user 'rovin'.

filename="$1"

# $(whoami) and $filename are quoted so empty values and names with
# spaces cannot break the test or the redirection (SC2086).
if [ "$(whoami)" = "rovin" ]; then
    # ':' makes the truncating redirection an explicit command.
    : > "$filename"
else
    echo "You do not have permission to create the file." >&2
fi
| true
|
690ffb55b35c815dcd64ce4d0cef110cec121fe9
|
Shell
|
igoradulian/Raspberry-System-Monitor-Tool
|
/monitor.sh
|
UTF-8
| 2,536
| 3.359375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
#This script get information about system monitoring
#First line of output show after "DIAGNOSTIC INFORMATION FROM"
#show the name of the host from which diagnostic message sent
#Next row show time system is up
#After that show running tasks number,CPU, RAM, SWAP MEMORY
# us, user    : time running un-niced user processes
# sy, system  : time running kernel processes
# ni, nice    : time running niced user processes
# id, idle    : time spent in the kernel idle handler
# wa, IO-wait : time waiting for I/O completion
# hi : time spent servicing hardware interrupts
# si : time spent servicing software interrupts
# st : time stolen from this vm by the hypervisor
# ----------------------------------------------------
# Second part of the script show CPU temperature.
# SD card usage and capacity

# Header: a 60-dash rule, today's date, and the hostname.
echo | awk 'BEGIN {
for (i = 0; i < 60; i++)
arr [i] = "-"
for (i in arr)
printf("%s", arr[i])
"date" | getline date
split (date, d , " ")
printf("\n%45s %2s %2s\n", d [2], d [3], d [6])
printf("DIAGNOSTIC INFORMATION FROM %s", userInfo())
}
function userInfo()
{
"hostname" | getline host
return sprintf("%s\n",host)
}'

# Reformat the first five lines of 'top -b' (uptime/tasks/CPU/mem/swap),
# converting the KiB memory figures to MB via the mb[] divisors.
top -b -n 1 | awk '
NR < 6{
arr[3] = "CPU usage: "
arr[4] = "Memory usage(MiB): "
arr[5] = "Swap Memory usage(MiB): "
mb[4] = 1024
mb[5] = 1024
#    print($0)
if ( NR <= 3)
printf("%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n",arr[NR], $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)
if (NR == 4)
#  print(arr[NR], $2, $3 / mb[NR], $4, $5 / mb[NR] ,$6 , $7 / mb[NR], $8, $9 / mb[NR], $10, $11 / mb[NR])
printf("%s %s %s %dMB %s %dMB %s %dMB %s %dMB\n",arr[NR], $2, $3, $4 / mb[NR], $5, $6 / mb[NR], $7, $8 / mb[NR], $9, $10 / mb[NR], $11)
if (NR == 5)
printf("%s %s %dMB %s %dMB %s %dMB %s %dMB %s\n",arr[NR], $2, $3 / mb[NR] , $4, $5 / mb[NR], $6, $7 / mb[NR], $8 , $9 / mb[NR], $10, $11 / mb[NR])
}
END {
for (i = 0; i < 60; i++)
arr [i] = "-"
for (i in arr)
printf("%s", arr[i])
print("\n")}'

# Raspberry Pi firmware query; substr strips the "temp=" prefix.
vcgencmd measure_temp | awk ' {$0 = substr($0, 6, 8); printf ("CPU Temperature:%2s\n", $0)}'
#This block show CPU usage
#physical memory usage(RAM)
#swap memory usage
#number of running tasks
echo
# Root filesystem usage in 1M blocks.
df -Bm | awk 'BEGIN {print "Filesystem 1M-blocks Available Used%"} $1 ~ /root/ {printf ("%4s %7s %10s %5s\n",$1,$2,$4, $5)}'
| true
|
8f5cf55165ef47ae84e428cf03f7069b374f8eec
|
Shell
|
tokuhira/iot-edge-config
|
/src/install-edge-runtime.sh
|
UTF-8
| 2,604
| 4.28125
| 4
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/usr/bin/env bash

# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

#script to install edge runtime 1.2

######################################
# install_edge_runtime
#
# - installs Azure IoT Edge Runtime 1.2
# - generates the edge's configuration file from template and
#   fills in the DPS provisioning section from provided parameters
#
# ARGUMENTS:
#    SCOPE_ID
#    REGISTRATION_ID
#    SYMMETRIC_KEY
# OUTPUTS:
#    Write output to stdout
# RETURN:
#    updates the global variable OK_TO_CONTINUE in case of success to true.
######################################
function install_edge_runtime() {
    # All three DPS parameters are mandatory and must be non-empty.
    if [[ $# != 3 || "$1" == "" || "$2" == "" || "$3" == "" ]]; then
        log_error "Scope ID, Registration ID, and the Symmetric Key are required"
        exit ${EXIT_CODES[2]}
    fi

    # An `iotedge` binary on PATH means the runtime is already installed;
    # refuse to install twice.
    if [ -x "$(command -v iotedge)" ]; then
        log_error "Edge runtime is already available."
        exit ${EXIT_CODES[9]}
    fi

    log_info "Installing edge runtime..."
    # Run apt in the background so long_running_command (defined by the
    # caller's environment) can show progress; it forwards the exit code.
    apt-get install aziot-edge -y 2>>$STDERR_REDIRECT 1>>$STDOUT_REDIRECT &
    long_running_command $!
    exit_code=$?
    if [[ $exit_code != 0 ]]; then
        log_info "aziot-edged installation failed with exit code: %d" $exit_code
        exit ${EXIT_CODES[10]}
    fi
    log_info "Installed edge runtime..."

    # --- generate /etc/aziot/config.toml -------------------------------
    log_info "Create instance configuration 'config.toml'."
    local SCOPE_ID=$1
    local REGISTRATION_ID=$2
    local SYMMETRIC_KEY=$3
    log_info "Set DPS provisioning parameters."
    local FILE_NAME="/etc/aziot/config.toml"

    # Write the whole file in one pass (replaces any existing config).
    # FIX: the original appended line-by-line with 15 unquoted `echo >>`
    # commands; a single unquoted heredoc expands $(hostname) and the DPS
    # parameters without word-splitting/globbing risk, and "$FILE_NAME"
    # is now quoted.
    cat > "$FILE_NAME" <<EOF
hostname = "$(hostname)"

## DPS provisioning with symmetric key
[provisioning]
source = "dps"

global_endpoint = "https://global.azure-devices-provisioning.net"
id_scope = "$SCOPE_ID"

[provisioning.attestation]
method = "symmetric_key"
registration_id = "$REGISTRATION_ID"

symmetric_key = { value = "$SYMMETRIC_KEY" }

EOF

    log_info "Apply settings - this will restart the edge"
    iotedge config apply 2>>$STDERR_REDIRECT 1>>$STDOUT_REDIRECT
    exit_code=$?
    if [[ $exit_code == 0 ]]; then
        log_info "IotEdge has been configured successfully"
    fi
}
| true
|
e445253803bac4355c38fa2cd1ef12b0a1017948
|
Shell
|
johnny0924/Stalker-1
|
/install
|
UTF-8
| 3,100
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Stalker WM Portal road warrior installer for Debian, Ubuntu
# This script will work on Debian, Ubuntu and probably other distros
# of the same families, although no support is offered for them. It isn't
# bulletproof but it will probably work if you simply want to setup a Stalker WM Portal on
# your Debian/Ubuntu box. It has been designed to be as unobtrusive and
# universal as possible.
# This is a free shell script under GNU GPL version 3.0 or above
# Copyright (C) 2017 LinuxHelps project.
# Feedback/comment/suggestions : https://linuxhelps.net/
# Author Ivan Bachvarov a.k.a SlaSerX
# Color schema
red='\033[01;31m'
blue='\033[01;34m'
green='\033[01;32m'
norm='\033[00m'
#ver=v5.1.1
# NOTE(review): the MySQL root password and the config-source URL are
# hard-coded here — change before using on anything reachable.
pass="1989march10"
source="http://linuxhelps.net/configs"
# Download and Install the Latest Updates for the OS
apt-get update > /dev/null
sleep 1
#Install Necessary services & packets
sudo apt-get install -y -u apache2 nginx nginx-extras unzip memcached php5 php5-mysql php-pear nodejs upstart && sudo pear channel-discover pear.phing.info && sudo pear install -Z phing/phing
sleep 1
# Re-run the pear steps unconditionally (the && chain above skips them if
# any earlier package fails).
pear channel-discover pear.phing.info
pear install phing/phing
sleep 1
# Set the Server Timezone to CST
echo "Europe/Sofia" > /etc/timezone
dpkg-reconfigure -f noninteractive tzdata
# Install MySQL Server in a Non-Interactive mode. Default root password will be "1989march10"
echo "mysql-server mysql-server/root_password password $pass" | sudo debconf-set-selections
echo "mysql-server mysql-server/root_password_again password $pass" | sudo debconf-set-selections
apt-get install -y mysql-server > /dev/null
# Bind MySQL on all interfaces (rewrites 127.0.0.1 -> 0.0.0.0 in my.cnf)
# and allow the root account to connect from any host.
sed -i 's/127\.0\.0\.1/0\.0\.0\.0/g' /etc/mysql/my.cnf
mysql -uroot -p$pass -e 'USE mysql; UPDATE `user` SET `Host`="%" WHERE `User`="root" AND `Host`="localhost"; DELETE FROM `user` WHERE `Host` != "%" AND `User`="root"; FLUSH PRIVILEGES;'
service mysql restart
# Fetch and unpack the Stalker portal under the Apache docroot.
cd /var/www/html
wget http://download.middleware-stalker.com/downloads/498dcf76433121758c8103e118229f3f/stalker_portal-5.2.0.zip
unzip stalker_portal-5.2.0.zip
mv infomirgroup-stalker_portal-9e60f9025ab6/ stalker_portal
rm -rf *.zip
# Create the portal database and a low-privilege "stalker" account.
mysql -uroot -p$pass -e "create database stalker_db"
mysql -uroot -p$pass -e "GRANT ALL PRIVILEGES ON stalker_db.* TO stalker@localhost IDENTIFIED BY '1' WITH GRANT OPTION;"
echo "max_allowed_packet = 32M" >> /etc/mysql/my.cnf
echo "short_open_tag = On" >> /etc/php5/apache2/php.ini
a2enmod rewrite
apt-get purge libapache2-mod-php5filter > /dev/null
# Replace the stock Apache/nginx vhost files with the project's templates
# downloaded from $source (Apache behind nginx on separate ports).
cd /etc/apache2/sites-enabled/
rm -rf *
wget $source/000-default.conf
cd /etc/apache2/
rm -rf ports.conf
wget $source/ports.conf
cd /etc/nginx/sites-available/
rm -rf default
wget $source/default
/etc/init.d/apache2 restart
/etc/init.d/nginx restart
/etc/init.d/apache2 restart
/etc/init.d/nginx restart
# Deploy the portal with phing, then print a summary with the portal URL.
cd /var/www/html/stalker_portal/deploy
sudo phing
echo -e "\n${green}Necessary services & packets Installed\n\n${norm}\n"
echo -e "\n${green}MySQL root password is $pass\n\n${norm}\n"
echo -e "\n${green}Stalker portal install on http://`ifconfig eth0 2>/dev/null|awk '/inet addr:/ {print $2}'|sed 's/addr://'`/stalker_portal\n\n${norm}\n"
| true
|
6a55bb7f0f134f451acdaa97b89029535c3f617f
|
Shell
|
Hkfalkon/fizzbuzz.sh
|
/1_fizzbuzz.sh
|
UTF-8
| 189
| 2.75
| 3
|
[] |
no_license
|
# Classic FizzBuzz over 1..100: multiples of 3 print "fizz", multiples of
# 5 print "buzz", multiples of both print "fizzbuzz", anything else prints
# the number itself. Dispatch on n mod 15 so each case is a single branch.
for n in {1..100}; do
  case $(( n % 15 )) in
    0) echo fizzbuzz ;;
    3 | 6 | 9 | 12) echo fizz ;;
    5 | 10) echo buzz ;;
    *) echo "$n" ;;
  esac
done
exit 0
| true
|
84a14ee510c7cd1f9fc40d5b4c80ba8621ce861c
|
Shell
|
annyurina/cccatalog-api
|
/deployment/modules/services/cccatalog-api/proxy-init.tpl
|
UTF-8
| 1,500
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Set up nginx as a local reverse proxy in front of the CC Catalog API.
#
# NOTE: this file is a Terraform template. Literal dollar signs in the
# nginx config are written as "$$" so Terraform does not interpolate them;
# only the ccc_api_host reference near the bottom is filled in by
# Terraform before this script runs on the instance.
sudo amazon-linux-extras install nginx1.12
# FIX (two issues with the previous `sudo cat << EOF > /etc/nginx/nginx.conf`):
#  * the output redirection was performed by the *unprivileged* shell, not
#    by the sudo'd cat — `sudo tee` writes the file with root privileges;
#  * the heredoc delimiter was unquoted, so after Terraform rendered "$$"
#    down to "$", the shell expanded the (unset) nginx log variables to
#    empty strings at install time. Quoting 'EOF' keeps them verbatim.
sudo tee /etc/nginx/nginx.conf > /dev/null << 'EOF'
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

http {
    log_format  main  '$$remote_addr - $$remote_user [$$time_local] "$$request" '
                      '$$status $$body_bytes_sent "$$http_referer" '
                      '"$$http_user_agent" "$$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;

    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;

    # Load modular configuration files from the /etc/nginx/conf.d directory.
    # See http://nginx.org/en/docs/ngx_core_module.html#include
    # for more information.
    include /etc/nginx/conf.d/*.conf;

    server {
        listen       80 default_server;
        listen       [::]:80 default_server;
        server_name  _;
        root         /usr/share/nginx/html;

        # Load configuration files for the default server block.
        include /etc/nginx/default.d/*.conf;

        location / {
            access_log off;
            proxy_pass http://${ccc_api_host}/link/;
        }
    }
}
EOF
sudo systemctl start nginx
| true
|
53163eb78d191af0b7a7c1010821421835534678
|
Shell
|
DriveClutch/circleci-python
|
/tools/helm.sh
|
UTF-8
| 1,913
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash -e
# Lint, package and (on release branches in CircleCI) push every Helm chart
# under .helm/ to a per-repo/per-branch GCS-backed chart repository.
# Expects HELM_GS_BUCKET and GCP_AUTH_KEY from the CircleCI context.
if [[ -f ".circleci/debuglog" ]]; then
    set -x
fi

# If this repo doesn't contain a .helm directory, then no need to continue
if [[ ! -d ".helm" ]]; then
    echo "Helm directory does not exist, skipping packaging"
    exit 0
fi

# Figure out where we are and if we should be interacting with HELMREPO
# (only on real CI builds of develop/master/hotfix*/release* branches).
DOREMOTE=false
if [[ ! -z $CIRCLE_BUILD_NUM && ( $CIRCLE_BRANCH == "develop" || $CIRCLE_BRANCH == "master" || $CIRCLE_BRANCH == hotfix* || $CIRCLE_BRANCH == release* ) ]]; then
    DOREMOTE=true
else
    echo "*NOT* interacting with HELMREPO, either because branchname is not appropriate or not actually in a circleci environment"
fi

# Set the GCP auth key using data provided by the CircleCI context
echo $GCP_AUTH_KEY | base64 -d - > ${HOME}/gcp-key.json
export GOOGLE_APPLICATION_CREDENTIALS=${HOME}/gcp-key.json

REPONAME="${CIRCLE_PROJECT_REPONAME}-${CIRCLE_BRANCH}"
REPOLOCATION="${HELM_GS_BUCKET}/${REPONAME}"

# Create the repo (if needed)
if $DOREMOTE; then
    echo "Check if the repo is initialized"
    # `helm repo add` failing is the "not initialized yet" probe, so error
    # aborting (-e) is disabled just for this check.
    set +e # Turn off failure dumping
    helm repo add $REPONAME $REPOLOCATION
    RET=$?
    if [ "$RET" != "0" ]; then
        echo "$REPONAME was not initialized at $REPOLOCATION, performing bucket initialization"
        helm gcs init $REPOLOCATION
    fi
    set -e # Turn on failure dumping
    echo "Adding $REPONAME repo to helm"
    helm repo add $REPONAME $REPOLOCATION
fi

cd .helm

# Chart version = build timestamp; app-version = full git SHA.
# NOTE(review): GITHASHSHORT is computed but never used below.
GITHASHLONG=$(git rev-parse HEAD)
GITHASHSHORT=$(git rev-parse --short HEAD)
DT=$(date "+%Y%m%d.%H%M.%S")
PKGVER="${DT}"

# Run linter first on all packages
for chartpath in */Chart.yaml
do
    pkgname=$(basename $(dirname $chartpath))
    helm lint $pkgname
done

# Package each chart, and push it when running on a release branch in CI.
for chartpath in */Chart.yaml
do
    pkgname=$(basename $(dirname $chartpath))
    helm package --version=$PKGVER --app-version=$GITHASHLONG $pkgname
    if $DOREMOTE; then
        helm gcs push ./${pkgname}-${PKGVER}.tgz $REPONAME
    fi
done
| true
|
0e78c9c939511129d7b27c2f49e660727db939b3
|
Shell
|
kenyroj/LinuxScript
|
/Initial.sh
|
UTF-8
| 2,043
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive-shell setup: sources companion helper files from the directory
# this script lives in, then defines convenience aliases and the prompt.
LocalPath="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# ColorAnsiBash.sh provides the COL_*/PSC_* color variables and PSC_RGB
# used throughout; AKenScript.sh supplies additional helpers.
source $LocalPath/ColorAnsiBash.sh
source $LocalPath/AKenScript.sh

# ==== Alias ====
alias ls='ls --color=auto -F'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
alias ..='cd ..;'
# Force tmux to assume 256-color terminal support.
alias tmux='tmux -2'
# ==== For PROMPT ====
function CollapsedPWD() {
local pwd="$1"
local home="$HOME"
local size=${#home}
[[ $# == 0 ]] && pwd="$PWD"
[[ -z "$pwd" ]] && return
if [[ "$pwd" == "/" ]]; then
echo "/"
return
elif [[ "$pwd" == "$home" ]]; then
echo "~"
return
fi
[[ "$pwd" == "$home/"* ]] && pwd="~${pwd:$size}"
if [[ -n "$BASH_VERSION" ]]; then
local IFS="/"
local elements=($pwd)
local length=${#elements[@]}
for ((i=0;i<length-1;i++)); do
local elem=${elements[$i]}
if [[ ${#elem} -gt 1 ]]; then
elements[$i]=${elem:0:1}
fi
done
else
local elements=("${(s:/:)pwd}")
local length=${#elements}
for i in {1..$((length-1))}; do
local elem=${elements[$i]}
if [[ ${#elem} > 1 ]]; then
elements[$i]=${elem[1]}
fi
done
fi
local IFS="/"
echo "${elements[*]}"
}
# Set PROMPT
# SHORT_HOST = last dash-separated token of the hostname (e.g. "web-prod-01"
# -> "01"), used as the leading prompt segment.
SHORT_HOST=`echo $HOSTNAME | rev | cut -d '-' -f 1 | rev`
# Pick the clock color by who is logged in; PSC_* come from ColorAnsiBash.sh.
PS_TIME_COLOR=$(PSC_RGB 208)
if [ $UID = 0 ] ; then
    # if user is root, use Red time
    PS_TIME_COLOR=$PSC_RED
elif [ "$USER" = "kenyroj" -o "$USER" = "aken.hsu" ] ; then
    # if user ID is aken.hsu or kenyroj, use Yellow time
    PS_TIME_COLOR=$PSC_YLW
elif [ "$USER" = "mdtuser" ] ; then
    # if user ID is mdtuser, use Purple time
    PS_TIME_COLOR=$PSC_PUP
elif [ "$USER" = "mdtadmin" ] ; then
    PS_TIME_COLOR=$PSC_WHT
else
    # Other users, use Blue time
    PS_TIME_COLOR=$PSC_BLU
fi
# Prompt: "<last exit status if non-zero><host>:<cwd>[<HH:MM>] ".
# The single-quoted '${?/#0/}' part is expanded at display time so a zero
# status is suppressed and any other status is shown literally.
export PS1='${?/#0/}'"${PSC_LAK}${SHORT_HOST}${PSC_NON}:${PSC_GRN}\w${PSC_NON}[$PS_TIME_COLOR\A${PSC_NON}] "
#PS1="\[\e]0;\u@\h\a\]$PS1" # Change the putty title
# ExeCmd <command> [args...]
#   Echoes the command (highlighted with COL_* colors) and then runs it.
#   FIX: the original flattened the arguments into a single string
#   (CMD=$*) and re-executed it unquoted, which re-split arguments that
#   contained whitespace; "$@" preserves each argument exactly as passed.
function ExeCmd() {
    echo ${COL_GRN}" ==>" ${COL_YLW}"$*"${COL_NON}
    "$@"
}
# For android build ccache
#export USE_CCACHE=1
#export CCACHE_DIR=/mnt/nfs/CCache
| true
|
47fab1ae31672fa617342be1b0823e189ce41b2a
|
Shell
|
Brundeco/kaa-gent-html-mail
|
/post_deployment_staging.sh
|
UTF-8
| 1,302
| 3.421875
| 3
|
[] |
no_license
|
# Versioned post-deployment commands
# To run this, use `bash post_deployment_staging.sh`
# Atomic commands are marked using (A)
# TODO: document extra commands or project-specific needs
# Flow: (1) install/refresh nvm, (2) install the node version pinned by
# .nvmrc, (3) npm install + rebuild, (4) production asset build; the deploy
# fails only if the final production build fails.

# Install nvm, or try to update if already installed
echo "Installing or updating nvm ..."
curl -o- -s https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | bash

# This loads nvm, .bash_profile is owned by root, so we have to call this manually
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"

# Install node version according to .nvmrc, this implies "nvm use"
echo "Installing node version according to .nvmrc ..."
nvm install

# Install npm dependencies
echo "Installing npm dependencies..."
npm install --no-optional --silent

# Rebuild in case we switched npm versions
echo "Rebuilding npm dependencies..."
npm rebuild --silent > /dev/null

# Npm production build, suppress all output (it's A LOT)
echo "Building assets..."
npm run --quiet prod > /dev/null 2>&1
# If npm returned a non-zero exit code, fail the whole deploy. $? contains the exit code of the last command executed.
([ $? -eq 0 ] && echo "Npm build successful") || \
{ echo "Something went wrong during npm run prod. Check npm-debug.log for more details."; exit 1; }

echo "Your changes are now live."
| true
|
f0a4d0b1c10a3534ec7cb47eb8a2cd60f7ba7681
|
Shell
|
yaakov-h/CRBoilerplate
|
/BuildScript.sh
|
UTF-8
| 798
| 3.328125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Build CRBoilerplate as a static library for every SDK in $SDKS
# (device + simulator), then merge the slices with lipo into a single
# universal archive under $BUILD_ROOT, copying the headers alongside it.
PROJECT_NAME='CRBoilerplate'
SDKS="iphoneos iphonesimulator"
BUILD_ROOT='build'

TARGET_NAME="${PROJECT_NAME}"
PROJECT_FILE="${PROJECT_NAME}.xcodeproj"
BUILD_TMP="${BUILD_ROOT}/tmp"

pushd .

# Scratch build area; -p makes re-runs idempotent.
mkdir -p "${BUILD_TMP}"

for SDK in ${SDKS}
do
    BUILD_DIR="${BUILD_TMP}/build-sdk-${SDK}"
    # FIX: plain `mkdir` failed on re-runs when the per-SDK dir existed.
    mkdir -p "${BUILD_DIR}"
    xcodebuild -project "${PROJECT_FILE}" -target "${TARGET_NAME}" -sdk "${SDK}" CONFIGURATION_BUILD_DIR="${BUILD_DIR}"
done

# Merge all per-SDK slices into one fat library. The $(find ...) argument
# to -create is intentionally unquoted: it must expand to one path per
# slice. (Removed the unused FINAL_DIR variable from the original.)
OUTPUT_STATIC_LIB=$(find . -name "*${TARGET_NAME}.a" -exec basename "{}" \; | uniq)
lipo -output "${BUILD_ROOT}/${OUTPUT_STATIC_LIB}" -create $(find "${BUILD_TMP}" -name "*${OUTPUT_STATIC_LIB}")
# Headers are identical across SDKs, so take them from the first build dir.
cp -R "${BUILD_TMP}/$(ls build/tmp/ | head -n1)/include" "${BUILD_ROOT}/include"
popd
| true
|
6d85b49b13c88cebff8159f0de596e5ce30059e0
|
Shell
|
skimhellmuth/Cell-type-ieQTL-pipeline
|
/Run_eigenmt.sh
|
UTF-8
| 1,354
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
## Script uses modified version of eigenmt to match francois' pipeline of generating final output files
## usage: module load python/3.7.1; trait=Neutrophils; for i in 1; do tissue=$(sed -n ''$i'p' tissues_${trait}.txt); qsub -cwd -V -j y -p -10 -N ei.${tissue} -pe smp 23 -l h_rt=18:00:00 -l mem=100G -m a -M skim@nygenome.org ./submit_eigenmt_py3.sh ${tissue} ${trait} ieqtl; done
set -eo pipefail

# Positional arguments: tissue name, cell type, analysis type (e.g. "ieqtl").
TISSUE=$1
CELLTYPE=$2
TYPE=$3
# PREFIX matches the tensorqtl output naming scheme used below.
PREFIX=${TISSUE}_${CELLTYPE}_${TYPE}
MPATH=/gpfs/commons/groups/lappalainen_lab/skim/gtex_eqtl/v8/tensorqtl
FPATH=${CELLTYPE}_${TYPE}
EMTFPATH=/gpfs/commons/groups/lappalainen_lab/data/gtex/v8/eigenmt

## prepare directories
cd ${MPATH}/${FPATH}/
mkdir -p eqtls_final

# Run the eigenMT multiple-testing correction over all per-chromosome
# tensorqtl outputs, writing corrected results into eqtls_final/.
# NOTE(review): EMTFPATH is defined but the paths below are spelled out in
# full rather than using it.
python3 /gpfs/commons/groups/lappalainen_lab/skim/gtex_eqtl/v8/tensorqtl/run_eigenmt.py \
    -q ${PREFIX}.cis_qtl_pairs.chr*.parquet \
    -i /gpfs/commons/groups/lappalainen_lab/skim/gtex_eqtl/v8/tensorqtl/${CELLTYPE}_ieqtl/${TISSUE}*.eigenMT_input.txt.gz \
    -g /gpfs/commons/groups/lappalainen_lab/data/gtex/v8/eigenmt/genotypes/* \
    -gp /gpfs/commons/groups/lappalainen_lab/data/gtex/v8/eigenmt/positions/* \
    -p /gpfs/commons/groups/lappalainen_lab/data/gtex/v8/eigenmt/phenotypes/${TISSUE}.txt.gz \
    -o eqtls_final/ \
    -x ${PREFIX} \
    -s /gpfs/commons/groups/lappalainen_lab/skim/gtex_eqtl/v8/sample_lists/${TISSUE}.v8.samplelist.txt \
    --parallel 23
exit
| true
|
cad9f3f74654f07ce05d4f63d19a97c9052480c8
|
Shell
|
alvin0918/gin_api
|
/shell/start.sh
|
UTF-8
| 481
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Control script for the "reptile" daemon.
#
# Usage:
#   start.sh start|stop|restart
#   start.sh -c <configfile> <ignored> start|stop|restart
#
# Fixes over the original:
#   * `$STR = "..."` tried to *execute* the value of STR instead of
#     assigning to it; assignments have no `$` and no spaces around `=`.
#   * "{$HOME}" had the brace and dollar swapped, producing literal
#     "{...}" paths; the correct form is "${HOME}".
#   * The start branches invoked the never-defined variable $STP.
HOME_DIR=$(pwd)
# Command line to launch, built up as an array so an optional config path
# survives quoting intact.
CMD=("${HOME_DIR}/reptile")

# PID(s) of any running reptile process. Intentionally left unquoted at the
# kill sites below so multiple matches are passed as separate arguments.
PID=$(ps -ef | grep "reptile" | grep -v grep | awk '{print $2}')

# Dispatch one lifecycle action against the daemon.
run_action() {
    case "$1" in
    "start")
        "${CMD[@]}"
        ;;
    "stop")
        kill -15 $PID
        ;;
    "restart")
        # SIGUSR1 asks the daemon to reload/restart itself in place.
        kill -s SIGUSR1 $PID
        ;;
    esac
}

case "$1" in
"-c")
    # Optional config file, forwarded to the daemon as `-c <file>`.
    CMD+=(-c "$2")
    ;;
*)
    run_action "$1"
    ;;
esac

# Mirrors the original's second dispatch: when invoked with `-c <file>`,
# the action arrives as the third positional argument.
run_action "$3"
| true
|
f3a9cb8c747ae476d79bbcdc16784070b682cbb2
|
Shell
|
michaeljsmalley/osx-setup-scripts
|
/xcode_uninstall.sh
|
UTF-8
| 578
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Remove a manually installed Xcode.app from /Applications (must run as root).
# Log-message prefixes used throughout the script.
info="[info]"
warning="[warning]"
error="[error]"
# Standard macOS application directory.
appdir="/Applications"
check_root() {
  # Abort with a diagnostic on stderr unless the effective UID is root.
  if (( EUID != 0 )); then
    echo "$error This script must be run as root" 1>&2
    exit 1
  fi
}
uninstall_app() {
  # Nothing installed -> report and exit successfully.
  # (Quoted path + explicit if replace the original's unquoted && chain.)
  if [ ! -d "$appdir/Xcode.app" ]; then
    echo "$info Nothing to remove. Exiting..."
    exit 0
  fi
  echo "$info Xcode.app installed. Removing..."
  # Need to be at root of the filesystem before the recursive delete.
  cd /
  # `--` guards against option-like path expansion.
  rm -rf -- "$appdir/Xcode.app"
  echo "$info Xcode.app has been removed."
}
# Make sure only root can run our script (exits otherwise)
check_root
# Uninstall Xcode.app (no-op with a message if it is not installed)
uninstall_app
| true
|
8f7622c3a703a2a743a16226776b754f66151481
|
Shell
|
lunaruan/flow
|
/tests/lazy-mode-ide-dependency/test.sh
|
UTF-8
| 583
| 2.625
| 3
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# Flow integration test: in filesystem lazy mode, editing a *dependency* of
# a focused file must not surface errors, while reverting the edit restores
# them. start_flow / assert_ok / assert_errors and $FLOW are provided by the
# surrounding test harness — NOTE(review): not defined in this file.

printf "\\nServer should start in fs lazy mode\\n"
start_flow . --lazy-mode fs
assert_ok "$FLOW" status --strip-root

printf "\\nFocus a file\\n"
assert_ok "$FLOW" force-recheck --focus focused.js
assert_errors "$FLOW" status --strip-root

printf "\\nEdit a dependency\\n"
# tmp1/ holds the edited variant of the dependency.
cp tmp1/dependency.js dependency.js
assert_ok "$FLOW" force-recheck dependency.js
assert_ok "$FLOW" status --strip-root

printf "\\nRevert edit\\n"
# tmp2/ holds the original variant; errors should come back.
cp tmp2/dependency.js dependency.js
assert_ok "$FLOW" force-recheck dependency.js
assert_errors "$FLOW" status --strip-root

assert_ok "$FLOW" stop
| true
|
e88b8afdf7ac8d151ab339b016c0a2d04d933feb
|
Shell
|
GoCodingIcreated/newspapper
|
/alert/telegram/send_alarm.sh
|
UTF-8
| 1,022
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Send $1 as a Telegram MarkdownV2 message to the hard-coded chat, using the
# bot token stored next to this script in token.txt.
DIRNAME=$(dirname $0)
TOKEN=$(cat $DIRNAME/token.txt)
# curl -s https://api.telegram.org/bot$TOKEN/
# My chat id
CHAT_ID=128578085
MESSAGE=$1
if [ -z "$MESSAGE" ]; then
    echo "ERROR: Empty message."
    exit 1
fi
# Escape the message for embedding in a JSON string AND for Telegram's
# MarkdownV2 parser: quote double-quotes, backslash-escape the reserved
# characters [ ] . - ! ( ) — then *un*-escape brackets/parentheses that wrap
# http links so [text](url) markup still works.
MESSAGE=$(echo "$MESSAGE" | sed 's/"/\\"/g' \
    | sed 's/\[/\\\\[/g' | sed 's/\]/\\\\]/g' | sed 's/\\\[\(http.*\)\\\]/[\1]/g' \
    | sed 's/\./\\\\\./g' | sed 's/\-/\\\\\-/g' | sed 's/!/\\\\!/g' | sed 's/(/\\\\(/g' | sed 's/)/\\\\)/g' | sed 's/\\(\(http.*\)\\)/(\1)/g')
echo "DEBUG: SEND_ALARM: MESSAGE: $MESSAGE"
# Silent notification (disable_notification) via the Bot API sendMessage call.
res=$(curl -s -X POST \
    -H 'Content-Type: application/json' \
    -d "{\"chat_id\": \"$CHAT_ID\", \"text\": \"$MESSAGE\", \"disable_notification\": true, \"parse_mode\": \"MarkdownV2\"}" \
    https://api.telegram.org/bot$TOKEN/sendMessage)
echo "$res"
echo
# Telegram replies with JSON; "ok": true marks acceptance.
IS_OK=$(echo "$res" | jq ".ok" | grep "true")
if [ -z "$IS_OK" ]; then
    echo "ERROR: Message was not accepted by telegram."
    exit 1
else
    echo "INFO: Message was accepted by telegram."
    exit 0
fi
| true
|
519128189a69fbef53596c03fcd1cd2116e1d91a
|
Shell
|
nemtsov/requirejs-project-template
|
/build.sh
|
UTF-8
| 246
| 3.328125
| 3
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Build the named project with the bundled RequireJS optimizer.

usage() {
  echo "Usage: $0 <project-name>"
}

project="$1"

# A project name is mandatory.
if [ -z "$project" ]; then
  usage
  exit 1
fi

# The project must exist as a directory relative to the CWD.
if [ ! -d "$project" ]; then
  echo "$0: project '$project' does not exist"
  usage
  exit 1
fi

# Hand the project's build profile to r.js.
./requirejs/build/build.sh $project/scripts/app.build.js
| true
|
f6b1313b12bb8bf369a9ad703970c56ffd2bbef6
|
Shell
|
ekstremedia/timelapse
|
/tools/canon-dslr-hook-script
|
UTF-8
| 549
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
# gphoto2 hook: invoked with $ACTION naming the capture phase and, for
# downloads, $ARGUMENT holding the temporary image path written by gphoto2.
self=$(basename "$0")

outdir=images
outfilename=$SHOTNAME # target basename, provided via the environment

mkdir -p "$outdir"

case "$ACTION" in
init | start | stop)
    # Nothing to do for these phases.
    ;;
download)
    # Rename the captured file to $SHOTNAME (keeping its extension) and
    # move it into the output directory.
    ext="${ARGUMENT##*.}"
    echo mv "$ARGUMENT" to "$outfilename.$ext"
    mv "$ARGUMENT" "$outdir/$outfilename.$ext"
    #chmod 777 $outdir_tmp/$outfilename
    ;;
*)
    echo "$self: Unknown action: $ACTION"
    ;;
esac

exit 0
| true
|
1c1ddb131d0ea76c5a7ff068ddc47d12ac143c84
|
Shell
|
DeikeLab/multilayer
|
/working_example/produce.sh
|
UTF-8
| 338
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Compile helper: `produce.sh <name>` builds into ./f<name>;
# `produce.sh <name> clean` removes that output directory instead.
OUTFOLDER="./f$1"

# Note that space is needed between = and []
if [ "$2" = "clean" ]
then
    # FIX: guard the recursive delete (the original ran an unquoted
    # `rm -r $OUTFOLDER` unconditionally and claimed success even when
    # the directory did not exist).
    if [ -d "$OUTFOLDER" ]; then
        rm -r -- "$OUTFOLDER"
        echo "Directory cleaned!"
    else
        echo "Nothing to clean: $OUTFOLDER does not exist"
    fi
else
    mkdir -p "$OUTFOLDER"
    make -f Makefile OUTPUT="$1"
    #rm the copy of the source code to avoid version error
    #rm ./$1.c
    #rm ./_$1.c
fi
| true
|
a254ac6b0e1049a44c4ecbb67fc66dd44238eacc
|
Shell
|
aivarasko/k8s-from-scratch
|
/1_make_binaries.sh
|
UTF-8
| 7,250
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Strict mode: abort on errors, unset variables, and pipeline failures.
set -euo pipefail
IFS=$'\n\t'
# TRACE=1 in the environment enables command tracing.
[[ -z "${TRACE:-}" ]] || set -o xtrace

# config.sh supplies the version pins (GO_VERSION, *_GIT_VERSION, ...) and
# the K8SFS_CACHE_LOCATION / K8SFS_TARGET_LOCATION paths used below.
source config.sh
# Download the pinned Go toolchain, verify its checksum against the local
# sha256sum file, and unpack it into the version-addressed cache.
function install_go() {
  APP='go'
  tmp_dir=$(mktemp -d -t go-XXXXXXXXXX)
  wget https://dl.google.com/go/go"${GO_VERSION}".linux-amd64.tar.gz -O "${tmp_dir}/go${GO_VERSION}.linux-amd64.tar.gz"
  # The expected-checksum file ships alongside this script.
  cp sha256sum "${tmp_dir}/"
  pushd "${tmp_dir}/"
  sha256sum -c sha256sum
  sudo mkdir -p "${K8SFS_CACHE_LOCATION}/${APP}/${GO_VERSION}"
  sudo tar xvfz go"${GO_VERSION}".linux-amd64.tar.gz -C "${K8SFS_CACHE_LOCATION}/${APP}/${GO_VERSION}"
  popd
  # [ -d "${K8SFS_CACHE_LOCATION}/go/current" ] && sudo rm "${K8SFS_CACHE_LOCATION}/go/current"
  # sudo ln -s "${K8SFS_CACHE_LOCATION}/go/go${GO_VERSION}/go" "${K8SFS_CACHE_LOCATION}/go/current"
}
# Install only when this exact version is not cached yet.
[ ! -d "${K8SFS_CACHE_LOCATION}/go/${GO_VERSION}" ] && install_go

# Go does not exists yet in the target, use cached version
export PATH="${K8SFS_CACHE_LOCATION}/go/${GO_VERSION}/go/bin:$PATH"
# Build etcd from source at the pinned git ref and copy the resulting
# binaries into the version-addressed cache.
function install_etcd() {
  APP='etcd'
  GIT_VERSION="${ETCD_GIT_VERSION}"
  GIT_LOCATION="github.com/etcd-io/etcd"
  # `go get` may fail for an already-fetched repo; the checkout below pins it.
  go get "${GIT_LOCATION}" || true
  pushd "${GOPATH}/src/${GIT_LOCATION}"
  git checkout "${GIT_VERSION}"
  make clean
  go mod vendor
  make
  sudo mkdir -p "${K8SFS_CACHE_LOCATION}/${APP}/${GIT_VERSION}/bin"
  sudo cp bin/* "${K8SFS_CACHE_LOCATION}/${APP}/${GIT_VERSION}/bin/"
  popd
  # [ -d "${K8SFS_CACHE_LOCATION}/${APP}/current" ] && sudo rm "${K8SFS_CACHE_LOCATION}/${APP}/current"
  # sudo ln -s "${K8SFS_CACHE_LOCATION}/${APP}/${GIT_VERSION}" "${K8SFS_CACHE_LOCATION}/${APP}/current"
}
# Build only when this exact version is not cached yet.
[ ! -d "${K8SFS_CACHE_LOCATION}/etcd/${ETCD_GIT_VERSION}" ] && install_etcd
# Build the Kubernetes binaries from source at the pinned git ref and copy
# them into the version-addressed cache.
function install_kubernetes() {
  APP='kubernetes'
  GIT_VERSION="${KUBERNETES_GIT_VERSION}"
  GIT_LOCATION="github.com/kubernetes/kubernetes"
  go get "${GIT_LOCATION}" || true
  pushd "${GOPATH}/src/${GIT_LOCATION}"
  git checkout "${GIT_VERSION}"
  make clean
  make
  sudo mkdir -p "${K8SFS_CACHE_LOCATION}/${APP}/${GIT_VERSION}/bin"
  # Kubernetes puts its build products under _output/bin.
  sudo cp _output/bin/* "${K8SFS_CACHE_LOCATION}/${APP}/${GIT_VERSION}/bin/"
  popd
  # [ -d ${K8SFS_CACHE_LOCATION}/"${APP}"/current ] && sudo rm ${K8SFS_CACHE_LOCATION}/"${APP}"/current
  # sudo ln -s ${K8SFS_CACHE_LOCATION}/"${APP}"/"${GIT_VERSION}" ${K8SFS_CACHE_LOCATION}/"${APP}"/current
}
# Build only when this exact version is not cached yet.
[ ! -d "${K8SFS_CACHE_LOCATION}/kubernetes/${KUBERNETES_GIT_VERSION}" ] && install_kubernetes
# Build the cfssl TLS toolkit from source at the pinned git ref and copy
# its binaries into the version-addressed cache.
function install_cfssl() {
  APP='cfssl'
  GIT_VERSION="${CFSSL_GIT_VERSION}"
  GIT_LOCATION="github.com/cloudflare/cfssl"
  go get "${GIT_LOCATION}" || true
  pushd "${GOPATH}/src/${GIT_LOCATION}"
  git checkout "${GIT_VERSION}"
  make clean
  make
  sudo mkdir -p "${K8SFS_CACHE_LOCATION}/${APP}/${GIT_VERSION}/bin"
  sudo cp bin/* "${K8SFS_CACHE_LOCATION}/${APP}/${GIT_VERSION}/bin/"
  popd
  # [ -d ${K8SFS_CACHE_LOCATION}/"${APP}"/current ] && sudo rm ${K8SFS_CACHE_LOCATION}/"${APP}"/current
  # sudo ln -s ${K8SFS_CACHE_LOCATION}/"${APP}"/"${GIT_VERSION}" ${K8SFS_CACHE_LOCATION}/"${APP}"/current
}
# Build only when this exact version is not cached yet.
[ ! -d "${K8SFS_CACHE_LOCATION}/cfssl/${CFSSL_GIT_VERSION}" ] && install_cfssl
# Build runc from source (with seccomp + apparmor support) at the pinned
# git ref, cache the binary, and point /usr/local/sbin/runc at it.
function install_runc() {
  APP='runc'
  GIT_VERSION="${RUNC_GIT_VERSION}"
  GIT_LOCATION="github.com/opencontainers/runc"
  go get "${GIT_LOCATION}" || true
  pushd "${GOPATH}/src/${GIT_LOCATION}"
  git checkout "${GIT_VERSION}"
  make clean
  make BUILDTAGS='seccomp apparmor'
  sudo mkdir -p "${K8SFS_CACHE_LOCATION}/${APP}/${GIT_VERSION}/bin"
  sudo cp runc "${K8SFS_CACHE_LOCATION}/${APP}/${GIT_VERSION}/bin/"
  # sudo cp runc /usr/local/sbin/
  popd
  # TODO: do not overwrite runc, better update containerd config to use correct runc location
  # FIX: the symlink target was hard-coded to /opt/k8sfs/runc/v1.0.0-rc92,
  # so bumping RUNC_GIT_VERSION silently left /usr/local/sbin/runc pointing
  # at the stale build. Derive it from the configured paths instead — the
  # cache is rsynced to K8SFS_TARGET_LOCATION at the end of this script
  # (NOTE(review): assumes K8SFS_TARGET_LOCATION is /opt/k8sfs, as the old
  # hard-coded path implied — confirm in config.sh).
  sudo ln -fs "${K8SFS_TARGET_LOCATION}/${APP}/${GIT_VERSION}/bin/runc" /usr/local/sbin/runc
  # [ -d ${K8SFS_CACHE_LOCATION}/"${APP}"/current ] && sudo rm ${K8SFS_CACHE_LOCATION}/"${APP}"/current
  # sudo ln -s ${K8SFS_CACHE_LOCATION}/"${APP}"/"${GIT_VERSION}" ${K8SFS_CACHE_LOCATION}/"${APP}"/current
}
# Build only when this exact version is not cached yet.
[ ! -d "${K8SFS_CACHE_LOCATION}/runc/${RUNC_GIT_VERSION}" ] && install_runc
# Build containerd from source at the pinned git ref and copy its binaries
# into the version-addressed cache.
function install_containerd() {
  APP='containerd'
  GIT_VERSION="${CONTAINERD_GIT_VERSION}"
  GIT_LOCATION="github.com/containerd/containerd"
  go get "${GIT_LOCATION}" || true
  pushd "${GOPATH}/src/${GIT_LOCATION}"
  git checkout "${GIT_VERSION}"
  make clean
  make
  sudo mkdir -p "${K8SFS_CACHE_LOCATION}/${APP}/${GIT_VERSION}/bin"
  sudo cp bin/* "${K8SFS_CACHE_LOCATION}/${APP}/${GIT_VERSION}/bin/"
  popd
  # [ -d ${K8SFS_CACHE_LOCATION}/"${APP}"/current ] && sudo rm ${K8SFS_CACHE_LOCATION}/"${APP}"/current
  # sudo ln -s ${K8SFS_CACHE_LOCATION}/"${APP}"/"${GIT_VERSION}" ${K8SFS_CACHE_LOCATION}/"${APP}"/current
}
# Build only when this exact version is not cached yet.
[ ! -d "${K8SFS_CACHE_LOCATION}/containerd/${CONTAINERD_GIT_VERSION}" ] && install_containerd
# Build the CNI networking plugins from source at the pinned git ref and
# copy them into the version-addressed cache.
function install_cni() {
  APP='cni'
  GIT_VERSION="${CNI_GIT_VERSION}"
  GIT_LOCATION="github.com/containernetworking/plugins"
  go get "${GIT_LOCATION}" || true
  pushd "${GOPATH}/src/${GIT_LOCATION}"
  git checkout "${GIT_VERSION}"
  # The plugins repo ships its own build script instead of a Makefile.
  ./build_linux.sh
  sudo mkdir -p "${K8SFS_CACHE_LOCATION}/${APP}/${GIT_VERSION}/bin"
  sudo cp bin/* "${K8SFS_CACHE_LOCATION}/${APP}/${GIT_VERSION}/bin/"
  popd
  # [ -d ${K8SFS_CACHE_LOCATION}/"${APP}"/current ] && sudo rm ${K8SFS_CACHE_LOCATION}/"${APP}"/current
  # sudo ln -s ${K8SFS_CACHE_LOCATION}/"${APP}"/"${GIT_VERSION}" ${K8SFS_CACHE_LOCATION}/"${APP}"/current
  # Default /etc/containerd/config.toml path
  # sudo mkdir -p /opt/"${APP}"
  # [ ! -d /opt/"${APP}"/bin ] && sudo ln -s ${K8SFS_TARGET_LOCATION}/"${APP}"/"${GIT_LOCATION}"/bin /opt/"${APP}"/bin
  echo "cni done"
}
# Build only when this exact version is not cached yet.
[ ! -d "${K8SFS_CACHE_LOCATION}/cni/${CNI_GIT_VERSION}" ] && install_cni
# Download the prebuilt sonobuoy release tarball, unpack just the binary
# into the version-addressed cache, and make it executable.
function install_sonobuoy() {
  APP='sonobuoy'
  VERSION="${SONOBUOY_VERSION}"
  LOCATION="https://github.com/vmware-tanzu/sonobuoy/releases/download/v${VERSION}/sonobuoy_${VERSION}_linux_amd64.tar.gz"
  sudo mkdir -p "${K8SFS_CACHE_LOCATION}/${APP}/${VERSION}/bin"
  pushd "${K8SFS_CACHE_LOCATION}/${APP}/${VERSION}/bin"
  sudo wget "${LOCATION}"
  # Extract only the binary itself, then drop the archive.
  sudo tar xvfz sonobuoy_"${VERSION}"_linux_amd64.tar.gz sonobuoy
  sudo rm sonobuoy_"${VERSION}"_linux_amd64.tar.gz
  sudo chmod +x sonobuoy
  popd
  # [ -d ${K8SFS_CACHE_LOCATION}/"${APP}"/current ] && sudo rm ${K8SFS_CACHE_LOCATION}/"${APP}"/current
  # sudo ln -s ${K8SFS_CACHE_LOCATION}/"${APP}"/"${VERSION}" ${K8SFS_CACHE_LOCATION}/"${APP}"/current
}
# Download only when this exact version is not cached yet.
[ ! -d "${K8SFS_CACHE_LOCATION}/sonobuoy/${SONOBUOY_VERSION}" ] && install_sonobuoy
# Download the prebuilt crictl release tarball, unpack just the binary
# into the version-addressed cache, and make it executable.
function install_crictl() {
  APP='crictl'
  VERSION="${CRICTL_VERSION}"
  LOCATION="https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz"
  sudo mkdir -p "${K8SFS_CACHE_LOCATION}/${APP}/${VERSION}/bin"
  pushd "${K8SFS_CACHE_LOCATION}/${APP}/${VERSION}/bin"
  sudo wget "${LOCATION}"
  # Extract only the binary itself, then drop the archive.
  sudo tar xvfz crictl-"${VERSION}"-linux-amd64.tar.gz crictl
  sudo rm crictl-"${VERSION}"-linux-amd64.tar.gz
  sudo chmod +x crictl
  popd
  # [ -d ${K8SFS_CACHE_LOCATION}/"${APP}"/current ] && sudo rm ${K8SFS_CACHE_LOCATION}/"${APP}"/current
  # sudo ln -s ${K8SFS_CACHE_LOCATION}/"${APP}"/"${VERSION}" ${K8SFS_CACHE_LOCATION}/"${APP}"/current
}
# Download only when this exact version is not cached yet.
[ ! -d "${K8SFS_CACHE_LOCATION}/crictl/${CRICTL_VERSION}" ] && install_crictl
# Publish everything built above from the cache to the target location.
sudo rsync -r "${K8SFS_CACHE_LOCATION}"/* "${K8SFS_TARGET_LOCATION}/"
# Explicit success exit: the last `[ ! -d ... ] && install_*` guard may have
# returned non-zero when the version was already cached.
exit 0
# vim: ts=2 sw=2 et
| true
|
ef2e13dba87be3c1730e12dd569293482dbf7558
|
Shell
|
mendhak/pash
|
/compiler/pash_runtime_quick_abort.sh
|
UTF-8
| 11,061
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# PaSh quick-abort runtime: race the sequential script against the compiled
# parallel version (see the schematic further down in this file).

## File directory (location of this script, used to find runtime helpers)
RUNTIME_DIR=$(dirname "${BASH_SOURCE[0]}")
still_alive()
{
    # Emit the PIDs of this shell's live background jobs, space-separated
    # (tr rewrites each newline, so there is a trailing space and no final
    # newline).
    jobs -p | tr "\n" " "
}
log()
{
    # Quick-abort log line, tagged with this shell's PID. pash_redir_output
    # (defined by the PaSh runtime) gates/routes the output.
    pash_redir_output echo "$$: (QAbort) " "$@"
}
# Taken from: https://stackoverflow.com/a/20473191
# list_include_item "10 11 12" "2"
# Returns 0 iff the second argument appears as a whole whitespace-delimited
# word of the first argument.
function list_include_item {
  local haystack="$1"
  local needle="$2"
  # The needle is quoted inside the regex so it matches literally; it must
  # be bounded by start/end of string or whitespace on both sides.
  if [[ $haystack =~ (^|[[:space:]])"$needle"($|[[:space:]]) ]]; then
    return 0
  else
    return 1
  fi
}
## This spawns a buffer command to buffer inputs and outputs
##
## It writes the pid to stdout
spawn_eager()
{
    local name=$1
    local input=$2
    local output=$3
    ## Declare and assign separately so a failure of the tempfile helper is
    ## not masked by the (always-zero) exit status of `local name=$(cmd)`.
    local eager_file
    eager_file="$($RUNTIME_DIR/pash_ptempfile_name.sh)"
    ## Note: Using eager actually leads to some deadlock issues. It must have to do with eagers behavior when
    ## its input or output closes.
    # "$RUNTIME_DIR/../runtime/eager" "$input" "$output" "$eager_file" </dev/null 1>/dev/null 2>/dev/null &
    ## The buffer runs detached from our stdio so killing pipelines later
    ## cannot take it down via shared descriptors.
    "$RUNTIME_DIR/../runtime/dgsh_tee.sh" "$input" "$output" -I -f </dev/null 1>/dev/null 2>/dev/null &
    local eager_pid=$!
    log "Spawned $name eager: $eager_pid with:"
    log " -- IN: $input"
    log " -- OUT: $output"
    log " -- INTERM: $eager_file"
    ## The PID is the function's "return value" for the caller to capture.
    echo "$eager_pid"
}
## Kills the process group that belongs to the given pgid
kill_pg()
{
local pg_lead_pid=$1
/bin/kill -15 "-${pg_lead_pid}" 2> /dev/null
}
## TODO: Make sure that this waits for all processes in the process group to finish executing.
## Reap the process-group leader; currently only waits on the leader PID.
wait_pg()
{
    local leader=$1
    wait "$leader" 2> /dev/null
}
## Terminate the process group led by $1 and then reap its leader.
kill_wait_pg()
{
    local leader=$1
    kill_pg "$leader"
    wait_pg "$leader"
}
## Solution Schematic:
##
## (A) (B) (C) (D) (E)
## stdin --- tee --- eager --- seq.sh --- eager --- OUT_SEQ
## \ (F)
## \--- eager --- PAR_IN
##
## (1) If compiler fails, or sequential is done executing:
## - cat OUT_SEQ > stdout
##
## (2) If compiler succeeds:
## - USR1 to reroute so that it redirects to /dev/null
## - PAR_IN redirect to par stdin.
##
## Simplifying assumptions:
## - Not worrying about stderr
## - Not worrying about other inputs at the moment (assuming they are files if compiler succeeds)
## - Not worrying about other outputs
## + assuming that the parallel implementation will overwrite them
## + Assuming that the DFG outputs are not appended
##
## TODO: A first TODO would be to check them in the compilation process
##
## TODO: An alternative TODO would be to let preprocessing give us information about them, allowing us to
## have a finer tuned execution plan depending on this information. For example, if we see that script
## has append to some file we can be carefull and buffer its output using eager.
## NOTE: The intuition about why quick-abort works is that if the compilation succeeds, then the
## script is a DFG, meaning that we know exactly how it affects its environment after completing.
## Therefore, we can go back and stop the already running script without risking unsafe behavior.
## TODO: We also want to avoid executing the compiled script if it doesn't contain any improvement.
## TODO: Maybe the reroute needs to be put around (C) and not (D)
## TODO: Improve the happy path (very fast sequential) execution time
## TODO: Use reroute around dgsh_tees to make sure that they do not use storage unnecessarily
## (if their later command is done).
# Main quick-abort execution: race the original (sequential) script against
# the compiler; see the schematic above. $pash_execute_flag is presumably set
# by the caller that sources this file — TODO confirm.
if [ "$pash_execute_flag" -eq 1 ]; then
# set -x
## (A) Redirect stdin to `tee`
pash_tee_stdin="$($RUNTIME_DIR/pash_ptempfile_name.sh)"
mkfifo "$pash_tee_stdin"
## The redirections below are necessary to ensure that the background `cat` reads from stdin.
## fd 3 temporarily holds our real stdin so the detached (setsid) cat inherits it.
{ setsid cat > "$pash_tee_stdin" <&3 3<&- & } 3<&0
pash_input_cat_pid=$!
log "Spawned input cat with pid: $pash_input_cat_pid"
## (B) A `tee` that duplicates input to both the sequential and parallel
pash_tee_stdout1="$($RUNTIME_DIR/pash_ptempfile_name.sh)"
pash_tee_stdout2="$($RUNTIME_DIR/pash_ptempfile_name.sh)"
mkfifo "$pash_tee_stdout1" "$pash_tee_stdout2"
tee "$pash_tee_stdout1" > "$pash_tee_stdout2" < "$pash_tee_stdin" &
## (C) The sequential input eager
pash_seq_eager_output="$($RUNTIME_DIR/pash_ptempfile_name.sh)"
mkfifo "$pash_seq_eager_output"
seq_input_eager_pid=$(spawn_eager "sequential input" "$pash_tee_stdout1" "$pash_seq_eager_output")
## (D) Sequential command
pash_seq_output="$($RUNTIME_DIR/pash_ptempfile_name.sh)"
mkfifo "$pash_seq_output"
## setsid puts the sequential script in its own process group so kill_pg can
## later terminate its whole tree at once.
setsid "$RUNTIME_DIR/pash_wrap_vars.sh" \
$pash_runtime_shell_variables_file \
$pash_output_variables_file \
${pash_output_set_file} \
${pash_sequential_script_file} \
> "$pash_seq_output" < "$pash_seq_eager_output" &
pash_seq_pid=$!
log "Sequential pid: $pash_seq_pid"
## (E) The sequential output eager
pash_seq_eager2_output="$($RUNTIME_DIR/pash_ptempfile_name.sh)"
mkfifo "$pash_seq_eager2_output"
seq_output_eager_pid=$(spawn_eager "sequential output" "$pash_seq_output" "$pash_seq_eager2_output")
## (F) Second eager
pash_par_eager_output="$($RUNTIME_DIR/pash_ptempfile_name.sh)"
mkfifo "$pash_par_eager_output"
par_eager_pid=$(spawn_eager "parallel input" "$pash_tee_stdout2" "$pash_par_eager_output")
## Run the compiler
setsid python3 "$RUNTIME_DIR/pash_runtime.py" ${pash_compiled_script_file} --var_file "${pash_runtime_shell_variables_file}" "${@:2}" &
pash_compiler_pid=$!
log "Compiler pid: $pash_compiler_pid"
## Wait until one of the two (original script, or compiler) die
alive_pids=$(still_alive)
log "Still alive: $alive_pids"
## The backtick invocation expands to an empty command, so the loop condition
## takes list_include_item's exit status (bash: with no resulting command, the
## status of the last command substitution is used).
while `list_include_item "$alive_pids" "$pash_seq_pid"` && `list_include_item "$alive_pids" "$pash_compiler_pid"` ; do
## Wait for either of the two to complete
wait -n "$pash_seq_pid" "$pash_compiler_pid"
completed_pid_status=$?
log "Process exited with return code: $completed_pid_status"
alive_pids=$(still_alive)
log "Still alive: $alive_pids"
done
## If the sequential is still alive we want to see if the compiler succeeded
if `list_include_item "$alive_pids" "$pash_seq_pid"` ; then
pash_runtime_return_code=$completed_pid_status
log "Compilation was done first with return code: $pash_runtime_return_code"
## We only want to run the parallel if the compiler succeeded.
if [ "$pash_runtime_return_code" -eq 0 ]; then
## TODO: Is this necessary
## Redirect the sequential output to /dev/null
cat "$pash_seq_eager2_output" > /dev/null &
seq_cat_pid=$!
log "seq to /dev/null cat pid: $seq_cat_pid"
## Kill the sequential process tree
log "Killing sequential pid: $pash_seq_pid..."
kill_pg "$pash_seq_pid"
kill_status=$?
wait_pg "$pash_seq_pid"
seq_exit_status=$?
log "Sequential pid: $pash_seq_pid was killed successfully returning status $seq_exit_status."
log "Still alive: $(still_alive)"
## If kill failed it means it was already completed,
## and therefore we do not need to run the parallel.
##
## TOOD: Enable this optimization
if true || [ "$kill_status" -eq 0 ]; then
## (2) Run the parallel
log "Run parallel:"
log " -- Runtime vars: $pash_runtime_shell_variables_file"
log " -- Output vars: $pash_output_variables_file"
log " -- Output set: ${pash_output_set_file}"
log " -- Compiled script: ${pash_compiled_script_file}"
log " -- Input: $pash_par_eager_output"
"$RUNTIME_DIR/pash_wrap_vars.sh" \
$pash_runtime_shell_variables_file \
$pash_output_variables_file \
${pash_output_set_file} \
${pash_compiled_script_file} \
< "$pash_par_eager_output" &
## Note: For some reason the above redirection used to create some issues,
## but no more after we started using dgsh-tee
pash_par_pid=$!
log "Parallel is running with pid: $pash_par_pid..."
# strace -p $pash_par_pid 2>> $PASH_REDIR
wait "$pash_par_pid"
pash_runtime_final_status=$?
log "Parallel is done with status: $pash_runtime_final_status"
else
## TODO: Handle that case properly by enabling the optimization above.
log "ERROR: Shouldn't have reached that"
exit 1
fi
else
## If the compiler failed we just wait until the sequential is done.
## (1) Redirect the seq output to stdout
cat "$pash_seq_eager2_output" &
seq_output_cat_pid=$!
log "STDOUT cat pid: $seq_output_cat_pid"
log "Waiting for sequential: $pash_seq_pid"
wait "$pash_seq_pid"
pash_runtime_final_status=$?
log "DONE Sequential: $pash_seq_pid exited with status: $pash_runtime_final_status"
## TODO: It is not clear if we also need to wait for the output cat to end.
log "Waiting for sequential output cat: $seq_output_cat_pid"
wait "$seq_output_cat_pid"
log "DONE Waiting for sequential output cat: $seq_output_cat_pid"
fi
else
## Sequential finished first: its status is the script's status, and the
## compiler result no longer matters.
pash_runtime_final_status=$completed_pid_status
log "Sequential was done first with return code: $pash_runtime_final_status"
## (1) Redirect the seq output to stdout
cat "$pash_seq_eager2_output" &
final_cat_pid=$!
log "STDOUT cat pid: $final_cat_pid"
## We need to kill the compiler to not get delayed log output
## If this fails (meaning that compilation is done) we do not care
kill_wait_pg "$pash_compiler_pid"
wait "$final_cat_pid"
fi
## TODO: Not clear if this is needed or if it doesn indeed kill all the
## processes and cleans up everything properly
## Kill the input process
log "Killing the input cat process: $pash_input_cat_pid"
kill_wait_pg "$pash_input_cat_pid"
# kill -9 $pash_input_cat_pid 2> /dev/null
# wait $pash_input_cat_pid 2> /dev/null
log "The input cat: $pash_input_cat_pid died!"
## TODO: This (and the above) should not be needed actually, everything should be already done due to
## sequential and parallel both having exited.
## Kill every spawned process
still_alive_pids="$(still_alive)"
log "Killing all the still alive: $still_alive_pids"
## NOTE(review): quoting passes all pids as ONE argument to kill; with more
## than one surviving pid this kill likely fails (silently) — verify. The
## unquoted `wait` below does split correctly.
kill -15 "$still_alive_pids" 2> /dev/null
wait $still_alive_pids 2> /dev/null
log "All the alive pids died: $still_alive_pids"
## Return the exit code
## The subshell `(exit N)` sets $? for whoever sourced this file without
## terminating that shell.
(exit "$pash_runtime_final_status")
fi
| true
|
74c49e6f7c5c4af212b2690fa03e9dcba928daaf
|
Shell
|
hubrigant/dotfiles
|
/config_masters/zshrc
|
UTF-8
| 4,996
| 2.96875
| 3
|
[] |
no_license
|
# When DEBUG=true, trace PATH/path at the start and end of this file to
# ~/tmp/loginpaths.txt to debug login-time PATH mangling.
DEBUG=${DEBUG:-false}
if $DEBUG; then
echo "-------------------------" >> ~/tmp/loginpaths.txt
echo "zshrc> Start of execution" >> ~/tmp/loginpaths.txt
echo "zshrc> PATH: ${PATH}" >> ~/tmp/loginpaths.txt
echo "zshrc> path: ${path}" >> ~/tmp/loginpaths.txt
fi
# export TRY_THEMING=p10k
# enable powerlevel10k's instant prompt feature
# (p10k requires this near the top of .zshrc, before anything that prints)
if [[ -r ${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh ]]; then
source ${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh
fi
# begin section 1
# This first section adapted from https://www.outcoldman.com/en/archive/2015/09/13/keep-your-sh-together/
# If I see that zsh takes to much time to load I profile what has been changed,
# I want to see my shell ready in not more than 1 second
PROFILING=${PROFILING:-false}
if $PROFILING; then
zmodload zsh/zprof
fi
# Location of my dotfiles
DOTFILES=$HOME/.dotfiles
# Speed up load time
DISABLE_UPDATE_PROMPT=true
# Execute code in the background to not affect the current session
# (`&!` backgrounds AND disowns the job so the shell does not report it)
{
# Compile zcompdump, if modified, to increase startup speed.
zcompdump="${ZDOTDIR:-$HOME}/.zcompdump"
if [[ -s "$zcompdump" && (! -s "${zcompdump}.zwc" || "$zcompdump" -nt "${zcompdump}.zwc") ]]; then
zcompile "$zcompdump"
fi
} &!
# Perform compinit only once a day
autoload -Uz compinit
# if [ $(date +'%j') != $(stat -f '%Sm' -t '%j' ~/.zcompdump) ]; then
# glob qualifiers: N -> expand to nothing on no match, mh+24 -> modified
# more than 24 hours ago; so a stale dump triggers a full compinit.
if [[ -n ${ZDOTDIR}/.zcompdump(#qN.mh+24) ]]; then
compinit
else
compinit -C
fi
# if you want red dots to be displayed while waiting for completion
COMPLETION_WAITING_DOTS="true"
# tmux plugin settings
ZSH_TMUX_AUTOSTART_ONCE=true
ZSH_TMUX_FIXTERM=true
ZSH_TMUX_AUTOQUIT=false # tells TMUX not to close terminal once tmux exits
# set DEFAULT_USER based on ${USERNAME} variable
DEFAULT_USER=${USERNAME}
# end section 1
# use vi bindings
bindkey -v
# Uncomment the following line to use case-sensitive completion.
CASE_SENSITIVE="true"
# Uncomment the following line to disable auto-setting terminal title.
DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
[[ ! -f ${HOME}/.theming.zsh ]] || source ~/.theming.zsh
# dumb terminal can be a vim dump terminal in that case don't try to load plugins
# NOTE(review): `[ ! $TERM = dumb ]` leaves $TERM unquoted; an unset/empty
# TERM would break this test — consider [[ $TERM != dumb ]].
if [ ! $TERM = dumb ]; then
# ZGEN_DIR=${HOME}/.zgen
source ${DOTFILES}/plugins/autojump/autojump.plugin.zsh
source ${DOTFILES}/plugins/my-aliases/my-aliases.plugin.zsh
# source ${DOTFILES}/plugins/my-brew/my-brew.plugin.zsh
# source ${DOTFILES}/plugins/my-tmux/my-tmux.plugin.zsh
source ${DOTFILES}/plugins/sudo/sudo.plugin.zsh
# source ${DOTFILES}/plugins/taskwarrior/taskwarrior.plugin.zsh
# source ${DOTFILES}/plugins/tmuxinator/tmuxinator.plugin.zsh
# source ${DOTFILES}/plugins/tpm/tpm.plugin.zsh
# source ${DOTFILES}/plugins/vi-mode/vi-mode.plugin.zsh
# source ${DOTFILES}/plugins/vundle/vundle.plugin.zsh
# source ${DOTFILES}/plugins/zsh-autosuggestions/zsh-autosuggestions.plugin.zsh
# source ${DOTFILES}/plugins/zsh-completions/zsh-completions.plugin.zsh
fi
# additional configuration for zsh
# Remove the history (fc -l) command from the history list when invoked.
setopt histnostore
# Remove superfluous blanks from each command line being added to the history list.
setopt histreduceblanks
# Do not exit on end-of-file. Require the use of exit or logout instead.
# setopt ignoreeof
# Print the exit value of programs with non-zero exit status.
# setopt printexitvalue
# Do not share history
# setopt no_share_history
# Save history between sessions
HISTFILE=~/.zsh_histfile
HISTSIZE=1000
SAVEHIST=1000
setopt appendhistory
# load local tmux configurations
tmux source-file ~/.tmux-local.conf
# specific for machine configuration, which I don't sync
if [ -f ~/.machinerc ]; then
source ~/.machinerc
fi
# Load direnv, which hopefully will speed up prompt generation
# export DIRENV_LOG_FORMAT=
# eval "$(direnv hook zsh)"
# if profiling was on
# Print the zprof report collected since zmodload zsh/zprof at the top.
if $PROFILING; then
zprof
fi
# zsh-syntax-highlighting must be sourced as the last thing in this file
source ~/.dotfiles/plugins/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
# End-of-file DEBUG trace, paired with the block at the top of this file.
if $DEBUG; then
echo "zshrc> PATH: ${PATH}" >> ~/tmp/loginpaths.txt
echo "zshrc> path: ${path}" >> ~/tmp/loginpaths.txt
echo "zshrc> End of execution" >> ~/tmp/loginpaths.txt
echo "-------------------------" >> ~/tmp/loginpaths.txt
fi
# fix up the $path list using my path plugin
source ${DOTFILES}/plugins/my-path/my-path.plugin.zsh
# export NVM_DIR="$HOME/.nvm"
# [ -s "/usr/local/opt/nvm/nvm.sh" ] && . "/usr/local/opt/nvm/nvm.sh" # This loads nvm
# [ -s "/usr/local/opt/nvm/etc/bash_completion.d/nvm" ] && . "/usr/local/opt/nvm/etc/bash_completion.d/nvm" # This loads nvm bash_completion
# Optional per-host overrides, sourced last so they win.
[[ ! -f ${HOME}/.zshrc-local ]] || source ~/.zshrc-local
# Add the private function directory to the function search path.
fpath+=${ZDOTDIR:-~}/.zsh_functions
| true
|
043f8e87fc3b2ffb4871a90e6753c0ee86487024
|
Shell
|
paul-levy/SF_diversity
|
/batchDescr.sh
|
UTF-8
| 8,944
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
### README
# Have you set the dataList name?
# Have you set the phAdv name?
# Have you set the RVC name?#
# Have you set the descriptive fit name?
# Have you set the modelRecovery status/type?
### Go to descr_fits.py first
# arguments are
# 1 - cell #
# 2 - dispersion (index into the list of dispersions for that cell; not used in descr/DoG fits)
# 3 - data directory (e.g. LGN/ or V1/)
# 4 - make phase advance fits (yes [1], no [0], vec correction for F1 [-1])
# 5 - make RVC fits
# 6 - make RVC f0-only fits
# 7 - which RVC model? (see hf::rvc_fit_name)
# 0 - Movshon/Kiorpes (only for LGN)
# 1 - Naka-Rushton
# 2 - Peirce-adjustment of Naka-Rushton (can super-saturate)
# 8 - make descriptive (DoG) fits (1 or 0)
# 9 - DoG model (flexGauss [0; not DoG] or sach [1] or tony [2])
# 10 - loss type (for DoG fit);
# 1 - lsq
# 2 - sqrt
# 3 - poiss [was previously default]
# 4 - Sach sum{[(exp-obs)^2]/[k+sigma^2]} where
# k := 0.01*max(obs); sigma := measured variance of the response
# 11 - bootstrap fits (0 - no; nBoots[>0] - yes) //see hf.dog_fit for details
# 12 - joint fitting (0 - no; 1 - yes) //see hf.dog_fit for details
# [13 - phase direction (pos or neg)]; default is pos (1); neg (-1); or NEITHER (0)
# [14 - regularization for gain term (>0 means penalize for high gain)] default is 0
### GUIDE (as of 19.11.05)
# V1/ - use dataList_glx.npy, was 35 cells -- now 56 (as of m681)
# V1/ - model recovery (dataList_glx_mr; mr_fitList...), 10 cells
# V1_orig/ - model recovery (dataList_mr; mr_fitList...), 10 cells
# V1_orig/ - standard, 59 cells
# altExp - standard, 8 cells
# LGN/ - standard, 77 cells
# Activate the conda environment that has the fitting dependencies.
source activate lcv-python
########
### NOTES:
### If running only SF descr or RVC-f0 fits, do not need to run separately for all disp
###
########
# Positional arguments to this batch script:
EXP_DIR=$1    # experiment directory, e.g. V1/, V1_orig/, altExp/, LGN/
RVC_FIT=$2    # 1 to run the RVC (and, for LGN, phase-advance) fits
DESCR_FIT=$3  # 1 to run the descriptive SF (DoG/flex-gauss) fits
BOOT_REPS=$4  # number of bootstrap repetitions passed through to descr_fits.py
# --- V1/: vec-corrected F1 responses (phase arg -1). Cells are launched as
# background jobs in batches (~40 cells x up to 4 dispersions) with a `wait`
# barrier between batches to bound the number of concurrent processes. ---
if [ "$EXP_DIR" = "V1/" ]; then
if [[ $RVC_FIT -eq 1 ]]; then
## RVCs ONLY with NO phase adjustment (instead, vector correction for F1)
# -- Naka-Rushton
for run in {1..40}
do
python3.6 descr_fits.py $run 0 V1/ -1 1 0 1 0 0 2 $BOOT_REPS 0 0 &
python3.6 descr_fits.py $run 1 V1/ -1 1 0 1 0 0 2 $BOOT_REPS 0 0 &
python3.6 descr_fits.py $run 2 V1/ -1 1 0 1 0 0 2 $BOOT_REPS 0 0 &
python3.6 descr_fits.py $run 3 V1/ -1 1 0 1 0 0 2 $BOOT_REPS 0 0 &
# -- Movshon RVC
python3.6 descr_fits.py $run 0 V1/ -1 1 0 0 0 0 2 $BOOT_REPS 0 0 &
python3.6 descr_fits.py $run 1 V1/ -1 1 0 0 0 0 2 $BOOT_REPS 0 0 &
python3.6 descr_fits.py $run 2 V1/ -1 1 0 0 0 0 2 $BOOT_REPS 0 0 &
python3.6 descr_fits.py $run 3 V1/ -1 1 0 0 0 0 2 $BOOT_REPS 0 0 &
done
wait
for run in {41..81}
do
python3.6 descr_fits.py $run 0 V1/ -1 1 0 1 0 0 2 $BOOT_REPS 0 0 &
python3.6 descr_fits.py $run 1 V1/ -1 1 0 1 0 0 2 $BOOT_REPS 0 0 &
python3.6 descr_fits.py $run 2 V1/ -1 1 0 1 0 0 2 $BOOT_REPS 0 0 &
python3.6 descr_fits.py $run 3 V1/ -1 1 0 1 0 0 2 $BOOT_REPS 0 0 &
# -- Movshon RVC
python3.6 descr_fits.py $run 0 V1/ -1 1 0 0 0 0 2 $BOOT_REPS 0 0 &
python3.6 descr_fits.py $run 1 V1/ -1 1 0 0 0 0 2 $BOOT_REPS 0 0 &
python3.6 descr_fits.py $run 2 V1/ -1 1 0 0 0 0 2 $BOOT_REPS 0 0 &
python3.6 descr_fits.py $run 3 V1/ -1 1 0 0 0 0 2 $BOOT_REPS 0 0 &
done
wait
fi
if [[ $DESCR_FIT -eq 1 ]]; then
# then, just SF tuning (again, vec corr. for F1, not phase adjustment);
# -- responses derived from vecF1 corrections, if F1 responses
for run in {1..40}
do
python3.6 descr_fits.py $run 0 V1/ -1 0 0 1 1 0 2 $BOOT_REPS 0 0 & # flex gauss
python3.6 descr_fits.py $run 0 V1/ -1 0 0 1 1 2 2 $BOOT_REPS 0 0 & # Tony DoG
#python3.6 descr_fits.py $run 0 V1/ -1 0 0 1 1 1 2 $BOOT_REPS 0 0 & # sach DoG
done
wait
for run in {41..81}
do
python3.6 descr_fits.py $run 0 V1/ -1 0 0 1 1 0 2 $BOOT_REPS 0 0 & # flex gauss
python3.6 descr_fits.py $run 0 V1/ -1 0 0 1 1 2 2 $BOOT_REPS 0 0 & # Tony DoG
#python3.6 descr_fits.py $run 0 V1/ -1 0 0 1 1 1 2 $BOOT_REPS 0 0 & # sach DoG
done
wait
fi
fi
# --- V1_orig/: f0-based (rvc_f0 + descr fits only); note these calls pass
# one fewer trailing argument than the V1/ calls above. ---
if [ "$EXP_DIR" = "V1_orig/" ]; then
if [[ $RVC_FIT -eq 1 ]]; then
for run in {1..30}
do
# V1_orig/ -- rvc_f0 and descr only
python3.6 descr_fits.py $run 0 V1_orig/ -1 0 1 1 0 0 2 $BOOT_REPS 0 &
done
wait
for run in {31..59}
do
# V1_orig/ -- rvc_f0 and descr only
python3.6 descr_fits.py $run 0 V1_orig/ -1 0 1 1 0 0 2 $BOOT_REPS 0 &
done
wait
fi
wait
if [[ $DESCR_FIT -eq 1 ]]; then
for run in {1..30}
do
# then, just SF tuning (again, vec corr. for F1, not phase adjustment);
python3.6 descr_fits.py $run 0 V1_orig/ -1 0 0 1 1 0 2 $BOOT_REPS 0 & # flex. gauss
python3.6 descr_fits.py $run 0 V1_orig/ -1 0 0 1 1 2 2 $BOOT_REPS 0 & # Tony DoG
#python3.6 descr_fits.py $run 0 V1_orig/ -1 0 0 1 1 1 2 $BOOT_REPS 0 & # sach DoG
done
wait
for run in {31..59}
do
# then, just SF tuning (again, vec corr. for F1, not phase adjustment);
# NOTE(review): unlike the {1..30} batch above (flex gauss + Tony DoG, loss 2),
# this batch runs flex gauss twice (loss 2 and loss 4) and no Tony DoG —
# confirm this asymmetry between the two halves is intended.
python3.6 descr_fits.py $run 0 V1_orig/ -1 0 0 1 1 0 2 $BOOT_REPS 0 & # flex. gauss
python3.6 descr_fits.py $run 0 V1_orig/ -1 0 0 1 1 0 4 $BOOT_REPS 0 & # flex. gauss, sach loss
#python3.6 descr_fits.py $run 0 V1_orig/ -1 0 0 1 1 1 2 $BOOT_REPS 0 & # sach DoG
done
wait
fi
fi
# --- altExp/: same f0-based treatment as V1_orig/, 8 cells, single batch. ---
if [ "$EXP_DIR" = "altExp/" ]; then
if [[ $RVC_FIT -eq 1 ]]; then
for run in {1..8}
do
# altExp/ -- rvc_f0 and descr only
python3.6 descr_fits.py $run 0 altExp/ -1 0 1 1 0 0 2 $BOOT_REPS 0 &
done
wait
fi
wait
if [[ $DESCR_FIT -eq 1 ]]; then
for run in {1..8}
do
# then, just SF tuning (again, vec corr. for F1, not phase adjustment);
python3.6 descr_fits.py $run 0 altExp/ -1 0 0 1 1 0 2 $BOOT_REPS 0 & # flex. gauss
python3.6 descr_fits.py $run 0 altExp/ -1 0 0 1 1 2 2 $BOOT_REPS 0 & # Tony DoG
#python3.6 descr_fits.py $run 0 altExp/ -1 0 0 1 1 1 2 $BOOT_REPS 0 & # sach DoG
done
wait
fi
fi
# --- LGN/: phase-advance fits first (phase arg 1), then RVC fits that
# depend on them, then descriptive fits; 77 cells in two batches. ---
if [ "$EXP_DIR" = "LGN/" ]; then
## LGN - phase adjustment (will be done iff LGN/ 1; not if LGN/ 0 ) and F1 rvc
if [[ $RVC_FIT -eq 1 ]]; then
for run in {1..38}
do
# phase adj
python3.6 descr_fits.py $run 0 LGN/ 1 0 0 0 0 0 3 $BOOT_REPS 0 &
done
wait
for run in {39..77}
do
# phase adj
python3.6 descr_fits.py $run 0 LGN/ 1 0 0 0 0 0 3 $BOOT_REPS 0 &
done
wait
fi
# NOTE(review): this block is gated on RVC_FIT again (same flag as the
# phase-adjustment loops above) — presumably intentional, since the RVC
# fits consume the phase fits; confirm.
if [[ $RVC_FIT -eq 1 ]]; then
for run in {1..38}
do
# RVC (movshon)
python3.6 descr_fits.py $run 0 LGN/ 0 1 0 0 0 0 3 $BOOT_REPS 0 &
python3.6 descr_fits.py $run 1 LGN/ 0 1 0 0 0 0 3 $BOOT_REPS 0 &
python3.6 descr_fits.py $run 2 LGN/ 0 1 0 0 0 0 3 $BOOT_REPS 0 &
python3.6 descr_fits.py $run 3 LGN/ 0 1 0 0 0 0 3 $BOOT_REPS 0 &
# RVC (Naka-Rushton)
python3.6 descr_fits.py $run 0 LGN/ 0 1 0 1 0 0 3 $BOOT_REPS 0 &
python3.6 descr_fits.py $run 1 LGN/ 0 1 0 1 0 0 3 $BOOT_REPS 0 &
python3.6 descr_fits.py $run 2 LGN/ 0 1 0 1 0 0 3 $BOOT_REPS 0 &
python3.6 descr_fits.py $run 3 LGN/ 0 1 0 1 0 0 3 $BOOT_REPS 0 &
done
wait
for run in {39..77}
do
# RVC (movshon)
python3.6 descr_fits.py $run 0 LGN/ 0 1 0 0 0 0 3 $BOOT_REPS 0 &
python3.6 descr_fits.py $run 1 LGN/ 0 1 0 0 0 0 3 $BOOT_REPS 0 &
python3.6 descr_fits.py $run 2 LGN/ 0 1 0 0 0 0 3 $BOOT_REPS 0 &
python3.6 descr_fits.py $run 3 LGN/ 0 1 0 0 0 0 3 $BOOT_REPS 0 &
# RVC (Naka-Rushton)
python3.6 descr_fits.py $run 0 LGN/ 0 1 0 1 0 0 3 $BOOT_REPS 0 &
python3.6 descr_fits.py $run 1 LGN/ 0 1 0 1 0 0 3 $BOOT_REPS 0 &
python3.6 descr_fits.py $run 2 LGN/ 0 1 0 1 0 0 3 $BOOT_REPS 0 &
python3.6 descr_fits.py $run 3 LGN/ 0 1 0 1 0 0 3 $BOOT_REPS 0 &
done
wait
fi
if [[ $DESCR_FIT -eq 1 ]]; then
for run in {1..38}
do
# Descr fits (based on Movshon RVCs)
python3.6 descr_fits.py $run 0 LGN/ 0 0 0 0 1 0 2 $BOOT_REPS 0 & # flex gauss, not joint
python3.6 descr_fits.py $run 0 LGN/ 0 0 0 0 1 2 2 $BOOT_REPS 0 & # Tony DoG, not joint (sqrt)
#python3.6 descr_fits.py $run 0 LGN/ 0 0 0 0 1 1 2 $BOOT_REPS 0 & # sach DoG, not joint (sqrt)
#python3.6 descr_fits.py $run 0 LGN/ 0 0 0 0 1 1 4 $BOOT_REPS 0 & # sach DoG, not joint (sach loss)
done
wait
for run in {39..77}
do
# Descr fits (based on Movshon RVCs)
python3.6 descr_fits.py $run 0 LGN/ 0 0 0 0 1 0 2 $BOOT_REPS 0 & # flex gauss, not joint
python3.6 descr_fits.py $run 0 LGN/ 0 0 0 0 1 2 2 $BOOT_REPS 0 & # Tony DoG, not joint (sqrt)
#python3.6 descr_fits.py $run 0 LGN/ 0 0 0 0 1 1 2 $BOOT_REPS 0 & # sach DoG, not joint (sqrt)
#python3.6 descr_fits.py $run 0 LGN/ 0 0 0 0 1 1 4 $BOOT_REPS 0 & # sach DoG, not joint (sach loss)
done
wait
fi
fi
| true
|
0d1990edf218f1ad1b48c4f59cbf89201d42a1c0
|
Shell
|
trapexit/TempleOS
|
/TempleOSCD/Linux/GodPassage
|
UTF-8
| 319
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# This prints a random Bible passage: downloads Bible.TXT on first run, then
# picks a random offset from the END of the file and prints 16 lines there.
if [ ! -f Bible.TXT ]; then
  echo "Downloading Bible.TXT..."
  wget http://www.templeos.org/Wb/Home/Wb2/Files/Text/Bible.TXT
  echo "Done."
fi
# shuf -i draws a single integer directly from the range instead of the old
# `shuf -en 1 {1..100000}`, which brace-expanded 100000 arguments onto the
# command line just to pick one of them.
LINE=$(shuf -i 1-100000 -n 1 --random-source=/dev/urandom)
echo "Line $LINE:"
# NOTE(review): tail -n counts from the end of the file, so $LINE is really
# "lines before the end", not an absolute line number — the header is
# approximate; confirm whether absolute numbering was intended.
tail -n "$LINE" Bible.TXT | head -n 16
| true
|
b7fb0cdf492b46912ddf8c75dbf86479881daf98
|
Shell
|
roman-neuhauser/dotfiles-zsh
|
/.zsh/functions/redir
|
UTF-8
| 786
| 3.671875
| 4
|
[] |
no_license
|
#! zsh function redir
# redir [OPTS] [-0 FILE] [OPTS] [-1 FILE] [OPTS] [-2 FILE] CMD [ARG...]
# Run CMD with fds 0/1/2 redirected to the given files.
# OPTS:
# -A open following fds using >
# -a open following fds using >>
# -C open following fds under 'setopt noclobber'
# -c open following fds under 'setopt clobber'
# Options are scoped to this function only; errreturn aborts on any
# failed exec/open, nounset catches typos in fd variables.
setopt localoptions errreturn nounset
# o0/o1/o2 hold the fd numbers CMD's stdin/stdout/stderr will be wired to;
# they default to the function's own 0/1/2 when no file is given.
declare -i append=0 o0=0 o1=1 o2=2 ox=-1
declare optname OPTARG OPTIND
while getopts 0:1:2:AaCc optname; do
ox=-1
case $optname in
A) append=0 ;;
a) append=1 ;;
C) setopt noclobber ;;
c) setopt clobber ;;
# `exec {var}<file` allocates a fresh fd (>= 10) and stores its number in var.
0) exec {o0}<$OPTARG ;;
1|2)
if (( append )); then
exec {ox}>>$OPTARG
else
exec {ox}>$OPTARG
fi
# Indirect assignment: store the freshly allocated fd number (ox) into
# o1 or o2, picking the variable name from the option letter.
: ${(P)${:-o$optname}::=$ox}
;;
esac
done; shift $((OPTIND - 1))
# Finally run CMD with its stdio dup'ed from the collected fds.
"$@" <&${o0} 1>&${o1} 2>&${o2}
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.