blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d604d3fe5df2fc86811ed986c8254475ee3e56fa
|
Shell
|
openbsd/src
|
/regress/sys/ffs/tests/link/03.t
|
UTF-8
| 773
| 2.625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# $FreeBSD: src/tools/regression/fstest/tests/link/03.t,v 1.1 2007/01/17 01:42:09 pjd Exp $
# Regression test: link(2) must fail with ENAMETOOLONG when a path exceeds
# the 1023-character limit, and succeed at exactly 1023 characters.
# NOTE(review): `desc`, `namegen`, `expect`, and the ${name255}/${path1021}/
# ${path1023}/${path1024} variables are supplied by the fstest harness that
# runs this script -- confirm against the surrounding framework.
desc="link returns ENAMETOOLONG if an entire length of either path name exceeded 1023 characters"
n0=`namegen`
# Build a directory tree deep enough that path1023/path1024 fit inside it.
expect 0 mkdir ${name255} 0755
expect 0 mkdir ${name255}/${name255} 0755
expect 0 mkdir ${name255}/${name255}/${name255} 0755
expect 0 mkdir ${path1021} 0755
expect 0 create ${path1023} 0644
# 1023-char paths are legal as either link() argument.
expect 0 link ${path1023} ${n0}
expect 0 unlink ${path1023}
expect 0 link ${n0} ${path1023}
expect 0 unlink ${path1023}
# 1024-char paths must be rejected in either argument position.
expect ENAMETOOLONG link ${n0} ${path1024}
expect 0 unlink ${n0}
expect ENAMETOOLONG link ${path1024} ${n0}
# Tear down the directory tree, deepest first.
expect 0 rmdir ${path1021}
expect 0 rmdir ${name255}/${name255}/${name255}
expect 0 rmdir ${name255}/${name255}
expect 0 rmdir ${name255}
| true
|
9293679eb23772b17d36ec044eff12cbafc8b46d
|
Shell
|
grahamnscp/soe-scripts
|
/katello-disconnected/mkisofs-dummy
|
UTF-8
| 1,033
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Dummy stand-in for mkisofs: accepts the option subset pulp's export
# distributor uses and writes a one-line placeholder file instead of
# building a real ISO.
#
# Sample command:
# mkisofs -r -D -graft-points \
# -path-list /var/lib/pulp/working/repos/rhel-server-rhscl-7-rpms-7Server-x86_64/distributors/export_distributor/realized/pulpiso-sR2vBE \
# -o /var/lib/pulp/working/repos/rhel-server-rhscl-7-rpms-7Server-x86_64/distributors/export_distributor/output/rhel-server-rhscl-7-rpms-7Server-x86_64-2015-05-01T10.09-01.iso
# Configure alternatives:
# alternatives --install /usr/bin/mkisofs mkisofs /usr/bin/mkisofs-dummy 120
# alternatives --config mkisofs
# Parse arguments
PATH_LIST=/tmp               # default content source if -path-list is absent
ISO_FILE=/tmp/dummy_$$.iso   # default output, unique per invocation via $$
echo "OPTIONS=$*"
while [ $# -gt 0 ]
do
  case $1 in
    -r) ;;             # accepted and ignored
    -D) ;;             # accepted and ignored
    -graft-points) ;;  # accepted and ignored
    # NOTE(review): `eval echo` re-expands the value (e.g. ~, embedded vars);
    # presumably intentional for this dummy -- confirm before tightening.
    -path-list) PATH_LIST=`eval echo $2`; shift ;;
    -o) ISO_FILE=`eval echo $2`; shift ;;
    (-) shift; break ;;
    (-*) echo "$0: error; unrecognised option $1" 1>&2; exit 1 ;;
    *) break ;;
  esac
  shift
done
# Make the dummy iso file. Both variables are quoted so paths containing
# spaces write to the intended file instead of word-splitting.
echo "Dummy iso: ($ISO_FILE) for content: $PATH_LIST" > "$ISO_FILE"
exit 0
| true
|
0228b5aba2aeb05a95b2d4a4ec7fbf94fca35269
|
Shell
|
SeldonIO/seldon-core
|
/examples/cicd/cicd-argocd/seldon-core/remove-helm
|
UTF-8
| 299
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Remove Helm's server-side components from the cluster named by KUBE_CONTEXT,
# which is expected to be defined in ../settings.sh.
set -o nounset
set -o errexit
set -o pipefail

# Directory this script lives in, resolved even when invoked via a relative path.
STARTUP_DIR="$( cd "$( dirname "$0" )" && pwd )"

if [ ! -f "${STARTUP_DIR}/../settings.sh" ]; then
  echo "settings.sh not found!"
  exit 1
fi

# Quoted: a checkout path containing spaces must not word-split the source.
source "${STARTUP_DIR}/../settings.sh"

# `set -o nounset` aborts here if settings.sh failed to define KUBE_CONTEXT.
helm reset --kube-context="${KUBE_CONTEXT}"
| true
|
453fa5380c34813f5e3965afcc4f9db731e85016
|
Shell
|
LevyForchh/odin
|
/scripts/bootstrap_deployer
|
UTF-8
| 429
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# bootstrap odin
# assume-role to the correct account
set -e

./scripts/build_lambda_zip

# Project defaults to coinbase/odin; the step name replaces the first "/"
# with "-" (parameter expansion instead of forking `echo | sed`, same
# first-occurrence semantics as the original sed expression).
PROJECT_NAME=${PROJECT_NAME:-coinbase/odin}
STEP_NAME=${PROJECT_NAME/\//-}

echo "$PROJECT_NAME"
echo "$STEP_NAME"

# Register the bootstrap step function; `go run odin.go json` emits the
# state-machine definition consumed by -states.
step bootstrap \
  -lambda "$STEP_NAME" \
  -step "$STEP_NAME" \
  -states "$(go run odin.go json)" \
  -project "$PROJECT_NAME" \
  -config "development"

# build_lambda_zip leaves lambda.zip in the working directory; clean it up.
rm lambda.zip
| true
|
8a1605813d86aa1d46141fcceb6399b0f75c1b80
|
Shell
|
oldayn/video
|
/dashls.sh
|
UTF-8
| 1,610
| 3.03125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
#
# Simple script to convert normal MP4 file
# to fMP4 fragments and DASH+HLS manifest using ffmpeg and bento4
#
# It takes one parameter (MP4 file name)
# and produce output folder with all content inside it
# It leave maximum quality stream MP4 file as reference version
#
# oldayn@gmail.com 2017
#
# "$1" is quoted in every ffmpeg call so input file names containing
# spaces are passed as a single argument.
# no audio, bitrate 5M, original resolution
ffmpeg -i "$1" -an -c:v libx264 -x264opts 'keyint=25:min-keyint=25:no-scenecut' -b:v 5M -maxrate 5M -bufsize 2M out5.mp4
# no audio, bitrate 3M, resize to 1280x720
ffmpeg -i "$1" -an -c:v libx264 -x264opts 'keyint=25:min-keyint=25:no-scenecut' -b:v 3M -maxrate 3M -bufsize 1M -vf "scale=-1:720" out4.mp4
# audio copy, bitrate 2M, resize to 960x540 (no correct resize for 480)
ffmpeg -i "$1" -c:a copy -c:v libx264 -x264opts 'keyint=25:min-keyint=25:no-scenecut' -b:v 2M -maxrate 2M -bufsize 500k -vf "scale=-1:540" out3.mp4
# no audio, bitrate 1M, resize to 640x360
ffmpeg -i "$1" -an -c:v libx264 -x264opts 'keyint=25:min-keyint=25:no-scenecut' -b:v 1M -maxrate 1M -bufsize 400k -vf "scale=-1:360" out2.mp4
# no audio, bitrate 500k, resize to 384x216
ffmpeg -i "$1" -an -c:v libx264 -x264opts 'keyint=25:min-keyint=25:no-scenecut' -b:v 500k -maxrate 500k -bufsize 200k -vf "scale=-1:216" out1.mp4
# mp4fragment them all
mp4fragment out5.mp4 out5f.mp4
mp4fragment out4.mp4 out4f.mp4
mp4fragment out3.mp4 out3f.mp4
mp4fragment out2.mp4 out2f.mp4
mp4fragment out1.mp4 out1f.mp4
# make split and playlists (mp4dash writes into ./output by default)
bento4/bin/mp4dash --hls --mpd-name=master.mpd out5f.mp4 out4f.mp4 out3f.mp4 out2f.mp4 out1f.mp4
# keep the 540p+audio rendition as the reference copy, drop intermediates
mv out3.mp4 output/master.mp4
rm out?.mp4
rm out?f.mp4
| true
|
abc143c2a78128d4a201898bf0602cfebd2ffe7f
|
Shell
|
f5devcentral/f5-bigip-image-generator
|
/src/bin/legacy/final_cloud_prepare.sysinit.mfg
|
UTF-8
| 10,189
| 3.5
| 4
|
[
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright (C) 2018-2022 F5 Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#####################################################################
# /etc/final_cloud_prepare.sysinit.mfg - runs once before selinux labeling during
# image generation.
#
# Perform one-time VADC/Cloud configuration.
# This script is NOT shipped in BIGIP.
# It will be executed once during vm_bundle build task at the last
# boot of the prep VM.
# It will be invoked by rs.sysinit.mfg script immediately before
# SELinux relabeling.
# The VM will be powered off after execution of this script and SELinux
# relabeling are done.
#
#####################################################################
DEBUG=0          # set to 1 to enable the dbg_* diagnostics below
# syslog is not available so early in the boot
LOG_ENABLED=0
source /etc/init.d/functions
source /etc/vadc-init/functions
#####################################################################
# Report the error and exit:
#
# Print a highly visible error banner to stdout and abort the script.
# Arguments: $* - error message; a placeholder is substituted when empty.
# Returns:   never; terminates the process with exit status 1.
function err_exit() {
    local n
    local msg="$*"
    [ -z "$msg" ] && msg="Missing error text"
    # Pad with blank lines so the banner stands out in the console log.
    for n in {1..20}; do echo "" ; done
    for n in {1..5}; do echo "############ ERROR #############" ; done
    echo "ERROR - $msg"
    for n in {1..20}; do echo "" ; done
    exit 1
}
#####################################################################
#####################################################################
# Debug helper: dump the passwd/shadow entries for one user.
# Arguments: $1 - user name (required; aborts via err_exit when missing)
#            $2 - when 1, scan the whole filesystem for every passwd/shadow
#                 file instead of only the canonical /etc pair (optional,
#                 defaults to 0).
function print_user_passwd_shadow() {
    local user_name=$1
    local f_auto_search=$2
    local entry
    local list_files="/etc/passwd /etc/shadow"
    [ "$user_name" == "" ] && err_exit "${FUNCNAME[0]} - missing 1st param"
    [ "$f_auto_search" == "" ] && f_auto_search=0
    dbg_echo "${FUNCNAME[0]} - user_name=$user_name f_auto_search=$f_auto_search"
    if [ $f_auto_search -eq 1 ]; then
        # Filesystem-wide search; slow, intended only for debugging.
        list_files="$(find / -name passwd) $(find / -name shadow)"
    fi
    echo "list_files = $list_files"
    for entry in $list_files ; do
        ls -l $entry
        echo $entry
        echo "----------------"
        grep "^$user_name" $entry
        echo ""
    done
}
#####################################################################
##################################################################
# Debug helper: dump environment, mounts, device-mapper nodes and platform
# files; with $1 == 1 also list key directories and authorized_keys state.
# No-op unless the global DEBUG is non-zero.
function dbg_show_env() {
local fVerbose=$1
local d
local f
# NOTE(review): list_files is declared but never used in this function.
local list_dirs=""
local list_files=""
if [ $DEBUG -eq 0 ]; then
return
fi
if [ "$fVerbose" == "" ]; then fVerbose=0 ; fi
dbg_echo ""
dbg_echo "==========================="
dbg_echo "env:"
/bin/env
dbg_echo ""
dbg_echo "==========================="
dbg_echo "mount:"
mount
dbg_echo "==========================="
dbg_echo "mapper:"
ls -l /dev/mapper
dbg_echo "==========================="
dbg_echo "Platform files:"
ls -l /lib/f5-platform/id-*
dbg_echo ""
# The rest is the verbose dump; skipped for fVerbose == 0.
if [ $fVerbose -eq 0 ]; then
return
fi
# All dirs we are interested in:
list_dirs=""
list_dirs="$list_dirs / /etc /shared"
# Show dirs:
for d in $list_dirs ; do
dbg_echo "==========================="
dbg_echo "ls -l $d"
ls -l $d
done
dbg_echo "==========================="
dbg_echo "Hidden files in the root:"
ls -l -d /.*
dbg_echo "==========================="
show_all_authorized_keys
}
##################################################################
#####################################################################
# Enables ssh access to 'admin' user.
# Final authorized_keys will be configured inside the boot-up script.
#
# Prepare the 'admin' account for ssh access: give it tmsh as login shell
# and lock its password field ("!!") so only key-based login is possible.
# Final authorized_keys are configured later by the boot-up script.
function enable_ssh_for_admin() {
# Update Linux system files.
dbg_echo "Replace the shell of admin user with tmsh:"
# Rewrites the shell field of the admin passwd entry to /usr/bin/tmsh.
sed -i "s/^\(admin:x:0:[0-9]*:Admin User:\/home\/admin:\)\(.*\)/\1\/usr\/bin\/tmsh/" /etc/passwd
# Replaces admin's password hash in /etc/shadow with "!!" (locked).
sed -i 's/\(admin:\)\([^:]*\)\(:.*\)/\1!!\3/' /etc/shadow
if [ $DEBUG -ge 1 ]; then
print_user_passwd_shadow "admin"
fi
}
#####################################################################
#####################################################################
# If the XenServer guest utilities RPM is installed, unregister its init
# service (xe-linux-distribution) since the daemon is not wanted in the
# image. Aborts via err_exit when chkconfig fails.
function stop_xe_guest_tool() {
    # `local` keeps these out of the global namespace (they leaked before).
    local toolname="xe-guest-utilities"
    local rc
    # check if xe-guest-tools are installed to begin with:
    dbg_echo "Check if the ${toolname} RPM is installed:"
    if rpm -q --quiet "${toolname}" ; then
        dbg_echo "${toolname} is found to be installed. Disabling as it's an unrequired daemon:"
        chkconfig --del xe-linux-distribution
        rc=$?
        if [ $rc -ne 0 ]; then
            err_exit "Disabling xe-linux-distribution daemon failed."
        fi
    fi
}
#####################################################################
#####################################################################
# Disables root account.
#
# Lock the root account by replacing its password hash in /etc/shadow
# with "!!" (no valid password, login disabled).
function disable_root() {
# Update Linux system files.
dbg_echo "Disable default password for root user:"
/bin/sed -i 's/\(root:\)\([^:]*\)\(:.*\)/\1!!\3/' /etc/shadow
if [ $DEBUG -ge 1 ]; then
print_user_passwd_shadow "root"
fi
}
#####################################################################
#####################################################################
# Test that all authorized_keys files are not present anymore.
#
# Verify the authorized_keys cleanup performed by remove_all_authorized_keys():
# no authorized_keys file may remain, every one must have a bigip.a.k.bak
# backup (count taken from the global authorized_keys_file_count), and each
# backup must be empty (at most the single newline written by the cleanup).
# Any violation aborts via err_exit.
function test_all_authorized_keys() {
    local lines=0
    local file_size=0
    local f
    dbg_echo "Test there are no authorized_keys files."
    lines=$(find / -name authorized_keys | wc -l)
    if [ $lines -ne 0 ]; then
        find / -name authorized_keys
        err_exit "Some authorized_keys files are still present"
    fi
    dbg_echo "Test we have backup files for authorized_keys."
    lines=$(find / -name bigip.a.k.bak | wc -l)
    if [ $lines -ne $authorized_keys_file_count ]; then
        ls -l $(find / -name bigip.a.k.bak)
        err_exit "Not enough authorized_keys backup files: needed $authorized_keys_file_count, found $lines"
    fi
    dbg_echo "Test all backup files for authorized_keys are empty."
    for f in $(find / -name bigip.a.k.bak); do
        # Skip all the symlinks:
        if [ ! -L $f ]; then
            # Byte count read directly from the file; the original parsed the
            # size out of `ls -l` with `cut -d" " -f 5`, which breaks whenever
            # ls pads columns with multiple spaces.
            file_size=$(wc -c < "$f")
            if [ $file_size -gt 1 ]; then
                ls -l $(find / -name bigip.a.k.bak)
                err_exit "File $f has to be empty, but it has size $file_size"
            fi
        fi
    done
}
#####################################################################
#####################################################################
# Show all authorized_keys files and print their content.
#
# Debug report: expect no authorized_keys files anywhere on the filesystem;
# print PASSED when none are found, otherwise log each file and its content.
function show_all_authorized_keys() {
local list_files
list_files=$(find / -name authorized_keys)
dbg_echo "PREP - Expect empty/non-existent authorized_keys files:"
if [ "$list_files" == "" ]; then
dbg_echo "PASSED"
else
log_echo "FAILED"
ls -l $list_files
for f in $(find / -name authorized_keys); do
log_cat $f
done
fi
}
#####################################################################
#####################################################################
# Empty all authorized_keys from the system except the original
# symlink in "/root/.ssh"
# Rename all the files because of Amazon security scanner.
#
# Empty all authorized_keys from the system except the original
# symlink in "/root/.ssh"
# Rename all the files because of Amazon security scanner.
#
# Side effect: sets the global authorized_keys_file_count, which
# test_all_authorized_keys() later checks against the backup-file count.
function remove_all_authorized_keys() {
local f
local list_authorized_keys
list_authorized_keys=$(find / -name authorized_keys)
# NOTE(review): when find matches nothing, `echo "" | wc -l` still yields 1,
# so the count would be 1 with zero files -- confirm this case cannot occur.
authorized_keys_file_count=$(echo "$list_authorized_keys" | wc -l)
dbg_echo "Cleanup content of authorized_keys files:"
# 1st pass - empty the content:
for f in $list_authorized_keys; do
dbg_echo "Emptying $f"
if [ -e $f ]; then
echo "" > $f
fi
done
# 2nd pass - rename the files as some of them are symlinks:
for f in $list_authorized_keys; do
# Rename all the files and the symlinks:
if [ -e $f -o -L $f ]; then
dbg_echo "Renaming $f"
mv -v $f $(dirname $f)/bigip.a.k.bak
rc=$?
if [ $rc -ne 0 ]; then
err_exit "Cannot move file: $f $(dirname $f)/bigip.a.k.bak"
fi
fi
done
}
#####################################################################
#####################################################################
# Relocate the hourly-licensing marker (when present) from /etc to
# /shared/vadc so it survives into the runtime image.
function process_hb() {
    dbg_echo "Starting ${FUNCNAME[0]}"
    # Intentionally global, matching the original script's scoping.
    INSTANCE_REG_FILE="/shared/vadc/hourly.licensing"
    [ -f /etc/hourly.licensing ] || return 0
    mv -vf /etc/hourly.licensing $INSTANCE_REG_FILE
    dbg_cat $INSTANCE_REG_FILE
}
#####################################################################
#####################################################################
# Configuration steps needed to prepare Cloud images.
#
# Orchestrate the one-time cloud-image preparation: lock down accounts,
# move the hourly-licensing marker, scrub ssh authorized_keys (and verify
# the scrub), and disable the Xen guest daemon. Debug env dumps bracket
# the work when DEBUG is enabled.
function final_cloud_prepare() {
# NOTE(review): local d is declared but never used in this function.
local d
dbg_echo "${FUNCNAME[0]} - start"
# Show initial environment:
dbg_show_env 0
enable_ssh_for_admin
disable_root
process_hb
# Order matters: remove_all_authorized_keys sets the global count that
# test_all_authorized_keys then validates.
remove_all_authorized_keys
test_all_authorized_keys
stop_xe_guest_tool
# Show final environment:
dbg_show_env 1
dbg_echo "${FUNCNAME[0]} - end"
}
#####################################################################
# Script entry: banner, run the preparation, banner again, exit 0 so the
# calling rc.sysinit.mfg step proceeds to SELinux relabeling.
for i in {1..5} ; do dbg_echo "" ; done
echo "##################################################"
echo "final_cloud_prepare.sysinit.mfg - $(date) - START"
echo ""
# At this time /shared is not mounted, so don't try to look for
# /shared/vadc/.hypervisor_type file.
# Configuration steps needed to prepare Cloud images.
final_cloud_prepare
echo ""
echo "final_cloud_prepare.sysinit.mfg - $(date) - DONE"
echo "#################################################"
for i in {1..5} ; do dbg_echo "" ; done
exit 0
| true
|
fd03ec49c392fc648e3bf623e7d2986908f5783d
|
Shell
|
rhabbachi/notlocal
|
/commands/dc
|
UTF-8
| 602
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
## Initialize a Docksal powered DKAN 7.x site
##
## Usage: fin notlocal dc

# Abort if anything fails
set -e

#-------------------------- Helper functions --------------------------------
# Quoted: an ADDON_ROOT containing spaces must not be word-split.
source "$ADDON_ROOT/inc/common.inc"
#-------------------------- END: Helper functions --------------------------------

# The ACME contact address is mandatory for the system stack below.
notlocal_acme_email=$(fin config get NOTLOCAL_ACME_EMAIL --global)

if [ -z "${notlocal_acme_email}" ]; then
  echo-red "Empty NOTLOCAL_ACME_EMAIL global variable! Please update global docksal.env"
  exit 1
fi

# "$@" forwards each caller argument as its own word; the previous unquoted
# $@ broke arguments containing whitespace.
fin dc -p system -f "$ADDON_ROOT/stacks/system/notlocal.yml" "$@"
| true
|
fc1ccbe40c60bbefff1c75d6a0032dee4b142c31
|
Shell
|
harisokanovic/archlinux-packages
|
/libsignon-glib/repos/extra-i686/PKGBUILD
|
UTF-8
| 834
| 2.546875
| 3
|
[] |
no_license
|
# $Id$
# Maintainer: Antonio Rojas <arojas@archlinux.org>
# Contributor: Maxime Gauduin <alucryd@archlinux.org>
# Contributor: Balló György <ballogyor+arch@gmail.com>

# PKGBUILD for libsignon-glib; parsed by makepkg, so all variable and
# function names below follow the PKGBUILD contract.
pkgname=libsignon-glib
pkgver=1.12
pkgrel=2
pkgdesc='GLib-based client library for applications handling account authentication through the Online Accounts Single Sign-On service'
arch=(i686 x86_64)
url='https://gitlab.com/accounts-sso/libsignon-glib'
license=(LGPL2.1)
depends=(signon)
makedepends=(python2-gobject gtk-doc gobject-introspection)
# GitLab archive endpoint; ?ref= pins the exact tag for $pkgver.
source=("https://gitlab.com/accounts-sso/$pkgname/repository/archive.tar.gz?ref=$pkgver")
md5sums=('e12a123d9093a62700b46fa2c6ed8e56')

build() {
# The GitLab tarball unpacks into a $pkgname.git directory.
cd $pkgname.git
# Build requires python2 explicitly (autogen defaults may pick python3).
export PYTHON='/usr/bin/python2'
./autogen.sh
./configure \
--prefix='/usr' \
--localstatedir='/var' \
--sysconfdir='/etc'
make
}

package() {
cd $pkgname.git
make DESTDIR="$pkgdir" install
}
| true
|
904eb209ee072388966695d7e80523255e9182a8
|
Shell
|
kasiakepka/test-infra
|
/prow/scripts/cluster-integration/kyma-serverless-metrics-nightly.sh
|
UTF-8
| 4,267
| 3.296875
| 3
|
[
"Unlicense",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Nightly serverless integration + metrics job: connects to an existing
# azure/gcp cluster, runs the serverless test helm chart, polls the test
# job to completion, collects metrics, and cleans up.
set -o errexit
set -o pipefail # Fail a pipe if any sub-command fails.

export TEST_INFRA_SOURCES_DIR="${KYMA_PROJECT_DIR}/test-infra"
export TEST_NAMESPACE="serverless-integration"
export TEST_JOB_NAME="serverless-tests"

# shellcheck source=prow/scripts/lib/utils.sh
source "${TEST_INFRA_SOURCES_DIR}/prow/scripts/lib/utils.sh"
# shellcheck source=prow/scripts/lib/gcp.sh
source "$TEST_INFRA_SOURCES_DIR/prow/scripts/lib/gcp.sh"
# shellcheck source=prow/scripts/lib/serverless-shared-k3s.sh
source "$TEST_INFRA_SOURCES_DIR/prow/scripts/lib/serverless-shared-k3s.sh"

# Fail fast when mandatory environment is missing.
requiredVars=(
CLUSTER_PROVIDER
INPUT_CLUSTER_NAME
KYMA_PROJECT_DIR
)
utils::check_required_vars "${requiredVars[@]}"
# Authenticate against Azure and fetch kubeconfig credentials for the
# AKS cluster named by INPUT_CLUSTER_NAME in resource group RS_GROUP.
function connect_to_azure_cluster() {
# shellcheck source=prow/scripts/lib/azure.sh
source "${TEST_INFRA_SOURCES_DIR}/prow/scripts/lib/azure.sh"
# NOTE: reuses (overwrites) the global requiredVars array.
requiredVars=(
AZURE_CREDENTIALS_FILE
RS_GROUP
)
utils::check_required_vars "${requiredVars[@]}"
az::authenticate \
-f "$AZURE_CREDENTIALS_FILE"
az aks get-credentials --resource-group "${RS_GROUP}" --name "${INPUT_CLUSTER_NAME}"
}
# Authenticate against GCP and fetch the kubeconfig for the GKE cluster
# named by INPUT_CLUSTER_NAME.
function connect_to_gcp_cluster() {
# shellcheck source=prow/scripts/lib/gcp.sh
source "${TEST_INFRA_SOURCES_DIR}/prow/scripts/lib/gcp.sh"
# NOTE: reuses (overwrites) the global requiredVars array.
requiredVars=(
CLOUDSDK_CORE_PROJECT
CLOUDSDK_COMPUTE_REGION
CLOUDSDK_COMPUTE_ZONE
GOOGLE_APPLICATION_CREDENTIALS
)
utils::check_required_vars "${requiredVars[@]}"
log::info "Authenticate"
gcp::authenticate \
-c "${GOOGLE_APPLICATION_CREDENTIALS}"
log::info "get kubeconfig"
# NOTE(review): PROVISION_REGIONAL_CLUSTER is not in requiredVars and may be
# unset here -- confirm gcp::get_cluster_kubeconfig tolerates an empty -r.
gcp::get_cluster_kubeconfig \
-c "$INPUT_CLUSTER_NAME" \
-p "$CLOUDSDK_CORE_PROJECT" \
-z "$CLOUDSDK_COMPUTE_ZONE" \
-R "$CLOUDSDK_COMPUTE_REGION" \
-r "$PROVISION_REGIONAL_CLUSTER"
}
# Dispatch cluster authentication based on CLUSTER_PROVIDER
# ("azure" or "gcp"); any other value is fatal.
function connect_to_cluster() {
  if [[ $CLUSTER_PROVIDER == "azure" ]]; then
    connect_to_azure_cluster
  elif [[ $CLUSTER_PROVIDER == "gcp" ]]; then
    connect_to_gcp_cluster
  else
    # Fixed: the message previously named GARDENER_PROVIDER even though the
    # variable actually checked here is CLUSTER_PROVIDER.
    log::error "CLUSTER_PROVIDER ${CLUSTER_PROVIDER} is not yet supported"
    exit 1
  fi
}
# Create the test namespace (labelled for later cleanup) and install the
# k3s-tests helm chart that launches the serverless integration test job.
function run_serverless_integration_tests() {
log::info "Running Serverless Integration tests"
pushd /home/prow/go/src/github.com/kyma-project/kyma/resources/serverless/
log::info "Creating test namespace"
kubectl create ns "${TEST_NAMESPACE}"
# The label lets clean_serverless_integration_tests find and delete the ns.
kubectl label ns "${TEST_NAMESPACE}" created-by=serverless-controller-manager-test
helm install serverless-test "charts/k3s-tests" -n "${TEST_NAMESPACE}" \
-f values.yaml --set jobName="${TEST_JOB_NAME}" \
--set testSuite="serverless-integration"
# NOTE(review): `git checkout -` switches back to the previous branch; no
# prior checkout is visible in this script -- confirm it is intentional.
git checkout -
popd
}
# Deploy the metrics-collector job into the test namespace, wait (up to 5
# minutes) for it to complete, then print its logs.
function run_serverless_metrics_collector() {
log::info "Collecting serverless controller metrics"
kubectl -n "${TEST_NAMESPACE}" apply -f "${TEST_INFRA_SOURCES_DIR}/prow/scripts/cluster-integration/serverless-metrics-collector.yaml"
kubectl -n "${TEST_NAMESPACE}" wait job/metrics-collector --for=condition=Complete=True --timeout=300s
echo
kubectl logs -n "${TEST_NAMESPACE}" -l job-name=metrics-collector
echo
}
# Delete every namespace labelled by run_serverless_integration_tests;
# a no-op when none exist, so it is safe to call before the tests too.
function clean_serverless_integration_tests() {
log::info "Removing test namespace"
kubectl delete ns -l created-by=serverless-controller-manager-test
}
# Main flow: connect, clean leftovers, run tests, poll the job, collect
# results and metrics, clean up, and exit with the test job's status.
connect_to_cluster
# in case of failed runs
clean_serverless_integration_tests
run_serverless_integration_tests
job_status=""
# helm does not wait for jobs to complete even with --wait
# TODO but helm@v3.5 has a flag that enables that, get rid of this function once we use helm@v3.5
# Poll the job conditions every 5s until it reports Failed or Complete.
while true; do
echo "Test job not completed yet..."
[[ $(kubectl -n "${TEST_NAMESPACE}" get jobs "${TEST_JOB_NAME}" -o jsonpath='{.status.conditions[?(@.type=="Failed")].status}') == "True" ]] && job_status=1 && echo "Test job failed" && break
[[ $(kubectl -n "${TEST_NAMESPACE}" get jobs "${TEST_JOB_NAME}" -o jsonpath='{.status.conditions[?(@.type=="Complete")].status}') == "True" ]] && job_status=0 && echo "Test job completed successfully" && break
sleep 5
done
# NOTE(review): collect_results is presumably provided by
# serverless-shared-k3s.sh sourced above -- confirm.
collect_results "${TEST_JOB_NAME}" "${TEST_NAMESPACE}"
run_serverless_metrics_collector
clean_serverless_integration_tests
echo "Exit code ${job_status}"
exit $job_status
| true
|
661b463c6d74cacfe22c53d0818f42c9db12c4ad
|
Shell
|
zooniverse/cellect_panoptes
|
/benchmark/load_and_select.sh
|
UTF-8
| 759
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Benchmark the cellect workflow API: time the subject load, then seen-set
# loads and selections for known heavy ("power") users, then selections for
# a thousand users with no seen sets.
curl_prefix="http://localhost:4000/workflows/1"
declare -a power_users=("3685" "9616" "4013" "6538" "1863")
# load subjects for the workflow
time curl -v -H 'Accept: application/json' "${curl_prefix}/status"
# load all power users seen sets
for i in "${power_users[@]}"; do
time curl -X POST -H 'Accept: application/json' "${curl_prefix}/users/$i/load"
done
# get some subjects for the power users
for i in "${power_users[@]}"; do
time curl -H 'Accept: application/json' "${curl_prefix}/?user_id=$i"
done
#
# load a thousand empty users and select over them
for i in {1..1000}; do
# time curl -X POST -H 'Accept: application/json' "${curl_prefix}/users/$i/load"
time curl -H 'Accept: application/json' "${curl_prefix}/?user_id=$i"
done
| true
|
6ec76f630c5b7a3cbd5dcfbdc19a795f4d995803
|
Shell
|
Mathias-Boulanger/gff_toolbox
|
/gff_toolbox.sh
|
UTF-8
| 74,645
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
#Made by Mathias Boulanger - 2019/05/30
#gff_toolbox.sh
#version 1.3
#use on gff file structure

ARGS=1 #The script need 1 argument
NAMEPROG=$(basename ${0}) #Name of the program
DATA=$1 #File in argument

# Constants and colors -- defined BEFORE the argument checks below; the
# original defined them afterwards, so the usage/error messages expanded
# ${RED}/${GREEN}/${NOCOLOR} to empty strings instead of color codes.
EXTENSION="gff" #Extension file necessary to run this script
SPIN='-\|/' #Waiting characters
RED='\033[1;31m'
GREEN='\033[1;32m'
ORANGE='\033[0;33m'
NOCOLOR='\033[0m'

##Check the ability to work
if [[ $# -ne $ARGS ]]; then
	printf "\n${GREEN}Usage:${NOCOLOR} ${NAMEPROG} target_file.gff\n\n"
	exit 1
elif [[ ! -f $DATA ]]; then
	# Message typo fixed: "does not exit" -> "does not exist".
	printf "\n${RED}Error:${NOCOLOR} the file '${DATA}' does not exist!\nPlease use an existing file.\n${GREEN}Usage:${NOCOLOR} ${NAMEPROG} target_file.gff\n\n"
	exit 1
elif [[ $(wc -l < "$DATA") -eq 0 ]]; then
	# `wc -l < file` yields a bare count; the original compared the full
	# "0 filename" output string, which depends on wc's column padding.
	printf "\n${RED}Error:${NOCOLOR} the file is empty!\n\n"
	exit 1
fi

NAMEDATA=$(basename ${DATA})
##Resize the windows
printf '\033[8;40;175t'

## Verify that every external command this script relies on is available;
## abort with a count of the missing ones otherwise.
printf "Checking for required commands\n\n"
needed_commands="awk sed grep head tail uniq wc rm sleep read kill seq cp mv" ;
req=0
# Guard: the probes below rely on `command -v` itself.
if [[ "$(command -v command)" == "" ]]; then
	printf "\n${ORANGE}WARNING:${NOCOLOR}the command 'command' is not found. Check requirements skipped !\n${NAMEPROG} may not works properly!\n"
else
	for tool in ${needed_commands}; do
		printf "checking for ${tool} ... "
		if [[ "$(command -v ${tool})" == "" ]]; then
			printf "${RED}NOT FOUND!${NOCOLOR}\n"
			((req++))
		else
			printf "${GREEN}OK${NOCOLOR}\n"
		fi
	done
	printf "\n"
fi
if [[ $req -ne 0 ]]; then
	# Singular/plural phrasing depends on how many commands are missing.
	if [[ $req -eq 1 ]]; then
		printf "\n${RED}Error:${NOCOLOR} ${req} command is missing to execute ${NAMEPROG} properly!\nPlease install it on your system to use ${NAMEPROG}\n\n"
	else
		printf "\n${RED}Error:${NOCOLOR} ${req} commands are missing to execute ${NAMEPROG} properly!\nPlease install them on your system to use ${NAMEPROG}\n\n"
	fi
	exit 1
fi
##Checking the presence of commented lines
# If the gff contains '#' comment lines, write a copy without them (asking
# before overwriting an existing copy) and point DATA at the stripped file.
COMMENTLINES=$(grep "^#" $DATA | wc -l)
if [[ $COMMENTLINES -ne 0 ]]; then
if [[ $COMMENTLINES -eq 1 ]]; then
printf "\n${ORANGE}WARNING:${NOCOLOR} the file presents 1 commented line.\n"
else
printf "\n${ORANGE}WARNING:${NOCOLOR} the file presents ${COMMENTLINES} commented lines.\n"
fi
printf "To be sure that commented lines will not interfere with ${NAMEPROG}\nA new file without commented lines will be create.\n"
NAMEFILEUNCOM=${NAMEDATA%%.*}_withoutCommentedLines.$EXTENSION
if [[ -f $NAMEFILEUNCOM ]]; then
# A stripped copy already exists: ask before overwriting it.
while true; do
printf "\n"
printf "The directory already presents a file (${NAMEFILEUNCOM}) sorted without commented lines.\nDo you want to overwrite it? (Y/n)\n"
read ANSWER
printf "\n"
case $ANSWER in
[yY][eE][sS]|[yY]|"" )
# Strip in the background and animate a spinner while the sed PID lives.
sed '/^#/d' $DATA > $NAMEFILEUNCOM & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rOverwrite of the the file ${NAMEFILEUNCOM} ${SPIN:$i:1}"
sleep .1
done
printf "\n\n"
DATA=${NAMEFILEUNCOM}
break;;
[nN][oO]|[nN] )
printf "%s\n" "The file already presents in the directory will be use for the next step." ""
DATA=${NAMEFILEUNCOM}
break;;
* )
printf "\033c"
printf "%s\n" "" "Please answer yes or no." "";;
esac
done
else
# No existing copy: create it, with the same background+spinner pattern.
sed '/^#/d' $DATA > $NAMEFILEUNCOM & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rCreation of file without commented lines ${SPIN:$i:1}"
sleep .1
done
printf "\n\n"
DATA=${NAMEFILEUNCOM}
fi
fi
##Trash all tmp file if it is exist
rm -f /tmp/${NAMEPROG}_*.tmp
##Check the structure of the file
head -n 1 $DATA | awk 'BEGIN{FS="\t"}{print NF}' | uniq >> /tmp/${NAMEPROG}_check.tmp &&
tail -n 1 $DATA | awk 'BEGIN{FS="\t"}{print NF}' | uniq >> /tmp/${NAMEPROG}_check.tmp &&
awk 'BEGIN{FS="\t"}{print NF}' $DATA | uniq | wc -l >> /tmp/${NAMEPROG}_check.tmp & #could be long for big file
PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rChecking the ability to work ${SPIN:$i:1}"
sleep .1
done
printf "\n\n"
printf "%s\n" "$FIRSTLINE" "$LASTLINE" "$NAMEFILEUNCOM"
if [[ "$(sed -n '1p' /tmp/${NAMEPROG}_check.tmp)" -ne 9 ]]; then
printf "\n${RED}Error:${NOCOLOR} the first line of the file does not present 9 columns!\n\n"
rm /tmp/${NAMEPROG}_check.tmp
exit 1
elif [[ "$(sed -n '2p' /tmp/${NAMEPROG}_check.tmp)" -ne 9 ]]; then
printf "\n${RED}Error:${NOCOLOR} the last line of the file does not present 9 columns!\n\n"
rm /tmp/${NAMEPROG}_check.tmp
exit 1
elif [[ "$(sed -n '3p' /tmp/${NAMEPROG}_check.tmp)" -ne 1 ]]; then
printf "%s\n" "Error: some lines of the file do not present 9 columns!" ""
rm /tmp/${NAMEPROG}_check.tmp
exit 1
fi
rm /tmp/${NAMEPROG}_check.tmp
##Start to work
printf "\033c"
if [[ ${DATA##*.} != $EXTENSION ]]; then
printf "\n${ORANGE}WARNING:${NOCOLOR} The file extension should be .${EXTENSION}\nMake sure that the file present a gff structure.\n"
fi
printf "%s\n" "" "Yeah, let's play with gff files..." ""
r=0
while true; do
#Choice of the tool
while true; do
#Remind of gff struture
HEADER="%-10s\t %-10s\t %-14s\t %-5s\t %-5s\t %-5s\t %-6s\t %-12s\t %-66s\n"
STRUCTURE="%-10s\t %-10s\t %-14s\t %5d\t %5d\t %-5s\t %-6s\t %-12s\t %-66s\n"
divider=======================================================================================
divider=$divider$divider$divider
width=162
printf "%s\n" "" "Classical gff3 file should present the structure as follow:" ""
printf "$HEADER" "SeqID" "Source" "Feature (type)" "Start" "End" "Score" "Strand" "Frame (Phase)" "Attributes"
printf "%$width.${width}s\n" "$divider"
printf "$STRUCTURE" \
"NC_XXXXXX.X" "RefSeq" "gene" "1" "1728" "." "+" "." "ID=geneX;...;gbkey=Gene;gene=XXX;gene_biotype=coding_protein;..." \
"chrX" "." "exon" "1235" "1298" "." "-" "." "ID=idX;...;gbkey=misc_RNA;gene=XXX;...;..." \
"NC_XXXXXX.X" "BestRefSeq" "CDS" "50" "7500" "." "+" "1" "ID=idX;...;gbkey=CDS;gene=XXX;...;..."
printf "%s\n" "" "If you would like more informations on the gff file structure visit this web site: http://gmod.org/wiki/GFF3" ""
#choice
printf "\n"
printf "Which tool do you would like to use on ${GREEN}${DATA##*/}${NOCOLOR} ?\n"
printf "\n"
printf "%s\n" "=================================== Tools to extract information from gff file ===================================" ""
printf "%s\n" "1 - Classical Human Chromosomes filter (specific to human genome)" "2 - Promoter regions extractor (specific to gene regions)" "3 - Extract lines with specific sources present in my file (column 2)" "4 - Extract lines with specific type of region present in my file (column 3)" "5 - Attributes explorer (Extract list or lines with specific attribute: IDs, gbkey, biotype, gene list) (column 9)" "6 - Sequence extender (Add an interval to the start and the end of all sequences)" "7 - Duplicate explorer" ""
printf "%s\n" "=========================================== Tool to transform gff file ===========================================" ""
printf "%s\n" "8 - GFF to BED file" "" ""
printf "%s\n %s\n \r%s" "If you would like to quit, please answer 'q' or 'quit'" "" "Please, enter the number of the chosen tool: "
read ANSWER
case $ANSWER in
[1-8] )
printf "\n"
t=$ANSWER
break;;
[qQ]|[qQ][uU][iI][tT] )
printf "\033c"
printf "%s\n" "" "Thank you for using GFF toolbox!"
rm -f /tmp/${NAMEPROG}_*.tmp
if [[ $r -ne 0 ]]; then
printf "%s\n" "" "All files generated by ${NAMEPROG} have been generated in the current directory."
fi
printf "%s\n" "If you got any problems when using this script or if you have any comments, please feel free to contact mathias_boulanger_17@hotmail.com" ""
exit 0
;;
* )
printf "\033c"
printf "%s\n" "" "Please answer a tool number or quit." ""
;;
esac
done
#######################################
# Ask whether the next tool run should use the freshly generated file.
# Globals:
#   NAMEPROG (read)  - program name, used in the prompt
#   NAMEFILE (read)  - path of the file generated by the last tool
#   DATA     (written) - set to NAMEFILE on yes (default), left unchanged on no
#   ANSWER   (written) - last raw user answer
# Outputs: prompt on stdout
# Returns: 0 always
#######################################
question_end () {
	while true; do
		printf "%s\n" "Do you want to continue to use ${NAMEPROG} with the new generated file? (Y/n)"
		# -r: do not let backslashes in the answer be interpreted as escapes.
		read -r ANSWER
		printf "\n"
		case $ANSWER in
			[yY]|[yY][eE][sS]|"" )
				DATA=${NAMEFILE}
				break;;
			[nN]|[nN][oO] )
				# Keep working on the current $DATA.
				break;;
			* )
				printf "\033c"
				printf "%s\n" "" "Please answer yes or no." ""
				;;
		esac
	done
}
##Tool 1: Classical Human Chromosomes Filter
	# Tool 1: filter on the canonical human chromosomes (chr1-22, X, Y, M).
	# SeqIDs may be RefSeq accessions (NC_XXXXXX.X) or UCSC names (chrX);
	# when both styles coexist the user first homogenizes them to one style.
	while true; do
		if [[ t -eq 1 ]]; then
			printf "\033c"
			printf "${NAMEPROG} consider only main human chromosomes chr1 to chr22, chrX, chrY and chrM named as follow: NC_XXXXXX.X or chrX\n"
			# Count lines in each naming style to detect mixed files.
			NCDATA=$(grep "^NC" $DATA | wc -l)
			CHRDATA=$(grep "^chr" $DATA | wc -l)
			# x=1 marks that a homogenized intermediate file NAMEFILE1 exists.
			x=0
			if [[ $NCDATA -eq 0 && $CHRDATA -eq 0 ]]; then
				printf "\033c"
				printf "\n${RED}Error:${NOCOLOR} the file does not contain classical human chromosome names (NC_XXXXXX.X or chrX)!\n"
				break
			elif [[ $NCDATA -gt 0 && $CHRDATA -gt 0 ]]; then
				# Mixed naming: ask which style to keep.
				while true; do
					printf "%s\n" "Human chromosome names in the file are named by 2 different ways ('NC_XXXXXX.X' and 'chrX')" "Which name do you want to keep in the gff file to homogenize the chromosome SeqIDs? (NC or chr)"
					read ANSWER
					printf "\n"
					CHRNAMES=( "chr1" "chr2" "chr3" "chr4" "chr5" "chr6" "chr7" "chr8" "chr9" "chr10" "chr11" "chr12" "chr13" "chr14" "chr15" "chr16" "chr17" "chr18" "chr19" "chr20" "chr21" "chr22" "chrX" "chrY" "chrM" )
					x=1
					# e=1 flags an unrecoverable SeqID problem (bail out of tool 1).
					e=0
					case $ANSWER in
						[nN][cC] )
							SORTCHR="^NC"
							NAMEFILE1=${NAMEDATA%%.*}_formatNC.$EXTENSION
							NCNAMES=( "NC_000001" "NC_000002" "NC_000003" "NC_000004" "NC_000005" "NC_000006" "NC_000007" "NC_000008" "NC_000009" "NC_000010" "NC_000011" "NC_000012" "NC_000013" "NC_000014" "NC_000015" "NC_000016" "NC_000017" "NC_000018" "NC_000019" "NC_000020" "NC_000021" "NC_000022" "NC_000023" "NC_000024" "NC_012920" )
							cut -f1 $DATA | grep "^NC" | sort | uniq > /tmp/${NAMEPROG}_NC_names.tmp
							NUMNCNAMES=$(uniq /tmp/${NAMEPROG}_NC_names.tmp | wc -l)
							# Drop entries whose accession part (before the '.')
							# is not 9 characters, i.e. not an NC_XXXXXX accession.
							for (( i = 0; i < ${NUMNCNAMES}+1; i++ )); do
								if [[ $(sed -n $i'p' /tmp/${NAMEPROG}_NC_names.tmp | awk '{split($1, subfield, "."); print subfield[1]}' | wc -c) -ne 10 ]]; then
									sed -i -e $i'd' /tmp/${NAMEPROG}_NC_names.tmp
								fi
							done
							if [[ $NUMNCNAMES -gt 25 ]]; then
printf "\n${RED}Error:${NOCOLOR} More than 25 classical human chromosome names (NC_XXXXXX.X) are detected!\nPlease check the SeqIDs content of the file\nThis are NC names found in the file :" "" "$(cat /tmp/${NAMEPROG}_NC_names.tmp)" ""
								e=1
								break
							elif [[ $(awk '{split($1, subfield, "."); print subfield[1]}' /tmp/${NAMEPROG}_NC_names.tmp | uniq | wc -l) -ne $(awk '{split($1, subfield, "."); print subfield[1]}' /tmp/${NAMEPROG}_NC_names.tmp | wc -l) ]]; then
								# Same accession with two version suffixes is ambiguous.
								printf "%s\n" "${RED}Error:${NOCOLOR} One of your NC name present different versions! (ex: NC_000001.1 and NC_000001.2)" "Please check the SeqIDs content of the file" "This are NC names found in the file :" "" "$(cat /tmp/${NAMEPROG}_NC_names.tmp)" ""
								e=1
								break
							else
								# Pad the list to 25 entries, then insert the canonical
								# accession for every chromosome missing from the file so
								# NCFILENAMES[0..24] lines up with CHRNAMES[0..24].
								if [[ $NUMNCNAMES -lt 25 ]]; then
									N=$(( 25 - ${NUMNCNAMES} ))
									for (( i = 1; i < ${N}; i++ )); do
										printf "\n" >> /tmp/${NAMEPROG}_NC_names.tmp
									done
								fi
								for (( i = 0; i < 26; i++ )); do
									if [[ "$(sed -n $i'p' /tmp/${NAMEPROG}_NC_names.tmp | awk '{split($1, subfield, "."); print subfield[1]}')" != ${NCNAMES[$i-1]} ]]; then
										sed -i -e $i'i\
'${NCNAMES[$i-1]}'
' /tmp/${NAMEPROG}_NC_names.tmp
									fi
								done
								sed -i -e '/^$/d' /tmp/${NAMEPROG}_NC_names.tmp
								# BSD sed -i -e leaves a backup file suffixed "-e"; remove it.
								rm /tmp/${NAMEPROG}_NC_names.tmp-e
								for (( i = 1; i < 26; i++ )); do
									eval NCFILENAMES[$i-1]="$(sed -n $i'p' /tmp/${NAMEPROG}_NC_names.tmp)"
								done
							fi
							rm /tmp/${NAMEPROG}_NC_names.tmp
							if [[ -f $NAMEFILE1 ]]; then
								while true; do
									printf "\n"
									printf "The directory already present a file (${NAMEFILE1}) homogenized by NC.\nDo you want to overwrite this file? (Y/n)\n"
									read ANSWER
									printf "\n"
									case $ANSWER in
										[yY][eE][sS]|[yY]|"" )
											# Rewrite chrN SeqIDs to the matching NC accession,
											# in the background, while a spinner runs.
											cp $DATA $NAMEFILE1
											for i in $(seq 0 24) ; do
												A="${NCFILENAMES[$i]}"
												B="${CHRNAMES[$i]}"
												awk 'BEGIN{FS="\t"; OFS="\t"}{if ($1=="'$B'") print "'$A'", $2, $3, $4, $5, $6, $7, $8, $9; else print $0}' $NAMEFILE1 > /tmp/${NAMEPROG}_${NAMEFILE1}.tmp && mv /tmp/${NAMEPROG}_${NAMEFILE1}.tmp $NAMEFILE1
											done & PID=$!
											i=0 &
											while kill -0 $PID 2>/dev/null; do
												i=$(( (i+1) %4 ))
												printf "\rHomogenization of the file by 'NC' ${SPIN:$i:1}"
												sleep .1
											done
printf "\n\n${GREEN}${DATA##*/}${NOCOLOR} has been re-homogenize by 'NC' chromosome names." ""
											break;;
										[nN][oO]|[nN] )
											printf "%s\n" "" "The file already present in the directory will be use for the next steps." ""
											break;;
										* )
											printf "\033c"
											printf "%s\n" "" "Please answer yes or no." "";;
									esac
								done
							else
								# First run: same chr -> NC rewrite, no overwrite prompt.
								cp $DATA $NAMEFILE1
								for i in $(seq 0 24) ; do
									A="${NCFILENAMES[$i]}"
									B="${CHRNAMES[$i]}"
									awk 'BEGIN{FS="\t"; OFS="\t"}{if ($1=="'$B'") print "'$A'", $2, $3, $4, $5, $6, $7, $8, $9; else print $0}' $NAMEFILE1 > /tmp/${NAMEPROG}_${NAMEFILE1}.tmp && mv /tmp/${NAMEPROG}_${NAMEFILE1}.tmp $NAMEFILE1
								done & PID=$!
								i=0 &
								while kill -0 $PID 2>/dev/null; do
									i=$(( (i+1) %4 ))
									printf "\rHomogenization of the file by 'NC' ${SPIN:$i:1}"
									sleep .1
								done
								printf "\n\n${GREEN}${DATA##*/}${NOCOLOR} has been homogenize by 'NC' chromosome names.\n"
							fi
							break;;
						[cC][hH][rR] )
							# Keep UCSC-style names: rewrite NC accessions to chrN.
							SORTCHR="^chr"
							NAMEFILE1=${NAMEDATA%%.*}_formatChr.$EXTENSION
							NCNAMES=( "NC_000001" "NC_000002" "NC_000003" "NC_000004" "NC_000005" "NC_000006" "NC_000007" "NC_000008" "NC_000009" "NC_000010" "NC_000011" "NC_000012" "NC_000013" "NC_000014" "NC_000015" "NC_000016" "NC_000017" "NC_000018" "NC_000019" "NC_000020" "NC_000021" "NC_000022" "NC_000023" "NC_000024" "NC_012920" )
							if [[ -f $NAMEFILE1 ]]; then
								while true; do
									printf "\n"
									printf "The directory already present a file (${NAMEFILE1}) homogenized by NC.\nDo you want to overwrite this file? (Y/n)\n"
									read ANSWER
									printf "\n"
									case $ANSWER in
										[yY][eE][sS]|[yY]|"" )
											cp $DATA $NAMEFILE1
											for i in $(seq 0 24) ; do
												A="${NCNAMES[$i]}"
												B="${CHRNAMES[$i]}"
												awk 'BEGIN{FS="\t"; OFS="\t"}{split($1, subfield, "."); if (subfield[1]=="'$A'") print "'$B'", $2, $3, $4, $5, $6, $7, $8, $9; else print $0}' $NAMEFILE1 > /tmp/${NAMEPROG}_${NAMEFILE1}.tmp && mv /tmp/${NAMEPROG}_${NAMEFILE1}.tmp $NAMEFILE1
											done & PID=$!
											i=0 &
											while kill -0 $PID 2>/dev/null; do
												i=$(( (i+1) %4 ))
												printf "\rHomogenization of the file by 'chr' ${SPIN:$i:1}"
												sleep .1
											done
											printf "${GREEN}${DATA##*/}${NOCOLOR} has been re-homogenize by 'chr' chromosome names.\n"
											break;;
										[nN][oO]|[nN] )
											printf "\n\n${GREEN}${NAMEFILE1}${NOCOLOR} already present in the directory will be use for the next steps.\n"
											break;;
										* )
											printf "\033c"
											printf "%s\n" "" "Please answer yes or no." "";;
									esac
								done
							else
								cp $DATA $NAMEFILE1
								for i in $(seq 0 24) ; do
									A="${NCNAMES[$i]}"
									B="${CHRNAMES[$i]}"
									awk 'BEGIN{FS="\t"; OFS="\t"}{split($1, subfield, "."); if (subfield[1]=="'$A'") print "'$B'", $2, $3, $4, $5, $6, $7, $8, $9; else print $0}' $NAMEFILE1 > /tmp/${NAMEPROG}_${NAMEFILE1}.tmp && mv /tmp/${NAMEPROG}_${NAMEFILE1}.tmp $NAMEFILE1
								done & PID=$!
								i=0 &
								while kill -0 $PID 2>/dev/null; do
									i=$(( (i+1) %4 ))
									printf "\rHomogenization of the file by 'chr' ${SPIN:$i:1}"
									sleep .1
								done
								printf "${GREEN}${DATA##*/}${NOCOLOR} has been homogenize by 'chr' chromosome names.\n"
							fi
							break;;
						* )
							printf "\033c"
							printf "%s\n" "" "Please answer NC or chr." "";;
					esac
				done
				# Bail out of tool 1 on an unrecoverable SeqID problem.
				if [[ $e -eq 1 ]]; then
					break
				fi
			elif [[ $NCDATA -eq 0 && $CHRDATA -gt 0 ]]; then
				SORTCHR="^chr"
			elif [[ $NCDATA -gt 0 && $CHRDATA -eq 0 ]]; then
				SORTCHR="^NC"
			fi
			# Nothing to filter when every line already matches the kept style.
			if [[ $(grep "$SORTCHR" $DATA | wc -l) -eq $(cat $DATA | wc -l) ]]; then
				printf "\033c"
				printf "%s\n" "SeqIDs of your file are composed exclusively with classical human chromosomes." "You do not need to sort the file by classical human chromosomes." ""
				break
			fi
			# Keep either the main-chromosome lines (grep) or the rest (grep -v).
			while true; do
				printf "\n"
				printf "Do you want to keep main human chromosomes or the others SeqIDs? (main/other)\n"
				read ANSWER
				printf "\n"
				case $ANSWER in
					[mM]|[mM][aA][iI][nN] )
						# x=1 means the homogenized file NAMEFILE1 is the real input.
						if [[ $x -eq 0 ]]; then
							NAMEFILE=${NAMEDATA%%.*}_mainChrom.$EXTENSION
							DATA=${DATA}
						elif [[ $x -eq 1 ]]; then
							NAMEFILE=${NAMEFILE1%%.*}_mainChrom.$EXTENSION
							DATA=${NAMEFILE1}
						fi
						if [[ -f $NAMEFILE ]]; then
							while true; do
								printf "\n"
								printf "The directory already present a file (${NAMEFILE}) sorted by main chromosomes.\nDo you want to sort again? (Y/n)\n"
								read ANSWER
								printf "\n"
								case $ANSWER in
									[yY][eE][sS]|[yY]|"" )
										grep "$SORTCHR" $DATA > $NAMEFILE & PID=$!
										i=0 &
										while kill -0 $PID 2>/dev/null; do
											i=$(( (i+1) %4 ))
											printf "\rSorting by main human chromosomes in process ${SPIN:$i:1}"
											sleep .1
										done
										printf "\033c"
										printf "\n\n${GREEN}${DATA}${NOCOLOR} has been re-sorted by the main human chromosomes.\n"
										break;;
									[nN][oO]|[nN] )
										printf "\n\n${GREEN}${NAMEFILE}${NOCOLOR} already present in the directory has not been overwritten.\n"
										break;;
									* )
										printf "\033c"
										printf "%s\n" "" "Please answer yes or no." "";;
								esac
							done
						else
							grep "$SORTCHR" $DATA > $NAMEFILE & PID=$!
							i=0 &
							while kill -0 $PID 2>/dev/null; do
								i=$(( (i+1) %4 ))
								printf "\rSorting by main human chromosomes ${SPIN:$i:1}"
								sleep .1
							done
							printf "\033c"
							printf "\n\n${GREEN}${DATA}${NOCOLOR} has been sorted by the main human chromosomes.\n"
						fi
						break;;
					[oO]|[oO][tT][hH][eE][rR] )
						if [[ $x -eq 0 ]]; then
							NAMEFILE=${NAMEDATA%%.*}_withoutMainChrom.$EXTENSION
							DATA=${DATA}
						elif [[ $x -eq 1 ]]; then
							NAMEFILE=${NAMEFILE1%%.*}_withoutMainChrom.$EXTENSION
							DATA=${NAMEFILE1}
						fi
						if [[ -f $NAMEFILE ]]; then
							while true; do
								printf "\n"
								printf "The directory already present a file (${NAMEFILE}) sorted without main chromosomes.\nDo you want to overwrite it? (Y/n)\n"
								read ANSWER
								printf "\n"
								case $ANSWER in
									[yY][eE][sS]|[yY]|"" )
										grep -v "$SORTCHR" $DATA > $NAMEFILE & PID=$!
										i=0 &
										while kill -0 $PID 2>/dev/null; do
											i=$(( (i+1) %4 ))
											printf "\rSorting without main human chromosomes in process ${SPIN:$i:1}"
											sleep .1
										done
										printf "\033c"
										printf "\n\n${GREEN}${DATA}${NOCOLOR} has been re-sorted without main human chromosomes.\n"
										break;;
									[nN][oO]|[nN] )
										printf "\n\n${GREEN}${NAMEFILE}${NOCOLOR} already present in the directory has not been overwritten.\n"
										break;;
									* )
										printf "\033c"
										printf "%s\n" "" "Please answer yes or no." "";;
								esac
							done
						else
							grep -v "$SORTCHR" $DATA > $NAMEFILE & PID=$!
							i=0 &
							while kill -0 $PID 2>/dev/null; do
								i=$(( (i+1) %4 ))
								printf "\rSorting without main human chromosomes ${SPIN:$i:1}"
								sleep .1
							done
							printf "\033c"
							printf "\n\n${GREEN}${DATA}${NOCOLOR} has been sorted without main human chromosomes.\n"
						fi
						break;;
					* )
						printf "\033c"
						printf "%s\n" "" "Please answer main or other." "";;
				esac
			done
			((r++))
			question_end
			break
		else
			break
		fi
	done
##Tool 2: Promoter regions extractor
	# Tool 2: promoter extraction. Only meaningful when the file holds a single
	# region type, ideally 'gene' (optionally narrowed to one gene_biotype) or
	# already-extracted 'TSS' rows.
	while true; do
		if [[ t -eq 2 ]]; then
			printf "\033c"
			# TSS=1 when the file already contains TSS rows; e=1 when promoters
			# were actually produced (controls the final question_end call).
			TSS=0
			e=0
			if [[ $(cut -f3 $DATA | sort | uniq | wc -l) -eq 1 ]]; then
				REGION=$(cut -f3 $DATA | sort | uniq)
				if [[ "$REGION" == "" ]]; then
					printf "${ORANGE}WARNING:${NOCOLOR} the only region of the file does not present character!\n\n"
				else
					printf "%s\n" "The only region found in the file is '${REGION}'." ""
				fi
				case $REGION in
					[gG][eE][nN][eE]|[gG][eE][nN][eE][sS] )
						while true; do
							printf "All sequences in the file are genes!\nDo you want to explore the 'gene_biotype' to extract promoter from one sub-type of gene? (Y/n)\n"
							read ANSWER
							printf "\n"
							case $ANSWER in
								[yY]|[yY][eE][sS]|"" )
									printf "${ORANGE}WARNING:${NOCOLOR} the file sould have a gff3 structure of attributes (column 9) as follow: XX=XX1;XX=XX;etc...\n\n"
									# Column 9 with ';' replaced by spaces, one line per record.
									cut -f9 $DATA | sed -e 's/\;/ /g' > /tmp/${NAMEPROG}_attributes.tmp & PID=$!
									i=0 &
									while kill -0 $PID 2>/dev/null; do
										i=$(( (i+1) %4 ))
										printf "\rExtracting attributes of the genes in the file ${SPIN:$i:1}"
										sleep .1
									done
									printf "\n\n"
									# NOTE(review): FS is tab here although ';' was turned into
									# spaces above, so NF is likely always 1 — verify the
									# attribute parsing really iterates over attributes.
									MAXNUMCOL=$(awk 'BEGIN{FS="\t"}{print NF}' /tmp/${NAMEPROG}_attributes.tmp | sort -n | sed -n '$p')
									if [[ $(grep "gene_biotype" /tmp/${NAMEPROG}_attributes.tmp | wc -l) -eq 0 ]]; then
										printf "${ORANGE}WARNING:${NOCOLOR} The attributes of the genes in the file do not present 'gene_biotype'!\n.Promoters region will be extract from all the genes in the file\n\n"
										break
									fi
									# Collect the value of every gene_biotype= attribute.
									for (( i = 1; i < ${MAXNUMCOL}+1; i++ )); do
										grep "gene_biotype" /tmp/${NAMEPROG}_attributes.tmp | awk 'BEGIN{FS="\t"}{split($'$i', subfield, "="); if (subfield[1]=="gene_biotype") print subfield[2]}' >> /tmp/${NAMEPROG}_gene_biotype.tmp
									done
									sed -i -e '/^$/d' /tmp/${NAMEPROG}_gene_biotype.tmp
sort /tmp/${NAMEPROG}_${ATTOSORT}.tmp | uniq > /tmp/${NAMEPROG}_gene_biotype_uniq.tmp
									rm /tmp/${NAMEPROG}_attributes.tmp
									# 0 unique biotypes: only empty values; 1: nothing to sort.
									if [[ $(cat /tmp/${NAMEPROG}_gene_biotype_uniq.tmp | wc -l) -eq 0 ]]; then
										printf "${ORANGE}WARNING:${NOCOLOR} The genes of your file present only 1 gene_biotype without_character!\n.Promoters region will be extract from all the genes in the file\n\n"
										break
									elif [[ $(cat /tmp/${NAMEPROG}_gene_biotype_uniq.tmp | wc -l) -eq 1 ]]; then
										printf "${ORANGE}WARNING:${NOCOLOR} The genes of your file present only 1 gene_biotype: "$(cat /tmp/${NAMEPROG}_gene_biotype_uniq.tmp)". You do not need to sort a specific gene_biotype.\n\n"
										break
									fi
									while true; do
										printf "%s\n" "This are unique contents of 'gene_biotype' present in the file:" "" "Number type_of_gene_biotype" "$(sort /tmp/${NAMEPROG}_gene_biotype.tmp | uniq -c)" ""
										NUMOFGENEBIOTYPE=$(cat /tmp/${NAMEPROG}_gene_biotype_uniq.tmp | wc -l)
										# Load the unique biotypes into LISTGENBIOTYPE[0..n-1].
										for (( i = 1; i < ${NUMOFGENEBIOTYPE} + 1; i++ )); do
											eval LISTGENBIOTYPE[$i-1]="$(sed -n $i'p' /tmp/${NAMEPROG}_gene_biotype_uniq.tmp)"
										done
printf "By which sub-attribute of '${ATTOSORT}' do you want to sort?\n"
										read ANSWER
										printf "\n"
										# Accept the answer only if it matches a known biotype.
										for (( i = 0; i < ${NUMOFGENEBIOTYPE}; i++ )); do
											if [[ $ANSWER = ${LISTGENBIOTYPE[$i]} ]]; then
												SUBATTOSORT=${LISTGENBIOTYPE[$i]}
											fi
										done
										if [[ ! -z $SUBATTOSORT ]]; then
											NAMEFILE1=${NAMEDATA%%.*}_geneBiotypeAttributes_${SUBATTOSORT}Sorted.${EXTENSION}
											if [[ -f $NAMEFILE1 ]]; then
												while true; do
													printf "\nThe directory already present a file (${NAMEFILE1}) sorted by the sub-attribute of gene_biotype '${SUBATTOSORT}'.\nDo you want to sort again? (Y/n)\n"
													read ANSWER
													printf "\n"
													case $ANSWER in
														[yY][eE][sS]|[yY]|"" )
															grep "gene_biotype=${SUBATTOSORT}" $DATA > $NAMEFILE1 & PID=$!
															i=0 &
															while kill -0 $PID 2>/dev/null; do
																i=$(( (i+1) %4 ))
																printf "\rSorting by ${SUBATTOSORT} in process ${SPIN:$i:1}"
																sleep .1
															done
															printf "\033c"
															printf "${GREEN}${DATA##*/}${NOCOLOR} has been re-sorted by the sub-attributeof gene_biotype: ${SUBATTOSORT}\n\n"
															break;;
														[nN][oO]|[nN] )
															printf "\n${GREEN}${NAMEFILE1}${NOCOLOR} already present in the directory has not been overwritten.\n"
															break;;
														* )
															printf "%s\n" "" "Please answer yes or no." "";;
													esac
												done
											else
												grep "gene_biotype=${SUBATTOSORT}" $DATA > $NAMEFILE1 & PID=$!
												i=0 &
												while kill -0 $PID 2>/dev/null; do
													i=$(( (i+1) %4 ))
													printf "\rSorting by ${SUBATTOSORT} in process ${SPIN:$i:1}"
													sleep .1
												done
												printf "\033c"
												printf "\n${GREEN}${DATA##*/}${NOCOLOR} has been sorted by the sub-attribute of gene_biotype: ${SUBATTOSORT}\n\n"
											fi
											break
										else
											printf "\033c"
											printf "%s\n" "" "The sub-attribute of gene_biotype that you wrote is not find in the file." ""
										fi
									done
									rm /tmp/${NAMEPROG}_gene_biotype.tmp
									rm /tmp/${NAMEPROG}_gene_biotype_uniq.tmp
									DATA=${NAMEFILE1}
									break;;
								[nN]|[nN][oO] )
									printf "\nPromoters region will be extract from all the genes in the file\n"
									break;;
								* )
									printf "%s\n" "" "Please answer yes or no." ""
									;;
							esac
						done
						;;
					[tT][sS][sS] )
						# File already contains TSS rows; skip TSS extraction below.
						TSS=1
						;;
					* )
						printf "\n${ORANGE}WARNING:${NOCOLOR} the only region of the file is not call 'gene'!\nPlease make sure that the content of your file is gene sequences to be sure to extract promoter regions.\n\n"
				esac
			else
				printf "\n${ORANGE}WARNING:${NOCOLOR} The file contain multiple type of region!\nPlease make sure that the content of your file is gene sequences to be sure to extract promoter regions.\nYou can use the tool 'type of region extractor' to extract gene region from your file.\n\n"
			fi
			if [[ $TSS -eq 0 ]]; then
				# Collapse each gene to its TSS: start for '+' (or unspecified)
				# strand, end for '-' strand; region type becomes "TSS".
				printf "\n${ORANGE}WARNING:${NOCOLOR} This tool has been developed to extract promoter regions from Transcription Start Site (TSS) depending of the strand of the gene.\nFor the (+) strand, the TSS is defined as the first base of the corresponding gene region (the start), while for the (-) strand, the TSS is defined as the last base of the corresponding gene region (the end).\nIf the strand is not specified, the TSS will be the start of the gene region.\n"
				NAMEFILE2=${NAMEDATA%%.*}_TSS.${EXTENSION}
				if [[ -f $NAMEFILE2 ]]; then
					while true; do
						printf "\nThe directory already present a file (${NAMEFILE2}) where TSSs seem to be already extracted.\nDo you want to extract them again? (Y/n)\n"
						read ANSWER
						printf "\n"
						case $ANSWER in
							[yY][eE][sS]|[yY]|"" )
								awk 'BEGIN{FS="\t";OFS="\t"}{if ($7=="-") print $1,$2,"TSS",$5,$5,$6,$7,$8,$9 ; else print $1,$2,"TSS",$4,$4,$6,$7,$8,$9}' $DATA > $NAMEFILE2 & PID=$!
								i=0 &
								while kill -0 $PID 2>/dev/null; do
									i=$(( (i+1) %4 ))
									printf "\rExtraction of TSSs in process ${SPIN:$i:1}"
									sleep .1
								done
								printf "\n\nThe TSSs have been re-extracted from ${GREEN}${DATA##*/}${NOCOLOR}\n"
								break;;
							[nN][oO]|[nN] )
								printf "\n${GREEN}${NAMEFILE2}${NOCOLOR} already present in the directory has not been overwritten.\n"
								break;;
							* )
								printf "%s\n" "" "Please answer yes or no." "";;
						esac
					done
				else
					awk 'BEGIN{FS="\t";OFS="\t"}{if ($7=="-") print $1,$2,"TSS",$5,$5,$6,$7,$8,$9 ; else print $1,$2,"TSS",$4,$4,$6,$7,$8,$9}' $DATA > $NAMEFILE2 & PID=$!
					i=0 &
					while kill -0 $PID 2>/dev/null; do
						i=$(( (i+1) %4 ))
						printf "\rExtraction of TSSs in process ${SPIN:$i:1}"
						sleep .1
					done
					printf "\n\nThe TSSs have been extracted from ${GREEN}${DATA##*/}${NOCOLOR}\n"
				fi
				DATA=${NAMEFILE2}
			else
				# Existing TSS rows must have start == end to be valid.
				awk 'BEGIN{FS="\t"; OFS=" "}{if ($4!=$5) print "Error line", NR}' $DATA > /tmp/${NAMEPROG}_TSS_error.tmp
				if [[ $(cat /tmp/${NAMEPROG}_TSS_error.tmp | wc -l) -gt 0 ]]; then
					printf "\n${RED}Error:${NOCOLOR} one of your sequence presente TSS with a 'start' different to 'end':\nPlease check the TSS at the line:\n"
					cat /tmp/${NAMEPROG}_TSS_error.tmp
					break
				else
					printf "\nThe unique type of region of the file is already ${REGION}, TSSs do not need to be extract from ${GREEN}${DATA##*/}${NOCOLOR}\n"
				fi
			fi
			# Expand each TSS into a promoter window (strand-aware), bounds
			# limited to 99999 bp on each side.
			while true; do
				printf "\nWhich interval around TSS do you want to extract as promoter region?\nPlease answer the interval in base pair as follow: upstream-downstream (ex: 2000-2000)\n"
				read ANSWER
				printf "\n"
				UPSTREAM=${ANSWER%%-*}
				DOWNSTREAM=${ANSWER##*-}
				if [[ $ANSWER =~ [-] && "${UPSTREAM}" =~ ^[0-9]+$ && "${DOWNSTREAM}" =~ ^[0-9]+$ && ${UPSTREAM} -lt 100000 && ${DOWNSTREAM} -lt 100000 ]]; then
					printf "\n${ORANGE}WARNING:${NOCOLOR} This tool has been developed to extract promoter regions from Transcription Start Site depending of the strand of the gene.\nFor strand +, the upstream value will be subtracted to the TSS and the downstream value will be added to the TSS.\nFor Strand -, the upstream value will be added to the TSS and the downstream value will be subtracted to the TSS.\nIf the strand is not specify, the upstream value will be subtracted to the TSS and the downstream value will be added to the TSS.\n\n"
					NAMEFILE=${NAMEDATA%%.*}_promoter-${UPSTREAM}to${DOWNSTREAM}bp.${EXTENSION}
					if [[ -f $NAMEFILE ]]; then
						while true; do
							printf "\nThe directory already present a file (${NAMEFILE}) where promoters seem to be already extracted with the same interval (${ANSWER}).\nDo you want to overwrite it? (Y/n)\n"
							read ANSWER
							printf "\n"
							case $ANSWER in
								[yY][eE][sS]|[yY]|"" )
									awk 'BEGIN{FS="\t";OFS="\t"}{if ($7=="-") {gsub($4, $4 - '${DOWNSTREAM}', $4); gsub($5, $5 + '${UPSTREAM}', $5); print $1,$2,"promoter",$4,$5,$6,$7,$8,$9} else {gsub($4, $4 - '${UPSTREAM}', $4); gsub($5, $5 + '${DOWNSTREAM}', $5); print $1,$2,"promoter",$4,$5,$6,$7,$8,$9}}' $DATA > $NAMEFILE & PID=$!
									i=0 &
									while kill -0 $PID 2>/dev/null; do
										i=$(( (i+1) %4 ))
										printf "\rExtraction of TSSs in process ${SPIN:$i:1}"
										sleep .1
									done
									printf "\n\nPromoters have been re-extracted with the interval -${UPSTREAM}bp to ${DOWNSTREAM}bp from ${GREEN}${DATA##*/}${NOCOLOR}\n"
									break;;
								[nN][oO]|[nN] )
									printf "\n${GREEN}${NAMEFILE2}${NOCOLOR} already present in the directory has not been overwritten.\n"
									break;;
								* )
									printf "%s\n" "" "Please answer yes or no." "";;
							esac
						done
					else
						awk 'BEGIN{FS="\t";OFS="\t"}{if ($7=="-") {gsub($4, $4 - '${DOWNSTREAM}', $4); gsub($5, $5 + '${UPSTREAM}', $5); print $1,$2,"promoter",$4,$5,$6,$7,$8,$9} else {gsub($4, $4 - '${UPSTREAM}', $4); gsub($5, $5 + '${DOWNSTREAM}', $5); print $1,$2,"promoter",$4,$5,$6,$7,$8,$9}}' $DATA > $NAMEFILE & PID=$!
						i=0 &
						while kill -0 $PID 2>/dev/null; do
							i=$(( (i+1) %4 ))
							printf "\rExtraction of TSSs in process ${SPIN:$i:1}"
							sleep .1
						done
						printf "\n\nPromoters have been extracted with the interval -${UPSTREAM}bp to ${DOWNSTREAM}bp from ${GREEN}${DATA##*/}${NOCOLOR}\n"
					fi
					e=1
					break
				else
					printf "%s\n" "Please answer a correct interval as 'upstream-downstream' (ex: 3000-0)." "The interval maximum is '99999-99999'\n"
				fi
			done
			((r++))
			if [[ $e -eq 1 ]]; then
				question_end
			fi
			break
		else
			break
		fi
	done
##Tool 3: Extract lines with specific sources
while true; do
if [[ t -eq 3 ]]; then
printf "\033c"
printf "%s\n" "" "" "This are sources present in the file:" "" "Number_of_line Source" "$(cut -f2 $DATA | sort | uniq -c)" "" &
cut -f2 $DATA | sort | uniq | sed 's/ /_/g' > /tmp/${NAMEPROG}_sources.tmp & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rLooking for sources present in the file ${SPIN:$i:1}"
sleep .1
done
if [[ $(awk '/^$/ {x += 1};END {print x }' /tmp/${NAMEPROG}_sources.tmp) -ge 1 ]]; then
printf "\n${ORANGE}WARNING:${NOCOLOR} 1 of the source does not present character!\nIf you want to sort this source please enter 'without_character'\n\n"
fi
if [[ $(wc -l /tmp/${NAMEPROG}_sources.tmp) = "1 /tmp/${NAMEPROG}_sources.tmp" ]]; then
printf "\033c"
printf "%s\n" "Only 1 source has been found in the file." "You do not need to sort the file by the database source." ""
if [[ $(awk '/^$/ {x += 1};END {print x }' /tmp/${NAMEPROG}_sources.tmp) -ge 1 ]]; then
printf "${ORANGE}WARNING:${NOCOLOR} the only source of the file does not present character!\n"
fi
break
else
NUMOFSOURCES=$(cat /tmp/${NAMEPROG}_sources.tmp | wc -l)
for (( i = 1; i < ${NUMOFSOURCES} + 1; i++ )); do
eval LISTSOURCES[$i-1]="$(sed -n $i'p' /tmp/${NAMEPROG}_sources.tmp)"
done
fi
rm /tmp/${NAMEPROG}_sources.tmp
while true; do
printf "\nBy which source do you want to sort? (If the source name present space ' ', please use '_' instead)\n"
read ANSWER
printf "\n"
sourcewithoutcharacter=0
if [[ "$ANSWER" == "without_character" ]]; then
sourcewithoutcharacter=1
SOURCETOSORT="sourceWithoutCharacter"
else
for (( i = 0; i < ${NUMOFSOURCES}; i++ )); do
if [[ $ANSWER = ${LISTSOURCES[$i]} ]]; then
SOURCETOSORT=${LISTSOURCES[$i]}
fi
done
fi
if [[ ! -z $SOURCETOSORT ]]; then
if [[ $sourcewithoutcharacter -eq 1 ]]; then
SOURCETOSORT2=""
else
printf $SOURCETOSORT > /tmp/${NAMEPROG}_sourcetosort.tmp
SOURCETOSORT2="$(sed 's/_/ /g' /tmp/${NAMEPROG}_sourcetosort.tmp)"
rm /tmp/${NAMEPROG}_sourcetosort.tmp
fi
NAMEFILE=${NAMEDATA%%.*}_${SOURCETOSORT}SourceSorted.$EXTENSION
if [[ -f $NAMEFILE ]]; then
while true; do
printf "\nThe directory already present a file (${NAMEFILE}) sorted by ${SOURCETOSORT2}\nDo you want to sort again? (Y/n)\n"
read ANSWER
printf "\n"
case $ANSWER in
[yY][eE][sS]|[yY]|"" )
awk 'BEGIN{FS="\t"; OFS="\t"}{ if ($2=="'"$SOURCETOSORT2"'") print $0}' $DATA > $NAMEFILE & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rSorting by ${SOURCETOSORT2} in process ${SPIN:$i:1}"
sleep .1
done
printf "\033c"
if [[ $sourcewithoutcharacter -eq 1 ]]; then
printf "\n${GREEN}${DATA##*/}${NOCOLOR} has been re-sorted by the source without character.\n\n"
else
printf "\n${GREEN}${DATA##*/}${NOCOLOR} has been re-sorted by the source: ${SOURCETOSORT2}\n\n"
fi
break;;
[nN][oO]|[nN] )
printf "\n${GREEN}${NAMEFILE}${NOCOLOR} file already present in the directory will be use for the next steps.\n"
break;;
* )
printf "%s\n" "" "Please answer yes or no." "";;
esac
done
else
awk 'BEGIN{FS="\t"; OFS="\t"}{ if ($2=="'"$SOURCETOSORT2"'") print $0}' $DATA > $NAMEFILE & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rSorting by ${SOURCETOSORT2} in process ${SPIN:$i:1}"
sleep .1
done
printf "\033c"
if [[ $sourcewithoutcharacter -eq 1 ]]; then
printf "\n${GREEN}${DATA##*/}${NOCOLOR} has been sorted by the source without character.\n\n"
else
printf "\n${GREEN}${DATA##*/}${NOCOLOR} has been sorted by the source: ${SOURCETOSORT2}\n\n"
fi
fi
break
else
printf "%s\n" "" "The source that you wrote is not find in the file." ""
fi
done
((r++))
question_end
break
else
break
fi
done
#Tool 4 : Extract lines with specific type of region
while true; do
if [[ t -eq 4 ]]; then
printf "\033c"
printf "%s\n" "" "" "This are regions present in the file:" "" "Number_of_line Region" "$(cut -f3 $DATA | sort | uniq -c)" "" &
cut -f3 $DATA | sort | uniq | sed 's/ /_/g' > /tmp/${NAMEPROG}_regions.tmp & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rLooking for region features present in the file ${SPIN:$i:1}"
sleep .1
done
if [[ $( cat /tmp/${NAMEPROG}_regions.tmp | wc -l) -eq 1 ]]; then
printf "\033c"
printf "%s\n" "Only 1 region has been found in the file." "You do not need to sort the file by region." ""
if [[ $(awk '/^$/ {x += 1};END {print x }' /tmp/${NAMEPROG}_regions.tmp) -ge 1 ]]; then
printf "${ORANGE}WARNING:${NOCOLOR} the only region of the file does not present character!\n"
fi
break
else
NUMOFREGIONS=$(cat /tmp/${NAMEPROG}_regions.tmp | wc -l)
for (( i = 1; i < ${NUMOFREGIONS} + 1; i++ )); do
eval LISTREGIONS[$i-1]="$(sed -n $i'p' /tmp/${NAMEPROG}_regions.tmp)"
done
fi
if [[ $(awk '/^$/ {x += 1};END {print x }' /tmp/${NAMEPROG}_regions.tmp) -ge 1 ]]; then
printf "\n${ORANGE}WARNING:${NOCOLOR} 1 of the region does not present character!\nIf you want to sort this region please enter 'without_character'\n\n"
fi
rm /tmp/${NAMEPROG}_regions.tmp
while true; do
printf "By which region do you want to sort? (If the region name present space ' ', please use '_' instead)\n"
read ANSWER
printf "\n"
regonwithoutcharacter=0
if [[ "$ANSWER" == "without_character" ]]; then
regonwithoutcharacter=1
REGIONTOSORT="regionWithoutCharacter"
else
for (( i = 0; i < ${NUMOFREGIONS}; i++ )); do
if [[ $ANSWER = ${LISTREGIONS[$i]} ]]; then
REGIONTOSORT=${LISTREGIONS[$i]}
fi
done
fi
if [[ ! -z $REGIONTOSORT ]]; then
if [[ $regonwithoutcharacter -eq 1 ]]; then
REGIONTOSORT2=""
else
printf $REGIONTOSORT > /tmp/${NAMEPROG}_regiontosort.tmp
REGIONTOSORT2="$(sed 's/_/ /g' /tmp/${NAMEPROG}_regiontosort.tmp)"
rm /tmp/${NAMEPROG}_regiontosort.tmp
fi
NAMEFILE=${NAMEDATA%%.*}_${REGIONTOSORT}RegionSorted.$EXTENSION
if [[ -f $NAMEFILE ]]; then
while true; do
printf "\nThe directory already present a file (${NAMEFILE}) sorted by ${REGIONTOSORT2}.\nDo you want to sort again? (Y/n)\n"
read ANSWER
printf "\n"
case $ANSWER in
[yY][eE][sS]|[yY]|"" )
awk 'BEGIN{FS="\t";OFS="\t"}{ if ($3=="'"$REGIONTOSORT2"'") print $0}' $DATA > $NAMEFILE & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rSorting by ${REGIONTOSORT2} in process ${SPIN:$i:1}"
sleep .1
done
printf "\033c"
if [[ $regonwithoutcharacter -eq 1 ]]; then
printf "\n${GREEN}${DATA##*/}${NOCOLOR} has been re-sorted by the region without character.\n\n"
else
printf "\n${GREEN}${DATA##*/}${NOCOLOR} has been re-sorted by the region: ${REGIONTOSORT2}\n\n"
fi
break;;
[nN][oO]|[nN] )
printf "\n${GREEN}${NAMEFILE}${NOCOLOR} already present in the directory will be use for the next steps.\n"
break;;
* )
printf "%s\n" "" "Please answer yes or no." "";;
esac
done
else
awk 'BEGIN{FS="\t";OFS="\t"}{ if ($3=="'"$REGIONTOSORT2"'") print $0}' $DATA > $NAMEFILE & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rSorting by ${REGIONTOSORT2} in process ${SPIN:$i:1}"
sleep .1
done
printf "\033c"
if [[ $regonwithoutcharacter -eq 1 ]]; then
printf "\n${GREEN}${DATA##*/}${NOCOLOR} has been sorted by the region without character.\n\n"
else
printf "\n${GREEN}${DATA##*/}${NOCOLOR} has been sorted by the region: ${REGIONTOSORT2}\n\n"
fi
fi
break
else
printf "%s\n" "" "The region that you wrote is not find in the file." ""
fi
done
((r++))
question_end
break
else
break
fi
done
#Tool 5: Attributes explorer
while true; do
if [[ t -eq 5 ]]; then
printf "\033c"
printf "${ORANGE}WARNING:${NOCOLOR} the file sould have a gff3 structure of attributes (column 9) as follow: XX=XX1;XX=XX;etc...\n\n"
cut -f9 $DATA | sed -e 's/\;/ /g' > /tmp/${NAMEPROG}_attributes0.tmp & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rExtracting attributes present in the file ${SPIN:$i:1}"
sleep .1
done
printf "\n\n"
MAXNUMCOL=$(awk 'BEGIN{FS="\t"}{print NF}' /tmp/${NAMEPROG}_attributes0.tmp | sort -n | sed -n '$p')
printf "${MAXNUMCOL} attributes max per line have been found in the file\n\n"
for (( i = 1; i < ${MAXNUMCOL}+1; i++ )); do
awk 'BEGIN{FS="\t"}{split($'$i', subfield, "="); print subfield[1]}' /tmp/${NAMEPROG}_attributes0.tmp >> /tmp/${NAMEPROG}_attributes1.tmp
printf "\rRecovering of the attribute n°${i}"
done
printf "\n"
sed -i -e '/^$/d' /tmp/${NAMEPROG}_attributes1.tmp
sort /tmp/${NAMEPROG}_attributes1.tmp | uniq -c > /tmp/${NAMEPROG}_numattributes.tmp &
sort /tmp/${NAMEPROG}_attributes1.tmp | uniq > /tmp/${NAMEPROG}_attributes2.tmp & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rChecking the number of attributes in the file ${SPIN:$i:1}"
sleep .1
done
rm /tmp/${NAMEPROG}_attributes1.tmp
s=0
while true; do
while true; do
e=0
printf "%s\n" "" "" "This are attributes present in the file:" "" "Number Attribute" "$(cat /tmp/${NAMEPROG}_numattributes.tmp)" ""
printf "Do you want to extract the list of content from 1 attribute or do you want to sort by one of them? (E/s)\n"
read ANSWER
printf "\n"
case $ANSWER in
[eE]|[eE][xX][tT][rR][aA][cC][tT] )
while true; do
printf "\033c"
printf "%s\n" "This are attributes present in the file:" "" "Number Attribute" "$(cat /tmp/${NAMEPROG}_numattributes.tmp)" ""
NUMOFATTRIBUTE=$(cat /tmp/${NAMEPROG}_attributes2.tmp | wc -l)
for (( i = 1; i < ${NUMOFATTRIBUTE} + 1; i++ )); do
eval LISTATTRIBUTE[$i-1]="$(sed -n $i'p' /tmp/${NAMEPROG}_attributes2.tmp)"
done
printf "Which attribute do you want to extract?\n"
read ANSWER
for (( i = 0; i < ${NUMOFATTRIBUTE}; i++ )); do
if [[ $ANSWER = ${LISTATTRIBUTE[$i]} ]]; then
ATTOEXTRACT=${LISTATTRIBUTE[$i]}
fi
done
if [[ ! -z $ATTOEXTRACT ]]; then
NAMEFILE1=${NAMEDATA%%.*}_${ATTOEXTRACT}List.txt
if [[ -f $NAMEFILE1 ]]; then
while true; do
printf "\nThe directory already present the file ${NAMEFILE1}.\nDo you want to overwrite it? (Y/n)\n"
read ANSWER
printf "\n"
case $ANSWER in
[yY][eE][sS]|[yY]|"" )
for (( i = 1; i < ${MAXNUMCOL}+1; i++ )); do
grep $ATTOEXTRACT /tmp/${NAMEPROG}_attributes0.tmp | awk 'BEGIN{FS="\t"}{split($'$i', subfield, "="); if (subfield[1]=="'${ATTOEXTRACT}'") print subfield[2]}' >> $NAMEFILE1
done
printf "\nThe list (${GREEN}${NAMEFILE1}${NOCOLOR}) of all content of the attribute '${ATTOEXTRACT}' has been overwritten.\n"
break;;
[nN][oO]|[nN] )
printf "\n${GREEN}${NAMEFILE1}${NOCOLOR} already present in the directory will be use for the next steps.\n"
break;;
* )
printf "%s\n" "" "Please answer yes or no." "";;
esac
done
else
for (( i = 1; i < ${MAXNUMCOL}+1; i++ )); do
grep $ATTOEXTRACT /tmp/${NAMEPROG}_attributes0.tmp | awk 'BEGIN{FS="\t"}{split($'$i', subfield, "="); if (subfield[1]=="'${ATTOEXTRACT}'") print subfield[2]}' >> $NAMEFILE1
done
printf "\nThe list (${GREEN}${NAMEFILE1}${NOCOLOR}) of all content of the attribute '${ATTOEXTRACT}' has been created.\n"
fi
while true; do
printf "%s\n" "" "Do you to extract unique occurence of the list? (Y/n)"
read ANSWER
printf "\n"
case $ANSWER in
[yY]|[yY][eE][sS]|"" )
NAMEFILE2=${NAMEFILE1%%.*}_unique.txt
if [[ $(uniq $NAMEFILE1 | wc -l) -eq $(cat $NAMEFILE1 | wc -l) ]]; then
printf "All the attribute '${ATTOEXTRACT}' are already unique in the file\n"
mv $NAMEFILE1 $NAMEFILE2
break
fi
if [[ -f $NAMEFILE2 ]]; then
while true; do
printf "\nThe directory already present the file ${NAMEFILE2}.\nDo you want to overwrite it? (Y/n)\n"
read ANSWER
printf "\n"
case $ANSWER in
[yY][eE][sS]|[yY]|"" )
uniq $NAMEFILE1 > $NAMEFILE2
printf "\nThe list (${GREEN}${NAMEFILE2}${NOCOLOR}) of unique content of the attribute '${ATTOEXTRACT}' has been overwritten.\n"
break;;
[nN][oO]|[nN] )
printf "\n${GREEN}${NAMEFILE2}${NOCOLOR} already present in the directory will be use for the next steps.\n"
break;;
* )
printf "%s\n" "" "Please answer yes or no." "";;
esac
done
else
uniq $NAMEFILE1 > $NAMEFILE2
printf "\nThe list (${GREEN}${NAMEFILE2}${NOCOLOR}) of unique content of the attribute '${ATTOEXTRACT}' has been created.\n"
fi
break;;
[nN]|[nN][oO] )
break;;
* )
printf "\033c"
printf "%s\n" "" "Please answer yes or no."
;;
esac
done
while true; do
printf "%s\n" "" "Do you to extract list from an other attribute? (Y/n)"
read ANSWER
printf "\n"
case $ANSWER in
[yY]|[yY][eE][sS]|"" )
break;;
[nN]|[nN][oO] )
e=1
break;;
* )
printf "\033c"
printf "%s\n" "" "Please answer yes or no." ""
;;
esac
done
else
printf "%s\n" "" "The attribute that you wrote is not find in the file." ""
fi
if [[ $e -eq 1 ]]; then
break
fi
done
;;
[sS]|[sS][oO][rR][tT] )
while true; do
printf "\033c"
printf "%s\n" "This are attributes present in the file:" "" "Number Attribute" "$(cat /tmp/${NAMEPROG}_numattributes.tmp)" ""
NUMOFATTRIBUTE=$(cat /tmp/${NAMEPROG}_attributes2.tmp | wc -l)
for (( i = 1; i < ${NUMOFATTRIBUTE} + 1; i++ )); do
eval LISTATTRIBUTE[$i-1]="$(sed -n $i'p' /tmp/${NAMEPROG}_attributes2.tmp)"
done
printf "Which attribute do you want to sort?\n"
read ANSWER
for (( i = 0; i < ${NUMOFATTRIBUTE}; i++ )); do
if [[ $ANSWER = ${LISTATTRIBUTE[$i]} ]]; then
ATTOSORT=${LISTATTRIBUTE[$i]}
fi
done
if [[ ! -z $ATTOSORT ]]; then
if [[ $(grep "${ATTOSORT}=" /tmp/${NAMEPROG}_attributes0.tmp | wc -l) -eq $(cat /tmp/${NAMEPROG}_attributes0.tmp | wc -l ) ]]; then
printf "\nThe chosen attribute is present on all line of the file!\n"
while true; do
printf "%s\n" "" "Do you to explore the content of '${ATTOSORT}'? (Y/n)"
read ANSWER
printf "\n"
case $ANSWER in
[yY]|[yY][eE][sS]|"" )
for (( i = 1; i < ${MAXNUMCOL}+1; i++ )); do
grep ${ATTOSORT} /tmp/${NAMEPROG}_attributes0.tmp | awk 'BEGIN{FS="\t"}{split($'$i', subfield, "="); if (subfield[1]=="'${ATTOSORT}'") print subfield[2]}' >> /tmp/${NAMEPROG}_${ATTOSORT}.tmp
done
if [[ $(cat /tmp/${NAMEPROG}_${ATTOSORT}.tmp | uniq | wc -l) -eq $(cat /tmp/${NAMEPROG}_attributes0.tmp | wc -l) ]]; then
printf "\033c"
printf "All content of '${ATTOSORT}' seem to be unique \nUse the attribute extraction to obtain the list of the content of '${ATTOSORT}'."
#Ajout la fonction récupe des noms comme id=idXX
elif [[ $(uniq /tmp/${NAMEPROG}_gene_biotype.tmp | wc -l) -eq 0 ]]; then
printf "\033c"
printf "${ORANGE}WARNING:${NOCOLOR} Only 1 type of sub-attribute of '${ATTOSORT}' has been found in the file, but it does have any charater!\nPlease use an other attribute to sort.\n"
elif [[ $(cat /tmp/${NAMEPROG}_${ATTOSORT}.tmp | uniq | wc -l) -eq 1 ]]; then
printf "\033c"
printf "Only 1 type of sub-attribute of '${ATTOSORT}' has been found in the file!\n"
UNIQLINE="$(sed -n '1p' /tmp/${NAMEPROG}_${ATTOSORT}.tmp)"
NAMEFILE=${NAMEDATA%%.*}_${ATTOSORT}Attributes_${UNIQLINE}Uniq.${EXTENSION}
cp $DATA $NAMEFILE
printf "A copy of the file has been created with the name: ${GREEN}${NAMEFILE}${NOCOLOR}"
else
while true; do
printf "%s\n" "This are unique content of '${ATTOSORT}' present in the file:" "" "Number type_of_${ATTOSORT}" "$(sort /tmp/${NAMEPROG}_${ATTOSORT}.tmp | uniq -c)" ""
sort /tmp/${NAMEPROG}_${ATTOSORT}.tmp | uniq > /tmp/${NAMEPROG}_${ATTOSORT}uniq.tmp
NUMOFSUBATTRIBUTE=$(cat /tmp/${NAMEPROG}_${ATTOSORT}uniq.tmp | wc -l)
for (( i = 1; i < ${NUMOFSUBATTRIBUTE} + 1; i++ )); do
eval LISTSUBATTRIBUTE[$i-1]="$(sed -n $i'p' /tmp/${NAMEPROG}_${ATTOSORT}uniq.tmp)"
done
printf "By which sub-attribute of '${ATTOSORT}' do you want to sort?\n"
read ANSWER
printf "\n"
for (( i = 0; i < ${NUMOFSUBATTRIBUTE}; i++ )); do
if [[ $ANSWER = ${LISTSUBATTRIBUTE[$i]} ]]; then
SUBATTOSORT=${LISTSUBATTRIBUTE[$i]}
fi
done
if [[ ! -z $SUBATTOSORT ]]; then
NAMEFILE=${NAMEDATA%%.*}_${ATTOSORT}Attributes_${SUBATTOSORT}Sorted.${EXTENSION}
if [[ -f $NAMEFILE ]]; then
while true; do
printf "\nThe directory already present a file (${NAMEFILE}) sorted by the sub-attribute '${SUBATTOSORT}'.\nDo you want to sort again? (Y/n)\n"
read ANSWER
printf "\n"
case $ANSWER in
[yY][eE][sS]|[yY]|"" )
grep "${ATTOSORT}=${SUBATTOSORT}" $DATA > $NAMEFILE & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rSorting by ${SUBATTOSORT} in process ${SPIN:$i:1}"
sleep .1
done
printf "\033c"
printf "${GREEN}${DATA##*/}${NOCOLOR} has been re-sorted by the sub-attribute: ${SUBATTOSORT}\n\n"
break;;
[nN][oO]|[nN] )
printf "\n${GREEN}${NAMEFILE}${NOCOLOR} already present in the directory has not been overwritten.\n"
break;;
* )
printf "%s\n" "" "Please answer yes or no." "";;
esac
done
else
grep "${ATTOSORT}=${SUBATTOSORT}" $DATA > $NAMEFILE & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rSorting by ${SUBATTOSORT} in process ${SPIN:$i:1}"
sleep .1
done
printf "\033c"
printf "\n${GREEN}${DATA##*/}${NOCOLOR} has been sorted by the sub-attribute: ${SUBATTOSORT}\n\n"
fi
break
else
printf "\033c"
printf "%s\n" "" "The sub-attribute that you wrote is not find in the file." ""
fi
done
rm /tmp/${NAMEPROG}_${ATTOSORT}uniq.tmp
fi
rm /tmp/${NAMEPROG}_${ATTOSORT}.tmp
s=1
break
;;
[nN]|[nN][oO] )
break;;
* )
printf "\033c"
printf "%s\n" "" "Please answer yes or no." ""
;;
esac
done
else
NAMEFILE=${NAMEDATA%%.*}_${ATTOSORT}AttributesSorted.${EXTENSION}
if [[ -f $NAMEFILE ]]; then
while true; do
printf "\nThe directory already present a file (${NAMEFILE}) sorted by ${ATTOSORT}.\nDo you want to sort again? (Y/n)\n"
read ANSWER
printf "\n"
case $ANSWER in
[yY][eE][sS]|[yY]|"" )
grep "${ATTOSORT}=" $DATA > $NAMEFILE & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rSorting by ${ATTOSORT} in process ${SPIN:$i:1}"
sleep .1
done
printf "\033c"
printf "${GREEN}${DATA##*/}${NOCOLOR}has been re-sorted by the attribute: ${ATTOSORT}\n\n"
break;;
[nN][oO]|[nN] )
printf "\n${GREEN}${NAMEFILE}${NOCOLOR} already present in the directory will has not been overwritten.\n"
break;;
* )
printf "%s\n" "" "Please answer yes or no." "";;
esac
done
else
grep "${ATTOSORT}=" $DATA > $NAMEFILE & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rSorting by ${ATTOSORT} in process ${SPIN:$i:1}"
sleep .1
done
printf "\033c"
printf "\n${GREEN}${DATA##*/}${NOCOLOR} has been sorted by the attribute: ${ATTOSORT}\n\n"
fi
s=1
fi
e=1
break
else
printf "\033c"
printf "%s\n" "" "The attribute that you wrote is not find in the file." ""
fi
done
;;
* )
printf "\033c"
printf "%s\n" "" "Please answer extract or sort." ""
;;
esac
if [[ $e -eq 1 ]]; then
break
fi
done
while true; do
printf "%s\n" "" "Do you to extract list or sort from an other attribute? (Y/n)"
read ANSWER
printf "\n"
case $ANSWER in
[yY]|[yY][eE][sS]|"" )
printf "\033c"
break
;;
[nN]|[nN][oO] )
e=2
break;;
* )
printf "\033c"
printf "%s\n" "" "Please answer yes or no." ""
;;
esac
done
if [[ $e -eq 2 ]]; then
break
fi
done
rm /tmp/${NAMEPROG}_attributes2.tmp
rm /tmp/${NAMEPROG}_numattributes.tmp
rm /tmp/${NAMEPROG}_attributes0.tmp
((r++))
if [[ $s -eq 1 ]]; then
question_end
fi
break
else
break
fi
done
#Tool 6: Sequence extender
##Tool 6: Sequence extender — widen every feature's start (col 4) and end
## (col 5) by a user-chosen upstream/downstream interval, optionally
## strand-aware. Relies on variables set earlier in the script: t (selected
## tool), DATA (input file), NAMEDATA/EXTENSION (output naming), SPIN,
## GREEN/NOCOLOR, r (tool counter) and the question_end helper.
while true; do
	if [[ t -eq 6 ]]; then
		printf "\033c"
		e=0	# unused in this tool; kept for parity with the other tool sections
		while true; do
			printf "With which interval do you want to extend the sequences in the file?\nPlease answer the interval in base pair as follow: upstream-downstream (ex: 2000-2000)\n"
			read ANSWER
			printf "\n"
			UPSTREAM=${ANSWER%%-*}
			DOWNSTREAM=${ANSWER##*-}
			# Accept only "<digits>-<digits>" with both values below 100000.
			if [[ $ANSWER =~ [-] && "${UPSTREAM}" =~ ^[0-9]+$ && "${DOWNSTREAM}" =~ ^[0-9]+$ && ${UPSTREAM} -lt 100000 && ${DOWNSTREAM} -lt 100000 ]]; then
				while true; do
					printf "Do you want to take care of the strand of the sequence? (Y/n)\nIf 'Yes', for sequences with a (+) strand, the upstream value will be subtracted to the start and the downstream value will be added to the end. While for sequences with a (-) strand, the upstream value will be added to the end and the downstream value will be subtracted to the start.\nIf 'no', for all sequences, the upstream value will be subtracted to the start and the downstream value will be added to the end.\n"
					read ANSWER
					printf "\n"
					case $ANSWER in
						[yY]|[yY][eE][sS]|"" )
							NAMEFILE=${NAMEDATA%%.*}_sequences-${UPSTREAM}to${DOWNSTREAM}bp_strand_dep.${EXTENSION}
							if [[ -f $NAMEFILE ]]; then
								while true; do
									# BUGFIX: $ANSWER was overwritten by the strand prompt above, so it no
									# longer holds the interval; print the interval variables explicitly.
									printf "\nThe directory already present a file (${NAMEFILE}) where promoters seem to be already extracted with the same interval (${UPSTREAM}-${DOWNSTREAM}).\nDo you want to overwrite it? (Y/n)\n"
									read ANSWER
									printf "\n"
									case $ANSWER in
										[yY][eE][sS]|[yY]|"" )
											awk 'BEGIN{FS="\t";OFS="\t"}{if ($7=="-") {gsub($4, $4 - '${DOWNSTREAM}', $4); gsub($5, $5 + '${UPSTREAM}', $5); print $0} else {gsub($4, $4 - '${UPSTREAM}', $4); gsub($5, $5 + '${DOWNSTREAM}', $5); print $0}}' $DATA > $NAMEFILE & PID=$!
											i=0 &
											while kill -0 $PID 2>/dev/null; do
												i=$(( (i+1) %4 ))
												printf "\rAddition of the interval -${UPSTREAM}bp-${DOWNSTREAM}bp to each sequence in process ${SPIN:$i:1}"
												sleep .1
											done
											printf "\n\nThe sequences have been re-extended with the interval -${UPSTREAM}bp to ${DOWNSTREAM}bp.\nThe file ${GREEN}${NAMEFILE}${NOCOLOR} has been created.\n"
											break;;
										[nN][oO]|[nN] )
											# BUGFIX: was ${NAMEFILE2}, a leftover from Tool 5 that is unset here.
											printf "\n${GREEN}${NAMEFILE}${NOCOLOR} already present in the directory has not been overwritten.\n"
											break;;
										* )
											printf "%s\n" "" "Please answer yes or no." "";;
									esac
								done
							else
								awk 'BEGIN{FS="\t";OFS="\t"}{if ($7=="-") {gsub($4, $4 - '${DOWNSTREAM}', $4); gsub($5, $5 + '${UPSTREAM}', $5); print $0} else {gsub($4, $4 - '${UPSTREAM}', $4); gsub($5, $5 + '${DOWNSTREAM}', $5); print $0}}' $DATA > $NAMEFILE & PID=$!
								i=0 &
								while kill -0 $PID 2>/dev/null; do
									i=$(( (i+1) %4 ))
									printf "\rAddition of the interval -${UPSTREAM}bp-${DOWNSTREAM}bp to each sequence in process ${SPIN:$i:1}"
									sleep .1
								done
								printf "\n\nThe sequences have been extended with the interval -${UPSTREAM}bp to ${DOWNSTREAM}bp.\nThe file ${GREEN}${NAMEFILE}${NOCOLOR} has been created.\n"
							fi
							break;;
						[nN]|[nN][oO] )
							NAMEFILE=${NAMEDATA%%.*}_sequences-${UPSTREAM}to${DOWNSTREAM}bp_strand_indep.${EXTENSION}
							if [[ -f $NAMEFILE ]]; then
								while true; do
									# BUGFIX: same stale-$ANSWER problem as the strand-dependent branch.
									printf "\nThe directory already present a file (${NAMEFILE}) where promoters seem to be already extracted with the same interval (${UPSTREAM}-${DOWNSTREAM}).\nDo you want to overwrite it? (Y/n)\n"
									read ANSWER
									printf "\n"
									case $ANSWER in
										[yY][eE][sS]|[yY]|"" )
											awk 'BEGIN{FS="\t";OFS="\t"}{gsub($4, $4 - '${UPSTREAM}', $4); gsub($5, $5 + '${DOWNSTREAM}', $5); print $0}' $DATA > $NAMEFILE & PID=$!
											i=0 &
											while kill -0 $PID 2>/dev/null; do
												i=$(( (i+1) %4 ))
												printf "\rAddition of the interval -${UPSTREAM}bp-${DOWNSTREAM}bp to each sequence in process ${SPIN:$i:1}"
												sleep .1
											done
											printf "\n\nThe sequences have been re-extended with the interval -${UPSTREAM}bp to ${DOWNSTREAM}bp.\nThe file ${GREEN}${NAMEFILE}${NOCOLOR} has been created.\n"
											break;;
										[nN][oO]|[nN] )
											# BUGFIX: was ${NAMEFILE2}, unset in this tool.
											printf "\n${GREEN}${NAMEFILE}${NOCOLOR} already present in the directory has not been overwritten.\n"
											break;;
										* )
											printf "%s\n" "" "Please answer yes or no." "";;
									esac
								done
							else
								awk 'BEGIN{FS="\t";OFS="\t"}{gsub($4, $4 - '${UPSTREAM}', $4); gsub($5, $5 + '${DOWNSTREAM}', $5); print $0}' $DATA > $NAMEFILE & PID=$!
								i=0 &
								while kill -0 $PID 2>/dev/null; do
									i=$(( (i+1) %4 ))
									printf "\rAddition of the interval -${UPSTREAM}bp-${DOWNSTREAM}bp to each sequence in process ${SPIN:$i:1}"
									sleep .1
								done
								printf "\n\nThe sequences have been extended with the interval -${UPSTREAM}bp to ${DOWNSTREAM}bp.\nThe file ${GREEN}${NAMEFILE}${NOCOLOR} has been created.\n"
							fi
							break;;
						* )
							printf "\033c"
							printf "%s\n" "" "Please answer yes or no." ""
							;;
					esac
				done
				break
			else
				printf "%s\n" "Please answer a correct interval as 'upstream-downstream' (ex: 3000-0)." "The interval maximum is '99999-99999'\n"
			fi
		done
		((r++))
		question_end
		break
	else
		break
	fi
done
##Tool 7: Duplicate explorer
while true; do
if [[ t -eq 7 ]]; then
printf "\033c"
e=0
while true; do
printf "Do you want to take in to account the strand of the sequence? (Y/n)\nIf yes 2 sequences with the same start and end but not in the same strand will not be consider as duplicate line...\n"
read ANSWER
printf "\n"
case $ANSWER in
[yY]|[yY][eE][sS]|"" )
cut '-f4,5,7' $DATA | uniq -d | sed 's/ /_/g' > /tmp/${NAMEPROG}_duplicate.tmp & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rLooking for duplicate sequence in the file ${SPIN:$i:1}"
sleep .1
done
STRAND="StrandDep"
break;;
[nN]|[nN][oO] )
cut '-f4,5' $DATA | uniq -d | sed 's/ /_/g' > /tmp/${NAMEPROG}_duplicate.tmp & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rLooking for duplicate sequence in the file ${SPIN:$i:1}"
sleep .1
done
STRAND="StrandIndep"
break;;
* )
printf "\033c"
printf "%s\n" "" "Please answer yes or no." ""
;;
esac
done
NUMDUP=$(cat /tmp/${NAMEPROG}_duplicate.tmp | wc -l | sed 's/ //g')
for (( i = 1; i < ${NUMDUP} + 1; i++ )); do
eval LISTDUP[$i-1]="$(sed -n $i'p' /tmp/${NAMEPROG}_duplicate.tmp | cut -d_ '-f1,2')"
done
if [[ $NUMDUP -eq 0 ]]; then
printf "\033c"
printf "${GREEN}The file does not present duplicate sequence!${NOCOLOR}\n\n"
rm /tmp/${NAMEPROG}_duplicate.tmp
break
elif [[ $NUMDUP -eq 1 ]]; then
printf "\n\nThe file present ${RED}${NUMDUP}${NOCOLOR} duplicated line.\n\n"
else
printf "\n\nThe file present ${RED}${NUMDUP}${NOCOLOR} duplicated lines.\n\n"
fi
while true; do
printf "%s\n" "What do you would like to do? (1 | 2 | 3)" "1 - Keep the first occurrence for each duplicated line." "2 - Check each duplicated line 1 by 1 and choose which one to keep. (could be long for high number of duplicates)" "3 - Nothing"
read ANSWER
printf "\n"
case $ANSWER in
[1] )
NAMEFILE=${NAMEDATA%%.*}_uniqueSequences${STRAND}AutomaticallyFiltered.${EXTENSION}
if [[ -f $NAMEFILE ]]; then
while true; do
printf "\nThe current directory already present a file (${NAMEFILE}) without duplicates.\nDo you want to overwrite it? (Y/n)\n"
read ANSWER
printf "\n"
case $ANSWER in
[yY][eE][sS]|[yY]|"" )
cp $DATA $NAMEFILE
for (( i = 0; i < ${NUMDUP}; i++ )); do
grep -n -E "${LISTDUP[$i]##*_}|${LISTDUP[$i]%%_*}" $DATA | cut -d':' -f1 | sed '1d' >> /tmp/${NAMEPROG}_lineToDel.tmp
done & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rDuplicates filtering in process ${SPIN:$i:1}"
sleep .1
done
k=0
for (( i = 1; i < $(cat /tmp/${NAMEPROG}_lineToDel.tmp | wc -l) + 1; i++ )); do
LINETODEL=$(sed -n $i'p' /tmp/${NAMEPROG}_lineToDel.tmp)
eval LINETODEL=$((${LINETODEL}-${k}))
sed -i -e $LINETODEL'd' $NAMEFILE
((k++))
done & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rDuplicates filtering in process ${SPIN:$i:1}"
sleep .1
done
printf "\n\n${GREEN}${DATA##*/}${NOCOLOR} has been re-filtered automatically without duplicates.\n\n"
break;;
[nN][oO]|[nN] )
printf "\n${GREEN}${NAMEFILE}${NOCOLOR} already present in the directory has not been overwritten.\n"
break;;
* )
printf "%s\n" "" "Please answer yes or no." "";;
esac
done
else
cp $DATA $NAMEFILE
for (( i = 0; i < ${NUMDUP}; i++ )); do
grep -n -E "${LISTDUP[$i]##*_}|${LISTDUP[$i]%%_*}" $DATA | cut -d':' -f1 | sed '1d' >> /tmp/${NAMEPROG}_lineToDel.tmp
done & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rDuplicates filtering in process ${SPIN:$i:1}"
sleep .1
done
k=0
for (( i = 1; i < $(cat /tmp/${NAMEPROG}_lineToDel.tmp | wc -l) + 1; i++ )); do
LINETODEL=$(sed -n $i'p' /tmp/${NAMEPROG}_lineToDel.tmp)
eval LINETODEL=$((${LINETODEL}-${k}))
sed -i -e $LINETODEL'd' $NAMEFILE
((k++))
done & PID=$!
i=0 &
while kill -0 $PID 2>/dev/null; do
i=$(( (i+1) %4 ))
printf "\rDuplicates filtering in process ${SPIN:$i:1}"
sleep .1
done
printf "\n\n${GREEN}${DATA##*/}${NOCOLOR} has been filtered automatically without duplicates.\n\n"
fi
rm /tmp/${NAMEPROG}_lineToDel.tmp
break;;
[2] )
NAMEFILE=${NAMEDATA%%.*}_uniqueSequences${STRAND}ManuallyFiltered.${EXTENSION}
if [[ -f $NAMEFILE ]]; then
while true; do
printf "The current directory already present a file (${NAMEFILE}) without duplicates.\nDo you want to overwrite it? (Y/n)\n"
read ANSWER
printf "\n"
case $ANSWER in
[yY][eE][sS]|[yY]|"" )
cp $DATA $NAMEFILE
k=0
for (( i = 0; i < ${NUMDUP}; i++ )); do
printf "This are duplicates n°$(($i+1))/${NUMDUP}:\n\n"
grep -n -E "${LISTDUP[$i]##*_}|${LISTDUP[$i]%%_*}" $DATA | cut -d':' -f1 > /tmp/${NAMEPROG}_lineToDel.tmp
while true; do
grep -E "${LISTDUP[$i]##*_}|${LISTDUP[$i]%%_*}" $DATA | grep -n ""
NUMLINE=$(grep -E "${LISTDUP[$i]##*_}|${LISTDUP[$i]%%_*}" $DATA | grep -n "" | wc -l)
printf "\nPlease enter the number of the line that you would like to keep in the file?\n"
read ANSWER
printf "\n"
case $ANSWER in
[$(seq 1 ${NUMLINE})] )
sed -i -e ${ANSWER}'d' /tmp/${NAMEPROG}_lineToDel.tmp
rm /tmp/${NAMEPROG}_lineToDel.tmp-e
for (( t = 1; t < $(cat /tmp/${NAMEPROG}_lineToDel.tmp | wc -l) + 1; t++ )); do
LINETODEL=$(sed -n $t'p' /tmp/${NAMEPROG}_lineToDel.tmp)
eval LINETODEL=$((${LINETODEL}-${k}))
sed -i -e $LINETODEL'd' $NAMEFILE
((k++))
done
printf "OK\n\n"
break
;;
* )
printf "%s\n" "" "Please, enter a correct line number." ""
;;
esac
done
done
printf "\n\n${GREEN}${DATA##*/}${NOCOLOR} has been re-filtered manually without duplicates.\n\n"
break;;
[nN][oO]|[nN] )
printf "\n${GREEN}${NAMEFILE}${NOCOLOR} already present in the directory has not been overwritten.\n"
break;;
* )
printf "%s\n" "" "Please answer yes or no." "";;
esac
done
else
cp $DATA $NAMEFILE
k=0
for (( i = 0; i < ${NUMDUP}; i++ )); do
printf "This are duplicates n°$(($i+1))/${NUMDUP}:\n\n"
grep -n -E "${LISTDUP[$i]##*_}|${LISTDUP[$i]%%_*}" $DATA | cut -d':' -f1 > /tmp/${NAMEPROG}_lineToDel.tmp
while true; do
grep -E "${LISTDUP[$i]##*_}|${LISTDUP[$i]%%_*}" $DATA | grep -n ""
NUMLINE=$(grep -E "${LISTDUP[$i]##*_}|${LISTDUP[$i]%%_*}" $DATA | grep -n "" | wc -l)
printf "\nPlease enter the number of the line that you would like to keep in the file?\n"
read ANSWER
printf "\n"
case $ANSWER in
[$(seq 1 ${NUMLINE})] )
sed -i -e ${ANSWER}'d' /tmp/${NAMEPROG}_lineToDel.tmp
rm /tmp/${NAMEPROG}_lineToDel.tmp-e
for (( t = 1; t < $(cat /tmp/${NAMEPROG}_lineToDel.tmp | wc -l) + 1; t++ )); do
LINETODEL=$(sed -n $t'p' /tmp/${NAMEPROG}_lineToDel.tmp)
eval LINETODEL=$((${LINETODEL}-${k}))
sed -i -e $LINETODEL'd' $NAMEFILE
((k++))
done
printf "OK\n\n"
break
;;
* )
printf "%s\n" "" "Please, enter a correct line number." ""
;;
esac
done
done
printf "\n\n${GREEN}${DATA##*/}${NOCOLOR} has been filtered manually without duplicates.\n\n"
fi
rm /tmp/${NAMEPROG}_lineToDel.tmp
rm /tmp/${NAMEPROG}_lineToDel.tmp-e
break;;
[3] )
e=1
break;;
* )
printf "\033c"
printf "%s\n" "" "Please answer 1, 2 or 3." ""
;;
esac
done
if [[ -f "${NAMEFILE}-e" ]]; then
rm "${NAMEFILE}-e"
fi
rm /tmp/${NAMEPROG}_duplicate.tmp
((r++))
if [[ $e -eq 0 ]]; then
question_end
fi
break
else
break
fi
done
##Tool 8: GFF to BED file
while true; do
if [[ t -eq 8 ]]; then
while true; do
CHRNAMES=( "chr1" "chr2" "chr3" "chr4" "chr5" "chr6" "chr7" "chr8" "chr9" "chr10" "chr11" "chr12" "chr13" "chr14" "chr15" "chr16" "chr17" "chr18" "chr19" "chr20" "chr21" "chr22" "chrX" "chrY" "chrM" )
NCNAMES=( "NC_000001" "NC_000002" "NC_000003" "NC_000004" "NC_000005" "NC_000006" "NC_000007" "NC_000008" "NC_000009" "NC_000010" "NC_000011" "NC_000012" "NC_000013" "NC_000014" "NC_000015" "NC_000016" "NC_000017" "NC_000018" "NC_000019" "NC_000020" "NC_000021" "NC_000022" "NC_000023" "NC_000024" "NC_012920" )
makebed3 () {
	# Build a 3-column BED (chrom, start, end) from the GFF in $DATA,
	# translating RefSeq chromosome accessions (NCNAMES) to UCSC "chr"
	# names (CHRNAMES). Prompts before overwriting an existing .bed3.
	NAMEBED3="${NAMEDATA%%.*}.bed3"
	bed3 () {
		# GFF columns 1/4/5 -> BED3 chrom/start/end.
		awk 'BEGIN{FS="\t";OFS="\t"}{print $1, $4, $5}' $DATA > $NAMEBED3
		# One rewrite pass per chromosome (NCNAMES[i] -> CHRNAMES[i]),
		# re-sorting the whole file after each pass.
		for i in $(seq 0 24) ; do
			A="${NCNAMES[$i]}"
			B="${CHRNAMES[$i]}"
			awk 'BEGIN{FS="\t"; OFS="\t"}{split($1, subfield, "."); if (subfield[1]=="'$A'") print "'$B'", $2, $3; else print $0}' $NAMEBED3 > /tmp/${NAMEPROG}_${NAMEBED3}.tmp && sort -h /tmp/${NAMEPROG}_${NAMEBED3}.tmp > $NAMEBED3
		done
		rm /tmp/${NAMEPROG}_${NAMEBED3}.tmp
		# NOTE(review): if NAMEDATA carries a directory component, NAMEBED3
		# contains '/' and the /tmp/..._${NAMEBED3}.tmp path above is invalid
		# — TODO confirm NAMEDATA is a bare filename.
	}
	waitbed3 () {
		# Spinner for the backgrounded bed3 job ($! set by "bed3 &").
		PID=$!
		i=0 &
		while kill -0 $PID 2>/dev/null; do
			i=$(( (i+1) %4 ))
			printf "\rCreation of BED3 file in process ${SPIN:$i:1}"
			sleep .1
		done
		printf "\033c"
	}
	if [[ -f $NAMEBED3 ]]; then
		# A .bed3 already exists: ask before regenerating it.
		while true; do
			printf "%s\n" "" "The directory already present a BED3 file (${NAMEBED3##*/})." "Do you want to overwrite this file? (Y/n)"
			read ANSWER
			printf "\n"
			case $ANSWER in
				[yY][eE][sS]|[yY]|"" )
					bed3 & waitbed3
					printf "\n${GREEN}${NAMEBED3##*/}${NOCOLOR} file has been overwritten.\n"
					break;;
				[nN][oO]|[nN] )
					printf "\033c"
					printf "${GREEN}${NAMEBED3##*/}${NOCOLOR} file present in the directory has not been overwritten.\n"
					break;;
				* )
					printf "%s\n" "" "Please answer yes or no." "";;
			esac
		done
	else
		bed3 & waitbed3
		printf "\n${GREEN}${NAMEBED3##*/}${NOCOLOR} file has been generated.\n"
	fi
}
makebed6 () {
	# Build a 6-column BED (chrom, start, end, name=GFF col 9, score, strand)
	# from the GFF in $DATA, translating RefSeq accessions (NCNAMES) to UCSC
	# "chr" names (CHRNAMES). Prompts before overwriting an existing .bed6.
	NAMEBED6="${NAMEDATA%%.*}.bed6"
	bed6 () {
		awk 'BEGIN{FS="\t";OFS="\t"}{print $1, $4, $5, $9, $6, $7}' $DATA > $NAMEBED6
		# One rewrite pass per chromosome, re-sorting after each pass.
		for i in $(seq 0 24) ; do
			A="${NCNAMES[$i]}"
			B="${CHRNAMES[$i]}"
			# BUGFIX: the sort stage was missing its '>' redirection
			# ("sort -h tmp $NAMEBED6"), so the sorted result went to stdout,
			# $NAMEBED6 was never updated, and sort concatenated both files.
			# bed3 redirects correctly; this now matches it.
			awk 'BEGIN{FS="\t"; OFS="\t"}{split($1, subfield, "."); if (subfield[1]=="'$A'") print "'$B'", $2, $3, $4, $5, $6; else print $0}' $NAMEBED6 > /tmp/${NAMEPROG}_${NAMEBED6}.tmp && sort -h /tmp/${NAMEPROG}_${NAMEBED6}.tmp > $NAMEBED6
		done
		rm /tmp/${NAMEPROG}_${NAMEBED6}.tmp
		# NOTE(review): if NAMEDATA carries a directory component, NAMEBED6
		# contains '/' and the /tmp temp path above is invalid — TODO confirm.
	}
	waitbed6 () {
		# Spinner for the backgrounded bed6 job ($! set by "bed6 &").
		PID=$!
		i=0 &
		while kill -0 $PID 2>/dev/null; do
			i=$(( (i+1) %4 ))
			printf "\rCreation of BED6 file in process ${SPIN:$i:1}"
			sleep .1
		done
		printf "\033c"
	}
	if [[ -f $NAMEBED6 ]]; then
		while true; do
			printf "%s\n" "The directory already present a BED6 file (${NAMEBED6##*/})." "Do you want to overwrite this file? (Y/n)"
			read ANSWER
			printf "\n"
			case $ANSWER in
				[yY][eE][sS]|[yY]|"" )
					bed6 & waitbed6
					printf "\n${GREEN}${NAMEBED6##*/}${NOCOLOR} file has been overwritten.\n"
					break;;
				[nN][oO]|[nN] )
					printf "\033c"
					printf "${GREEN}${NAMEBED6##*/}${NOCOLOR} file present in the directory has not been overwritten.\n"
					break;;
				* )
					printf "%s\n" "" "Please answer yes or no." "";;
			esac
		done
	else
		bed6 & waitbed6
		printf "\n${GREEN}${NAMEBED6##*/}${NOCOLOR} file has been generated.\n"
	fi
}
printf "\033c"
printf "%s\n" "Which can of BED to you want to create with '${DATA##*/}'? (bed3 - bed6 - both)"
read ANSWER
case $ANSWER in
[bB][eE][dD][3] )
makebed3
break;;
[bB][eE][dD][6] )
makebed6
break;;
[bB][oO][tT][hH] )
makebed3
makebed6
break;;
* )
printf "%s\n" "" "Please answer bed3, bed6 or both." "";;
esac
done
((r++))
break
else
break
fi
done
done
| true
|
8d2cc6e026f2044cd21b6c28f24575546b3faaab
|
Shell
|
awesomejt/wp-theme-jason
|
/setup/provision.sh
|
UTF-8
| 991
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Vagrant provisioner: Apache + PHP5 + MySQL + phpMyAdmin + WordPress,
# with wp-content/uploads and themes symlinked into the /vagrant share so
# they survive re-provisioning.
set -e  # fail fast: abort provisioning on the first error instead of ploughing on
export DEBIAN_FRONTEND=noninteractive

apt-get update -y
apt-get upgrade -y
apt-get autoremove -y
apt-get install -y apache2 apache2-utils nano git unzip
apt-get install -y php5 php-pear libapache2-mod-php5 php5-dev php5-gd libssh2-php php5-mcrypt dbconfig-common
apt-get install -y mysql-server php5-mysql
apt-get install -y phpmyadmin

# Download and unpack WordPress BEFORE deleting the existing docroot, so a
# failed download can no longer leave the VM without any site at all
# (the original removed /var/www/html first).
cd /tmp
wget https://wordpress.org/latest.tar.gz
tar -xzvf latest.tar.gz
rm -rf /var/www/html
mv wordpress /var/www/html
cp /vagrant/setup/wp-config.php /var/www/html/wp-config.php

# Seed the shared folders from the install, then replace the real directories
# with symlinks into /vagrant. Guard each rsync with the directory test: a
# fresh WordPress tarball has no uploads/ dir, and an unguarded rsync would
# now abort the script under set -e.
if [ -d /var/www/html/wp-content/uploads ]; then
	rsync -va /var/www/html/wp-content/uploads/ /vagrant/media/
	rm -rf /var/www/html/wp-content/uploads
fi
ln -s /vagrant/media /var/www/html/wp-content/uploads

if [ -d /var/www/html/wp-content/themes ]; then
	rsync -va /var/www/html/wp-content/themes/ /vagrant/themes/
	rm -rf /var/www/html/wp-content/themes
fi
ln -s /vagrant/themes /var/www/html/wp-content/themes

# Create databases/users for WordPress.
mysql -u root < /vagrant/setup/provision.sql
| true
|
543848846bc55c92501b9f6818444c0220198b22
|
Shell
|
fakewaffle/shell-config
|
/bin/pull-request
|
UTF-8
| 870
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh
# Open the GitHub compare page for a pull request from this fork ("origin")
# against the parent repository ("upstream").
# Usage: pull-request [compare-branch] [base-branch]   (both default: master)

# Extract the GitHub user/org from the SSH-style remote URL
# (git@github.com:USER/repo.git -> USER). Quoted throughout; $(...) over backticks.
ORIGIN=$(git remote -v | grep -i origin | grep -i fetch | cut -d : -f 2 | cut -d / -f 1)
UPSTREAM=$(git remote -v | grep -i upstream | grep -i fetch | cut -d : -f 2 | cut -d / -f 1)

if [ "$UPSTREAM" = "" ]; then
	echo "An repository named 'upstream' doesn't exist"
	# printf replaces 'echo -n', which is not portable under #!/bin/sh.
	printf '%s' "Type the name of the repository you want to make a pull request to, followed by [ENTER]: "
	read -r UPSTREAM
fi

if [ "$ORIGIN" = "" ]; then
	echo "An repository named 'origin' doesn't exist"
	printf '%s' "Type the name of the repository you want to make a pull request from, followed by [ENTER]: "
	read -r ORIGIN
fi

# Branch defaults: ${n:-master} covers both unset and empty, matching the
# original explicit "" checks.
COMPARE=${1:-master}	# branch on the fork
BASE=${2:-master}	# branch on upstream

echo "Base: ${BASE}"
echo "Compare: ${COMPARE}"
# NOTE(review): 'open' is macOS-only; on Linux this would need xdg-open.
open "https://github.com/${UPSTREAM}/${PWD##*/}/compare/${BASE}...${ORIGIN}:${COMPARE}"
| true
|
26b4912b74cfce8892ff172bac3b5b293b940aa8
|
Shell
|
heuns2/articulate-ci-demo
|
/ci/tasks/current-app-get-info
|
UTF-8
| 652
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Blue/green deploy helper: detect which app instance ("blue" or "green")
# currently owns the main route, and record both the current instance and
# the next deploy target under ./current-app-info/.
set -xe

pwd
# WARNING(security): 'env' together with 'set -x' dumps $PWS_PWD and other
# credentials into the CI log — remove once debugging is done.
env

cf api "$PWS_API" --skip-ssl-validation
cf login -u "$PWS_USER" -p "$PWS_PWD" -o "$PWS_ORG" -s "$PWS_SPACE"
cf apps

set +e  # grep finding no match returns non-zero and must not kill the script

current_app="green"
next_app="blue"

# Count route lines belonging to the green instance. grep -c replaces the
# original 'grep | wc -l' pipeline; '--' protects against a leading dash.
result=$(cf apps | grep "$PWS_APP_HOSTNAME.$PWS_APP_DOMAIN" | grep -c -- "${PWS_APP_SUFFIX}--green" || true)

if [ "$result" -ne 0 ]; then
	current_app="green"
	next_app="blue"
else
	current_app="blue"
	next_app="green"
fi

echo "$current_app" > ./current-app-info/current-app.txt
echo "$next_app" > ./current-app-info/next-app.txt

set -xe
echo "Current main app routes to app instance $current_app"
echo "New version of app to be deployed to instance $next_app"
| true
|
d797d7176c7c3c3cc8541a1211d7978d35c9f563
|
Shell
|
twoerth/CH-isochrone
|
/city_cindex.sh
|
UTF-8
| 2,320
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# city_cindex.sh — join Swiss city coordinates and ratings with an isochrone
# grid file ($1). Side inputs read by the awk program: coordinates.csv
# (name,lat,lon) and rating.csv (rating fields keyed by city name, col 3).
# The grid file's 2nd line holds per-cell bounding boxes ("lat lon lat lon");
# later rows carry per-cell travel-time values. For each city's cell, travel
# times are bucketed into a cumulative "coverage index" CI60..CI10 and
# printed as one CSV row together with the city's rating record.
awk 'BEGIN {
	FS=",";
	printf("Name,Lat,Lon,CI60,CI50,CI40,CI30,CI20,CI10,Rank,Canton,Name,House2009, House1985, App2009, App1985, Tax2010\n");
	while (getline < "coordinates.csv" > 0) {
		cities_name[$1] = $1;
		cities_lat[$1] = $2 + 0.0;
		cities_lon[$1] = $3 + 0.0;
	}
	while (getline < "rating.csv") {
		ratings[$3] = $0;
	}
}
function ceil(x) {
	return (x == int(x)) ? x : int(x)+1
}
function floor(x) {
	y = int(x)
	return y > x ? y - 1 : y
}
NR == 2 {
	row=0
	rows=0;
	col=0;
	last_lng = 0;
	for (i = 2; i <= NF; i++) {
		split($i, bb, " ");
		cells[i, 1] = bb[1];
		cells[i, 2] = bb[2];
		cells[i, 3] = bb[3];
		cells[i, 4] = bb[4];
		if (bb[2] != last_lng && last_lng != 0) {
			rows = row;
			row = 0;
			col++;
		}
		for (city in cities_name) {
			lat = cities_lat[city];
			lng = cities_lon[city];
			#print city " " lat " " lng;
			if ((lat <= bb[1]) && (lat >= bb[3]) && (lng >= bb[2]) && (lng <= bb[4])) {
				cities_line[city] = col " " row;
				#print city "=" col " " row;
			}
		}
		row++;
		last_lng = bb[2];
	}
	#rows++;
}
NR > 2 {
	for (city in cities_name) {
		#print "Searching " city " " cities_line[city];
		if (cities_line[city] == $1) {
			# create index
			for (i = 1; i <=6; i++) {
				t[i] = 0;
			}
			for (i = 2; i < NF; i++) {
				if (length($i) > 0) {
					min = ($i == 0) ? 1 : ceil($i / 10);
					for (x = 1; x <= min; x++) {
						t[x]++;
					}
				}
			}
			printf("\"%s\",%f,%f,%d,%d,%d,%d,%d,%d,%s\n", city, cities_lat[city], cities_lon[city], t[1], t[2], t[3], t[4], t[5], t[6], ratings[city]);
		}
	}
}
END {
}' $1 #| awk '
# NOTE(review): everything below this 'exit' is DEAD CODE — the remainder of
# an older second awk stage whose pipe was commented out above ("#| awk '").
# Bash stops reading the script at 'exit', so the unbalanced quotes below are
# never parsed. Kept verbatim; consider deleting it.
exit
BEGIN {
	FS=","
}
{
	ul_lat=$2 + 0.0;
	ul_lon=$3 + 0.0;
	lr_lat=$4 + 0.0;
	lr_lon=$5 + 0.0;
	found = 0;
	for (city in cities_name) {
		lat = cities_lat[city];
		lon = cities_lon[city];
		# print city, ul_lat, cities_lat[city], lr_lat, " | ", ul_lon, lon, lr_lon, (ul_lat >= lat && lr_lat <= lat && ul_lon <= lon), (lr_lon >= lon);
		if (ul_lat >= lat && lr_lat <= lat && ul_lon <= lon && lr_lon >= lon) {
			printf("\"%s\",%f,%f,%d,%d,%d,%d,%d,%d,%s\n", city, cities_lat[city], cities_lon[city], $6, $7, $8, $9, $10, $11, ratings[city]);
			found = 1;
		}
	}
}'
| true
|
17f8fb4470a31eacee3c0a6c0aaed7d6790c29be
|
Shell
|
neomutt/test-docker
|
/generate-dated-emails.sh
|
UTF-8
| 1,165
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
FRUIT=(apple banana cherry damson elderberry fig guava hawthorn ilama jackfruit kumquat lemon mango nectarine olive papaya quince raspberry strawberry tangerine ugli vanilla wolfberry xigua yew ziziphus)
COUNT=20
DICT="/usr/share/dict/words"
WORDS=($(grep "^[a-z]\{6\}$" "$DICT" | shuf | head -n $COUNT))
mkdir -p {cur,new,tmp}
# create_email EPOCH WORD [TIME]
#
# Write one maildir message whose Date header is derived from the epoch
# timestamp EPOCH (seconds). WORD becomes the sender name/address local
# part. TIME defaults to noon GMT. About half of the messages land in
# cur/ flagged seen (:2,S); the rest land in new/ (unseen).
function create_email()
{
  # bug fix: the first positional used to be stored in a local named
  # 'SECONDS', which is a bash special variable whose value silently
  # drifts (elapsed shell runtime) after assignment -- renamed.
  local epoch="$1"
  local word="$2"
  local stamp="${3:-12:00:00} +0000 (GMT)"
  local date_hdr tidy file
  date_hdr=$(date -d "@$epoch" "+%a, %d %b %Y")
  tidy=$(date -d "@$epoch" "+%F")
  # coin flip: "seen" message in cur/ vs fresh message in new/
  if [ $((RANDOM%2)) -eq 1 ]; then
    file="cur/${epoch}.$RANDOM:2,S"
  else
    file="new/${epoch}.$RANDOM:2"
  fi
  cat > "$file" <<EOF
From: $word <$word@flatcap.org>
To: rich@flatcap.org
Subject: (date) $tidy $stamp
Date: $date_hdr $stamp
apple
banana
cherry
EOF
}
# One message per day for days 2..COUNT-1 back, each from a distinct word.
for (( day = 2; day < COUNT; day++ )); do
  sender=${WORDS[day]}
  when=$(date -d "$day days ago" "+%s")
  create_email "$when" "$sender"
done

# Re-draw a fresh batch of six-letter words for the hourly messages.
WORDS=($(grep "^[a-z]\{6\}$" "$DICT" | shuf | head -n 48))

# One message per hour for hours 1..47 back, timestamped on the hour.
for (( hour = 1; hour < 48; hour++ )); do
  sender=${WORDS[hour]}
  when=$(date -d "$hour hours ago" "+%s")
  at=$(date -d "@$when" "+%H:00:00")
  create_email "$when" "$sender" "$at"
done
| true
|
dce954e966aa37c982efdba58bed1ee41566d1c3
|
Shell
|
robdew/fieldcam
|
/take_pic.sh
|
UTF-8
| 1,616
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
###############TAKEPIC.SH
# Take a photo with the Pi camera during daylight hours, stamp it with the
# current date, and upload it to the club web server.
# (bug fix: the shebang must be the first line of the file to take effect)
#
# Historical capture alternatives, kept for reference:
# sudo fswebcam --log ~/camlog.txt -d /dev/video0 -r 1600x1200 pics/`date '+%Y.%m.%d-%H.%M.%S'`.jpg
# sudo fswebcam --log ~/camlog.txt -d /dev/video0 pics/`date '+%Y.%m.%d-%H.%M.%S'`.jpg
# sudo raspistill -w 1024 -h 768 -o pics/`date '+%Y.%m.%d-%H.%M.%S'`.jpg
# sudo raspistill -o pics/`date '+%Y.%m.%d-%H.%M.%S'`.jpg
# sudo raspistill -w 1024 -h 768 -o latest.jpg
#
# In Lawrence, Kansas the longest day is 5/21:
#   Sunrise: 5:56   Sunset: 20:50
# Capture whenever the hour is between 05 and 22 inclusive.
#
# cleanup: make sure the work directory exists, then drop stale outputs
mkdir -p ~/pics
rm -f ~/pics/latest.jpg ~/pics/snap1.bmp

# Current hour; force base 10 in the test so "08"/"09" are never parsed
# as invalid octal numbers.
hour=$(date +%H)
if (( 10#$hour >= 5 && 10#$hour <= 22 )); then
    # take a pic; -x writes EXIF data into it, -n suppresses the preview
    # sudo raspistill -w 1024 -h 768 -x -n -e bmp -o ~/pics/snap1.bmp
    sudo raspistill -w 1600 -h 1200 -x -n -e bmp -o ~/pics/snap1.bmp
    # convert the pic to a jpg and annotate the corner with a datestamp
    convert ~/pics/snap1.bmp -pointsize 40 -gravity NorthEast -annotate 0 "$(date)" ~/pics/latest.jpg
    # make the outputs owned by the 'pi' user
    sudo chown pi ~/pics/*
    # rsync/ssh upload alternatives kept for reference:
    # rsync --progress -azh -e "ssh -p 21098 -i /home/dewey/.ssh/dewey-picam_rsa" ~/pics/latest.jpg robdhuus@robdewhirst.com:~/public_html/cam/latest.jpg
    # ssh -p 21098 -i /home/dewey/.ssh/dewey-picam_rsa robdhuus@robdewhirst.com "cp ~/public_html/cam/latest.jpg ~/public_html/cam/old/`date '+%Y.%m.%d-%H.%M.%S'`.jpg"
    #
    # send a copy to the JMM webserver
    # NOTE(review): $username and $password are never set in this script --
    # presumably exported by the calling environment (cron); confirm.
    ncftpput -u $username -p $password jayhawkmodelmasters.com images/webcam /home/pi/pics/latest.jpg
fi
#######################################
| true
|
c7b1652d475be85431df15a3415c701410f23174
|
Shell
|
javierjulio/laptop
|
/dotfiles/bin/repeat-until-exit
|
UTF-8
| 586
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# repeat-until-exit -- run a command repeatedly until it exits non-zero
# or max_count iterations have completed (useful for shaking out flaky
# tests).

set -e

count=0
max_count=500

if [ "$#" -eq 0 ]; then
  echo "Usage: repeat-until-exit <command>"
  echo ""
  echo "Run a given command repeatedly until it exits or ${max_count} iterations reached."
  echo ""
  echo "Example:"
  echo "  repeat-until-exit bin/rspec spec/requests/users_spec.rb:9"
  exit 1
fi

while "$@"
do
  # bug fix: '(( count++ ))' evaluates to the pre-increment value, so it
  # returns status 1 when count is 0 and aborts the whole script under
  # 'set -e' on the very first iteration; a plain assignment always
  # succeeds.
  count=$((count + 1))
  echo
  echo "Run $count finished."
  echo "--------------------"
  [ "$count" -eq "$max_count" ] && break
done

echo "Exited after $count times."

if [ "$count" -eq "$max_count" ]; then
  echo "Max iteration of $max_count reached."
fi
| true
|
98813190f5491e0fabcfa6558b99135feb5b7659
|
Shell
|
Enterprise-Group-Ltd/aws-autoscaling-suspend-resume
|
/aws-asg-suspend-resume.sh
|
UTF-8
| 70,760
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
#
#
# ------------------------------------------------------------------------------------
#
# MIT License
#
# Copyright (c) 2017 Enterprise Group, Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------------
#
# File: aws-asg-suspend-resume.sh
#
script_version=1.0.72
#
# Dependencies:
# - bash shell
# - jq - JSON wrangler https://stedolan.github.io/jq/
# - AWS CLI tools (pre-installed on AWS AMIs)
# - AWS CLI profile with IAM permissions for the following AWS CLI commands:
# * autoscaling describe-auto-scaling-groups
# * autoscaling suspend-processes
# * autoscaling resume-processes
# * sts get-caller-identity
#
#
# Tested on:
# Windows Subsystem for Linux (WSL)
# OS Build: 15063.540
# bash.exe version: 10.0.15063.0
# Ubuntu 16.04
# GNU bash, version 4.3.48(1)
# jq 1.5-1-a5b5cbe
# aws-cli/1.11.134 Python/2.7.12 Linux/4.4.0-43-Microsoft botocore/1.6.1
#
# AWS EC2
# Amazon Linux AMI release 2017.03
# Linux 4.9.43-17.38.amzn1.x86_64
# GNU bash, version 4.2.46(2)
# jq-1.5
# aws-cli/1.11.133 Python/2.7.12 Linux/4.9.43-17.38.amzn1.x86_64 botocore/1.6.0
#
#
# By: Douglas Hackney
# https://github.com/dhackney
#
# Type: AWS utility
# Description:
# This shell script suspends or resumes autoscaling group processes
#
#
# Roadmap:
# * add -r region
# * add -r all regions
#
#
#
###############################################################################
#
# set the environmental variables
#
set -o pipefail
#
###############################################################################
#
# initialize the script variables
#
# every global used later by the script is pre-declared here with an
# empty / zero default so later references are never unset
#
asg_modify_action=""
asg_modify_action_name=""
autoscaling_group_name=""
choices=""
cli_profile=""
count_asg_modify_process=0
count_cli_profile=0
count_error_lines=0
count_script_version_length=0
count_text_header_length=0
count_text_block_length=0
count_text_width_menu=0
count_text_width_header=0
count_text_side_length_menu=0
count_text_side_length_header=0
count_text_bar_menu=0
count_text_bar_header=0
count_this_file_tasks=0
counter_asg_modify_process=0
counter_report=0
counter_this_file_tasks=0
# bug fix: this line used 'date_file=="$(...)"' (double equals), which
# stored a literal leading '=' in the value; single '=' assigns correctly
date_file="$(date +"%Y-%m-%d-%H%M%S")"
date_now="$(date +"%Y-%m-%d-%H%M%S")"
_empty=""
_empty_task=""
_empty_task_sub=""
error_line_aws=""
error_line_pipeline=""
feed_write_log=""
filebytes_asg_list_asg_modify_process_txt=0
_fill=""
_fill_task=""
_fill_task_sub=""
full_path=""
let_done=""
let_done_task=""
let_done_task_sub=""
let_left=""
let_left_task=""
let_left_task_sub=""
let_progress=""
let_progress_task=""
let_progress_task_sub=""
list_asg_modify_process=""
logging=""
parameter1=""
# NOTE(review): 'paramter2' is misspelled; kept as-is because code outside
# this view may reference the misspelled name -- TODO confirm before renaming
paramter2=""
# note: a duplicate 'text_header=""' initialization was removed here
text_header=""
text_bar_menu_build=""
text_bar_header_build=""
text_side_menu=""
text_side_header=""
text_menu=""
text_menu_bar=""
text_header_bar=""
this_aws_account=""
this_aws_account_alias=""
this_file=""
this_log=""
thislogdate=""
this_log_file=""
this_log_file_errors=""
this_log_file_errors_full_path=""
this_log_file_full_path=""
this_log_temp_file_full_path=""
this_path=""
this_summary_report=""
this_summary_report_full_path=""
this_user=""
verbose=""
write_path=""
#
###############################################################################
#
#
# load the baseline variables
#
# identity of this utility and of the current run; used to build the names
# of the work, log and report files
this_utility_acronym="asg"
this_utility_filename_plug="asg-process"
this_path="$(pwd)"
this_file="$(basename "$0")"
full_path="${this_path}/${this_file}"
this_log_temp_file_full_path="${this_path}/aws-${this_utility_filename_plug}-log-temp.log"
this_user="$(whoami)"
date_file="$(date +"%Y-%m-%d-%H%M%S")"
# count the task markers embedded in this script; this drives the overall
# progress bar
count_this_file_tasks="$(grep -c "\-\-\- begin\: " "$full_path")"
counter_this_file_tasks=0
logging="n"
#
###############################################################################
#
# initialize the temp log file
#
echo "" > "$this_log_temp_file_full_path"
#
#
##############################################################################################################33
# Function definition begin
##############################################################################################################33
#
#
# Functions definitions
#
#######################################################################
#
#
# function to display the usage
#
# Print the usage / help text to the console and exit non-zero.
# Called for -h and when parameter validation fails.
function fnUsage()
{
    echo ""
    echo " ---------------------------------- AWS Autoscale Suspend / Resume utility usage -------------------------------------"
    echo ""
    echo " This utility suspends or resumes Autoscaling Group processes "
    echo ""
    echo " This script will: "
    echo " * Suspend the processes in one or more Autoscaling Group(s) "
    echo " * Resume the processes in one or more Autoscaling Group(s) "
    echo ""
    echo "----------------------------------------------------------------------------------------------------------------------"
    echo ""
    echo " usage:"
    echo " aws-asg-suspend-resume.sh -a u -n all -p myAWSCLIprofile "
    echo ""
    echo " Optional parameters: -b y -g y "
    echo ""
    echo " Where: "
    echo " -a - Modification action to apply - suspend or resume autoscaling. Enter s for suspend or u for resume. "
    echo " Example: -a u "
    echo ""
    echo " -n - Name of the Autoscaling Group(s) to suspend or resume. Enter partial text to match similar "
    echo " Autoscaling Group names. Enter 'all' for all Autoscaling Groups."
    echo " Example: -n myAutoscalingGroupName "
    echo " Example: -n myAuto "
    echo " Example: -n all "
    echo ""
    echo " -p - Name of the AWS CLI cli_profile (i.e. what you would pass to the --profile parameter in an AWS CLI command)"
    echo " Example: -p myAWSCLIprofile "
    echo ""
    echo " -b - Verbose console output. Set to 'y' for verbose console output. Note: verbose mode can be slow."
    echo " Example: -b y "
    echo ""
    echo " -g - Logging on / off. Default is off. Set to 'y' to create a debug log. Note: logging mode can be slower. "
    echo " Example: -g y "
    echo ""
    echo " -h - Display this message"
    echo " Example: -h "
    echo ""
    # bug fix: this line said '---version' (three dashes) while its own
    # example below correctly shows '--version'
    echo " --version - Display the script version"
    echo " Example: --version "
    echo ""
    echo ""
    exit 1
}
#
#######################################################################
#
#
# function to echo the progress bar to the console
#
# source: https://stackoverflow.com/questions/238073/how-to-add-a-progress-bar-to-a-shell-script
#
# 1. Create ProgressBar function
# 1.1 Input is currentState($1) and totalState($2)
# Render the overall (whole-script) progress bar in place.
#   $1 - current step count, $2 - total step count
# Side effects: sets globals _progress, _done, _left, _fill, _empty.
function fnProgressBar()
{
    # percentage complete, then bar geometry (bar is 40 characters wide)
    _progress=$(( ( ${1} * 100 / ${2} * 100 ) / 100 ))
    _done=$(( ( _progress * 4 ) / 10 ))
    _left=$(( 40 - _done ))
    # build space-padded strings of the computed widths
    _fill="$(printf "%${_done}s")"
    _empty="$(printf "%${_left}s")"
    #
    # substitute '#' for the done portion and '-' for the remainder, then
    # redraw over the current console line via the leading carriage return
    # Output example:  Progress : [########################################] 100%
    printf "\r Overall Progress : [${_fill// /#}${_empty// /-}] ${_progress}%%"
}
#
#######################################################################
#
#
# function to update the task progress bar
#
# source: https://stackoverflow.com/questions/238073/how-to-add-a-progress-bar-to-a-shell-script
#
# 1. Create ProgressBar function
# 1.1 Input is currentState($1) and totalState($2)
# Render the per-task progress bar in place.
#   $1 - current step count, $2 - total step count
# Side effects: sets globals _progress_task, _done_task, _left_task,
# _fill_task, _empty_task.
function fnProgressBarTask()
{
    # percentage complete, then bar geometry (bar is 40 characters wide)
    _progress_task=$(( ( ${1} * 100 / ${2} * 100 ) / 100 ))
    _done_task=$(( ( _progress_task * 4 ) / 10 ))
    _left_task=$(( 40 - _done_task ))
    # build space-padded strings of the computed widths
    _fill_task="$(printf "%${_done_task}s")"
    _empty_task="$(printf "%${_left_task}s")"
    #
    # substitute '#' for the done portion and '-' for the remainder, then
    # redraw over the current console line via the leading carriage return
    # Output example:  Progress : [########################################] 100%
    printf "\r Task Progress : [${_fill_task// /#}${_empty_task// /-}] ${_progress_task}%%"
}
#
#######################################################################
#
#
# function to update the subtask progress bar
#
# source: https://stackoverflow.com/questions/238073/how-to-add-a-progress-bar-to-a-shell-script
#
# 1. Create ProgressBar function
# 1.1 Input is currentState($1) and totalState($2)
# Render the sub-task progress bar in place.
#   $1 - current step count, $2 - total step count
# Side effects: sets globals _progress_task_sub, _done_task_sub,
# _left_task_sub, _fill_task_sub, _empty_task_sub.
function fnProgressBarTaskSub()
{
    # percentage complete, then bar geometry (bar is 40 characters wide)
    _progress_task_sub=$(( ( ${1} * 100 / ${2} * 100 ) / 100 ))
    _done_task_sub=$(( ( _progress_task_sub * 4 ) / 10 ))
    _left_task_sub=$(( 40 - _done_task_sub ))
    # build space-padded strings of the computed widths
    _fill_task_sub="$(printf "%${_done_task_sub}s")"
    _empty_task_sub="$(printf "%${_left_task_sub}s")"
    #
    # substitute '#' for the done portion and '-' for the remainder, then
    # redraw over the current console line via the leading carriage return
    # Output example:  Progress : [########################################] 100%
    printf "\r Sub-Task Progress : [${_fill_task_sub// /#}${_empty_task_sub// /-}] ${_progress_task_sub}%%"
}
#
#######################################################################
#
#
# function to display the task progress bar on the console
#
# parameter 1 = counter
# paramter 2 = count
#
# Frame the task progress bar between horizontal rules on the console.
#   $1 - current task step, $2 - total task steps
function fnProgressBarTaskDisplay()
{
    fnWriteLog ${LINENO} level_0 " ---------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} level_0 ""
    # draw the bar itself
    fnProgressBarTask "$1" "$2"
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " ---------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} level_0 ""
}
#
#######################################################################
#
#
# function to display the task progress bar on the console
#
# parameter 1 = counter
# paramter 2 = count
#
# Frame the sub-task progress bar between horizontal rules on the console.
#   $1 - current sub-task step, $2 - total sub-task steps
function fnProgressBarTaskSubDisplay()
{
    fnWriteLog ${LINENO} level_0 " ---------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} level_0 ""
    # draw the bar itself
    fnProgressBarTaskSub "$1" "$2"
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " ---------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} level_0 ""
}
#
#######################################################################
#
#
# function to echo the header to the console
#
# Clear the screen and repaint the utility banner: title line, overall
# progress bar, and header rule. Reads globals text_header,
# text_header_bar and the task counters.
function fnHeader()
{
    clear
    fnWriteLog ${LINENO} level_0 ""
    # lines without the 'level_0' tag are echoed to the console only in
    # verbose mode (see fnOutputConsole); they always go to the log when
    # logging is enabled
    fnWriteLog ${LINENO} "--------------------------------------------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} "--------------------------------------------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 "$text_header"
    fnWriteLog ${LINENO} level_0 ""
    # overall progress across all tasks in this script
    fnProgressBar ${counter_this_file_tasks} ${count_this_file_tasks}
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 "$text_header_bar"
    fnWriteLog ${LINENO} level_0 ""
}
#
#######################################################################
#
#
# function to echo to the console and write to the log file
#
# Central logging entry point: echo a line to the console and, when
# logging is enabled, mirror it to the log.
#   $1 - caller's line number; $2.. - message words ('level_0' in $2
#   marks a line that must be shown even in non-verbose mode)
function fnWriteLog()
{
    # disable field splitting so multi-word arguments pass through intact
    IFS=
    # always hand the line to the console writer
    fnOutputConsole "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9"
    # mirror to the log only when logging is enabled ('y') or forced
    # on during startup ('z')
    case "$logging" in
        y|z)
            fnOutputLog "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9"
            ;;
    esac
    # restore default word splitting
    unset IFS
}
#
#######################################################################
#
#
# function to echo to the console
#
# Echo one log line to the console.
#   $1 - caller's line number; $2.. - message words ('level_0' in $2 marks
#   a line that must be shown even in non-verbose mode)
# In verbose mode everything is echoed, prefixed with the line number;
# otherwise only 'level_0' lines are shown, without the tag or number.
# Note: the caller (fnWriteLog) clears IFS first, so the deliberately
# unquoted expansions below do not undergo word splitting.
function fnOutputConsole()
{
    #
    # console output section
    #
    # test for verbose
    if [ "$verbose" = "y" ] ;
    then
        # if verbose console output then
        # echo everything to the console
        #
        # strip the leading 'level_0'
        if [ "$2" = "level_0" ] ;
        then
            # if the line is tagged for display in non-verbose mode
            # then echo the line to the console without the leading 'level_0'
            echo " Line: "$1" "$3" "$4" "$5" "$6" "$7" "$8" "$9""
        else
            # if a normal line echo all to the console
            echo " Line: "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9""
        fi
    else
        # test for minimum console output
        if [ "$2" = "level_0" ] ;
        then
            # echo ""
            # echo "console output no -v: the logic test for level_0 was true"
            # echo ""
            # if the line is tagged for display in non-verbose mode
            # then echo the line to the console without the leading 'level_0'
            echo " "$3" "$4" "$5" "$6" "$7" "$8" "$9""
        fi
    fi
    #
    #
}
#
#######################################################################
#
#
# function to write to the log file
#
# Append one timestamped line to the in-memory log accumulator 'this_log'.
# The accumulator is flushed to disk by fnWriteLogTempFile / fnWriteLogFile.
#   $1 - caller's line number; $2.. - message words ('level_0' in $2 is a
#   display tag, not content, and is stripped here as well)
function fnOutputLog()
{
    # log output section
    #
    # load the timestamp
    thislogdate="$(date +"%Y-%m-%d-%H:%M:%S")"
    #
    # ----------------------------------------------------------
    #
    # normal logging
    #
    # append the line to the log variable
    # the variable is written to the log file on exit by function fnWriteLogFile
    #
    # if the script is crashing then comment out this section and enable the
    # section below "use this logging for debug"
    #
    if [ "$2" = "level_0" ] ;
    then
        # if the line is tagged for logging in non-verbose mode
        # then write the line to the log without the leading 'level_0'
        this_log+="$(echo "${thislogdate} Line: "$1" "$3" "$4" "$5" "$6" "$7" "$8" "$9"" 2>&1)"
    else
        # if a normal line write the entire set to the log
        this_log+="$(echo "${thislogdate} Line: "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9"" 2>&1)"
    fi
    #
    # append the new line
    # do not quote the following variable: $'\n'
    this_log+=$'\n'
    #
    #
    # ---------------------------------------------------------
    #
    # 'use this for debugging' - debug logging
    #
    # if the script is crashing then enable this logging section and
    # comment out the prior logging into the 'this_log' variable
    #
    # note that this form of logging is VERY slow
    #
    # write to the log file with a prefix timestamp
    # echo "${thislogdate} Line: "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9"" 2>&1 >> "$this_log_file_full_path"
    #
    #
}
#
#######################################################################
#
#
# function to append the log variable to the temp log file
#
# Flush the in-memory log accumulator 'this_log' to the temp log file and
# reset the accumulator (keeps memory bounded during long runs).
function fnWriteLogTempFile()
{
    fnWriteLog ${LINENO} ""
    fnWriteLog ${LINENO} "Appending the log variable to the temp log file"
    fnWriteLog ${LINENO} ""
    echo "$this_log" >> "$this_log_temp_file_full_path"
    # empty the temp log variable
    this_log=""
}
#
#######################################################################
#
#
# function to write log variable to the log file
#
# Final log flush: push the remaining in-memory log into the temp log
# file, append the temp log onto the permanent log file, terminate the
# log, and delete the temp file.
function fnWriteLogFile()
{
    # append the temp log file onto the log file
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} "Writing temp log to log file"
    fnWriteLog ${LINENO} "Value of variable 'this_log_temp_file_full_path': "
    fnWriteLog ${LINENO} "$this_log_temp_file_full_path"
    fnWriteLog ${LINENO} ""
    fnWriteLog ${LINENO} "Value of variable 'this_log_file_full_path': "
    fnWriteLog ${LINENO} "$this_log_file_full_path"
    fnWriteLog ${LINENO} level_0 ""
    # write the contents of the variable to the temp log file
    fnWriteLogTempFile
    cat "$this_log_temp_file_full_path" >> "$this_log_file_full_path"
    echo "" >> "$this_log_file_full_path"
    echo "Log end" >> "$this_log_file_full_path"
    # delete the temp log file
    rm -f "$this_log_temp_file_full_path"
}
#
##########################################################################
#
#
# function to delete the work files
#
# Remove the utility's scratch files (./asg-* and ./asg_*) unless running
# in verbose mode, in which case they are preserved for inspection.
# Also removes the error log file when it is effectively empty
# (fewer than 3 lines).
function fnDeleteWorkFiles()
{
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} ""
    fnWriteLog ${LINENO} "in delete work files "
    fnWriteLog ${LINENO} "value of variable 'verbose': "$verbose" "
    fnWriteLog ${LINENO} ""
    if [ "$verbose" != "y" ] ;
    then
        # if not verbose console output then delete the work files
        fnWriteLog ${LINENO} ""
        fnWriteLog ${LINENO} "In non-verbose mode: Deleting work files"
        fnWriteLog ${LINENO} ""
        # capture rm diagnostics (if any) so they can be logged
        feed_write_log="$(rm -f ./"$this_utility_acronym"-* 2>&1)"
        fnWriteLog ${LINENO} "$feed_write_log"
        feed_write_log="$(rm -f ./"$this_utility_acronym"_* 2>&1)"
        fnWriteLog ${LINENO} "$feed_write_log"
        fnWriteLog ${LINENO} ""
        fnWriteLog ${LINENO} "value of variable 'this_log_file_full_path' "$this_log_file_full_path" "
        fnWriteLog ${LINENO} "$feed_write_log"
        fnWriteLog ${LINENO} ""
        #
        # if no errors, then delete the error log file
        count_error_lines="$(cat "$this_log_file_errors_full_path" | wc -l)"
        if (( "$count_error_lines" < 3 ))
        then
            rm -f "$this_log_file_errors_full_path"
        fi
    else
        # in verbose mode so preserve the work files
        fnWriteLog ${LINENO} level_0 ""
        fnWriteLog ${LINENO} level_0 "In verbose mode: Preserving work files "
        fnWriteLog ${LINENO} level_0 ""
        fnWriteLog ${LINENO} level_0 "work files are here: "$this_path" "
        fnWriteLog ${LINENO} level_0 ""
    fi
}
#
##########################################################################
#
#
# function to log non-fatal errors
#
# Record a non-fatal error: echo the current 'feed_write_log' message to
# the console (via fnWriteLog) and append the same block to the error
# log file. Does not exit.
function fnErrorLog()
{
    fnWriteLog ${LINENO} level_0 "-----------------------------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " Error message: "
    fnWriteLog ${LINENO} level_0 " "$feed_write_log" "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 "-----------------------------------------------------------------------------------------------------"
    echo "-----------------------------------------------------------------------------------------------------" >> "$this_log_file_errors_full_path"
    echo "" >> "$this_log_file_errors_full_path"
    echo " Error message: " >> "$this_log_file_errors_full_path"
    echo " "$feed_write_log"" >> "$this_log_file_errors_full_path"
    echo "" >> "$this_log_file_errors_full_path"
    echo "-----------------------------------------------------------------------------------------------------" >> "$this_log_file_errors_full_path"
}
#
##########################################################################
#
#
# function to handle command or pipeline errors
#
# Fatal handler for a failed command or pipeline: report the script line
# number held in global 'error_line_pipeline', flush the logs, and exit 1.
function fnErrorPipeline()
{
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " System Error while running the previous command or pipeline "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " Please check the error message above "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " Error at script line number: "$error_line_pipeline" "
    fnWriteLog ${LINENO} level_0 ""
    # only mention the log file location when a log is actually written
    if [[ "$logging" == "y" ]] ;
    then
        fnWriteLog ${LINENO} level_0 " The log will also show the error message and other environment, variable and diagnostic information "
        fnWriteLog ${LINENO} level_0 ""
        fnWriteLog ${LINENO} level_0 " The log is located here: "
        fnWriteLog ${LINENO} level_0 " "$this_log_file_full_path""
    fi
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " Exiting the script"
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 "-----------------------------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} level_0 ""
    # append the temp log onto the log file
    fnWriteLogTempFile
    # write the log variable to the log file
    fnWriteLogFile
    exit 1
}
#
##########################################################################
#
#
# function for AWS CLI errors
#
# Fatal handler for AWS CLI command failures: report the script line
# number held in global 'error_line_aws', flush the logs, and exit 1.
function fnErrorAws()
{
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " AWS Error while executing AWS CLI command"
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " Please check the AWS error message above "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " Error at script line number: "$error_line_aws" "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " The log will also show the AWS error message and other diagnostic information "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " The log is located here: "
    fnWriteLog ${LINENO} level_0 " "$this_log_file_full_path""
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " Exiting the script"
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 "--------------------------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} level_0 ""
    # append the temp log onto the log file
    fnWriteLogTempFile
    # write the log variable to the log file
    fnWriteLogFile
    exit 1
}
#
##########################################################################
#
#
# function for jq errors
#
# Fatal handler for jq JSON-processing failures: report the script line
# number held in global 'error_line_jq', flush the logs, and exit 1.
function fnErrorJq()
{
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " Error at script line number: "$error_line_jq" "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " There was a jq error while processing JSON "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " Please check the jq error message above "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " The log will also show the jq error message and other diagnostic information "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " The log is located here: "
    fnWriteLog ${LINENO} level_0 " "$this_log_file_full_path""
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " Exiting the script"
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 "--------------------------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} level_0 ""
    # append the temp log onto the log file
    fnWriteLogTempFile
    # write the log variable to the log file
    fnWriteLogFile
    exit 1
}
#
##########################################################################
#
#
# function to increment the ASG modify process counter
#
# Increment the global ASG-modify-process counter
# 'counter_asg_modify_process' by one and log the new value.
function fnCounterIncrementAsgModifyProcess()
{
    #
    fnWriteLog ${LINENO} ""
    fnWriteLog ${LINENO} "increment the ASG modify process counter: 'counter_asg_modify_process'"
    counter_asg_modify_process="$((counter_asg_modify_process+1))"
    fnWriteLog ${LINENO} "post-increment value of variable 'counter_asg_modify_process': "$counter_asg_modify_process" "
    fnWriteLog ${LINENO} ""
    #
}
#
##########################################################################
#
#
# function to increment the task counter
#
# Increment the global task counter 'counter_this_file_tasks' (drives
# the overall progress bar) and log it alongside the task total.
function fnCounterIncrementTask()
{
    fnWriteLog ${LINENO} ""
    fnWriteLog ${LINENO} "increment the task counter"
    counter_this_file_tasks="$((counter_this_file_tasks+1))"
    fnWriteLog ${LINENO} "value of variable 'counter_this_file_tasks': "$counter_this_file_tasks" "
    fnWriteLog ${LINENO} "value of variable 'count_this_file_tasks': "$count_this_file_tasks" "
    fnWriteLog ${LINENO} ""
}
#
##########################################################################
#
#
# function to increment the report counter
#
fnCounterIncrementReport() {
    # Bump the global report-line counter by one,
    # logging the value after the increment.
    fnWriteLog ${LINENO} ""
    fnWriteLog ${LINENO} "increment the report counter: 'counter_report'"
    counter_report=$(( counter_report + 1 ))
    fnWriteLog ${LINENO} "post-increment value of variable 'counter_report': "$counter_report" "
    fnWriteLog ${LINENO} ""
}
#
##############################################################################################################33
# Function definition end
##############################################################################################################33
#
#
###########################################################################################################################
#
#
# enable logging to capture initial segments
#
# NOTE(review): "z" appears to be a startup sentinel so fnWriteLog captures
# output until the real -g logging flag is parsed further below; fnWriteLog
# is defined earlier in the file -- confirm its handling of the "z" value
logging="z"
#
###########################################################################################################################
#
#
# build the menu and header text line and bars
#
# base title text; the script version is appended when the lines are assembled
text_header='Autoscaling Group Suspend / Resume Utility v'
count_script_version_length=${#script_version}
count_text_header_length=${#text_header}
count_text_block_length=$(( count_script_version_length + count_text_header_length ))
# fixed display widths (characters) for the menu and header banners
count_text_width_menu=104
count_text_width_header=83
# length of the dash run on each side of the centered title block
count_text_side_length_menu=$(( (count_text_width_menu - count_text_block_length) / 2 ))
count_text_side_length_header=$(( (count_text_width_header - count_text_block_length) / 2 ))
# full-width bar: both side runs plus the title block plus two separator spaces
count_text_bar_menu=$(( (count_text_side_length_menu * 2) + count_text_block_length + 2 ))
count_text_bar_header=$(( (count_text_side_length_header * 2) + count_text_block_length + 2 ))
# source and explanation for the following use of printf is here: https://stackoverflow.com/questions/5799303/print-a-character-repeatedly-in-bash
text_bar_menu_build="$(printf '%0.s-' $(seq 1 "$count_text_bar_menu") )"
text_bar_header_build="$(printf '%0.s-' $(seq 1 "$count_text_bar_header") )"
text_side_menu="$(printf '%0.s-' $(seq 1 "$count_text_side_length_menu") )"
text_side_header="$(printf '%0.s-' $(seq 1 "$count_text_side_length_header") )"
# assemble the final banner strings used by fnHeader and the opening screens
text_menu="$(echo "$text_side_menu"" ""$text_header""$script_version"" ""$text_side_menu")"
text_menu_bar="$(echo "$text_bar_menu_build")"
# note: text_header is intentionally overwritten here with the fully built line
text_header="$(echo " ""$text_side_header"" ""$text_header""$script_version"" ""$text_side_header")"
text_header_bar="$(echo " ""$text_bar_header_build")"
#
###########################################################################################################################
#
#
# display initializing message
#
# show the static "please wait" splash screen while parameters are
# validated and the AWS CLI is queried
#
clear
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "$text_header"
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 " This utility suspends or resumes AWS AutoScaling Group processes "
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 " This script will: "
fnWriteLog ${LINENO} level_0 " - Suspend or resume the processes for Autoscaling Groups "
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "$text_header_bar"
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 " Please wait "
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 " Checking the input parameters and initializing the app "
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 " Depending on connection speed and AWS API response, this can take "
fnWriteLog ${LINENO} level_0 " from a few seconds to a few minutes "
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 " Status messages and opening menu will appear below"
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "$text_header_bar"
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 ""
#
#
###################################################
#
#
# check command line parameters
# check for -h
#
# -h bypasses getopts so help works even with no other parameters
if [[ "$1" = "-h" ]] ; then
    clear
    fnUsage
fi
#
###################################################
#
#
# check command line parameters
# check for --version
#
# --version also bypasses getopts; prints the version and exits 0
if [[ "$1" = "--version" ]] ;
    then
        clear
        echo ""
        echo "'AWS Autoscaling Group Suspend / Resume' utility script version: "$script_version" "
        echo ""
        exit
fi
#
###################################################
#
#
# check command line parameters
# if fewer than 6 arguments (-p, -a, -n each with a value), display the usage
#
if [[ "$#" -lt 6 ]] ; then
    clear
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 "-------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " ERROR: You did not enter all of the required parameters " 
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " You must provide values for all three parameters: -p -a -n "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " Example: "$0" -p MyProfileName -a s -n myAutoscalingGroupName "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 "-------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} level_0 ""
    fnUsage
fi
#
###################################################
#
#
# check command line parameters
# if too many parameters (more than 5 flag/value pairs), then display the error message and useage
#
if [[ "$#" -gt 10 ]] ; then
    clear
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 "-------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " ERROR: You entered too many parameters" 
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " You must provide only one value for all parameters: -p -a -n -b -g "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 " Example: "$0" -p MyProfileName -a r -n all -b y -g y "
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 "-------------------------------------------------------------------------------"
    fnWriteLog ${LINENO} level_0 ""
    fnUsage
fi
#
###################################################
#
#
# parameter values
#
# debug-only dump of the raw positional parameters before getopts runs
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "value of variable '@': "$@" "
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "value of parameter '1' "$1" "
fnWriteLog ${LINENO} "value of parameter '2' "$2" "
fnWriteLog ${LINENO} "value of parameter '3' "$3" "
fnWriteLog ${LINENO} "value of parameter '4' "$4" "
fnWriteLog ${LINENO} "value of parameter '5' "$5" "
fnWriteLog ${LINENO} "value of parameter '6' "$6" "
#
###################################################
#
#
# load the main loop variables from the command line parameters
#
# flags: -p profile, -a action (s/r), -n ASG name or 'all',
#        -b verbose (y/n), -g logging (y/n), -h help
while getopts "p:a:n:b:g:h" opt;
    do
        #
        fnWriteLog ${LINENO} ""
        fnWriteLog ${LINENO} "value of variable '@': "$@" "
        fnWriteLog ${LINENO} "value of variable 'opt': "$opt" "
        fnWriteLog ${LINENO} "value of variable 'OPTIND': "$OPTIND" "
        fnWriteLog ${LINENO} ""
        #
        case "$opt" in
        p)
            cli_profile="$OPTARG"
            fnWriteLog ${LINENO} ""
            fnWriteLog ${LINENO} "value of -p 'cli_profile': "$cli_profile" "
        ;;
        a)
            asg_modify_action="$OPTARG"
            fnWriteLog ${LINENO} ""
            fnWriteLog ${LINENO} "value of -a 'asg_modify_action': "$asg_modify_action" "
        ;;
        n)
            autoscaling_group_name="$OPTARG"
            fnWriteLog ${LINENO} ""
            fnWriteLog ${LINENO} "value of -n 'autoscaling_group_name': "$autoscaling_group_name" "
        ;;
        b)
            verbose="$OPTARG"
            fnWriteLog ${LINENO} ""
            fnWriteLog ${LINENO} "value of -b 'verbose': "$verbose" "
        ;;
        g)
            logging="$OPTARG"
            fnWriteLog ${LINENO} ""
            fnWriteLog ${LINENO} "value of -g 'logging': "$logging" "
        ;;
        h)
            fnUsage
        ;;
        \?)
            # getopts signals an unknown flag with '?'; show the error and usage
            clear
            fnWriteLog ${LINENO} level_0 ""
            fnWriteLog ${LINENO} level_0 "---------------------------------------------------------------------"
            fnWriteLog ${LINENO} level_0 ""
            fnWriteLog ${LINENO} level_0 " ERROR: You entered an invalid option."
            fnWriteLog ${LINENO} level_0 ""
            fnWriteLog ${LINENO} level_0 " Invalid option: -"$OPTARG""
            fnWriteLog ${LINENO} level_0 ""
            fnWriteLog ${LINENO} level_0 ""
            fnWriteLog ${LINENO} level_0 "---------------------------------------------------------------------"
            fnWriteLog ${LINENO} level_0 ""
            fnUsage
        ;;
        esac
    done
#
###################################################
#
#
# check logging variable
#
#
###################################################
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "value of variable '@': "$@" "
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "value of variable 'logging': "$logging" "
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "if logging not enabled by parameter, then disabling logging "
# normalize the startup sentinel "z" (or any non-"y" value) to "n"
if [[ "$logging" != "y" ]] ;
    then
        logging="n"
fi
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "value of variable 'logging': "$logging" "
fnWriteLog ${LINENO} ""
#
# parameter values
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "value of variable 'cli_profile' "$cli_profile" "
fnWriteLog ${LINENO} "value of variable 'verbose' "$verbose" "
fnWriteLog ${LINENO} "value of variable 'logging' "$logging" "
#
###################################################
#
#
# NOTE: logging has already been normalized above (any value other
# than "y" was set to "n" in the post-getopts check), so the duplicate
# 'disable logging if not set by the -g parameter' test that used to
# live here was removed as redundant dead code.
#
#
###################################################
#
#
# check command line parameters
# check for valid AWS CLI profile
#
# The -p profile name must match exactly one line in the AWS CLI config
# file; otherwise display the available profiles and exit via fnUsage.
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "count the available AWS CLI profiles that match the -p parameter profile name "
# count config lines containing the profile name; grep reads the file
# directly (no useless cat), and '--' guards against a profile name
# that begins with a dash being parsed as a grep option
count_cli_profile="$(grep -c -- "$cli_profile" /home/"$this_user"/.aws/config)"
# if no match, then display the error message and the available AWS CLI profiles
if [[ "$count_cli_profile" -ne 1 ]]
    then
        clear
        fnWriteLog ${LINENO} level_0 ""
        fnWriteLog ${LINENO} level_0 "--------------------------------------------------------------------------"
        fnWriteLog ${LINENO} level_0 ""
        fnWriteLog ${LINENO} level_0 " ERROR: You entered an invalid AWS CLI profile: "$cli_profile" "
        fnWriteLog ${LINENO} level_0 ""
        fnWriteLog ${LINENO} level_0 " Available cli_profiles are:"
        # list the '[profile ...]' section headers from the config file
        cli_profile_available="$(grep "\[profile" /home/"$this_user"/.aws/config 2>&1)"
        #
        # check for command / pipeline error(s)
        # (the assignment's exit status is the command substitution's)
        if [ "$?" -ne 0 ]
            then
                #
                # set the command/pipeline error line number
                error_line_pipeline="$((${LINENO}-7))"
                #
                # call the command / pipeline error function
                fnErrorPipeline
                #
                #
        fi
        #
        fnWriteLog ${LINENO} "value of variable 'cli_profile_available': "$cli_profile_available ""
        feed_write_log="$(echo " "$cli_profile_available"" 2>&1)"
        fnWriteLog ${LINENO} level_0 "$feed_write_log"
        fnWriteLog ${LINENO} level_0 ""
        fnWriteLog ${LINENO} level_0 " To set up an AWS CLI profile enter: aws configure --profile profileName "
        fnWriteLog ${LINENO} level_0 ""
        fnWriteLog ${LINENO} level_0 " Example: aws configure --profile MyProfileName "
        fnWriteLog ${LINENO} level_0 ""
        fnWriteLog ${LINENO} level_0 "--------------------------------------------------------------------------"
        fnWriteLog ${LINENO} level_0 ""
        fnUsage
fi
#
#
###################################################
#
#
# pull the AWS account number
#
# NOTE(review): no error check on this aws call; a failed STS call
# leaves this_aws_account empty and is only surfaced later -- confirm
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "pulling AWS account"
this_aws_account="$(aws sts get-caller-identity --profile "$cli_profile" --output text --query 'Account')"
fnWriteLog ${LINENO} "value of variable 'this_aws_account': "$this_aws_account" "
fnWriteLog ${LINENO} ""
###################################################
#
#
# set the aws account dependent variables
#
# build the output directory and the log / error-log / summary-report
# file names from the account number, utility name, and run timestamp
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "setting the AWS account dependent variables"
#
write_path="$this_path"/aws-"$this_aws_account"-"$this_utility_filename_plug"-"$date_file"
this_log_file=aws-"$this_aws_account"-"$this_utility_filename_plug"-v"$script_version"-"$date_file"-debug.log
this_log_file_errors=aws-"$this_aws_account"-"$this_utility_filename_plug"-v"$script_version"-"$date_file"-errors.log
this_log_file_full_path="$write_path"/"$this_log_file"
this_log_file_errors_full_path="$write_path"/"$this_log_file_errors"
this_summary_report=aws-"$this_aws_account"-"$this_utility_filename_plug"-"$date_file"-summary-report.txt
this_summary_report_full_path="$write_path"/"$this_summary_report"
#
fnWriteLog ${LINENO} "value of variable 'write_path': "$write_path" "
fnWriteLog ${LINENO} "value of variable 'this_log_file': "$this_log_file" "
fnWriteLog ${LINENO} "value of variable 'this_log_file_errors': "$this_log_file_errors" "
fnWriteLog ${LINENO} "value of variable 'this_log_file_full_path': ""$this_log_file_full_path"" "
fnWriteLog ${LINENO} "value of variable 'this_log_file_errors_full_path': "$this_log_file_errors_full_path" "
fnWriteLog ${LINENO} "value of variable 'this_summary_report': "$this_summary_report" "
fnWriteLog ${LINENO} "value of variable 'this_summary_report_full_path': "$this_summary_report_full_path" "
fnWriteLog ${LINENO} ""
#
###################################################
#
#
# create the directories
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "creating write path directories "
feed_write_log="$(mkdir -p "$write_path" 2>&1)"
fnWriteLog ${LINENO} "$feed_write_log"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "status of write path directory "
# NOTE(review): 'ls -ld */ "$this_path"' lists subdirectories of the
# current working directory plus this_path -- possibly intended to be
# 'ls -ld "$write_path"'; verify the desired debug output
feed_write_log="$(ls -ld */ "$this_path" 2>&1)"
fnWriteLog ${LINENO} "$feed_write_log"
fnWriteLog ${LINENO} ""
#
###################################################
#
#
# pull the AWS account alias
#
# alias is used only for display in the menu and the summary report
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "pulling AWS account alias"
this_aws_account_alias="$(aws iam list-account-aliases --profile "$cli_profile" --output text --query 'AccountAliases' )"
fnWriteLog ${LINENO} "value of variable 'this_aws_account_alias': "$this_aws_account_alias" "
fnWriteLog ${LINENO} ""
#
###############################################################################
#
#
# Initialize the log file
#
# only create the debug log when -g y was given; the error log below
# is always created
if [[ "$logging" = "y" ]] ;
    then
        fnWriteLog ${LINENO} ""
        fnWriteLog ${LINENO} "initializing the log file "
        fnWriteLog ${LINENO} ""
        echo "Log start" > "$this_log_file_full_path"
        echo "" >> "$this_log_file_full_path"
        echo "This log file name: "$this_log_file"" >> "$this_log_file_full_path"
        echo "" >> "$this_log_file_full_path"
        #
        fnWriteLog ${LINENO} ""
        fnWriteLog ${LINENO} "contents of file:'$this_log_file_full_path' "
        feed_write_log="$(cat "$this_log_file_full_path" 2>&1)"
        fnWriteLog ${LINENO} "$feed_write_log"
        fnWriteLog ${LINENO} ""
        #
fi
#
###############################################################################
#
#
# Initialize the error log file
#
echo " Errors:" > "$this_log_file_errors_full_path"
echo "" >> "$this_log_file_errors_full_path"
#
###############################################################################
#
#
# Set the action name
#
# map the -a parameter to the AWS CLI verb used by the
# 'aws autoscaling <verb>-processes' command:
#   s        -> suspend
#   r (or u) -> resume
# ("u" is retained for backward compatibility; the usage examples in
# this script document "-a r" for resume)
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "setting the action name "
if [[ "$asg_modify_action" = "u" || "$asg_modify_action" = "r" ]] ;
    then
        asg_modify_action_name="resume"
        #
    elif [[ "$asg_modify_action" = 's' ]] ;
        then
            asg_modify_action_name="suspend"
            #
    else
        # invalid -a value: fail fast here rather than building a
        # malformed 'aws autoscaling -processes' command later
        clear
        fnWriteLog ${LINENO} level_0 ""
        fnWriteLog ${LINENO} level_0 " ERROR: You entered an invalid action: -a "$asg_modify_action" "
        fnWriteLog ${LINENO} level_0 ""
        fnUsage
fi
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "value of variable 'asg_modify_action_name': "$asg_modify_action_name" "
fnWriteLog ${LINENO} ""
#
#
#
#
###########################################################################################################################
#
#
# Begin checks and setup
#
#
#
###############################################################################
#
#
# Test the -n value for valid ASGs
#
# test -n section goes here
#
###############################################################################
#
#
# pull the list and number of ASGs to modify
#
#
# pull a list of the ASGs
#
# list_asg_modify_process accumulates one ASG name per line
fnWriteLog ${LINENO} "initializing the ASG variable 'list_asg_modify_process' "
list_asg_modify_process=""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "value of variable 'list_asg_modify_process':"
feed_write_log="$(echo "$list_asg_modify_process" 2>&1)"
fnWriteLog ${LINENO} "$feed_write_log"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "pulling the list of ASGs to change"
#
# NOTE(review): iterating over an unquoted command substitution relies on
# word-splitting; ASG names containing whitespace would split incorrectly
# -- assumed safe for the naming convention in use; confirm
for asg_name
    in $(aws autoscaling describe-auto-scaling-groups --profile "$cli_profile" --query 'AutoScalingGroups[*].AutoScalingGroupName' --output text) ;
    do
    #
    fnWriteLog ${LINENO} ""
    fnWriteLog ${LINENO} "----------------------- loop head: pull ASG names ----------------------- "
    fnWriteLog ${LINENO} ""
    #
    fnWriteLog ${LINENO} ""
    ## disabled for speed
    ## enable for debugging
    # fnWriteLog ${LINENO} "pre-append value of variable 'list_asg_modify_process':"
    # feed_write_log="$(echo "$list_asg_modify_process" 2>&1)"
    # fnWriteLog ${LINENO} "$feed_write_log"
    # fnWriteLog ${LINENO} ""
    fnWriteLog ${LINENO} "value of variable 'asg_name':"
    feed_write_log="$(echo "$asg_name" 2>&1)"
    fnWriteLog ${LINENO} "$feed_write_log"
    fnWriteLog ${LINENO} ""
    fnWriteLog ${LINENO} "appending variable 'asg_name' to variable 'list_asg_modify_process':"
    list_asg_modify_process+="${asg_name}"
    # do not quote the following variable: $'\n'
    list_asg_modify_process+=$'\n'
    ## disabled for speed
    ## enable for debugging
    # fnWriteLog ${LINENO} ""
    # fnWriteLog ${LINENO} "post-append value of variable 'list_asg_modify_process':"
    # feed_write_log="$(echo "$list_asg_modify_process" 2>&1)"
    # fnWriteLog ${LINENO} "$feed_write_log"
    fnWriteLog ${LINENO} ""
    #
    fnWriteLog ${LINENO} ""
    fnWriteLog ${LINENO} "----------------------- loop tail: pull ASG names ----------------------- "
    fnWriteLog ${LINENO} ""
    #
done
#
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------- done: pull ASG names ----------------------- "
fnWriteLog ${LINENO} ""
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "post-append, pre-filter value of variable 'list_asg_modify_process':"
feed_write_log="$(echo "$list_asg_modify_process" 2>&1)"
fnWriteLog ${LINENO} "$feed_write_log"
fnWriteLog ${LINENO} ""
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "filtering the list of ASGs to change if not all"
if [[ "$autoscaling_group_name" != "all" ]] ;
    then
        # matches the -n parameter anywhere in the ASG name
        # (pattern is quoted to prevent word-splitting/globbing of the
        # parameter; '--' guards a value that begins with a dash; the
        # former '.*' wrappers were redundant for line matching)
        list_asg_modify_process="$(echo "$list_asg_modify_process" | grep -- "$autoscaling_group_name" 2>&1)"
        fnWriteLog ${LINENO} ""
        fnWriteLog ${LINENO} "post-filtered value of variable 'list_asg_modify_process':"
        feed_write_log="$(echo "$list_asg_modify_process" 2>&1)"
        fnWriteLog ${LINENO} "$feed_write_log"
        fnWriteLog ${LINENO} ""
fi
#
# write the variable to a file
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "writing variable 'list_asg_modify_process' to file: 'asg_list_asg_modify_process.txt' "
feed_write_log="$(echo "$list_asg_modify_process" > asg_list_asg_modify_process.txt 2>&1)"
fnWriteLog ${LINENO} "$feed_write_log"
fnWriteLog ${LINENO} "contents of file: 'asg_list_asg_modify_process.txt' "
feed_write_log="$(cat asg_list_asg_modify_process.txt 2>&1)"
fnWriteLog ${LINENO} "$feed_write_log"
fnWriteLog ${LINENO} ""
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "counting the list of ASGs to change"
# test for empty file with only a line feed
# (NOTE: 'stat --printf' is GNU coreutils; BSD stat differs)
filebytes_asg_list_asg_modify_process_txt="$(stat --printf="%s" asg_list_asg_modify_process.txt )"
if [[ "$filebytes_asg_list_asg_modify_process_txt" -eq 1 ]] ;
    then
        count_asg_modify_process=0
    else
        # count the lines directly; no need to pipe through cat
        count_asg_modify_process="$(wc -l < asg_list_asg_modify_process.txt 2>&1 )"
fi
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "value of variable 'count_asg_names': "$count_asg_modify_process" "
fnWriteLog ${LINENO} ""
#
###################################################
#
#
# check for zero ASGs to modify
#
# abort via fnUsage when the -n filter matched no ASG names
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "checking for zero ASGs name match "
if [[ "$count_asg_modify_process" -eq 0 ]] ;
    then
        clear
        fnWriteLog ${LINENO} level_0 ""
        fnWriteLog ${LINENO} level_0 "-------------------------------------------------------------------------------------------------"
        fnWriteLog ${LINENO} level_0 ""
        fnWriteLog ${LINENO} level_0 " ERROR: No AutoScaling Group name matched parameter: -n "$autoscaling_group_name" "
        fnWriteLog ${LINENO} level_0 ""
        fnWriteLog ${LINENO} level_0 "-------------------------------------------------------------------------------------------------"
        fnWriteLog ${LINENO} level_0 ""
        fnUsage
fi
#
###################################################
#
#
# clear the console
#
clear
#
######################################################################################################################################################################
#
#
# Opening menu
#
#
######################################################################################################################################################################
#
#
# show the run summary (account, action, matched ASG count) and the
# no-undo warning before prompting the user to run or exit
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "$text_menu"
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 " Suspend / Resume AWS AutoScaling Group processes "
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "$text_menu_bar"
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "AWS account:............"$this_aws_account" "$this_aws_account_alias" "
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "Autoscaling Group Process modification: "$asg_modify_action_name" "
fnWriteLog ${LINENO} level_0 ""
if [[ "$autoscaling_group_name" != "all" ]] ;
    then
        fnWriteLog ${LINENO} level_0 "Autoscaling Group names matching or containing this text will be modified: "$autoscaling_group_name" "
    else
        fnWriteLog ${LINENO} level_0 "All Autoscaling Groups will be modified "
fi
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "Count of Autoscaling Groups to "$asg_modify_action_name": "$count_asg_modify_process" "
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "$text_menu_bar"
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "The process modification >>"$asg_modify_action_name"<< will be applied to the Autoscaling Groups "
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 " ###############################################"
fnWriteLog ${LINENO} level_0 " >> Note: There is no undo for this operation << "
fnWriteLog ${LINENO} level_0 " ###############################################"
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 " By running this utility script you are taking full responsibility for any and all outcomes"
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "Autoscaling Group Suspend / Resume utility"
fnWriteLog ${LINENO} level_0 "Run Utility Y/N Menu"
#
# Present a menu to allow the user to exit the utility and do the preliminary steps
#
# Menu code source: https://stackoverflow.com/questions/30182086/how-to-use-goto-statement-in-shell-script
#
# Define the choices to present to the user, which will be
# presented line by line, prefixed by a sequential number
# (E.g., '1) copy', ...)
choices=( 'Run' 'Exit' )
#
# Present the choices.
# The user chooses by entering the *number* before the desired choice.
select choice in "${choices[@]}"; do
#
    # If an invalid number was chosen, "$choice" will be empty.
    # Report an error and prompt again.
    [[ -n "$choice" ]] || { fnWriteLog ${LINENO} level_0 "Invalid choice." >&2; continue; }
    #
    # Examine the choice.
    # Note that it is the choice string itself, not its number
    # that is reported in "$choice".
    case "$choice" in
        Run)
                fnWriteLog ${LINENO} level_0 ""
                fnWriteLog ${LINENO} level_0 "Running Autoscaling Group Suspend / Resume utility"
                fnWriteLog ${LINENO} level_0 ""
                # Set flag here, or call function, ...
                ;;
        Exit)
        #
        #
                fnWriteLog ${LINENO} level_0 ""
                fnWriteLog ${LINENO} level_0 "Exiting the utility..."
                fnWriteLog ${LINENO} level_0 ""
                fnWriteLog ${LINENO} level_0 ""
                # delete the work files
                fnDeleteWorkFiles
                # append the temp log onto the log file
                fnWriteLogTempFile
                # write the log variable to the log file
                fnWriteLogFile
                exit 1
    esac
    #
    # Getting here means that a valid choice was made,
    # so break out of the select statement and continue below,
    # if desired.
    # Note that without an explicit break (or exit) statement,
    # bash will continue to prompt.
    break
    #
    # end select - menu
    # echo "at done"
done
#
##########################################################################
#
# ********************* begin script *********************
#
##########################################################################
#
##########################################################################
#
#
# ---- begin: write the start timestamp to the log
#
fnHeader
#
# capture the run start time for the log banner
date_now="$(date +"%Y-%m-%d-%H%M%S")"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "-------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} "-------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "run start timestamp: "$date_now" "
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "-------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} "-------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
#
fnWriteLog ${LINENO} "increment the task counter"
fnCounterIncrementTask
#
fnWriteLog ${LINENO} ""
##########################################################################
#
#
# clear the console for the run
#
fnHeader
#
##########################################################################
#
#
# ---- begin: display the log location
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "-------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} "-------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "Run log: "$this_log_file_full_path" "
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "-------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} "-------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
#
#
fnWriteLog ${LINENO} ""
#
fnWriteLog ${LINENO} "increment the task counter"
fnCounterIncrementTask
#
fnWriteLog ${LINENO} ""
#
##########################################################################
#
#
# Suspend / resume the ASGs
#
# core work loop: for each ASG name in the filtered list, run
# 'aws autoscaling suspend-processes' or 'resume-processes'
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "------------------------------------ begin: modify the ASG processes ------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
#
# set the counter
counter_asg_modify_process=0
#
# NOTE(review): unquoted expansion iterates one ASG name per line via
# word-splitting; assumes names contain no whitespace -- confirm
for asg_name_modify in $list_asg_modify_process ;
    do
    #
    fnWriteLog ${LINENO} ""
    fnWriteLog ${LINENO} "----------------------- loop head: modify ASG processes ----------------------- "
    fnWriteLog ${LINENO} ""
    #
    # display the header
    fnHeader
    # display the task progress bar
    fnProgressBarTaskDisplay "$counter_asg_modify_process" "$count_asg_modify_process"
    #
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 "This task takes a while. Please wait..."
    fnWriteLog ${LINENO} level_0 ""
    fnWriteLog ${LINENO} level_0 "Modifying processes to "$asg_modify_action_name" for Autoscaling Group: "
    fnWriteLog ${LINENO} level_0 "$asg_name_modify"
    fnWriteLog ${LINENO} ""
    # the CLI verb is built from the action name: suspend-processes / resume-processes
    fnWriteLog ${LINENO} "AWS CLI command: aws autoscaling ${asg_modify_action_name}-processes --auto-scaling-group-name "$asg_name_modify" --profile "$cli_profile" "
    feed_write_log="$(aws autoscaling ${asg_modify_action_name}-processes --auto-scaling-group-name "$asg_name_modify" --profile "$cli_profile" 2>&1)"
    #
    # check for errors from the AWS API
    # (the assignment's exit status is the aws command's exit status)
    if [ "$?" -ne 0 ]
        then
            # AWS Error while changing the ASG process status
            fnWriteLog ${LINENO} level_0 "--------------------------------------------------------------------------------------------------"
            fnWriteLog ${LINENO} level_0 ""
            fnWriteLog ${LINENO} level_0 "AWS error message: "
            fnWriteLog ${LINENO} level_0 "$feed_write_log"
            fnWriteLog ${LINENO} level_0 ""
            fnWriteLog ${LINENO} level_0 "--------------------------------------------------------------------------------------------------"
            fnWriteLog ${LINENO} level_0 ""
            fnWriteLog ${LINENO} level_0 " AWS Error while changing the process status to "$asg_modify_action_name" for "$asg_name_modify" "
            fnWriteLog ${LINENO} level_0 ""
            fnWriteLog ${LINENO} level_0 "--------------------------------------------------------------------------------------------------"
            #
            # set the awserror line number
            # (the -18 offset points back to the aws CLI call above;
            # keep it in sync if lines are added or removed here)
            error_line_aws="$((${LINENO}-18))"
            #
            # call the AWS error handler
            fnErrorAws
            #
    fi # end non-recursive AWS error
    #
    fnWriteLog ${LINENO} "$feed_write_log"
    fnWriteLog ${LINENO} ""
    #
    # increment the modify counter
    fnCounterIncrementAsgModifyProcess
    #
    #
    fnWriteLog ${LINENO} ""
    fnWriteLog ${LINENO} "----------------------- loop tail: modify ASG processes ----------------------- "
    fnWriteLog ${LINENO} ""
    #
    # write out the temp log and empty the log variable
    fnWriteLogTempFile
    #
done
#
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------- done: modify ASG processes ----------------------- "
fnWriteLog ${LINENO} ""
#
#
fnWriteLog ${LINENO} ""
#
fnWriteLog ${LINENO} "increment the task counter"
fnCounterIncrementTask
#
fnWriteLog ${LINENO} ""
#
# display the header
fnHeader
# display the task progress bar
fnProgressBarTaskDisplay "$counter_asg_modify_process" "$count_asg_modify_process"
#
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "Task 'modify AutoScaling Groups processes' complete"
fnWriteLog ${LINENO} level_0 ""
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "------------------------------------- end: modify the ASG processes -------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
#
##########################################################################
#
#
# create the summary report
#
# builds the human-readable summary-report text file; appends the error
# log if it contains more than its two header lines
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "--------------------- begin: print summary report for each Autoscaling Group name ------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnHeader
# load the report variables
#
# initialize the counters
#
#
fnWriteLog ${LINENO} ""
fnHeader
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "Creating job summary report file "
fnWriteLog ${LINENO} level_0 ""
# initialize the report file and append the report lines to the file
echo "">"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
echo " ------------------------------------------------------------------------------------------">>"$this_summary_report_full_path"
echo " ------------------------------------------------------------------------------------------">>"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
echo " AWS AutoScaling Group modify processes to "$asg_modify_action_name" Summary Report">>"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
echo " Script Version: "$script_version"">>"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
echo " Date: "$date_file"">>"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
echo " AWS Account: "$this_aws_account" "$this_aws_account_alias"">>"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
echo " Autoscaling Group Process modification: "$asg_modify_action_name" ">>"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
echo " Autoscaling Group name matched: "$autoscaling_group_name" ">>"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
echo " Number of AutoScaling Groups modified to "$asg_modify_action_name": "$count_asg_modify_process" ">>"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
if [[ "$logging" == "y" ]] ;
    then
        echo " AWS AutoScaling Group modify processes to "$asg_modify_action_name" job log file: ">>"$this_summary_report_full_path"
        echo " "$write_path"/ ">>"$this_summary_report_full_path"
        echo " "$this_log_file" ">>"$this_summary_report_full_path"
        echo "">>"$this_summary_report_full_path"
        echo "">>"$this_summary_report_full_path"
fi
echo " ------------------------------------------------------------------------------------------">>"$this_summary_report_full_path"
# the error log was initialized with two header lines; more than two
# lines means at least one error was appended during the run
count_error_lines="$(cat "$this_log_file_errors_full_path" | wc -l)"
if (( "$count_error_lines" > 2 ))
    then
        echo "">>"$this_summary_report_full_path"
        echo "">>"$this_summary_report_full_path"
        # add the errors to the report
        feed_write_log="$(cat "$this_log_file_errors_full_path">>"$this_summary_report_full_path" 2>&1)"
        fnWriteLog ${LINENO} "$feed_write_log"
        echo "">>"$this_summary_report_full_path"
        echo "">>"$this_summary_report_full_path"
        echo " ------------------------------------------------------------------------------------------">>"$this_summary_report_full_path"
fi
echo "">>"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
#
# write the process status of the modified ASGs to the report
#
echo " Modified Autoscaling Group processes status:">>"$this_summary_report_full_path"
echo " -----------------------------------------------------------------------">>"$this_summary_report_full_path"
#
# initialize the ASG process status file
echo "">"$this_path"/asg-process-status.json
#
# initialize the report counter
counter_report=0
# load the ASG process status file
for asg_name_process_status in $(echo $list_asg_modify_process) ;
do
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------- loop head: create report asg name status ----------------------- "
fnWriteLog ${LINENO} ""
#
# display the header
fnHeader
# display the task progress bar
fnProgressBarTaskDisplay "$counter_report" "$count_asg_modify_process"
#
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "Creating job summary report file "
fnWriteLog ${LINENO} level_0 ""
#
# pull the ASG process status from AWS
echo "--------------------------------------------------------------------------------------------------------------">>"$this_path"/asg-process-status.json
aws autoscaling describe-auto-scaling-groups --auto-scaling-group-name "$asg_name_process_status" --profile "$cli_profile" \
| jq '.AutoScalingGroups[] | {AutoScalingGroupName}, {SuspendedProcesses}' \
| tr -d ',"[]{} ' | grep -v '^$' | grep -v "SuspensionReason" | sed 's/ProcessName:/ - /' >>"$this_path"/asg-process-status.json
#
# check for command / pipeline error(s)
if ["$?" -ne 0 ]
then
#
# set the command/pipeline error line number
error_line_pipeline="$((${LINENO}-7))"
#
# call the command / pipeline error function
fnErrorPipeline
#
#
fi
#
echo "--------------------------------------------------------------------------------------------------------------">>"$this_path"/asg-process-status.json
#
# increment the modify counter
fnCounterIncrementReport
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------- loop tail: create report asg name status ----------------------- "
fnWriteLog ${LINENO} ""
#
done
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------- done: create report asg name status ----------------------- "
fnWriteLog ${LINENO} ""
#
#
# display the header
fnHeader
# display the task progress bar
fnProgressBarTaskDisplay "$counter_report" "$count_asg_modify_process"
#
# add leading 5 characters to match report margin
cat "$this_path"/asg-process-status.json | sed -e 's/^/ /'>>"$this_summary_report_full_path"
#
#
echo "">>"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
echo " ------------------------------------------------------------------------------------------">>"$this_summary_report_full_path"
echo " ------------------------------------------------------------------------------------------">>"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
echo "">>"$this_summary_report_full_path"
#
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "Summary report complete. "
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "Report is located here: "
fnWriteLog ${LINENO} level_0 "$this_summary_report_full_path"
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} ""
#
fnWriteLog ${LINENO} "increment the task counter"
fnCounterIncrementTask
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------- end: print summary report for each LC name ---------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
#
##########################################################################
#
#
# delete the work files
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "---------------------------------------- begin: delete work files ----------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnHeader
fnDeleteWorkFiles
fnWriteLog ${LINENO} ""
#
fnWriteLog ${LINENO} "increment the task counter"
fnCounterIncrementTask
#
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------------------- end: delete work files -----------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "----------------------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} ""
#
##########################################################################
#
#
# done
#
fnHeader
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 " Job Complete "
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 " Summary report location: "
fnWriteLog ${LINENO} level_0 " "$write_path"/ "
fnWriteLog ${LINENO} level_0 " "$this_summary_report" "
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 ""
if [[ "$logging" = "y" ]] ;
then
fnWriteLog ${LINENO} level_0 " Log location: "
fnWriteLog ${LINENO} level_0 " "$write_path"/ "
fnWriteLog ${LINENO} level_0 " "$this_log_file" "
fnWriteLog ${LINENO} level_0 ""
fi
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "----------------------------------------------------------------------"
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 ""
if (( "$count_error_lines" > 2 ))
then
fnWriteLog ${LINENO} level_0 ""
feed_write_log="$(cat "$this_log_file_errors_full_path" 2>&1)"
fnWriteLog ${LINENO} level_0 "$feed_write_log"
fnWriteLog ${LINENO} level_0 ""
fnWriteLog ${LINENO} level_0 "----------------------------------------------------------------------"
fnWriteLog ${LINENO} level_0 ""
fi
#
##########################################################################
#
#
# write the stop timestamp to the log
#
#
date_now="$(date +"%Y-%m-%d-%H%M%S")"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "-------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} "-------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "run end timestamp: "$date_now" "
fnWriteLog ${LINENO} ""
fnWriteLog ${LINENO} "-------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} "-------------------------------------------------------------------------------------------"
fnWriteLog ${LINENO} ""
#
##########################################################################
#
#
# write the log file
#
if [[ ("$logging" = "y") || ("$logging" = "z") ]]
then
# append the temp log onto the log file
fnWriteLogTempFile
# write the log variable to the log file
fnWriteLogFile
else
# delete the temp log file
rm -f "$this_log_temp_file_full_path"
fi
#
# exit with success
exit 0
#
#
# ------------------ end script ----------------------
| true
|
6dd68bb1bea238a90ba43dcd8104ee5ff8947386
|
Shell
|
Nurdilin/scripts-utilities
|
/functions/func_date.sh
|
UTF-8
| 157
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the current timestamp formatted as dd-mm-YYYY_HH.MM.SS.
# Intended to be sourced from another script:
#   . ./functions/func_date.sh
#
# Fix vs. original: the result was echoed unquoted ($TIME, SC2086); the
# format contains no whitespace so output is unchanged, but the expansion
# is now quoted and the temporary is function-local.
function _date {
    local now
    now=$(date +%d-%m-%Y_%H.%M.%S)
    echo "$now"
}
#_date
| true
|
a940db1272c237679e800ac81d50ef4e6ec89035
|
Shell
|
MW-autocat-script/MW-autocat-script
|
/catscripts/Government/Countries/Mexico/Mexico.sh
|
UTF-8
| 336
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Auto-categorization rules for Mexico. The match/exclude patterns are
# consumed by egrep below; debug_start, categorize, debug_end and $NEWPAGES
# are provided by the calling framework. The body runs only when the script
# is invoked without an argument.
KEYWORDS_MEXICO="'Mexic(o|an)|Tijuana"
# NOTE(review): the pattern above begins with a literal single quote —
# presumably intentional wiki-markup anchoring, but worth confirming.
KEYWORDS_MEXICO_EXCLUDE="New(| )Mexic(o|an)"
KEYWORDS_MEXICO_ALL="$KEYWORDS_MEXICO"
if [[ -z "$1" ]]; then
  debug_start "Mexico"
  MEXICO=$(egrep -i "$KEYWORDS_MEXICO" "$NEWPAGES" | egrep -iv "$KEYWORDS_MEXICO_EXCLUDE")
  categorize "MEXICO" "Mexico"
  debug_end "Mexico"
fi
| true
|
e94306c52d0c775e542ae6f93f98eec76edab9fc
|
Shell
|
pierre-morisse-phd-private/PhD
|
/Guided_Assembly/Tests_et_Résultats/test.sh
|
UTF-8
| 1,388
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Guided-assembly test pipeline (E. coli data): builds a scaffolding graph
# from filtered alignments, identifies pairable (multi-mapped) contigs, runs
# the scaffolder (./a.out), then maps the resulting scaffolds back to the
# reference and reports identity and coverage statistics.
#
# Arguments:
#   $1       - run identifier; names graph_file_$1 and filtered_alignments_$1
#   $2,$3,$4 - forwarded to generateGraphFile.sh (semantics defined there,
#              not visible here)
#
# NOTE(review): depends on relative paths (../../sequences/...) and on local
# helper binaries (./getMultimaps, ./a.out) existing in the working directory.
res="scaffolds"
echo ""
echo "$res"
# Build the graph file; note that this 'log' is later overwritten by the
# getLinkedContigs.sh redirection below.
./generateGraphFile.sh "$1" "$2" "$3" 0 10000 0 1 "$4" > log
wc -l "graph_file_$1"
rm mmaps
# Extract multi-mapping entries, then keep the unique set of their targets
# (column 2) as the list of pairable contigs.
./getMultimaps "filtered_alignments_$1" mmaps
cut -f 2 mmaps | sort -u > pairable
wc -l pairable
# Splits abyss.fa into *_linked / *_unlinked based on 'pairable'
# (both stdout and stderr go to the same 'log' file).
./getLinkedContigs.sh ../../sequences/reads/Ecoli/abyss.fa pairable > log 2> log
#awk '{/>/&&++a||b+=length()}END{print b/a}' ../../sequences/reads/Ecoli/abyss.fa_linked
#awk '{/>/&&++a||b+=length()}END{print b/a}' ../../sequences/reads/Ecoli/abyss.fa_unlinked
#bwa mem ../../../../../sequences/references/ADP1G/CR543861.fasta linked_shortreads.contigs.fa > sh.sam
#python3.4 /home/reads/bin/identity.py sh.sam > sh.id
#echo "STATS CONTIGS PRE TRAITEMENT"
#python3.4 ../../stats.py sh.id shortreads.contigs.fa
#samtools view -Sb sh.sam > sh.bam
#samtools sort sh.bam qry
#samtools depth -q0 -Q0 qry.bam > qry.depth
#wc -l qry.depth
# Run the scaffolder itself; writes the scaffolds to $res.fa.
./a.out "graph_file_$1" "$res.fa" ../../sequences/references/EcoliM/MAP006.fasta ../../sequences/reads/Ecoli/abyss.fa ../../sequences/reads/Ecoli/abyss.sam
wc -l "graph_file_$1"
# Map scaffolds to the reference, compute per-alignment identity, then
# aggregate the post-processing statistics.
bwa mem ../../sequences/references/EcoliM/EcoliK12.fasta "$res.fa" > "$res.sam"
python3.5 identity.py "$res.sam" > "$res.id"
echo ""
echo "STATS CONTIGS POST TRAITEMENT"
python3.5 stats.py "$res.id" "$res.fa"
# Coverage: number of reference positions with depth >= 1.
samtools view -Sb "$res.sam" > "$res.bam"
samtools sort "$res.bam" qry
samtools depth -q0 -Q0 qry.bam > qry.depth
wc -l qry.depth
| true
|
b5a43eeae1bf066b78385173d9b1ccce3e001672
|
Shell
|
FluidSimulation/LBM-D2Q6
|
/environment.sh
|
UTF-8
| 785
| 3.125
| 3
|
[] |
no_license
|
# Shell script to contain environment settings for various systems.
# Usage: 'source environment.sh <system>'
# Exports compiler/toolchain variables for the named system; an unknown
# system name only prints a notice and leaves the environment untouched.
SYSTEM=$1

case "$SYSTEM" in
  idun)
    module load intel
    export CC=mpiicc
    export ARCH=intel
    export FFMPEG=${HOME}/tools/bin/ffmpeg
    export FFMPEG_FLAGS="-y -r 25 -b:v 16384k"
    ;;
  epic)
    module load GCC/5.4.0-2.26
    module load OpenMPI/1.10.3
    export PATH+=:/usr/local/cuda/bin
    export NVCC=nvcc
    export CC=nvcc
    export ARCH=cuda
    export FFMPEG=${HOME}/tools/bin/ffmpeg
    export FFMPEG_FLAGS="-y -r 25 -b:v 16384k"
    ;;
  local)
    export CC=mpicc
    export ARCH=generic
    export FFMPEG=ffmpeg
    export FFMPEG_FLAGS="-y -r 25 -tune grain -b:v 16384k"
    ;;
  *)
    printf '%s\n' "Environment not predefined for system '${SYSTEM}'"
    ;;
esac
| true
|
df3dc0c52d43ac8fecf97caddfe78549447bfb2e
|
Shell
|
thesubtlety/historical-samples
|
/getCOF-clean.sh
|
UTF-8
| 1,390
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
#***********************************************************
# getCOF.sh - thesubtlety - Nov 20, 2012
#
# Polls Yahoo Finance every WAIT_TIME seconds (3 hours) and, once the
# COF stock price rises above DESIRED_PRICE, opens a pre-filled email in
# Thunderbird and sends it via xdotool. Very hacky and no robustness..
#
# Fixes vs. the original:
#   - 'while [ true ]' replaced with 'while true' ('[ true ]' merely tests
#     a non-empty string and is always true, even for '[ false ]')
#   - unused HOUR variable removed
#   - expansions quoted (SC2086)
#
# TO DO
#  >add functionality to search for stock name
#  >replace COF with user input
#  >search the site for the spand id='xyz' that is in grep below
#  >search the <h2> tag for the full name
#**********************************************************

URL='finance.yahoo.com/q?s=COF&ql=1'
DESIRED_PRICE=60
STOCK_PRICE=0
WAIT_TIME=10800   # seconds between polls (3 hours)

while true; do
	DATE=$(date)
	# The span id 'yfs_l84_cof' wraps the quote on the page; grab the 7
	# characters that follow it and strip the tag remnant to get the price.
	STOCK_PRICE="$(lynx -source "$URL" | grep -o 'yfs_l84_cof.......' | sed 's/yfs_l84_cof">//' )"
	# NOTE(review): /tmp/stock_price is a predictable world-readable path;
	# mktemp would be safer, kept for compatibility with the email body below.
	echo "$STOCK_PRICE" > /tmp/stock_price
	echo
	echo "$DATE"
	echo "Capital One Financial Corp. (COF)"
	echo "Stock Price: " "$STOCK_PRICE"
	# Integer comparison on the whole-dollar part only; email once and exit.
	if [[ ${STOCK_PRICE%%.*} -gt ${DESIRED_PRICE%%.*} ]] ; then
		thunderbird -compose "to='email',subject='COF Price',body='The COF Stock Price is now $(cat /tmp/stock_price)'"
		sleep 2
		xdotool search --name "Write: COF Price" windowactivate key ctrl+Return
		exit 1;
	fi
	sleep "$WAIT_TIME"
done
exit 0
| true
|
ea61386b97bfc25b90674af29adcb85ef630cea4
|
Shell
|
navikt/dagpenger-journalforing-gsak
|
/travis/deploy.sh
|
UTF-8
| 997
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Deployment is intentionally switched off. The previous Docker build/push
# and GitHub Deployments API flow is preserved below, commented out, so it
# can be restored later.
printf '%s\n' "DEPLOY DISABLED!"
#set -e
#
#echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
#
#IMAGE_VERSION=$DOCKER_IMG_NAME:$VERSION
#IMAGE_LATEST=$DOCKER_IMG_NAME:latest
#
#docker build . --pull -t $IMAGE_VERSION -t $IMAGE_LATEST
#
#docker push $DOCKER_IMG_NAME
#
#jq -n --arg ref $VERSION '{ "ref": $ref, "description": "Deploy from travis", "required_contexts": [], "payload": "{\"freestyle\": \"payload\"}" }' >> deploy.json
#
#echo "DEPLOYING TO GITHUB"
#cat deploy.json
#
#DEPLOYMENT_RESPONSE=$(curl -X POST -H "Authorization: token $GH_TOKEN" https://api.github.com/repos/navikt/dagpenger-journalforing-gsak/deployments --data @deploy.json)
#DEPLOYMENT_ID=$(echo ${DEPLOYMENT_RESPONSE} | jq -r '.id')
#
#
#if [ -z "$DEPLOYMENT_ID" ];
#then
#    >&2 echo "Unable to obtain deployment ID"
#    >&2 echo "$DEPLOYMENT_RESPONSE"
#    exit 1
#fi
#
#
#>&2 echo "Created depoyment against github deployment API, deployment id $DEPLOYMENT_ID"
#
#
| true
|
57a988032269762ea3e57b4853f7f44646671355
|
Shell
|
IFCA-Uniovi/WZ
|
/scripts/plotting3l_data.sh
|
UTF-8
| 320
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# For each variable name in VAR, instantiate the susy3l_data.C ROOT macro
# template (replacing the VARIABLE placeholder) and run it in batch mode.
#
# Fixes vs. original: the array was expanded unquoted (${VAR[@]}, SC2068)
# and the variable was spliced into the sed program outside the quotes;
# both expansions are now quoted.
#VAR=("njets" "nbjets" "met" "ht" "pt1" "pt2" "pt3" "srs")
#VAR=("ftype")
VAR=("srs")
for var in "${VAR[@]}"; do
    variable=$var
    # Fresh copy of the template, then substitute the placeholder in place.
    cp cards/susy3l_data.C cards/tmpFiles/susy3l_data.C
    sed -i "s|VARIABLE|${variable}|" cards/tmpFiles/susy3l_data.C
    root -l -b cards/tmpFiles/susy3l_data.C
done
| true
|
accd964fc942f727e5e77783e176ee57e4eb0078
|
Shell
|
onap/aai-graphadmin
|
/src/main/scripts/updatePem.sh
|
UTF-8
| 1,581
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/ksh
###
# ============LICENSE_START=======================================================
# org.onap.aai
# ================================================================================
# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
###
#
# Extracts the public certificate and the private key (PEM) from the
# aai-client-cert.p12 keystore, using the keystore password obtained from
# AAI configuration. Fix vs. original: path expansions are quoted so paths
# containing spaces do not word-split (SC2086).
COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P )
. "${COMMON_ENV_PATH}/common_functions.sh"
start_date;
check_user;
source_profile;

# Keystore location and output file names (CERTPATH ends with a slash).
CERTPATH=$PROJECT_HOME/resources/etc/auth/
KEYNAME=aaiClientPrivateKey.pem
CERTNAME=aaiClientPublicCert.pem

# Keystore password: last line of the config property getter's output.
pw=$(execute_spring_jar org.onap.aai.util.AAIConfigCommandLinePropGetter "" "aai.keystore.passwd" 2> /dev/null | tail -1)
# NOTE(review): passing the password as pass:$pw exposes it to 'ps';
# openssl's -passin file:/fd: forms would avoid that.
openssl pkcs12 -in "${CERTPATH}/aai-client-cert.p12" -out "${CERTPATH}${CERTNAME}" -clcerts -nokeys -passin "pass:$pw"
openssl pkcs12 -in "${CERTPATH}/aai-client-cert.p12" -out "${CERTPATH}${KEYNAME}" -nocerts -nodes -passin "pass:$pw"
end_date;
exit 0
| true
|
cd24128b956f56e52e6e83a51187e9b76809f1dd
|
Shell
|
rbrich/bashtrace
|
/debug.sh
|
UTF-8
| 804
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# This file is preprocessed before run.
# Following placeholders are replaced by appropriate FD number:
#   __DBG_WR__   Debug pipe, write end.
#   __STP_RD__   Step pipe, read end.
#
# Driver harness for a bash single-stepper: a DEBUG trap fires before every
# command of the target script (sourced at the bottom), reports the command
# over the debug pipe, then blocks until the controller answers on the step
# pipe with either an EVAL request or a numeric go/skip/return code.
# extdebug + set -T make the DEBUG trap fire inside functions/subshells too
# and give the trap's return value its stepping semantics.
shopt -s extdebug
set -T
debug_trap()
{
    # Communicate script line to our "debug" pipe
    # Fields ('!!!'-separated): caller (line + file), the command about to
    # run, the arg-count stack depth, and the subshell nesting level.
    echo "DBG $(caller)!!!${BASH_COMMAND}!!!${#BASH_ARGC[*]}!!!${BASH_SUBSHELL}" >&__DBG_WR__
    # Wait for instruction from our "step" pipe
    while true
    do
        read -u __STP_RD__ -r DEBUG_CMD
        case "${DEBUG_CMD}" in
            EVAL*)
                # Evaluate the controller-supplied expression (everything
                # after the 4-char "EVAL" prefix) in the target's context,
                # then keep waiting for the actual step decision.
                eval ${DEBUG_CMD:4}
                ;;
            *)
                # Return the answer as is (should be numeric 0, 1 or 2)
                # NOTE(review): under extdebug, the DEBUG trap's return value
                # presumably controls stepping (non-zero skips / forces
                # return) — confirm against the Bash manual.
                return ${DEBUG_CMD}
                ;;
        esac
    done
}
trap debug_trap DEBUG
# Re-run this same invocation's script under the trap; "$0" is the target
# script being traced.
source "$0"
| true
|
bf41a01fdd0dd5ee1a120ac66791d51207855da8
|
Shell
|
marbl/merqury
|
/_submit_build_10x.sh
|
UTF-8
| 3,248
| 3.515625
| 4
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/usr/bin/env bash
# Submit SLURM jobs that build a merged meryl k-mer db from 10x read pairs:
#   1. split every R1 file (barcode trimming, handled by split_10x.sh),
#   2. split any R2 file larger than 12GB so meryl can run in parallel,
#   3. submit a dependent concat/merge job gated on all split jobs.
#
# Fixes vs. original: quoted "$1"/file-path expansions (SC2086), usage text
# named the wrong script, 'exit -1' -> 'exit 1', backticks -> $(...),
# useless 'cat | tail' removed. $mem/$compress stay UNquoted on purpose:
# they may be empty and must then contribute zero words.
build=$MERQURY/build

if [ -z "$1" ]; then
	echo "Usage: ./_submit_build_10x.sh [-c] <k-size> <R1.fofn> <R2.fofn> <out_prefix> [mem=T]"
	echo -e "\t-c: OPTIONAL. homopolymer compress the sequence before counting kmers."
	echo -e "\t<k-size>: kmer size k"
	echo -e "\t<R1.fofn>: Read 1. The first 23 bases will get stripped off."
	echo -e "\t<R2.fofn>: Read 2. Will be processed as normal."
	echo -e "\t<out_prefix>: Final merged meryl db will be named as <out_prefix>.meryl"
	echo -e "\t[mem=T]: Submit memory option on sbatch [DEFAULT=TRUE]. Set it to F to turn it off."
	exit 1
fi

if [ "x$1" = "x-c" ]; then
	compress="-c"
	shift
fi

k=$1
R1=$2
R2=$3
out_prefix=$4
mem_opt=$5

mkdir -p logs

# Split files >10GB
cpus=20
if [[ "$mem_opt" = "F" ]]; then
	mem=""
else
	mem="--mem=4g"
fi
name=$out_prefix.split
partition=quick
walltime=4:00:00
path=$(pwd)
log=logs/$name.%A_%a.log
wait_for=""
split=0

# --- R1: always split (one array task per fofn line) to trim barcodes. ---
script=$build/split_10x.sh
LEN=$(wc -l "$R1" | awk '{print $1}')
echo "R1 will be split to trim off the barcodes."
split_arrs="1-$LEN"
args="$R1"
echo "
sbatch -D $path -J $name --array=$split_arrs --partition=$partition $mem --cpus-per-task=$cpus --time=$walltime --error=$log --output=$log $script $args"
sbatch -D $path -J $name --array=$split_arrs --partition=$partition $mem --cpus-per-task=$cpus --time=$walltime --error=$log --output=$log $script $args | awk '{print $NF}' > split_jid
split_jid=$(cat split_jid)
wait_for="${wait_for}afterok:$split_jid,"

#####
# --- R2: split only the fofn lines whose file exceeds 12GB. ---
echo "$R2 will be split if >12G"
script=$build/split.sh
LEN2=$(wc -l "$R2" | awk '{print $1}')
split_arrs=""
for i in $(seq 1 "$LEN2")
do
	fq=$(sed -n "${i}p" "$R2")
	GB=$(du -k "$fq" | awk '{printf "%.0f", $1/1024/1024}')
	if [[ $GB -lt 12 ]]; then
		echo "$fq is $GB, less than 12GB. Skip splitting."
		# Small file: record it directly in the per-line fofn.
		echo "$fq" >> "$R2.$i"
	else
		echo "$fq is $GB, over 12GB. Will split and run meryl in parallel. Split files will be in $R2.$i"
		split_arrs="$split_arrs$i,"	# keep the line nums $i to split
		split=1
		echo
	fi
done

if [[ $split -eq 1 ]]; then
	split_arrs=${split_arrs%,}
	args="$R2"
	echo "
sbatch -D $path -J $name --array=$split_arrs --partition=$partition $mem --cpus-per-task=$cpus --time=$walltime --error=$log --output=$log $script $args"
	sbatch -D $path -J $name --array=$split_arrs --partition=$partition $mem --cpus-per-task=$cpus --time=$walltime --error=$log --output=$log $script $args | awk '{print $NF}' >> split_jid
	split_jid=$(tail -n1 split_jid)
	wait_for="${wait_for}afterok:$split_jid,"
fi

# --- Final concat/merge job, gated on every split job above. ---
cpus=2
if [[ "$mem_opt" = "F" ]]; then
	mem=""
else
	mem="--mem=1g"
fi
name=$out_prefix.concat
script=$build/concat_splits.sh
args="$compress $k $R1 $out_prefix $R2"
partition=quick
walltime=10:00
path=$(pwd)
log=logs/$name.%A.log
wait_for="--dependency=${wait_for%,}"
echo "$wait_for"
echo "
sbatch -D $path -J $name --partition=$partition $mem --cpus-per-task=$cpus --time=$walltime $wait_for --error=$log --output=$log $script $args"
sbatch -D $path -J $name --partition=$partition $mem --cpus-per-task=$cpus --time=$walltime $wait_for --error=$log --output=$log $script $args
| true
|
350fda017c35e554fb56e5d28700859e603f0121
|
Shell
|
icarosimoes/nefilim
|
/nefilim.sh
|
UTF-8
| 3,479
| 3.734375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#============================================================================#
# AUTHOR      : Jefferson Carneiro <slackjeff@riseup.net>
# LICENSE     : GPLv2
# DESCRIPTION : Backup for files/directories and for a MariaDB database.
#
# Version: 1.0
#============================================================================#

#============== FILE BACKUP =================================================#
# Set to false to disable the file/directory backup section.
BACKUP_FILES=true

# Absolute paths of the directories to back up; wrap each entry in quotes
# and remove the leading '#'.
SOURCE_DIRS=(
#    '/absolute/path/here'
#    '/absolute/path/here'
#    '/absolute/path/here'
)

# Directories to skip, written as tar --exclude options, e.g.
#    '--exclude=/home/user/Downloads'
EXCLUDE_DIRS=(
#    '--exclude=/absolute/path/'
#    '--exclude=/absolute/path/'
#    '--exclude=/absolute/path/'
)

#============== MariaDB BACKUP ==============================================#
# Set to false to disable the database backup section.
BACKUP_DB=true
# Database credentials.
# NOTE(review): passing the password on the mysqldump command line leaks it
# to 'ps'; a ~/.my.cnf or --defaults-extra-file would avoid that.
user='MeuUSUARIOaqui'
password='MinhaSenhaAqui'

#============== GLOBALS =====================================================#
# Backups OLDER than this many days are deleted automatically (default 7).
KEEP_DAY='7'
# Directory where backups are written.
BACKUP_DIR='/var/nefilim_backup'
# Log file.
LOG='/var/log/nefilim-mariadb.log'
# Timestamp format used in backup names (dd-Mon-yyyy).
DATE="$(date +%d-%b-%Y)"

#============== SANITY CHECKS ===============================================#
# Create the backup directory on first run.
[ ! -d "$BACKUP_DIR" ] && mkdir "$BACKUP_DIR"

#============== HELPERS =====================================================#
# Log the message and abort.
die() { echo "$@" >>${LOG}; exit 1 ;}

#============== MAIN ========================================================#
# File backup.
if [ "$BACKUP_FILES" = 'true' ]; then
    tar "${EXCLUDE_DIRS[@]}" -cpzf "${BACKUP_DIR}/daily_backup-${DATE}.tar.gz" "${SOURCE_DIRS[@]}" || die "------ $(date +'%d-%m-%Y %T') Backup Diretorios [ERRO]"
    echo "------ $(date +'%d-%m-%Y %T') Backup Diretorios [SUCESSO]" >>${LOG}
fi

# Database backup: dump everything, compress, remove the raw dump.
if [ "$BACKUP_DB" = 'true' ]; then
    sqlfile="mariadb_${DATE}.sql"  # temporary name of the exported dump
    mysqldump -u "$user" -p"$password" --all-databases > "${BACKUP_DIR}/$sqlfile" 2>>${LOG} || die "------ $(date +'%d-%m-%Y %T') Backup database [ERRO]"
    tar cJf "${BACKUP_DIR}/mariadb_${DATE}.tar.xz" "${BACKUP_DIR}/$sqlfile" && rm "${BACKUP_DIR}/$sqlfile"
    echo "------ $(date +'%d-%m-%Y %T') Backup database [SUCESSO]" >>${LOG}
fi

# Remove backups older than KEEP_DAY days.
# Fix: the original used '-mtime "$KEEP_DAY"', which matches files whose age
# is EXACTLY KEEP_DAY days, so older backups were never deleted; the '+'
# matches strictly older files, as the retention policy intends.
find "$BACKUP_DIR" -mtime +"$KEEP_DAY" -delete
| true
|
c95d8de5d729d84b9f4e2f3f04abad93ab54275e
|
Shell
|
pclever1/centELK-dev
|
/configure.sh
|
UTF-8
| 1,048
| 2.78125
| 3
|
[] |
no_license
|
# One-shot configuration for a single-node ELK stack behind pfSense:
# detect the primary local IPv4 address, point Elasticsearch / Kibana /
# Logstash at it, then restart the services.
#
# Fix vs. original: the IP detection wrapped the pipeline in a needless
# 'eval' (the string contained nothing requiring re-evaluation, SC2116-style);
# the pipeline now runs directly.
#check local IP
# NOTE(review): ifconfig is deprecated on modern distros and this may yield
# multiple addresses on multi-homed hosts — confirm for the target system.
ip=$(ifconfig | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p')
#configure elasticsearch
sed -i "s/#network.host: 192.168.0.1/network.host: $ip/g" /etc/elasticsearch/elasticsearch.yml
sed -i 's/#http.port: 9200/http.port: 9200/g' /etc/elasticsearch/elasticsearch.yml
sed -i "s/#discovery.seed_hosts/discovery.seed_hosts/g" /etc/elasticsearch/elasticsearch.yml
sed -i "s/host1/127.0.0.1/g" /etc/elasticsearch/elasticsearch.yml
sed -i "s/host2/$ip/g" /etc/elasticsearch/elasticsearch.yml
#configure kibana
sed -i "s/#elasticsearch.host/elasticsearch.host/g" /etc/kibana/kibana.yml
#configure logstash
sed -i "s/localhost/$ip/g" /etc/logstash/conf.d/50-outputs.conf
#start services
systemctl restart elasticsearch
systemctl restart kibana
systemctl restart logstash
clear
echo
echo '-------------------------'
echo "Install has completed."
echo "You must configure pfSense to forward logs to $ip:5140"
echo "ELK is now running, you can access it at $ip:5601"
echo '-------------------------'
| true
|
59c2d53a40e8fcf816e88b82a58b13588f9b69d9
|
Shell
|
Jesin/home-jesin
|
/.local/bin/dups-of
|
UTF-8
| 171
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# Print, NUL-delimited, every argument after the first whose contents are
# byte-identical to the first file. Paths that refer to the very same file
# (hard link / same inode) are skipped; exits silently if the reference is
# not a readable regular file.
# NOTE(review): 'readlink -f' and '[ -ef ]' are common extensions, not
# strict POSIX — fine on Linux sh implementations.
ref="$(readlink -f -- "$1")"
shift
[ -f "$ref" ] && [ -r "$ref" ] || exit
for candidate; do
	[ "$ref" -ef "$candidate" ] && continue
	cmp -s -- "$ref" "$candidate" && printf '%s\0' "$candidate"
done
| true
|
9a76041fe3d1d7c2dda212a89cd96b2305083956
|
Shell
|
flecoqui/101-vm-simple-vegeta-universal
|
/install-software.sh
|
UTF-8
| 4,376
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# This bash file install apache
# Parameter 1 hostname
azure_hostname=$1
#############################################################################
log()
{
# If you want to enable this logging, uncomment the line below and specify your logging key
#curl -X POST -H "content-type:text/plain" --data-binary "$(date) | ${HOSTNAME} | $1" https://logs-01.loggly.com/inputs/${LOGGING_KEY}/tag/redis-extension,${HOSTNAME}
echo "$1"
echo "$1" >> /testvegeta/log/install.log
}
#############################################################################
check_os() {
grep ubuntu /proc/version > /dev/null 2>&1
isubuntu=${?}
grep centos /proc/version > /dev/null 2>&1
iscentos=${?}
grep redhat /proc/version > /dev/null 2>&1
isredhat=${?}
if [ -f /etc/debian_version ]; then
isdebian=0
else
isdebian=1
fi
if [ $isubuntu -eq 0 ]; then
OS=Ubuntu
VER=$(lsb_release -a | grep Release: | sed 's/Release://'| sed -e 's/^[ \t]*//' | cut -d . -f 1)
elif [ $iscentos -eq 0 ]; then
OS=Centos
VER=$(cat /etc/centos-release)
elif [ $isredhat -eq 0 ]; then
OS=RedHat
VER=$(cat /etc/redhat-release)
elif [ $isdebian -eq 0 ];then
OS=Debian # XXX or Ubuntu??
VER=$(cat /etc/debian_version)
else
OS=$(uname -s)
VER=$(uname -r)
fi
ARCH=$(uname -m | sed 's/x86_//;s/i[3-6]86/32/')
log "OS=$OS version $VER Architecture $ARCH"
}
#############################################################################
configure_network(){
# firewall configuration
iptables -A INPUT -p tcp --dport 80 -j ACCEPT
iptables -A INPUT -p tcp --dport 443 -j ACCEPT
}
#############################################################################
install_vegeta(){
# Download the Go toolchain tarball. NOTE(fix): the original command was
# "wget -q FILE URL", which makes wget treat FILE as a second URL and fail
# on it; -O names the output file explicitly.
wget -q -O go1.12.7.linux-amd64.tar.gz https://dl.google.com/go/go1.12.7.linux-amd64.tar.gz
# tar creates /usr/local/go itself; -p keeps this step idempotent on re-runs.
mkdir -p /usr/local/go
tar -C /usr/local -xzf go1.12.7.linux-amd64.tar.gz
# Expose the toolchain / workspace to future login shells and this one.
echo "export PATH=\$PATH:/usr/local/go/bin" >> /etc/profile
echo "export GOPATH=/testvegeta/go" >> /etc/profile
export GOPATH=/testvegeta/go
echo "export GOCACHE=/testvegeta/gocache" >> /etc/profile
export GOCACHE=/testvegeta/gocache
# Fetch and build vegeta into $GOPATH/bin.
/usr/local/go/bin/go get -u github.com/tsenart/vegeta
export PATH=$PATH:/testvegeta/go/bin
echo "export PATH=\$PATH:/testvegeta/go/bin" >> /etc/profile
chmod +x /testvegeta/go/bin/vegeta
}
#############################################################################
install_git_ubuntu(){
# Install git via APT (Debian/Ubuntu).
apt-get install -y git
}
install_git_centos(){
# Install git via yum (CentOS/RHEL).
yum install -y git
}
#############################################################################
#############################################################################
configure_network_centos(){
# Open HTTP/HTTPS via raw iptables, then persist the rules through firewalld.
for port in 80 443; do
iptables -A INPUT -p tcp --dport "$port" -j ACCEPT
done
service firewalld start
firewall-cmd --permanent --add-port=80/tcp
firewall-cmd --permanent --add-port=443/tcp
firewall-cmd --reload
}
#############################################################################
# ---- main flow: prepare dirs, detect OS, then per-distro installation ----
# Snapshot of the environment, captured before any changes (logged below).
environ=`env`
# Create folders
mkdir /git
mkdir /testvegeta
mkdir /testvegeta/log
mkdir /testvegeta/go
mkdir /testvegeta/gocache
mkdir /testvegeta/config
# Write access in log subfolder
chmod -R a+rw /testvegeta/log
log "Environment before installation: $environ"
log "Installation script start : $(date)"
log "GO Installation: $(date)"
log "##### azure_hostname: $azure_hostname"
log "Installation script start : $(date)"
# Sets OS/VER/ARCH and the is{ubuntu,centos,redhat,debian} flags (0 = hit).
check_os
if [ $iscentos -ne 0 ] && [ $isredhat -ne 0 ] && [ $isubuntu -ne 0 ] && [ $isdebian -ne 0 ];
then
log "unsupported operating system"
exit 1
else
# Per-distro dispatch: firewall setup, git, then the vegeta toolchain.
if [ $iscentos -eq 0 ] ; then
log "configure network centos"
configure_network_centos
log "install git centos"
install_git_centos
log "install vegeta centos"
install_vegeta
elif [ $isredhat -eq 0 ] ; then
log "configure network redhat"
configure_network_centos
log "install git redhat"
install_git_centos
log "install vegeta redhat"
install_vegeta
elif [ $isubuntu -eq 0 ] ; then
log "configure network ubuntu"
configure_network
log "install git ubuntu"
install_git_ubuntu
log "install vegeta ubuntu"
install_vegeta
elif [ $isdebian -eq 0 ] ; then
log "configure network"
configure_network
log "install git debian"
install_git_ubuntu
log "install vegeta debian"
install_vegeta
fi
log "installation done"
fi
exit 0
| true
|
c40dd705270a0a3b904e77fde04bbf1e1a9c984b
|
Shell
|
faiproject/fai-config
|
/class/50-host-classes
|
UTF-8
| 706
| 3.3125
| 3
|
[] |
no_license
|
#! /bin/bash
# assign classes to hosts based on their hostname
# do not use this if a menu will be presented
# (flag_menu is set by the FAI environment when a class menu is shown)
[ "$flag_menu" ] && exit 0
# use a list of classes for our demo machine
# Emitted class names (one space-separated list per host) are consumed by FAI.
case $HOSTNAME in
faiserver)
echo "FAIBASE DEBIAN DEMO FAISERVER" ;;
demohost|client*)
echo "FAIBASE DEBIAN DEMO" ;;
xfcehost)
echo "FAIBASE DEBIAN DEMO XORG XFCE LVM";;
gnomehost)
echo "FAIBASE DEBIAN DEMO XORG GNOME";;
ubuntuhost)
echo "FAIBASE DEBIAN DEMO UBUNTU JAMMY JAMMY64 XORG";;
rocky)
echo "FAIBASE ROCKY" # you may want to add class XORG here
# ifclass is an FAI helper: true when the named class is already assigned.
ifclass AMD64 && echo ROCKY8_64
;;
*)
# Default for any unlisted host.
echo "FAIBASE DEBIAN DEMO" ;;
esac
| true
|
dd4ba0d761456e363865492d8f68f4f0b1cc1052
|
Shell
|
earthIsAPrisonIfYouNotAwareItCanNotOut/keepAwakeWhenSleeping
|
/shell/bash/TLCL/expansions_and_quoting.sh
|
UTF-8
| 3,862
| 3.5
| 4
|
[] |
no_license
|
## echo
# Any argument passed to echo gets displayed
$ echo this is a test
this is a test
## filepath expansiions
$ echo *
Desktop Documents ls-output.txt Music Pictures Public Templates Videos
$ echo D*
Desktop Documents
$ echo *s
Documents Pictures Templates Videos
$ echo [[:upper:]]*
Desktop Documents Music Pictures Public Templates Videos
# listing of hidden files
$ ls -d .[!.]?*
$ ls -A
## (“~”)
# home directory of the named user
$ echo ~
/home/me
$ echo ~foo
/home/foo
## arithmetic expression
# use as a calculator
$ echo $((2 + 2)) # $((expression))
4
$ echo $(((5**2) * 3)) # nest
75
## brace expansion
# comma-separated list of string
$ echo Front-{A,B,C}-Back
Front-A-Back Front-B-Back Front-C-Back
$ echo a{A{1,2},B{3,4}}b # nest
aA1b aA2b aB3b aB4b
# a range of integers or single characters
$ echo Number_{1..5} # number
Number_1 Number_2 Number_3 Number_4 Number_5
$ echo {Z..A} # letter
Z Y X W V U T S R Q P O N M L K J I H G F E D C B A
# make lists of files or directories
$ mkdir Pics
$ cd Pics
$ mkdir {2007..2009}-0{1..9} {2007..2009}-{10..12}
$ ls
2007-01 2007-07 2008-01 2008-07 2009-01 2009-07
2007-02 2007-08 2008-02 2008-08 2009-02 2009-08
2007-03 2007-09 2008-03 2008-09 2009-03 2009-09
2007-04 2007-10 2008-04 2008-10 2009-04 2009-10
2007-05 2007-11 2008-05 2008-11 2009-05 2009-11
2007-06 2007-12 2008-06 2008-12 2009-06 2009-12
## parameter expansion
# Many variables are available for your examination
$ echo $USER # the variable named “USER” contains your user name
me
$ printenv | less # To see a list of available variables
$ echo $SUER
$ # if you mistype a pattern, it will result in an empty string
## command substitution
# allows us to use the output of a command as an expansion
$ echo $(ls)
Desktop Documents ls-output.txt Music Pictures Public Templates
Videos
# getting the listing of of the cp program without having to know its full pathname
$ ls -l $(which cp)
-rwxr-xr-x 1 root root 71516 2007-12-05 08:58 /bin/cp
# the results of the pipeline became the argument list of the file command
$ file $(ls /usr/bin/* | grep zip)
/usr/bin/bunzip2: symbolic link to `bzip2`
# uses *back-quotes* instead of the dollar sign and parentheses
$ ls -l `which cp`
-rwxr-xr-x 1 root root 71516 2007-12-05 08:58 /bin/cp
## quoting
# extra whitespace
$ echo this is a test
this is a test
# $1
$ echo The total is $100.00
The total is 00.00
## double quotes
# Remember, parameter expansion, arithmetic expansion
# and command substitution still take place within double quotes
$ echo "$USER $((2+2)) $(cal)"
me 4 February 2008
Su Mo Tu We Th Fr Sa
....
# shell will seperate file name "two words.txt" to "two" and "words.txt"
$ ls -l two words.txt
ls: cannot access two: No such file or directory
ls: cannot access words.txt: No such file or directory
# By using double quotes, we stop the word-splitting and get the desired result
$ ls -l "two words.txt"
-rw-rw-r-- 1 me me 18 2008-02-20 13:03 two words.txt
$ mv "two words.txt" two_words.txt
# include double quotes, it become part of the argument
$ echo this is a test
this is a test
$ echo "this is a test"
this is a test
## single quotes
# a comparison of unquoted, double quotes, and single quotes
$ echo text ~/*.txt {a,b} $(echo foo) $((2+2)) $USER
text /home/me/ls-output.txt a b foo 4 me
$ echo "text ~/*.txt {a,b} $(echo foo) $((2+2)) $USER"
text ~/*.txt {a,b} foo 4 me
$ echo 'text ~/*.txt {a,b} $(echo foo) $((2+2)) $USER'
text ~/*.txt {a,b} $(echo foo) $((2+2)) $USER
## escape character
$ echo "The balance for user $USER is: \$5.00"
The balance for user me is: $5.00
$ mv bad\&filename good_filename
# Adding the ‘-e’ option to echo will enable interpretation of escape sequences
sleep 10; echo -e "Time's up\a"
sleep 10; echo "Time's up" $'\a'
| true
|
a4f6b1f6bb7b8ee8f75dcad98a5ead5b9892ec67
|
Shell
|
apple/cups
|
/scripts/makecups
|
UTF-8
| 917
| 4.09375
| 4
|
[
"LLVM-exception",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"EPL-1.0",
"GPL-2.0-only",
"LGPL-2.0-only"
] |
permissive
|
#!/bin/sh
#
# Configure and build CUPS with the standard development build options.
# Arguments beginning with "-" are forwarded to ./configure; all other
# arguments are treated as make targets. With no targets given, the
# "clean" and "check" targets are used.
#
# Usage:
#
#   scripts/makecups [configure option(s)] [make target(s)]
#

confopts="--enable-debug --enable-debug-guards --enable-debug-printfs --enable-sanitizer --enable-unit-tests"
makeopts=""

# Split the command line into configure options vs. make targets.
for arg in "$@"; do
	case "$arg" in
		-*)
			confopts="$confopts $arg"
			;;
		*)
			makeopts="$makeopts $arg"
			;;
	esac
done

# Default make targets when none were given.
if test "x$makeopts" = x; then
	makeopts="clean check"
fi

# Platform tweaks: parallel build on macOS, ASan knob on Linux.
case "$(uname)" in
	Darwin)
		makeopts="-j$(sysctl -n hw.activecpu) $makeopts"
		;;
	Linux*)
		ASAN_OPTIONS="leak_check_at_exit=false"
		export ASAN_OPTIONS
		;;
esac

# Run the configure script...
echo ./configure $confopts
./configure $confopts || exit 1

# Build the software...
echo make $makeopts
make $makeopts
| true
|
4aad7c4400381f2a064ff82b31f7b0ae1bb8cd03
|
Shell
|
pdlfs/bosen-umbrella
|
/processor/process_data.sh.in
|
UTF-8
| 2,469
| 3.0625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2017, Carnegie Mellon University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
### instant death on misc errors ###
set -euo pipefail
######################
# Tunable parameters #
######################
# Positional arguments with defaults: node count, processes per node,
# input directory, output directory.
nodes=${1:-"2"}
procs_per_node=${2:-"1"}
indir=${3:-"."}
outdir=${4:-"."}
###############
# Core script #
###############
cores=$((nodes * procs_per_node))
# @CMAKE_INSTALL_PREFIX@ is substituted at build time (this file is a
# CMake *.in template). common.sh provides common_init, common_teardown,
# do_mpirun and the all_nodes variable used below.
source @CMAKE_INSTALL_PREFIX@/processor/common.sh
common_init
#
# do_mpirun: Run CRAY MPICH, ANL MPICH, or OpenMPI run command
#
# Arguments:
# @1 number of processes
# @2 number of processes per node
# @3 array of env vars: ("name1", "val1", "name2", ... )
# @4 host list (comma-separated)
# @5 executable (and any options that don't fit elsewhere)
# @6 extra_opts: extra options to mpiexec (optional)
# @7 log: primary log file for mpi stdout (optional)
#
do_mpirun $cores $procs_per_node "" "$all_nodes" \
    "@CMAKE_INSTALL_PREFIX@/bin/bigml-processor -i $indir -o $outdir" ""
common_teardown
exit 0
| true
|
cf5a5ca1a5eaf9146e4508f056ce2ba0400d4aaf
|
Shell
|
pelt24/dotfiles
|
/install/brew.sh
|
UTF-8
| 560
| 3.046875
| 3
|
[] |
no_license
|
# Install Homebrew if it is not already on PATH.
# NOTE(fix): the ruby-based installer was retired upstream; the supported
# entry point is now the bash install.sh script. Also use `command -v`
# instead of `which` (portable builtin).
if ! command -v brew >/dev/null 2>&1; then
  /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
fi

# Refresh formula metadata (quietly) and upgrade installed packages.
brew update 1>/dev/null
brew upgrade

# Apps to be installed by homebrew.
apps=(
  ansible
  docker
  docker-compose
  docker-machine
  docker-swarm
  git
  golang
  jq
  openconnect
  packer
  tree
)

brew install "${apps[@]}"

# Git ships diff-highlight, but it isn't placed on PATH by default.
ln -sf "$(brew --prefix)/share/git-core/contrib/diff-highlight/diff-highlight" /usr/local/bin/diff-highlight
| true
|
82ad027b472922d49548398a208811f31117368d
|
Shell
|
SerdarARIKAN/TEMP
|
/djangoSite.sh
|
UTF-8
| 16,952
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
clear
echo -e "\e[0;31m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;33m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;34m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;35m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;36m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo "Creat and Configure Django Project for Virtualmin."
echo ""
echo -e "\e[0;36m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;35m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;34m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;33m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;31m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "FQDN : N A M E.TDL"
echo " example.com"
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
# sleep 2.5
read -p "NAME : " name
echo ""
read -p "TDL : " tdl
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Deleting existing project..."
echo ""
echo ""
rm -rf "/home/$name/public_html/websites/${name}.${tdl}/"
echo ""
echo ""
# sleep 2.5
echo "Deleted existing project..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Git Cloning..."
echo ""
git clone "https://github.com/SerdarARIKAN/${name}_${tdl}" "/home/$name/public_html/websites/${name}.${tdl}"
echo ""
# sleep 2.5
echo "Git Clone..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Creating VENV..."
echo ""
python3 "/usr/local/lib/python3.5/dist-packages/virtualenv.py" "/home/$name/public_html/websites/${name}.${tdl}/VENV"
echo ""
# sleep 2.5
echo "Created VENV..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Activating VENV..."
echo ""
source "/home/$name/public_html/websites/$name.$tdl/VENV/bin/activate"
echo ""
# sleep 2.5
echo "Activated VENV..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Installing Requirements..."
echo ""
pip3 install -r /home/$name/public_html/websites/$name.$tdl/requirements.txt
echo ""
# sleep 2.5
echo ""
echo "Installed Requirements..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Editing settings.py..."
echo ""
# In-place edits to the generated Django project's settings/urls.
sed -i "s#DEBUG = True#DEBUG = False#g" "/home/$name/public_html/websites/$name.$tdl/${name}_${tdl}/settings.py"
sed -i "s#ALLOWED_HOSTS = \[\]#ALLOWED_HOSTS = ['*']#g" "/home/$name/public_html/websites/$name.$tdl/${name}_${tdl}/settings.py"
sed -i "s#LANGUAGE_CODE = 'en-us'#LANGUAGE_CODE = 'tr-TR'#g" "/home/$name/public_html/websites/$name.$tdl/${name}_${tdl}/settings.py"
sed -i "s#TIME_ZONE = 'UTC'#TIME_ZONE = 'Asia\/Istanbul'#g" "/home/$name/public_html/websites/$name.$tdl/${name}_${tdl}/settings.py"
sed -i "s#STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)#STATIC_ROOT = os.path.join(BASE_DIR, 'static')#g" "/home/$name/public_html/websites/$name.$tdl/${name}_${tdl}/settings.py"
# NOTE(fix): the delete command used "#pattern#d" — a sed script starting
# with '#' is a comment, so nothing was deleted. A custom address delimiter
# must be introduced with a backslash: \#pattern#d.
sed -i "\#urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)#d" "/home/$name/public_html/websites/$name.$tdl/${name}_${tdl}/urls.py"
echo ""
# sleep 2.5
echo "Edited settings.py..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Making Migrations..."
echo ""
python3 "/home/$name/public_html/websites/$name.$tdl/manage.py" "makemigrations"
echo ""
# sleep 2.5
echo "Maked Migrations..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Migrating..."
echo ""
python3 "/home/$name/public_html/websites/$name.$tdl/manage.py" "migrate"
echo ""
# sleep 2.5
echo "Migrated..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Creating Super User..."
echo ""
python3 "/home/$name/public_html/websites/$name.$tdl/manage.py" "createsuperuser"
echo ""
# sleep 2.5
echo "Created Super User..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Collecting Static..."
echo ""
python3 "/home/$name/public_html/websites/$name.$tdl/manage.py" "collectstatic"
echo ""
# sleep 2.5
echo "Collected Static..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Configuring BIND settings..."
echo ""
echo "\$ttl 38400" > "/var/lib/bind/$name.$tdl.hosts"
echo "@ IN SOA ns1.$name.$tdl. root.ns1.$name.$tdl. (" >> "/var/lib/bind/$name.$tdl.hosts"
echo " $(date +%s)" >> "/var/lib/bind/$name.$tdl.hosts"
echo " 10800" >> "/var/lib/bind/$name.$tdl.hosts"
echo " 3600" >> "/var/lib/bind/$name.$tdl.hosts"
echo " 604800" >> "/var/lib/bind/$name.$tdl.hosts"
echo " 38400 )" >> "/var/lib/bind/$name.$tdl.hosts"
echo "@ IN NS ns1.$name.$tdl." >> "/var/lib/bind/$name.$tdl.hosts"
echo "@ IN NS ns2.$name.$tdl." >> "/var/lib/bind/$name.$tdl.hosts"
echo "ns1.$name.$tdl. IN A $(dig +short myip.opendns.com @resolver1.opendns.com)" >> "/var/lib/bind/$name.$tdl.hosts"
echo "ns2.$name.$tdl. IN A $(dig +short myip.opendns.com @resolver1.opendns.com)" >> "/var/lib/bind/$name.$tdl.hosts"
echo "$name.$tdl. IN A $(dig +short myip.opendns.com @resolver1.opendns.com)" >> "/var/lib/bind/$name.$tdl.hosts"
echo "www.$name.$tdl. IN A $(dig +short myip.opendns.com @resolver1.opendns.com)" >> "/var/lib/bind/$name.$tdl.hosts"
echo "ftp.$name.$tdl. IN A $(dig +short myip.opendns.com @resolver1.opendns.com)" >> "/var/lib/bind/$name.$tdl.hosts"
echo "m.$name.$tdl. IN A $(dig +short myip.opendns.com @resolver1.opendns.com)" >> "/var/lib/bind/$name.$tdl.hosts"
echo "localhost.$name.$tdl. IN A $(dig +short myip.opendns.com @resolver1.opendns.com)" >> "/var/lib/bind/$name.$tdl.hosts"
echo "webmail.$name.$tdl. IN A $(dig +short myip.opendns.com @resolver1.opendns.com)" >> "/var/lib/bind/$name.$tdl.hosts"
echo "admin.$name.$tdl. IN A $(dig +short myip.opendns.com @resolver1.opendns.com)" >> "/var/lib/bind/$name.$tdl.hosts"
echo "mail.$name.$tdl. IN A $(dig +short myip.opendns.com @resolver1.opendns.com)" >> "/var/lib/bind/$name.$tdl.hosts"
echo "$name.$tdl. IN MX 5 $(dig +short myip.opendns.com @resolver1.opendns.com)" >> "/var/lib/bind/$name.$tdl.hosts"
echo "$name.$tdl. IN TXT \"v=spf1 a mx a:$name.$tdl ip4:$(dig +short myip.opendns.com @resolver1.opendns.com) ip4:$(dig +short myip.opendns.com @resolver1.opendns.com) ?all\"" >> "/var/lib/bind/$name.$tdl.hosts"
echo ""
# sleep 2.5
echo "Configured BIND settings..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Configuring Apache settings..."
echo ""
echo "<VirtualHost $(hostname -I | cut -d' ' -f1):80>" > "/etc/apache2/sites-available/$name.$tdl.conf"
echo "ServerName $name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "ServerAlias www.$name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "ServerAlias webmail.$name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "ServerAlias admin.$name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "DocumentRoot /home/$name/public_html/websites/$name.$tdl/" >> "/etc/apache2/sites-available/$name.$tdl.conf"
# NOTE(fix): "$tdl_error_log" was parsed by bash as one variable name
# (tdl_error_log, unset), producing "/var/log/virtualmin/<name>." — brace
# the expansion so "_error_log"/"_access_log" stay literal suffixes.
echo "ErrorLog /var/log/virtualmin/${name}.${tdl}_error_log" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "CustomLog /var/log/virtualmin/${name}.${tdl}_access_log combined" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "WSGIProcessGroup $name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "WSGIDaemonProcess $name.$tdl python-home=/home/$name/public_html/websites/$name.$tdl/VENV python-path=/home/$name/public_html/websites/$name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "WSGIScriptAlias / /home/$name/public_html/websites/$name.$tdl/${name}_${tdl}/wsgi.py process-group=$name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "<Directory /home/$name/public_html/websites/$name.$tdl/${name}_${tdl}>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "<Files wsgi.py>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Require all granted" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "</Files>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "</Directory>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Alias /robots.txt /home/$name/public_html/websites/$name.$tdl/static/robots.txt" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Alias /favicon.ico /home/$name/public_html/websites/$name.$tdl/static/favicon.ico" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Alias /static/ /home/$name/public_html/websites/$name.$tdl/static/" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "<Directory /home/$name/public_html/websites/$name.$tdl/static>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Require all granted" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "</Directory>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Alias /media/ /home/$name/public_html/websites/$name.$tdl/media/" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "<Directory /home/$name/public_html/websites/$name.$tdl/media>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Require all granted" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "</Directory>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "</VirtualHost>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo ""
echo "<VirtualHost $(hostname -I | cut -d' ' -f1):443>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "ServerName $name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "ServerAlias www.$name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "ServerAlias webmail.$name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "ServerAlias admin.$name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "DocumentRoot /home/$name/public_html/websites/$name.$tdl/" >> "/etc/apache2/sites-available/$name.$tdl.conf"
# NOTE(fix): same variable-name parsing bug as the HTTP vhost — "$tdl_error_log"
# expands the unset variable tdl_error_log; braces make the suffix literal.
echo "ErrorLog /var/log/virtualmin/${name}.${tdl}_error_log" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "CustomLog /var/log/virtualmin/${name}.${tdl}_access_log combined" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "WSGIProcessGroup ssl_$name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "WSGIDaemonProcess ssl_$name.$tdl python-home=/home/$name/public_html/websites/$name.$tdl/VENV python-path=/home/$name/public_html/websites/$name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "WSGIScriptAlias / /home/$name/public_html/websites/$name.$tdl/${name}_${tdl}/wsgi.py process-group=ssl_$name.$tdl" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "<Directory /home/$name/public_html/websites/$name.$tdl/${name}_${tdl}>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "<Files wsgi.py>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Require all granted" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "</Files>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "</Directory>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Alias /robots.txt /home/$name/public_html/websites/$name.$tdl/static/robots.txt" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Alias /favicon.ico /home/$name/public_html/websites/$name.$tdl/static/favicon.ico" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Alias /static/ /home/$name/public_html/websites/$name.$tdl/static/" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "<Directory /home/$name/public_html/websites/$name.$tdl/static>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Require all granted" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "</Directory>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Alias /media/ /home/$name/public_html/websites/$name.$tdl/media/" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "<Directory /home/$name/public_html/websites/$name.$tdl/media>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "Require all granted" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "</Directory>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "SSLEngine on" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "SSLCertificateFile /home/$name/ssl.cert" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "SSLCertificateKeyFile /home/$name/ssl.key" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "SSLProtocol all -SSLv2 -SSLv3 -TLSv1 -TLSv1.1" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo "</VirtualHost>" >> "/etc/apache2/sites-available/$name.$tdl.conf"
echo ""
# sleep 2.5
echo "Configured Apache settings..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Configuring Permissions..."
echo ""
chmod -R 3777 /home/$name/public_html/websites/$name.$tdl
chown -R $name:$name /home/$name/public_html/websites/$name.$tdl
echo ""
# sleep 2.5
echo "Configured Permissions..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo "Restarting Services..."
echo ""
service apache2 restart
service bind9 restart
echo ""
# sleep 2.5
echo "Restarted Services..."
echo ""
echo ""
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo ""
echo -e "\e[0;31m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;33m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;34m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;35m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;36m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo ""
echo " M I S S I O N S C O M P L E T E D "
echo ""
echo "Don't Forget L e t ’ s E n c r y p t Settings"
echo ""
echo -e "\e[0;36m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;35m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;34m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;33m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;32m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
echo -e "\e[0;31m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
| true
|
4599620d1ed0741acdc52e39d4bea44c0815969f
|
Shell
|
elvisassis/Docker-PostGIS
|
/PostgreSQL-9.3.5-PostGIS-2.1.7-GDAL-1.11.2-Patched/packages/run.sh
|
UTF-8
| 1,100
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint: initialize the PostgreSQL data directory on first
# run, then exec postgres in the foreground as PID 1's child via gosu.
# Create data store
mkdir -p ${POSTGRES_DATA_FOLDER}
chown postgres:postgres ${POSTGRES_DATA_FOLDER}
chmod 700 ${POSTGRES_DATA_FOLDER}
# Check if data folder is empty. If it is, start the dataserver
if ! [ "$(ls -A ${POSTGRES_DATA_FOLDER})" ]; then
su postgres -c "initdb --encoding=${ENCODING} --locale=${LOCALE} --lc-collate=${COLLATE} --lc-monetary=${LC_MONETARY} --lc-numeric=${LC_NUMERIC} --lc-time=${LC_TIME} -D ${POSTGRES_DATA_FOLDER}"
# Modify basic configuration
# Allow password (md5) connections from any host and listen on all interfaces.
su postgres -c "echo \"host all all 0.0.0.0/0 md5\" >> $POSTGRES_DATA_FOLDER/pg_hba.conf"
su postgres -c "echo \"listen_addresses='*'\" >> $POSTGRES_DATA_FOLDER/postgresql.conf"
# Establish postgres user password and run the database
# One-shot sequence: start server, set the postgres role password, run any
# bundled SQL scripts, then stop — leaving an initialized data directory.
su postgres -c "pg_ctl -w -D ${POSTGRES_DATA_FOLDER} start" ; su postgres -c "psql -h localhost -U postgres -p 5432 -c \"alter role postgres password '${POSTGRES_PASSWD}';\"" ; python /usr/local/bin/run_psql_scripts ; su postgres -c "pg_ctl -w -D ${POSTGRES_DATA_FOLDER} stop"
fi
# Start the database
exec gosu postgres postgres -D $POSTGRES_DATA_FOLDER
| true
|
19b89a92a6fc98722dc81e71a44e2778b2b4a070
|
Shell
|
mmoehrlein/.dotfiles
|
/scripts/bin/mkscriptdir
|
UTF-8
| 1,957
| 4.25
| 4
|
[
"MIT",
"ISC"
] |
permissive
|
#!/bin/bash
# Like mkscript, but creates a <name>.d directory holding the executable and
# links <name> -> <name>.d/<name>, so a script can carry companion files.
# help output
if [ "$1" == "-h" ] || [ "$1" == "--help" ]; then
echo -e " \
This tool is a lot like mkscript bit it creates a directory and links the executable up instead of a simple single file script.
usage: mkscript <script-name> [script-folder]
"
exit 0
fi
# folder where all the scripts shall be stored
SCRIPT_FOLDER=${2:-"$HOME/bin"}
# check if first param is provided
if [ -z "$1" ]; then
# if $1 is not present, it is asked for interactively either via terminal or with rofi
# ([ -t 0 ]: stdin is a terminal -> text prompt; otherwise rofi/dunstify GUI)
if [ -t 0 ]; then
source bashful-input
name=$(input -p "name")
[ -z "$name" ] && echo "no script name given" && exit 1
else
name=$(rofi -dmenu -lines 0 -p name)
[ -z "$name" ] && dunstify canceled "no script name given" && exit 1
fi
SCRIPT_FILE="$name"
else
SCRIPT_FILE="$1"
fi
# check if script already exists as single file
if [ -e "$SCRIPT_FOLDER/$SCRIPT_FILE" ]; then
if [ -d "$SCRIPT_FOLDER/$SCRIPT_FILE.d" ]; then
echo "Script directory already exists."
exit
fi
echo "file already exists as single script"
echo "Do you want to make it into a script dir? [yN]"
if [ "$(read -r line && echo "$line")" == y ]; then
# convert from single file script to script dir
mkdir "$SCRIPT_FOLDER/$SCRIPT_FILE.d"
mv "$SCRIPT_FOLDER/$SCRIPT_FILE" "$SCRIPT_FOLDER/$SCRIPT_FILE.d/$SCRIPT_FILE"
ln -s "$SCRIPT_FILE.d/$SCRIPT_FILE" "$SCRIPT_FOLDER/$SCRIPT_FILE"
fi
exit 0
fi
# check if script exists as a directory
if [ -d "$SCRIPT_FOLDER/$SCRIPT_FILE.d" ];then
echo "script already exists"
exit
fi
# create script as directory: shebang stub, executable bit, symlink-less layout
mkdir "$SCRIPT_FOLDER/$SCRIPT_FILE.d"
echo $'#!/bin/bash\n' > "$SCRIPT_FOLDER/$SCRIPT_FILE.d/$SCRIPT_FILE"
chmod +x "$SCRIPT_FOLDER/$SCRIPT_FILE.d/$SCRIPT_FILE"
# Open the new script in nvim if available — in-terminal when interactive,
# otherwise in a new alacritty window.
if [ -x /usr/bin/nvim ]; then
if [ -t 0 ]; then
nvim +2 "$SCRIPT_FOLDER/$SCRIPT_FILE.d/$SCRIPT_FILE"
else
alacritty -e nvim +2 "$SCRIPT_FOLDER/$SCRIPT_FILE.d/$SCRIPT_FILE"
fi
fi
| true
|
013da1b324d16103220be8e1c97a699fc1b46f4d
|
Shell
|
dlenski/create_ap
|
/create_ap.resume
|
UTF-8
| 266
| 3.203125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
set -e
# systemd suspend hook: after resume ("post" phase), restart the create_ap
# service — try-restart is a no-op when the service was not running.
# See:
# https://www.freedesktop.org/software/systemd/man/systemd-suspend.service.html
if [ "$1" = "post" ]; then
	/bin/systemctl try-restart create_ap.service
fi
| true
|
33c7eeb3c14432fcefc66b4ffde36ce21f868a15
|
Shell
|
frendo/unix
|
/ipod.sh
|
UTF-8
| 274
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Walks directories matching /boot* one level deep and prints each file
# inside them. NOTE(review): FILE_PATH is captured but never used below —
# presumably intended as a filter or destination; confirm against callers.
FILE_PATH=$1
echo $FILE_PATH
# First (and only) top-level directory named "boot" (case-insensitive).
SOURCE_PATH=$(find / -maxdepth 1 -type d -iname "boot")
echo $SOURCE_PATH
# Include dotfiles in glob expansions below.
shopt -s dotglob
find $SOURCE_PATH* -prune -type d | while read d; do
echo "$d"
# Unquoted expansion is intentional here: the glob expands at `for` time.
FILES=$d/*
for f in $FILES
do
echo "Processing $f file..."
done
done
| true
|
61d2a55d4dfff4f2bfced9d85be22cd239e2495d
|
Shell
|
FlyInWind1/boot-starter
|
/boot-starter.conf
|
UTF-8
| 1,066
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# shellcheck disable=SC2034
# Directory where backups are stored
backup_directory=./backup
# Directory where newly deployed files are placed
new_directory=./new
# Files required by the backend
backend_files=(xx.jar)
# Files required by the frontend
front_files=(dist)
# First half of the command used to start the project (the port is appended)
start_shell_prefix='nohup java -jar -Dserver.port='
# Second half of the start command
start_shell_suffix=' -Dloader.path=libs -jar xx.jar &'
# The two ports used by the project
ports=(8000 8001)
# Whether to manage the project with systemd (empty = disabled)
use_systemd=
# Names of the two services used to start the project; must correspond to the ports configured above
systemd_services=(xxx xxx2)
# systemctl command; add the --user flag yourself if you need it
systemd_command="systemctl "
# During deployment, how long to wait before stopping the old process once the
# new one is listening. See the sleep command's help for accepted formats.
deploy_sleep_time="5s"
# Hook function called before deploying the backend
# preBackend(){}
# Hook function called after deploying the backend
# postBackend(){}
# Hook function called before deploying the frontend
# preFront(){}
# Hook function called after deploying the frontend
# postFront(){}
| true
|
8bda256fafa89476f882f732fecbd5ae0d1bff47
|
Shell
|
OctaveC/holberton-system_engineering-devops
|
/0x0C-web_server/4-not_found_page_404
|
UTF-8
| 579
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# 404 page replacement: install nginx, add a redirect rule and a custom
# 404 page to the default site, then restart nginx.
sudo apt -y update
sudo apt -y install nginx
echo 'Holberton School for the win!' | sudo tee /var/www/html/index.nginx-debian.html
# Append a permanent redirect rule right after the default-server listen line.
sudo sed -i '/listen 80 default_server;/a \\n\trewrite ^/redirect_me https://www.youtube.com/watch?v=QH2-TGUlwu4 permanent;' /etc/nginx/sites-available/default
echo "Ceci n'est pas une page" | sudo tee /var/www/html/404.html
# Register the custom 404 page ("internal" blocks direct requests to /404.html).
sudo sed -i '/server_name _;/a error_page 404 /404.html;\nlocation = /404.html {\nroot /var/www/html;\ninternal;\n}' /etc/nginx/sites-available/default
sudo service nginx restart
| true
|
089f898c3e375082926473dc842ce9a8013ded3b
|
Shell
|
bridgecrew-perf4/terraform-infras
|
/aws/bin/destroy-s3-backend.sh
|
UTF-8
| 543
| 3.328125
| 3
|
[] |
no_license
|
# Probe the Terraform backend bucket; head-bucket writes its diagnostics to
# stderr, hence 2>&1 so the message can be matched below.
BUCKET_STATUS=$(aws s3api head-bucket --bucket "${TF_BACKEND_S3_BUCKET}" 2>&1)
echo "Bucket name: $TF_BACKEND_S3_BUCKET"
# grep without -q is intentional here: the matched diagnostic line is echoed
# as part of the script's output.
if echo "${BUCKET_STATUS}" | grep 'Not Found';
then
echo "Bucket not found"
elif echo "${BUCKET_STATUS}" | grep 'Forbidden';
then
echo "Bucket exists but not owned"
elif echo "${BUCKET_STATUS}" | grep 'Bad Request';
then
echo "Bucket name specified is less than 3 or greater than 63 characters"
else
# Only destroy when the bucket exists and is owned by this account.
echo "Bucket owned and exists";
echo "Destroying bucket..."
aws s3 rb s3://$TF_BACKEND_S3_BUCKET --force
fi
|
505222e64899b94d881973edfc16e9666cdb808c
|
Shell
|
Daryljocky/rsync-docker
|
/docker-entrypoint-lsyncd.sh
|
UTF-8
| 290
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/sh
set -e
# Render runtime configuration from env vars, then run lsyncd in the foreground.
passfile=/etc/lsyncd/rsyncd.password
conf=/etc/lsyncd/lsyncd.conf
echo "$SYNC_PASSWORD" > "$passfile"
sed -i "s/{{SERVER_IP}}/$SERVER_IP/g" "$conf"
sed -i "s/{{SECTION}}/$SECTION/g" "$conf"
chmod 0600 "$passfile"
lsyncd -insist -nodaemon -log Exec "$conf"
| true
|
0f969a2bf026a734edb606e22cbcf13eda6474d3
|
Shell
|
Vaibhavgodha/nikkyjain.github.io
|
/others/collaborate/shastra/genPDF
|
UTF-8
| 1,306
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
# Emit (to stdout) an HTML page that builds a jsPDF document from bhajan
# text files — one page per file, one doc.text() call per line.
curDir=$PWD
# NOTE(review): curDir and the -c/convert flag are captured but never used below.
convert=
[ "$1" = "-c" ] && convert="-c"
cat <<EOF
<!doctype>
<html>
<head>
<title>jsPDF</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="hi" />
<script type="text/javascript" src="../libs/base64.js"></script>
<script type="text/javascript" src="../libs/sprintf.js"></script>
<script type="text/javascript" src="../jspdf.js"></script>
</head>
<body>
<h2>Simple Two-page Text Document</h2>
<a href="javascript:convertToPDF()">Convert To PDF</a>
<script type="text/javascript">
var doc = new jsPDF();
function convertToPDF() {
createPages();
// Output as Data URI
doc.output('datauri');
}
function createPages() {
EOF
# Only the first bhajan directory is processed (head -1); for each file in
# its main/ subfolder, emit jsPDF text/addPage calls.
for dir in `find /mnt/hgfs/winshare/Personal/Dropbox/Public/HTML/myWebsite/jainDataBase/bhajans/. -maxdepth 1 -type d -name [^\.]\* | sed 's:^\./::' | head -1`
do
cd $dir/main
for file in *
do
IFS=$'\n'
#dos2unix $file
# File name as the page heading at y=30.
echo " doc.text(20, 30, \"$file\");"
cntr=30;
for line in $(cat $file | dos2unix)
do
# Strip CRs and a possible UTF-8 BOM from each line.
l=$(echo "$line" | tr '\r' ' ' | sed 's/^\xEF\xBB\xBF//')
cntr=$((cntr+20))
echo " doc.text(20, $cntr, \"$l\");"
done
echo "doc.addPage();"
#unix2dos $file
done
done
# Close the generated createPages() function and the HTML document.
cat <<EOF
}
</script>
</body>
</html>
EOF
| true
|
8d003a3b9f6185a73ef8c15aed11714533331643
|
Shell
|
ahmedtd/ballistae
|
/third_party/jpeg/update.bash
|
UTF-8
| 201
| 2.578125
| 3
|
[
"BSD-3-Clause",
"IJG",
"Zlib",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
# Fetch the pinned libjpeg-turbo release and unpack its sources into the
# current directory, then discard the downloaded archive.
VERSION=2.0.4
archive="${VERSION}.tar.gz"
# -O keeps the remote file name, -L follows GitHub's redirect.
curl -OL "https://github.com/libjpeg-turbo/libjpeg-turbo/archive/${archive}" || exit 1
# --strip-components=1 drops the top-level "libjpeg-turbo-X.Y.Z/" folder.
tar xf "${archive}" --strip-components=1 || exit 1
rm "${archive}" || exit 1
| true
|
ea373182325c1ca3fb502400d01b346f2cf3f949
|
Shell
|
fancyspeed/solution_of_kaggle_merck
|
/weighted-knn/script/run_svm_format.sh
|
UTF-8
| 643
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh -x
# Run the weighted-kNN pipeline for one fold of the "dog" dataset:
# predict with wknn_svmformat.py, rebuild the submission rows with
# construct.py, then score the result with the R^2 evaluator.
ori_dir=../../data/dog
in_dir=../../data/dog.stdscore.svmformat.sample
out_dir=../data
# Fold index (hard-coded).
i=4
# Per-fold input/output files.
f_record=${ori_dir}/record_${i}.txt
f_valid=${ori_dir}/validation_${i}.txt
f_truth=${ori_dir}/groundtruth_${i}.txt
f_train=${in_dir}/fm_traind_${i}
f_test=${in_dir}/fm_testd_${i}
f_pred=${out_dir}/knn_predd_${i}
f_out=${out_dir}/knn_outd_${i}
# kNN hyper-parameters: neighbour count and kernel width.
K=9
gamma=0.001
# Feature count, derived from the record file by a helper script.
NF=`python get_nfeat.py ${f_record}`
#python knn_var_svmformat.py ${f_train} ${f_test} ${f_pred} ${NF} ${K}
python wknn_svmformat.py ${f_train} ${f_test} ${f_pred} ${NF} ${K} ${gamma}
python construct.py $f_valid $f_pred $f_out
python ../../evaluate/R2.py $f_out $f_truth
| true
|
071e51a5e6e2cdf88f09c145579fc89e06f1290b
|
Shell
|
tianhongbo/controller
|
/src/github.com/tianhongbo/node/deviceinstall.sh
|
UTF-8
| 1,422
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Prepare a freshly booted Android device for remote access:
# wait for boot to complete, blank its DNS servers, forward SSH and VNC
# ports over adb, then launch a noVNC websocket proxy on the host.
#
# Arguments:
#   $1 - adb serial of the device
#   $2 - device IP (informational only)
#   $3 - public noVNC listen port
#   $4 - host port forwarded to the device's sshd (port 22)

# For Mac
#vnc_server_path=/Users/Scott/master/src/github.com/tianhongbo/node
#novnc_path=/Users/Scott/noVNC

# For Ubuntu  (fixed: the original line lacked the leading '#', so the
# bare word "For" was executed as a command)
vnc_server_path=/home/ubuntu2/controller/src/github.com/tianhongbo/node
novnc_path=/home/ubuntu2/noVNC

# ADB name, device_ip, vnc_port, ssh_port
adb_name=$1
device_ip=$2
vnc_port=$3
# Internal host-side forward target; arithmetic expansion replaces the
# old backticked `expr` call.
vnc_internal_port=$((vnc_port + 40))
ssh_port=$4
echo "adb_name=$adb_name, device_ip=$device_ip, vnc_port=$vnc_port, ssh_port=$ssh_port"

# Wait until the device is visible to adb...
adb -s "$adb_name" wait-for-device

# ...and until Android reports boot completion.
A=$(adb -s "$adb_name" shell getprop sys.boot_completed | tr -d '\r')
while [ "$A" != "1" ]; do
    sleep 1
    A=$(adb -s "$adb_name" shell getprop sys.boot_completed | tr -d '\r')
done

# Disconnect the Internet connection by blanking both DNS servers
# (wifi/data toggles kept for reference).
#adb -s $adb_name shell 'su -c "svc wifi disable"'
#adb -s $adb_name shell 'su -c "svc data disable"'
#adb -s $adb_name shell setprop net.dns1 0.0.0.0
adb -s "$adb_name" shell 'su -c "setprop net.dns1 0.0.0.0"'
adb -s "$adb_name" shell 'su -c "setprop net.dns2 0.0.0.0"'

# Configure SSH: host port -> device sshd.
adb -s "$adb_name" forward tcp:"$ssh_port" tcp:22

# Configure VNC: internal host port -> device VNC server.
adb -s "$adb_name" forward tcp:"$vnc_internal_port" tcp:5901

# Start the vnc proxy on the host, bridging the public listen port to the
# adb-forwarded VNC port.
#/Users/Scott/noVNC/utils/launch.sh --listen 5910 --vnc 192.168.1.16:5901 --web /Users/Scott/noVNC
cd "$novnc_path" || exit 1
"$novnc_path"/utils/launch.sh --listen "$vnc_port" --vnc localhost:"$vnc_internal_port" --web "$novnc_path" &
| true
|
0dd63bce6a036f8ab08c2b03b8df296fbabec00c
|
Shell
|
adypappi/infra
|
/scripts/AdipappiUtils.sh
|
UTF-8
| 9,962
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Defines all useful functions used in laapi infra management.
#
#set -x
# Adipappi infra scripts repository root
# Canonical status strings echoed by the check*/is* helpers below.
export OK="OK"
export KO="KO"
# Use full Regex
# IPv4 dotted-quad matcher (octets restricted to 0-255).
# NOTE(review): the separating dots are unescaped, so '.' matches any
# character — confirm before relying on this for strict validation.
export IPV4_REGEX="^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?).(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?).(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?).(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
#FNCT: 1
#
#
# Check that the script runs as root (directly or via sudo).
# Prints an error to stderr and returns 1 when $UID is not 0; returns 0
# otherwise.  (`return 1` replaces the invalid `return -1`: bash only
# accepts a status in 0..255.)
function isUserRootOrSudo() {
    local SUDO_RIGHTS_MSG="This script must be run with sudo command or root"
    if [ ! $UID -eq 0 ] ; then
        # %s keeps the message out of the printf format string; diagnostics
        # go to stderr so callers capturing stdout are unaffected.
        printf '%s\n' "${SUDO_RIGHTS_MSG}" >&2
        return 1
    fi
    return 0
}
#FNCT: 2
#
#
# Print any array each element in one line into console output.
# Call as: printArray "${arr[@]}"
function printArray {
    tab=("$@")
    # printf re-applies '%s\n' to each remaining argument, so a single
    # call emits one element per line.
    printf '%s\n' "${tab[@]}"
}
#FNCT: 3
#
# Refresh the apt package index.
function aptgetUpdate {
    apt-get update
}
#FNCT: 4
#
# Upgrade every installed package without prompting.
function aptgetForceUpgrade {
    apt-get upgrade -y
}
#FNCT: 5
# Install list of package or single package
# Each argument gets its own apt-get invocation.
# NOTE(review): $(echo $@) word-splits the arguments, so package names
# must not contain whitespace.
function aptgetForceInstall {
    for pkg in $(echo $@); do
        apt-get install -y $pkg
    done
}
#FNCT: 6
# Check that $1 is a PostgreSQL-10+-style version number: a major number
# optionally followed by a single-digit minor (e.g. "10", "9.6").
# Prints OK when it matches, KO otherwise.
function checkPgsql10PlusVersion() {
    # Anchored pattern: the original unanchored regex accepted any string
    # that merely *contained* a digit (e.g. "abc1def").
    if [[ $1 =~ ^[0-9]+(\.[0-9])?$ ]]; then
        echo "$OK"
    else
        echo "$KO"
    fi
}
#FNCT: 7
# Check that $1 is exactly of the form X.Y where X and Y are integers.
# Prints OK when it matches, KO otherwise.
function checkXDotYVersion() {
    # Anchored: the original unanchored regex also accepted strings that
    # merely contained an X.Y substring (e.g. "v3.3-rc1").
    if [[ $1 =~ ^[0-9]+\.[0-9]+$ ]]; then
        echo "$OK"
    else
        echo "$KO"
    fi
}
#FNCT: 8
#
# Check that $1 is exactly of the form X.Y.Z where X, Y and Z are integers.
# Prints OK when it matches, KO otherwise.
function checkXDotYDotZVersion() {
    # Anchored: the original unanchored regex also matched strings that
    # merely contained an X.Y.Z substring.
    if [[ $1 =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
        echo "$OK"
    else
        echo "$KO"
    fi
}
#FNCT: 9
#
# Abort the whole script unless the argument is the literal string "OK".
# $1: version-check result to validate
# Exits with status 2 (not the invalid `exit -2`) when $1 != "OK".
function isVersionNumberOK() {
    if [[ "$1" != "OK" ]]; then
        # %s keeps $1 out of the printf format string (a value containing
        # '%' would otherwise be interpreted as a conversion).
        printf 'The version number %s KO (is incorrect)\n' "$1"
        exit 2
    fi
}
#FNCT: 10
#
# Check whether the given debian package is installed.
#
# parm: $1 package name
# return: prints OK if the package is installed and KO otherwise.
function isPackageInstalled() {
    # Test dpkg's exit status directly.  The original captured dpkg's
    # (fully redirected, hence empty) stdout into a variable and compared
    # *that* with `-eq 0` — which is always true, so every package
    # appeared installed.
    if dpkg -s "$1" >/dev/null 2>&1; then
        echo "$OK"
    else
        echo "$KO"
    fi
}
#FNCT: 11
#
# Add the main apt repository of the postgresql database: writes the pgdg
# source-list entry for the host's release codename.
#
# return OK if postgresq's apt repository is already installed
# NOTE(review): prints OK unconditionally — no check is actually performed.
function addPostgresqlAptRepository() {
    sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
    printf "OK\n"
}
#FNCT: 12
#
# Add postgresql apt repository media key.
#
# Need sudo user
#
function addPostgresqlAptRepositoryKey(){
    local pkgName1="wget"
    local pkgName2="ca-certificates"
    # Make sure the tools needed to fetch and trust the key are present.
    for pkg in $pkgName1 $pkgName2; do
        if [[ $(isPackageInstalled $pkg) == $KO ]]; then
            aptgetForceInstall $pkg
        fi
    done
    # Import the PGDG signing key, then refresh the index and upgrade.
    wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc|apt-key add -
    aptgetUpdate
    aptgetForceUpgrade
}
#FNCT: 13
#
# Check that postgresql is installed or not.
#
# Script parameters:
# $1: The name of script in ${PAPI_INFRA_SCRIPTS} folder used to get postgresql version
# $2: version number of postgresql to check
# Return: prints 'OK' if postgresql is installed. 'KO' if not
#
function checkPostgresql() {
    # Run the helper script and compare its reported version with $2.
    # NOTE(review): relies on PAPI_INFRA_SCRIPTS being exported by the
    # caller — it is not defined anywhere in this file.
    pgVersion=$(${PAPI_INFRA_SCRIPTS}/$1)
    if [[ "$pgVersion" == "$2" ]]; then
        echo $OK
    else
        echo $KO
    fi
}
#FNCT: 14
# Install a specific version of postgresql.
#
# Script parameters
# $1: The version number of postgresql to install
function installPostgresql() {
    # Use $1 directly: the original interpolated the undefined variable
    # $postgresqlVersion, always printing an empty version string.
    printf 'Install postgresql database version %s \n' "$1"
    aptgetForceInstall "postgresql-$1"
}
#FNCT: 15
#
# List all postgresql instance user and group in laapi infrastructure architecture
#
# in laapi postgresql instance is named pginst<i> user associated is named pginstusr<i> and the usergroup is pginstgrp
# where <i> is an integer.
# Per default the number of instances per insfrastructure host depend on its capacity.
#
# By default each instance is installed on each host (VM, Container, Baremetal...) into File system /caldron/pg/pginst<i>
# The prefix is so: pginst.
#
# Return all postgresql instance as a bash string convertible to array "instanceName<i>:instanceUser<i>:instanceGroup:instanceRootFS<i>"
function listPgInstanceGroupUser() {
    local PREFIX1="pginst"
    local PREFIX2="${PREFIX1}usr"
    local PREFIX3="${PREFIX1}grp"
    # All local accounts named pginstusr<i>, taken from /etc/passwd.
    allPgInstUsers=($(grep -P "^$PREFIX2\d+" /etc/passwd | cut -d':' -f1| tr '\n' ' '|uniq))
    declare -a pgInstGroupsUsers
    local j=1
    for user in "${allPgInstUsers[@]}"; do
        # <i> suffix of the user name; the matching group membership.
        userIndex=${user#$PREFIX2}
        userGroup=$(id -Gn $user | grep -o "$PREFIX3")
        # Check FS of pg instance normally associated to the user;
        # empty string when the expected root directory does not exist.
        pginstFSPrefix="/caldron/pg/${PREFIX1}"
        instRootFS="${pginstFSPrefix}${userIndex}"
        if [[ ! -e $instRootFS ]] ; then
            instRootFS=""
        fi
        # One "name:user:group:rootfs" record per instance.
        pgInstGroupsUsers[$j]="${PREFIX1}${userIndex}:${PREFIX2}${userIndex}:${userGroup}:${instRootFS}"
        j=$((j+1))
    done
    printf "%s\n" ${pgInstGroupsUsers[@]}
}
#FUNC: 16
# Create a self-signed certificate. Three files are generated: .key .csr and .crt
#
# Generate certificat with openssl rsa4096
#
#String X.500 AttributeType none interactively
#------------------------------
#CN	commonName
#L	localityName
#ST	stateOrProvinceName
#O	organizationName
#OU	organizationalUnitName
#C	countryName
#STREET	streetAddress
#DC	domainComponent
#UID	userid
#
#parameters:mandatory
# $1: Name of root folder holding all certificats
# $2: certificat name file name without extension. Example for www.crt www.key and www.csr files provides www
# $3: countryName
# $4: stateOrProvinceName
# $5: localityName
# $6: organizationName
# $7: domainComponent
# Depends on openssl
function createDomainSSLCertificat() {
    # NOTE(review): this guard compares `which openssl` output against the
    # literal "openssl:"; on most systems `which` prints a path or nothing,
    # so the check likely never fires — confirm.  `exit -1` is also not a
    # valid exit status (0..255).
    if [[ "$(which openssl)" == "openssl:" ]]; then
        printf "Function ${FUNCNAME[0]} needs openssl to be installed\n"
        exit -1
    fi
    local CERT_ROOT_DIR=$1
    local CERT_FILE_PREFIX=$2
    local countryName=$3
    local stateOrProvinceName=$4
    local localityName=$5
    local organizationName=$6
    local domainComponent=$7
    mkdir -p ${CERT_ROOT_DIR}
    cd ${CERT_ROOT_DIR}
    # Unencrypted 4096-bit RSA key + CSR with the subject built from the
    # arguments, then a self-signed certificate valid for two years.
    openssl req -nodes -newkey rsa:4096 -keyout ${CERT_FILE_PREFIX}.key -out ${CERT_FILE_PREFIX}.csr -subj "/C=${countryName}/ST=${stateOrProvinceName}/L=${localityName}/O=${organizationName}/CN=${domainComponent}"
    openssl x509 -req -days 730 -in ${CERT_FILE_PREFIX}.csr -signkey ${CERT_FILE_PREFIX}.key -out ${CERT_FILE_PREFIX}.crt
}
#FUNC: 17
# Check that the provided string is a valid linux (lsb) user or group
# name: a lowercase letter or underscore followed by 2 to 16 characters
# from [a-z_0-9] (3-17 characters in total).
# Prints OK when valid, KO otherwise.
function isValidUsername() {
    # Bash's =~ replaces the original `echo | grep -Po` pipeline: no
    # subshell, no dependency on GNU grep's PCRE mode, and no
    # word-splitting of $1 through an unquoted echo.
    if [[ $1 =~ ^[a-z_][a-z_0-9]{2,16}$ ]]; then
        echo "$OK"
    else
        echo "$KO"
    fi
}
#FUNC: 18
# Check that linux user exists or not.
#
#parameters:mandatory
# $1: username to check
#return: prints OK if user exists, KO otherwise.
function isUserExists() {
    # Test id's exit status directly.  The original wrapped the (fully
    # redirected, hence empty) output of `id -u` in [[ ... ]], which is
    # always false — so every lookup reported KO.
    if id -u "$1" >/dev/null 2>&1; then
        echo "$OK"
    else
        echo "$KO"
    fi
}
#FUNC: 19
# NOTE(review): duplicate of the definition above (#FUNC 18); in bash the
# last definition wins — consider deleting the redundant copies.
# $1: username to check
#return: prints OK if user exists, KO otherwise.
function isUserExists() {
    # Same fix as #FUNC 18: test id's exit status directly instead of the
    # always-empty, fully redirected command substitution.
    if id -u "$1" >/dev/null 2>&1; then
        echo "$OK"
    else
        echo "$KO"
    fi
}
#FUNC: 20
# NOTE(review): third duplicate definition of isUserExists (see #FUNC 18);
# this is the one bash actually keeps — consider deleting the others.
#parameters:mandatory
# $1: username to check
#return: prints OK if user exists, KO otherwise.
function isUserExists() {
    # Same fix as #FUNC 18: test id's exit status directly instead of the
    # always-empty, fully redirected command substitution.
    if id -u "$1" >/dev/null 2>&1; then
        echo "$OK"
    else
        echo "$KO"
    fi
}
#FUNC: 21
# Check that linux group exists or not.
#
#parameters:mandatory
# $1: groupname to check
#return: prints OK if group exists, KO otherwise.
function isGroupExists() {
    # Look the name up in the group database.  The original called
    # `id -g`, which queries *users* rather than groups (and its
    # bracketed, fully redirected output was always empty anyway, so the
    # test never succeeded).
    if getent group "$1" >/dev/null 2>&1; then
        echo "$OK"
    else
        echo "$KO"
    fi
}
#FUNC: 22
# Check that given user exist and it is in given group.
#
#parameters:mandatory
# $1: username to check
# $2: groupname in which user will be checked.
#return: prints OK if user exists in group, otherwise KO.
function isUserInGroup() {
    # Count whole-word occurrences of the group in the user's group list.
    # NOTE(review): \b is a GNU grep extension — confirm portability if
    # this ever runs on non-GNU userlands.
    local res=$(id -Gn $1 | grep -c "\b$2\b")
    if [[ $res -gt 0 ]]; then
        echo $OK
    else
        echo $KO
    fi
}
#FUNC: 22
# NOTE(review): duplicate function number — the previous block is also
# labelled #FUNC: 22.
# Add a user with a password and put it in a primary group. If the group
# already exists, set the user's primary group to it; otherwise create the
# group first, then assign it.
#
#parameters:mandatory
# $1: user name to add
# $2: primary group name to which user will be added
# $3: password of user
#return: prints OK if user has been created into group, otherwise KO.
function createUserInGroupWithPassword() {
    isUserRootOrSudo
    if [[ $# != 3 ]]; then
        printf "The Function ${FUNCNAME} must be used with 3 arguments as \n"
        printf "Usage: ${FUNCNAME} <userName> <GroupName> <userPassword>\n"
        printf "Example: ${FUNCNAME} papiwebadmin adimida WebAdmin*_45\n"
        echo $KO
    else
        local userName=$1
        local groupName=$2
        local userPassword=$3
        # Create the account non-interactively, then set its password.
        adduser --disabled-password --gecos "" $userName
        echo "$userName:$userPassword" | sudo chpasswd
        # Check if the group exist else create group.
        # NOTE(review): depends on isGroupExists, which (as written above)
        # never reports OK — verify after fixing that helper.
        if [[ $(isGroupExists $groupName) == $KO ]]; then
            groupadd $groupName
        fi
        usermod -g $groupName $userName
        echo $OK
    fi
}
#FUNC: 23
# Return the ipv4 of a nic interface from its name by using the ip command.
#
#parameters:mandatory
# $1: nic name
#
#return: prints the nic's ipv4 address(es) on stdout; prints nothing when
# the interface is unknown or `ip` is unavailable.  Always returns 0.
function getNicIpv4(){
    local addr
    addr=$(ip addr show "$1" 2>/dev/null | grep -Po "(?<=inet\s)((\d+\.){3}\d+)")
    # The original stored the result in a local and returned nothing;
    # emit it so callers can actually capture the address.
    if [[ -n $addr ]]; then
        printf '%s\n' "$addr"
    fi
    return 0
}
#FUNC: 24
# Reset iptables to default: set ACCEPT policies on all built-in chains,
# zero the counters, flush all rules and delete extra chains, for both
# IPv6 and IPv4.  Requires root.
function resetIptablesToDefault(){
    isUserRootOrSudo
    #IPV6
    ## set default policies to let everything in
    ip6tables --policy INPUT ACCEPT;
    ip6tables --policy OUTPUT ACCEPT;
    ip6tables --policy FORWARD ACCEPT;
    ip6tables -Z; # zero counters
    ip6tables -F; # flush (delete) rules
    ip6tables -X; # delete all extra chains
    # IPv4
    ## set default policies to let everything in
    iptables --policy INPUT ACCEPT;
    iptables --policy OUTPUT ACCEPT;
    iptables --policy FORWARD ACCEPT;
    iptables -Z; # zero counters
    iptables -F; # flush (delete) rules
    iptables -X; # delete all extra chains
}
| true
|
5b744cec96040a4ecd4681fc23a7182f3d335df6
|
Shell
|
DivyaMundhada/ProblemsonSequenceandSelection
|
/NumbertoWeekCase.sh
|
UTF-8
| 409
| 3.890625
| 4
|
[] |
no_license
|
#! /bin/bash -x
# Read a number and display the week day.
# NOTE(review): '-x' in the shebang only takes effect when the script is
# executed directly; `bash script.sh` ignores it.
read -p "Please enter the day number 1 to 7 (considering 1= monday,..and 7= sun)" number
# Map 1..7 onto weekday names; anything else hits the default arm.
case $number in
1) echo "Monday";;
2) echo "Tuesday";;
3) echo "Wednesday";;
4) echo "Thursday";;
5) echo "Friday";;
6) echo "Saturday";;
7) echo "Sunday";;
*) echo "default wrong number" ;;
esac
| true
|
e2803202737314f9516994c9548cee7cfa98563b
|
Shell
|
sheppduck/scripts
|
/brew-tmux-ohmyzsh-rhel7.sh
|
UTF-8
| 3,958
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# One-shot provisioning script for RHEL 7: Linuxbrew, oh-my-vim, tmux
# (built from source), zsh and oh-my-zsh.
# ============================
# Install BREW and LinuxBrew
# ============================
sh -c "$(curl -fsSL https://raw.githubusercontent.com/Linuxbrew/install/master/install.sh)"
test -d ~/.linuxbrew && PATH="$HOME/.linuxbrew/bin:$HOME/.linuxbrew/sbin:$PATH"
test -d /home/linuxbrew/.linuxbrew && PATH="/home/linuxbrew/.linuxbrew/bin:/home/linuxbrew/.linuxbrew/sbin:$PATH"
test -r ~/.bash_profile && echo "export PATH='$(brew --prefix)/bin:$(brew --prefix)/sbin'":'"$PATH"' >>~/.bash_profile
echo "export PATH='$(brew --prefix)/bin:$(brew --prefix)/sbin'":'"$PATH"' >>~/.profile
sudo yum groupinstall -y 'Development Tools' && sudo yum install -y curl file git
# Now install LinuxBrew
sudo yum update -y
sudo yum groupinstall -y "Development Tools"
sudo yum install -y \
autoconf automake19 libtool gettext \
git scons cmake flex bison \
libcurl-devel curl \
ncurses-devel ruby bzip2-devel expat-devel
git clone https://github.com/Homebrew/linuxbrew.git ~/.linuxbrew
# Append to the END to ~./bashrc
# Until LinuxBrew is fixed, the following is required.
# See: https://github.com/Homebrew/linuxbrew/issues/47
echo "export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig:/usr/local/lib64/pkgconfig:/usr/lib64/pkgconfig:/usr/lib/pkgconfig:/usr/lib/x86_64-linux-gnu/pkgconfig:/usr/lib64/pkgconfig:/usr/share/pkgconfig:$PKG_CONFIG_PATH" >> ~/.bashrc
## Setup linux brew
# NOTE(review): LINUXBREWHOME is not set in *this* shell, so the
# double-quoted $LINUXBREWHOME references below expand to empty strings in
# the written rc file; they were probably meant to be single-quoted (or
# escaped) so the expansion happens at shell-startup time instead.
echo "export LINUXBREWHOME=$HOME/.linuxbrew" >> ~/.bashrc
echo "export PATH=$LINUXBREWHOME/bin:$PATH" >> ~/.bashrc
echo "export MANPATH=$LINUXBREWHOME/man:$MANPATH" >> ~/.bashrc
echo "export PKG_CONFIG_PATH=$LINUXBREWHOME/lib64/pkgconfig:$LINUXBREWHOME/lib/pkgconfig:$PKG_CONFIG_PATH" >> ~/.bashrc
echo "export LD_LIBRARY_PATH=$LINUXBREWHOME/lib64:$LINUXBREWHOME/lib:$LD_LIBRARY_PATH" >> ~/.bashrc
# ===============================
# Install oh-my-vim!
# ===============================
echo "Installing oh-my-vim..."
curl -L https://raw.github.com/liangxianzhe/oh-my-vim/master/tools/install.sh | sh
# ================================
# Install TMUX
# ===============================
# install deps
sudo yum install -y gcc kernel-devel make ncurses-devel
# DOWNLOAD SOURCES FOR LIBEVENT AND MAKE AND INSTALL
curl -OL https://github.com/libevent/libevent/releases/download/release-2.0.22-stable/libevent-2.0.22-stable.tar.gz
tar -xvzf libevent-2.0.22-stable.tar.gz
cd libevent-2.0.22-stable
./configure --prefix=/usr/local
make
sudo make install
cd ..
# DOWNLOAD SOURCES FOR TMUX AND MAKE AND INSTALL
curl -OL https://github.com/tmux/tmux/releases/download/2.3/tmux-2.3.tar.gz
tar -xvzf tmux-2.3.tar.gz
cd tmux-2.3
LDFLAGS="-L/usr/local/lib -Wl,-rpath=/usr/local/lib" ./configure --prefix=/usr/local
make
sudo make install
cd ..
# ===============================
# Install ZSH
# ===============================
sudo yum install zsh -y
# ===============================
# Install Oh-My-ZSH
# ===============================
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
# ===============================
# Now push LinuxBrew vars to ~/.zshrc
# NOTE(review): despite the comment above, these lines append to ~/.bashrc
# (again), not ~/.zshrc — and they repeat the unset-variable expansion
# problem flagged earlier.
echo "export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig:/usr/local/lib64/pkgconfig:/usr/lib64/pkgconfig:/usr/lib/pkgconfig:/usr/lib/x86_64-linux-gnu/pkgconfig:/usr/lib64/pkgconfig:/usr/share/pkgconfig:$PKG_CONFIG_PATH" >> ~/.bashrc
echo "export LINUXBREWHOME=$HOME/.linuxbrew" >> ~/.bashrc
echo "export PATH=$LINUXBREWHOME/bin:$PATH" >> ~/.bashrc
echo "export MANPATH=$LINUXBREWHOME/man:$MANPATH" >> ~/.bashrc
echo "export PKG_CONFIG_PATH=$LINUXBREWHOME/lib64/pkgconfig:$LINUXBREWHOME/lib/pkgconfig:$PKG_CONFIG_PATH" >> ~/.bashrc
echo "export LD_LIBRARY_PATH=$LINUXBREWHOME/lib64:$LINUXBREWHOME/lib:$LD_LIBRARY_PATH" >> ~/.bashrc
# ===============================
# ===============================
# Try oh-my-vim install JIC
curl -L https://raw.github.com/liangxianzhe/oh-my-vim/master/tools/install.sh | sh
# ===============================
| true
|
cb956491ef78aafdad36dddbbe781252a6c9eeae
|
Shell
|
kiennt/pinry
|
/pinry.sh
|
UTF-8
| 597
| 2.96875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env sh
# Thin dispatcher for common pinry development tasks.
# Usage: ./pinry.sh {runserver|dumpdata}

# Work inside the project's virtualenv.
. venv/bin/activate

# Start the Django development server.
runserver() {
    python manage.py runserver
}

# Dump one app's tables into a named JSON fixture file.
_dump_fixture() {
    python manage.py dumpdata "$1" --indent=4 > "pinry/pins/fixtures/$2.json"
}

# Snapshot all fixture-backed tables, in the original order.
dumpdata() {
    _dump_fixture auth.user user
    _dump_fixture social_auth social_auth
    _dump_fixture core member
    _dump_fixture pins pins
}

if [ "$1" = "runserver" ]; then
    runserver
elif [ "$1" = "dumpdata" ]; then
    dumpdata
else
    echo "USAGE ./pinry {runserver|dumpdata}"
fi
| true
|
1b886cd77f8a46f38ed7cb25883561843c68b059
|
Shell
|
TheCoderMerlin/MerlinMake
|
/swift-clean.sh
|
UTF-8
| 908
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# This script is part of the MerlinMake repository
# Copyright (C) 2021 CoderMerlin.com
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
# This script searches through all directories from the home directory
# and recursively removes any .build directory.
#
# -prune stops find from descending into a matched .build directory, so
# it never walks a tree it is about to delete (the original form emitted
# "No such file or directory" warnings for every removed tree).
# '-exec ... +' batches the paths into as few rm invocations as possible.
find ~ -type d -name .build -prune -print -exec rm -rf "{}" +
| true
|
19193e2eae489c998068d92f1ebba662b61d1ba7
|
Shell
|
Olave/bigdata
|
/shell/dw/ads/ads_continuity_uv_count.sh
|
UTF-8
| 916
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build (and print) the HiveQL statement that refreshes
# ads_continuity_uv_count for one business date.

# Business date: first CLI argument, defaulting to yesterday (YYYY-MM-DD).
day=${1:-$(date -d '-1 day' +%F)}

# Target Hive database.
app=gmall

# Re-insert the existing rows plus one new row for $day: lag(dt, 2) with a
# 2-day difference identifies devices seen on 3 consecutive days within
# the trailing week.
sql="
insert overwrite table $app.ads_continuity_uv_count
select *
from $app.ads_continuity_uv_count
union all
select '$day' dt,
       concat(date_sub(next_day('$day', 'Monday'), 7), date_sub(next_day('$day', 'Monday'), 1)) wk_dt,
       count(*) continuity_count
from (
         select mid_id
         from (
                  select mid_id, dt, datediff(dt, lag(dt, 2, '1970-01-01') over (partition by mid_id order by dt)) diff
                  from $app.dws_uv_detail_daycount
                  where dt between date_sub('$day', 7) and '$day'
              ) t
         where t.diff = 2
         group by mid_id
     ) t;
"
printf '%s\n' "$sql"
| true
|
5a68268971ddd2201f1baec720490aca12d5b5ff
|
Shell
|
AMDResearch/DAGEE
|
/tools/bin/git-sort-commits-by-size.sh
|
UTF-8
| 1,297
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
# Copyright (c) 2018-Present Advanced Micro Devices, Inc. See LICENSE.TXT for terms.
#!/bin/bash
# NOTE(review): the shebang is on line 2, so it is inert; for it to take
# effect the copyright line must come after '#!/bin/bash'.
# List every blob in the repository's full history, smallest first, as
# "short-hash  size  path" with human-readable (IEC) sizes, then print
# guidance on purging large files from history.
git rev-list --objects --all \
| git cat-file --batch-check='%(objecttype) %(objectname) %(objectsize) %(rest)' \
| sed -n 's/^blob //p' \
| sort --numeric-sort --key=2 \
| cut -c 1-12,41- \
| numfmt --field=2 --to=iec-i --suffix=B --padding=7 --round=nearest
cat <<EOM
Remove large files from the history using the following command (WARNING: DANGEROUS OPERATION)
git filter-branch --tree-filter 'rm -f large files' HEAD
IMPORTANT PRECAUTIONS:
-command must be executed from root dir of repo. File paths should be relative to root
-large files may be in multiple branches, so need to execute this for each branch
-The command will re-write history of the branch and replace existing commits with
new similar looking commits. This has more implications:
  - Specify all large file names once so history is rewritten once only
  - If the local branch, say B, tracks a remote branch, say origin/B, a simple
  push won't work. You'll have to git push --force.
  - Other people who have checked out origin/B will need to remove their local B
  and checkout again. For every branch. A simple pull will try to merge and do the
  wrong thing, i.e. retain the deleted files.
EOM
| true
|
80653a7b10f58015b25c8ee029bad3a1715c05aa
|
Shell
|
belivem/Study
|
/FFMpeg
|
UTF-8
| 2,219
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Cross-compile a trimmed FFmpeg (shared libs, MediaCodec-enabled) for
# Android arm64-v8a using NDK r19c's clang toolchain.
NDK=/data/liyanan/ffmpeg/android-ndk-r19c
ADDI_LDFLAGS="-fPIE -pie"
# NOTE(review): -mfloat-abi/-mfpu are 32-bit ARM options; confirm they are
# meaningful for an armv8-a/arm64 build.
ADDI_CFLAGS="-fPIE -pie -march=armv8-a -mfloat-abi=softfp -mfpu=neon"
CPU=arm64-v8a
ARCH=arm64
SYSROOT=$NDK/toolchains/llvm/prebuilt/linux-x86_64/sysroot
TOOLCHAIN=$NDK/toolchains/llvm/prebuilt/linux-x86_64/bin
# Install prefix: ./android/arm64-v8a under the ffmpeg source tree.
PREFIX=$(pwd)/android/$CPU
# Configure FFmpeg: disable everything by default, then re-enable the
# specific encoders/decoders/bitstream filters this project needs.
configure()
{
    ./configure \
    --prefix=$PREFIX \
    --toolchain=clang-usan \
    --enable-cross-compile \
    --target-os=android \
    --arch=$ARCH \
    --sysroot=$SYSROOT \
    --cc=$TOOLCHAIN/aarch64-linux-android24-clang \
    --cxx=$TOOLCHAIN/aarch64-linux-android24-clang++ \
    --strip=$TOOLCHAIN/aarch64-linux-android-strip \
    --extra-cflags="$ADDI_CFLAGS" \
    --extra-ldflags="$ADDI_LDFLAGS" \
    --disable-encoders \
    --disable-decoders \
    --disable-avdevice \
    --disable-static \
    --disable-doc \
    --disable-ffplay \
    --disable-network \
    --disable-doc \
    --disable-symver \
    --disable-ffprobe \
    --enable-neon \
    --enable-shared \
    --enable-gpl \
    --enable-pic \
    --enable-jni \
    --enable-pthreads \
    --enable-mediacodec \
    --enable-encoder=aac \
    --enable-encoder=gif \
    --enable-encoder=libopenjpeg \
    --enable-encoder=libmp3lame \
    --enable-encoder=libwavpack \
    --enable-encoder=mpeg4 \
    --enable-encoder=pcm_s16le \
    --enable-encoder=png \
    --enable-encoder=mjpeg \
    --enable-encoder=srt \
    --enable-encoder=subrip \
    --enable-encoder=yuv4 \
    --enable-encoder=text \
    --enable-decoder=aac \
    --enable-decoder=aac_latm \
    --enable-decoder=libopenjpeg \
    --enable-decoder=mp3 \
    --enable-decoder=mpeg4_mediacodec \
    --enable-decoder=pcm_s16le \
    --enable-decoder=flac \
    --enable-decoder=flv \
    --enable-decoder=gif \
    --enable-decoder=png \
    --enable-decoder=srt \
    --enable-decoder=xsub \
    --enable-decoder=yuv4 \
    --enable-decoder=vp8_mediacodec \
    --enable-decoder=h264_mediacodec \
    --enable-decoder=hevc_mediacodec \
    --enable-bsf=aac_adtstoasc \
    --enable-bsf=h264_mp4toannexb \
    --enable-bsf=hevc_mp4toannexb \
    --enable-bsf=mpeg4_unpack_bframes
}
# Configure, then rebuild from clean and install into $PREFIX.
build()
{
    configure
    make clean
    make -j4
    make install
}
build
| true
|
a529f966b30494fd011390ffd3064c0bfb3c5375
|
Shell
|
dokku/dokku
|
/tests/unit/git_1.bats
|
UTF-8
| 2,380
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bats
# Dokku integration tests for the git plugin: help output, the GIT_REV
# env var (default, disabled, renamed) and the keep-git-dir option.
# Helpers (create_app, deploy_app, assert_*) come from test_helper.
load test_helper
setup() {
  global_setup
  create_app
}
teardown() {
  destroy_app
  global_teardown
}
# `dokku git` and `dokku git:help` must print the same help text.
@test "(git) git:help" {
  run /bin/bash -c "dokku git"
  echo "output: $output"
  echo "status: $status"
  assert_output_contains "Manage app deploys via git"
  help_output="$output"
  run /bin/bash -c "dokku git:help"
  echo "output: $output"
  echo "status: $status"
  assert_output_contains "Manage app deploys via git"
  assert_output "$help_output"
}
# By default a deploy sets GIT_REV on the app.
@test "(git) ensure GIT_REV env var is set" {
  run deploy_app
  echo "output: $output"
  echo "status: $status"
  assert_success
  run /bin/bash -c "dokku config:get $TEST_APP GIT_REV"
  echo "output: $output"
  echo "status: $status"
  assert_output_exists
}
# `git:set rev-env-var` with no value disables the variable entirely.
@test "(git) disable GIT_REV" {
  run /bin/bash -c "dokku git:set $TEST_APP rev-env-var"
  echo "output: $output"
  echo "status: $status"
  assert_success
  run deploy_app
  echo "output: $output"
  echo "status: $status"
  assert_success
  run /bin/bash -c "dokku config:get $TEST_APP GIT_REV"
  echo "output: $output"
  echo "status: $status"
  assert_output_not_exists
}
# A custom name can be used instead of GIT_REV.
@test "(git) customize the GIT_REV environment variable" {
  run /bin/bash -c "dokku git:set $TEST_APP rev-env-var GIT_REV_ALT"
  echo "output: $output"
  echo "status: $status"
  assert_success
  run deploy_app
  echo "output: $output"
  echo "status: $status"
  assert_success
  run /bin/bash -c "dokku config:get $TEST_APP GIT_REV_ALT"
  echo "output: $output"
  echo "status: $status"
  assert_output_exists
}
# keep-git-dir=true preserves the .git directory inside the built container.
@test "(git) keep-git-dir" {
  run /bin/bash -c "dokku git:set $TEST_APP keep-git-dir true"
  echo "output: $output"
  echo "status: $status"
  assert_success
  run deploy_app
  echo "output: $output"
  echo "status: $status"
  assert_success
  run /bin/bash -c "dokku enter $TEST_APP web ls .git"
  echo "output: $output"
  echo "status: $status"
  assert_success
  assert_output_contains "branches"
  assert_output_contains "config"
  assert_output_contains "description"
  assert_output_contains "HEAD"
  assert_output_contains "hooks"
  assert_output_contains "index"
  assert_output_contains "info"
  assert_output_contains "logs"
  assert_output_contains "objects"
  assert_output_contains "refs"
  run /bin/bash -c "dokku enter $TEST_APP web test -d .git"
  echo "output: $output"
  echo "status: $status"
  assert_success
}
|
90ac91025f24f53826d3242e0a5573d1ba0e5d54
|
Shell
|
dombak1608/embedded_linux
|
/LV2/resources/post-build.sh
|
UTF-8
| 1,677
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# Buildroot post-build hook for a Raspberry Pi image: patch the target
# rootfs (inittab, profile, sshd) and the boot firmware config so the Pi
# boots u-boot and exposes a serial console and an HDMI console.
set -u
set -e
# Add a console on tty1
if [ -e ${TARGET_DIR}/etc/inittab ]; then
    grep -qE '^tty1::' ${TARGET_DIR}/etc/inittab || \
	sed -i '/GENERIC_SERIAL/a\
tty1::respawn:/sbin/getty -L tty1 0 vt100 # HDMI console' ${TARGET_DIR}/etc/inittab
fi
# Change config.txt to boot u-boot.bin instead of zImage
sed -i -e '/.*kernel=.*/c\
kernel=u-boot.bin' ${BINARIES_DIR}/rpi-firmware/config.txt
if ! grep -Fxq "enable_uart=1" ${BINARIES_DIR}/rpi-firmware/config.txt
then
	echo "enable_uart=1" >> ${BINARIES_DIR}/rpi-firmware/config.txt
fi
# Replace previous line with the following one in order to change uart0 clock and baud
# (used as workarround with previous linux kernel versions when DTS was using 3MHz clock
# and firmware overriden it to 48MHz as the new firmware realy sets uart0 clock to 48MHz,
# but overriding was not working through u-boot. now it is set in DTS to 48MHz)
#kernel=u-boot.bin\ninit_uart_clock=3000000\ninit_uart_baud=115200' ${BINARIES_DIR}/rpi-firmware/config.txt
# Change profile to print path: rewrite the PS1 export lines (root '#' and
# user '$' prompts) so the host name is green after a successful command
# and red after a failure, followed by the working directory.
# (The '"'"' runs close/reopen the single-quoted sed program around
# embedded single quotes — edit with care.)
sed -i '/export PS1='"'"'\# '"'"'.*/c\
export PS1="\\\`if \[\[ \\\$? = "0" ]]; then echo '"'"'\\e\[32m\\h\\e\[0m'"'"'; else echo '"'"'\\e\[31m\\h\\e\[0m'"'"' ; fi\\\`:\\\w\\\# "' ${TARGET_DIR}/etc/profile
sed -i '/export PS1='"'"'\$ '"'"'.*/c\
export PS1="\\\`if \[\[ \\\$? = "0" ]]; then echo '"'"'\\e\[32m\\h\\e\[0m'"'"'; else echo '"'"'\\e\[31m\\h\\e\[0m'"'"' ; fi\\\`:\\\w\\\$ "' ${TARGET_DIR}/etc/profile
# Change sshd_config for SSH server: allow root login with empty password.
sed -i '/.*PermitRootLogin.*/c\
PermitRootLogin yes' ${TARGET_DIR}/etc/ssh/sshd_config
sed -i '/.*PermitEmptyPasswords.*/c\
PermitEmptyPasswords yes' ${TARGET_DIR}/etc/ssh/sshd_config
| true
|
576b9fc0aa93265f7b8c0a326fd9e51515150e90
|
Shell
|
ganbarodigital/storyplayer
|
/src/bin/browsermob-proxy.sh
|
UTF-8
| 2,311
| 4.1875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# browsermob-proxy.sh
# 	start | stop browsermob-proxy.sh
# Screen-based daemon manager for the BrowserMob proxy JAR: the proxy runs
# inside a detached `screen` session named $NAME, and the PID of that
# screen session is used for status/stop/monitor.
JAR=browsermob-proxy-2.0-beta-10-SNAPSHOT-standalone.jar
NAME=browsermob-proxy
PARAMS="-port 9090"
# special case - where is the JAR file?
# "@@BIN_DIR@@" is a build-time placeholder; when it was not substituted
# we are running from a vendor checkout and derive the path from $0.
BIN_DIR="@@BIN_DIR@@"
if [[ $BIN_DIR == "@""@BIN_DIR@@" ]] ; then
    # we are running out of a vendor folder
    BIN_DIR="`dirname $0`/../bin"
fi
# Print an error to stdout and abort.
function die() {
    echo "*** error: $@"
    exit 1
}
# make sure we have Java installed
if ! which java > /dev/null 2>&1 ; then
    die "java not found. please install and then try again"
fi
# make sure we have the JAR file installed
if [[ ! -e $BIN_DIR/$JAR ]] ; then
    die "$BIN_DIR/$JAR not found; do you need to download it?"
fi
# Launch the proxy inside a detached screen session (no-op when already
# running), then re-check that it actually came up.
function start() {
    if ! is_running ; then
        # start the process
        echo "Starting $NAME in a screen"
        screen -d -m -S $NAME java -jar "$BIN_DIR/$JAR" $PARAMS
        # did it start?
        sleep 1
        is_running
    fi
}
# Stop the proxy: kill the screen session's children first, then the
# session itself, escalating to SIGKILL if it survives.
function stop() {
    local pid=`get_pid`
    if [[ -z $pid ]] ; then
        echo "$NAME was not running"
        return 0
    fi
    # stop the children first
    kill_children $pid
    kill $pid
    pid=`get_pid`
    if [[ -n $pid ]] ; then
        sleep 2
        pid=`get_pid`
    fi
    if [[ -n $pid ]] ; then
        kill -9 $pid
        pid=`get_pid`
    fi
    if [[ -n $pid ]] ; then
        echo "$NAME is running as pid $pid, and has ignored attempts to terminate"
        return 1
    fi
    echo "$NAME has been stopped"
}
# Stop (when running) and start again.
function restart() {
    local pid=`get_pid`
    if [[ -n $pid ]] ; then
        stop
    fi
    start
}
# Report whether the screen session exists; returns 0 when running.
function is_running() {
    local pid=`get_pid`
    if [[ -n $pid ]] ; then
        echo "$NAME is running as pid $pid"
        return 0
    fi
    echo "$NAME is not running"
    return 1
}
# Attach the caller's terminal to the running screen session.
function monitor() {
    local pid=`get_pid`
    if [[ -z $pid ]] ; then
        echo "$NAME is not running"
        exit 1
    fi
    screen -rd $NAME
}
function usage() {
    echo "usage: $NAME.sh <start|stop|restart|status|monitor>"
}
# Print the pid of the detached screen session (empty when not running).
function get_pid() {
    # get the pid of our daemon; the [S] trick keeps this grep out of the
    # ps listing it is searching.
    local pid=`ps -ef | grep "$NAME" | grep [S]CREEN | awk {' print $2 '}`
    if [[ -n $pid ]] ; then
        echo "$pid"
    fi
}
# Send SIGTERM to every direct child of the given pid.
function kill_children() {
    local ppid=$1
    # get the pid of all the child processes
    for x in `ps -ef | awk '$3 == '${ppid}' { print $2 }'` ; do
        kill $x
    done
}
case "$1" in
    "status")
        is_running
        ;;
    "stop")
        stop
        ;;
    "restart")
        restart
        ;;
    "start")
        start
        ;;
    "monitor")
        monitor
        ;;
    *)
        usage
        ;;
esac
| true
|
dfdc1a2dc7b345a3a896ba795784473861054b6b
|
Shell
|
mutanthost/backupEC2
|
/ORACLE_19/bin/ORE
|
UTF-8
| 909
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
# Launcher wrapper for Oracle R Enterprise (ORE): locate R, verify the
# minimum R version, prepend ORE's library directory to R_LIBS_USER, then
# run R with the caller's arguments.
ORE_VER=1.5.1
# Minimum supported R version: R_MAJOR_VER.R_MINOR_VER.0.
R_MAJOR_VER=3
R_MINOR_VER=3
# Checking R: resolve R_HOME via `R RHOME`, then re-derive it from the
# resolved binary to make sure the path is real; fail otherwise.
type R > /dev/null 2>&1
if [ $? -eq 0 ]; then
  unset R_HOME
  R_HOME=`R RHOME`
fi
if [ -n "${R_HOME}" ]; then
  RCMD="${R_HOME}/bin/R"
fi
if [ -f "${RCMD}" ]; then
  unset R_HOME
  R_HOME=`${RCMD} RHOME`
else
  echo "Fail"
  echo "	ERROR: R not found"
  exit 1
fi
# R expression evaluating to TRUE when the running R is >= the minimum
# version; its printed value ("[1] TRUE"/"[1] FALSE") is cut apart below.
R_CHK="as.integer(R.Version()[['major']]) > ${R_MAJOR_VER}L || (as.integer(R.Version()[['major']])==${R_MAJOR_VER}L && as.integer(R.Version()[['minor']]) >= ${R_MINOR_VER}L)"
rver_chk=`$RCMD --vanilla --slave -e "$R_CHK" | cut -f2 -d' '`
if [ "$rver_chk" = "FALSE" ]; then
  echo "Fail"
  echo "	ERROR: ORE $ORE_VER requires R ${R_MAJOR_VER}.${R_MINOR_VER}.0 or later"
  exit 1
fi
# Put ORE's R library (under ORACLE_HOME) ahead of any user library path.
ORE_LIBS_USER="${ORACLE_HOME}/R/library"
if [ -z "${R_LIBS_USER}" ]; then
  R_LIBS_USER="${ORE_LIBS_USER}"
else
  R_LIBS_USER="${ORE_LIBS_USER}:${R_LIBS_USER}"
fi
export R_LIBS_USER
$RCMD $@
| true
|
e71e4ba29b9da511c3163902a53f4df21c6390fc
|
Shell
|
yul14nrc/k8sDynatrace
|
/3-dynatrace/connectK8sDynatrace/k8sClusterToDynatrace.sh
|
UTF-8
| 3,811
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Register the current kubectl cluster with a Dynatrace tenant:
#  1) load tenant/environment/API-token credentials (prompting if absent),
#  2) ensure the "dynatrace" namespace and monitoring service account exist,
#  3) POST the cluster API endpoint + bearer token to the Dynatrace
#     kubernetes/credentials API and persist the returned id/name to JSON.
if [ -z "$DT_TENANTID" ]; then
export CREDS=../../1-credentials/creds.json
if [ -f "$CREDS" ]; then
echo "The $CREDS file exists."
else
echo "The $CREDS file does not exists. Executing the defineDTcredentials.sh script..."
cd ../../1-credentials
./defineDTCredentials.sh
cd ../3-dynatrace/connectK8sDynatrace
fi
# Pull the individual credential fields out of the creds JSON with jq.
export DT_TENANTID=$(cat ../../1-credentials/creds.json | jq -r '.dynatraceTenantID')
export DT_ENVIRONMENTID=$(cat ../../1-credentials/creds.json | jq -r '.dynatraceEnvironmentID')
export DT_API_TOKEN=$(cat ../../1-credentials/creds.json | jq -r '.dynatraceApiToken')
fi
echo ""
echo "Verifying dynatrace namespace..."
echo ""
# Empty output (errors silenced) means the namespace does not exist yet.
ns=$(kubectl get namespace dynatrace --no-headers --output=go-template={{.metadata.name}} --kubeconfig ~/.kube/config 2>/dev/null)
if [ -z "${ns}" ]; then
echo "Namespace dynatrace not found"
echo ""
echo "Creating namespace dynatrace:"
echo ""
kubectl create namespace dynatrace --kubeconfig ~/.kube/config
else
echo "Namespace dynatrace exists"
echo ""
echo "Using namespace dynatrace"
fi
echo ""
echo "Creating monitoring service account:"
echo ""
kubectl apply -f ./kubernetes-monitoring-service-account.yaml --kubeconfig ~/.kube/config
echo ""
# SaaS tenants have no environment id; Managed tenants embed it in the URL.
# NOTE(review): the `?)` arm below is unreachable — `*)` already matches
# everything — and `usage` is not defined in this script; confirm and remove.
case $DT_ENVIRONMENTID in
'')
DYNATRACE_BASE_URL="https://$DT_TENANTID.live.dynatrace.com"
;;
*)
DYNATRACE_BASE_URL="https://$DT_TENANTID.dynatrace-managed.com/e/$DT_ENVIRONMENTID"
;;
?)
usage
;;
esac
DYNATRACE_API_TOKEN="$DT_API_TOKEN"
DYNATRACE_API_URL="$DYNATRACE_BASE_URL/api/config/v1/kubernetes/credentials"
# Cluster API server URL and the monitoring service account's bearer token.
API_ENDPOINT_URL=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' --kubeconfig ~/.kube/config)
BEARER_TOKEN=$(kubectl get secret $(kubectl get sa dynatrace-monitoring -o jsonpath='{.secrets[0].name}' -n dynatrace --kubeconfig ~/.kube/config) -o jsonpath='{.data.token}' -n dynatrace --kubeconfig ~/.kube/config | base64 --decode)
echo "================================================================="
echo "Dynatrace Kubernetes configuration:"
echo ""
echo "DYNATRACE_BASE_URL = $DYNATRACE_BASE_URL"
echo "DYNATRACE_API_URL = $DYNATRACE_API_URL"
echo "DYNATRACE_API_TOKEN = $DYNATRACE_API_TOKEN"
echo "================================================================="
echo ""
# NOTE(review): $CLUSTER_NAME is never set in this script — presumably it is
# exported by the caller; otherwise "label" is empty. Verify against caller.
POST_DATA=$(
cat <<EOF
{
"label": "$CLUSTER_NAME",
"endpointUrl": "$API_ENDPOINT_URL",
"authToken": "$BEARER_TOKEN",
"eventsFieldSelectors": [
{
"label": "Node warning events",
"fieldSelector": "involvedObject.kind=Node,type=Warning",
"active": true
},
{
"label": "Sockshop prod warning events",
"fieldSelector": "involvedObject.namespace=sockshop-production,type=Warning",
"active": true
}
],
"active": true,
"eventsIntegrationEnabled": true,
"workloadIntegrationEnabled": true,
"certificateCheckEnabled": false,
"hostnameVerificationEnabled": true,
"davisEventsIntegrationEnabled": true
}
EOF
)
echo $POST_DATA
echo ""
echo "Result: "
echo ""
# Register the cluster with Dynatrace and record the returned id/name.
CONNECT_K8S_DYNATRACE=$(curl -X POST "$DYNATRACE_API_URL" -H "Content-type: application/json" -H "Authorization: Api-Token "$DYNATRACE_API_TOKEN -d "$POST_DATA")
echo $CONNECT_K8S_DYNATRACE
DYNATRACE_K8S_ID=$(echo $CONNECT_K8S_DYNATRACE | jq -r '.id')
DYNATRACE_K8S_NAME=$(echo $CONNECT_K8S_DYNATRACE | jq -r '.name')
INFO=$(
cat <<EOF
{
"dynatrace_k8s_id": "$DYNATRACE_K8S_ID",
"dynatrace_k8s_name": "$DYNATRACE_K8S_NAME"
}
EOF
)
FILE=./dynatracek8sinfo.json
rm $FILE 2>/dev/null
echo $INFO | jq -r '.' >>$FILE
| true
|
621d30f615cb4c56657e9a1c042188a75d31fc6a
|
Shell
|
brennanfee/dotfiles
|
/bash/shared/functions/misc.bash
|
UTF-8
| 1,567
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Bash strict mode — enabled only when this file is *executed*; when it is
# sourced into an interactive shell we must not change the caller's options.
# shellcheck disable=SC2154
([[ -n ${ZSH_EVAL_CONTEXT} && ${ZSH_EVAL_CONTEXT} =~ :file$ ]] \
|| [[ -n ${BASH_VERSION} ]] && (return 0 2> /dev/null)) && SOURCED=true || SOURCED=false
if ! ${SOURCED}; then
  set -o errexit # same as set -e
  set -o nounset # same as set -u
  set -o errtrace # same as set -E
  set -o pipefail
  set -o posix
  #set -o xtrace # same as set -x, turn on for debugging
  shopt -s extdebug
  # Restrict word splitting to newline/tab (safer default).
  IFS=$(printf '\n\t')
fi
# END Bash strict mode
# Re-source the user's bash profile into the current shell.
function reload_profile() {
  # shellcheck source=/dev/null
  source "${HOME}/.bash_profile"
}

# Case-insensitive grep over the directory listing (dotfiles included).
function lsgrep() {
  # shellcheck disable=SC2010
  ls -A | grep -i "$@"
}

# Like lsgrep but with the long listing format.
function llgrep() {
  # shellcheck disable=SC2010
  ls -hlA --time-style=long-iso | grep -i "$@"
}

# ripgrep (smart-case) variants of the two helpers above.
function lsrg() {
  # shellcheck disable=SC2012
  ls -A | rg -S "$@"
}

function llrg() {
  # shellcheck disable=SC2012
  ls -hlA --time-style=long-iso | rg -S "$@"
}

# grep the process list; the sed trick brackets the first character of the
# pattern so the grep process itself never matches.
function psgrep() {
  # shellcheck disable=SC2009,SC2001
  ps aux | grep -i "$(echo "$@" | sed "s/^\(.\)/[\1]/g")"
}

function psrg() {
  # shellcheck disable=SC2001
  ps aux | rg -S "$(echo "$@" | sed "s/^\(.\)/[\1]/g")"
}

# Print this machine's public IP address.
function myip() {
  curl ifconfig.co/ip
}

# Disk usage of the immediate children of $1 (or CWD), largest first.
function usage() {
  if [[ -n "$1" ]]; then
    du -h --max-depth=1 "$1" | sort -hr
  else
    du -h --max-depth=1 | sort -hr
  fi
}

# Weather forecast for a ZIP/location via the Wunderground XML API.
# NOTE(review): <YOURZIPORLOCATION> is a placeholder default — set a real
# location (and note this legacy API may no longer be available).
function weather() {
  curl -sL "http://api.wunderground.com/auto/wui/geo/ForecastXML/index.xml?query=${1:-<YOURZIPORLOCATION>}" | perl -ne '/<title>([^<]+)/&&printf "%s: ",$1;/<fcttext>([^<]+)/&&print $1,"\n"'
}
| true
|
a0fbddef85d7f7aff6c30ef76aa820819ae83d37
|
Shell
|
guke/flarum
|
/system/vagrant/environment.sh
|
UTF-8
| 970
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Vagrant provisioner for the Flarum dev box: installs tooling, seeds the
# database and builds the PHP + JS sides of the application.
su - vagrant
### Setup NPM globals and create necessary directories ###
sudo apt-get install -y phantomjs zsh exuberant-ctags
mkdir /home/vagrant/npm
sudo chown -R vagrant:vagrant /home/vagrant
cp /vagrant/scripts/aliases ~/.aliases
### Create rc file ###
# Source the aliases from whichever shell rc file exists (zsh preferred).
if [ -e "/home/vagrant/.zshrc" ]
then
echo "source ~/.aliases" >> ~/.zshrc
else
echo "source ~/.aliases" >> ~/.bashrc
fi
### Set up environment files and database ###
cp /vagrant/system/.env.example /vagrant/system/.env
mysql -u root -proot -e 'create database flarum'
### Setup flarum/core and install dependencies ###
cd /vagrant/system/core
composer install --prefer-dist
cd /vagrant/system
composer install --prefer-dist
composer dump-autoload
# Front-end assets: bower deps, then gulp builds for forum and admin.
cd /vagrant/system/core/js
bower install
cd /vagrant/system/core/js/forum
npm install
gulp
cd /vagrant/system/core/js/admin
npm install
gulp
# Publish assets, run the installer and seed demo data.
cd /vagrant/system
php artisan vendor:publish
php artisan flarum:install
php artisan flarum:seed
| true
|
40560d0676d0bba9e99520a61c4108a06062d009
|
Shell
|
Agaisme/nopain-setupserver
|
/main.sh
|
UTF-8
| 596
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
#######################################################
# NoPain Setup Server V0.1
# Authors Subraga Islammada S (Agaisme)
# Support Bash Version 4.3
# This file is taken from various sources
#######################################################
# Sunday, 11-Aug-2019
# Load Support File
# loader.sh is expected to define image_head, pc_info and menu_show.
CURRENT_DIR=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
source "${CURRENT_DIR}/loader.sh"
# set locale temporarily to english
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export LANGUAGE=en_US.UTF-8
export LC_CTYPE=en_US.UTF-8
# Entry point: clear the screen, show the banner and host info, then the menu.
function main() {
  clear
  image_head
  pc_info
  menu_show
}
main
| true
|
ceaf8fc4a12a4525424561efac6b7eddf3276c4d
|
Shell
|
Wyyyyylla/shell2python
|
/test/test05.sh
|
UTF-8
| 247
| 2.9375
| 3
|
[] |
no_license
|
#/bin/sh
# Matthew Moss
# mdm@cse.unsw.edu.au
# cs2041, 12s2
# NOTE(review): this file is a test fixture for a shell-to-python converter.
# The body is deliberately quirky (echoes $1 inside a loop over $i, and
# "$i = `expr $1 + 1`" is not a valid shell assignment) — do not "fix" it
# without also updating the converter's expected output.
# We're going to have nested loops
for i in 1 2 3
do
for j in 4 5 6
do
while `test $i -lt $j`
do
echo $1
$i = `expr $1 + 1`
done
done
done
| true
|
1cd1b01ec52701be16046884e36a28dc8c484499
|
Shell
|
BerkeleyLibrary/iipsrv
|
/test/test.sh
|
UTF-8
| 2,637
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
############################################################
# Helper functions
# shellcheck disable=SC2155
# Fetch a URL into a local file. On success print a confirmation and a
# blank separator line; on failure report the error and abort the test run.
download() {
  local src="${1}"
  local dst="${2}"
  if curl -v "${src}" > "${dst}" ; then
    echo "Downloaded ${src} to ${dst}"
    echo
  else
    echo "Downloading ${src} to ${dst} failed"
    exit 1
  fi
}
# Compare two files byte-for-byte with cmp. Identical: print a confirmation
# and a blank line. Different: dump each file's md5 and byte count, then
# abort the test run with status 1.
assert_identical() {
  local want="${1}"
  local got="${2}"
  if cmp "${want}" "${got}"; then
    echo "${got} matches expected ${want}"
    echo
  else
    echo "${got} did not match expected ${want}"
    local want_md5 want_size got_md5 got_size
    want_md5=$(md5sum "${want}")
    want_size=$(wc -c "${want}" | /usr/bin/grep -Eo '[[:digit:]]+')
    echo "Expected: ${want_md5}"
    echo " ${want_size} bytes"
    got_md5=$(md5sum "${got}")
    got_size=$(wc -c "${got}" | /usr/bin/grep -Eo '[[:digit:]]+')
    echo "Actual: ${got_md5}"
    echo " ${got_size} bytes"
    exit 1
  fi
}
############################################################
# Fixture
# IIIF endpoints served by the container under test, and the files holding
# the expected responses.
INFO_URL='http://localhost/iiif/test.tif/info.json'
IMAGE_URL='http://localhost/iiif/test.tif/full/64,/0/default.jpg'
EXPECTED_INFO_PATH="test/info.json"
EXPECTED_IMAGE_PATH="test/default.jpg"
ARTIFACTS_DIR="artifacts"
ACTUAL_INFO_PATH="${ARTIFACTS_DIR}/info.json"
ACTUAL_IMAGE_PATH="${ARTIFACTS_DIR}/default.jpg"
############################################################
# Setup
echo '------------------------------------------------------------'
echo 'Creating artifacts directory:'
echo
# NOTE(review): mkdir failure is only reported, not fatal — the following
# download would then fail; consider exiting here. Left as-is.
if ! mkdir -p "${ARTIFACTS_DIR}" ; then
echo "Unable to create artifacts directory ${ARTIFACTS_DIR} in $(pwd)"
fi
echo "Created $(realpath "${ARTIFACTS_DIR}")"
echo
############################################################
# Tests
# Each pair below fetches a IIIF response and compares it byte-for-byte
# against the checked-in expected file (download/assert_identical above).
echo '------------------------------------------------------------'
echo 'Making IIIF info request:'
echo
download "${INFO_URL}" "${ACTUAL_INFO_PATH}"
echo
echo '------------------------------------------------------------'
echo 'Verifying IIIF info result:'
echo
assert_identical "${EXPECTED_INFO_PATH}" "${ACTUAL_INFO_PATH}"
echo '------------------------------------------------------------'
echo 'Making IIIF image request:'
echo
download "${IMAGE_URL}" "${ACTUAL_IMAGE_PATH}"
echo
echo '------------------------------------------------------------'
echo 'Verifying IIIF image result:'
echo
assert_identical "${EXPECTED_IMAGE_PATH}" "${ACTUAL_IMAGE_PATH}"
| true
|
f8064a5f30012b66a1b861a2c0c58962f523839b
|
Shell
|
szorfein/dots
|
/home/.chezmoiscripts/run_once_awesome_config.sh.tmpl
|
UTF-8
| 737
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# chezmoi run_once template: generate AwesomeWM's config/env.lua with
# host-specific choices (browser per distro, sound system, CPU count).
# The {{ ... }} directives are expanded by chezmoi before the script runs.
set -o errexit
DIR="${HOME}/.config/awesome/config"
DEST="$DIR/env.lua"
[ -d "$DIR" ] || mkdir -p "$DIR"
# Unquoted EOF: shell expansions like $(nproc) below are evaluated at
# generation time; do not add comment lines inside the heredoc — they would
# end up in the generated Lua file.
cat >"$DEST" <<EOF
terminal = os.getenv("TERMINAL") or "xst"
terminal_cmd = terminal .. " -e "
editor = "{{ .editor }}"
editor_cmd = terminal_cmd .. editor
{{- if eq .sound "alsa" }}
{{- if eq .chezmoi.osRelease.id "arch" }}
web_browser = "brave"
{{- else if eq .chezmoi.osRelease.id "void" }}
web_browser = "firefox"
{{- else }}
web_browser = "brave-bin"
{{- end }}
{{ else }}
web_browser = "firefox-bin"
{{ end -}}
file_browser = terminal_cmd .. "vifm"
terminal_args = { " -c ", " -e " }
sound_system = "{{ .sound }}"
sound_card = "{{ .sound_card }}"
cpu_core = $(nproc)
password = "awesome"
EOF
| true
|
51dd9b1dce79182105acaa69090a56037b31cc76
|
Shell
|
timotheehub/Projbook
|
/build/deploy-doc.sh
|
UTF-8
| 1,248
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Publish the built Projbook documentation to the gh-pages branch from CI.
# Requires GH_TOKEN and GH_REF in the environment; rewrites gh-pages history.
set -e # exit with nonzero exit code if anything fails
# inside this git repo we'll pretend to be a new user
git config user.name "Travis CI"
git config user.email "defrancea@gmail.com"
# The first and only commit to this new Git repo contains all the
# files present with the commit message "Deploy to GitHub Pages".
git checkout --orphan gh-pages
git rm -rf .
# Copy the release build output into the branch root.
cp ./src/Projbook.Documentation/bin/Release/projbook.html index.html
cp ./src/Projbook.Documentation/bin/Release/projbook-pdf.pdf projbook.pdf
cp -R ./src/Projbook.Documentation/bin/Release/Content Content
cp -R ./src/Projbook.Documentation/bin/Release/Scripts Scripts
cp -R ./src/Projbook.Documentation/bin/Release/fonts fonts
git add ./index.html
git add ./projbook.pdf
git add ./Content
git add ./fonts
git add ./Scripts
git commit -m "Deploy Documentation"
# Force push from the current repo's master branch to the remote
# repo's gh-pages branch. (All previous history on the gh-pages branch
# will be lost, since we are overwriting it.) We redirect any output to
# /dev/null to hide any sensitive credential data that might otherwise be exposed.
git push --force --quiet "https://${GH_TOKEN}@${GH_REF}" gh-pages > /dev/null 2>&1
# Checkout master
git checkout ${TRAVIS_BRANCH}
| true
|
936be9359aaad370e8c7f049fb77e68e85840370
|
Shell
|
shts/keyakifeed-api
|
/deamon_crawler_matomefeeds
|
UTF-8
| 785
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh
# chkconfig: 2345 95 5
# description: crawler for keyakizaka46 matome feed entries
# processname: crawler_entries
# SysV-style init wrapper: start/stop/restart the Ruby crawler, using the
# PID file written under the app's tmp/pids directory.
NAME="[ matomefeeds crawler ]"
APP_ROOT_DIR="/var/www/app"
PID="/var/www/app/tmp/pids/crawler_matomefeeds.pid"
CMD="ruby crawler_matomefeeds.rb"
USER=root
# Refuse to start twice; run the crawler as $USER from the app root.
# NOTE(review): this function never writes $PID itself — presumably the
# crawler daemonizes and writes its own pidfile; confirm, otherwise
# stop() can never find it.
start()
{
if [ -e $PID ]; then
echo "$NAME already started"
exit 1
fi
echo "$NAME START!"
sudo su -l ${USER} -c "cd ${APP_ROOT_DIR} && ${CMD}"
}
# Send SIGINT to the recorded PID and remove the pidfile.
stop()
{
if [ ! -e $PID ]; then
echo "$NAME not started"
exit 1
fi
echo "$NAME STOP!"
kill -INT `cat ${PID}`
rm $PID
}
restart()
{
stop
sleep 2
start
}
# Subcommand dispatch.
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
restart
;;
*)
echo "Syntax Error: release [start|stop|restart]"
;;
esac
| true
|
deddab5d03cfc7ab9097fdc3ed566e2fac5df7e1
|
Shell
|
minorhash/3bs
|
/bin/kill.sh
|
UTF-8
| 176
| 2.890625
| 3
|
[] |
no_license
|
# Find the process listening on port 3023 via netstat and kill it.
sta=$(netstat -lpn |grep 3023)
echo "$sta"
# Column 7 of `netstat -lpn` is "PID/program-name".
str=$(echo "$sta"|awk '{print $7}')
echo "$str"
# BUGFIX: extract the PID by stripping everything from the first '/' on.
# The old ${str:0: -5} dropped a fixed 5 trailing characters, which only
# worked when the program name happened to be exactly 4 characters long.
str2=${str%%/*}
echo "$str2"
# BUGFIX: quote the operand — `[ -z $str2 ]` is always true when the
# variable is empty *or* mis-parses when it contains spaces.
if [ -z "$str2" ];then
echo "dead"
else
kill -9 "$str2"
fi
| true
|
7ca83f826cbeab966796ba9c760a0d7a4c59cdbd
|
Shell
|
leopard152015/AMRmodel
|
/SI/BASE V10 - Exp/mp_R4.sh
|
UTF-8
| 1,316
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#SBATCH --account=def-amrowe
#SBATCH --time=02:00:00
#SBATCH --cpus-per-task=1
#SBATCH --nodes=1
#SBATCH --mem=1000mb
#SBATCH --job-name=V10_R4
#SBATCH --output=./output/output-%x-%a.out
#SBATCH --open-mode=append
# Self-resubmitting SLURM array job: each run either starts the simulation
# fresh or resumes from pickled state; at the end it resubmits itself until
# the simulation reaches steady state (no pickle file left behind).
if test -e ./pickleddata/$SLURM_JOB_NAME-$SLURM_ARRAY_TASK_ID; then
# Rerun the simulation
module load python/3.7
stat `which python3.7`
source ../ENV2/bin/activate
echo "prog restarted at: `date`"
mpiexec python ./$SLURM_JOB_NAME.py $SLURM_ARRAY_TASK_ID $SLURM_JOB_NAME
else
# First Run
echo "Current working directory: `pwd`"
echo "Starting run at: `date`"
echo ""
echo "Job Array ID / Job ID: $SLURM_ARRAY_JOB_ID / $SLURM_JOB_ID"
echo "This is job $SLURM_ARRAY_TASK_ID out of $SLURM_ARRAY_TASK_COUNT jobs."
echo ""
module load python/3.7
stat `which python3.7`
source ../ENV2/bin/activate
echo "prog started at: `date`"
mpiexec python ./$SLURM_JOB_NAME.py $SLURM_ARRAY_TASK_ID $SLURM_JOB_NAME
fi
deactivate
if test -e ./pickleddata/$SLURM_JOB_NAME-$SLURM_ARRAY_TASK_ID; then
# If there is pickeled data available resubmit just this single job.
sbatch --array=$SLURM_ARRAY_TASK_ID ${BASH_SOURCE[0]}
else
# If we have steady state there will be no pickled data file
# at the end of the simulation.
echo "prog ended at: `date`"
fi
| true
|
ffa022f30e902233f38acf62e92aa09022e42684
|
Shell
|
petronny/aur3-mirror
|
/jabberd14-ubuntu/PKGBUILD
|
UTF-8
| 1,196
| 3
| 3
|
[] |
no_license
|
# PKGBUILD for jabberd14 with the Ubuntu patch set applied.
# NOTE(review): uses legacy makepkg conventions ($startdir, `|| return 1`,
# install done inside build()) — modern PKGBUILDs split package(); kept
# as-is to match the original AUR recipe.
pkgname=jabberd14-ubuntu
_pkgname=jabberd14
pkgver=1.6.1.1
pkgrel=1
pkgdesc="XMPP Server with ubuntu patches"
arch=('i686' 'x86_64')
url="http://jabberd.org/"
license=('GPL')
#depends=('expat' 'udns' 'libidn' 'libgsasl')
#makedepends=('expat' 'udns' 'libidn' 'libgsasl' 'openssl' 'mysql')
source=("http://download.jabberd.org/jabberd14/jabberd14-1.6.1.1.tar.gz" "http://archive.ubuntu.com/ubuntu/pool/universe/j/jabberd14/jabberd14_1.6.1.1-5.diff.gz" "jabberd")
md5sums=('597c7ee14518ba22b1cee883b4737d87' 'cffda97f4a6c19aa318bf740a319d981' 'e3e672e81e6a70d02d7458e0edd4021c')
build() {
cd "$srcdir/$_pkgname-$pkgver"
# Apply the Ubuntu base diff, then every dpatch it ships, in order.
patch -p1 < ../jabberd14_1.6.1.1-5.diff || return 1
for i in `ls debian/patches/*.dpatch* | sort`
do
patch -p1 -s -i $i
if [ $? = 0 ]; then
echo "$i applied"
else
echo "Error processing $i"
return 1
fi
done
./configure --prefix=/usr --enable-ssl --enable-legacy --sysconfdir=/etc/jabberd --localstatedir=/var || return 1
# -j1: the jabberd14 build is not parallel-safe.
make -j1 || return 1
make DESTDIR="$pkgdir/" install || return 1
# Install the SysV init script shipped alongside this PKGBUILD.
mkdir -p $startdir/pkg/etc/rc.d/ && \
install -m 0755 $startdir/src/jabberd $startdir/pkg/etc/rc.d/jabberd
}
# vim:set ts=2 sw=2 et:
| true
|
acb263effce4eb489835f6afd4625f6e80e56d24
|
Shell
|
nishant3794/mediawiki
|
/terraform/modules/user_data/service/mediawiki/userdata.sh.tpl
|
UTF-8
| 1,145
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# EC2 user data: deploy MediaWiki from S3 and point LocalSettings.php at the
# RDS instance using credentials from AWS Secrets Manager.
sudo rm -rf /var/www/mediawiki
sudo aws s3 cp s3://deployment-zips-mediawiki/mediawiki.tar /home/centos/mediawiki.tar
sudo tar -xvf /home/centos/mediawiki.tar -C /var/www/
sudo mv /var/www/mediawiki-1.34.2 /var/www/mediawiki
sleep 30
sudo aws s3 cp s3://resource-softwares-files/LocalSettings.php /var/www/mediawiki/LocalSettings.php
sudo chown centos:centos /var/www/mediawiki/LocalSettings.php
sudo chown apache:apache /var/www/mediawiki
# Fetch the DB secret once and extract the individual fields with jq.
secret=$(aws secretsmanager get-secret-value --region ap-south-1 --secret-id mysql-credentials --query "SecretString" --output text)
dbname=$(echo "$secret" | jq -r ".db_name")
dbuser=$(echo "$secret" | jq -r ".db_username")
dbpassword=$(echo "$secret" | jq -r ".db_password")
dbhost=$(echo "$secret" | jq -r ".db_host")
# BUGFIX: quote the substituted values and use '|' as the sed delimiter so
# credentials containing '/', spaces or glob characters no longer break the
# sed command line. (Values containing '|' or '&' would still need escaping.)
sudo sed -i -e 's|dbname|'"$dbname"'|g' /var/www/mediawiki/LocalSettings.php
sudo sed -i -e 's|dbserver|'"$dbhost"'|g' /var/www/mediawiki/LocalSettings.php
sudo sed -i -e 's|dbuser|'"$dbuser"'|g' /var/www/mediawiki/LocalSettings.php
sudo sed -i -e 's|dbpassword|'"$dbpassword"'|g' /var/www/mediawiki/LocalSettings.php
sudo setsebool -P httpd_can_network_connect 1
sudo systemctl restart httpd
| true
|
2c62750d5fc55e1b2b3fa2df1adf00eb79c12e14
|
Shell
|
XiaoGeNintendo/HHSOJ-Web-Edition
|
/getoj.sh
|
UTF-8
| 9,109
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# HHSOJ control shell: installs/updates the HHSOJ judge (Tomcat webapp plus
# data folder) and reports system information. Needs root for /usr paths.
# ANSI colour prefixes/suffix used by the print_* helpers below.
# NOTE(review): "Grey_font_preffix" is (mis)spelled with a double 'f' —
# keep that spelling wherever the variable is referenced.
Green_font_prefix="\033[32m" && Red_font_prefix="\033[31m" && Grey_font_preffix="\e[37m" && Green_background_prefix="\033[42;37m" && Red_background_prefix="\033[41;37m" && Font_color_suffix="\033[0m"
# Terminal geometry (COLUMNS/LINES are usually only set in interactive
# shells, so these may be empty when run as a script).
column_size=${COLUMNS}
length_size=${LINES}
debug_mode=false
# Abort unless running as root (installation writes under /usr and runs apt).
check_root(){
[[ $EUID != 0 ]] && echo -e "Please use root account or use command 'sudo' to get root access." && exit 1
}
# Green "[INFO]" message.
print_info(){
echo -e "${Green_font_prefix}[INFO]$1${Font_color_suffix}"
}
# Red "[ERR]" message.
print_err(){
echo -e "${Red_font_prefix}[ERR]$1${Font_color_suffix}"
}
# Print $1 centred, padded on both sides with the fill character $2
# (a literal space argument means "pad with spaces"). The per-side pad
# width is 38 - len($1)/2 + 1 characters, matching the original
# expr-based computation, so total width is ~78 columns.
# BUGFIX: the old `[ ! $2 == ' ' ]` test left $2 unquoted, which broke
# (test syntax error) for empty or multi-word fill arguments; it also
# spawned `expr` for the arithmetic. Use quoted tests and $(( )).
print_center(){
    local text=$1
    local fill=$2
    local pad=$(( 38 - ${#text} / 2 ))
    local i
    for (( i = 0; i <= pad; i++ )); do
        if [ "$fill" = ' ' ]; then
            printf ' '
        else
            printf '%s' "$fill"
        fi
    done
    printf '%s' "$text"
    for (( i = 0; i <= pad; i++ )); do
        if [ "$fill" = ' ' ]; then
            printf ' '
        else
            printf '%s' "$fill"
        fi
    done
    printf '\n'
}
# Print a grey ':' separator (no newline) between a label and its value.
# BUGFIX: the colour variable was misspelled as ${Grey_font_size}
# (undefined), so the colon was never rendered grey; use the
# ${Grey_font_preffix} defined at the top of this script.
print_grey(){
    echo -n -e "${Grey_font_preffix}:${Font_color_suffix}"
}
# Print a hardware/OS summary between two 80-character '=' rules.
# Each row is: left-justified label, grey colon (print_grey), green value.
# Sources: /proc/cpuinfo, /proc/meminfo, free, df, /etc/os-release, ps, dpkg.
get_sysinfo(){
cnt=0
while (($cnt<80))
do
echo -n '='
let "cnt++"
done
CPU=$(grep 'model name' /proc/cpuinfo |uniq |awk -F : '{print $2}' |sed 's/^[ \t]*//g' |sed 's/ \+/ /g')
printf "%-30s" "CPU Model"
print_grey
echo -e " ${Green_font_prefix}${CPU}${Font_color_suffix}"
cpu_num=`cat /proc/cpuinfo|grep 'processor'|sort|uniq|wc -l`
printf "%-30s" "CPU Numbers"
print_grey
echo -e " ${Green_font_prefix}$cpu_num${Font_color_suffix}"
PROCESSOR=$(grep 'processor' /proc/cpuinfo |sort |uniq |wc -l)
printf "%-30s" "Logical CPU Number"
print_grey
echo -e " ${Green_font_prefix}${PROCESSOR}${Font_color_suffix}"
Mode=$(getconf LONG_BIT)
printf "%-30s" "CPU Running Mode"
print_grey
echo -e " ${Green_font_prefix}${Mode}Bits${Font_color_suffix}"
Cores=$(grep 'cpu cores' /proc/cpuinfo |uniq |awk -F : '{print $2}' |sed 's/^[ \t]*//g')
printf "%-30s" "CPU Cores"
print_grey
echo -e " ${Green_font_prefix}${Cores}${Font_color_suffix}"
Total=$(cat /proc/meminfo |grep 'MemTotal' |awk -F : '{print $2}' |sed 's/^[ \t]*//g')
printf "%-30s" "Memory in Total"
print_grey
echo -e " ${Green_font_prefix}${Total}${Font_color_suffix}"
Available=$(free -m |grep - |awk -F : '{print $2}' |awk '{print $2}')
printf "%-30s" "Memory Free"
print_grey
echo -e " ${Green_font_prefix}${Available}${Font_color_suffix}"
SwapTotal=$(cat /proc/meminfo |grep 'SwapTotal' |awk -F : '{print $2}' |sed 's/^[ \t]*//g')
printf "%-30s" "Swap in Total"
print_grey
echo -e " ${Green_font_prefix}${SwapTotal}${Font_color_suffix}"
disk_size=`df -h / | awk '{print $2}'|grep -E '[0-9]'`
printf "%-30s" "Disk Size Free"
print_grey
echo -e " ${Green_font_prefix}${disk_size}${Font_color_suffix}"
# cut -c14- strips the 'PRETTY_NAME="' prefix; the :0:len-1 slice below
# drops the trailing double quote.
linux_v=$(cat /etc/os-release|grep 'PRETTY_NAME'|cut -c14-)
linux_bit=`uname -i`
printf "%-30s" "System"
print_grey
echo -e " ${Green_font_prefix}${linux_v:0:${#linux_v}-1} $linux_bit${Font_color_suffix}"
# ps prints a header row, hence the decrement.
process=`ps aux|wc -l`
let process--
printf "%-30s" "Running Processes"
print_grey
echo -e " ${Green_font_prefix}$process${Font_color_suffix}"
# NOTE(review): dpkg -l output includes several header lines, so this
# overcounts installed packages slightly.
software_num=`dpkg -l |wc -l`
printf "%-30s" "Software Installed"
print_grey
echo -e " ${Green_font_prefix}$software_num${Font_color_suffix}"
kernel_version=$(uname -r)
printf "%-30s" "Kernel Version"
print_grey
echo -e " ${Green_font_prefix}$kernel_version${Font_color_suffix}"
cnt=0
while (($cnt<80))
do
echo -n '='
let "cnt++"
done
}
# Refresh and upgrade the apt package lists.
# NOTE(review): `apt-get upgrade` has no -y here, so it may prompt — confirm
# whether that is intentional for interactive use.
update_com(){
print_info "Will update apt sources."
apt-get update
apt-get upgrade
}
# Install an apt package ($1) and report success/failure.
install_com(){
print_err "$1 Not Found."
print_info "Will Install."
apt -y install $1
if [ $? -eq 0 ]; then
print_info "Installation Succeed for $1."
else
print_err "Installation Failed for $1."
fi
}
# Ensure the base command-line tools are present, installing any that are not.
check_com(){
for nowc in 'wget' 'tar' 'unzip' 'python3'
do
if command -v ${nowc} >/dev/null 2>&1; then
print_info "${nowc} Found."
else
install_com ${nowc}
fi
done
# pip3 is shipped by the python3-pip package, hence the special case.
if command -v pip3 >/dev/null 2>&1; then
print_info "pip3 found."
else
install_com python3-pip
fi
}
# Install a Python module ($1) with pip3 and report success/failure.
install_pip(){
print_err "$1 Not Found."
print_info "Will Install."
pip3 install $1
if [ $? -eq 0 ]; then
print_info "Installation Succeed for Module $1."
else
print_err "Installation Failed for Module $1."
fi
}
# Ensure the required Python modules are importable, installing missing ones.
check_pip(){
for nowb in 'robobrowser' 'requests'
do
if python3 -c "import ${nowb}" >/dev/null 2>&1 ; then
print_info "${nowb} Found."
else
install_pip ${nowb}
fi
done
}
# Download Apache Tomcat 9, unpack it and move it to /usr/tomcat.
install_tomcat(){
    wget -P /usr/ 'http://apache.01link.hk/tomcat/tomcat-9/v9.0.19/bin/apache-tomcat-9.0.19.tar.gz'
    tar zxvf /usr/apache-tomcat-9.0.19.tar.gz -C /usr/
    # BUGFIX: delete the tarball that was actually downloaded; the old code
    # removed the non-existent /usr/tomcat.tar.gz and left the archive behind.
    rm -f /usr/apache-tomcat-9.0.19.tar.gz
    mv /usr/apache-tomcat-9.0.19/ /usr/tomcat/
    print_info "Tomcat installed successfully"
}
# Query GitHub for the newest HHSOJ webapp release and, after confirmation
# (default: yes), install it as Tomcat's ROOT webapp.
install_webapp(){
    hhsoj_ver=$(wget --no-check-certificate -qO- https://api.github.com/repos/XiaoGeNintendo/HHSOJ-Web-Edition/releases | grep -o '"tag_name": ".*"' |head -n 1| sed 's/"//g;s/v//g' | sed 's/tag_name: //g')
    print_info "Latest WebApp Version:${hhsoj_ver}"
    read -e -p "Install/Update Now?[Y/n]:" ch
    # Empty answer defaults to "yes" (the two duplicated branches of the
    # original are collapsed into one condition).
    if [ -z "${ch}" ] || [ "${ch}" = 'y' ] || [ "${ch}" = 'Y' ]; then
        down_link="https://github.com/XiaoGeNintendo/HHSOJ-Web-Edition/releases/download/${hhsoj_ver}/HellOJ.war"
        # BUGFIX: wget's -P takes a *directory* prefix, so the old code
        # created a directory named ROOT.war; -O writes the war file itself.
        wget -O '/usr/tomcat/webapps/ROOT.war' "${down_link}"
        rm -rf '/usr/tomcat/webapps/ROOT/'
        print_info "Please now run tomcat to unpack the file"
    else
        print_info "Installation/Update Canceled."
    fi
}
# Query GitHub for the newest HHSOJ data-folder release and, after
# confirmation (default: yes), unpack it under /usr/hhsoj.
install_folder(){
    down_link=$(wget --no-check-certificate -qO- https://api.github.com/repos/XiaoGeNintendo/HHSOJ-Web-Edition/releases | grep -o 'https://github.com/XiaoGeNintendo/HHSOJ-Web-Edition/releases/download/.*/hhsoj.zip' | head -n 1)
    folder_ver=$(echo ${down_link} | grep -P '\d+\.\d+' -o)
    print_info "Latest HHSOJ Folder Version:${folder_ver}"
    read -e -p "Install/Update Now?[Y/n]:" ch
    # Empty answer defaults to "yes" (duplicated branches collapsed).
    if [ -z "${ch}" ] || [ "${ch}" = 'y' ] || [ "${ch}" = 'Y' ]; then
        # BUGFIX: "-P=/usr/" made wget use the literal directory "=/usr/";
        # the directory must be a separate argument.
        wget -P /usr/ "${down_link}"
        rm -rf /usr/hhsoj/
        unzip /usr/hhsoj.zip -d /usr/
        rm -f /usr/hhsoj.zip
    else
        print_info "Installation/Update Canceled."
    fi
}
# Verify that the JDK, g++, Tomcat, the HHSOJ webapp and the HHSOJ data
# folder are all present, installing whatever is missing.
check_all(){
    # BUGFIX: the old code ran `javac`/`java`/`g++` directly and treated a
    # *successful* exit as "missing" — the checks were inverted (a bare
    # `javac` exits non-zero even when installed, so a missing JDK was
    # reported as "Found"). Use `command -v` to test for presence.
    if command -v javac >/dev/null 2>&1 && command -v java >/dev/null 2>&1; then
        print_info 'JDK Found.'
    else
        install_com 'openjdk-8-jdk'
    fi
    if command -v g++ >/dev/null 2>&1; then
        print_info 'G++ Found.'
    else
        # BUGFIX: the Debian/Ubuntu package name is "g++", not "gcc-g++".
        install_com 'g++'
    fi
    if [ -d "/usr/tomcat/" ]; then
        print_info "Tomcat Found."
    else
        install_tomcat
    fi
    # The webapp is present if it is already unpacked or the war is staged.
    if [ -f "/usr/tomcat/webapps/ROOT/index.jsp" ] || [ -f "/usr/tomcat/webapps/ROOT.war" ]; then
        print_info "HHSOJ Webapp Found."
    else
        install_webapp
    fi
    # BUGFIX: /usr/hhsoj is a directory and the original `[ ! -f ... ]`
    # test was inverted (it claimed "Found" exactly when it was missing).
    if [ -d "/usr/hhsoj" ]; then
        print_info "HHSOJ Folder Found."
    else
        install_folder
    fi
}
# Print the HTTP, AJP and shutdown ports scraped out of Tomcat's server.xml.
get_port(){
if [ ! -d "/usr/tomcat/" ]; then
print_err "Please install tomcat first!" && exit 1
fi
# Each sed chain strips the XML around the relevant port="..." attribute.
port1=$(cat /usr/tomcat/conf/server.xml|grep 'protocol="HTTP/1.1"'|head -n 1|sed 's/"//g'|sed 's/ <Connector port=//g'|sed 's/ protocol=HTTP\/1.1//g')
port2=$(cat /usr/tomcat/conf/server.xml|grep 'protocol="AJP/1.3"'|sed 's/.*<Connector port=//g'|sed 's/"//g'|sed 's/ protocol=AJP\/1.3.*//g')
port3=$(cat /usr/tomcat/conf/server.xml|grep 'shutdown="SHUTDOWN"'|sed 's/<Server port="//g'|sed 's/".*//g')
print_info "HTTP Port:${port1}"
print_info "AJP Port:${port2}"
print_info "Shutdown Port:${port3}"
}
# --debug: skip the interactive menu and just dump system information.
if [ "$#" -gt 0 -a "$1" = "--debug" ]; then
debug_mode=true
get_sysinfo
else
# Banner + interactive menu.
echo -e "${Green_font_prefix}==============================HHSOJ Control Shell===============================${Font_color_suffix}"
echo -e "${Green_font_prefix}| By XIZCM |${Font_color_suffix}"
echo -e "${Green_font_prefix}| HellHoleStudios©, 2019 |${Font_color_suffix}"
echo -e "${Green_font_prefix}================================================================================${Font_color_suffix}"
echo ""
echo "Operations:"
echo "[1]Check&Install HHSOJ"
echo "[2]Update HHSOJ"
echo "[3]Tomcat config"
echo "[4]Get System Info"
read -e -p "Input Your Choice:" ch
case "$ch" in
1)
# Full install: requires root, then apt refresh, tool/pip checks, install.
check_root
update_com
check_com
check_pip
check_all
;;
2)
# Update both the webapp and the data folder to the latest release.
install_webapp
install_folder
;;
3)
print_center 'Tomcat Config' '='
get_port
print_center '' '='
;;
4)
get_sysinfo
;;
*)
print_err "Please input the right number"
;;
esac
fi
| true
|
efd5dae36315c2dab515434eb41c8d12029c2bc8
|
Shell
|
rahulgidde/Shellscript
|
/for_loop/HarmonicNumber.sh
|
UTF-8
| 302
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Compute the harmonic number H(n) = 1 + 1/2 + ... + 1/n for a user-supplied
# n, using bc for 2-decimal-place fixed-point arithmetic.
#TAKE INPUT FROM USER
read -p "Enter The Range Of Harmonic Number:" number
#VARIABLE
harmonic=0
#CALCULATE HARMONIC NUMBER
# NOTE(review): input is not validated — non-numeric input makes bc fail.
for (( index=1; index<=number; index++ ))
do
harmonic=$(echo "scale=2; $harmonic + 1 / $index" | bc)
done
#DISPLAY RESULT
echo "Harmonic Number is: $harmonic"
| true
|
c18dd7fa526531704692f8476340b809b02c49c0
|
Shell
|
lhudson08/salmonella_cerro
|
/pseudochrom.sh
|
UTF-8
| 335
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
#Usage: sh pseudochrom.sh <path to input files>
#lmc297@cornell.edu
# For every .fasta in the given directory: keep a copy in original_contigs/,
# replace each >NODE contig header with an NNnn... spacer run, prepend a
# single header named after the file, and join the sequence onto one line,
# producing <name>_pseudochrom.fasta.
# BUGFIX: check that the cd succeeded — the old unchecked `cd $1` would
# destructively edit whatever files were in the *current* directory when
# the argument was wrong; also quote $f so spaces in names survive.
cd "$1" || exit 1
mkdir original_contigs
for f in *.fasta
do
cp "$f" original_contigs
sed -i 's/^>NODE.*/NNnnNNnnNNnnNNnn/' "$f"
sed -i "1i >${f}" "$f"
# Collapse all sequence lines between headers onto a single line.
awk '/^>/{print s? s"\n"$0:$0;s="";next}{s=s sprintf("%s",$0)}END{if(s)print s}' "$f" > "${f%.fasta}_pseudochrom.fasta"
# BUGFIX: plain rm — $f is a regular file, the old `rm -r` was misleading.
rm "$f"
done
| true
|
349f4a2c2a7b666831b0f2b5ef8dfcf877a34d57
|
Shell
|
OpenBEL/bel_parser
|
/bump.sh
|
UTF-8
| 955
| 3.390625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Bump the VERSION file to the next value, tag, push and release the gem
# for both MRI and JRuby. Destructive: commits, tags and pushes upstream.
# NOTE(review): `sed -ri` and `date --iso-8601` are GNU-only; this script
# will not run unmodified on macOS/BSD.
CURRENT=$(cat VERSION)
# Ruby String#next computes the successor version string.
NEXT=$(echo "puts \"${CURRENT}\".next" | ruby)
read -p "Bump and release $NEXT [Y OR y]? " -n 1 -r
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
echo -e "\nExiting."
exit 1
fi
# set version in VERSION file
echo "$NEXT" > VERSION
# update ISO 8601 date in gemspec
sed -ri "s/[0-9]{4}-[0-9]{2}-[0-9]{2}/$(date --iso-8601)/g" .gemspec
# commit
git commit .gemspec VERSION -m "Bumped to version $NEXT."
# tag as VERSION
git tag -a "$NEXT" -m "Tagged as version $NEXT."
# push commits upstream
REF=$(git symbolic-ref HEAD); git push upstream $REF
# push tag upstream
REF=$(git symbolic-ref HEAD); git push upstream "$NEXT"
# build mri gem
gem build .gemspec
# push mri gem
gem push "bel_parser-$NEXT.gem"
# Switch to JRuby (chruby) to build/push the java platform gem.
# NOTE(review): the JRuby path below is machine-specific — confirm.
source /usr/share/chruby/chruby.sh
RUBIES+=(
/home/tony/tools/rubies/jruby-9.1.0.0
)
chruby jruby-9.1.0.0
# build java gem
gem build .gemspec-java
# push java gem
gem push "bel_parser-${NEXT}-java.gem"
| true
|
6ed3b171fc190788ce0e62e1972d7e5c414446fe
|
Shell
|
artemrys/google-cloud-functions-deploy
|
/pipe/pipe.sh
|
UTF-8
| 1,846
| 3.828125
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# Deploy to GCP Cloud Functions, https://cloud.google.com/functions/
#
# Required globals:
#   KEY_FILE
#   PROJECT
#   FUNCTION_NAME
#   ENTRY_POINT
#   RUNTIME
#
# Optional globals:
#   MEMORY
#   TIMEOUT
#   EXTRA_ARGS
#   DEBUG
#   SOURCE
#   TRIGGER
# common.sh provides info/success/fail, run (which sets $status) and
# enable_debug/gcloud_debug_args.
source "$(dirname "$0")/common.sh"
enable_debug
# mandatory parameters — ${VAR:?...} aborts with the message if unset/empty.
KEY_FILE=${KEY_FILE:?'KEY_FILE variable missing.'}
PROJECT=${PROJECT:?'PROJECT variable missing.'}
FUNCTION_NAME=${FUNCTION_NAME:?'FUNCTION_NAME variable missing.'}
ENTRY_POINT=${ENTRY_POINT:?'ENTRY_POINT variable missing.'}
RUNTIME=${RUNTIME:?'RUNTIME variable missing.'}
info "Setting up environment".
# Authenticate gcloud with the service-account key passed in KEY_FILE.
run 'echo "${KEY_FILE}" >> /tmp/key-file.json'
run gcloud auth activate-service-account --key-file /tmp/key-file.json --quiet ${gcloud_debug_args}
run gcloud config set project $PROJECT --quiet ${gcloud_debug_args}
# Assemble the optional deploy flags.
# NOTE(review): PROJECT/RUNTIME/ENTRY_POINT were already asserted non-empty
# above, so their [ ! -z ] guards below are always true.
ARGS_STRING=""
if [ ! -z "${PROJECT}" ]; then
ARGS_STRING="${ARGS_STRING} --project=${PROJECT}"
fi
if [ ! -z "${RUNTIME}" ]; then
ARGS_STRING="${ARGS_STRING} --runtime=${RUNTIME} "
fi
if [ ! -z "${ENTRY_POINT}" ]; then
ARGS_STRING="${ARGS_STRING} --entry-point=${ENTRY_POINT} "
fi
if [ ! -z "${MEMORY}" ]; then
ARGS_STRING="${ARGS_STRING} --memory=${MEMORY} "
fi
if [ ! -z "${TIMEOUT}" ]; then
ARGS_STRING="${ARGS_STRING} --timeout=${TIMEOUT} "
fi
# Default source is the current directory; default trigger is HTTP.
if [ ! -z "${SOURCE}" ]; then
ARGS_STRING="${ARGS_STRING} --source=${SOURCE} "
else
ARGS_STRING="${ARGS_STRING} --source . "
fi
if [ ! -z "${TRIGGER}" ]; then
ARGS_STRING="${ARGS_STRING} ${TRIGGER} "
else
ARGS_STRING="${ARGS_STRING} --trigger-http "
fi
info "Starting deployment  GCP Cloud Function..."
run gcloud functions deploy ${FUNCTION_NAME} ${ARGS_STRING} ${EXTRA_ARGS} ${gcloud_debug_args}
# `run` records the command's exit code in $status.
if [ "${status}" -eq 0 ]; then
success "Deployment successful."
else
fail "Deployment failed."
fi
| true
|
81a5a7cc2144aa78bb2c01bf7deeb1582846da8c
|
Shell
|
noloerino/dotfiles
|
/bin/runjava
|
UTF-8
| 240
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Compile the given Java source and, on success, run its main class with
# the remaining arguments. Usage: runjava Foo[.java] [args...]
FILE_NAME=$1
shift
# Append the .java suffix if the caller passed a bare class name.
if [[ $FILE_NAME != *.java ]]; then
    FILE_NAME="$FILE_NAME.java"
fi
# BUGFIX: quote the file name so paths with spaces survive, and forward the
# program arguments as "$@" — the old bare $@ re-split every argument.
javac "$FILE_NAME"
COMPILED=$?
if [[ $COMPILED -eq 0 ]]; then
    echo "Compiled $FILE_NAME"
    java "${FILE_NAME%.java}" "$@"
fi
| true
|
95ee7c3eb5775fa9a9d2b26e78eba5e9964f61cc
|
Shell
|
KIT-CMS/sm-htt-analysis
|
/condor_jobs/run_remote_job.sh
|
UTF-8
| 392
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Condor worker entry point: unpack the grid-control tarball shipped with
# the job, then run the remote shape production.
set -e
# Verbose XRootD logging for debugging remote file access.
export XRD_LOGLEVEL="Info"
echo " --------------"
echo " Unpacking tarball ..."
tar -xf gc_tarball.tar.gz
echo " Sucessfully unpacked tarball"
echo " --------------"
echo " Starting shape Production ! "
bash ./shapes/produce_shapes_remote.sh
echo " Finished shape Production ! "
echo " --------------"
# Output packing is currently disabled (results are shipped another way).
# echo " Packing result tarball ..."
# tar -czf gc_output.tar.gz output
| true
|
13cdb6ae12e729d28da96df381ec41e346f5248e
|
Shell
|
CarbsLinux/repository
|
/extra/info/build
|
UTF-8
| 448
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh -e
# Standalone build for info that doesn't depend on Perl. Only the info page
# viewer.
# Static linking; PERL=/bin/true stubs out the Perl-based texinfo tools.
export LDFLAGS="$LDFLAGS -static"
export PERL=/bin/true
./configure \
--disable-nls \
--disable-rpath
# Build only the gnulib support lib, the viewer and install-info —
# not the full texinfo suite.
make -C gnulib/lib
make -C info
make -C install-info
# clinst/clman are the distro's install helpers; $1 is the DESTDIR.
clinst -Dm755 info/ginfo "$1/usr/bin/info"
clinst -Dm755 install-info/ginstall-info "$1/usr/bin/install-info"
clman -d "$1" man/info.1 man/install-info.1 man/info.5
| true
|
b83b3f22c0a0989b6b2a382da3a30894b96e4af3
|
Shell
|
dwhitena/pachyderm-go-stats
|
/stats/stats.sh
|
UTF-8
| 668
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Emit "<repo>, <package count>, <lines of Go>" for the GitHub repository
# named in $REPONAME (e.g. "user/project").

# Grab the source code
go get -d "github.com/$REPONAME/..."

# Grab Go package name
pkgName="github.com/$REPONAME"

# Grab just first path listed in GOPATH
goPath="${GOPATH%%:*}"

# Construct Go package path
pkgPath="$goPath/src/$pkgName"

# Honour a vendored Godeps workspace when the project ships one.
if [ -e "$pkgPath/Godeps/_workspace" ];
then
    # Add local godeps dir to GOPATH
    GOPATH="$pkgPath/Godeps/_workspace:$GOPATH"
fi

# Count the packages reachable from the repo: one `go list` line each.
# BUGFIX/cleanup: quote expansions and read the count with `wc -l <` so no
# filename needs to be cut off (the old `wc -l dep.log | cut` was fragile).
go list "$pkgName/..." > dep.log || true
deps=$(wc -l < dep.log)
rm dep.log

# Count lines of Go code (NUL-delimited so odd file names are safe).
golines=$(find "$pkgPath" -name '*.go' -print0 | xargs -0 cat | wc -l)

# output the stats
echo "$REPONAME, $deps, $golines"
| true
|
2c9f787edef167265ea4d21f75bbf63121e6b6d0
|
Shell
|
royish/my
|
/scripts/zd
|
UTF-8
| 331
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/sh
# Show a review diff for $1, then — after interactive confirmation — commit
# the files listed in "$1.list" and print the snippet $2 for copy/paste.
rdiff.sh "$1"
printf "command:\n"
# BUGFIX: pass $1 as a printf argument, not inside the format string, so
# '%' characters in the name cannot corrupt the output.
printf 'rev ci new `cat %s.list`\n' "$1"
printf "Should I Commit? [y/N] "
read cont
# BUGFIX: quote $cont — with the old unquoted `[ $cont == "y" ]` an empty
# answer expanded to `[ == y ]`, a test syntax error; `=` is also the
# portable comparison operator for /bin/sh (== is a bashism).
if [ "$cont" = "y" ]; then
printf "\n=============\n\n"
printf "Copy this:\n"
printf '%s\n\n' "$2"
# NOTE(review): the printed command says "rev ci new" but the executed one
# is "review ci new" — confirm which is correct; behaviour kept as-is.
# Word splitting of the list file's contents is intentional here.
review ci new `cat "$1.list"`
printf "\n=============\n"
else
printf "not patching...\n"
exit 1
fi
| true
|
8d749d36a646bc2198420cd114adf9454413db40
|
Shell
|
nasrinkabir/kubernetes-helm-poc
|
/installation_instruction
|
UTF-8
| 987
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Provision a single-node dev environment: Docker, kubectl, Minikube
# (driver "none", i.e. running directly on the host), Node.js and Helm.
#Install Docker
sudo apt update -y
sudo apt-get remove docker docker-engine docker.io -y
sudo apt install -y docker.io
### Install Kubectl ###
# Resolve the latest stable kubectl version, then fetch that binary.
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
sudo chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
### Install Minikube ###
curl -LO https://github.com/kubernetes/minikube/releases/download/v1.12.0/minikube_1.12.0-0_amd64.deb
sudo dpkg -i minikube_1.12.0-0_amd64.deb
# conntrack is required by the "none" driver.
sudo apt-get install -y conntrack
sudo minikube config set vm-driver none
sudo minikube start --vm=true
# Hand ownership of the minikube profile back to the invoking user.
sudo chown -R $USER $HOME/.minikube; chmod -R u+wrx $HOME/.minikube
sudo minikube addons enable ingress
### Install nodejs ###
curl -sL https://deb.nodesource.com/setup_14.x | sudo bash -
sudo apt install nodejs -y
### Install Helm ###
sudo snap install helm --classic
rm -f minikube_1.12.0-0_amd64.deb
|
cabb63f45074ab60e16d181f4a1a61474a76ce10
|
Shell
|
skyroot/ctfd-deployment
|
/server_scripts/start_ctfd_instance.sh
|
UTF-8
| 889
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Variables
hostname="$1"
nclteamname="$2"
nclsio="http://172.18.178.45:8080"
printusage() {
echo "Usage: sudo $0 <hostname> <ncl_team_name>"
}
echo "$0: Started..."
if [ "$#" -ne 2 ]; then
echo "Failed: Wrong number of arguments."
printusage
exit
fi
if [ "$EUID" -ne 0 ]; then
echo "Failed: Please run as root."
printusage
exit
fi
# Set current working directory to script folder
cd "${0%/*}"
# Fail if this CTFd instance does not exist
if [ ! -d "$hostname" ]; then
echo "Failed: hostname does not exist."
exit
fi
echo "Starting CTFd..."
# Start this CTFd instance with uWSGI
cd "$hostname"
uwsgi --plugin python -s /tmp/uwsgi_"$hostname".sock -w 'CTFd:create_app()' --chmod-socket=666 --pidfile /tmp/ctfd_"$hostname".pid --pyargv "--ncl-sio-url $nclsio --ncl-team-name $nclteamname" &>/dev/null &
echo "$0: Completed successfully!"
| true
|
b4a5e9474e47a189acda1760d63647bc4665f16b
|
Shell
|
abgit143/whyred_miuieu_port
|
/port.sh
|
UTF-8
| 10,967
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
export LOCALDIR=`cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd`
export TOOLS=${LOCALDIR}/tools
export DEVICE=lavender
export TYPE=eu
export VERSIONS=(stable)
export SDAT2IMG=${TOOLS}/sdat2img.py
export IMG2SDAT=${TOOLS}/img2sdat.py
export IMGEXTRACT=${TOOLS}/imgextractor.py
export MKUSERIMG=${TOOLS}/mkuserimg_mke2fs.sh
export APKTOOL=${TOOLS}/apktool
export SYSTEMDIR=${LOCALDIR}/system
export VENDORDIR=${LOCALDIR}/vendor
export OUTDIR=${LOCALDIR}/out
export INDIR=${LOCALDIR}/in
export INFOFILE=${LOCALDIR}/info.txt
export MIUIPIC=${LOCALDIR}/miui.jpg
export GENINFO=${TOOLS}/geninfo.sh
export SENDMESSAGE=${TOOLS}/sendmessage.py
export fframeworkres="${SYSTEMDIR}/system/framework/framework-res.apk"
export fframeworkextres="${SYSTEMDIR}/system/framework/framework-ext-res/framework-ext-res.apk"
export fmiuisystem="${SYSTEMDIR}/system/app/miuisystem/miuisystem.apk"
export fmiui="${SYSTEMDIR}/system/app/miui/miui.apk"
date=`date +%Y%m%d%H%M%S`
for VERSION in ${VERSIONS[@]}; do
if [ "${TYPE}" = "global" ]; then
python3 ${LOCALDIR}/${TYPE}.py ${DEVICE} ${VERSION}
URL=$(cat ${LOCALDIR}/url)
ZIPNAME=$(echo ${URL} | cut -d / -f 5)
elif [ "${TYPE}" = "mmx" ]; then
python3 ${LOCALDIR}/${TYPE}.py ${DEVICE} ${VERSION}
URL=$(cat ${LOCALDIR}/url)
ZIPNAME=$(echo ${URL} | cut -d / -f 9)
elif [ "${TYPE}" = "eu" ]; then
python3 ${LOCALDIR}/${TYPE}.py ${DEVICE} ${VERSION}
URL=$(cat ${LOCALDIR}/url)
ZIPNAME=$(echo ${URL} | cut -d / -f 10)
else
echo "Specify TYPE"
fi
NEWZIP=$(sed "s/lavender/whyred/g;s/LAVENDER/WHYRED/g;s/Lavender/Whyred/g;s/HMNote7/HMNote5Pro/g;s/.zip/-$date.zip/g" <<< $ZIPNAME)
rm -rf ${LOCALDIR}/url
rm -rf ${INDIR} ${OUTDIR}
mkdir -p ${INDIR}
mkdir -p ${OUTDIR}
rm -rf ${LOCALDIR}/flashable/system.*
rm -rf ${LOCALDIR}/flashable/vendor.*
export EUDATE=$(echo ${ZIPNAME} | cut -d _ -f 4)
git config --global user.email "anandsingh215@yahoo.com"
git config --global user.name "Anand Shekhawat"
# download and Unzip
echo "Downloading ${ZIPNAME}"
aria2c -x16 -j$(nproc) -q -d "${INDIR}" -o "${ZIPNAME}" ${URL}
partitions=(system vendor)
for partition in ${partitions[@]}; do
echo "Extracting ${partition} to ${INDIR}"
7z e "${INDIR}/${ZIPNAME}" ${partition}.new.dat.br ${partition}.transfer.list -o"$INDIR" > /dev/null
brotli -df ${INDIR}/${partition}.new.dat.br
$SDAT2IMG ${INDIR}/${partition}.transfer.list ${INDIR}/${partition}.new.dat ${INDIR}/${partition}.img > /dev/null
rm -rf ${INDIR}/${partition}.transfer.list ${INDIR}/${partition}.new.dat*
python3 $IMGEXTRACT ${INDIR}/${partition}.img .
rm -rf ${INDIR}/${partition}.img
done
# import APKTOOL frameworks
${APKTOOL} if ${fframeworkres}
${APKTOOL} if ${fframeworkextres}
${APKTOOL} if ${fmiui}
${APKTOOL} if ${fmiuisystem}
patch_rom() {
echo "Patching system and vendor"
rm ${VENDORDIR}/etc/init/android.hardware.gatekeeper@1.0-service-qti.rc
rm ${VENDORDIR}/etc/init/android.hardware.keymaster@4.0-service-qti.rc
# app
rm -rf ${SYSTEMDIR}/system/app/Email
rm -rf ${SYSTEMDIR}/system/app/MiuiVideoGlobal
rm -rf ${SYSTEMDIR}/system/app/MiPicks
rm -rf ${SYSTEMDIR}/system/app/InMipay
rm -rf ${SYSTEMDIR}/system/app/Updater
# priv-app
rm -rf ${SYSTEMDIR}/system/priv-app/Browser
rm -rf ${SYSTEMDIR}/system/priv-app/MiBrowserGlobal
rm -rf ${SYSTEMDIR}/system/priv-app/MiuiBrowserGlobal
rm -rf ${SYSTEMDIR}/system/priv-app/MiDrop
rm -rf ${SYSTEMDIR}/system/priv-app/MiuiCamera
rm -rf ${SYSTEMDIR}/system/priv-app/Updater
# data-app
rm -rf ${SYSTEMDIR}/system/data-app/PeelMiRemote
rm -rf ${SYSTEMDIR}/system/data-app/XMRemoteController
# product/app
rm -rf ${SYSTEMDIR}/system/product/app/YouTube
rm -rf ${SYSTEMDIR}/system/product/app/Maps
rm -rf ${SYSTEMDIR}/system/product/app/Gmail2
# theme
rm -rf ${SYSTEMDIR}/system/media/theme/miui_mod_icons
# vendor/overlay
rm -rf ${VENDORDIR}/app/NotchOverlay
rm -rf ${VENDORDIR}/overlay/DevicesOverlay.apk
rm -rf ${VENDORDIR}/overlay/DevicesAndroidOverlay.apk
rsync -ra ${LOCALDIR}/whyred/audio/vendor/ ${VENDORDIR}
rsync -ra ${LOCALDIR}/whyred/camera/vendor/ ${VENDORDIR}
rsync -ra ${LOCALDIR}/whyred/display/vendor/ ${VENDORDIR}
rsync -ra ${LOCALDIR}/whyred/fingerprint/vendor/ ${VENDORDIR}
rsync -ra ${LOCALDIR}/whyred/keymaster/vendor/ ${VENDORDIR}
rsync -ra ${LOCALDIR}/whyred/sensors/vendor/ ${VENDORDIR}
rsync -ra ${LOCALDIR}/whyred/thermal/vendor/ ${VENDORDIR}
rsync -ra ${LOCALDIR}/whyred/wifi/vendor/ ${VENDORDIR}
rsync -ra ${LOCALDIR}/whyred/app/vendor/ ${VENDORDIR}
rsync -ra ${LOCALDIR}/whyred/app/system/ ${LOCALDIR}/system
rsync -ra ${LOCALDIR}/customizations/system/ ${LOCALDIR}/system
# generate overlays
${LOCALDIR}/overlay/build.sh accent
${LOCALDIR}/overlay/build.sh custom
#${LOCALDIR}/overlay/build.sh language
${LOCALDIR}/overlay/build.sh whyred
#fstab
sed -i "s/forceencrypt/encryptable/g" ${VENDORDIR}/etc/fstab.qcom
sed -i "/\/dev\/block\/bootdevice\/by-name\/system/d" ${VENDORDIR}/etc/fstab.qcom
sed -i "\/dev\/block\/bootdevice\/by-name\/userdata/a /dev/block/bootdevice/by-name/cust /cust ext4 ro,nosuid,nodev,barrier=1 wait,check" ${VENDORDIR}/etc/fstab.qcom
# manifest
sed -i "/<name>android.hardware.keymaster<\/name>/!b;n;n;c\ \ \ \ \ \ \ <version>3.0</version>" ${VENDORDIR}/etc/vintf/manifest.xml
sed -i "/<fqname>@4.0::IKeymasterDevice\/default<\/fqname>/!b;c\ \ \ \ \ \ \ \ <fqname>@3.0::IKeymasterDevice/default</fqname>" ${VENDORDIR}/etc/vintf/manifest.xml
# postboot
sed -i "s/start vendor.cdsprpcd/\# start vendor.cdsprpcd/g" ${VENDORDIR}/bin/init.qcom.post_boot.sh
# build.prop
sprop=${SYSTEMDIR}/system/build.prop
oprop=${VENDORDIR}/odm/etc/build.prop
vprop=${VENDORDIR}/build.prop
phingerprint="$(grep ro.system.build.fingerprint=.* ${SYSTEMDIR}/system/build.prop | cut -d = -f 2)"
grep -q "ro.build.fingerprint=.*" $sprop || sed -i "/ro.system.build.fingerprint/i ro.build.fingerprint=$phingerprint" $sprop
poops=($sprop $vprop $oprop)
for poop in ${poops[@]}; do
sed -i "/ro.product.*\.name=.*/s|=.*|=whyred|g" $poop
sed -i "/ro.product.*\.device=.*/s|=.*|=whyred|g" $poop
sed -i "/ro.product.*\.model=.*/s|=.*|=Redmi Note 5|g" $poop
sed -i -e "/build.fingerprint_real/s/lavender/whyred/g" $poop
done
sed -i "s/persist.vendor.camera.model=Redmi Note 7/persist.vendor.camera.model=Redmi Note 5/g" ${SYSTEMDIR}/system/build.prop
sed -i "/ro.miui.notch=1/d" ${SYSTEMDIR}/system/build.prop
sed -i "s/sys.paper_mode_max_level=255/sys.paper_mode_max_level=400/g" ${SYSTEMDIR}/system/build.prop
cat ${LOCALDIR}/whyred/system.prop >> ${SYSTEMDIR}/system/build.prop
cat ${LOCALDIR}/whyred/vendor.prop >> ${VENDORDIR}/build.prop
# device_features
rm -rf ${SYSTEMDIR}/system/etc/device_features/lavender.xml
rm -rf ${VENDORDIR}/etc/device_features/lavender.xml
# Patch blobs
bash ${LOCALDIR}/whyred/patch/services_jar.sh
# file_contexts
echo "Patching file_contexts"
cat ${LOCALDIR}/overlay/system_file_contexts >> ${LOCALDIR}/config/system_file_contexts
cat ${LOCALDIR}/whyred/app/config/system_file_contexts >> ${LOCALDIR}/config/system_file_contexts
cat ${LOCALDIR}/customizations/config/system_file_contexts >> ${LOCALDIR}/config/system_file_contexts
cat ${LOCALDIR}/whyred/app/config/vendor_file_contexts >> ${LOCALDIR}/config/vendor_file_contexts
cat ${LOCALDIR}/whyred/audio/config/vendor_file_contexts >> ${LOCALDIR}/config/vendor_file_contexts
cat ${LOCALDIR}/whyred/camera/config/vendor_file_contexts >> ${LOCALDIR}/config/vendor_file_contexts
cat ${LOCALDIR}/whyred/display/config/vendor_file_contexts >> ${LOCALDIR}/config/vendor_file_contexts
cat ${LOCALDIR}/whyred/fingerprint/config/vendor_file_contexts >> ${LOCALDIR}/config/vendor_file_contexts
cat ${LOCALDIR}/whyred/keymaster/config/vendor_file_contexts >> ${LOCALDIR}/config/vendor_file_contexts
cat ${LOCALDIR}/overlay/vendor_file_contexts >> ${LOCALDIR}/config/vendor_file_contexts
# fs_config
echo "Patching fs_config"
cat ${LOCALDIR}/overlay/system_fs_config >> ${LOCALDIR}/config/system_fs_config
cat ${LOCALDIR}/whyred/app/config/system_fs_config >> ${LOCALDIR}/config/system_fs_config
cat ${LOCALDIR}/customizations/config/system_fs_config >> ${LOCALDIR}/config/system_fs_config
cat ${LOCALDIR}/whyred/app/config/vendor_fs_config >> ${LOCALDIR}/config/vendor_fs_config
cat ${LOCALDIR}/whyred/audio/config/vendor_fs_config >> ${LOCALDIR}/config/vendor_fs_config
cat ${LOCALDIR}/whyred/camera/config/vendor_fs_config >> ${LOCALDIR}/config/vendor_fs_config
cat ${LOCALDIR}/whyred/display/config/vendor_fs_config >> ${LOCALDIR}/config/vendor_fs_config
cat ${LOCALDIR}/whyred/fingerprint/config/vendor_fs_config >> ${LOCALDIR}/config/vendor_fs_config
cat ${LOCALDIR}/whyred/keymaster/config/vendor_fs_config >> ${LOCALDIR}/config/vendor_fs_config
cat ${LOCALDIR}/overlay/vendor_fs_config >> ${LOCALDIR}/config/vendor_fs_config
}
bytesToHuman() {
b=${1:-0}; d=''; s=0; S=(Bytes {K,M,G,T,P,E,Z,Y}iB)
while ((b > 1024)); do
d="$(printf ".%02d" $((b % 1024 * 100 / 1024)))"
b=$((b / 1024))
let s++
done
echo "$b$d ${S[$s]}"
}
# mk img
mk_img() {
ssize=3221225472
vsize=2147483648
pvsize=`du -sk ${VENDORDIR} | awk '{$1*=1024;printf $1}'`
pssize=`du -sk ${SYSTEMDIR} | awk '{$1*=1024;printf $1}'`
sout=${OUTDIR}/system.img
vout=${OUTDIR}/vendor.img
vfsconfig=${LOCALDIR}/config/vendor_fs_config
sfsconfig=${LOCALDIR}/config/system_fs_config
vfcontexts=${LOCALDIR}/config/vendor_file_contexts
sfcontexts=${LOCALDIR}/config/system_file_contexts
echo "Creating system.img"
echo "system.img size: $(bytesToHuman $pssize)"
$MKUSERIMG -s "${SYSTEMDIR}" "$sout" ext4 system $ssize -C $sfsconfig $sfcontexts -T 0 -L system > /dev/null || exit 1
echo "Creating vendor.img"
echo "vendor.img size: $(bytesToHuman $pvsize)"
$MKUSERIMG -s "${VENDORDIR}" "$vout" ext4 vendor $vsize -C $vfsconfig $vfcontexts -T 0 -L vendor > /dev/null || exit 1
rm -rf ${LOCALDIR}/config
rm -rf ${SYSTEMDIR}
rm -rf ${VENDORDIR}
}
mk_zip() {
echo "Creating ${NEWZIP}"
rm -rf ${NEWZIP}
cp flashable/flashable.zip ${NEWZIP}
$IMG2SDAT $vout -o flashable -v 4 -p vendor > /dev/null
$IMG2SDAT $sout -o flashable -v 4 -p system > /dev/null
cd flashable
echo "Compressing system.new.dat"
brotli -6 system.new.dat
echo "Conpressing vendor.new.dat"
brotli -6 vendor.new.dat
rm system.new.dat || exit 1
rm vendor.new.dat || exit 1
zip -rv9 ../${NEWZIP} boot.img system.new.dat.br system.patch.dat system.transfer.list vendor.new.dat.br vendor.patch.dat vendor.transfer.list > /dev/null
cd ..
}
patch_rom
mk_img || continue
mk_zip
rm -rf ${INDIR} ${OUTDIR}
if [ -f ${LOCALDIR}/${NEWZIP} ]; then
ssh-keyscan -t ecdsa -p 22 -H frs.sourceforge.net 2>&1 | tee -a /root/.ssh/known_hosts
SF_PROJECT=whyred-miui
scp ${NEWZIP} shekhawat2@frs.sourceforge.net:/home/frs/project/${SF_PROJECT}/note7
export DOWNLOADLINK="https://sourceforge.net/projects/${SF_PROJECT}/files/note7/${NEWZIP}"
export ZIPSIZE=`du -sk ${NEWZIP} | awk '{$1*=1024;printf $1}'`
$GENINFO > $INFOFILE
$SENDMESSAGE
else
exit 1
fi
done
| true
|
e5c0c02100455d8e09efb7e246bb490cb77cecaf
|
Shell
|
nkuehn/sphere-icecat-importer
|
/bin/12_download_and_clean_daily.sh
|
UTF-8
| 532
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
# you have to provide an environment variable with the icecat password via `export ICECATPWD=fooBar` and the same for
# your IceCat username as `export ICECATUSER=fooBar`
mkdir ../downloaded
curl -u $ICECATUSER:$ICECATPWD -o ../downloaded/daily.export_urls_rich.txt.gz http://data.icecat.biz/export/freeurls/daily.export_urls_rich.txt.gz
gunzip -f -v *.gz
mkdir ../transformed
csvformat --tabs --maxfieldsize 250000 ../downloaded/daily.export_urls_rich.txt > ../transformed/daily.on_market.export_urls_rich.csv
| true
|
3d8a3ab053e939e9488be617b11dd7de88e52028
|
Shell
|
yungez/iotdev-docker
|
/dockerfiles/deploy.sh
|
UTF-8
| 1,073
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
save_deviceip=0
save_username=0
save_password=0
save_srcdockerpath=0
save_destdir=0
for arg in "$@"
do
if [ $save_deviceip == 1 ]
then
deviceip="$arg"
save_deviceip=0
elif [ $save_username == 1 ]
then
username="$arg"
save_username=0
elif [ $save_password == 1 ]
then
password="$arg"
save_password=0
elif [ $save_srcdockerpath == 1 ]
then
srcdockerpath="$arg"
save_srcdockerpath=0
elif [ $save_destdir == 1 ]
then
destdir="$arg"
save_destdir=0
else
case "$arg" in
"--deviceip" ) save_deviceip=1;;
"--username" ) save_username=1;;
"--password" ) save_password=1;;
"--srcdockerpath" ) save_srcdockerpath=1;;
"--destdir" ) save_destdir=1;;
esac
fi
done
sshpass -p $password scp -vo UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r $srcdockerpath $username@$deviceip:$destdir
if [ $? -eq 0 ]; then
echo Deploy succeeded!
else
exit $?
fi
| true
|
bd9db5a23f78336bf6ed287b797b5b0da2574bc8
|
Shell
|
b2moo/learn
|
/experiments.sh
|
UTF-8
| 2,746
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# This file generates the data for the plots of figure 7 of the article
# Official parameters (K for K-parameterized experiments, D for dimension parameterized
# experiments
KPARAMS="50 100 150 200 250 300 350 400 450 500"
DPARAMS="3 4 5 10 50 100 200"
# 5 min
TIMEOUT=300
# dia-r (This is K Diagonal Restricted in the paper)
# dia-u (This is K Diagonal Unrestricted in the paper)
# big-c (This is K Big Overlapping Cube in the paper)
# k-cubes (This is K cubes in Z^d in the paper)
# k-dia (This is K Diagonal in the paper)
# mondec (This is Example 2 in the paper)
BENCHMARKS="dia-r dia-u big-c k-cubes k-dia mondec"
TOOLS="overshoot-u overshoot-b max-u max-b max-o mondec"
parseout() {
sed "s/Total time needed: *\(.*\)/\1/; t; d"
}
# Clean the generated data
clean() {
echo "Cleaning generated data";
for bench in $BENCHMARKS
do
echo "clean $bench"
cat reference_data/$bench.dat | head -n 2 > generated/$bench.dat
done
}
# Run experiment and fill generated file
# 1 = benchmark name, 2 = parameter
run_exp() {
if grep -q "^$2 " generated/$1.dat; then
echo " Already done, skipping;"
return
fi
# No Param ? Iterate on all of them
if [ -z $2 ]; then
if [ "$1" = "k-cubes" ]; then
params="$DPARAMS"
else
params="$KPARAMS"
fi;
for p in $params; do
echo " Instantiate $1 with p=$p"
run_exp $1 $p
done;
return
fi
# choose between d and k, and the tools to iterate on
d=2
k=10
if [ "$1" = "mondec" ]; then
tools="max-o mondec"
else
tools="$TOOLS"
fi
# Only k-cubes has d as the parameter
if [ "$1" = "k-cubes" ]; then
d="$2"
else
k="$2"
fi
out="$2"
for tool in $tools; do
echo -n " Running $tool on $1($k,$d) ..."
res=$(run_tool $tool $1 $k $d | parseout)
if [ -z "$res" ]; then
echo "failed"
elif [ "$res" = "$TIMEOUT" ]; then
echo "timeout (${TIMEOUT}s)"
else
echo "done: ${res}s"
fi
out="$out $res"
done
echo "$out" >> generated/$1.dat
}
run_tool() {
if [ "$1" = "mondec" ]; then
timeout $TIMEOUT python3 ./mondec.py $2 $3 $4
else
# Benchmark first
timeout $TIMEOUT python3 ./maximal_cubes.py $2 $1 $3 $4
fi
if [ "$?" -eq 124 ]; then
echo "Total time needed: $TIMEOUT"
fi
}
if [ -z "$1" ]; then
echo "USAGE: $0 [clean|all|$BENCHMARKS] [PARAM]"
exit 1
elif [ "$1" = "clean" ]; then
clean
elif [ "$1" = "all" ]; then
for bench in $BENCHMARKS; do
run_exp $bench $2
done
else
run_exp $1 $2
fi
| true
|
afe8279bdf9ce7dcf48d3948b959b72fc62edf1d
|
Shell
|
SamAinsworth/reproduce-ghostminion-paper
|
/scripts/run_spec17.sh
|
UTF-8
| 881
| 2.796875
| 3
|
[] |
no_license
|
cd ..
set -u
export BASE=$(pwd)
cd SPEC17/benchspec/CPU/
N=$(grep ^cpu\\scores /proc/cpuinfo | uniq | awk '{print $4}')
M=$(grep MemTotal /proc/meminfo | awk '{print $2}')
G=$(expr $M / 8192000)
P=$((G<N ? G : N))
i=0
for bench in bwaves perlbench gcc mcf omnetpp xalancbmk deepsjeng leela exchange2 xz bwaves cactusBSSN lbm wrf cam4 pop2 imagick nab fotonik3d roms
do
((i=i%P)); ((i++==0)) && wait
(
IN=$(grep $bench $BASE/spec_confs/input_2017.txt | awk -F':' '{print $2}'| xargs)
BIN=$(grep $bench $BASE/spec_confs/binaries_2017.txt | awk -F':' '{print $2}' | xargs)
BINA=./$(echo $BIN)"_base.mytest-64"
echo $BINA
ARGS=$(grep $bench $BASE/spec_confs/args_2017.txt | awk -F':' '{print $2}'| xargs)
cd *$(echo $bench)_s/run/run_base_refspeed_mytest-64.0000
$BASE/scripts/gem5_scripts/run_ghostminion10b.sh "$BINA" "$ARGS" "$IN"
) &
done
cd $BASE/scripts
| true
|
698801f0019e4ac76aeab9c171fae1e8ee5102d0
|
Shell
|
eshwen/SemivisibleJets
|
/utils/pid_change.sh
|
UTF-8
| 1,797
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
if [ -z $1 ]; then
usr_msg="Usage: ./pid_change.sh <path to LHE file>"
$SVJ_TOP_DIR/utils/print_bash_script_usage.sh "$usr_msg"
exit
fi
lheFile=$1
echo "Changing PDGIDs from MadGraph to Pythia conventions in preparation for hadronisation..."
# madgraph sign convention is reversed?
# Change PDGIDs for dark quarks
sed -i 's/-5000521/-4900101/g' $lheFile
sed -i 's/-49001010/4900101/g' $lheFile
sed -i 's/-49001011/4900101/g' $lheFile
sed -i 's/-49001012/4900101/g' $lheFile
sed -i 's/-49001013/4900101/g' $lheFile
sed -i 's/-49001014/4900101/g' $lheFile
sed -i 's/5000521/4900101/g' $lheFile
sed -i 's/49001010/-4900101/g' $lheFile
sed -i 's/49001011/-4900101/g' $lheFile
sed -i 's/49001012/-4900101/g' $lheFile
sed -i 's/49001013/-4900101/g' $lheFile
sed -i 's/49001014/-4900101/g' $lheFile
# Change PDGIDs for t-channel bi-fundamentals
sed -i 's/9000005/4900001/g' $lheFile
sed -i 's/9000006/4900001/g' $lheFile
sed -i 's/9000007/4900001/g' $lheFile
sed -i 's/9000008/4900001/g' $lheFile
sed -i 's/9000009/4900003/g' $lheFile
sed -i 's/9000010/4900003/g' $lheFile
sed -i 's/9000011/4900003/g' $lheFile
sed -i 's/9000012/4900003/g' $lheFile
sed -i 's/9000013/4900005/g' $lheFile
sed -i 's/9000014/4900005/g' $lheFile
sed -i 's/9000015/4900005/g' $lheFile
sed -i 's/9000016/4900005/g' $lheFile
sed -i 's/9000017/4900002/g' $lheFile
sed -i 's/9000018/4900002/g' $lheFile
sed -i 's/9000019/4900002/g' $lheFile
sed -i 's/9000020/4900002/g' $lheFile
sed -i 's/9000021/4900004/g' $lheFile
sed -i 's/9000022/4900004/g' $lheFile
sed -i 's/9000023/4900004/g' $lheFile
sed -i 's/9000024/4900004/g' $lheFile
sed -i 's/9000025/4900006/g' $lheFile
sed -i 's/9000026/4900006/g' $lheFile
sed -i 's/9000027/4900006/g' $lheFile
sed -i 's/9000028/4900006/g' $lheFile
echo "Done!"
exit
| true
|
6b7ac020069f055bb0783f7fe7eb87ce9a9f449a
|
Shell
|
martin-walsh/utilities
|
/scripts/bash/completion.sh
|
UTF-8
| 1,348
| 3.0625
| 3
|
[] |
no_license
|
#/usr/bin/env bash
_complete () {
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
comp_folders=$@
COMPREPLY=( $(compgen -W "$comp_folders" -- $cur))
return 0
}
#########################################################################
#### CD SCRIPTS ####
#########################################################################
# Completion for cdc
_complete_cdc_folders () {
_complete `ls ~/code/`
}
complete -o nospace -F _complete_cdc_folders cdc
# Completion for cdi
_complete_cdi_folders () {
_complete `ls ~/code/ | grep integration- | cut -d '-' -f 2`
}
complete -o nospace -F _complete_cdi_folders cdi
# Completion for cdr
_complete_cdo_folders () {
_complete `ls ~/code/ | grep infrastructure- | cut -d '-' -f 2`
}
complete -o nospace -F _complete_cdo_folders cdo
# Completion for cdp
_complete_cdp_folders () {
_complete `ls ~/code/ | grep plugin- | cut -d '-' -f 2`
}
complete -o nospace -F _complete_cdp_folders cdp
#########################################################################
#### HUB ####
#########################################################################
_complete_hub () {
opts=("compare commits branches releases show")
_complete $opts
}
complete -o nospace -F _complete_hub hub
| true
|
8e0f16990f870ef0485d8fa05f7780eaf14aa191
|
Shell
|
dorucioclea/posthog
|
/ee/benchmarks/measure.sh
|
UTF-8
| 4,826
| 3.703125
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash
set -e
USER="default"
while test $# -gt 0; do
case "$1" in
-h|--help)
echo "This is a script to measure performance of a clickhouse query against a live database instance."
echo ""
echo "USAGE:"
echo " measure.sh [FLAGS]"
echo ""
echo "EXAMPLE:"
echo " measure.sh --clickhouse-server clickhouse-server --tunnel-server some-server --password PW --query-file some-query.sql"
echo ""
echo "FLAGS:"
echo " -h, --help Print this help information."
echo " -q, --query-file Send query to measure to here"
echo " -s, --clickhouse-server Address of clickhouse server"
echo " -t, --tunnel-server Address of server to tunnel clickhouse from"
echo " -u, --user Clickhouse user (default: default)"
echo " -p, --password Clickhouse user password"
echo " --explain Output explain for query"
echo " --drop-cache Drop clickhouse cache before executing the query. Don't use against production"
exit 0
;;
-q|--query-file)
QUERY_FILE="$2"
shift
shift
;;
-s|--clickhouse-server)
CLICKHOUSE_SERVER="$2"
shift
shift
;;
-t|--tunnel-server)
TUNNEL_SERVER="$2"
shift
shift
;;
-u|--user)
USER="$2"
shift
shift
;;
-p|--password)
PASSWORD="$2"
shift
shift
;;
--explain)
EXPLAIN=1
shift
;;
--drop-cache)
DROP_CACHE=1
shift
;;
--no-flamegraphs)
NO_FLAMEGRAPHS=1
shift
;;
*)
break
;;
esac
done
trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT
QUERY_HOST="$CLICKHOUSE_SERVER"
PORT="8123"
QUERY=$(cat $QUERY_FILE)
RANDOM_QUERY_ID="$RANDOM"
QUERY_WITH_SETTINGS="
-- measure.sh:$RANDOM_QUERY_ID
${QUERY}
SETTINGS allow_introspection_functions=1,
query_profiler_real_time_period_ns=40000000,
query_profiler_cpu_time_period_ns=40000000,
memory_profiler_step=1048576,
max_untracked_memory=1048576,
memory_profiler_sample_probability=0.01,
use_uncompressed_cache=0,
max_execution_time=400
"
# echo "$QUERY"
if [[ -v TUNNEL_SERVER ]]; then
echo "Setting up SSH tunnel..."
PORT="8124"
QUERY_HOST="localhost"
ssh -L "$PORT:$CLICKHOUSE_SERVER:8123" -N "$TUNNEL_SERVER" &
sleep 5
fi
CLICKHOUSE_QUERY_ENDPOINT="http://${USER}:${PASSWORD}@${QUERY_HOST}:$PORT/?database=posthog"
CLICKHOUSE_DSN_STRING="http://${USER}:${PASSWORD}@${QUERY_HOST}:$PORT"
if [[ -v EXPLAIN ]]; then
echo "Query plan:"
# echo "EXPLAIN header=1, json=1, actions=1 ${QUERY} FORMAT TSVRaw" | curl "$CLICKHOUSE_QUERY_ENDPOINT" -s --data-binary @- | jq .
# echo "EXPLAIN PIPELINE header=1 ${QUERY} FORMAT TSVRaw" | curl "$CLICKHOUSE_QUERY_ENDPOINT" -s --data-binary @-
echo "EXPLAIN PIPELINE graph=1, header=1 ${QUERY}" | curl "$CLICKHOUSE_QUERY_ENDPOINT" -s --data-binary @-
else
if [[ -v DROP_CACHE ]]; then
echo "Dropping mark cache..."
echo "SYSTEM DROP MARK CACHE" | curl "$CLICKHOUSE_QUERY_ENDPOINT" -s --data-binary @- &> /dev/null
fi
echo "Executing query..."
echo "$QUERY_WITH_SETTINGS" | curl "$CLICKHOUSE_QUERY_ENDPOINT" -s --data-binary @- # &> /dev/null
echo "Flushing logs..."
echo "SYSTEM FLUSH LOGS" | curl "$CLICKHOUSE_QUERY_ENDPOINT" -s --data-binary @- &> /dev/null
echo "Getting query ID..."
QUERY_ID=$(echo "
SELECT query_id
FROM system.query_log
WHERE
query NOT LIKE '%query_log%'
AND query LIKE '%measure.sh:$RANDOM_QUERY_ID%'
AND type = 'QueryFinish'
ORDER BY query_start_time desc
LIMIT 1
" | curl "$CLICKHOUSE_QUERY_ENDPOINT" -s --data-binary @-)
echo "Query id: $QUERY_ID"
echo "Query stats:"
echo "
SELECT
event_time,
query_duration_ms,
read_rows,
formatReadableSize(read_bytes) as read_size,
result_rows,
formatReadableSize(result_bytes) as result_size,
formatReadableSize(memory_usage) as memory_usage,
tables,
columns
FROM system.query_log
WHERE query_id='$QUERY_ID' AND type = 'QueryFinish'
LIMIT 1
FORMAT Vertical
" | curl "$CLICKHOUSE_QUERY_ENDPOINT" -s --data-binary @- | sed 's/^/ /' | tail -n +2
FILENAME=$(basename -- "$QUERY_FILE")
OUTPUT_PATH="$(date +%s)-${QUERY_ID}-${FILENAME%.*}"
if [[ -z "$NO_FLAMEGRAPHS" ]]; then
clickhouse-flamegraph --query-id "$QUERY_ID" --clickhouse-dsn "$CLICKHOUSE_DSN_STRING" --console --date-from "2021-01-01" -o "${OUTPUT_PATH}" --width 1900
chromium-browser ${OUTPUT_PATH}/*
fi
fi
| true
|
7204fd771c72794b0e60879fc2c72aaa0f39f652
|
Shell
|
grahamallegretto/NuDataTest
|
/Question-2/numsort.sh
|
UTF-8
| 512
| 4.09375
| 4
|
[] |
no_license
|
##
# Sorts a file that contains only numeric entries. Must pass filename as a parameter.
# Will output to 'sortedData.txt'
#
SIZE_OF_CHUNKS=10000
# Exit if file doesn't exist
if [ $# -eq 0 ] || [ ! -e $1 ]; then
echo "File doesn't exist"
exit 1
fi
# Split into smaller files
split -l $SIZE_OF_CHUNKS $1 chunk
# Sort the smaller files
for X in chunk*; do sort --parallel=2 -n $X > sorted-$X; done
rm chunk*
# Sort split files and merge
sort -nm --parallel=2 sorted-chunk* > sortedData.txt
rm sorted-chunk*
| true
|
90479783987f87703d76ab22712211095a7cddd1
|
Shell
|
flavio-a/dotfiles
|
/dotfiles/config/zsh/.zshrc
|
UTF-8
| 2,211
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
# Cross words with ctrl arrows
autoload -U select-word-style
select-word-style bash
bindkey '^[[1;5D' backward-word
bindkey '^[[1;5C' forward-word
# Bind delete key to actually delete characters
bindkey "^[[3~" delete-char
# Disable adding completions, only shows
setopt noautomenu nomenucomplete
# Load shared shell customizations
source "$HOME/.shrc"
# The following lines were added by compinstall
zstyle ':completion:*' add-space true
zstyle ':completion:*' completer _expand _complete _prefix
zstyle ':completion:*' expand prefix suffix
zstyle ':completion:*' ignore-parents parent directory
zstyle ':completion:*' special-dirs true
zstyle ':completion:*' menu select=2
zstyle ':completion:*:default' list-colors ${(s.:.)LS_COLORS}
zstyle ':completion:*' list-colors ''
zstyle ':completion:*' list-prompt '%SAt %p: Hit TAB for more, or the character to insert%s'
zstyle ':completion:*' match-original only
zstyle ':completion:*' menu select=long
zstyle ':completion:*' preserve-prefix '//[^/]##/'
zstyle ':completion:*' select-prompt '%SScrolling active: current selection at %p%s'
zstyle ':completion:*' squeeze-slashes true
zstyle ':completion:*' use-compctl false
zstyle ':completion:*' verbose true
# History
setopt SHARE_HISTORY
setopt HIST_IGNORE_DUPS
setopt HIST_IGNORE_ALL_DUPS
setopt HIST_IGNORE_SPACE
setopt HIST_SAVE_NO_DUPS
HISTFILE="$ZDOTDIR/histfile"
HISTSIZE=100000
SAVEHIST=100000
# Automatically report time for long commands
REPORTTIME=5
# Making disown send continue to the disowned process
setopt AUTO_CONTINUE
# Changing prompt
{%@@ if profile == "marvin" or profile == "zenithia" @@%}
export PROMPT='%F{green}%n@%m%f> '
{%@@ endif @@%}
{%@@ if profile == "uz" @@%}
export PROMPT='%F{green}%n@%m%f$ '
{%@@ endif @@%}
export RPROMPT="%F{blue}%~%f"
# Generic options
unsetopt beep nomatch
# setopt auto_cd
bindkey -e
# Load plugins and setup completion
source "$ZDOTDIR/plugins.zsh"
# Use gpg for ssh
if [ "${gnupg_SSH_AUTH_SOCK_by:-0}" -ne $$ ]; then
export SSH_AUTH_SOCK="$(gpgconf --list-dirs agent-ssh-socket)"
fi
# Disable Ctrl-S to pause terminal
stty -ixon
# Open a file with a GUI program (determined by xdg)
gopen() {
xdg-open "$1" &> /dev/null &; disown
}
| true
|
a50d58dc00814ad7637de0ccc89027139998aff5
|
Shell
|
noelnamai/kent
|
/src/tagStorm/jsonToTagStorm/errCheck.sh
|
UTF-8
| 1,178
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
usage() {
echo "errCheck - run a program and compare error response to expected.
usage:
errCheck <expectedStderr.txt> <command> [command arguments]
Returns zero if command returns with a non-crashing nonzero return and the
stderr output of the command matches expected.txt" 1>&2
}
if [ $# -lt 2 ]; then
usage
exit 255
fi
export expectTextFile="$1"
if [ ! -s "${expectTextFile}" ]; then
echo "ERROR: can not read expectedStderr.txt file: $expectTextFile" 1>&2
usage
fi
export command="$2"
export stderrOut="/dev/shm/errCheck.err.$$"
export stdoutOut="/dev/shm/errCheck.out.$$"
shift 2
echo "# ${command} $*" 1>&2
${command} $* 2> "${stderrOut}" > "${stdoutOut}"
export returnCode=$?
export exitValue=0
if [ "${returnCode}" -ne 0 ]; then
diffCount=`diff "${stderrOut}" "${expectTextFile}" 2> /dev/null | wc -l`
if [ "${diffCount}" -ne 0 ]; then
echo "ERROR: command returns exit code ${returnCode}" 1>&2
echo "ERROR: did not find expected stderr content" 1>&2
exitValue=255
fi
else
echo "ERROR: command returns zero exit code" 1>&2
exitValue=255
fi
rm -f "${stderrOut}" "${stdoutOut}"
exit $exitValue
| true
|
4ab2a885c319b9fc12d9d02eef5c953c5be22162
|
Shell
|
ShotaroBaba/shotaro-redmine-docker
|
/generate-docker-compose-yml.sh
|
UTF-8
| 775
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Set passwords first
num_pass_char=24
# Generate random password
redmine_db_password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c $num_pass_char ; echo '')
redmine_secret_key=$(tr -dc A-Za-z0-9 </dev/urandom | head -c $num_pass_char ; echo '')
mysql_root_password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c $num_pass_char ; echo '')
web_client_password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c $num_pass_char ; echo '')
# Replace the string with generated passwords.
cat docker-compose-template.yml | sed "s/___REDMINE_DB_PASSWORD___/$redmine_db_password/g" | \
sed "s/___REDMINE_SECRET_KEY___/$redmine_secret_key/g" | \
sed "s/___WEB_CLIENT_PASSWORD___/$web_client_password/g" | \
sed "s/___MYSQL_ROOT_PASSWORD___/$mysql_root_password/g" > docker-compose.yml
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.