blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7a260a21726d531f52de5f675e49d5893546f9f8 | Shell | spareproject/ducking-shame | /bin/remote_execute/epoch/caller | UTF-8 | 205 | 3.1875 | 3 | [] | no_license | #!/bin/env bash
# Tick once per second and call ./execute with "true" at most once every
# 60 seconds; all other ticks call it with "false".
timestamp=0
# Exit the loop cleanly on Ctrl-C.
trap "exit" INT
while true; do
    sleep 1
    # Sample the clock once per iteration: the original called date(1)
    # twice (in the test and again when re-arming), so the deadline could
    # drift when the two calls straddled a second boundary.
    now=$(date +%s)
    if [[ $now -gt $timestamp ]]; then
        ./execute "true"
        timestamp=$((now + 60))
    else
        ./execute "false"
    fi
done
| true |
53e94bf709967989e1b2e818769f81fd0f2a4176 | Shell | Bhaskers-Blu-Org2/accessibility-insights-service | /packages/resource-deployment/scripts/setup-key-vault.sh | UTF-8 | 2,889 | 4.15625 | 4 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | #!/bin/bash
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

# shellcheck disable=SC1090
set -eo pipefail

# This script will deploy Azure Batch account in user subscription mode
# and enable managed identity for Azure on Batch pools

# Populated by the flags below and by the sourced get-resource-names.sh.
export resourceGroupName
export keyVault
export enableSoftDeleteOnKeyVault

# Set default ARM Batch account template files
createKeyVaultTemplateFile="${0%/*}/../templates/key-vault-create.template.json"
setupKeyVaultResourcesTemplateFile="${0%/*}/../templates/key-vault-setup-resources.template.json"

# Print usage on stderr (so it is not mistaken for data) and exit non-zero.
exitWithUsageInfo() {
    echo "
Usage: $0 -r <resource group> -k <enable soft delete for Azure Key Vault> -c <webApiAdClientId> -p <webApiAdClientSecret>
" >&2
    exit 1
}

. "${0%/*}/process-utilities.sh"

# Create the Key Vault from the ARM template unless one with the expected
# name already exists in the subscription.
function createKeyvaultIfNotExists() {
    local existingResourceId
    # Declare and assign separately: 'local var=$(cmd)' returns the status
    # of 'local' (always 0), masking an 'az' failure and defeating 'set -e'
    # (ShellCheck SC2155).
    existingResourceId=$(az keyvault list \
        --query "[?name=='$keyVault'].id|[0]" \
        -o tsv
    )

    if [[ -z ${existingResourceId} ]]; then
        echo "Key vault does not exist. Creating using ARM template."
        resources=$(
            az deployment group create \
                --resource-group "$resourceGroupName" \
                --template-file "$createKeyVaultTemplateFile" \
                --query "properties.outputResources[].id" \
                --parameters enableSoftDeleteOnKeyVault="$enableSoftDeleteOnKeyVault" \
                -o tsv
        )

        echo "Created Key vault:
    resource: $resources
"
    else
        echo "Key vault already exists. Skipping Key vault creation using ARM template"
    fi
}

# Apply access policies / related resources to the Key Vault via ARM template.
function setupKeyVaultResources() {
    echo "Setting up key vault resources using ARM template."
    resources=$(
        az deployment group create \
            --resource-group "$resourceGroupName" \
            --template-file "$setupKeyVaultResourcesTemplateFile" \
            --query "properties.outputResources[].id" \
            -o tsv
    )

    echo "Successfully setup Key vault resources:
    resource: $resources
"
}

# Read script arguments
while getopts ":r:k:c:p:" option; do
    case $option in
    r) resourceGroupName=${OPTARG} ;;
    k) enableSoftDeleteOnKeyVault=${OPTARG} ;;
    c) webApiAdClientId=${OPTARG} ;;
    p) webApiAdClientSecret=${OPTARG} ;;
    *) exitWithUsageInfo ;;
    esac
done

# Print script usage help
if [[ -z $resourceGroupName ]] || [[ -z $enableSoftDeleteOnKeyVault ]] || [[ -z $webApiAdClientId ]] || [[ -z $webApiAdClientSecret ]]; then
    exitWithUsageInfo
fi

# Login to Azure account if required
if ! az account show 1>/dev/null; then
    az login
fi

. "${0%/*}/get-resource-names.sh"

createKeyvaultIfNotExists
setupKeyVaultResources

. "${0%/*}/push-secrets-to-key-vault.sh"

echo "The '$keyVault' Azure Key Vault account successfully deployed"
| true |
3ba64ea817efd0aaa97ee5730abb5ce54555a5fb | Shell | Carpe4me/android-sdk-docker | /bashrc | UTF-8 | 417 | 3.171875 | 3 | [] | no_license |
# Add git branch if it's present to PS1
# Prints the current branch wrapped in parentheses, e.g. "(main)", or
# nothing when the cwd is not inside a git repository (git errors are
# silenced).  The sed deletes every line not starting with '*' and wraps
# the remaining "* branch" line in parens.
parse_git_branch() {
  git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/(\1)/'
}
# Colored prompt: green user@host, blue working directory, yellow branch.
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\[\033[33m\]$(parse_git_branch)\[\033[0m\]\$ '
# More alias
alias so='source ~/.bashrc'      # reload this bashrc
alias studio='studio.sh &'       # launch Android Studio in the background
alias clr='clear'
alias cls='clear'
alias gitdiff='git difftool -d'  # whole-tree diff in the configured GUI tool
66fb722a25c698fad8f3d52f4df24b92b3d9b803 | Shell | AnshumanSrivastavaGit/burmatscripts | /bash/hosts_to_ips.sh | UTF-8 | 327 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Abort with usage help (on stderr, where errors belong) when no file given.
if [[ $# -eq 0 ]] ; then
    echo '[!] Error: please provide a list of hostnames as an argument, e.g.:' >&2;
    echo '# ./hosts_to_ips.sh /tmp/hosts.txt' >&2;
    exit 1;
fi

# Resolve each hostname in the file and print the last field of the host(1)
# output (the IP address), skipping alias lines and failed lookups.
if [[ -f "$1" ]]; then
    # Redirect the file into the loop instead of 'cat file | while' (UUOC),
    # use 'read -r' so backslashes are preserved, and quote "$line" so odd
    # whitespace cannot word-split the lookup.
    while read -r line
    do
        host "$line" | grep -v "alias\|not found" | awk -F ' ' '{print $NF}'
    done < "$1"
fi
| true |
762a42a4594fadf9915675643778be4876265741 | Shell | arthur-mulkandov/PP | /MYWORK/CheckBranchUpdateFromMaster.sh | UTF-8 | 1,293 | 3.734375 | 4 | [] | no_license | #!/bin/bash +x
#################################
# Arthur 2016.11.28
#
# Report whether <branch name> already contains the latest origin/master
# commits; if not, list the master commits still pending a merge.
#################################
if [ -z "${1:-}" ] || [ -z "${2:-}" ]
  then echo; echo "Syntax: $0 <branch name> <repo path>" >&2; echo;
  # 'exit -1' is non-portable and wraps to 255; a plain 1 is conventional.
  exit 1
fi

echo "========================================================"
echo "LAST MASTER CHANGES IN BRANCH $1 VERIFICATION"
echo "========================================================"

# Quote user-supplied path/branch so whitespace cannot word-split them.
cd "$2" || exit 1
git fetch || exit 1

CM=$(git rev-parse remotes/origin/master)
echo "Master revision is $CM"
CB=$(git rev-parse "remotes/origin/$1")
echo "$1 revision is $CB"
BM=$(git merge-base "origin/$1" origin/master)
echo "Base revision of $1 is $BM"

if [ "$CM" = "$BM" ]
  then echo "$CM is equal to base commit of $1"
  exit
fi

echo "WARNING: $CM is not equal to base commit of $1 - $BM"
LM=$(git show -s --format=%ci "${BM}")
echo
echo "Last merge from master time is: $LM"
echo

CC=1
# Abbreviated hash of the merge base, compared against 'git log --oneline'
# output.  NOTE(review): --oneline may abbreviate to more than 7 chars in
# large repositories — confirm this comparison still matches there.
TT=${BM:0:7}
echo
echo "---------------------------------------------------------"
echo "Below is list of master commits for merge to your branch:"
echo "---------------------------------------------------------"
# Count the recent master commits (up to 200) that come after the merge
# base; the final listing includes the base commit itself, matching the
# original behaviour.
for line in $(git log --oneline remotes/origin/master | head -200 | cut -d' ' -f1)
do
  if [ "$line" = "$TT" ]
    then break
  fi
  ((CC++))
done
git log --oneline remotes/origin/master | head -"${CC}"
6b84fc5d9c449cabcd1c3bb8530ec17a3c448688 | Shell | masaki-furuta/et_install.sh | /et_install_rhel7.sh | UTF-8 | 1,015 | 2.984375 | 3 | [] | no_license |
#!/bin/bash -xv
# Install Eternal Terminal (et) on RHEL/CentOS 7 by rebuilding the Fedora
# SRPM locally.  -xv traces and echoes every command for debugging.

# All package installs and service management below require root.
if [[ $UID -ne 0 ]]; then
        echo "Need to run as root !"
        exit 1
fi
yum -y install yum-utils yum-priorities
# EPEL provides build dependencies; pin it at a low priority (100).
rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
yum-config-manager --setopt="epel.priority=100" --save | grep -E '\[|priority'
yum -y install wget make kernel-devel rpm-build
# CentOS needs the SCL release package; on RHEL enable the RHSCL channel.
grep -q CentOS /etc/redhat-release && \
yum -y install centos-release-scl
yum-config-manager --enable rhel-server-rhscl-7-rpms
# devtoolset-7 supplies the newer GCC toolchain used for the rebuild.
yum -y install devtoolset-7
yum -y upgrade
SRPM=https://copr-be.cloud.fedoraproject.org/results/masakifuruta/et/srpm-builds/01618165/et-6.0.11-2.fc32.src.rpm
wget -c ${SRPM}
rm -fv /root/rpmbuild/RPMS/x86_64/et-*.el7.x86_64.rpm
# First rebuild attempt; on failure, scrape the "... is needed" lines from
# the output, install those BuildRequires, then retry once.
rpmbuild --rebuild ./${SRPM##*/} || \
( rpmbuild --rebuild ./${SRPM##*/} 2>&1 | sed -e '/needed/!d' -e 's/is.*//g' | perl -pe "s/\n/ /g" | xargs yum -y install; rpmbuild --rebuild ./${SRPM##*/} )
rpm -Uvh /root/rpmbuild/RPMS/x86_64/et-*.el7.x86_64.rpm
# Start et now and on boot.
# NOTE(review): disabling firewalld wholesale is heavy-handed — presumably
# to expose et's port; confirm before using in production.
systemctl enable --now et
systemctl disable --now firewalld
5ddff0c0b58f655fb36dfc1a90f98ef0cb84453c | Shell | w21917179/Random_Stuff | /asf_log_To_Line.sh | UTF-8 | 860 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Forward new lines from the ASF container's docker JSON log to LINE Notify.
# (Comments translated from Chinese.)

# Get the container id.
# BUG FIX: the original used 'a=(docker ps ...)', which builds a literal
# array ("docker" "ps" ...) instead of running the command; command
# substitution $(...) is required here.
a=$(docker ps -aq --no-trunc -f "name=asf")
logfile="/var/lib/docker/containers/$a/$a-json.log"

# Remember the current number of lines in the log file.
count=$(wc -l < "$logfile")

# When the line count changes (the file grew), pull the next line, extract
# the .log field, strip ANSI escape sequences, and push it to LINE Notify,
# advancing one line per second until we catch up.
while true
do
    if [ "$(wc -l < "$logfile")" != "$count" ]; then
        count=$((count + 1))
        message=$(tail -n +$count "$logfile" | head -n 1 | jq -r '.log' | sed -r "s/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g")
        #echo $message
        # NOTE(review): keep the real bearer token out of the script —
        # read it from a file or environment variable instead.
        curl -s -X POST -H 'Authorization: Bearer *******************************************' -F "message=$message" https://notify-api.line.me/api/notify > /dev/null
    fi
    sleep 1
done
| true |
231fde7e9a5713287d0c2d24d1971f2374858f3c | Shell | jleoramirezm/Linux | /install.customize.sh | UTF-8 | 1,155 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Desktop customization installer: adds theme / grub-customizer / cairo-dock
# PPAs and installs the packages, printing a colored banner per section.

# Reset color
RS="\e[0m"
# Basic Colors (only RS and CYAN are used below; the rest are defined for
# convenience / future sections)
BLACK="\e[0;30m"
RED="\e[0;31m"
GREEN="\e[0;32m"
YELLOW="\e[0;33m"
BLUE="\e[0;34m"
PURPLE="\e[0;35m"
CYAN="\e[0;36m"
WHITE="\e[0;37m"

# Print $1 inside a cyan decorative box.
function_message_title () {
  echo -e "${CYAN}"
  echo -e "# | ::::::::::::::::::::::::::::::::::::::::::::: | #"
  echo -e "# | ${RS} $1 ${CYAN}"
  echo -e "# | ::::::::::::::::::::::::::::::::::::::::::::: | #"
  echo -e "${RS}"
}

# Absolute directory this script lives in.
# NOTE(review): SCRIPT_PATH is not referenced later in this file — possibly
# used by a sourced/companion script; confirm before removing.
SCRIPT_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

# - **update**
sudo apt update
# NOTE(review): 'wait' only waits for background jobs and none are started
# here, so these calls are effectively no-ops.
wait

# - **Themes Libra Flattastic**
function_message_title '- **Install Themes Libra Flattastic**'
sudo add-apt-repository -y ppa:noobslab/themes
sudo apt-get update
sudo apt-get install -y libra-theme
sudo apt-get install -y flattastic-suite

# - **Grub Customizer**
function_message_title '- **Grub Customizer**'
sudo add-apt-repository -y ppa:danielrichter2007/grub-customizer
sudo apt update
sudo apt-get install -y grub-customizer
wait

# - **Cairo-Dock**
function_message_title '- **Cairo-Dock**'
sudo add-apt-repository -y ppa:cairo-dock-team/ppa
sudo apt update
sudo apt-get install -y cairo-dock cairo-dock-plug-ins
wait
0cbbe63f573cf466644cd95f4b61bfa398cef92d | Shell | amrithadevadiga/Sorce | /sandata-HDF-Ambari27/HDF-Ambari27/ambari-27-setup/appjob | UTF-8 | 2,186 | 3.59375 | 4 | [] | no_license | #!/bin/bash
#
# Copyright (c) 2016, BlueData Software, Inc.
#
# Job dispatcher: runs a Hadoop-ecosystem job inside the cluster node,
# selecting the invocation (hadoop jar / pig / hive / hbase / impala)
# from the -t job type and logging output under /var/log/bluedata/.

SELF=$(readlink -nf $0)
export CONFIG_BASE_DIR=$(dirname ${SELF})

JOBID=''
JOBTYPE=''
JOBCMDLINE=''
JOBSTART='false'

# Parse -s (start flag), -i <job id>, -t <job type> and -c <cmdline...>.
# -c consumes ALL remaining arguments: the 'shift $((OPTIND - 2))' drops
# everything already parsed so "$@" becomes the raw job command line.
# NOTE(review): JOBSTART is set by -s but never consulted below — confirm
# whether it is used by the sourced utils.
parse_args() {
    while getopts ":si:t:c:" opt; do
        case ${opt} in
            s)
                JOBSTART='true'
                ;;
            i)
                JOBID=${OPTARG}
                ;;
            t)
                JOBTYPE=${OPTARG}
                ;;
            c)
                shift $((OPTIND - 2))
                JOBCMDLINE=$@
                ;;
        esac
    done

    if [[ -z ${JOBID} || -z ${JOBTYPE} || -z ${JOBCMDLINE} ]]; then
        echo "ERROR: -i, -t and -c command line options are mandatory."
        exit 1
    fi
}

parse_args $@

# Per-job log file, consumed by the sourced logging helpers.
APPJOB_LOG_DIR=/var/log/bluedata/
[[ ! -e ${APPJOB_LOG_DIR} ]] && mkdir -p ${APPJOB_LOG_DIR}

export LOG_FILE_PATH="${APPJOB_LOG_DIR}/${JOBID}.out"
source ${CONFIG_BASE_DIR}/logging.sh
source ${CONFIG_BASE_DIR}/utils.sh

NODEGROUP="$(invoke_bdvcli --get node.nodegroup_id)"

################################################################################
#                 Add application specific job invocation code below.          #
################################################################################
# Master switch for various jobs types this distro can handle.  Each branch
# runs as the service user appropriate for the engine (-E keeps env vars).
if [[ "${JOBTYPE}" == 'hadoopcustom' ]]; then
    log_exec sudo -u hdfs -E hadoop jar ${JOBCMDLINE}
elif [[ "${JOBTYPE}" == 'hadoopstreaming' ]]; then
    # The streaming jar location is recorded in the cluster metadata.
    STREAMJAR="$(invoke_bdvcli --get cluster.config_metadata.${NODEGROUP}.streaming_jar)"
    log_exec sudo -u hdfs -E hadoop jar ${STREAMJAR} ${JOBCMDLINE}
elif [[ "${JOBTYPE}" == 'pig' ]]; then
    log_exec "sudo -u hdfs -E pig -f ${JOBCMDLINE}"
elif [[ "${JOBTYPE}" == 'hive' ]]; then
    log_exec "sudo -u hive -E hive -f ${JOBCMDLINE}"
elif [[ "${JOBTYPE}" == 'hbase' ]]; then
    # hbase shell reads the command file from stdin.
    log_exec sudo -u hbase -E hbase shell --noninteractive < ${JOBCMDLINE}
elif [[ "${JOBTYPE}" == 'impala' ]]; then
    log_exec sudo -u impala -E impala-shell -f ${JOBCMDLINE}
else
    log_error "Unknow Job type: ${JOBTYPE}"
    exit 3
fi

# We never reach here if any of the commands above failed.
exit 0;
9db45083d8986fb3e0a97472371bbd65dea336a0 | Shell | laspg/NEMURO-GoM | /MITgcm/tools/build_options/linux_amd64_mpif90_coaps | UTF-8 | 2,082 | 2.984375 | 3 | [] | no_license | #!/bin/bash
#
# $Header: /u/gcmpack/MITgcm/tools/build_options/linux_amd64_ifort,v 1.10 2013/07/22 18:40:45 jmc Exp $
# $Name:  $
#
# MITgcm genmake2 "optfile": this file is *sourced* by genmake2 to set
# compiler and flag variables; it is not executed on its own.
#
# Composed and tested by ce107 on ross/weddell (Opteron system)
# Should work fine on EM64T and other AMD64 compatible Intel systems
# a) Processor specific flags:
#    1) for more speed on Core2 processors replace -xW with -xT
#    2) for more speed on Pentium4 based EM64T processors replaces -xW with -xP
# b) For more speed, provided your data size doesn't exceed 2GB you can
# remove -fPIC which carries a performance penalty of 2-6%.
# c) Provided that the libraries you link to are compiled with -fPIC this
# optfile should work.
# d) You can replace -fPIC with -mcmodel=medium which may perform faster
# than -fPIC and still support data sizes over 2GB per process but all
# the libraries you link to must be compiled with -fPIC or -mcmodel=medium
# e) Changed from -O3 to -O2 to avoid buggy Intel v.10 compilers. Speed
# impact appears to be minimal.
#
# MPI : DON'T FORGET to set environment variable MPI_INC_DIR to the include
# directory of your MPI implementation
#-------
# run with OpenMP: needs to set environment var. OMP_NUM_THREADS
# and generally, needs to increase the stack-size:
#   - sh,bash:
#     > export OMP_NUM_THREADS=2
#     > export KMP_STACKSIZE=400m
#   - csh,tcsh:
#     > setenv OMP_NUM_THREADS 2
#     > setenv KMP_STACKSIZE 400m
# NOTE: with MPI+OpenMP, need to set KMP_STACKSIZE in ~/.tcshrc (but curiously,
#       works without OMP_NUM_THREADS in ~/.tcshrc).
#-------

# Compiler selection: MPI wrapper scripts when MPI=true, plain gcc/f95
# otherwise.  The link step always goes through the Fortran-90 compiler.
if test "x$MPI" = xtrue ; then
  CC=mpicc
  FC=mpif77
  F90C=mpif90
  LINK="$F90C -i-dynamic"
else
  CC=gcc
  FC=f95
  F90C=f95
  LINK="$F90C -i-dynamic"
fi

DEFINES='-DWORDLENGTH=4'
CPP='cpp -traditional -P'
F90FIXEDFORMAT='-fixed -Tf'
EXTENDED_SRC_FLAG='-extend_source'
GET_FC_VERSION="--version"
OMPFLAG='-openmp'

NOOPTFLAGS='-O0 -g'
NOOPTFILES=''

# -fconvert=swap: byte-swapped (big-endian) unformatted I/O for gfortran.
FFLAGS="$FFLAGS -fconvert=swap -m64"
FFLAGS="$FFLAGS -fPIC"
FOPTIM='-O2'
F90FLAGS=$FFLAGS
F90OPTIM=$FOPTIM
CFLAGS='-fPIC'

INCLUDEDIRS=''
INCLUDES=''
LIBS='-L/usr/lib64'
25f08f49984258361e9b5f096d9ac1a225157bd5 | Shell | adbadb/hadoop-playground | /init.d/namenode.sh | UTF-8 | 241 | 2.984375 | 3 | [] | no_license | #!/bin/bash
set -eo pipefail

# first time start, namenode should be formatted
# The ".done" marker records that formatting already ran, so a container
# restart does not re-format (and wipe) existing HDFS metadata.
if [ ! -f "${HADOOP_STORAGE_DIR}/.done" ]; then
    /usr/bin/hdfs namenode -format && touch "${HADOOP_STORAGE_DIR}/.done"
fi

# NOTE(review): fixed 5s delay, presumably to let dependent services settle
# before startup — confirm whether it is still required.
sleep 5

# exec replaces the shell so the namenode receives container signals directly.
exec /usr/bin/hdfs namenode
| true |
8b71f6fd7baca53fdbc4cf01477c2034718d14e8 | Shell | unsuitable001/usnippet | /addendline.sh | UTF-8 | 275 | 2.84375 | 3 | [
"MIT"
] | permissive | # Append a trailing newline to every text file that lacks one.
for i in $(find . -type f ! -path "*/*.egg-info/*"\
            ! -path "./.*"\
            ! -path "*.min.*"\
            ! -path "*.svg" -exec grep -Iq . {} \; -and -print); do
    # NOTE(review): $(find ...) word-splits, so paths containing whitespace
    # still break; a 'find -print0 | while read -d ""' loop would be safer.
    # Command substitution strips a trailing newline, so a non-empty result
    # from 'tail -c 1' means the last byte is NOT '\n'.  "$i" is quoted
    # (the original left it unquoted in both places).
    if [ "$(tail -c 1 "$i")" != "" ]; then
        echo "$i needs endline... adding it"
        echo "" >> "$i"
    fi
done
| true |
e88cd15a885b7bfdd7b6d8796895e4d77303cb75 | Shell | RileyMShea/docker | /ci/gpuci/run.sh | UTF-8 | 3,163 | 3.84375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e

# gpuCI build driver: builds one docker image selected by the CI matrix
# variables (LINUX_VER / IMAGE_TYPE / CUDA_VER / ...) and pushes it.

# Overwrite HOME to WORKSPACE
export HOME=$WORKSPACE

# Install gpuCI tools
curl -s https://raw.githubusercontent.com/rapidsai/gpuci-tools/main/install.sh | bash
source ~/.bashrc
cd ~

# Show env
gpuci_logger "Exposing current environment..."
env

# Login to docker
gpuci_logger "Logging into Docker..."
echo $DH_TOKEN | docker login --username $DH_USER --password-stdin &> /dev/null

# Select dockerfile based on matrix var
DOCKERFILE="${DOCKER_PREFIX}_${LINUX_VER}-${IMAGE_TYPE}.Dockerfile"
gpuci_logger "Using Dockerfile: generated-dockerfiles/${DOCKERFILE}"

# Debug output selected dockerfile
gpuci_logger ">>>> BEGIN Dockerfile <<<<"
cat generated-dockerfiles/${DOCKERFILE}
gpuci_logger ">>>> END Dockerfile <<<<"

# Get build info ready
gpuci_logger "Preparing build config..."
BUILD_TAG="cuda${CUDA_VER}-${IMAGE_TYPE}-${LINUX_VER}"

# Check if PR build and modify BUILD_IMAGE and BUILD_TAG
# PR builds are redirected to the rapidsaitesting/ namespace; the repo name
# is the second '/'-separated field of the image reference.
if [ ! -z "$PR_ID" ] ; then
  echo "PR_ID is set to '$PR_ID', updating BUILD_IMAGE..."
  BUILD_REPO=`echo $BUILD_IMAGE | tr '/' ' ' | awk '{ print $2 }'`
  BUILD_IMAGE="rapidsaitesting/${BUILD_REPO}-pr${PR_ID}"
  # Check if FROM_IMAGE to see if it is a root build
  if [[ "$FROM_IMAGE" == "gpuci/rapidsai" ]] ; then
    echo ">> No need to update FROM_IMAGE, using external image..."
  else
    echo ">> Need to update FROM_IMAGE to use PR's version for testing..."
    FROM_REPO=`echo $FROM_IMAGE | tr '/' ' ' | awk '{ print $2 }'`
    FROM_IMAGE="rapidsaitesting/${FROM_REPO}-pr${PR_ID}"
  fi
fi

# Setup initial BUILD_ARGS
BUILD_ARGS="--squash \
  --build-arg FROM_IMAGE=${FROM_IMAGE} \
  --build-arg CUDA_VER=${CUDA_VER} \
  --build-arg IMAGE_TYPE=${IMAGE_TYPE} \
  --build-arg LINUX_VER=${LINUX_VER}"

# Add BUILD_BRANCH arg for 'main' branch only
if [ "${BUILD_BRANCH}" = "main" ]; then
  BUILD_ARGS+=" --build-arg BUILD_BRANCH=${BUILD_BRANCH}"
fi

# Check if PYTHON_VER is set
if [ -z "$PYTHON_VER" ] ; then
  echo "PYTHON_VER is not set, skipping..."
else
  echo "PYTHON_VER is set to '$PYTHON_VER', adding to build args/tag..."
  BUILD_ARGS+=" --build-arg PYTHON_VER=${PYTHON_VER}"
  BUILD_TAG="${BUILD_TAG}-py${PYTHON_VER}"
fi

# Check if RAPIDS_VER is set
if [ -z "$RAPIDS_VER" ] ; then
  echo "RAPIDS_VER is not set, skipping..."
else
  echo "RAPIDS_VER is set to '$RAPIDS_VER', adding to build args..."
  BUILD_ARGS+=" --build-arg RAPIDS_VER=${RAPIDS_VER}"
  BUILD_TAG="${RAPIDS_VER}-${BUILD_TAG}" #pre-prend version number
fi

# Ouput build config
gpuci_logger "Build config info..."
echo "Build image and tag: ${BUILD_IMAGE}:${BUILD_TAG}"
echo "Build args: ${BUILD_ARGS}"
gpuci_logger "Docker build command..."
echo "docker build --pull -t ${BUILD_IMAGE}:${BUILD_TAG} ${BUILD_ARGS} -f generated-dockerfiles/${DOCKERFILE} context/"

# Build image
gpuci_logger "Starting build..."
docker build --pull -t ${BUILD_IMAGE}:${BUILD_TAG} ${BUILD_ARGS} -f generated-dockerfiles/${DOCKERFILE} context/

# List image info
gpuci_logger "Displaying image info..."
docker images ${BUILD_IMAGE}:${BUILD_TAG}

# Upload image
# Retries guard against transient registry failures.
gpuci_logger "Starting upload..."
GPUCI_RETRY_MAX=5
GPUCI_RETRY_SLEEP=120
gpuci_retry docker push ${BUILD_IMAGE}:${BUILD_TAG}
78e92c544e2338789908ded397f1265dfe66daa3 | Shell | imranxpress/docker-training-for-me | /scenario/redis_cluster_sample/my_test.sh | UTF-8 | 2,485 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Exercise Redis Sentinel failover in a docker-compose "redis-cluster"
# stack: show the topology, pause the master, verify sentinel promotes a
# replica, then unpause and show the final state.

# Count matching containers.  The pattern is quoted (the original left the
# trailing '*' unquoted, so the shell could glob-expand it against files in
# the cwd) and 'grep -c' replaces the 'grep | wc -l' pipeline.
slave_nu=$(docker ps | grep -c 'redis-cluster_slave_')
sentinel_nu=$(docker ps | grep -c 'redis-cluster_sentinel_')
MASTER_IP=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' redis-cluster_master_1)
echo --------------------------
echo "###### INFORMATION ######"
echo --------------------------
echo Redis Slave Numbers: "$slave_nu"
echo Redis Sentinel Numbers: "$sentinel_nu"
echo --------------------------
echo
echo --------------------------
echo Redis master: "$MASTER_IP"
echo --------------------------
echo
echo --------------------------
for ((i=1; i<=slave_nu; i++));
do
    SLAVE_IP_[$i]=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' "redis-cluster_slave_$i")
    echo Redis Slave $i: "${SLAVE_IP_[$i]}"
    echo --------------------------
done
echo
echo -----------------------------
for ((i=1; i<=sentinel_nu; i++));
do
    SENTINEL_IP_[$i]=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' "redis-cluster_sentinel_$i")
    echo Redis Sentinel $i: "${SENTINEL_IP_[$i]}"
    echo -----------------------------
done
echo --------------------------
echo Initial status of sentinel
echo --------------------------
#docker exec redis-cluster_sentinel_1 redis-cli -p 26379 info Sentinel
echo Current master is
#docker exec redis-cluster_sentinel_1 redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
docker-compose exec sentinel redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
echo ------------------------------------------------
echo Stop redis master
docker pause redis-cluster_master_1
# Give sentinel time to detect the outage and elect a new master.
echo Wait for 15 seconds
sleep 15
echo Current infomation of sentinel
#docker exec redis-cluster_sentinel_1 redis-cli -p 26379 info Sentinel
#docker-compose exec sentinel redis-cli -p 26379 info sentinel
echo Current master is
#docker exec redis-cluster_sentinel_1 redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
docker-compose exec sentinel redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
echo ------------------------------------------------
echo Restart Redis master
docker unpause redis-cluster_master_1
#sleep 5
echo Current infomation of sentinel
#docker exec redis-cluster_sentinel_1 redis-cli -p 26379 info Sentinel
#docker-compose exec sentinel redis-cli -p 26379 info sentinel
echo Current master is
#docker exec redis-cluster_sentinel_1 redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
docker-compose exec sentinel redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
| true |
2cea77fdc3210b5a23ac9b43f3462d3676d1b2fe | Shell | qnnming/pkgsrc | /minix/pbulk-ng.sh | UTF-8 | 12,546 | 3.796875 | 4 | [] | no_license | #!/bin/sh
# This script tries to optimise time building for jailed pbulk builds
# at the expense of disk space.
#
# A full (all packages) build requires currently about:
# - 1.0GB pkgsrc itself
# - 0.3GB chroot-minix
# - 1.3GB chroot-bootstrap
# - 1.3GB chroot-pbulk (w/o distfiles)
# - 24.0GB distfiles (sources of the packages)
# - 20.0GB during build, to store temporary files and produced packages
# ==========
# ~48.0GB total
#
# I (LSC) recommend a partition of at least 100GB for a full build.
#
# this will create the following hierarchy (assuming defaults) :
# /usr/bbuild
# /usr/bbuild/chroot-minix Pristine minix rootfs
# /usr/bbuild/chroot-bootstrap Pristine minix + bootstrapped pbulk tools
# /usr/bbuild/chroot-pbulk Root system with build pkgsrc.
#
# Upon successful completion the following will be copied into:
# /usr/bbuild/save/YYYYMMDD-HHmmss.logs pbulk-log files
# /usr/bbuild/save/YYYYMMDD-HHmmss.packages generated packages
# /usr/bbuild/save/YYYYMMDD-HHmmss.disfiles fetched distfiles
#
# Exit at the first error
set -e

# Some useful constant
YES="yes"
NO="no"

# Defaults — ': ${VAR=default}' assigns only when VAR is unset, so every
# setting below can be overridden from the caller's environment.
: ${TOPDIR=/usr/bbuild}
: ${SAVEDIR=/usr/bbuild/save}
: ${TOOLDIR=/usr/bbuild/tooldir.$(uname -p)}
: ${OBJDIR=/usr/bbuild/obj.$(uname -p)}
: ${MINIXSRCDIR=/usr/src}
: ${PKGSRCDIR=/usr/pkgsrc}
: ${DISTFILESDIR=/usr/pkgsrc/distfiles}
: ${USETOOLS=yes}
: ${ROOT_MINIX=${TOPDIR}/chroot-minix}
: ${ROOT_BOOTSTRAP=${TOPDIR}/chroot-bootstrap}
: ${ROOT_PBULK=${TOPDIR}/chroot-pbulk}
# By default copy the local pkgsrc repository
: ${PKGSRC_COPY=${YES}}
: ${PKGSRC_REMOTE=pkgsrc}
: ${PKGSRC_URL=git://git.minix3.org/pkgsrc.git}
: ${PKGSRC_BRANCH=minix-master}
# Destination pkgsrc git, absolute path, has to contain ROOT_BOOTSTRAP!
: ${PKGSRC_GIT=${ROOT_BOOTSTRAP}/usr/pkgsrc/.git}
# By default re-use root FS if available
: ${BUILD_ROOT_BOOTSTRAP=${NO}}
: ${BUILD_ROOT_MINIX=${NO}}
: ${BUILD_ROOT_PBULK=${NO}}
# By default copy to a safe place the generated packages, distfiles and logs
: ${SAVE_PACKAGES=${YES}}
: ${SAVE_DISTFILES=${YES}}
: ${SAVE_LOGS=${YES}}
# Use tools through variables, ease the debug process
: ${DRY_RUN=${NO}}
# Some private variables which may used from within the chroots
: ${CMD_BOOTSTRAP=./bootstrap/bootstrap}
: ${CMD_BOOTSTRAP_CLEANUP=./bootstrap/cleanup}
# This cannot be changed without updating pbulk.conf
: ${BOOTSTRAP_PREFIX=/usr/pbulk}
: ${PKGSRC_PREFIX=/usr/pkgsrc}

# Generate a clean PATH for the jails.
CHROOT_PATH=""
for d in ${BOOTSTRAP_PREFIX} /usr/pkg /usr/X11R7 /usr ''
do
    CHROOT_PATH=${CHROOT_PATH}:${d}/bin:${d}/sbin
done
CHROOT_PATH=${CHROOT_PATH}:/usr/games
LD_CHROOT_PATH=/usr/pkg/lib:/usr/X11R7/lib:/usr/lib:/lib

if [ ! -d $MINIXSRCDIR ]
then
    echo Please install the minix sources in $MINIXSRCDIR.
    exit 1
fi

#============================================================================
# Every external tool is invoked through a variable so a dry run can swap
# each one for an 'echo ##: ...' stub that merely prints the command.
if [ ${DRY_RUN} = ${YES} ]
then
    RM='echo ##: rm '
    MV='echo ##: mv '
    CP='echo ##: cp '
    CD='echo ##: cd '
    LN='echo ##: ln '
    SED='echo ##: sed '
    CHROOT='echo ##: chroot '
    MKDIR='echo ##: mkdir '
    TAR='echo ##: tar '
    EXPORT='echo ##: export '
    PKG_ADD='echo ##: pkg_add '
    SYNCTREE='echo ##: synctree '
    GIT='echo ##: git '
    BMAKE='echo ##: bmake '
    CMD_RELEASE="echo ##: ${CMD_RELEASE} "
    CMD_BOOTSTRAP="echo ##: ${CMD_BOOTSTRAP} "
    CMD_BOOTSTRAP_CLEANUP="echo ##: ${CMD_BOOTSTRAP_CLEANUP} "
    CMD_RESET_ERRORS="echo ##: test ! -f /usr/pbulk-logs/meta/error || rm /usr/pbulk-logs/meta/error "
    CMD_BULKBUILD="echo ##: bulkbuild "
    CMD_BULKBUILD_RESTART="echo ##: bulkbuild-restart "
    DIRNAME='echo _dirname_ '
    # Kind of an exception, but as it used to collect
    # all the output of a phase, we want it to be echoed,
    # instead of saved in a log file
    TEE="cat - "
else
    RM='rm '
    MV='mv '
    CP='cp '
    CD='cd '
    LN='ln '
    SED='sed '
    DIRNAME='dirname '
    CHROOT='chroot '
    MKDIR='mkdir '
    TAR='tar '
    EXPORT='export '
    PKG_ADD='pkg_add '
    SYNCTREE='synctree '
    GIT='git '
    BMAKE='bmake '
    TEE='tee '
    CMD_RESET_ERRORS="test ! -f /usr/pbulk-logs/meta/error || rm /usr/pbulk-logs/meta/error "
    CMD_BULKBUILD="bulkbuild "
    CMD_BULKBUILD_RESTART="bulkbuild-restart "
fi

# Check at which step which should start :
# A missing chroot directory forces that stage (and, below, every later
# stage) to be rebuilt.
[ ! -d "${ROOT_MINIX}" ] && BUILD_ROOT_MINIX=${YES}
[ ! -d "${ROOT_BOOTSTRAP}" ] && BUILD_ROOT_BOOTSTRAP=${YES}
[ ! -d "${ROOT_PBULK}" ] && BUILD_ROOT_PBULK=${YES}
# Ensure that all the steps following the first to be generated
# are also re-generated.
[ ${BUILD_ROOT_MINIX} = ${YES} ] && BUILD_ROOT_BOOTSTRAP=${YES}
[ ${BUILD_ROOT_BOOTSTRAP} = ${YES} ] && BUILD_ROOT_PBULK=${YES}
#============================================================================
#######################################
# Cross-build a pristine MINIX root filesystem into ${ROOT_MINIX} from
# ${MINIXSRCDIR}, then copy the host's hosts/resolv.conf into the chroot.
# The whole step's output is duplicated into ${TOPDIR}/1-build_minix.log.
#######################################
build_minix() {
    echo ":-> Building minix chroot in ${ROOT_MINIX}"
    (
    # Merge stderr into stdout so the tee below captures everything.
    exec 2>&1
    set -e
    echo ":--> Building minix sources [${BUILD_START}]"
    ${CD} ${MINIXSRCDIR}
    HOST_CC=clang HOST_CXX=clang++ ./build.sh \
        -m i386 \
        -O ${OBJDIR} \
        -T ${TOOLDIR} \
        -D ${ROOT_MINIX} \
        -V SLOPPY_FLIST=yes \
        -V MKX11=yes \
        -V MKUPDATE=yes \
        -V MKLLVM=yes \
        -V MKLIBCXX=yes \
        -V MKGCCCMDS=no \
        -V MKLIBSTDCXX=no \
        -V MKKYUA=no \
        -V MKATF=no \
        distribution

    echo ":--> Copying config files"
    for f in hosts resolv.conf
    do
        [ -f /etc/${f} ] && ${CP} /etc/${f} ${ROOT_MINIX}/etc/${f}
    done
    ) | ${TEE} ${TOPDIR}/1-build_minix.log
    echo ":-> Building minix chroot done"
    return 0
}
#######################################
# Populate ${ROOT_BOOTSTRAP} (a copy of the MINIX rootfs) with a pkgsrc
# tree (copied locally or cloned from ${PKGSRC_URL}), prefetched distfiles,
# then — inside the chroot — bootstrap the pbulk toolchain (stage 1) and
# produce a binary pkgsrc bootstrap kit (stage 2).
# Note: the sh -c script is double-quoted, so every ${...}/$(...) inside it
# is expanded by THIS outer shell before the chroot runs it.
# Output is duplicated into ${TOPDIR}/2-build_bootstrap.log.
#######################################
build_bootstrap() {
    echo ":-> Building bootstrapped chroot"
    (
    exec 2>&1
    set -e
    echo ":--> Initializing chroot in ${ROOT_BOOTSTRAP} [${BUILD_START}]"
    if [ ${PKGSRC_COPY} = ${YES} ]
    then
        echo ":--> Copying from ${PKGSRCDIR}"
        # Copy and use our local pkgsrc repository as it is
        ${MKDIR} -p ${ROOT_BOOTSTRAP}${PKGSRC_PREFIX}
        ${SYNCTREE} -f ${PKGSRCDIR} ${ROOT_BOOTSTRAP}${PKGSRC_PREFIX} >/dev/null
    else
        echo ":--> Cloning from ${PKGSRC_URL}/${PKGSRC_BRANCH}"
        # Copy our own pkgsrc repository there so the new
        # repository does not have to retrieve objects we
        # already have locally.
        ${MKDIR} -p ${PKGSRC_GIT}
        ${SYNCTREE} -f ${PKGSRCDIR}/.git ${PKGSRC_GIT} >/dev/null
        ${GIT} --git-dir ${PKGSRC_GIT} remote rm ${PKGSRC_REMOTE}
        ${GIT} --git-dir ${PKGSRC_GIT} remote add ${PKGSRC_REMOTE} ${PKGSRC_URL}
        ${GIT} --git-dir ${PKGSRC_GIT} fetch ${PKGSRC_REMOTE}
        ${GIT} --git-dir ${PKGSRC_GIT} checkout -f ${PKGSRC_REMOTE}/${PKGSRC_BRANCH}
    fi

    # Bonus distfiles
    echo ":--> Copying prefetched distfiles from ${DISTFILESDIR}"
    ${MKDIR} -p ${ROOT_BOOTSTRAP}${PKGSRC_PREFIX}/distfiles
    ${SYNCTREE} -f ${DISTFILESDIR} ${ROOT_BOOTSTRAP}${PKGSRC_PREFIX}/distfiles >/dev/null

    # Ensure that the package directoy is clean and exists
    ${RM} -rf ${ROOT_BOOTSTRAP}${PKGSRC_PREFIX}/packages/$(uname -r)/
    ${MKDIR} -p ${ROOT_BOOTSTRAP}${PKGSRC_PREFIX}/packages/$(uname -r)/$(uname -p)/All

    echo ":--> Bootstrapping pbulk"
    ${CHROOT} ${ROOT_BOOTSTRAP} sh -c \
        "(
         set -e
         ${EXPORT} PATH=${CHROOT_PATH}
         ${EXPORT} LD_LIBRARY_PATH=${LD_CHROOT_PATH}
         ${CD} ${PKGSRC_PREFIX}

         # First stage, PBULK bootstrap & installation
         # Trim the .ifdef BSD_PKG_MK and .endif lines to make a 'fragment'
         # and adapt a few path to the ones expected for pbulk
         ${SED} \
            -e '/.*BSD_PKG_MK/d' \
            -e 's@VARBASE?=.*@VARBASE= '${BOOTSTRAP_PREFIX}'/var@' \
            -e 's@PKG_DBDIR?=.*@PKG_DBDIR= '${BOOTSTRAP_PREFIX}'/pkgdb@' \
            -e 's@WRKOBJDIR?=.*@WRKOBJDIR= '${BOOTSTRAP_PREFIX}'/work@' \
            ./minix/mk.conf.minix \
         > ./minix/mk.conf.minix.pbulk.frag

         echo ':--> Building pbulk kit'
         ${CMD_BOOTSTRAP} \
            --prefix=${BOOTSTRAP_PREFIX} \
            --varbase=${BOOTSTRAP_PREFIX}/var \
            --pkgdbdir=${BOOTSTRAP_PREFIX}/pkgdb \
            --workdir=${BOOTSTRAP_PREFIX}/work \
            --mk-fragment=./minix/mk.conf.minix.pbulk.frag

         # Install pbulk into /usr/pbulk
         echo ':--> Building and installing pbulk'
         ${BMAKE} -C ./devel/pth package-install
         ${BMAKE} -C ./pkgtools/pbulk package-install
         ${SED} -e 's/OP_SYS_VER/'$(uname -r)'/g' ./minix/pbulk.conf > ${BOOTSTRAP_PREFIX}/etc/pbulk.conf
         # First stage: done

         echo ':--> Bootstrap cleanup'
         ${CMD_BOOTSTRAP_CLEANUP}

         # Second stage, pkgsrc bootstrap & installation
         # Trim the .ifdef BSD_PKG_MK and .endif lines to make a 'fragment'
         ${SED} -e '/.*BSD_PKG_MK/d' \
            ./minix/mk.conf.minix \
         > ./minix/mk.conf.minix.frag

         echo ':--> Building binary pkgsrc kit'
         ${CMD_BOOTSTRAP} \
            --varbase=/usr/pkg/var \
            --pkgdbdir=/usr/pkg/var/db/pkg \
            --mk-fragment=./minix/mk.conf.minix.frag \
            --workdir=${PKGSRC_PREFIX}/work \
            --gzip-binary-kit=${BOOTSTRAP_PREFIX}/bootstrap.tar.gz

         ${RM} -rf ./packages/$(uname -r)/
         ${MKDIR} -p ./packages/$(uname -r)/$(uname -p)/All

         # Use the same mk.conf that our users instead of the hybrid
         # auto-generated mk.conf from bootstrap.
         ${TAR} -C /tmp -xzf ${BOOTSTRAP_PREFIX}/bootstrap.tar.gz
         ${CP} ./minix/mk.conf.minix /tmp/usr/pkg/etc/mk.conf
         ${TAR} -C /tmp -hzcf ${BOOTSTRAP_PREFIX}/bootstrap.tar.gz usr
         ${RM} -rf /tmp/usr
         # Second stage: done
        )"
    echo ":--> Bootstrapping pbulk done"
    ) | ${TEE} ${TOPDIR}/2-build_bootstrap.log
    echo ":-> Building bootstrapped chroot done"
    return 0
}
#######################################
# Run a full 'bulkbuild' from scratch inside the ${ROOT_PBULK} chroot.
# Output is duplicated into ${TOPDIR}/3-pbulk.log.
#######################################
pbulk_start() {
    echo ":-> Building packages from scratch"
    (
    exec 2>&1
    set -e
    ${CHROOT} ${ROOT_PBULK} sh -c \
        "(
         set -e
         ${EXPORT} PATH=${CHROOT_PATH}
         ${EXPORT} LD_LIBRARY_PATH=${LD_CHROOT_PATH}
         ${CD} ${PKGSRC_PREFIX}
         echo ':--> Starting build ['${BUILD_START}']'
         ${CMD_BULKBUILD}
        )"
    ) | ${TEE} ${TOPDIR}/3-pbulk.log
    echo ":-> Building packages from scratch done"
    return 0
}
#######################################
# Resume an interrupted bulk build inside ${ROOT_PBULK}: clear pbulk's
# error marker, then run 'bulkbuild-restart'.
# Output is duplicated into ${TOPDIR}/3-pbulk.log.
#######################################
pbulk_restart() {
    echo ":-> Building packages from previous build"
    (
    exec 2>&1
    set -e
    ${CHROOT} ${ROOT_PBULK} sh -c \
        "(
         set -e
         ${EXPORT} PATH=${CHROOT_PATH}
         ${EXPORT} LD_LIBRARY_PATH=${LD_CHROOT_PATH}
         ${CD} ${PKGSRC_PREFIX}
         echo ':--> Resetting error file'
         ${CMD_RESET_ERRORS}
         echo ':--> Restarting build ['${BUILD_START}']'
         ${CMD_BULKBUILD_RESTART}
        )"
    ) | ${TEE} ${TOPDIR}/3-pbulk.log
    echo ":-> Building packages from previous build done"
}
#============================================================================
# Initializations are done, start applying the requested actions on the system
BUILD_START=$(date)
echo -e "\n:: pbulk started on ${BUILD_START}"

# Run each stage that the checks above flagged as needed; each later stage
# starts from a synctree copy of the previous stage's chroot.
if [ ${BUILD_ROOT_MINIX} = ${YES} ]
then
    echo -e "\n:> Generating minix root fs."
    ${RM} -rf ${ROOT_MINIX}
    # Ensure presence of destination directory
    ${MKDIR} -p ${ROOT_MINIX}
    build_minix
fi

if [ ${BUILD_ROOT_BOOTSTRAP} = ${YES} ]
then
    echo -e "\n:> Bootstrapping pkgsrc."
    # Ensure the new chroot is clean.
    ${MKDIR} -p ${ROOT_BOOTSTRAP}
    ${SYNCTREE} -f ${ROOT_MINIX} ${ROOT_BOOTSTRAP} >/dev/null
    build_bootstrap
fi

if [ ${BUILD_ROOT_PBULK} = ${YES} ]
then
    echo -e "\n:> Initializing pbulk root."
    # Ensure the new chroot is clean.
    ${MKDIR} -p ${ROOT_PBULK}
    ${SYNCTREE} -f ${ROOT_BOOTSTRAP} ${ROOT_PBULK} >/dev/null
    echo -e "\n:> Building packages from scratch."
    pbulk_start
else
    # We want to re-use a previous pbulk.
    # Just make sure that any modification within the pkgsrc tree is visible
    # find param to make synctree keep files in packages and distfiles
    # Also requires regenerating the bootstrap tarball, as well as updating
    # ROOT_PBULK/usr/pbulk/etc/pbulk.conf
    #${SYNCTREE} -f ${PKGSRCDIR} ${ROOT_PBULK}${PKGSRC_PREFIX} >/dev/null
    echo -e "\n:> Restarting build of packages."
    pbulk_restart
fi

# BSD-style date(1): '-j' formats a given time spec without setting the
# clock; the build end time feeds the timestamped save directories below.
_build_end=$(date '+%Y%m%d%H%M.%S')
BUILD_END=$(date -j ${_build_end})

# We have to do this here, otherwise the date field would be empty
: ${TIMESTAMP=$(date -j '+%Y%m%d-%H%M%S' ${_build_end})}
: ${ROOT_LOGS=${SAVEDIR}/${TIMESTAMP}.logs}
: ${ROOT_DISTFILES=${SAVEDIR}/${TIMESTAMP}.distfiles}
: ${ROOT_PACKAGES=${SAVEDIR}/${TIMESTAMP}.packages}

if [ ${SAVE_LOGS} = ${YES} ]
then
    ${MKDIR} -p ${ROOT_LOGS}
    ${CP} -pfr ${TOPDIR}/1-build_minix.log ${ROOT_LOGS}/
    ${CP} -pfr ${TOPDIR}/2-build_bootstrap.log ${ROOT_LOGS}/
    ${CP} -pfr ${TOPDIR}/3-pbulk.log ${ROOT_LOGS}/
    ${SYNCTREE} -uf ${ROOT_PBULK}/usr/pbulk-logs ${ROOT_LOGS}/pbulk-logs
fi

if [ ${SAVE_DISTFILES} = ${YES} ]
then
    ${SYNCTREE} -uf ${ROOT_PBULK}${PKGSRC_PREFIX}/distfiles ${ROOT_DISTFILES}
fi

if [ ${SAVE_PACKAGES} = ${YES} ]
then
    ${SYNCTREE} -uf ${ROOT_PBULK}${PKGSRC_PREFIX}/packages ${ROOT_PACKAGES}
fi

echo -e "\n:: pbulk finished:"
echo ":> started on  : ${BUILD_START}"
echo ":> finished on : ${BUILD_END}"
echo ":> Build logs  : ${ROOT_LOGS}"
echo ":> Distfiles   : ${ROOT_DISTFILES}"
echo ":> Packages    : ${ROOT_PACKAGES}"
| true |
3af29720a47b734bb4b80ece61b2b9aebf36f5ff | Shell | cmk/icfpc2018 | /bin/solvers/iwiwi-002+chokudai-008 | UTF-8 | 309 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/bin/bash -eu
# Guard: both the target model ($1) and source model ($2) are required.
# "${1:-}"/"${2:-}" avoid an "unbound variable" abort from the shebang's
# 'set -u' before the test can run, and [[ ... || ... ]] replaces the
# obsolete test(1) '-o' operator.  ${BASH_SOURCE} is quoted inside dirname
# so a path containing spaces cannot word-split.
if [[ -z "${1:-}" || -z "${2:-}" ]]; then exit 1; fi
echo ---- DESTRUCTION ---- 1>&2
"$(dirname "${BASH_SOURCE}")/iwiwi-002" "" "$2" | grep -v 'HALT'
echo ---- CONSTRUCTION ---- 1>&2
mono "$(dirname "${BASH_SOURCE}")/../chokudai-solver/008.exe" "$1" | "$(dirname "${BASH_SOURCE}")/../run_postproc" "$1" /dev/stdin
| true |
8a703bc15eab9ebfbde9e89f18408af2c1a18c31 | Shell | luizbrito/shellscript-tools | /remove_duplicidade/remove_duplicidade.sh | UTF-8 | 2,899 | 4.09375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Remove lines that both input files share: output = header of file 2 +
# lines present only in file 2 + a "90000" trailer record.
# (Comments translated from Portuguese; user-facing banners kept as-is.)
echo " "
echo " #######################################################################"
echo " #                                                                     #"
echo " #                  REMOVE DUPLICIDADE                                 #"
echo " #                                                                     #"
echo " #  Procedimento para remoção de linhas duplicadas de 2 arquivos.      #"
echo " #  A saida será um arquivo que possua linhas apenas em um dos         #"
echo " #  arquivos apenas o que não existe em                                #"
echo " #                                                                     #"
echo " #######################################################################"
echo " "

# Validate the input parameters.
if [ $# -lt 3 ]; then
    echo " "
    echo " ######################################################################"
    echo " #                                                                    #"
    echo " # Utilizar o padrão:                                                 #"
    echo " # ./remove_duplicados.sh arquivo1 arquivo2 saida                     #"
    echo " #                                                                    #"
    echo " # NUMERO DE PARAMETROS: $# INFORMADOS.                               #"
    echo " #                                                                    #"
    echo " # - O Shell deve ser utilizado para realizar a remoção das linhas    #"
    echo " # duplicadas das ordens já carregadas no arquivo processado na hora  #"
    echo " # anterior.                                                          #"
    echo " #                                                                    #"
    echo " # - Deve ser passado 3 parametros.                                   #"
    echo " # 1° - ARQUIVO_2021030101_1.CSV (arquivo base)                       #"
    echo " # 2° - ARQUIVO_2021030101_2.CSV (diferença com o anterior)           #"
    echo " # 3° - ARQUIVO_SAIDA_2021030101_3.CSV                                #"
    echo " #                                                                    #"
    echo " ######################################################################"
    # 'exit -1' is non-portable (wraps to 255); use a plain 1.
    exit 1
fi

# Report whether each INPUT file exists.
# BUG FIX: the original iterated 'for ARG in $*-1', which word-splits all
# arguments and appends a literal "-1" to the last one, so it also tested
# the (not yet created) output file plus a garbage name.  Only the two
# input files need to exist.
for ARG in "$1" "$2"; do
    if [ -e "$ARG" ] ; then
        echo "O arquivo - $ARG - existe"
    else
        echo "O arquivo - $ARG - não existe"
    fi
done

# Create temporary copies without the header (first line).
sed '1 d' "$1" > "$1_tmp"
sed '1 d' "$2" > "$2_tmp"

# Remove the trailer (last line).
sed '$d' "$1_tmp" > "$1_tmp2"
sed '$d' "$2_tmp" > "$2_tmp2"

# Copy the header from the second file.
head -1 "$2" > "$3"

# Append the lines present only in the second file; the '^> ' anchor makes
# sure only diff's marker is stripped, not "> " occurring inside a line.
diff "$1_tmp2" "$2_tmp2" | grep '> ' | sed 's/^> //' >> "$3"

# Append the trailer record.  Equivalent to the original
# echo "`cat $3``echo -e '\n90000'`" > $3 round-trip, without re-reading
# the whole file.
echo "90000" >> "$3"

# Clean up the temporary files (comment this out to debug).
rm "$1_tmp" "$2_tmp" "$1_tmp2" "$2_tmp2"
| true |
f6100a92df1fb8127163c5bfb94d0312d59700f8 | Shell | caryyu/gitea-helm-chart | /scripts/init.sh | UTF-8 | 247 | 3.15625 | 3 | [] | no_license | #!/bin/bash
set -euo pipefail
SCRIPT_PATH=/etc/gitea/scripts
function git_repair() {
mkdir -p /data/git
chown git /data/git
chgrp git /data/git
}
function main() {
git_repair
su git -c "sh $SCRIPT_PATH/init-conf.sh"
}
main | true |
f9d94143d333a466ed521f883af88495b3dc03fa | Shell | Rrhul0/ArchInstall | /create-root.sh | UTF-8 | 2,878 | 3.734375 | 4 | [] | no_license | #!/bin/bash
echo "this script will not change disk partition size, add or delete so we suggest to do partition manually before running this script"
sleep 5
echo "testing your internet connection"
ping -c 1 google.com >/dev/null
if [[ $? -eq 0 ]]
then
echo 'connected to internet'
else
echo 'not connected to internet'
echo 'if you have wifi try using "man iwctl"'
exit 0
fi
echo "IF YOU SEE ANY OTHER PROCESS THAT YOU NOT WANT WE SUGGEST YOU TO RESTART THIS SCRIPT RIGHT AWAY" > finalview
echo "#!/bin/bash" > finalscript
echo "make sure your partitions ready"
echo "mounting partitions in /mnt"
lsblk
echo "enter EFI partition name like sda1 or sda2 or lvme(commonly this partition is in size of 200MB to 1GB and found in starting of disk)"
read efi
echo "enter root partition name just like before like sda1 or sda2 or lvme"
read root
echo "enter filesystem type for root partition eg. ext4, btrfs 'if not sure what to use just use ext4' WARNING:All data of this partition will be destroyed"
read fsroot
echo "> your root partition (/dev/$root) will be formated with $fsroot file system WARNING:All data inside this partition will be destroyed" >> finalview
echo "mkfs.$fsroot /dev/$root
mount /dev/$root /mnt" >> finalscript
echo "> your EFI partition (/dev/$efi) will be mounted at /boot/efi and will not be formated" >> finalview
echo "mkdir -p /mnt/boot/efi
mount /dev/$efi /mnt/boot/efi" >> finalscript
echo "enter home partition name (if not have dedicated home partiton just leave it empty)"
lsblk
read home
if [[ -n $home ]]
then
echo "Want to format your root partition? 'Yes OR No' Recommandation:NO"
read fhome
if [[ $fhome = yes ]] || [[ $fhome = Yes ]] || [[ $fhome = Y ]] || [[ $fhome = y ]]
then
echo "enter filesystem type for home partition eg. ext4, btrfs 'if not sure what to use just use ext4' WARNING:All data of this partition will be destroyed"
read fshome
echo "> your home partition (/dev/$home) will be formated with $fshome and will be mounted at /home" >> finalview
echo "mkfs.$fshome /dev/$home" >> finalscript
else
echo "> your home partition (/dev/$home) will be mounted at /home" >> finalview
fi
echo "mkdir -p /mnt/home
mount /dev/$home /mnt/home" >> finalscript
fi
echo "> installing base(base for archlinux), linux-firmware(firmware for linux), linux(kernel), nano(for text editor)" >> finalview
echo "pacstrap /mnt base linux-firmware linux nano" >> finalscript
echo "> at final fstab file will be placed at /etc/fstab for automatic mount root and home at system startup" >> finalview
echo "genfstab -U /mnt >> /mnt/etc/fstab" >> finalscript
echo "press any key to start the processes" >> finalview
echo 'echo "If you not see any error your archlinux root must be created successfully"' >> finalscript
clear
cat finalview
read stop
bash finalscript
| true |
f02b1a6383f6755283fd0ddb9cbdb43a11b755a8 | Shell | Gurulhu/SYNWhale | /images/snort/docker-entrypoint.sh | UTF-8 | 603 | 2.734375 | 3 | [] | no_license | #!/bin/bash
if [ ! -z "$OINKCODE" ]; then
cd /opt/pulledpork;
cp etc/pulledpork.conf etc/pulledpork.conf.bkp;
sed "s/<oinkcode>/"${OINKCODE}"/g" etc/pulledpork.conf.bkp > etc/pulledpork.conf;
mkdir -p /usr/local/etc/snort/rules/iplists;
./pulledpork.pl -c etc/pulledpork.conf -g;
tar xzf /tmp/snortrules-snapshot*.tar.gz -C /etc/snort/;
cp /etc/snort/etc/snort.conf /etc/snort/etc/snort.conf.bkp;
sed "s/decompress_swf/#decompress_swf/g" /etc/snort/etc/snort.conf.bkp > /etc/snort/etc/snort.conf;
snort -c /etc/snort/etc/snort.conf -A full -i any;
else echo "ERROR: OINKCODE not set.";
fi
/bin/bash
| true |
5a9a9d5838de9732121af241d11fb08f3c5f7865 | Shell | devrafadias/shell-script | /udemy/aula39/script.sh (39) | UTF-8 | 969 | 2.828125 | 3 | [] | no_license | #!/bin/bash
#dialog --msgbox 'Curso Completo de Shell Script' 5 40
#dialog \
# --title 'Shell Script' \
# --sleep 3 \
# --infobox 'Aguarde 3 segundos...' \
# 5 40
#dialog \
# --title 'Entrada de texto' \
# --inputbox 'Digite sua linguagem preferida: ' \
# 0 0
#dialog \
# --title 'Seleção' \
# --checklist 'O que você gosta de fazer?' \
# 0 0 0 \
# bola 'Jogar bola' on \
# bicicleta 'Andar de bicileta' off \
# tenis 'Jogar tenis' off \
# volei 'Jogar volei' on
#dialog \
# --title 'Password' \
# --passwordbox 'Digite a sua senha: ' \
# 0 0
#dialog \
# --title 'Pergunta' \
# --radiolist 'Qual seu nivel de Shell Script' \
# 0 0 0 \
# Iniciante 'até 2 anos' on \
# Intermediario 'até 4 anos' off \
# Avancado 'até 8 anos' off \
# ChuckNorris 'mais de 8 anos' off
#dialog \
# --title 'Curso de Shell Script' \
# --textbox /etc/passwd \
# 0 0
dialog \
--title 'Curso de Shell Script' \
--yesno 'Voce gostou do curso?' \
0 0 | true |
4af705a50e887a8b2962445a27a4257b4fb67a52 | Shell | fmi-tools/fmi-library | /dev/git-hooks/install.sh | UTF-8 | 1,051 | 4.1875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# This script installs a wrapper to the hook scripts to the .git's hook dir.
# By using a wrapper, we only have to call this script once instead of every
# time the file has been updated.
set -eu -o pipefail
# --- LOCAL_FUNCTIONS ---------------------------------------------- #
# echoes all arguments to stderr
err() {
>&2 echo "$0: ERROR: $@"
}
# --- INPUT_VALIDATION ---------------------------------------------- #
n_args_exp=0
if [[ $# -ne $n_args_exp ]]; then
err "invalid number of input args, exp: $n_args_exp, act: $#"
exit 1
fi
# --- SCRIPT_START -------------------------------------------------- #
src_dir="$(dirname "$(realpath "$0")")"
dst_dir="$src_dir/../../.git/hooks"
hook_dst="$dst_dir/pre-commit"
if [ -f "$hook_dst" ]; then
echo "warning: skipping already existing hook script: $hook_dst"
else
cat <<\EOF > "$dst_dir/pre-commit"
#!/bin/sh
# This script is just a wrapper for the hook in the repo.
dir_abs="$(dirname "$(realpath "$0")")"
. "$dir_abs/../../dev/git-hooks/pre-commit"
EOF
fi
| true |
6cc437bd0238c4d5d90206575e8f2e35c39db77a | Shell | fgouget/wine-tools | /winetest/winetest.cron | UTF-8 | 2,484 | 3.875 | 4 | [] | no_license | #!/bin/sh
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Cron job for generating winetest reports. Use it like
# */5 * * * * winetest.cron WORKDIR
name0=`basename "$0"`
tools=`dirname "$0"`
case "$tools" in
/*) ;; # Nothing to do
*) tools=`pwd`/"$tools" ;;
esac
workdir="$1"
if [ -z "$workdir" ]
then
echo "$name0:error: you must specify the work directory as the first parameter" >&2
exit 1
fi
lock="/tmp/winetest.lock"
# expiration age (in days) before results get archived
expire=120
if [ ! -f "$lock" ]
then
touch "$lock"
cd "$workdir"
refresh_index=""
refresh_errors=""
while true
do
"$tools/dissect"
case $? in
0) refresh_index=1 ;;
1) refresh_errors=1 ;;
*) break ;;
esac
done
if [ -n "$refresh_index" ]
then
while "$tools/gather"; do true; done
fi
if [ ! -d data/tests -o ! -d old-data ]
then
mkdir -p data/tests old-data
refresh_index=1
refresh_errors=1
fi
[ -n "$refresh_index" ] && "$tools/build-index"
[ -n "$refresh_errors" ] && "$tools/build-errors"
# archive old results
(
set -e
cd old-data
dir=`find . -maxdepth 1 -mtime "+$expire" -type d -print -quit`
test -n "$dir"
tar cfj "$dir.tar.bz2" "$dir"
touch -r "$dir" "$dir.tar.bz2"
rm -rf "$dir"
)
# remove old test builds
(
set -e
mkdir -p builds
cd builds
find . -mtime "+$expire" -name "winetest*.exe" -print0 | \
xargs -0 rm -f
)
# remove old queue files
find queue -maxdepth 1 -mtime +30 -name "err*" -print0 | xargs -0 rm -rf
find queue -maxdepth 1 -mtime +30 -name "CGI*" -print0 | xargs -0 rm -f
rm "$lock"
fi
| true |
5c43ec5113b8b2aa479accfb9e28828c39e1f35a | Shell | jhd/group21 | /loadbalancer/files/swap_files/time.sh | UTF-8 | 189 | 2.546875 | 3 | [] | no_license | #!/bin/bash
case $3 in
''|*[!0-9]*) let interval=5 ;;
*) let interval=$3 ;;
esac
php /var/www/files/swap_files/swap.php $1
sleep $interval
php /var/www/files/swap_files/swap.php $2
| true |
b5a3aafadc41e0287021f3384b4e32188eea8ffc | Shell | abigailStev/cross_correlation | /loop_ccf.sh | UTF-8 | 6,255 | 3.25 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
################################################################################
##
## Bash script to run ccf.py, plot_ccf.py, plot_multi.py, and plot_2Dccf.py.
##
## Runs ccf.py for many obsIDs.
##
## Don't give command line arguments. Change things in this script below.
##
## Change the directory names and specifiers before the double '#' row to best
## suit your setup.
##
## Notes: bash 3.* and conda 4.0.7+ with python 2.7.*
## must be installed in order to run this script. For the gif-making to
## work, ImageMagick must be installed (open source)
##
## Written by Abigail Stevens, A.L.Stevens at uva.nl, 2015-2016
##
################################################################################
##########################################
## Checking the number of input arguments
##########################################
if (( $# != 0 )); then
echo -e "\tDo not give command line arguments. Usage: ./loop_ccf.sh\n"
exit
fi
################################################################################
home_dir=$(ls -d ~)
day=$(date +%y%m%d) # make the date a string and assign it to 'day'
exe_dir="$home_dir/Dropbox/Research/cross_correlation"
out_dir="$exe_dir/out_ccf"
# prefix="j1808-2002"
prefix="j1808-1HzQPO"
# prefix="GX339-BQPO"
# obsID="95335-01-01-06"
# prefix="4u1636superburst"
obsID_list="$home_dir/Dropbox/Lists/${prefix}_obsIDs_goodSN.lst"
# bkgd_spec="$home_dir/Reduced_data/$prefix/evt_bkgd_rebinned.pha"
ec_table_file="$xte_exe_dir/e-c_table.txt"
chan_bin_file="$home_dir/Reduced_data/${prefix}/chan.txt"
energies_file="$home_dir/Reduced_data/${prefix}/energies.txt"
dt=128
numsec=128
testing=0 # 0 for no, 1 for yes
filtering=0 # 0 for no, 1 for yes
tlen=100
obs_epoch=5
t_ext="fits"
p_ext="png"
plots_1d="$out_dir/${prefix}_gif_1d_goodSN.txt"
plots_2d="$out_dir/${prefix}_gif_2d_goodSN.txt"
gif_name_1d="$out_dir/${day}_t${dt}_${numsec}sec_1d_goodSN.gif"
gif_name_2d="$out_dir/${day}_t${dt}_${numsec}sec_2d_goodSN.gif"
################################################################################
################################################################################
if [ -e "$plots_1d" ]; then rm "$plots_1d"; fi; touch "$plots_1d"
if [ -e "$plots_2d" ]; then rm "$plots_2d"; fi; touch "$plots_2d"
if [ ! -e "$energies_file" ]; then
if [ -e "$ec_table_file" ] && [ -e "$chan_bin_file" ]; then
python "$xte_exe_dir"/channel_to_energy.py "$ec_table_file" \
"$chan_bin_file" "$energies_file" "$obs_epoch"
else
echo -e "\tERROR: channel_to_energy.py not run. ec_table_file and/or \
chan_bin_file do not exist."
fi
fi
for obsID in $( cat $obsID_list ); do
red_dir="$home_dir/Reduced_data/${prefix}/$obsID"
# red_dir="$home_dur/Dropbox/Research/sample_data"
in_file="$red_dir/GTId_eventlist.fits"
if [ ! -d "$out_dir" ]; then mkdir -p "$out_dir"; fi
if (( $testing == 0 )); then
out_file="$out_dir/${obsID}_${day}_t${dt}_${numsec}sec"
plot_root="$out_dir/${obsID}_${day}_t${dt}_${numsec}sec"
elif (( $testing == 1 )); then
out_file="$out_dir/test_${obsID}_${day}_t${dt}_${numsec}sec"
plot_root="$out_dir/test_${obsID}_${day}_t${dt}_${numsec}sec"
fi
##################
## Running ccf.py
##################
for (( i=0; i<64; i++ )); do
tmp_file="$out_dir/ccf_segs_${i}.dat"
if [ -e "$tmp_file" ]; then rm "$tmp_file"; fi; touch "$tmp_file"
done
if [ -e "$in_file" ] && [ -e "$bkgd_spec" ]; then
time python "$exe_dir"/ccf.py "${in_file}" "${out_file}.${t_ext}" \
-b "$bkgd_spec" -n "$numsec" -m "$dt" -t "$testing" -f "$filtering"
elif [ -e "$in_file" ]; then
time python "$exe_dir"/ccf.py "${in_file}" "${out_file}.${t_ext}" \
-n "$numsec" -m "$dt" -t "$testing" -f "$filtering"
else
echo -e "\tERROR: ccf.py was not run. Eventlist and/or background \
energy spectrum doesn't exist."
fi
#############
## Plotting
############
if [ -e "${out_file}.${t_ext}" ]; then
multi_plot="${plot_root}_multiccfs.${p_ext}"
plot_file_2d="${plot_root}_2Dccf.${p_ext}"
plot_fits="${plot_root}_2Dccf.fits"
########################################
## Plotting 1D single and multiple CCFs
########################################
python "$exe_dir"/plot_ccf.py "${out_file}.${t_ext}" \
-o "${plot_root}" -p "${prefix}/${obsID}"
# if [ -e "${plot_root}_chan_06.${p_ext}" ]; then open "${plot_root}_chan_06.${p_ext}"; fi
echo "${plot_root}_chan_06.${p_ext}" >> $plots_1d
## Could also use stars here instead of the chan num
python "$exe_dir"/plot_multi.py "${out_file}.${t_ext}" "$multi_plot" \
-p "${prefix}"
# if [ -e "$multi_plot" ]; then open "$multi_plot"; fi
###################
## Plotting 2D CCF
###################
if [ -e "${out_file}.${t_ext}" ]; then
python "$exe_dir"/plot_2d.py "${out_file}.${t_ext}" \
-o "${plot_file_2d}" -p "${prefix}" -l "$tlen" -e "$energies_file"
# if [ -e "${plot_file_2d}" ]; then open "${plot_file_2d}"; fi
echo "$plot_file_2d" >> $plots_2d
fi
detchans=$(python -c "import tools; print int(tools.get_key_val('${out_file}.fits', 0, 'DETCHANS'))")
# if [ -e "$out_dir/temp.dat" ]; then
# fimgcreate bitpix=-32 \
# naxes="${tlen},${detchans}" \
# datafile="$out_dir/temp.dat" \
# outfile="$plot_fits" \
# nskip=1 \
# history=true \
# clobber=yes
# else
# echo -e "\tERROR: FIMGCREATE did not run. 2Dccf temp file does not exist."
# fi
#
# if [ -e "$plot_fits" ]; then
# echo "FITS 2D ccf ratio image: $plot_fits"
# else
# echo -e "\tERROR: FIMGCREATE was not successful."
# fi
else
echo -e "\tERROR: Plots were not made. CCF output file does not exist."
fi
done
################################################
## Making the plots into a gif with ImageMagick
################################################
convert @"$plots_1d" "$gif_name_1d"
if [ -e "$gif_name_1d" ]; then
echo "GIF made! $gif_name_1d"
open "$gif_name_1d"
fi
convert @"$plots_2d" "$gif_name_2d"
if [ -e "$gif_name_2d" ]; then
echo "GIF made! $gif_name_2d"
open "$gif_name_2d"
fi
################################################################################
## All done!
################################################################################
| true |
1b57138fd3e3cae3c3aa36eb26bad61d83877891 | Shell | fishlamp-obsolete-ignore-these-repos/fishlamp-scripts | /old/fishlamp-tag-version.sh | UTF-8 | 1,185 | 3.875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# fail if error
set -e
if [ ! -f ".fishlamp-root" ]; then
echo "##! please run from root of fishlamp repo"
exit 1;
fi
branch=`git rev-parse --abbrev-ref HEAD`
if [[ "$branch" != "master" ]]; then
echo "##! please tag main branch, you're on $branch"
exit 1;
fi
file="Version.plist"
header="Frameworks/Core/Classes/FishLampVersion.h"
packmule_version_file="Tools/PackMule/PackMule/Info.plist"
# bump the version
version=`version-get $file`
version=`version-bump-build "$version"`
# set versions in file
version-set "$file" "$version" > /dev/null 2>&1
version-set "$packmule_version_file" "$version" > /dev/null 2>&1
git add "$file"
git add "$packmule_version_file"
echo "# updated version in $file and $packmule_version_file to $version"
# generate header file
echo "// version $version tagged $(DATE)" > "$header"
echo "#ifndef FishLampVersion" >> "$header"
echo "#define FishLampVersion @\"$version\"" >> "$header"
echo "#endif" >> "$header"
git add "$header"
echo "# updated $header"
git commit -a -m "Tagged version $version"
tag="v$version"
git tag "$tag"
echo "# added tag \"$tag\""
git push --tags origin $branch
echo "# all done"
| true |
a575d4b0963251b9b48bdba9239ff536458dffd9 | Shell | arkivm/aurora | /scripts/redis.sh | UTF-8 | 1,435 | 3.40625 | 3 | [
"BSD-2-Clause"
] | permissive | # NOTE: This file isn't standalone, it needs the definition of $OUTDIR and
# $SCRIPTDIR. This is by design, since this is supposed to be a helper library.
# Redis benchmark parameters
#Script for the Redis benchmarks. Run at the base Redis directory.
function rdstart {
# Dump the conf into a file in the output directory
python3 "$CONFIGDUMPSCRIPT" "$REDISCONF" "$REDISCSVCONF"
# XXX Dump the benchmark parameters to the output too
# Run the server in the background
"$SERVERBIN" "$REDISCONF" &
# XXX Possible dtrace script here?
}
# Stop the benchmark
function rdstop {
# Kill the server and stop tracing
pkill "$SERVER"
pkill "$CLIENT"
pkill dtrace
# Clean up the output
#rm -rf "$OUTDIR"
# Clean up any Redis backup files
rm -f *.rdb *.aof
}
# ------------------------------------------------------------------
# The tests to be run by the client
TESTS="SET,GET"
# Number of redis clients, need a lot for throughput
CLIENTNO="16"
# Number of requests
REQUESTS=$((1024 * 1024 * 32))
# Size of request values controls memory usage along with keyspace
VALSIZE="4096"
# Request pipelining depth, amortizes latency
PIPELINE="10"
#Size of the key space, controls total memory usage
KEYSPACE=$((1024 * 1024 * 1024))
function rdbench {
# Run the benchmark
"$CLIENTBIN" -t "$TESTS" -c "$CLIENTNO" -n "$REQUESTS" -d "$VALSIZE" \
-P "$PIPELINE" -k "$KEYSPACE" &
}
| true |
7a61ab7b858fe9095ecac4a7974fb811551bc204 | Shell | kostrzewa/jobscripts | /generators/highstat/jobtemplate.sh | UTF-8 | 2,282 | 3.296875 | 3 | [] | no_license | #!/bin/sh
#
#(otherwise the default shell would be used)
#$ -S /bin/sh
#
#(the running time for this job)
#$ -l h_rt=H_RT
#$ -l s_rt=S_RT
#$ -l h_vmem=3G
#
#(stderr and stdout are merged together to stdout)
#$ -j y
#
# redirect stdout and stderr to /dev/null
#$ -o /dev/null
#
#(send mail on job's end and abort)
#$ -m bae
# queue name and number of cores
#$ -pe QUEUE NCORES
#$ -P etmc
# number of mpi processes
NPROCS=NP
# basename, e.g. hmc0
BASENAME=BN
# e.g. openmp_noreduct
ADDON=AN
# e.g. highstatX
SUBDIR=SD
# "s" for start, "c" for continue
STATE=ST
# numerical counter for number of continue script
# if STATE=c
NCONT=NC
ODIR=OD
EFILE=EF
IFILE=IF
ITOPDIR=ITD
if [[ ${STATE} == "s" ]]; then
if [[ ! -d ${ODIR} ]]; then
mkdir -p ${ODIR}
fi
fi
if [[ ! -d ${ODIR} ]]
then
echo "output directory ${ODIR} could not be found! Aborting!"
exit 111
fi
cd ${ODIR}
# write stdout and stderr into tmp dir, will be copied to output at the end
exec > ${TMPDIR}/stdout.txt.${JOB_ID} 2> ${TMPDIR}/stderr.txt.${JOB_ID}
case ${BASENAME} in
*mpihmc211*)
cp ${ITOPDIR}/roots_mpihmc211.dat ${ODIR}
;;
*hmc2*)
cp ${ITOPDIR}/normierungLocal.dat ${ODIR}
cp ${ITOPDIR}/Square_root_BR_roots.dat ${ODIR}
;;
*ndclover*)
cp ${ITOPDIR}/clover_roots.dat ${ODIR}
cp ${ITOPDIR}/clover_roots_2.dat ${ODIR}
cp ${ITOPDIR}/clover_roots_problematic.dat ${ODIR}
;;
esac
case ${ADDON} in
*MPI*)
export NPN=8
export BINDING="-cpus-per-proc 1 -npersocket 4 -bycore -bind-to-core"
;;
*hybrid*)
export NPN=2
export BINDING="-cpus-per-proc 4 -npersocket 1 -bysocket -bind-to-socket"
;;
*openmp*)
export NPN=1
esac
MPIRUN="/usr/lib64/openmpi-intel/bin/mpirun -wd ${ODIR} -np ${NPROCS} -npernode ${NPN} ${BINDING}"
case ${ADDON} in
*MPI*) export MPIPREFIX=${MPIRUN};;
*mpi*) export MPIPREFIX=${MPIRUN};;
*hybrid*)
export MPIPREFIX=${MPIRUN}
eval `modulecmd sh add intel.2013`
source /usr/local/bin/intel-setup-2013.sh intel64
;;
*openmp*)
export MPIPREFIX=${MPIRUN}
eval `modulecmd sh add intel.2013`
source /usr/local/bin/intel-setup-2013.sh intel64
;;
esac
cp ${IFILE} ${ODIR}
/usr/bin/time -p ${MPIPREFIX} ${EFILE} -f ${IFILE} > ${ODIR}/hmcout.${JOB_ID}.out
cp ${TMPDIR}/std* ${ODIR}
| true |
eabd0d88109b0c3e08b095141293851167e1e7f0 | Shell | Test-Bayes/evals | /tb_5_junit.sh | UTF-8 | 475 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env bash
cd ..
# Cloning
git clone git@github.com:Test-Bayes/Test-Bayes-Test.git
cd Test-Bayes-Test
git checkout -b probability-5-junit
git pull origin probability-5-junit
# JAR
cp ../evals/testbayes-1.0.jar ./mylibs/edu/uw/cse/testbayes/
# Running
mvn clean install -DskipTests
mvn -Dtest=ProbabilityTest test
# Saving
cat test-data/output.txt
touch ../evals/tb-5-junit.txt
cat test-data/output.txt > ../evals/tb-5-junit.txt
# Deleting
cd ..
rm -rf Test-Bayes-Test | true |
30ebd35123f68a147a23288d9fbe4006da148ca7 | Shell | ODEX-TOS/packages | /nmap/repos/extra-x86_64/PKGBUILD | UTF-8 | 1,594 | 2.625 | 3 | [
"GPL-1.0-or-later",
"MIT"
] | permissive | # Maintainer: Levente Polyak <anthraxx[at]archlinux[dot]org>
# Contributor: Gaetan Bisson <bisson@archlinux.org>
# Contributor: Angel Velasquez <angvp@archlinux.org>
# Contributor: Hugo Doria <hugo@archlinux.org>
pkgname=nmap
pkgver=7.91
pkgrel=1
pkgdesc='Utility for network discovery and security auditing'
url='https://nmap.org/'
arch=('x86_64')
license=('GPL2')
depends=('glibc' 'pcre' 'openssl' 'lua53' 'libpcap'
'libssh2' 'libssh2.so' 'zlib' 'gcc-libs')
source=("https://nmap.org/dist/${pkgname}-${pkgver}.tar.bz2"
"https://nmap.org/dist/sigs/${pkgname}-${pkgver}.tar.bz2.asc")
sha256sums=('18cc4b5070511c51eb243cdd2b0b30ff9b2c4dc4544c6312f75ce3a67a593300'
'SKIP')
b2sums=('a758e0a20f8243b33b000c23e025b87bdb712390b82982a1aca219c9b98cd55c6ababb810328c7d0cdb5c884ef9bd5b187b9e4929454278342d7ee5ef441cded'
'SKIP')
validpgpkeys=(
'436D66AB9A798425FDA0E3F801AF9F036B9355D0' # Nmap Project Signing Key (http://www.insecure.org/)
)
prepare() {
cd "${pkgname}-${pkgver}"
# ensure we build devendored deps
rm -rf liblua libpcap libpcre macosx mwin32 libssh2 libz
autoreconf -fiv
}
build() {
cd "${pkgname}-${pkgver}"
./configure \
--prefix=/usr \
--with-libpcap=/usr \
--with-libpcre=/usr \
--with-zlib=/usr \
--with-libssh2=/usr \
--with-liblua=/usr \
--without-ndiff \
--without-zenmap
make
}
check() {
cd "${pkgname}-${pkgver}"
make check
}
package() {
cd "${pkgname}-${pkgver}"
make DESTDIR="${pkgdir}" install
install -Dm 644 README.md docs/nmap.usage.txt -t "${pkgdir}/usr/share/doc/${pkgname}"
install -Dm 644 LICENSE docs/3rd-party-licenses.txt -t "${pkgdir}/usr/share/licenses/${pkgname}"
}
# vim: ts=2 sw=2 et:
| true |
03a8f1c96ea1ae6484a4375cd04020a8c9500ca3 | Shell | Rflageolle/dotfiles | /dotfiles/.tmux/nowplaying.sh | UTF-8 | 515 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env bash
command -v osascript > /dev/null 2>&1 && osascript -e 'set track_str to ""
if application "iTunes" is running then
tell application "iTunes" to if player state is playing then set track_str to "♫ " & name of current track & " ♪ " & artist of current track & " ♫"
end if
if application "Spotify" is running then
tell application "Spotify" to if player state is playing then set track_str to "♫ " & name of current track & " ♪ " & artist of current track & " ♫"
end if
track_str'
| true |
9936bc7aeadbf944f519b36632b62e7b94525720 | Shell | Apodini/ApodiniIoTDeploymentProvider | /scripts/setup-IoT-runner.sh | UTF-8 | 1,508 | 3.125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | # This source file is part of the Apodini Template open source project
#
# SPDX-FileCopyrightText: 2021 Paul Schmiedmayer and the project authors (see CONTRIBUTORS.md) <paul.schmiedmayer@tum.de>
#
# SPDX-License-Identifier: MIT
echo "Setting up Raspberry Pi for Deployment"
echo "Updating everything"
apt-get -q update && \
apt-get -q install -y \
binutils \
git \
gnupg2 \
libc6-dev \
libcurl4 \
libedit2 \
libgcc-9-dev \
libpython3.8 \
libsqlite3-0 \
libstdc++-9-dev \
libxml2 \
libz3-dev \
pkg-config \
tzdata \
zlib1g-dev \
&& rm -r /var/lib/apt/lists/*
echo "Downloading Swift"
curl -fsSL https://swift.org/builds/development/ubuntu2004-aarch64/swift-DEVELOPMENT-SNAPSHOT-2021-10-18-a/swift-DEVELOPMENT-SNAPSHOT-2021-10-18-a-ubuntu20.04-aarch64.tar.gz -o latest_toolchain.tar.gz https://swift.org/builds/development/ubuntu2004-aarch64/swift-DEVELOPMENT-SNAPSHOT-2021-10-18-a/swift-DEVELOPMENT-SNAPSHOT-2021-10-18-a-ubuntu20.04-aarch64.tar.gz.sig -o latest_toolchain.tar.gz.sig
echo "Verifying download"
curl -fSsL https://swift.org/keys/all-keys.asc | gpg --import -
gpg --batch --verify latest_toolchain.tar.gz.sig latest_toolchain.tar.gz
echo "Unpacking files"
tar --keep-old-files -xzf latest_toolchain.tar.gz --directory / --strip-components=1
chmod -R o+r /usr/lib/swift
rm -rf latest_toolchain.tar.gz.sig latest_toolchain.tar.gz
swift --version
if [ $? -eq 0 ]; then
echo "Swift installation successful"
else
echo "Swift installation failed"
fi
| true |
b9106ccb3356abbf77d90c9683ae55e576be9707 | Shell | treynr/linkd | /hpc/submit-calculate-ld.sh | UTF-8 | 1,581 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
## file: submit-calculate-ld.sh
## desc: Submit the calculate-ld.sh script to an HPC cluster.
## auth: TR
## Config file searching
if [[ -r "$HOME/.linkd.sh" ]]; then
source "$HOME/.linkd.sh"
elif [[ -r "../.linkd.sh" ]]; then
source "../.linkd.sh"
elif [[ -r "./.linkd.sh" ]]; then
source "./.linkd.sh"
else
echo "ERROR: the .linkd.sh configuration file is missing"
exit 1
fi
super=""
## cmd line processing
while :; do
case $1 in
-h | -\? | --help)
usage
exit
;;
--)
shift
break
;;
-?*)
echo "WARN: unknown option (ignored): $1" >&2
;;
*)
break
esac
shift
done
if [[ "$#" -lt 1 ]]; then
echo "ERROR: You need to supply a file containing a list of SNPs"
exit 1
fi
## SNP list. Each SNP in the list will be compared to all other SNPs on the
## same chromosome and LD calculated between each pairwise comparison.
snps="'$1'"
## Script being submitted to the HPC cluster
script="$SRC_DIR/calculate-ld.sh"
## Check to see what version of PBS/TORQUE is running.
## I have access to two clusters which run wildly different versions and this
## affects the job submission syntax.
version=$(
qstat --version 2>&1 |
sed -r -e ':a;$!{N;ba};s/[^0-9]*([0-9]+)\.([0-9]+)\.([0-9]+).*/\1/g'
)
## Old and busted
if [[ $version -lt 14 ]]; then
qsub -q batch -t 1-24 -v snps="$snps" "$script"
## New hotness
else
qsub -q batch -J 1-24 -v snps="$snps" "$script"
fi
| true |
2aa7768115e7dda0b593834551f6b2858b045ca2 | Shell | FedeGB/precios-cuidados-sisop | /grupo03/src/listener.sh | UTF-8 | 7,098 | 3.9375 | 4 | [] | no_license | #!/bin/bash
#Variables:
CANTCICLOS=0
doCiclo=1
function on_die
{
doCiclo=0
}
trap 'on_die' SIGTERM SIGINT
#Valida que un usuario sea asociado
#Parámetros:
#$1 -> Nombre de usuario
#$2 -> "Y" si se quiere chequear que sea colaborador, cualquier otra cosa no chequea
#Retorna:
#0 Si el usuario es válido
#1 Si el usuario no existe
#2 Si el usuario no es colaborador
function usuario_es_asociado
{
declare local validationData=`cat "$GRUPO/$MAEDIR"/asociados.mae | grep "^[^;]*;[^;]*;$1;[0 | 1];[^@]*@[^@]*\.[^@]*$" | sed "s~^[^;]*;[^;]*;\($1\);\([0 | 1]\);[^@]*@[^@]*\.[^@]*\$~\1-\2~"`
declare local asociado=`echo "$validationData" | sed "s~^\($1\)-[0 | 1]\$~\1~"`
if [[ "$asociado" == "$1" ]]; then
if [[ "$2" == "Y" ]]; then
declare local colaborador=`echo "$validationData" | sed "s~^$1-\([0 | 1]\)\$~\1~"`
if [[ "$colaborador" == "" || "$colaborador" == 0 ]]; then
return 2
fi
fi
return 0
else
return 1
fi
}
#Determina si el archivo es una lista de compras o no
#Retorna 1 en caso verdadero 0 en caso contrario.
#Guarda en $prob por qué se rechazó (si corresponde "" sino)
function es_lista_compras
{
prob=""
declare local usuario=`echo "$1" | grep "^[^\.]*\.[^- ]\{3\}$" | sed 's~^\([^\.]*\)\.[^- ]\{3\}$~\1~'`
if [[ "$usuario" == "" ]]; then
prob="Formato invalido"
return 0
else
usuario_es_asociado "$usuario"
res=$?
if [[ $res -ne 0 ]]; then
prob="Asociado inexistente"
return 0
fi
fi
return 1
}
#Función que devuelve 1 si el parámetro 1 es mayor o igual al parámetro 2 y menor o igual al parámetro 3
#0 en caso contrario. Si ($2 <= $1 <= $3) => 1, sino 0
#$1 -> Parámetro a chequear
#$2 -> Cota menor
#$3 -> Cota mayor
function in_range
{
if [ $1 -lt $2 ];
then
return 0
elif [ $1 -gt $3 ];
then
return 0
else
return 1
fi
}
#Valida que la fecha pasada por parámetro esté entre 2014 01 01 (>) y el año, mes y día actual (<=)
#Formato de la fecha aaaammdd
#Devuelve 1 si la fecha es válida, 0 en caso contrario
function validar_fecha
{
if [[ `echo "$1" | wc -m` -ne 9 ]]; then return 0; fi;
declare local compValue=`echo $1 | grep "^[0-9]\{4\}\(\(\(01\|03\|05\|07\|08\|10\|12\)\(0[1-9]\|[12][0-9]\|3[01]\)\)\|\(\(04\|06\|09\|11\)\(0[1-9]\|[12][0-9]\|30\)\)\|02\(0[1-9]\|1[0-9]\|2[0-8]\)\)"`
if [[ "$compValue" == "" ]]; then return 0; fi;
if [ $1 -le 20140101 ]; then return 0; fi;
compValue=`echo $1 | grep "^[0-9]\{4\}[0-9]\{2\}[0-9]\{2\}$" | sed 's~^\([0-9]\{4\}\).*$~\1~'`
in_range $compValue 2014 `date +%Y`
if [ "$?" == 0 ]; then return 0; fi;
if [[ $compValue -lt `date +%Y` ]]; then return 1; fi;
compValue=`echo $1 | grep "^[0-9]\{4\}[0-9]\{2\}[0-9]\{2\}$" | sed 's~^[0-9]\{4\}\([0-9]\{2\}\).*$~\1~'`
in_range $compValue 01 `date +%m`
if [ "$?" == 0 ]; then return 0; fi;
if [[ $compValue -lt `date +%m` ]]; then return 1; fi;
compValue=`echo $1 | grep "^[0-9]\{4\}[0-9]\{2\}[0-9]\{2\}$" | sed 's~^[0-9]\{4\}[0-9]\{2\}\([0-9]\{2\}\)$~\1~'`
in_range $compValue 01 `date +%d`
if [ "$?" == 0 ]; then return 0; fi;
if [[ $compValue -lt `date +%d` ]]; then return 1; fi;
return 0
}
#Determina si el archivo es una lista de precios o no
#Retorna 1 en caso verdadero 0 en caso contrario
#Guarda en $prob por qué se rechazó (si corresponde "" sino)
function es_lista_precios
{
prob=""
declare local validationData=`echo "$1" | grep "^[^ ]*-[0-9]\{8\}\..*$" | sed 's~^[^ ]*-\([0-9]\{8\}\)\.\(.*\)$~\1-\2~'`
declare local fecha=`echo "$validationData" | sed "s~^\([0-9]\{8\}\)-.*$~\1~"`
validar_fecha $fecha
if [[ $? == 0 ]]; then
#eval "$2"="Fecha invalida"
prob="Fecha invalida"
return 0
fi
declare local colaborador=`echo "$validationData" | sed 's~^[0-9]\{8\}-\(.*\)$~\1~'`
usuario_es_asociado "$colaborador" "Y"
declare local res=$?
if [[ $res == 1 ]]; then
prob="Asociado inexistente"
return 0
elif [[ $res == 2 ]]; then
prob="Colaborador inexistente"
return 0
fi
return 1
}
#Checks whether directory $1 contains files and, if so, launches
#process $2 — unless either process $2 or process $3 is already running.
function disparar_proceso
{
    #Upper-cased name used only in log messages. BUGFIX: the tr
    #character classes are now quoted; unquoted [:lower:]/[:upper:]
    #could be glob-expanded against files in the current directory.
    declare local procName=`echo "$2" | tr '[:lower:]' '[:upper:]'`
    if [[ `find "$1" -maxdepth 1 -type f | wc -l` -ne 0 ]]; then
        #If $2 or $3 is already running, postpone this invocation.
        if [[ ! -z `pgrep "$2"` || ! -z `pgrep "$3"` ]]; then
            logging.sh listener "Invocacion de $procName pospuesta para el proximo ciclo"
        else
            Start.sh listener -b "$2"
            res=$?
            declare local pid=$(pgrep "$2")
            if [[ $res -ne 0 ]]; then
                logging.sh listener "Invocacion de $procName pospuesta para el proximo ciclo"
            else
                logging.sh listener "$procName corriendo bajo el no.: $pid"
                echo "$procName ejecutado, PID: $pid"
            fi
        fi
    fi
}
#Abort when the environment has not been initialised.
if [[ $ENVINIT -eq 0 ]]; then
    logging.sh listener "Ambiente no inicializado" ERR
    exit 1
fi
#Main polling loop.
while [[ $doCiclo -eq 1 ]]; do
    #Log the cycle number.
    CANTCICLOS=$((CANTCICLOS + 1))
    logging.sh listener "Nro de Ciclo: $CANTCICLOS"
    IFS=$'\n'
    set -f
    #Classify every file in $NOVEDIR as a purchase list or a price
    #list; everything else is rejected.
    #Files that look like purchase lists ("name.XXX").
    for arch in `ls -1 "$GRUPO/$NOVEDIR/" | grep "^[^\.]*\....$"`;
    do
        str=`file "$GRUPO/$NOVEDIR/$arch" | sed 's-.*\(text\)$-\1-'`
        if [[ "$str" != "text" ]]; then
            Mover.sh "$GRUPO/$NOVEDIR/$arch" "$GRUPO/$RECHDIR/$arch" listener
            logging.sh listener "Archivo rechazado: Tipo de archivo invalido"
            continue
        fi
        es_lista_compras "$arch"
        res=$?
        if [[ $res -eq 1 ]]; then
            Mover.sh "$GRUPO/$NOVEDIR/$arch" "$GRUPO/$ACEPDIR/$arch" listener
        else
            Mover.sh "$GRUPO/$NOVEDIR/$arch" "$GRUPO/$RECHDIR/$arch" listener
            logging.sh listener "Archivo rechazado: $prob"
        fi
    done
    #Files that look like price lists ("prefix-suffix.ext").
    for arch in `ls -1 "$GRUPO/$NOVEDIR/" | grep "^[^\.]*-[^\.]*\..*$"`;
    do
        str=`file "$GRUPO/$NOVEDIR/$arch" | sed 's-.*\(text\)$-\1-'`
        if [[ "$str" != "text" ]]; then
            Mover.sh "$GRUPO/$NOVEDIR/$arch" "$GRUPO/$RECHDIR/$arch" listener
            logging.sh listener "Archivo rechazado: Tipo de archivo invalido"
            continue
        fi
        es_lista_precios "$arch"
        #BUGFIX: a stray `echo $?` used to sit between the call above
        #and the assignment below, so res captured echo's status (always
        #0) and every price list was rejected.
        res=$?
        if [[ $res -eq 1 ]]; then
            Mover.sh "$GRUPO/$NOVEDIR/$arch" "$GRUPO/$MAEDIR/precios/$arch" listener
        else
            Mover.sh "$GRUPO/$NOVEDIR/$arch" "$GRUPO/$RECHDIR/$arch" listener
            logging.sh listener "Archivo rechazado: $prob"
        fi
    done
    #Reject files matching neither naming scheme.
    for arch in `ls -1 "$GRUPO/$NOVEDIR/" | grep -v "^[^\.]*-[^\.]*\..*$" | grep -v "^[^\.]*\....$"`;
    do
        Mover.sh "$GRUPO/$NOVEDIR/$arch" "$GRUPO/$RECHDIR/$arch" listener
        logging.sh listener "Archivo rechazado: Estructura de nombre de archivo no identificada"
    done
    #Launch masterlist when there is pending work.
    disparar_proceso "$GRUPO/$MAEDIR/precios/" masterlist rating
    #Launch rating when there is pending work.
    disparar_proceso "$GRUPO/$ACEPDIR/" rating masterlist
    #Sleep until the next cycle.
    sleep 30
done
logging.sh listener "Fin de ejecución"
set +f
exit 0
| true |
2619daed12a59c76ed5bff864add2a16b4f7fdad | Shell | mdahamshi/my_scripts | /powerSaveToggle.sh | UTF-8 | 407 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# Toggle CPUs 1..N on or off, keyed off cpu1's current online state.
# Writing to the sysfs "online" files requires root.
state=$(cat /sys/devices/system/cpu/cpu1/online)
if [ $state -eq 1 ]
then
    target=0
    msg="CPU 1,2,3 DOWN !"
else
    target=1
    msg="CPU 1,2,3 UP !"
fi
for cpu in /sys/devices/system/cpu/cpu*/online
do
    echo $target > $cpu
done
echo "$msg"
notify-send "$msg"
| true |
09c7131bc2ddd1bc23cb2b1d792cfbc958ad5637 | Shell | prabhujdacps/ansiblescripts | /Ansible_Connect.sh | UTF-8 | 2,800 | 3.8125 | 4 | [] | no_license | ansiblePlaybook=""
# Mode 1: `./Ansible_Connect.sh env <file> <arg>` delegates to the
# customer-environment helper script and exits.
if [[ "$1" == "env" ]];then
    pythonCommand="python3 scripts/cps_promotion/Ansible_Customer_Env.py $2 $3"
    if [[ -f "$2" ]];then
        $pythonCommand
    else
        echo "invalid file $2"
    fi
    exit 0
fi
# Mode 2: print usage and exit.
if [[ "$1" == "--help" ]];then
    echo "./Ansible_Connect.sh <InventoryFilePath> <Type> <Nodes> <Action(Optional)> <Tags(Optional)>"
    exit 1
fi
# Validate that every node in the comma-separated list $2 has a
# host_vars YAML file under the inventory directory $1.
# Side effects (read later by the script): sets the globals hostFile
# and hostFileHomePath to "$1/hosts.ini" and "$1".
# Exits the whole script with status 1 on the first missing file.
validateHostFile(){
    # Loop-invariant assignments hoisted out of the per-node loop.
    hostFile="$1/hosts.ini"
    hostFileHomePath="$1"
    local serverName hostVarFile
    for serverName in $(echo "$2" | tr "," "\n")
    do
        hostVarFile="$hostFileHomePath/host_vars/$serverName.yaml"
        if [[ ! -f "$hostVarFile" ]];then
            echo "ERROR: Host file $hostVarFile is not present in inventory directory for Node=$serverName and file:$hostFileHomePath"
            exit 1
        fi
    done
}
# Validates the inventory and sets the hostFile/hostFileHomePath globals.
validateHostFile $1 $3
# Playbook file for each supported <Type>.
serverPlayBook="MRT_Install.yml"
applicationPlayBook="MRT_ApplicationDeploy.yml"
clusterPlaybook="MRT_Cluster.yml"
serverActionPlaybook="MRT_Action.yml"
exchangeAssetPlaybook="ExchangeUtil.yml"
util="roles/utils/utils.yml"
configPushPlaybook="CPS_ConfigPublish.yml"
armAPIPlaybook="ARM_APIManager.yml"
if [[ ! -f "${hostFile}" ]];then
    echo "ERROR: Inventory file is not present in the ${hostFile}"
    exit 1
fi
# Map <Type> ($2) to the playbook to run.
if [[ "$2" == "server" ]];then
    ansiblePlaybook=$serverPlayBook
elif [[ "$2" == "application" ]];then
    ansiblePlaybook=$applicationPlayBook
elif [[ "$2" == "cluster" ]];then
    ansiblePlaybook=$clusterPlaybook
elif [[ "$2" == "server_action" ]];then
    ansiblePlaybook=$serverActionPlaybook
elif [[ "$2" == "asset" ]];then
    ansiblePlaybook=$exchangeAssetPlaybook
elif [[ "$2" == "util" ]];then
    ansiblePlaybook=$util
elif [[ "$2" == "config" ]];then
    ansiblePlaybook=$configPushPlaybook
elif [[ "$2" == "api" ]];then
    ansiblePlaybook=$armAPIPlaybook
else
    echo "ERROR: Pass valid argument of type[server/server_action/application/asset/util/config/api]"
    exit 1
fi
echo "Ansible playbook :${ansiblePlaybook}"
AnsibleCommand="ansible-playbook -i ${hostFile} $ansiblePlaybook"
## Action validation with $4 /$5
# $4/$5 may each be either an "action=..." extra-var or a "-t..." tag
# list; both positions are accepted in either role.
if [[ "$2" != "server_action" ]];then
    if [[ "$3" != "" ]];then
        AnsibleCommand=$AnsibleCommand" -e $2=$3"
    fi
    if [[ "$4" == action* ]];then
        AnsibleCommand=$AnsibleCommand" -e $2_$4"
    fi
    if [[ "$5" == action* ]];then
        AnsibleCommand=$AnsibleCommand" -e $2_$5"
    fi
    if [[ "$4" == -t* ]];then
        AnsibleCommand=$AnsibleCommand" $4"
    fi
    if [[ "$5" == -t* ]];then
        AnsibleCommand=$AnsibleCommand" $5"
    fi
else
    # server_action requires an explicit action=<...> argument.
    if [[ "$4" == action* ]];then
        AnsibleCommand=$AnsibleCommand" -e server=$3 -e mrt_$4"
    else
        echo " Please pass ./Ansible_Connect.sh <InventoryFilePath> server_action <servers> action=<>"
    fi
fi
echo "Ansible command : $AnsibleCommand"
# NOTE(review): relies on word-splitting of the unquoted command string;
# argument values containing spaces would break here.
$AnsibleCommand
status=$?
if [ "$status" -eq 0 ]
then
    echo "Ansible execution successful"
    exit 0
else
    echo "ERROR:${status} Ansible execution failed...!!!"
    exit ${status}
fi
| true |
e5e06c85eb10539271efc6acbeeb96af33334f9f | Shell | bauricio/dots | /bootstrap_mac.sh | UTF-8 | 2,212 | 2.625 | 3 | [] | no_license | #!/bin/bash
#### Development basics ####
# xcode command line tools
# NOTE(review): despite the comment above, this block installs rvm.
if ! type rvm > /dev/null; then
  curl -sSL https://get.rvm.io | bash
fi
# NOTE(review): this Homebrew install URL is the legacy "go/install"
# ruby installer — confirm it still works before relying on it.
if ! type brew > /dev/null; then
  ruby -e "$(curl -fsSL https://raw.github.com/Homebrew/homebrew/go/install)"
fi
brew install git
brew tap phinze/cask
brew install brew-cask
#Download vagrant
brew cask install vagrant
# sublime
brew cask install sublime-text
# iterm2
brew cask install iterm2
# intellij
brew cask install intellij-idea
brew install node
brew install tree
brew install ack
brew install bash-completion
brew install ssh-copy-id
brew install wget
brew install selecta
# Remaining iTerm2/keyboard tweaks are manual:
# use solarize colors on iterm2
# set global shortcut for iterm2
# create bash and zsh profiles, where bash -l
# make (alt ->) jump words
# Caps->Control
# F keys as functions
defaults write com.apple.keyboard.fnState -boolean true
defaults write -g ApplePressAndHoldEnabled 0
# Clone and install personal dotfiles / vim config / oh-my-zsh once.
DOTS_DIR="$HOME/.dots";
if [ ! -d "$DOTS_DIR" ]; then
  git clone https://github.com/bauricio/dots $DOTS_DIR
  $DOTS_DIR/rake install
fi
VIM_DIR="$HOME/.vim";
if [ ! -d "$VIM_DIR" ]; then
  git clone https://github.com/bauricio/vim-files $VIM_DIR
  $VIM_DIR/install.sh
fi
OH_MY_ZSH_DIR="$HOME/.oh-my-zsh"
if [ ! -d "$OH_MY_ZSH_DIR" ]; then
  git clone https://github.com/bauricio/oh-my-zsh $OH_MY_ZSH_DIR
  chsh -s /bin/zsh
fi
#### Productivity ####
# screen hero
brew cask install screenhero
# evernote
brew cask install evernote
# caffeine
brew cask install caffeine
# alfred
brew cask install alfred
# add cask to alfred search scope
brew cask alfred link
# change shortcut (remove spotlight default and replace by alfred)
/usr/libexec/PlistBuddy "$HOME/Library/Preferences/com.apple.symbolichotkeys.plist" -c 'Delete AppleSymbolicHotKeys:64' > /dev/null 2>&1
/usr/libexec/PlistBuddy "$HOME/Library/Preferences/com.apple.symbolichotkeys.plist" -c 'Add AppleSymbolicHotKeys:64:enabled bool false'
defaults write com.alfredapp.Alfred hotKey -int 64
# The Unarchiver
brew cask install the-unarchiver
# Dropbox
brew cask install dropbox
# Manual dock setup:
# set dock to hide and minify
# clear dock to have only:
#### Fun ####
brew cask install steam
brew cask install spotify
brew cask install simple-comic
| true |
78ff7db2b26b904ee184c808b6419399bb09ad6b | Shell | BioKom/tools | /translate_html_sondz.sh | UTF-8 | 1,473 | 3.390625 | 3 | [] | no_license | #!/bin/bash
#
# @author Betti Oesterholz
# @mail webmaster@BioKom.info
#
# Copyright (C) @c GPL3 2008 Betti Oesterholz
#
# This script is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License (GPL) as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# This script is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this script If not, see <http://www.gnu.org/licenses/>.
#
#######################################################################
#
# This Script converts german speciale characters to the html control
# sequence in all "*.html" and "*.htm" files in the actual directory.
#
# History:
# 23.10.2011 Oesterholz created
#
# Process every *.htm / *.html file in the current directory.
# nullglob makes an unmatched pattern expand to nothing; globbing
# (instead of the former `$(ls ...)` parsing) also keeps file names
# containing whitespace intact.
shopt -s nullglob
for datei in *.htm *.html
do
	echo "Konverting file: ${datei}"
	# Map both Latin-1 (\xe4 ...) and UTF-8 (\xc3\xa4 ...) encodings of
	# the German umlauts and sharp s to their HTML entities.
	sed -e "s/\xe4/\ä/g" -e "s/\xf6/\ö/g" -e "s/\xfc/\ü/g" -e "s/\xc4/\Ä/g" -e "s/\xd6/\Ö/g" -e "s/\xdc/\Ü/g" -e "s/\xdf/\ß/g" \
		-e "s/\xc3\xa4/\ä/g" -e "s/\xc3\xb6/\ö/g" -e "s/\xc3\xbc/\ü/g" -e "s/\xc3\x84/\Ä/g" -e "s/\xc3\x96/\Ö/g" -e "s/\xc3\x9c/\Ü/g" -e "s/\xc3\x9f/\ß/g" "${datei}" > "${datei}.2"
	mv -f "${datei}.2" "${datei}"
done
| true |
c661a5c25ceda4abeaf63493786523233cf994d3 | Shell | tnakaicode/jburkardt | /square_symq_rule/square_symq_rule.sh | UTF-8 | 301 | 2.828125 | 3 | [] | no_license | #!/bin/bash
#
# Install the header where dependent builds can find it.
cp square_symq_rule.hpp /$HOME/include
#
# Compile; bail out (without moving anything) when compilation fails.
if ! g++ -c -I/$HOME/include square_symq_rule.cpp; then
  echo "Errors compiling square_symq_rule.cpp"
  exit
fi
#
mv square_symq_rule.o ~/libcpp/$ARCH/square_symq_rule.o
#
echo "Library installed as ~/libcpp/$ARCH/square_symq_rule.o"
| true |
cc7afb5835072da71bcc973cac83ddd9d032adae | Shell | Emile442/Epitech | /Tek1/CPE/Bsq/functional_tests.sh | UTF-8 | 1,062 | 3.109375 | 3 | [] | no_license | #!/bin/sh
# Banner / workspace reset.
# NOTE(review): the shebang is #!/bin/sh but the loop below uses [[ ]]
# and pattern expansion — this script actually requires bash.
echo "==========[BSQ_TESTS]=========="
echo "------[TEST CLEAN & INIT]------"
rm -rf tf_trace
mkdir tf_trace
echo "tmp_folder: OK"
# On the very first run trace.log does not exist yet; the rm error is
# harmless and the file is recreated just below.
rm trace.log
touch trace.log
echo "trace: OK"
echo " "
echo "-----------[BUILD]-------------"
make re
echo " "
echo "==========[TRACE_BSQ]==========" >> trace.log
echo $(date '+%d/%m/%Y %H:%M:%S') >> trace.log
echo " " >> trace.log
echo "------------[TRACE]------------"
echo " "
# Run ./bsq on every mouli map and diff its output against the solved
# reference; each result is both echoed and appended to trace.log.
for file in maps-intermediate/mouli_maps/*; do
    echo "------" >> trace.log
    echo "------"
    # timeout 20 ./bsq maps-intermediate/mouli_maps/${file##*/} > tf_trace/${file##*/}
    # Each solver run is capped at 60 seconds.
    timeout 60 ./bsq maps-intermediate/mouli_maps/${file##*/} > tf_trace/${file##*/}
    if [[ $(diff tf_trace/${file##*/} maps-intermediate/mouli_maps_solved/${file##*/}) = "" ]]; then
        echo "${file##*/}: SUCCESS!" >> trace.log
        echo "${file##*/}: SUCCESS!"
    else
        echo "${file##*/}: ERROR!" >> trace.log
        echo "${file##*/}: ERROR!"
    fi
    echo " " >> trace.log
    echo " " >> trace.log
    echo " "
    echo " "
done | true |
bba481f80d4e4b98a85180b1877473096353d155 | Shell | standardgalactic/MRO | /src/scripts/tree.sh | UTF-8 | 630 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env bash
#
# This simple CGI script helps create a tree browser for ONTIE
# Move to the repository root relative to the CGI working directory.
cd ../..
# Rebuild a full URL so `urlp` can extract individual query fields from
# the CGI QUERY_STRING (the host part is a dummy).
URL="http://example.com?${QUERY_STRING}"
ID=$(urlp --query --query_field=id "${URL}")
# PROJECT and BRANCH feed the "Return Home" link printed at the end of
# this script.
PROJECT=$(urlp --query --query_field=project-name "${URL}")
BRANCH=$(urlp --query --query_field=branch-name "${URL}")
# Check that the sqlite database exists
if ! [[ -s build/mro.db ]]; then
    rm build/mro.db > /dev/null 2>&1
    make build/mro.db > /dev/null 2>&1
fi
# Render the tree, rooted at ID when one was supplied.
if [[ ${ID} ]]; then
    python3 -m gizmos.tree build/mro.db ${ID}
else
    python3 -m gizmos.tree build/mro.db
fi
echo "<a href=\"/${PROJECT}/branches/${BRANCH}\"><b>Return Home</b></a>" | true |
152419537ada4eb34757a03c4a4e4e82be2c56ae | Shell | zenblender/boom-waffle | /bin/0-boot-to-wifi.sh | UTF-8 | 3,960 | 4.46875 | 4 | [] | no_license | #!/bin/bash -el
script_path="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Provides the yellow/green/cyan logging helpers used below.
source "${script_path}/_shared-scripts.sh"
# Report any non-zero exit; the EXIT trap routes every script
# termination through this function.
cleanup() {
  local exit_code=$1
  # NOTE(review): inside a function invoked from a trap, BASH_COMMAND
  # may not name the command that actually failed — treat the message
  # below as best-effort diagnostics.
  local previous_command=$BASH_COMMAND
  [[ $exit_code -ne 0 ]] && echo "INFO: Script exited with code $exit_code from command $previous_command"
  exit $exit_code
}
trap 'cleanup $?' EXIT
# Print CLI usage (does not exit; callers exit themselves).
usage() {
  yellow "usage: ./bin/0-boot-to-wifi.sh --ssid <ssid> --wifi-password <wifi-password> [--disk <disk to flash>]"
  yellow "Flashes Raspbian Stretch to an SD card (optional), enables SSH, and configures wifi"
  yellow "If you omit the --disk option, it will not flash the card (this assumes you have already flashed the SD card)"
  yellow "Find your device with 'diskutil list'. It will be something like '/dev/disk2' (don't pick the wrong one!)"
}
# Print (creating it first if needed) the cache directory for a
# component, e.g. "etcher" -> "<repo>/.cache/etcher".
get_cache_path() {
  local component=$1
  local cache_path="${script_path}/../.cache/${component}"
  mkdir -p "${cache_path}"
  # cyan "made cache path ${cache_path}"
  # BUGFIX: use a literal format string; the former
  # `printf "${cache_path}\n"` would misinterpret '%' or backslash
  # sequences contained in the path.
  printf '%s\n' "${cache_path}"
}
# Print the path of the unpacked etcher-cli executable (no trailing
# newline; callers capture it via $(...)). BUGFIX: literal printf
# format string so '%'/backslashes in the path cannot be mangled.
get_etcher_bin_path() {
  printf '%s/etcher-cli-1.4.4-darwin-x64-dist/etcher' "$(get_cache_path etcher)"
}
# Download (if needed) and unpack the pinned etcher-cli v1.4.4 release
# into the cache directory.
ensure_etcher_cli_available() {
  local etcher_cache_path="$(get_cache_path etcher)"
  local etcher_tar_filename="etcher-cli-1.4.4-darwin-x64.tar.gz"
  yellow "Ensuring etcher-cli is available"
  if [[ ! -f "${etcher_cache_path}/${etcher_tar_filename}" ]]; then
    yellow "Downloading etcher to ${etcher_cache_path}/${etcher_tar_filename}"
    curl -Lo "${etcher_cache_path}/${etcher_tar_filename}" "https://github.com/resin-io/etcher/releases/download/v1.4.4/${etcher_tar_filename}"
  fi
  local etcher_bin_path="$(get_etcher_bin_path)"
  if [[ ! -f "${etcher_bin_path}" ]]; then
    yellow "Extracting etcher tar.gz to ${etcher_cache_path}"
    tar -xzf "${etcher_cache_path}/${etcher_tar_filename}" -C "${etcher_cache_path}"
  fi
  # Confirm only when the binary really exists after the steps above.
  if [[ -f "${etcher_bin_path}" ]]; then
    green "Etcher is available at ${etcher_bin_path}"
  fi
}
# Print the path of the cached Raspbian image archive (no trailing
# newline; callers capture it via $(...)). BUGFIX: literal printf
# format string so '%'/backslashes in the path cannot be mangled.
get_raspbian_img_path() {
  printf '%s/raspbian_lite_latest.zip' "$(get_cache_path raspbian)"
}
# Unconditionally (re)download the Raspbian lite image into the cache.
download_raspbian() {
  # NOTE(review): raspbian_cache_path is assigned but never used here.
  local raspbian_cache_path="$(get_cache_path raspbian)"
  yellow "Downloading Raspbian image"
  curl -Lo "$(get_raspbian_img_path)" https://downloads.raspberrypi.org/raspbian_lite_latest
}
# Download the Raspbian image unless it is already cached.
# The set +e/-e bracket makes the function return 0 when the image
# already exists: the bare `[[ ... ]] && { ... }` yields status 1 in
# that case, which would otherwise trip the shebang's -e in the caller.
ensure_raspbian_is_available() {
  set +e
  [[ ! -f "$(get_raspbian_img_path)" ]] && {
    yellow "Raspbian is not downloaded"
    download_raspbian
  }
  set -e
}
# Flash the cached Raspbian image onto the given disk (e.g. /dev/disk2)
# with the etcher CLI; shows the disk's info before prompting for sudo.
flash_sd_card() {
  # NOTE(review): etcher_cache_path is assigned but never used here.
  local etcher_cache_path="${script_path}/../.cache/etcher"
  local etcher_bin_path="$(get_etcher_bin_path)"
  local disk_id=$1
  ensure_etcher_cli_available
  ensure_raspbian_is_available
  yellow "Flashing Raspbian to disk: $(cyan "${disk_id}")"
  diskutil info "${disk_id}"
  yellow "If this info looks correct, enter your password to continue"
  sudo "${etcher_bin_path}" --drive "${disk_id}" "$(get_raspbian_img_path)"
}
# Require at least --ssid <v> --wifi-password <v> (four argv words).
[[ $# -lt 4 ]] && { usage; exit 1; }
# Parse options
while [[ $# -gt 0 ]]; do
  key="$1"
  case $key in
    --ssid) ssid="$2"; shift; shift;;
    --wifi-password) wifi_password="$2"; shift; shift;;
    --disk) disk_to_flash="$2"; shift; shift;;
    *) echo "Unknown option: $1"; usage; exit 1;;
  esac
done
# Both credentials are mandatory; --disk is optional.
[[ -n $ssid ]] && [[ -n $wifi_password ]] || { usage; exit 1; }
# Flash and mount only when a target disk was given.
[[ -n "${disk_to_flash}" ]] && {
  flash_sd_card "${disk_to_flash}"
  green "Finished flashing SD card"
  yellow "Enter your password to mount the disk and run the playbook"
  sudo diskutil mountDisk "${disk_to_flash}"
}
# Run the boot-to-wifi playbook with the credentials as extra-vars.
cd $script_path/../ansible
yellow "Running playbook boot-to-wifi.yml"
ansible-playbook -i '127.0.0.1,' --extra-vars "ssid='${ssid}' wifi_password='${wifi_password}'" boot-to-wifi.yml
green "Finished running playbook boot-to-wifi.yml"
if [[ -n "${disk_to_flash}" ]]; then
  yellow "Enter your password to unmount the disk"
  sudo diskutil unmountDisk "${disk_to_flash}"
  green "Device ${disk_to_flash} was unmounted. You can remove it now"
fi
| true |
3c4ce0b84650d91021b00a1be00ca09828f519cd | Shell | zchee/go-zsh-completions | /src/_go | UTF-8 | 41,282 | 3.0625 | 3 | [] | no_license | #compdef go
# ------------------------------------------------------------------------------
# Copyright (c) 2016 Github zsh-users - http://github.com/zsh-users
# Copyright (c) 2013-2015 Robby Russell and contributors (see
# https://github.com/robbyrussell/oh-my-zsh/contributors)
# Copyright (c) 2010-2014 Go authors
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the zsh-users nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL ZSH-USERS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
# Description
# -----------
#
# Completion script for go 1.5 (http://golang.org).
#
# ------------------------------------------------------------------------------
# Authors
# -------
#
# * Mikkel Oscar Lyderik <mikkeloscar@gmail.com>
# * oh-my-zsh authors:
# https://github.com/robbyrussell/oh-my-zsh/blob/master/plugins/golang/golang.plugin.zsh
# * Go authors
#
# ------------------------------------------------------------------------------
#
# go.googlesource.com/go
#
# ------------------------------------------------------------------------------
# Associative array populated by _arguments with the parsed option args.
typeset -A opt_args
_go() {
local -a commands
local -a __build_flags
local -a __test_build_flags __test_binary_flags __test_profile_flags
commands=(
'build:compile packages and dependencies'
'clean:remove object files'
'doc:show documentation for package or symbol'
'env:print Go environment information'
'fix:run go tool fix on packages'
'fmt:run gofmt on package sources'
'generate:generate Go files by processing source'
'get:download and install packages and dependencies'
'install:compile and install packages and dependencies'
'list:list packages'
'run:compile and run Go program'
'test:test packages'
'tool:run specified go tool'
'version:print Go version'
'vet:run go tool vet on packages'
'help:get more information about a command'
)
_go_files() {
_files -g "*.go(-.)"
}
__go_packages() {
local gopaths
declare -a gopaths
gopaths=("${(s/:/)$(go env GOPATH)}")
gopaths+=("$(go env GOROOT)")
for p in $gopaths; do
_alternative ':go packages:_path_files -W "$p/src" -/'
done
_alternative '*:go file:_go_files'
}
__build_flags() {
_arguments \
'-a[force rebuilding of packages that are already up-to-date]' \
'-n[print the commands but do not run them]' \
'-p[number of builds that can be run in parallel]:number' \
'-race[enable data race detection]' \
'-v[print the names of packages as they are compiled]' \
'-work[print temporary work directory and keep it]' \
'-x[print the commands]' \
'-asmflags=[arguments for each go tool asm invocation]: :->asmflags' \
'-buildmode=[build mode to use]: :->buildmode' \
'-compiler[name of compiler to use]:name' \
'-gccgoflags[arguments for gccgo]:args' \
'-gcflags=[arguments to pass on each go tool compile invocation]: :->gcflags' \
'-installsuffix[suffix to add to package directory]:suffix' \
'-ldflags=[arguments to pass on each go tool link invocation]: :->ldflags' \
'-linkshared[link against shared libraries]' \
'-pkgdir[install and load all packages from dir]:dir' \
'-tags=[list of build tags to consider satisfied]:tags' \
'-toolexec[program to use to invoke toolchain programs]:args' \
'-debug-actiongraph[Undocumented, unstable debugging flags. action graph]' \
'-debug-deprecated-importcfg[Undocumented, unstable debugging flags. deprecated import config]'
case $state in
asmflags)
local -a __asm_flags
__asm_flags=(
'-D[predefined symbol with optional simple value -D=identifier=value; can be set multiple times]:value'
'-I[include directory; can be set multiple times]:value'
'-S[print assembly and machine code]'
'-debug[dump instructions as they are parsed]'
'-dynlink[support references to Go symbols defined in other shared libraries]'
'-e[no limit on number of errors reported]'
'-o[output file; default foo.o for /a/b/c/foo.s as first argument]:string'
'-shared[generate code that can be linked into a shared library]'
'-trimpath[remove prefix from recorded source file paths]:string'
)
_values \
'asmflags' \
${__asm_flags[@]}
;;
buildmode)
local -a __buildmode
__buildmode=(
'archive[Build the listed non-main packages into .a files]'
'c-archive[Build the listed main package, plus all packages it imports, into a C archive file]'
'c-shared[Build the listed main packages, plus all packages that they import, into C shared libraries]'
'default[Listed main packages are built into executables and listed non-main packages are built into .a files]'
'shared[Combine all the listed non-main packages into a single shared library that will be used when building with the -linkshared option]'
'exe[Build the listed main packages and everything they import into executables]'
'pie[Build the listed main packages and everything they import into position independent executables (PIE)]'
'plugin[Build the listed main packages, plus all packages that they import, into a Go plugin]'
)
_values \
'buildmode' \
${__buildmode[@]}
;;
gcflags)
local -a __gcflags
__gcflags=(
'-%[debug non-static initializers]'
'-+[compiling runtime]'
'-B[disable bounds checking]'
'-C[disable printing of columns in error messages]'
'-D[set relative path for local imports]:path:_files'
'-E[debug symbol export]'
'-I[add directory to import search path]:directory:_directories'
'-K[debug missing line numbers]'
'-N[disable optimizations]'
'-S[print assembly listing]'
'-V[print compiler version]'
'-W[debug parse tree after type checking]'
'-asmhdr[write assembly header to file]:file:_files'
'-bench[append benchmark times to file]:file:_files'
'-blockprofile[write block profile to file]:file:_files'
'-buildid[record id as the build id in the export metadata]:id'
'-c[concurrency during compilation, 1 means no concurrency (default 1)]:num concurrency'
'-complete[compiling complete package (no C or assembly)]'
'-cpuprofile[write cpu profile to file]:file:_files'
'-d[print debug information about items in list; try -d help]:list'
'-dolinkobj[generate linker-specific objects; if false, some invalid code may compile (default true)]'
'-dwarf[generate DWARF symbols (default true)]'
'-dynlink[support references to Go symbols defined in other shared libraries]'
'-e[no limit on number of errors reported]'
'-f[debug stack frames]'
'-goversion[required version of the runtime]:go version'
'-h[halt on error]'
'-i[debug line number stack]'
'-importcfg[read import configuration from file]:file:_files'
'-importmap[add definition of the form source=actual to import map]:definition'
'-installsuffix[set pkg directory suffix]:suffix'
'-j[debug runtime-initialized variables]'
'-l[disable inlining]'
'-linkobj[write linker-specific object to file]:file:_files'
'-live[debug liveness analysis]'
'-m[print optimization decisions]'
'-memprofile[write memory profile to file]:file:_files'
'-memprofilerate[set runtime.MemProfileRate to rate]:rate'
'-msan[build code compatible with C/C++ memory sanitizer]'
'-mutexprofile[write mutex profile to file]:file:_files'
'-nolocalimports[reject local (relative) imports]'
'-o[write output to file]:file:_files'
'-p[set expected package import path]:path:_files'
'-pack[write package file instead of object file]'
'-r[debug generated wrappers]'
'-race[enable race detector]'
'-s[warn about composite literals that can be simplified]'
'-shared[generate code that can be linked into a shared library]'
'-std[compiling standard library]'
'-traceprofile[write an execution trace to file]:file:_files'
'-trimpath[remove prefix from recorded source file paths]:prefix'
'-u[reject unsafe code]'
'-v[increase debug verbosity]'
'-w[debug type checking]'
'-wb[enable write barrier (default true)]'
)
_values \
'gcflags' \
${__gcflags[@]}
;;
ldflags)
local -a __ldflags
__ldflags=(
'-B[add an ELF NT_GNU_BUILD_ID note when using ELF]:note'
'-C[check Go calls to C code]'
'-D[set data segment address (default -1)]:address'
'-E[set entry symbol name]:entry'
'-H[set header type]:type'
'-I[use linker as ELF dynamic linker]:linker'
'-L[add specified directory to library path]:directory:_path_files -/'
'-R[set address rounding quantum (default -1)]:quantum'
'-T[set text segment address (default -1)]:address'
'-V[print version and exit]'
'-X[add string value definition of the form importpath.name=value]:definition'
'-a[disassemble output]'
'-buildid[record id as Go toolchain build id]:id'
'-buildmode[set build mode]:mode:(archive c-archive c-shared default shared exe pie)'
'-c[dump call graph]'
'-cpuprofile[write cpu profile to file]:file:_files'
'-d[disable dynamic executable]'
'-dumpdep[dump symbol dependency graph]'
'-extar[archive program for buildmode=c-archive]:string'
'-extld[use linker when linking in external mode]:linker'
'-extldflags[pass flags to external linker]:flags'
'-f[ignore version mismatch]'
'-g[disable go package data checks]'
'-h[halt on error]'
'-installsuffix[\[suffix\]: set package directory suffix]:suffix'
'-k[set field tracking symbol]:symbol'
'-libgcc[compiler support lib for internal linking; use "none" to disable]:string'
'-linkmode[set link mode (internal, external, auto)]:mode(internal external auto)'
'-linkshared[link against installed Go shared libraries]'
'-memprofile[write memory profile to file]:file:_files'
'-memprofilerate[set runtime.MemProfileRate to rate]:rate'
'-msan[enable MSan interface]'
'-n[dump symbol table]'
'-o[write output to file]:file:_files'
'-r[set the ELF dynamic linker search path to dir1:dir2:...]:path:_path_files'
'-race[enable race detector]'
'-s[disable symbol table]'
'-shared[generate shared object (implies -linkmode external)]'
'-tmpdir[use directory for temporary files]:directory:_path_files -/'
'-u[reject unsafe packages]'
'-v[print link trace]'
'-w[disable DWARF generation]'
)
_values \
'ldflags' \
${__ldflags[@]}
;;
esac
}
__test_build_flags=(
'-args[Pass the remainder of the command line to the test binary]'
'-c[Compile the test binary to pkg.test but do not run it]'
"-exec=[Run the test binary using 'xprog']:xprog"
"-json[Convert test output to JSON suitable for automated processing. See 'go doc test2json' for the encoding details]"
'-i[Install packages that are dependencies of the test]'
'-o[Compile the test binary to the named file]:binary file name:_files'
'-vet[Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks]:list'
)
# TODO(zchee): Support for regexp keyword
__test_binary_flags=(
"-bench[Run (sub)benchmarks matching a regular expression]:regexp of Benchmark functions:(.)"
'-benchmem[Print memory allocation statistics for benchmarks]'
'-count[Run each test and benchmark n times (default: 1)]:count'
'-cover[Enable coverage analysis]'
'-covermode[Set the mode for coverage analysis for the packages being tested (default: set)]:(set count atomic)'
'-coverpkg[Apply coverage analysis in each test to the given list of packages]: :__go_packages'
'-cpu[Specify a list of GOMAXPROCS values for which the tests or benchmarks should be executed]:(1 2 4)'
'-parallel=[Allow parallel execution of test functions that call t.Parallel]:number of parallel'
'-run[Run only those tests and examples matching the regular expression]:regexp of Tests or Examples'
'-short[Tell long-running tests to shorten their run time]'
'-timeout[If a test runs longer than arg time, panic (default: 10m)]:timeout (default: 10m)'
'-v[output log all tests as they are run and print all text from Log and Logf]'
)
__test_profile_flags=(
'-benchtime[Run enough iterations of each benchmark to take arg time]:bench time (specified as a time.Duration: e.g. 1h30s)'
'-blockprofile[Write a goroutine blocking profile to the specified file]:profile file path:_files'
'-blockprofilerate[Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate]:block profile rate'
'-coverprofile[Write a coverage profile to the file after all tests have passed]:coverage profile file path:_files'
'-cpuprofile[Write a CPU profile to the specified file before exiting]:cpu profile file path:_files'
'-memprofile[Write a memory profile to the file after all tests have passed]:memory profile file:_files'
'-memprofilerate[Enable more precise memory profiles by setting runtime.MemProfileRate]:memory profile rate'
'-mutexprofile[Enable more precise (and expensive) memory profiles]:mutex profile file:_files'
'-mutexprofilefraction[Sample 1 in n stack traces of goroutines holding a contended mutex]:mutex fraction'
'-outputdir[Place output files from profiling in the specified directory]:output directory:_path_files -/'
'-trace[Write an execution trace to the specified file before exiting]:output trace file path:_files'
)
_arguments \
"1: :{_describe 'command' commands}" \
'*:: :->args'
  case $state in
    args)
      # Per-sub-command completion; $words[1] is the sub-command.
      case $words[1] in
        build)
          _arguments \
            '-o[force build to write to named output file]:file:_files' \
            '-i[installs the packages that are dependencies of the target]' \
            '*: :__go_packages'
          # The common build flags are accepted as well.
          _alternative ':build flags:__build_flags'
          ;;
        clean)
          _arguments \
            '-cache[clean to remove the entire go build cache, in addition to cleaning specified packages (if any)]' \
            '-testcache[clean to expire all test results in the go build cache]' \
            '-i[remove corresponding installed archive or binary]' \
            '-r[apply clean recursively on all dependencies]' \
            '*:go packages:__go_packages'
          _alternative ':build flags:__build_flags'
          ;;
        doc)
          _arguments \
            '-c[respect case when matching symbols]' \
            '-cmd[treat a command (package main) like a regular package]' \
            '-u[show docs for unexported and exported symbols and methods]' \
            '*: :__go_packages'
          ;;
env)
local -a _envs
_envs=(
'GOARCH:The architecture, or processor, for which to compile code.'
"GOBIN:The directory where 'go install' will install a command."
'GOCACHE:The directory where the go command will store cached information for reuse in future builds.'
'GOEXE:It’s the value of the executable suffix. It’s set automatically in build.go.'
'GOHOSTARCH:The name of the host operating system.'
'GOHOSTOS:The name of the compilation architecture.'
'GOOS:The operating system for which to compile code.'
'GOPATH:Specifies the location of your workspace.'
'GORACE:Options for the race detector.'
'GOROOT:The root of the go tree.'
'GOTMPDIR:The directory where the go command will write temporary source files, packages, and binaries.'
'GOTOOLDIR:Use by things that want to get to the tool dir. It is explicitly NOT something that is pulled from the environment.'
"GCCGO:The gccgo command to run for 'go build -compiler=gccgo'."
'CC:The command to use to compile C code.'
'CXX:The command to use to compile C++ code.'
'CGO_ENABLED:Whether the cgo command is supported. Either 0 or 1.'
'CGO_CFLAGS:Flags that cgo will pass to the compiler when compiling C code.'
'CGO_CPPFLAGS:Flags that cgo will pass to the compiler when compiling C or C++ code.'
'CGO_CXXFLAGS:Flags that cgo will pass to the compiler when compiling C++ code.'
'CGO_FFLAGS:Flags that cgo will pass to the compiler when compiling Fortran code.'
'CGO_LDFLAGS:Flags that cgo will pass to the compiler when linking.'
'PKG_CONFIG:Path to pkg-config tool.'
'GOGCCFLAGS:Flags that gccgo command.'
)
_arguments \
"*: :{_describe 'envs' _envs}" \
;;
fix)
_arguments \
'*: :__go_packages'
;;
fmt)
_arguments \
'-n[prints commands that would be executed]' \
'-x[prints commands as they are executed]' \
'*: :__go_packages'
;;
generate)
_arguments \
'-run=[specifies a regular expression to select directives]:regex' \
'-x[print the commands]' \
'-n[print the commands but do not run them]' \
'-v[print the names of packages as they are compiled]' \
"*:args:{ _alternative ': :__go_packages' _files }"
;;
get)
_arguments \
'-d[instructs get to stop after downloading the packages]' \
'-f[force get -u not to verify that each package has been checked from vcs]' \
'-fix[run the fix tool on the downloaded packages]' \
'-insecure[permit fetching/resolving custom domains]' \
'-t[also download the packages required to build tests]' \
'-u[use the network to update the named packages]' \
'*: :__go_packages'
_alternative ':build flags:__build_flags'
;;
install)
_arguments \
'-i[installs the dependencies of the named packages as well]' \
'*: :__go_packages'
_alternative ':build flags:__build_flags'
;;
list)
_arguments \
'-e[changes the handling of erroneous packages]' \
'-f[specifies an alternate format for the list]:format' \
'-json[causes package data to be printed in JSON format]' \
"*:go file:{ _alternative ': :__go_packages' _files }"
_alternative ':build flags:__build_flags'
;;
run)
_arguments \
'-exec[invoke the binary using xprog]:xporg' \
'1:go run file:_go_files'
_alternative ':build flags:__build_flags'
;;
test)
_arguments \
${__test_build_flags[@]} \
${__test_binary_flags[@]} \
${__test_profile_flags[@]} \
'*: :__go_packages'
_alternative ':build flags:__build_flags'
;;
tool)
local -a tools
tools=(
'addr2line:minimal simulation of the GNU addr2line tool'
'api:computes the exported API of a set of Go packages'
'asm:assembles the source file into an object file'
'buildid:Buildid displays or updates the build ID stored in a Go package or binary. By default, buildid prints the build ID found in the named file'
'cgo:enables the creation of Go packages that call C code'
'compile:compiles a single Go package comprising the files named on the command line'
'cover:analyzing the coverage profiles generated by go test -coverprofile'
'dist:bootstrapping tool for the Go distribution'
'doc:Show the documentation for the package, symbol, and method'
'fix:finds Go programs that use old APIs and rewrites them to use newer ones'
'link:reads the Go archive or object for a package, and combines them into an executable binary'
'nm:lists the symbols defined or used by an object file, archive, or executable'
'objdump:disassembles executable files'
'pack:simple version of the traditional Unix ar tool'
'pprof:interprets and displays profiles of Go programs'
'trace:viewing trace files'
'vet:examines Go source code and reports suspicious constructs'
)
_arguments \
'-n[print command that would be executed]' \
"1: :{_describe 'tool' tools}" \
'*:: :->args'
case $state in
args)
case $words[1] in
addr2line)
_arguments \
'*:binary:_object_files'
;;
api)
_arguments \
'-allow_new[allow API additions (default true)]' \
'-c[optional comma-separated filename(s) to check API against]:string' \
'-contexts[optional comma-separated list of <goos>-<goarch>[-cgo] to override default contexts]:string' \
'-except[optional filename of packages that are allowed to change without triggering a failure in the tool]:string' \
'-next[optional filename of tentative upcoming API features for the next release]:string' \
'-v[verbose debugging]'
;;
asm)
_arguments \
${__asm_flags[@]}
;;
buildid)
_arguments \
'-w[rewrites the build ID found in the file to accurately record a content hash of the file]:_files'
;;
cgo)
_arguments \
'-debug-define[print relevant #defines]' \
'-debug-gcc[print gcc invocations]' \
'-dynimport[if non-empty, print dynamic import data for that file]:output filename (string)' \
'-dynlinker[record dynamic linker information in -dynimport mode]' \
'-dynout[write -dynimport output to this file]:output filename (string)' \
'-dynpackage[set Go package for -dynimport output]:string' \
'-exportheader[where to write export header if any exported functions]:export header filename (string)' \
'-gccgo[generate files for use with gccgo]' \
'-gccgopkgpath[-fgo-pkgpath option used with gccgo]:-fgo-pkgpath (string)'\
'-gccgoprefix[-fgo-prefix option used with gccgo]:-fgo-prefix (string)' \
'-godefs[for bootstrap: write Go definitions for C file to standard output]' \
'-import_runtime_cgo[import runtime/cgo in generated code]' \
'-import_syscall[import syscall in generated code]' \
'-importpath[import path of package being built]:import path (string)' \
'-objdir[object directory]:directory path (string)' \
'-srcdir[source directory]:directory path (string)' \
'*:go file:_files -g "*.go(-.)"'
;;
compile)
_arguments \
${__gcflags[@]} \
'*:file:_path_files -g "*.go"'
;;
cover)
if (( CURRENT == 2 )); then
_arguments \
'-func=[output coverage profile information for each function]:string' \
'-html=[generate HTML representation of coverage profile]:file:_files' \
'-mode=[coverage mode]:mode:(set count atomic)'
fi
_arguments \
'-o[file for output]:file:_files' \
'-var=[name of coverage variable to generate]:coverage var name' \
'*:file:_path_files -g "*.go"'
;;
#----------------------------
doc)
_arguments \
'-c[respect case when matching symbols]' \
'-cmd[treat a command (package main) like a regular package]' \
'-u[show docs for unexported and exported symbols and methods]' \
;;
fix)
_arguments \
'-diff[display diffs instead of rewriting files]' \
'-force[force fixes to run even if the code looks updated]:string' \
'-r[restrict the rewrites]:string' \
'*:files:_files'
;;
link)
_arguments \
'-B[add an ELF NT_GNU_BUILD_ID note when using ELF]:note' \
'-C[check Go calls to C code]' \
'-D[set data segment address (default -1)]:address' \
'-E[set entry symbol name]:entry' \
'-H[set header type]:type' \
'-I[use linker as ELF dynamic linker]:linker' \
'-L[add specified directory to library path]:directory' \
'-R[set address rounding quantum (default -1)]:quantum' \
'-T[set text segment address (default -1)]:address' \
'-V[print version and exit]' \
'-W[disassemble input]' \
'-X[add string value definition]:definition' \
'-a[disassemble output]' \
'-buildid[record id as Go toolchain build id]:id' \
'-buildmode[set build mode]:mode' \
'-c[dump call graph]' \
'-cpuprofile[write cpu profile to file]:file' \
'-d[disable dynamic executable]' \
'-extld[use linker when linking in external mode]:linker' \
'-extldflags[pass flags to external linker]:flags' \
'-f[ignore version mismatch]' \
'-g[disable go package data checks]' \
'-h[halt on error]' \
'-installsuffix[set package directory suffix]:suffix' \
'-k[set field tracking symbol]:symbol' \
'-linkmode[set link mode]:mode:(internal external auto)' \
'-linkshared[link against installed Go shared libraries]' \
'-memprofile[write memory profile to file]:file' \
'-memprofilerate[set runtime.MemProfileRate to rate]:rate' \
'-n[dump symbol table]' \
'-o[write output to file]:file' \
'-r[set the ELF dynamic linker search path to dir1:dir2:...]:path' \
'-race[enable race detector]' \
'-s[disable symbol table]' \
'-shared[generate shared object (implies -linkmode external)]' \
'-tmpdir[use directory for temporary files]:directory' \
'-u[reject unsafe packages]' \
'-v[print link trace]' \
'-w[disable DWARF generation]' \
'*:files:_files'
;;
objdump)
_arguments \
'-s[only dump symbols matching this regexp]:regexp' \
'*:files:_files'
;;
pack)
_arguments '1:ops:(c p r t x)' '::verbose:(v)' ':files:_files'
;;
pprof)
local -a output_format output_file_parameter output_granularity comparison sorting
local -a dynamic_profile profile_trimming sample_index sample_heap sample_contention
local -a filtering miscellaneous
# Output format (only set one)
output_format=(
'(-disasm -dot -eog -evince -gif -gv -list -pdf -peak -png -proto -ps -raw -svg -tags -text -top -tree -web -weblist)-callgrind[Outputs a graph in callgrind format]'
'(-callgrind -dot -eog -evince -gif -gv -list -pdf -peak -png -proto -ps -raw -svg -tags -text -top -tree -web -weblist)-disasm=[Output annotated assembly for functions matching regexp or address]:functions regexp pattern'
'(-callgrind -disasm -eog -evince -gif -gv -list -pdf -peak -png -proto -ps -raw -svg -tags -text -top -tree -web -weblist)-dot[Outputs a graph in DOT format]'
'(-callgrind -disasm -dot -evince -gif -gv -list -pdf -peak -png -proto -ps -raw -svg -tags -text -top -tree -web -weblist)-eog[Visualize graph through eog]'
'(-callgrind -disasm -dot -eog -gif -gv -list -pdf -peak -png -proto -ps -raw -svg -tags -text -top -tree -web -weblist)-evince[Visualize graph through evince]'
'(-callgrind -disasm -dot -eog -evince -gv -list -pdf -peak -png -proto -ps -raw -svg -tags -text -top -tree -web -weblist)-gif[Outputs a graph image in GIF format]'
'(-callgrind -disasm -dot -eog -evince -gif -list -pdf -peak -png -proto -ps -raw -svg -tags -text -top -tree -web -weblist)-gv[Visualize graph through gv]'
'(-callgrind -disasm -dot -eog -evince -gif -gv -pdf -peak -png -proto -ps -raw -svg -tags -text -top -tree -web -weblist)-list=[Output annotated source for functions matching regexp]:functions regexp pattern'
'(-callgrind -disasm -dot -eog -evince -gif -gv -list -peak -png -proto -ps -raw -svg -tags -text -top -tree -web -weblist)-pdf[Outputs a graph in PDF format]'
'(-callgrind -disasm -dot -eog -evince -gif -gv -list -pdf -png -proto -ps -raw -svg -tags -text -top -tree -web -weblist)-peek=[Output callers/callees of functions matching regexp]:functions regexp pattern'
'(-callgrind -disasm -dot -eog -evince -gif -gv -list -pdf -peak -proto -ps -raw -svg -tags -text -top -tree -web -weblist)-png[Outputs a graph image in PNG format]'
'(-callgrind -disasm -dot -eog -evince -gif -gv -list -pdf -peak -png -ps -raw -svg -tags -text -top -tree -web -weblist)-proto[Outputs the profile in compressed protobuf format]'
'(-callgrind -disasm -dot -eog -evince -gif -gv -list -pdf -peak -png -proto -raw -svg -tags -text -top -tree -web -weblist)-ps[Outputs a graph in PS format]'
'(-callgrind -disasm -dot -eog -evince -gif -gv -list -pdf -peak -png -proto -ps -svg -tags -text -top -tree -web -weblist)-raw[Outputs a text representation of the raw profile]'
'(-callgrind -disasm -dot -eog -evince -gif -gv -list -pdf -peak -png -proto -ps -raw -tags -text -top -tree -web -weblist)-svg[Outputs a graph in SVG format]'
'(-callgrind -disasm -dot -eog -evince -gif -gv -list -pdf -peak -png -proto -ps -raw -svg -text -top -tree -web -weblist)-tags[Outputs all tags in the profile]'
'(-callgrind -disasm -dot -eog -evince -gif -gv -list -pdf -peak -png -proto -ps -raw -svg -tags -top -tree -web -weblist)-text[Outputs top entries in text form]'
'(-callgrind -disasm -dot -eog -evince -gif -gv -list -pdf -peak -png -proto -ps -raw -svg -tags -text -tree -web -weblist)-top[Outputs top entries in text form]'
'(-callgrind -disasm -dot -eog -evince -gif -gv -list -pdf -peak -png -proto -ps -raw -svg -tags -text -top -web -weblist)-tree[Outputs a text rendering of call graph]'
'(-callgrind -disasm -dot -eog -evince -gif -gv -list -pdf -peak -png -proto -ps -raw -svg -tags -text -top -tree -weblist)-web[Visualize graph through web browser]'
'(-callgrind -disasm -dot -eog -evince -gif -gv -list -pdf -peak -png -proto -ps -raw -svg -tags -text -top -tree -web)-weblist=[Output annotated source in HTML for functions matching regexp or address]:functions regexp pattern'
)
output_file_parameter=(
'-output:Generate output on file f (stdout by default)'
)
# Output granularity (only set one)
output_granularity=(
'(-files -lines -addresses)-functions[Report at function level \[default\]]'
'(-functions -lines -addresses)-files[Report at source file level]'
'(-functions -files -addresses)-lines[Report at source line level]'
'(-functions -files -lines)-addresses[Report at address level]'
)
comparison=(
'-base:Show delta from this profile'
'-drop_negative:Ignore negative differences'
)
sorting=(
'-cum:Sort by cumulative data'
)
dynamic_profile=(
'-seconds:Length of time for dynamic profiles'
)
profile_trimming=(
'-nodecount:Max number of nodes to show'
'-nodefraction:Hide nodes below <f>*total'
'-edgefraction:Hide edges below <f>*total'
)
sample_index=(
'-sample_index:Index of sample value to display'
'-mean:Average sample value over first value'
)
sample_heap=(
'-inuse_space:Display in-use memory size'
'-inuse_objects:Display in-use object counts'
'-alloc_space:Display allocated memory size'
'-alloc_objects:Display allocated object counts'
)
sample_contention=(
'-total_delay:Display total delay at each region'
'-contentions:Display number of delays at each region'
'-mean_delay:Display mean delay at each region'
)
filtering=(
'-runtime:Show runtime call frames in memory profiles'
'-focus:Restricts to paths going through a node matching regexp'
'-ignore:Skips paths going through any nodes matching regexp'
'-tagfocus:Restrict to samples tagged with key:value matching regexp'
'-tagignore:Discard samples tagged with key:value matching regexp'
)
miscellaneous=(
'-call_tree:Generate a context-sensitive call tree'
'-unit:Convert all samples to unit u for display'
'-divide_by:Scale all samples by dividing them by f'
'-buildid:Override build id for main binary in profile'
'-tools:Search path for object-level tools'
'-help:This message'
)
_arguments \
'1:target binary:_files' \
'*:profile data:_files' \
$output_format \
$output_granularity
_describe -t output_file_parameter "Output file parameters (for file-based output formats):" output_file_parameter
# _describe -t output_granularity "Output granularity (only set one):" output_granularity
_describe -t comparison "Comparison options:" comparison
_describe -t sorting "Output options:" sorting
_describe -t dynamic_profile "Dynamic profile options:" dynamic_profile
_describe -t profile_trimming "Profile trimming options:" profile_trimming
_describe -t sample_index "Sample value selection option (by index):" sample_index
_describe -t sample_heap "Sample value selection option (for heap profiles):" sample_heap
_describe -t sample_contention "Sample value selection option (for contention profiles):" sample_contention
_describe -t filtering "Filtering options:" filtering
_describe -t miscellaneous "miscellaneous:" miscellaneous
;;
trace)
_arguments \
'-http=[HTTP service address]:addr' \
'*:files:_files'
;;
vet)
_arguments \
'-all[check everything]' \
'-asmdecl[check assembly against Go declarations]' \
'-assign[check for useless assignments]' \
'-atomic[check for common mistaken usages of the sync/atomic]' \
'-bool[check for mistakes involving boolean operators]' \
'-buildtags[check that +build tags are valid]' \
'-composites[check that composite literals used field-keyed elements]' \
'-compositewhitelist[use composite white list]' \
'-copylocks[check that locks are not passed by value]' \
'-methods[check that canonically named methods are canonically defined]' \
'-nilfunc[check for comparisons between functions and nil]' \
'-printf[check printf-like invocations]' \
'-printfuncs[print function names to check]:string' \
'-rangeloops[check that range loop variables are used correctly]' \
'-shadow[check for shadowed variables]' \
'-shadowstrict[whether to be strict about shadowing]' \
'-shift[check for useless shifts]' \
'-structtags[check that struct field tags have canonical format]' \
'-tags[list of build tags to apply when parsing]:list' \
'-test[for testing only: sets -all and -shadow]' \
'-unreachable[check for unreachable code]' \
'-unsafeptr[check for misuse of unsafe.Pointer]' \
'-unusedfuncs[list of functions whose results must be used]:string' \
'-unusedresult[check for unused result of calls to functions in -unusedfuncs]' \
'-unusedstringmethods[list of methods whose results must be used]:string' \
'-v[verbose]' \
'*:files:_files'
;;
yacc)
_arguments \
'-o[output]:output' \
'-v[parsetable]:parsetable' \
'*:files:_files'
;;
esac
;;
esac
;;
vet)
_values \
'vet flags' \
'-n[print commands that would be executed]' \
'-x[prints commands as they are executed]' \
'*:build flags:__go_packages'
_alternative ':build flags:__build_flags'
;;
help)
local -a topics
topics=(
'c:calling between Go and C'
'buildmode:description of build modes'
'filetype:file types'
'gopath:GOPATH environment variable'
'environment:environment variables'
'importpath:import path syntax'
'packages:description of package lists'
'testflag:description of testing flags'
'testfunc:description of testing functions'
)
_arguments "1: :{_describe 'command' commands -- topics}"
;;
esac
;;
esac
}
_go
# vim:ft=zsh:sts=2:sw=2:ts=2:et
| true |
d59e7bcab5d577a2c93a86b64dff2ae2fc073d5f | Shell | Wall404/ENV-INSTALL | /install-enviroment.sh | UTF-8 | 1,032 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# Bootstrap a Docker + Kind + kubeadm development environment on a
# Debian/Ubuntu host.  Must run with root privileges (apt, systemctl, swapoff).
echo -e "\e[34m Environment Installation \e[39m"
apt-get update
apt-get upgrade -y
echo -e "\e[34m Install Docker \e[39m"
apt install docker.io -y
systemctl enable docker
# Allow the invoking user to talk to the Docker daemon without sudo.
# (The original lines had a stray "$ " shell-prompt prefix pasted in, which
# made bash try to execute a command literally named "$" — they never ran.)
groupadd docker
usermod -aG docker "${USER}"
# su -s ${USER}
echo -e "\e[34m Install Kind \e[39m"
git clone https://github.com/kubernetes-sigs/kind.git "$HOME/kind"
apt install make -y
make -C "$HOME/kind" build
# export PATH=$PATH:$HOME/kind/bin/
echo -e "\e[34m Run python file \e[39m"
python3 ./pathsetter.py
echo -e "\e[34m Install Kubernetes \e[39m"
apt install curl -y
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add
apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main"
apt install kubeadm -y
echo -e "\e[34m Check kubeadm version \e[39m"
kubeadm version
echo -e "\e[34m Disable swap memory \e[39m"
swapoff -a
echo -e "\e[34m Run deploy example \e[39m"
sh example-deploy.sh
# If VirtualBox reports an error, run the following command in PowerShell
# (Windows host):
# bcdedit /set hypervisorlaunchtype off | true |
21fc8865fd79c3ab80790cd30e6caf8c6b6e307c | Shell | utk231/CircleCi-ECS-Anchor | /scripts/build/tag.sh | UTF-8 | 1,460 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env bash
set -e

# --- Preconditions -----------------------------------------------------------
# Abort early (message on stderr) when a required variable is missing.
# NB: the original used "(echo ... && exit 1)", which exits only a subshell and
# aborted the script solely because of "set -e"; the explicit { ...; } groups
# below abort directly and send diagnostics to stderr.
[[ -n $ARTIFACTORY_DOCKER_REGISTRY_USER ]] || { echo "ARTIFACTORY_DOCKER_REGISTRY_USER must be set as an environment variable in CircleCI." >&2; exit 1; }
[[ -n $ARTIFACTORY_DOCKER_REGISTRY_PASSWORD ]] || { echo "ARTIFACTORY_DOCKER_REGISTRY_PASSWORD must be set as an environment variable in CircleCI." >&2; exit 1; }
# TODO(review): TEAM_EMAIL is still interpolated into two properties below even
# though its precondition is disabled — confirm whether an empty value is OK.
# [[ -n $TEAM_EMAIL ]] || (echo "TEAM_EMAIL must be set as an environment variable in CircleCI." && exit 1)
[[ -n $VERSION ]] || { echo "VERSION must be set before invoking this script." >&2; exit 1; }
set +x
echo "Tagging containers..."

# set_property <key=value> <done-message>
# PUT one Artifactory property onto the image manifest; curl -f turns an HTTP
# error into a non-zero exit, which aborts the script via "set -e".
set_property() {
  curl -f -X PUT -u "$ARTIFACTORY_DOCKER_REGISTRY_USER:$ARTIFACTORY_DOCKER_REGISTRY_PASSWORD" "<manifest.json path>?properties=$1"
  echo "$2"
}

set_property "Retention=1year" "Retention tag complete"
set_property "Notification=$TEAM_EMAIL" "Notification tag complete"
set_property "App=<project>" "App tag complete"
set_property "Function=api" "Function tag complete"
set_property "Contact=$TEAM_EMAIL" "Contact tag complete"
echo "Container successfully tagged!"
| true |
df561232ea47e5ae61e52c136d34bfa3f64193ac | Shell | phts/bin | /git-prune-local-branches | UTF-8 | 196 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env bash
# Delete local branches that are already merged into the current branch,
# keeping the current branch (marked "*" by git branch) and any branch named
# in IGNORE_BRANCHES (a grep -E alternation, e.g. "master|develop").
# Based on https://stackoverflow.com/a/6127884/2462524
readonly MAIN_BRANCH=master
readonly IGNORE_BRANCHES="$MAIN_BRANCH"
# egrep is deprecated; grep -E is the supported spelling of the same thing.
git branch --merged | grep -E -v "(^\*|$IGNORE_BRANCHES)" | xargs git branch -d
| true |
959dc2d4057344499abeb1b795cc346c279b8512 | Shell | DeadEternity/dotfiles | /.scripts/rofi_bc.sh | UTF-8 | 293 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# Tiny interactive calculator: evaluate the script's arguments with bc(1) and
# show the result as the rofi prompt.  Choosing "Clear" restarts with an empty
# expression, "Copy" puts the result on the X selection via xclip, any other
# entry is appended to the result and re-evaluated (tail recursion via $0);
# pressing escape (empty choice) simply exits.
result=$( echo "$@" | bc -l | sed 's/^\.\(.*$\)/0.\1/' )
choice=$( echo -e "Clear\nCopy" | rofi -theme $HOME/.config/rofi/bc_config.rasi -dmenu -p "${result}" )
if [ "${choice}" = "Clear" ]; then
    $0
elif [ "${choice}" = "Copy" ]; then
    echo -n "${result}" | xclip
elif [ -n "${choice}" ]; then
    $0 "${result} ${choice}"
fi
| true |
829ad85d191547bc1bc58c28cdfe508fea18e1fe | Shell | carlos-gaitan/nucleo | /bin/admin.sh | UTF-8 | 4,762 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# Robert Martin-Legene <robert@nic.ar>
# Administration helper for a BFA (Blockchain Federal Argentina) geth node:
# subcommands manage the sync mode, the bootnode keyfile and account creation.
if [ -z "${BFAHOME}" ]; then echo "\$BFAHOME not set. Did you source bfa/bin/env ?" >&2; exit 1; fi
# libbfa.sh presumably provides shared helpers (e.g. yesno) and the
# BFANODEDIR/BFANETWORKDIR variables used below — confirm against that file.
source ${BFAHOME}/bin/libbfa.sh || exit 1
# Sync mode assumed when the node has not recorded a choice yet.
defaultsyncmode="fast"
# Validate the caller-supplied global $mode.  The supported values
# (full/fast/light) pass through untouched; anything else prints a warning
# and blanks $mode so the caller's prompt loop will ask again.
# Always returns success.
function modefilter
{
    if [[ "$mode" != "full" && "$mode" != "fast" && "$mode" != "light" ]]
    then
        echo "Unsupported mode."
        mode=""
        return 0
    fi
    true
}
# Show the available geth synchronization modes, let the operator pick one,
# and persist the choice in ${BFANODEDIR}/syncmode.  When the operator
# hardens the mode from "fast" to "full", optionally stop geth, delete the
# downloaded chain state and re-init from the genesis file so the chain gets
# re-downloaded and fully verified.
function admin_syncmode
{
    echo "Available synchronization modes:"
    echo "  full : verify all blocks and all transactions since genesis (most secure)"
    echo "  fast : verify all blocks but not all transactions (faster than full, but less certain)"
    echo "  light: Makes this node into a light node which downloads almost"
    echo "         nothing, but relies on fast and full nodes in the network"
    echo "         to answer it's requests. This is the fastest and uses least"
    echo "         local resources, but outsources all trust to another node."
    echo "Default mode is fast, because for many, it is a healthy compromise"
    echo "between speed and paranoia. You can change the setting, according to"
    echo "your needs."
    # Previously stored choice, or the global default when none is stored.
    mode=$( cat ${BFANODEDIR}/syncmode 2>/dev/null || true )
    mode=${mode:-${defaultsyncmode}}
    orgmode=$mode
    # modefilter blanks $mode (and warns) if the stored value is unsupported.
    modefilter
    echo "Your current mode is set to ${mode}"
    killed=0
    mode=
    echo
    # Prompt until modefilter accepts the answer (it resets $mode otherwise).
    while [ -z "${mode}" ]
    do
        read -p "Which mode do you wish? : " mode
        modefilter
    done
    echo "Remembering your choice."
    echo $mode > ${BFANODEDIR}/syncmode
    # fast -> full only helps if the fast-synced (unverified) state is
    # discarded and re-synced from scratch; offer to do that.
    if [ "$orgmode" = "fast" -a "$mode" = "full" ]
    then
        echo "You increased your paranoia level. The proper thing to do now,"
        echo "would be to delete your version of what you synchronized with"
        echo "fast mode, and revalidate everything in the entire blockchain."
        echo "This probably takes quite a long time and also requires downloading"
        echo "all blocks from the entire blockchain again."
        yesno n "Do you wish to delete all downloaded blocks and resynchronize?"
        if [ "$REPLY" = "y" ]
        then
            # Stop a running geth first (pid file presumably written at startup).
            if [ -r "${BFANODEDIR}/geth.pid" ]
            then
                pid=$( cat ${BFANODEDIR}/geth.pid )
                kill -0 $pid 2>/dev/null &&
                    echo "Killing running geth." &&
                    killed=1
                # NOTE(review): this loops while kill *fails*; with a stale pid
                # file (process already gone) it never terminates.  Waiting for
                # exit would be "while kill -0 ..." — confirm intent.
                while ! kill $pid 2>/dev/null
                do
                    sleep 1
                done
            fi
            # Drop only the chain state; keys and configuration are kept.
            rm -fr ${BFANODEDIR}/geth/chainstate ${BFANODEDIR}/geth/lightchainstate
            geth --cache 0 --datadir ${BFANODEDIR} init ${BFAHOME}/src/genesis.json
            test $killed -eq 1 &&
                echo &&
                echo "The startup.sh should restart your geth shortly."
        fi
    else
        echo "No further action taken."
    fi
}
# Enable or disable the (rarely needed) bootnode on this machine.  Presence
# of ${BFANETWORKDIR}/bootnode/key is what makes the startup script launch a
# bootnode, so "enable" means creating that keyfile and "disable" means
# deleting it and terminating a currently running bootnode.
function admin_bootnode
{
    keyfile=${BFANETWORKDIR}/bootnode/key
    echo "Only very few wants to actually run a boot node."
    echo "If you have a keyfile for a bootnode, then you will"
    echo "automatically start one, when restarting your system."
    if [ -f $keyfile ]
    then
        echo "You are set up to run a boot node."
        echo "Deleting your bootnode keyfile disables your bootnode."
        yesno n "Do you want to delete your bootnode keyfile?"
        if [ "$REPLY" = "y" ]
        then
            rm $keyfile
        fi
        # Stop a bootnode that is currently running (pid file present).
        # NOTE(review): this runs even when the user answered "n" above, so
        # declining the keyfile deletion still kills the bootnode — confirm
        # whether that is intended.
        pidfile=${BFANETWORKDIR}/bootnode/pid
        if [ -r $pidfile ]
        then
            pid=`cat $pidfile`
            # kill -0 only probes whether the process exists; the trailing
            # "|| true" keeps the compound list's status zero when it does
            # not (presumably to avoid tripping an ERR trap — confirm).
            kill -0 $pid &&
                echo "Terminating your bootnode." &&
                kill `cat $pidfile` ||
                true
        fi
    else
        echo "You are not set up to run a boot node."
        yesno n "Do you want to create a keyfile for a bootnode?"
        if [ "$REPLY" = "y" ]
        then
            bootnode -genkey $keyfile
        fi
        echo "You can now start your bootnode by running start.sh"
    fi
}
# Create a new geth account (empty passphrase) under ${BFANODEDIR}.
# If one or more accounts already exist, ask the user to confirm that an
# additional account is really wanted.
function create_account
{
    local num plural
    # Count existing key files; the glob matches geth's
    # "UTC--<timestamp>--<address>" keystore naming scheme, and the stderr
    # redirect silences ls when no account exists yet (wc then prints 0).
    num=$( ls -1 "${BFANODEDIR}/keystore/"*--* 2>/dev/null | wc -l )
    if [ "$num" -gt 0 ]
    then
        plural="s"
        if [ "$num" -eq 1 ]
        then
            plural=""
        fi
        yesno n "You already have ${num} account${plural}. Do you wish to create an extra?"
        if [ "$REPLY" = "n" ]
        then
            return
        fi
    fi
    # --password /dev/null gives the new account an empty passphrase.
    geth --cache 0 --datadir "${BFANODEDIR}" --password /dev/null account new
}
# Subcommand dispatcher: run the handler matching the first argument.
case "$1" in
    bootnode)
        admin_bootnode
        ;;
    syncmode)
        admin_syncmode
        ;;
    account)
        create_account
        ;;
    *)
        # Unknown or missing subcommand: print usage and fail.
        echo Usage: `basename $0` "{bootnode|syncmode|account}"
        # Clear the ERR trap (presumably installed by libbfa.sh — confirm)
        # so the non-zero exit below is not reported as an internal error.
        trap '' ERR
        exit 1
esac
| true |
351eedc9852678d67b2ecead93bc606f70ac78dd | Shell | duobin/swarm_evolve | /scrimmage/setup/funcs.sh | UTF-8 | 1,673 | 3.390625 | 3 | [
"LicenseRef-scancode-other-permissive",
"MIT"
] | permissive | #!/bin/bash
# ---------------------------------------------------------------------------
# @section LICENSE
#
# Copyright (c) 2016 Georgia Tech Research Institute (GTRI)
# All Rights Reserved
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ---------------------------------------------------------------------------
# @file filename.ext
# @author Kevin DeMarco <kevin.demarco@gtri.gatech.edu>
# @author Eric Squires <eric.squires@gtri.gatech.edu>
# @version 1.0
# ---------------------------------------------------------------------------
# @brief A brief description.
#
# @section DESCRIPTION
# A long description.
# ---------------------------------------------------------------------------
# Absolute path of the third-party build area, resolved relative to the
# current working directory at the time this file is sourced.
export THIRD_PARTY_BUILD_DIR=$(readlink -f "./third-party-build")
# Run one command ($1) as the login user who originally invoked sudo/su,
# keeping the current environment (-m).  "who am i" reports the original
# login name even when this script itself runs as root.
function runasuser {
    ORIGINAL_USER=$(who am i | awk '{print $1}')
    # Quote the user name (SC2086): unquoted, an empty result silently
    # dropped the argument and ran the command as the current (root) user.
    su "$ORIGINAL_USER" -m -c "$1"
}
# Ensure the directory named by $1 exists and print its canonical absolute
# path (via readlink -f), so callers get a stable location regardless of
# their working directory.
function mkdir_check {
    DIR=$(readlink -f "$1")
    # Quote the expansion (SC2086): unquoted ${DIR} broke on paths with
    # spaces and collapsed to a bare "[ ! -d ]" test when empty.
    if [ ! -d "${DIR}" ]; then
        mkdir "${DIR}"
    fi
    echo "${DIR}"
}
# Build the project in the current directory; when invoked with "install",
# additionally install it system-wide and refresh the dynamic linker cache.
function make_or_install {
    case "$1" in
        install)
            make install
            /sbin/ldconfig
            ;;
        *)
            make
            ;;
    esac
}
| true |
ccbfb8676ce5ecf6830b7869939d1d947816e492 | Shell | icanfly76/mail_center | /send_mail_keep.sh | UTF-8 | 689 | 3.1875 | 3 | [] | no_license | #!/bin/bash
#########################################################################
# File Name: sms_keep.sh
# Author: mougong
#
# Watchdog: every 5 seconds check whether send_mail.sh is running and
# relaunch it in the background when it is not.
#########################################################################
# iNum counts how many times the worker had to be restarted.
iNum=1
#echo $$
# NOTE: the original loop was "while(( $num < 5 ))" with num fixed at 1,
# i.e. an infinite loop in practice; "while true" states that intent
# directly (and drops the unused counter).
while true
do
    # PID(s) of a running send_mail.sh; empty when it is not running.
    sn=`ps -ef | grep send_mail.sh | grep -v grep | awk '{print $2}'`
    #echo $sn
    if [ "${sn}" = "" ] # empty: the worker process is not running
    then
        let "iNum++"
        #echo $iNum
        #cp statsms.log /www/mail_sms/log/statsms_$iNum.log.bak
        #rm statsms.log
        # Relaunch detached; stdout and stderr go to the status log.
        nohup /www/mail_sms/send_mail.sh > /www/mail_sms/log/statmail.log 2>&1 &
        #echo start ok !
        #else
        #echo running
    fi
    sleep 5
done
| true |
5b6badd056413731124ea6bf691bccb5166f5dae | Shell | JuSiZeLa/wasp | /docker/wasp-docker-image/docker-entrypoint.sh | UTF-8 | 332 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Update /etc/hosts so the Docker host's hostname ($HOST_HOSTNAME, expected to
# be passed in by the caller, e.g. "docker run -e" — confirm) resolves to this
# container's own interface address.
CONTAINER_IP=$(getent hosts "$HOSTNAME" | awk '{print $1}')
echo "$CONTAINER_IP $HOST_HOSTNAME" >> /etc/hosts
# Remove a stale pid file left over from a previous run; -f keeps a clean
# first start from failing when the file does not exist yet.
rm -f RUNNING_PID
# start wasp (exec on the next line replaces this shell so wasp receives signals)
exec /opt/wasp/bin/wasp -Dconfig.file=wasp-container.conf | true |
cabffb3d66b87cc0d251caf3d1d11f987880f219 | Shell | lvusyy/my_script | /upAAAAtocf.sh | UTF-8 | 5,971 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#!/bin/sh
# Usage: ./upAAAAtocf.sh <subdomain>
# Dynamic-DNS updater: publishes this host's global IPv6 address (taken from
# eth0) as an AAAA record under $cloudflare_domian via the Cloudflare v4 API.
cloudflare_Email='lvusyy@qq.com'
# Cloudflare Global API key (placeholder value).
cloudflare_Key='xxxxxxxxxxxxxxxx'
# Zone (apex) name; variable name keeps the original "domian" spelling.
cloudflare_domian='showip.xyz'
cloudflare_host=''
cloudflare_domian2=''
cloudflare_host2=''
cloudflare_domian6=$1
# First global (non link-local, i.e. not fe80::) IPv6 address on eth0.
cloudflare_host6=`ifconfig eth0 | awk '/inet6/{print $2}' | grep -v "fe80"`
cloudflare_interval=10
# 1 = publish AAAA (IPv6) records; the functions below also handle A records.
IPv6=1
domain_type="AAAA"
hostIP=$cloudflare_host6
Zone_ID=""
DOMAIN=$cloudflare_domian
HOST=$1
# (duplicate of the Zone_ID initialisation above; harmless)
Zone_ID=""
# Look up the Cloudflare zone id for $DOMAIN and store it in the global
# Zone_ID.  Parses the JSON zone listing with grep/awk (no jq dependency).
get_Zone_ID() {
    # Fetch the account's zone list.
    Zone_ID=$(curl -L -k -s -X GET "https://api.cloudflare.com/client/v4/zones" \
    -H "X-Auth-Email: $cloudflare_Email" \
    -H "X-Auth-Key: $cloudflare_Key" \
    -H "Content-Type: application/json")
    # Extract the hex id that immediately precedes "name":"$DOMAIN".
    Zone_ID=$(echo $Zone_ID|grep -o "id\":\"[0-9a-z]*\",\"name\":\"$DOMAIN\",\"status\""|grep -o "id\":\"[0-9a-z]*\""| awk -F : '{print $2}'|grep -o "[a-z0-9]*")
}
# Print the IP currently published for $HOST.$DOMAIN (record type chosen by
# the global $IPv6 flag).  Duplicate records with the same name are deleted
# and "0" is printed instead.  Returns 0 on success, 1 when no usable record
# info could be obtained (A-record mode only).
arDdnsInfo() {
    if [ "$IPv6" = "1" ]; then
        domain_type="AAAA"
    else
        domain_type="A"
    fi
    # Build the fully qualified record name; "*" and "@" get special forms.
    case $HOST in
        \*)
            host_domian="\\$HOST.$DOMAIN"
            ;;
        \@)
            host_domian="$DOMAIN"
            ;;
        *)
            host_domian="$HOST.$DOMAIN"
            ;;
    esac
    # Obtain the zone id.
    # NOTE(review): command substitution runs get_Zone_ID in a subshell, so
    # the Zone_ID it sets does NOT propagate to this shell and the request
    # below likely uses an empty Zone_ID — calling get_Zone_ID directly
    # would fix that; confirm.
    echo `get_Zone_ID`
    # Fetch the record list and pull out id + current content for this name.
    recordIP=$(curl -L -k -s -X GET "https://api.cloudflare.com/client/v4/zones/$Zone_ID/dns_records" \
    -H "X-Auth-Email: $cloudflare_Email" \
    -H "X-Auth-Key: $cloudflare_Key" \
    -H "Content-Type: application/json")
    RECORD_ID=$(echo $recordIP | sed -e "s/"'"ttl":'"/"' \n '"/g" | grep "type\":\"$domain_type\"" | grep -o "id\":\"[0-9a-z]\{32,\}\",\"type\":\"[^\"]*\",\"name\":\"$host_domian\",\"content\":\""|grep -o "id\":\"[0-9a-z]\{32,\}\",\""| awk -F : '{print $2}'|grep -o "[a-z0-9]*")
    recordIP=$(echo $recordIP | sed -e "s/"'"ttl":'"/"' \n '"/g" | grep "type\":\"$domain_type\"" | grep -o "name\":\"$host_domian\",\"content\":\"[^\"]*\""| awk -F 'content":"' '{print $2}' | tr -d '"' |head -n1)
    # More than one record id for the same name: delete all duplicates.
    if [ "$(echo $RECORD_ID | grep -o "[0-9a-z]\{32,\}"| wc -l)" -gt "1" ] ; then
        logger -t "【cloudflare动态域名】" "$HOST.$DOMAIN 获得最后更新IP时发现重复的子域名!"
        for Delete_RECORD_ID in $RECORD_ID
        do
            logger -t "【cloudflare动态域名】" "$HOST.$DOMAIN 删除名称重复的子域名!ID: $Delete_RECORD_ID"
            RESULT=$(curl -L -k -s -X DELETE "https://api.cloudflare.com/client/v4/zones/$Zone_ID/dns_records/$Delete_RECORD_ID" \
            -H "X-Auth-Email: $cloudflare_Email" \
            -H "X-Auth-Key: $cloudflare_Key" \
            -H "Content-Type: application/json")
        done
        # Signal "no valid record" to the caller.
        recordIP="0"
        echo $recordIP
        return 0
    fi
    if [ "$IPv6" = "1" ]; then
        echo $recordIP
        return 0
    else
        # A-record mode: only values starting with a digit look like an IPv4.
        case "$recordIP" in
            [1-9]*)
                echo $recordIP
                return 0
                ;;
            *)
                echo "Get Record Info Failed!"
                #logger -t "【cloudflare动态域名】" "获取记录信息失败!"
                return 1
                ;;
        esac
    fi
}
# Resolve $1 and print its IPv6 address(es): run nslookup in the background
# into a per-process temp file, wait up to ~5 seconds for a result, then
# print the unique non-empty lines.  Avoids blocking forever on a hung DNS.
arNslookup6() {
    mkdir -p /tmp/arNslookup
    # Keep only the first "Address" line that contains ":" (i.e. looks IPv6).
    nslookup $1 | tail -n +3 | grep "Address" | awk '{print $3}'| grep ":" | sed -n '1p' > /tmp/arNslookup/$$ &
    # Bounded wait for the background pipeline to produce output.
    I=5
    while [ ! -s /tmp/arNslookup/$$ ] ; do
        I=$(($I - 1))
        [ $I -lt 0 ] && break
        sleep 1
    done
    # NOTE(review): this kills every nslookup process on the machine, not
    # just the one started above — confirm that is acceptable.
    killall nslookup
    if [ -s /tmp/arNslookup/$$ ] ; then
        cat /tmp/arNslookup/$$ | sort -u | grep -v "^$"
        rm -f /tmp/arNslookup/$$
    fi
}
# Update the DNS record information.
# Parameters (via globals): main domain, subdomain.
# Create or update the $domain_type record for $HOST.$DOMAIN at Cloudflare so
# it points at $hostIP.  Retries the record-id lookup up to 3 times, removes
# duplicate records of the same name, then POSTs (create) or PUTs (update).
# Returns 0 when the API reports success, 1 otherwise.
arDdnsUpdate() {
    # I = remaining lookup attempts.
    I=3
    RECORD_ID=""
    if [ "$IPv6" = "1" ]; then
        domain_type="AAAA"
    else
        domain_type="A"
    fi
    # Build the fully qualified record name; "*" and "@" get special forms.
    case $HOST in
        \*)
            host_domian="\\$HOST.$DOMAIN"
            ;;
        \@)
            host_domian="$DOMAIN"
            ;;
        *)
            host_domian="$HOST.$DOMAIN"
            ;;
    esac
    # Look for an existing record id, retrying up to 3 times.
    while [ "$RECORD_ID" = "" ] ; do
        I=$(($I - 1))
        [ $I -lt 0 ] && break
        # Refresh the global Zone_ID.
        get_Zone_ID
        # Find the existing record id for this name/type, if any.
        RECORD_ID=$(curl -L -k -s -X GET "https://api.cloudflare.com/client/v4/zones/$Zone_ID/dns_records" \
        -H "X-Auth-Email: $cloudflare_Email" \
        -H "X-Auth-Key: $cloudflare_Key" \
        -H "Content-Type: application/json")
        RECORD_ID=$(echo $RECORD_ID | sed -e "s/"'"ttl":'"/"' \n '"/g" | grep "type\":\"$domain_type\"" | grep -o "id\":\"[0-9a-z]\{32,\}\",\"type\":\"[^\"]*\",\"name\":\"$host_domian\",\"content\":\""|grep -o "id\":\"[0-9a-z]\{32,\}\",\""| awk -F : '{print $2}'|grep -o "[a-z0-9]*")
        # Several records share the name: delete them all and retry cleanly.
        if [ "$(echo $RECORD_ID | grep -o "[0-9a-z]\{32,\}"| wc -l)" -gt "1" ] ; then
            logger -t "【cloudflare动态域名】" "$HOST.$DOMAIN 更新记录信息时发现重复的子域名!"
            for Delete_RECORD_ID in $RECORD_ID
            do
                logger -t "【cloudflare动态域名】" "$HOST.$DOMAIN 删除名称重复的子域名!ID: $Delete_RECORD_ID"
                RESULT=$(curl -L -k -s -X DELETE "https://api.cloudflare.com/client/v4/zones/$Zone_ID/dns_records/$Delete_RECORD_ID" \
                -H "X-Auth-Email: $cloudflare_Email" \
                -H "X-Auth-Key: $cloudflare_Key" \
                -H "Content-Type: application/json")
            done
            RECORD_ID=""
        fi
        #echo "RECORD ID: $RECORD_ID"
        sleep 1
    done
    if [ "$RECORD_ID" = "" ] ; then
        # No existing record: create one (TTL 120, Cloudflare proxy disabled).
        RESULT=$(curl -L -k -s -X POST "https://api.cloudflare.com/client/v4/zones/$Zone_ID/dns_records" \
        -H "X-Auth-Email: $cloudflare_Email" \
        -H "X-Auth-Key: $cloudflare_Key" \
        -H "Content-Type: application/json" \
        --data '{"type":"'$domain_type'","name":"'$HOST'","content":"'$hostIP'","ttl":120,"proxied":false}')
        RESULT=$(echo $RESULT | grep -o "success\":[a-z]*,"|awk -F : '{print $2}'|grep -o "[a-z]*")
        echo "创建dns_records: $RESULT"
    else
        # Existing record: update it in place with the same payload.
        RESULT=$(curl -L -k -s -X PUT "https://api.cloudflare.com/client/v4/zones/$Zone_ID/dns_records/$RECORD_ID" \
        -H "X-Auth-Email: $cloudflare_Email" \
        -H "X-Auth-Key: $cloudflare_Key" \
        -H "Content-Type: application/json" \
        --data '{"type":"'$domain_type'","name":"'$HOST'","content":"'$hostIP'","ttl":120,"proxied":false}')
        RESULT=$(echo $RESULT | grep -o "success\":[a-z]*,"|awk -F : '{print $2}'|grep -o "[a-z]*")
        echo "更新dns_records: $RESULT"
    fi
    # The API echoes "success":true/false; exactly one "true" means it worked.
    if [ "$(printf "%s" "$RESULT"|grep -c -o "true")" = 1 ];then
        echo "$(date) -- Update success"
        return 0
    else
        echo "$(date) -- Update failed"
        return 1
    fi
}
arDdnsUpdate
| true |
2c4284b41e800f9da6ad27806d6816f5aeb1e65f | Shell | chauncey-garrett/tmux-fingers | /scripts/send-input.sh | UTF-8 | 198 | 2.703125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Enqueue a command for tmux-fingers and bump its tmux wait-for channel so
# the consumer picks it up exactly once.
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# NOTE(review): CURRENT_DIR appears unused in this script -- confirm.
# Take the "fingers-input" lock, append the command, release to wake readers.
tmux wait-for -L fingers-input
echo "$1" >> /tmp/fingers-command-queue
tmux wait-for -U fingers-input
exit 0
| true |
69eac79d41873d246bdbcc3e81fe41f80d1b07e2 | Shell | metazoic/hierlearning | /process_results | UTF-8 | 319 | 2.84375 | 3 | [] | no_license | #!/bin/sh
# Merge the per-run reward/duration result files for a <world>/<agent>
# experiment into single files via the Octave `coalesce` helper, then remove
# the per-run shards.
if [ $# -lt 3 ]
then
    # Usage errors go to stderr; exit non-zero so callers can detect misuse.
    echo "Usage: $0 <world> <agent> <total runs>" >&2
    exit 1
fi
# Runs: coalesce("results/<metric>_<world>_<agent>", <total runs>)
octave --silent --eval "coalesce(\"results/reward_${1}_${2}\",${3});"
octave --silent --eval "coalesce(\"results/duration_${1}_${2}\",${3});"
# Quote the variable part of the path so only the _[0-9]* run suffix globs.
rm -fr "results/reward_${1}_${2}_"[0-9]*
rm -fr "results/duration_${1}_${2}_"[0-9]*
| true |
d6dca7bfd5bb1ac5920c38d40ebde035f05d2273 | Shell | mreinhardt/dotfiles | /.zsh/.zshenv | UTF-8 | 430 | 2.84375 | 3 | [] | no_license | export EDITOR=$(which nvim)
# Default pager for interactive tools.
export PAGER=$(which less)
# Prepend the user's autoloadable-function directory to zsh's function path.
fpath=( "${ZDOTDIR:-$HOME}/.zfunctions" $fpath )
# Pull in the grml-provided environment defaults.
source ${ZDOTDIR:-$HOME}/.zshenv.grml
# Feature flags recording which package managers exist on this machine.
if [[ -x $(command -v brew) ]]; then OS_HAS_BREW=1; fi
if [[ -x $(command -v apt-get) ]]; then OS_HAS_APT=1; fi
if [[ -x $(command -v yum) ]]; then OS_HAS_YUM=1; fi
# Re-activate an inherited Python virtualenv so its bin/ leads PATH.
if [[ -n $VIRTUAL_ENV && -e "${VIRTUAL_ENV}/bin/activate" ]]; then
source "${VIRTUAL_ENV}/bin/activate"
fi
| true |
6c763d10d67972d2f02e1a295a6209f67cd48f23 | Shell | lvzhidong/ec | /devrun.sh | UTF-8 | 134 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Activate the project's virtualenv; bail out if it is missing so we do not
# loop forever against the wrong Python interpreter.
. venv/bin/activate || exit 1
# Make sibling packages in the parent directory importable.
export PYTHONPATH=${PYTHONPATH}:../
# Keep the Django dev server alive: if it exits (crash, failed reload), wait
# a second and restart it, forwarding this script's arguments verbatim.
# (`while true` replaces the misleading-but-equivalent `while [[ 0 ]]`, and
# "$@" replaces $* so arguments containing spaces survive intact.)
while true;
do
    python manage.py runserver "$@"
    sleep 1
done
| true |
36d25da2d6e881584a0cafbdff1efb6e46d54d2d | Shell | ivann-galic/archi_linux_project | /generation.sh | UTF-8 | 1,052 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# Usage: generation.sh <tick delay> <output folder> <info log> <error log>
# Streams genTick -> genSensorData output into per-type log files under the
# invoking user's home directory (see the pipeline that follows).
delay=$1
folder_name=$2
infos_log_file=$3
errors_log_file=$4
myUserPath=/home/$USER #get the user's main folder path
# Create the destination folder and make sure both log files exist up front.
mkdir -p $myUserPath/$folder_name
touch $myUserPath/$folder_name/$infos_log_file
touch $myUserPath/$folder_name/$errors_log_file
# Traps the signal when generation.sh is closing and display a message :
function signal_trap_closed() {
echo "generation.sh is closing.";
}
trap signal_trap_closed SIGTERM;
# gets all the results form genTick and redirect it on a specific log file :
./genTick $delay | ./genSensorData 2>&1 | {
while IFS= read -r RAW_LINE; do
line_cut=$(echo $RAW_LINE | cut -d';' -f1);
result_to_write=$(echo $RAW_LINE | cut -d';' -f1,2,3,5,6);
if [[ $line_cut == "sensor" ]];
then
echo $result_to_write >> $myUserPath/$folder_name/$infos_log_file;
elif [[ $line_cut == "error" ]];
then
echo $RAW_LINE >> $myUserPath/$folder_name/$errors_log_file;
elif [[ $RAW_LINE == "Welcome !" ]];
then
continue;
else
echo "Error";
fi
done
} | true |
137a178e2f24b94505ad58fd42896d20b0a7ebc2 | Shell | InnovAnon-Inc/repo | /search-codebash.sh | UTF-8 | 264 | 3.21875 | 3 | [
"Unlicense",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #! /usr/bin/env bash
set -exu
# Require at least one search pattern: under `set -e`, (( $# )) aborts the
# script when no arguments were given.  (The original `(( ! $# ))` was
# inverted -- it aborted exactly when patterns WERE supplied, so the script
# could never actually search.)
(( $# ))
# Search from the repository root (the parent of this script's directory).
cd "$(dirname "$(readlink -f "$0")")"/..
#grep -R "${@/#/-e }" --exclude-dir=.git -a
# Build `-e PATTERN` pairs so grep ORs all requested patterns together.
declare -a array
for pattern in "$@" ; do
    array+=(-e)
    array+=("$pattern")
done
grep -R "${array[@]}" --exclude-dir=.git -a
| true |
a63291c10add43bd19e152aeee4f8cf3cc076c60 | Shell | 7agustibm/dotfiles | /install/brew.sh | UTF-8 | 417 | 2.6875 | 3 | [] | no_license | # Install Homebrew
# Bootstrap Homebrew itself, then bring existing formulae up to date.
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew update
brew upgrade
# Formulae to install in one shot.
packages=(
  tmux
  vim
  tfenv
  git
  github/gh/gh
  nvm
)
brew install "${packages[@]}"
# Git ships diff-highlight, but it is not on the PATH by default; link it in.
ln -sf "$(brew --prefix)/share/git-core/contrib/diff-highlight/diff-highlight" /usr/local/bin/diff-highlight
| true |
591567f11c2860b923f465d17cb97d1c3c946681 | Shell | Gemini9527/Gemini-API | /Protobuf/generate.sh | UTF-8 | 1,803 | 4.09375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Generating the csharp protobuf and grpc files.
# Need to check wheter the directory already exists
# $1 is the protobuf file name; C# output goes into the Unity assets tree.
OUTDIR=../Autoferry/Assets/Networking/ProtobufFiles/$1
PYTHON_OUTDIR=../Clients/PythonClients/
SRC_DIR=ProtoFiles/
# Refuse to run without the protobuf file name argument.
if [ $# -eq 0 ]; then
echo "Name of protobuf file not given"
exit 1
fi
if [ -d "$OUTDIR" ]; then
echo "Writing compiled protobuf and grpc files to: " $OUTDIR
# if the directory exists, just overwrite the files
./Plugins/protoc -I=$SRC_DIR --csharp_out=$OUTDIR/ $SRC_DIR/$1/$1.proto --grpc_out=$OUTDIR/ --plugin=protoc-gen-grpc=Plugins/grpc_csharp_plugin.exe
else
# if the directory does not exist, make a new directory and generate the files.
echo "The output directory did not exist, creating the directory for you!"
mkdir $OUTDIR
echo "Writing compiled protobuf and grpc files to: " $OUTDIR
./Plugins/protoc -I=$SRC_DIR --csharp_out=$OUTDIR/ $SRC_DIR/$1/$1.proto --grpc_out=$OUTDIR/ --plugin=protoc-gen-grpc=Plugins/grpc_csharp_plugin.exe
fi
# Generating the python protobuf and grpc files.
# Here one must provide the name of the protobuf file.
# This is assumed that host OS is Windows
# Check if second argument is given
if [ $# -ge 2 ]; then
if [ $2 == "python" ]; then
if [ -d "$PYTHON_OUTDIR" ]; then
echo "Writing compiled protobuf and grpc files to: " $PYTHON_OUTDIR
py -m grpc_tools.protoc -I $SRC_DIR --python_out=$PYTHON_OUTDIR --grpc_python_out=$PYTHON_OUTDIR $SRC_DIR/$1/$1.proto
else
echo "The python client output directory did not exist, creating the directory for you!"
mkdir $PYTHON_OUTDIR
py -m grpc_tools.protoc -I $SRC_DIR --python_out=$PYTHON_OUTDIR --grpc_python_out=$PYTHON_OUTDIR $SRC_DIR/$1/$1.proto
fi
fi
fi | true |
88df2c1fb2d352cf62600f94877c6616c965240c | Shell | jiangkehot/mytest | /myvpn/ipsec-vpn-server/pauseVPN | UTF-8 | 329 | 2.53125 | 3 | [] | no_license | #! /bin/bash
# VPN client shutdown script.
# Drop the VPN default route so traffic stops going through the tunnel.
route del default dev ppp0
# Remove the host routes added earlier.  Not very robust, so it is commented
# out for now -- kept here to copy and run manually:
# for ip in $(route | grep 'UGH' | awk '{print $1}');do route del $ip ;done
# CentOS/RHEL & Fedora
# Tell xl2tpd to disconnect the "myvpn" tunnel, then tear down the IPsec SA.
echo "d myvpn" > /var/run/xl2tpd/l2tp-control
strongswan down myvpn
| true |
5a45103a5c475d56b2409845722dbc8efb56d96b | Shell | beansh/dotfiles | /bash_profile | UTF-8 | 244 | 2.796875 | 3 | [
"MIT"
] | permissive | # just source my bashrc when Im too lazy to set my term emulator to /bin/bash instead of /bin/login
# Source ~/.bashrc so login shells get the same setup as interactive ones.
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# Load iTerm2's shell integration when it is installed.
test -e "${HOME}/.iterm2_shell_integration.bash" && source "${HOME}/.iterm2_shell_integration.bash"
| true |
cc425c28dacaef5bb964cc16001420a5322ab0c4 | Shell | reify-ryan-heimbuch/dotfiles | /zsh/widgets/widget-test-only | UTF-8 | 196 | 2.734375 | 3 | [] | no_license | #!/bin/zsh
# Diagnostic ZLE widget: clears the right half of the edit buffer and writes
# which key sequence ($KEYS) and keymap ($KEYMAP) triggered it -- a simple
# sanity check that a key binding actually reaches this widget.
function widget-test-only() {
  local message="widget-test-only: \$KEYS='${KEYS}', \$KEYMAP='${KEYMAP}'"
  RBUFFER=''
  LBUFFER=$message
}
widget-test-only "$@"
| true |
1703aedfb2a763c5a4dc95e26e2a4e758b70d041 | Shell | aniruddhkanojia/qtile-examples | /zordsdavini/bin/dmenu-session | UTF-8 | 609 | 3.171875 | 3 | [] | no_license | #!/bin/bash
#
# a simple dmenu session script
# inspired from https://bbs.archlinux.org/viewtopic.php?id=95984
# by zordsdavini, 2015
#
###
# dmenu appearance: case-insensitive, bottom bar, green selection highlight.
DMENU='dmenu -i -b -p >>> -nb #000 -nf #fff -sb #00BF32 -sf #fff'
# Offer the session actions and capture the user's pick.
choice=$(echo -e "lock\nnolock\nlogout\nshutdown\nreboot\nsuspend\nhibernate" | $DMENU)
# Dispatch on the chosen action; unknown/empty choice falls through silently.
case "$choice" in
lock) xautolock -enable ; xautolock -locknow ;;
nolock) xautolock -disable ;;
logout) sudo kill $(pgrep X) & ;;
shutdown) sudo shutdown -h now & ;;
reboot) sudo reboot ;;
suspend) sudo pm-suspend && xautolock -locknow ;;
hibernate) sudo pm-hibernate && xautolock -locknow ;;
esac
| true |
560853323a6173ba16d7b3b3c4b19eceda96f082 | Shell | ebtaleb/DARAPP | /create_db.sh | UTF-8 | 1,361 | 3.9375 | 4 | [] | no_license | #!/bin/bash
db=""
function init_db {
local db_name=$1
mysql -hlocalhost -uroot -pnyanyanya -e "CREATE DATABASE $db_name DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"
}
if [ $# -eq 0 ]
then
echo "No arguments supplied"
exit 1
fi
if [ $# -lt 2 ]
then
echo "create_db [-cd] db_name"
exit 1
fi
while getopts ":c:d:" opt; do
case "$opt" in
c)
mysqladmin -uroot -pnyanyanya create $OPTARG
db="$OPTARG"
echo "database $db created"
break
;;
d)
mysqladmin -uroot -pnyanyanya drop $OPTARG
db="$OPTARG"
echo "database $db deleted"
exit 0
;;
\?)
echo "Invalid option: -$OPTARG" >&2
exit 1
;;
esac
done
table="use $db; CREATE TABLE IF NOT EXISTS TRACKS (ID INT (5) NOT NULL AUTO_INCREMENT,TITLE VARCHAR (20) NOT NULL,SINGER VARCHAR (20) NOT NULL,PRIMARY KEY ( ID ));"
user_table="use $db; CREATE TABLE IF NOT EXISTS USERS (ID INT (5) NOT NULL AUTO_INCREMENT,NAME VARCHAR (20) NOT NULL,PASSWORD VARCHAR (20) NOT NULL, EMAIL VARCHAR(60) NOT NULL, PRIMARY KEY ( ID ), UNIQUE (email));"
mysql -hlocalhost -uroot -pnyanyanya -e "$table"
echo "table TRACKS created"
mysql -hlocalhost -uroot -pnyanyanya -e "$user_table"
echo "table USERS created"
| true |
ec25370e9d27650e5466d56157de29deead53411 | Shell | Surplus-Spec/Open_DataPlatform | /Container_Part/Database/influx/this_run.sh | UTF-8 | 1,342 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Settings describing which CSV fields to read and the InfluxDB server to
# write them to (consumed by influx_put_data.py below).
# [1] target field name; to pass two or more fields, join them with "|" and no spaces
field="DRIVE_SPEED|DRIVE_LENGTH_TOTAL"
# [2] time field name
ts="RECORD_TIME"
# [3] id field name
carid="PHONE_NUM"
# [4] influxdb ip
ip="localhost"
# [5] influxdb port
port=8086
# [6] influxdb server username
# BUG FIX: shell assignments must not have spaces around "=".  The original
# `username = 'cschae'` ran a command named "username" (and likewise for the
# next three variables), leaving them unset, so empty arguments reached the
# Python loader.
username='cschae'
# [7] influxdb server password
password='evev2021'
# [8] influxdb databasename
database_name='test1'
# [9] influxdb measurement name
measurement_name='test1'
echo ">>===================================================="
echo "실행 관련 주요 정보(this_run.sh)"
echo "target field name : "$field
echo "time field name : "$ts
echo "id field name : "$carid
echo "influxdb ip : "$ip
echo "influxdb port : " $port
echo "influxdb user name : " $username
echo "influxdb server password : " $password
echo "influxdb database name : " $database_name
echo "influxdb measurement name : " $measurement_name
echo "====================================================<<"
# Nine positional arguments, in the order documented above (now quoted so
# values such as the "|"-joined field list are passed verbatim):
# [1] [2] [3] [4] [5] [6] [7] [8] [9]
time python3 influx_put_data.py "$field" "$ts" "$carid" "$ip" "$port" "$username" "$password" "$database_name" "$measurement_name"
echo " *** end script run for PYTHON *** " | true |
9a28a6daaacc48242caf1f349ce4adb961220764 | Shell | KittyKatt/bashIRC | /contrib/test.module | UTF-8 | 240 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Fantasy-command handler: replies in-channel confirming the test worked.
function testme() {
# Extract the sender's nick from the raw IRC line ("":nick!user@host ..." -> "nick").
nick=$(echo "${input}" | awk '{print $1}' | sed -e 's/://;s/!/ /;' | awk '{print $1}')
# NOTE(review): `msg`, `dest`, `$input` and `$me` come from the bot core --
# they are not defined in this module.
msg $(dest chan) "$nick Hey, this test was successful, dumbass. $me"
}
hook=$(fantasy.hook testme testme > /dev/null) | true |
96622c044543d1c9fdeae33b8f3718a05ab5edf2 | Shell | linearregression/lambda-examples | /server-housekeeping/create_lifecycle_hooks.sh | UTF-8 | 1,179 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/bin/sh -e
# Wire up Auto Scaling lifecycle hooks that publish launch/terminate events
# to an SNS topic (env.sh supplies REGION, ASG_NAME, LIFECYCLE_* variables).
. env.sh
# note: re-running this returns the same topic:
TOPIC_ARN=$(aws sns create-topic --region $REGION \
--name $LIFECYCLE_TOPIC \
--output text --query 'TopicArn')
# IAM role that lets Auto Scaling publish lifecycle notifications to SNS.
LIFECYCLE_ROLE_ARN=$(aws iam get-role --region $REGION \
--role-name $LIFECYCLE_NOTIF_ROLE \
--output text --query 'Role.Arn')
# Launch hook: new instances pause up to 60s, then CONTINUE by default, so
# the notification handler has a window to run housekeeping.
aws autoscaling put-lifecycle-hook --region $REGION \
--lifecycle-hook-name launch-hook \
--auto-scaling-group-name $ASG_NAME \
--lifecycle-transition "autoscaling:EC2_INSTANCE_LAUNCHING" \
--heartbeat-timeout 60 \
--default-result CONTINUE \
--role-arn $LIFECYCLE_ROLE_ARN \
--notification-target-arn $TOPIC_ARN
echo "added lifecycle launch hook to $ASG_NAME"
# Matching hook for instance termination.
aws autoscaling put-lifecycle-hook --region $REGION \
--lifecycle-hook-name destroy-hook \
--auto-scaling-group-name $ASG_NAME \
--lifecycle-transition "autoscaling:EC2_INSTANCE_TERMINATING" \
--heartbeat-timeout 60 \
--default-result CONTINUE \
--role-arn $LIFECYCLE_ROLE_ARN \
--notification-target-arn $TOPIC_ARN
echo "added lifecycle destroy hook to $ASG_NAME"
| true |
9ca4760f7a7bc13a900f07233b10aee70f9db302 | Shell | citrix-openstack/geppetto | /os-vpx-scripts/usr/local/bin/geppetto/init/keystone-init | UTF-8 | 2,730 | 2.828125 | 3 | [] | no_license | #!/bin/sh
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# One-shot Keystone bootstrap: point it at MySQL, sync the schema, then seed
# services, endpoints, roles and the superuser.  Runs with -eux so any
# failure aborts and every command is traced into the provisioning logs.
set -eux
. /etc/openstack/keystone
. /etc/openstack/mysql
# Point keystone at the MySQL backend configured for this appliance.
sed -e "s,sql_connection = .*,sql_connection = mysql://$MYSQL_USER:$MYSQL_PASS@$MYSQL_HOST/keystone," \
-i /etc/keystone/keystone.conf
TOUCH_FILE="/var/lib/geppetto/keystone-init-run"
keystone_manage="keystone-manage $@"
$keystone_manage database sync
# Idempotence guard: if any tenant already exists, assume setup already ran
# (`|| true` keeps -e from aborting when grep finds nothing).
tenant_list=$($keystone_manage tenant list)
is_tenant_1_present=$(echo "$tenant_list" | grep -c 1) || true
if [ $is_tenant_1_present -ne 0 ]
then
echo "There is already a tenant in the db, skipping this setup script"
touch $TOUCH_FILE
exit 0
fi
# Register the core OpenStack services and placeholder endpoint templates
# (the dummy URLs are replaced later by the deployment tooling).
$keystone_manage service add glance image "Glance Image Service"
$keystone_manage service add keystone identity "Keystone Identity Service"
$keystone_manage service add nova compute "Nova Compute Service"
$keystone_manage service add swift object-store "Swift Service"
$keystone_manage endpointTemplates add RegionOne glance \
http://dummy \
http://dummy \
http://dummy 1 1
$keystone_manage endpointTemplates add RegionOne keystone \
http://dummy \
http://dummy \
http://dummy 1 1
$keystone_manage endpointTemplates add RegionOne nova \
http://dummy \
http://dummy \
http://dummy 1 1
$keystone_manage endpointTemplates add RegionOne swift \
http://dummy \
http://dummy \
http://dummy 1 1
# Standard role set.
$keystone_manage role add admin
$keystone_manage role add KeystoneAdmin
$keystone_manage role add KeystoneServiceAdmin
$keystone_manage role add Member
$keystone_manage role add netadmin
$keystone_manage role add projectmanager
$keystone_manage role add sysadmin
# Create the superuser tenant/user with global and tenant-scoped roles.
os-vpx-add-tenant "$KEYSTONE_SUPERUSER_TENANT"
os-vpx-add-user \
"$KEYSTONE_SUPERUSER_TENANT" \
"$KEYSTONE_SUPERUSER_NAME" \
"$KEYSTONE_SUPERUSER_PASS" \
"admin,1 KeystoneAdmin,1 KeystoneServiceAdmin,1 admin,0 Member,0 netadmin,0 projectmanager,0 sysadmin,0" \
"$KEYSTONE_SUPERUSER_TOKEN"
touch $TOUCH_FILE
| true |
b9650e7fd1da36d7064503e0af7fbd8c74ab7581 | Shell | Jeinzi/arch-configuration | /.config/i3/switchScreenSetup.sh | UTF-8 | 669 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Menu labels shown in the launcher and, index-aligned with them, the
# arandr-exported layout script each label runs.
entries=(Single-Screen Dual-Screen)
commands=(
~/.screenlayout/single.sh
~/.screenlayout/dual.sh
)
##
# Print the menu entries, one per line (consumed by the launcher's menu mode).
# Reads the global `entries` array.
##
function show_entries()
{
    local entry
    for entry in "${entries[@]}"
    do
        # Quoted printf instead of unquoted `echo $e`: entries containing
        # spaces or glob characters are printed verbatim (SC2086).
        printf '%s\n' "$entry"
    done
}
# Check for command line arguments.
if [[ -z "$@" ]]; then
# Echo menu entries if nothing was passed.
show_entries
else
# Execute xrandr command corresponding to the chosen menu entry.
# Walk the entries to find the index of the chosen label.
i=0
for e in "${entries[@]}"
do
if [[ "$@" == "${entries[$i]}" ]]; then
break
fi;
((i++))
done
# NOTE(review): if nothing matched, $i points one past the last entry and
# ${commands[$i]} is empty; also `$( ... )` runs the layout script and then
# executes its (normally empty) stdout -- confirm this indirection is intended.
$( ${commands[$i]} )
i3-msg restart
touch ~/.config/i3/next
fi
| true |
2a624ea204863ef41ec081d2cb1fd2a8eed6402b | Shell | Abhinav271828/summer21-ISS | /Assignments/Assignment1/Q2.sh | UTF-8 | 689 | 3.828125 | 4 | [] | no_license | #!/bin/bash
echo -n "" > q2_output.txt
read line < "$1"
for word in $line; do
if [[ "$word" =~ ^[0-3] ]]; then #starts with 0/1/2/3 => date
d=${word:0:2}
m=${word:3:2}
y=${word:6:4}
if [[ ($(date +%m) -lt $m) || (($(date +%m) -eq $m) && ($(date +%d) -lt $d))]]
then
echo $(( $(date +%Y) - $y - 1 )) >> q2_output.txt #birthday not over
else
echo $(( $(date +%Y) - $y )) >> q2_output.txt #birthday over
fi
else
echo -n "$word " >> q2_output.txt #not a date; name
fi
done
| true |
4f8d80c1aad2957d22fd45f3835c529326edaec3 | Shell | drupallerina/dotfiles | /profile | UTF-8 | 1,539 | 3.3125 | 3 | [] | no_license | # /etc/profile
#Typically, ~/.profile contains environment variable definitions, and might start some programs
#that you want to run once when you log in or for the whole session;
#Set our umask
umask 022
# Set our default path
PATH="/usr/local/sbin:/usr/local/bin:/usr/bin"
# if packer exists, add it to path
if [[ -d "$HOME/packer" ]]; then
PATH="/usr/local/sbin:/usr/local/bin:/usr/bin:$HOME/packer"
fi
export PATH
# Load profiles from /etc/profile.d
if test -d /etc/profile.d/; then
for profile in /etc/profile.d/*.sh; do
test -r "$profile" && . "$profile"
done
unset profile
fi
# Source global bash config
# (only for interactive bash shells: $PS1 set and running under bash)
if test "$PS1" && test "$BASH" && test -r /etc/bash.bashrc; then
. /etc/bash.bashrc
fi
# Termcap is outdated, old, and crusty, kill it.
unset TERMCAP
# Man is much better than us at figuring this out
unset MANPATH
BROWSER=/usr/bin/xdg-open
# ~/.profile: executed by the command interpreter for login shells.
# This file is not read by bash(1), if ~/.bash_profile or ~/.bash_login
# exists.
# see /usr/share/doc/bash/examples/startup-files for examples.
# the files are located in the bash-doc package.
# the default umask is set in /etc/profile; for setting the umask
# for ssh logins, install and configure the libpam-umask package.
#umask 022
# if running bash
if [ -n "$BASH_VERSION" ]; then
# include .bashrc if it exists
if [ -f "$HOME/.bashrc" ]; then
. "$HOME/.bashrc"
fi
fi
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/bin" ] ; then
PATH="$HOME/bin:$PATH"
fi
| true |
48c93e230871469559accd6e14194a04301ffa6c | Shell | pixelastic/oroshi | /scripts/bin/bats-test-watch | UTF-8 | 260 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env zsh
# Run tests and watch for one of my binaries
# Re-runs the bats suite whenever the binary or its test file changes.
local binaryName="$1"
watch-and-reload \
~/.oroshi/scripts/bin/$binaryName \
~/.oroshi/scripts/bin/__tests__/${binaryName}.bats \
"bats ~/.oroshi/scripts/bin/__tests__/${binaryName}.bats"
| true |
99f6a7718873e2580ba10eaa9b3bb67ff8b2d83b | Shell | CelsoFranco-dev/FairShip | /setUp.sh | UTF-8 | 2,145 | 2.90625 | 3 | [] | no_license |
version="2020-2"
if [ x"$SHIP_CVMFS_SETUP" != x"" ]
then
if [ x"$SHIP_CVMFS_SETUP" == x"$version" ]
then
echo "WARNING!"
echo "WARNING! Trying to setting up again the same environment."
else
echo "ERROR!"
echo "ERROR! Trying to set up a new environment on top of an old one."
echo "ERROR! This is not allowed, hance we will NOT set up the environment"
echo "ERROR! The solution is to exit the current shell and open a new one"
return
fi
fi
SHIP_CVMFS_SETUP=$version
# the source script set the PYTHONPATH to something internal.
# let's store the current python path to avoid breaking anything.
CURRENT_PYTHON_PATH=$(python -c "from __future__ import print_function; import sys; print(':'.join(sys.path)[1:]);")
PYTHONPATH="$PYTHONPATH:$CURRENT_PYTHON_PATH"
# let's source the environment with all the variables
WORK_DIR=/cvmfs/ship.cern.ch/SHiP-2020/2019/August/12/sw/ source /cvmfs/ship.cern.ch/SHiP-2020/2019/August/12/sw/slc7_x86-64/FairShip/latest/etc/profile.d/init.sh
ROOT_INCLUDE_PATH="$ROOT_INCLUDE_PATH:/cvmfs/ship.cern.ch/SHiP-2020/2019/August/12/sw/SOURCES/FairRoot/May30-ship/bdc279b900/base/"
ROOT_INCLUDE_PATH="$ROOT_INCLUDE_PATH:/cvmfs/ship.cern.ch/SHiP-2020/2019/August/12/sw/slc7_x86-64/boost/v1.64.0-alice1-1/include/"
# add aliBuild to the path, so that we can use it without installing it on the user machine
# we add it to the end of the path, so that if a local installation of aliBuild is present we will use that one
PATH="$PATH:/cvmfs/ship.cern.ch/alibuild/bin"
PYTHONPATH="$PYTHONPATH:/cvmfs/ship.cern.ch/alibuild"
# set the standard ShipDist directory as well
SHIPDIST="/cvmfs/ship.cern.ch/SHiP-2020/2019/August/12/shipdist/"
# fixup for genie
PATH="$PATH:$GENIE_ROOT/genie/bin"
LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$GENIE_ROOT/genie/lib"
ROOT_INCLUDE_PATH="$ROOT_INCLUDE_PATH:$GENIE_ROOT/genie/inc"
LHAPATH="$LHAPDF5_ROOT/share/lhapdf"
# fix the graphics driver issue
export LIBGL_DRIVERS_PATH="/cvmfs/sft.cern.ch/lcg/contrib/mesa/18.0.5/x86_64-centos7/lib64/dri"
| true |
8ce6e622f15ac72fb2ebcf3314de7bc7fe9270e6 | Shell | knutjelitto/LiFo | /Recipes/Core/mpfr | UTF-8 | 412 | 3.03125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Recipe metadata for the LiFo build system (no build/run dependencies;
# Supplies lists the source tarball to fetch).
Title="The GNU MPFR Library"
Home="http://www.mpfr.org/"
Name=mpfr
Version=4.0.1
BuildDeps=()
RunDeps=()
Supplies=(https://ftpmirror.gnu.org/gnu/$Name/$Name-$Version.tar.xz)
# Configure, build and install (including HTML docs) into /usr.
Build()
{
./configure \
--prefix=/usr \
--disable-nls \
--disable-static \
--enable-thread-safe
make
make html
make install
make install-html
}
| true |
319a3fa98b785e88d4043a8f6efd02b92041591b | Shell | lgoyal1987/CSI6203 | /Portfolio/week5/internetdownloader.sh | UTF-8 | 369 | 3.3125 | 3 | [] | no_license | #!/bin/bash
#Author: Lalit Goyal
# Connectivity check: a single ping to 8.8.8.8 ($? is 0 when the net is up).
ping -c1 8.8.8.8
if [ "$?" -eq 0 ]; then
echo "Internet is connected"
else
echo "Error!!!"
exit 1
fi
#Name of website is taken as input
read -p "Please type the URL of the website for downloading or exit" web
# Keep downloading URLs until the user types the literal word "exit".
while [ "$web" != "exit" ]; do
wget "$web"
read -p "Please type the URL of the website to download" web
done
echo "Thank You"
exit 0 | true |
aa4a1e78917ddd22c0b82ef0b59f2750213f6f1d | Shell | djibal/training-manual | /script/create-practice-repos | UTF-8 | 9,144 | 4.0625 | 4 | [
"CC-BY-4.0"
] | permissive | #!/usr/bin/env bash
#
# Create practice repos
#################################################################
# NOTE: You must have a personal access token (PAT)             #
# saved to your environment variables to use this script.       #
# We recommend a dedicated service account (e.g. githubteacher) #
#################################################################
# shellcheck source=script/shared_functions
source ./shared_functions
# shell variables
# Positional args: repo to read collaborators from, template repo name, and
# the prefix used for each generated practice repo.
collab_repo=$1
template_name=$2
practice_repo_name=$3
# Org/API endpoints plus public and GHES template + GitHub Pages URLs.
org_url="https://$ROOT_URL/$CLASS_ORG"
org_endpoint="https://$INSTANCE_URL/repos/$CLASS_ORG"
template_url="https://github.com/githubtraining/$template_name"
template_url_ghes="https://$ROOT_URL/$CLASS_ORG/$template_name"
template_pages_url="https://$CLASS_ORG.github.io/$template_name"
template_pages_url_ghes="https://$ROOT_URL/pages/$CLASS_ORG/$template_name"
# https://superuser.com/a/1415376
# Returns exit code 0 (success) if $1 is a reachable git remote url
# (ls-remote with an arbitrary ref name; we only care about connectivity).
repo_is_reachable() {
local repo_url=$1
git ls-remote "$repo_url" CHECK_GIT_REMOTE_URL_REACHABILITY &>/dev/null
}
# Decide which template repo / Pages URL to use: prefer a GHES-hosted copy
# when not running against github.com, fall back to the public template, and
# abort (print_error) when neither is reachable.  Mutates template_url and
# template_pages_url in place.
check_template_url() {
# if root url is not github.com
if [ "$ROOT_URL" != "github.com" ]; then
# if template can be found on GHES
if repo_is_reachable "$template_url_ghes"; then
# use template and GitHub Pages URL from GHES instead of the public template
template_url="$template_url_ghes"
template_pages_url="$template_pages_url_ghes"
# otherwise check if public template can be reached
elif repo_is_reachable "$template_url"; then
echo "Template not found on $ROOT_URL. Using public template instead: $template_url"
else # template could not be reached
print_error "Could not reach template repo. Please grab a copy from $template_url and upload it to your GHES instance."
fi
# if template cannot be reached
elif ! repo_is_reachable "$template_url"; then
print_error "Unable to reach template repo: $template_url"
fi
}
# Bare-clone the chosen template into a temp dir that is removed on exit.
# Sets the global $temp_dir used by the main flow below.
clone_template() {
# create a temporary directory for temporary files
temp_dir=$(mktemp -d)
# delete the temporary directory on script exit
trap 'rm -rf "$temp_dir"' EXIT
# attempt to clone template repo
git clone --bare "$template_url" "$temp_dir" >>log.out 2>&1 || {
# if git clone command failed
print_error "Failed to clone template repository."
exit 1
}
}
# Create one practice repo per collaborator.  When a repo already exists for
# a user, interactively ask (via `ask`) whether to delete and recreate it.
generate_repos() {
# Create practice repos based on collaborators in the inital class repo
# :? will display an error if $collaborators is empty or unset
for username in "${collaborators[@]:?}"; do
# if a practice repo has already been created for the user
if repo_is_reachable "$org_url/$practice_repo_name-$username"; then
# ask if the repository should be deleted and recreated
if ask "A $practice_repo_name repo already exists for $username.\nIs it OK to delete and recreate?" N; then
echo "Deleting $CLASS_ORG/$practice_repo_name-$username..."
# delete the existing practice repo
curl -s -S -i -u "$TOKEN_OWNER:$TEACHER_PAT" \
-X DELETE "$org_endpoint/$practice_repo_name-$username" >>log.out 2>&1
# create a new practice repo
create_repo "$username"
else
echo "OK. Skipping $username..."
fi
else
# create a new practice repository
create_repo "$username"
fi
done
}
# Mirror-push the current (template) repository to $CLASS_ORG/$repo_name.
# NOTE(review): relies on $repo_name from the calling function's scope, and
# the PAT is embedded in the push URL.
git_push() {
# Push to remote repository
echo "Pushing to $CLASS_ORG/$repo_name..."
git push --mirror "https://$TOKEN_OWNER:$TEACHER_PAT@$ROOT_URL/$CLASS_ORG/$repo_name" >>log.out 2>&1 || {
# if git push command failed
print_error "Failed to push commits to $CLASS_ORG/$repo_name."
exit 1
}
}
# Create and populate a single practice repo for student $1.
# "conflict-practice" repos get three deliberately-conflicting pull requests;
# "github-games" repos get two starter issues.  Both variants finish by
# inviting the student as an admin collaborator and setting the default
# branch to main.
create_repo() {
student=$1
local repo_name="$practice_repo_name-$student"
local repo_url="https://$ROOT_URL/$CLASS_ORG/$repo_name"
# Pages URL differs between github.com and GHES.
if [[ "$ROOT_URL" == "github.com" ]]; then
local pages_url="https://$CLASS_ORG.github.io/$repo_name"
else
local pages_url="https://$ROOT_URL/pages/$CLASS_ORG/$repo_name"
fi
case $practice_repo_name in
conflict-practice)
local repo_description="Let's resolve some conflicts."
# Create a new repo named $repo_name in $CLASS_ORG
echo "Creating $CLASS_ORG/$repo_name for $student..."
curl -s -S -i -u "$TOKEN_OWNER:$TEACHER_PAT" \
-d "{ \"name\": \"$repo_name\", \"description\": \"$repo_description\", \"private\": true, \"has_issues\": true, \"has_wiki\": false, \"has_downloads\": true}" \
-X POST "https://$INSTANCE_URL/orgs/$CLASS_ORG/repos" >>log.out 2>&1
git_push
# Create PRs for each branch
echo "Creating practice pull requests for $CLASS_ORG/$repo_name..."
{
curl -s -S -i -u "$TOKEN_OWNER:$TEACHER_PAT" \
-d "{ \"title\": \"Updates to game manual\", \"head\": \"manual\", \"base\": \"main\", \"body\": \"This pull request edits the wording of some of the language on the main page. It appears that it has also been edited on main, because there's a merge conflict. Please make sure that all of the words are the ones that you'd like to use, and that there aren't any lines of text missing.\n\nIf you need any help resolving this conflict, check out this video:\n\nhttps://user-images.githubusercontent.com/17183625/106972130-1a611700-6705-11eb-8858-a9ef429e2a60.mp4\"}" \
-X POST "$org_endpoint/$repo_name/pulls"
curl -s -S -i -u "$TOKEN_OWNER:$TEACHER_PAT" \
-d "{ \"title\": \"Minor CSS fixes\", \"head\": \"css-changes\", \"base\": \"main\", \"body\": \"This pull request makes some small changes to the CSS. Pick the CSS that you think makes the most sense given the history of the file on both branches and resolve the merge conflict.\n\nIf you need any help resolving this conflict, check out this video:\n\nhttps://user-images.githubusercontent.com/17183625/106972084-06b5b080-6705-11eb-8f57-d81559307822.mp4\"}" \
-X POST "$org_endpoint/$repo_name/pulls"
curl -s -S -i -u "$TOKEN_OWNER:$TEACHER_PAT" \
-d "{ \"title\": \"Update README\", \"head\": \"readme-update\", \"base\": \"main\", \"body\": \"This pull request updates the README.md. Resolve the merge conflicts and make sure the final version of the README.md is accurate and descriptive.\n\nIf you need any help resolving this conflict, check out this video:\n\nhttps://user-images.githubusercontent.com/17183625/106972095-0ddcbe80-6705-11eb-9cc8-6df603e22910.mp4\"}" \
-X POST "$org_endpoint/$repo_name/pulls"
} >>log.out 2>&1
;;
github-games)
local repo_description="A fun way to learn about git troubleshooting."
# Create a new practice repo named $repo_name in $CLASS_ORG
echo "Creating $CLASS_ORG/$repo_name for $student..."
curl -s -S -i -u "$TOKEN_OWNER:$TEACHER_PAT" \
-d "{ \"name\": \"$repo_name\", \"description\": \"$repo_description\", \"homepage\": \"$template_pages_url\", \"private\": true, \"has_issues\": true, \"has_wiki\": false, \"has_downloads\": true}" \
-X POST "https://${INSTANCE_URL}/orgs/${CLASS_ORG}/repos" >>log.out 2>&1
git_push
# Create issues for problems
echo "Creating practice issues for $CLASS_ORG/$repo_name..."
{
curl -s -S -i -u "$TOKEN_OWNER:$TEACHER_PAT" \
-d "{ \"title\": \"Game broken\", \"body\": \"When attempting to access this at ${pages_url}, I am getting a 404. This could be caused by a couple things:\n\n - GitHub pages needs to be enabled on main. You can fix this in the repository settings.\n- the index.html file is incorrectly named inde.html. We will fix this together in class.\n\n Can you please fix the first bullet, please?\"}" \
-X POST "https://$INSTANCE_URL/repos/$CLASS_ORG/$repo_name/issues"
curl -s -S -i -u "$TOKEN_OWNER:$TEACHER_PAT" \
-d "{ \"title\": \"URL in description and README broken\", \"body\": \"The URL in the repository description and README are pointing to ${CLASS_ORG}'s copy of the game instead of yours. \n\n Please fix both so they point to your copy of the game at ${pages_url}\"}" \
-X POST "https://$INSTANCE_URL/repos/$CLASS_ORG/$repo_name/issues"
} >>log.out 2>&1
;;
*)
print_error "Practice repo name \'$practice_repo_name\' not recognized."
exit 1
;;
esac
# Invite student as a collaborator
echo "Inviting $student as a collaborator to $CLASS_ORG/$repo_name..."
curl -s -S -i -u "$TOKEN_OWNER:$TEACHER_PAT" \
-d "{ \"permission\": \"admin\"}" \
-X PUT "$org_endpoint/$repo_name/collaborators/$student" >>log.out 2>&1
# Set default branch to main
curl -s -S -i -u "$TOKEN_OWNER:$TEACHER_PAT" -d "{\"default_branch\":\"main\"}" \
-X PATCH "$org_endpoint/$repo_name" >>log.out 2>&1
print_done "Repo URL: $repo_url"
}
# Main flow: gather collaborators, pick a reachable template, clone it, then
# fan out one practice repo per collaborator from inside the clone.
# get list of repo collaborators
get_collaborators "$collab_repo"
# check template url
check_template_url
# clone template repository
clone_template
# switch to temp directory and push it on the stack
pushd "$temp_dir" >>log.out 2>&1 || return
# generate a repo for each collaborator
generate_repos
# switch back to original directory
popd >>log.out 2>&1 || return
print_success "All $practice_repo_name repos can be found here: $org_url"
| true |
4a1cb7839ff6fd4054c58dc303ee22af1360b227 | Shell | RohanNagar/thunder | /scripts/ci/docker-integration-tests.sh | UTF-8 | 1,134 | 4.125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Run one docker-compose-backed k6 integration-test suite named by $1, then
# always dump container logs and tear the stack down before reporting the
# k6 result.
# Check arguments
if [ "$1" ]; then
echo "Correct number of arguments supplied."
else
echo "Incorrect number of arguments, please make sure you include TEST_NAME".
exit 1
fi
# Get program arguments
TEST_NAME=$1
# Navigate to top level thunder directory
cd "$(dirname "$0")/../.." || exit
echo "Working from directory: $(pwd)"
echo
# Start containers
echo "Starting docker-compose..."
docker-compose -f "scripts/tests/$TEST_NAME/docker-compose.yml" up -d
# Wait for containers to start
echo "Waiting 10 seconds for containers to come up..."
sleep 10
# Run tests
echo "Running k6 integration tests..."
k6 run "scripts/tests/$TEST_NAME/test.js"
TEST_EXIT_CODE=$?
# Clean up
echo "Done running tests. Printing Docker logs and shutting down containers..."
docker-compose -f "scripts/tests/$TEST_NAME/docker-compose.yml" logs
docker-compose -f "scripts/tests/$TEST_NAME/docker-compose.yml" down
# Determine success or failure. k6 should have exited with 0.
if [ "$TEST_EXIT_CODE" -eq 0 ]; then
echo "Successfully finished integration tests."
exit 0
else
echo "There are integration test failures."
exit 1
fi
| true |
bf2c0b3fafb4f46024f2d2e9fa941a18389a9d8d | Shell | alexcorre/dotfiles | /zsh/prompt.zsh | UTF-8 | 1,360 | 3.390625 | 3 | [
"MIT"
] | permissive | autoload colors && colors
# cheers, @ehrenmurdick
# http://github.com/ehrenmurdick/config/blob/master/zsh/prompt.zsh
# See where git actually is and use that
# (resolved once so the prompt helpers below reuse the same binary path)
GIT=$(which git)
# Bracketed branch name: green when `git status --porcelain` reports a clean
# work tree, red when there are any changes.
git_dirty() {
st=$($GIT status --porcelain 2>/dev/null)
if [[ $st == "" ]]
then
echo "[%{$fg_bold[green]%}$(git_prompt_info)%{$reset_color%}]"
else
echo "[%{$fg_bold[red]%}$(git_prompt_info)%{$reset_color%}]"
fi
}
# Current branch name; returns early (printing nothing) outside a git repo.
git_prompt_info () {
ref=$($GIT symbolic-ref HEAD 2>/dev/null) || return
# echo "(%{\e[0;33m%}${ref#refs/heads/}%{\e[0m%})"
echo "${ref#refs/heads/}"
}
# Commits on the current branch that are not on its upstream.
unpushed () {
$GIT cherry -v @{upstream} 2>/dev/null
}
# Emits " with unpushed " (magenta) when local commits have not been pushed,
# otherwise a plain space, so the prompt spacing stays consistent.
need_push () {
if [[ $(unpushed) == "" ]]
then
echo " "
else
echo " with %{$fg_bold[magenta]%}unpushed%{$reset_color%} "
fi
}
# "[ruby <version>]" segment when rbenv is installed, empty otherwise.
# NOTE(review): `if $(which rbenv &> /dev/null)` executes which's (empty)
# output; it works, but `if command -v rbenv ...` is the conventional form.
rb_prompt(){
if $(which rbenv &> /dev/null)
then
echo "[%{$fg_bold[yellow]%}ruby $(rbenv version | awk '{print $1}')%{$reset_color%}]"
else
echo ""
fi
}
directory_name(){
  # Bold-cyan current directory (with ~ abbreviation) for the prompt line.
  local dir_segment="%{$fg_bold[cyan]%}%~/%\%{$reset_color%}"
  echo "$dir_segment"
}
host_name () {
  # Dim "(user@host):" prefix segment for the prompt line.
  local host_segment="%{$fg_bold[black]%}(%n@%m):%{$reset_color%}"
  echo "$host_segment"
}
# gcp_prompt() {
# if $(which kubectl &> /dev/null)
# then
# echo "[%{$fg_bold[grey]%}k8s:$(kubectl config current-context)%{$reset_color%}]"
# else
# echo ""
# fi
# }
export PROMPT=$'\n$(host_name)$(directory_name) $(git_dirty)$(need_push)\n$ '
export PROMPT_EOL_MARK=""
| true |
7cc76df8269c2c828b933a3244fb0bff34061e5d | Shell | hilaryweller0/splitAdvection | /revision/makeFigs.sh | UTF-8 | 857 | 2.734375 | 3 | [] | no_license | #!/bin/bash -e
# Export each LyX figure to PDF, crop it into ../figures/, and open a
# preview in the background.
cd makeFigs
for file in stencils sbrc1 sbrc10 sbr_dx sbr_dt overMountains overMountains_dx deform_init deform deform_dx deform_dt; do
    echo $file
    lyx --export pdflatex -f $file
    pdflatex $file
    pdfcrop $file.pdf ../figures/$file.pdf
    rm $file.pdf
    gv ../figures/$file.pdf &
done
cd ..
# Pull in a pre-made figure (-u: copy only if newer than destination).
cp -u HilarysGraphics/solidBodyRotationOnPlane_nonOrthog_50x50_analytic_constant_mesh.pdf figures
# Rasterise the figures that are used as PNGs, then drop their PDFs.
eps2png figures/deform.pdf
eps2png figures/sbrc10.pdf
eps2png figures/sbrc1.pdf
eps2png figures/overMountains.pdf
rm figures/deform.pdf figures/sbrc10.pdf figures/sbrc1.pdf figures/overMountains.pdf
#zip figures.zip stencil.pdf solidBodyRotationOnPlane_nonOrthog_50x50_analytic_constant_mesh.pdf sbrc1.png sbrc10.png sbr_dx.pdf sbr_dt.pdf overMountains.png overMountains_dx.pdf deform_init.pdf deform.png deform_dx.pdf deform_dt.pdf
| true |
4d5b1dccdfac62f747484c52991892cebf7864b8 | Shell | pseudomonas0000/dbnsfp-hg19 | /setup-script.sh | UTF-8 | 4,089 | 2.9375 | 3 | [] | no_license | #!/usr/bin/bash
# Download dbNSFP
# v4.1 seems to be the latest release, but it appeared to require X11(?),
# so download v4.0c instead; double-check the URL on the dbNSFP site.
# The files are large because of the many fields, so a version other than
# v4.0 might be acceptable (but watch out for bugs).
# Download from the SoftGenetics FTP with wget; the first attempt failed
# at login. Roughly 21 GB; about 14 hours on the campus link
# (300-500 KB/s), begin 11:40-01:30.
wget ftp://dbnsfp:dbnsfp@dbnsfp.softgenetics.com/dbNSFP4.0c.zip
# Decompression takes as long as the download, so it may be worth
# keeping as a script.
# Files:
#  dbNSFP4.0c_variant.chr<#>.gz - gzipped dbNSFP variant database files by chromosomes
#  dbNSFP4.0_gene.gz - gzipped dbNSFP gene database file
#  dbNSFP4.0_gene.complete.gz - gzipped dbNSFP gene database file with complete interaction columns
#  dbscSNV1.1.chr<#> - scSNV database v1.1 files by chromosomes
#  dbNSFP4.0c.readme.txt - this file
#  search_dbNSFP40c.class - companion Java program for searching dbNSFP4.0c
#  search_dbNSFP40c.java - the source code of the java program
#  LICENSE.txt - the license for using the source code
#  search_dbNSFP40c.readme.pdf - README file for search_dbNSFP40c.class
#  tryhg19.in - an example input file with hg19 genome positions
#  tryhg18.in - an example input file with hg18 genome positions
#  tryhg38.in - an example input file with hg38 genome positions
#  try.vcf - an example of vcf input file
# unzip and uncompress
unzip dbNSFP4.0c.zip;ls -1 *chr*gz |xargs -I {} bgzip -d -@ 4 {}
# unzip dbNSFP4.0c.zip
# # bgzip decompression takes less than an hour
# bash bgzip-uncompress.sh
# Building dbNSFP for hg19 using dbNSFP 3.X
# NOTE(review): ${version} is never set in this script — presumably
# exported by the caller (e.g. version=4.0c); confirm before running.
cat dbNSFP${version}_variant.chr* | perl dbNSFP_sort.pl 7 8 > dbNSFP${version}_hg19.txt
# The perl script died partway through (Killed: 9); apparently the
# process was killed for running out of memory (macOS, 8 GB RAM).
# Retry on a machine with 64 GB of memory.
# Check how the snpsift dbnsfp command annotates.
# Create a dbNSFP.txt file with chr1 only and try running it.
bgzip dbNSFP4.0c_hg19_chr1.txt -@ 4
tabix -s 1 -b 2 -e 2 dbNSFP4.0c_hg19_chr1.txt.gz
# Error: "Failed to parse TBX_GENERIC, was wrong -p [type] used?"
# Probably because the chr/position columns contain "." values.
# Use a perl script to drop rows whose chr or position column is ".",
# then compress and index.
# Annotate, selecting only the required fields.
perl remove-hg38.pl dbNSFP4.0c_hg19_chr1.txt
bgzip -@ 4 dbNSFP4.0c_hg19_chr1.hg38-remove.txt
tabix -s 1 -b 2 -e 2 dbNSFP4.0c_hg19_chr1.hg38-remove.txt.gz
snpsift dbnsfp -db dbNSFP4.0c_hg19_chr1.hg38-remove.txt.gz ./test/8.3kjpn.hctd.vcf \
-f aaref,aaalt,genename,cds_strand,aapos,SIFT_score,SIFT_converted_rankscore,SIFT_pred,SIFT4G_score,\
LRT_score,LRT_converted_rankscore,LRT_pred,LRT_Omega,MutationTaster_score,MutationTaster_converted_rankscore,\
MutationTaster_pred,MutationTaster_model,MutationTaster_AAE,MutationAssessor_score,MutationAssessor_rankscore,MutationAssessor_pred,\
FATHMM_score,FATHMM_converted_rankscore,FATHMM_pred,PROVEAN_score,PROVEAN_converted_rankscore,PROVEAN_pred,\
MetaSVM_score,MetaSVM_rankscore,MetaSVM_pred,MetaLR_score,MetaLR_rankscore,MetaLR_pred,M-CAP_score,M-CAP_rankscore,M-CAP_pred,\
fathmm-MKL_coding_score,fathmm-MKL_coding_rankscore,fathmm-MKL_coding_pred,fathmm-MKL_coding_group,GERP++_NR,GERP++_RS,GERP++_RS_rankscore,\
phyloP100way_vertebrate,phyloP100way_vertebrate_rankscore,phyloP30way_mammalian,phyloP30way_mammalian_rankscore,\
phastCons100way_vertebrate,phastCons100way_vertebrate_rankscore,phastCons30way_mammalian,phastCons30way_mammalian_rankscore,\
SiPhy_29way_pi,SiPhy_29way_logOdds,SiPhy_29way_logOdds_rankscore,Interpro_domain \
>test-dbnsfp-anotated.vcf | true |
606aa0a4d61a46d1b33aa344c89b8e3b6fd607cb | Shell | hughesjr/sig-core-t_functional | /tests/p_libvirt/0-install_libvirt.sh | UTF-8 | 239 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Author: Athmane Madjoudj <athmanem@gmail.com>
# t_GetArch / t_Log / t_InstallPackage come from the t_functional test
# harness that runs this file.
if (t_GetArch | grep -qE 'aarch64')
then
    echo "Package not included with AArch64, skipping"
    exit 0
fi
t_Log "Running $0 - installing libvirt"
t_InstallPackage libvirt
# Restart so the daemon is up for the follow-on libvirt tests.
service libvirtd restart
| true |
6928ffad2032a48bc767184532adc1f645d2dc2d | Shell | Ahnaf05/Skmer_Phylogeny_Scripts | /script_metin.sh | UTF-8 | 4,733 | 3.21875 | 3 | [] | no_license | #!/bin/bash
#declare -a gene_array
# Global state shared by the functions and loops below:
#   file_name - genome file basenames, in `ls` order
#   tmp       - scratch matrix filled by read_matrix from skmer output
#   d_skmer   - baseline skmer distance matrix (unmodified genomes)
#   x_*       - per-substitution distance differences (GTR estimates)
declare -a file_name
declare -A tmp_GTR
declare -A tmp
declare -A x_AC
declare -A x_AG
declare -A x_AT
declare -A x_CG
declare -A x_CT
declare -A x_GT
declare -A d_skmer
count=0
gtr_index_count=0
GTRfile="GTRMatrix.txt"
mkdir -p "save"
# -f: start from a clean output file without complaining when no earlier
# run left one behind (plain `rm` printed a spurious error on first run).
rm -f "$GTRfile"
# Fill the global associative array `tmp` from whitespace-separated
# matrix text on stdin: tmp[row,col] holds the token at that position.
# Every input line is stored, including the skmer header row (row 0) and
# the sample-label column (col 0); callers skip those by starting at 1.
read_matrix() {
    local row=0
    local col line token
    while read -r line; do
        col=0
        # Rely on word splitting to break the line into tokens.
        for token in $line; do
            tmp[$row,$col]="$token"
            col=$((col+1))
        done
        row=$((row+1))
    done
}
rewrite()
{
    # Undo the in-place sed edits: restore every genome in ref_dir/ from
    # the pristine copy the main script stashed in save/.
    # Iterate with a glob instead of parsing `ls` output, which breaks
    # on names containing whitespace or glob characters.
    local path name
    for path in ref_dir/*; do
        [ -e "$path" ] || continue  # empty dir: the glob stays literal
        name=${path##*/}
        cp "save/$name" "ref_dir/$name"
    done
}
copy_matrix()
{
    # Copy the global `tmp` matrix into the associative array whose NAME
    # is passed as $1 (e.g. `copy_matrix d_skmer`). A nameref is needed:
    # the original `$1[$i,$j]=...` expanded into a command word rather
    # than an assignment and always failed with "command not found".
    local -n __copy_dest=$1
    local i j
    for((i=0;i<count;i++))
    do
        for((j=0;j<count;j++))
        do
            __copy_dest[$i,$j]=${tmp[$i,$j]}
        done
    done
}
#read_matrix<ref-dist-mat.txt
#echo ${tmp[1,2]}
# Stash a pristine copy of every genome (the sed passes below edit the
# files in ref_dir in place) and remember the file names.
for filename in `ls -1 ref_dir`
do
#gene_array[count]=$(<"ref_dir/$filename")
cp "ref_dir/$filename" "save/$filename"
file_name[count]=$filename
#echo "${gene_array[count]}"
count=$((count+1))
done
# Baseline: skmer distances between the unmodified genomes; skmer
# writes its matrix to ref-dist-mat.txt.
skmer reference ref_dir
read_matrix<ref-dist-mat.txt
# Keep a copy in d_skmer. Indices run to <=count because row/col 0 of
# the skmer matrix hold the header/labels; unset cells copy as empty.
for((i=0;i<=count;i++))
do
for((j=0;j<=count;j++))
do
d_skmer[$i,$j]=${tmp[$i,$j]}
done
done
# For each base substitution, collapse one base into another in every
# genome, rerun skmer, and record per sample pair how much the distance
# changed relative to the baseline in d_skmer; the difference feeds the
# corresponding GTR rate estimate (x_AC ... x_GT). Every pass follows
# the same pattern: sed-collapse in place -> skmer -> read the new
# matrix into tmp -> x_XY[i,j] = d_skmer[i,j] - tmp[i,j] (indices start
# at 1 to skip the skmer header row / label column) -> rewrite()
# restores the pristine genomes from save/.
####Calculate x_AC#####
### Replace G with T###
for i in `ls -1 ref_dir`
do
sed -i 's/G/T/g' "ref_dir/$i"
#cat "ref_dir/$i"
done
skmer reference ref_dir
read_matrix<ref-dist-mat.txt
for((i=1;i<=count;i++))
do
for((j=1;j<=count;j++))
do
tmp1=${d_skmer[$i,$j]}
tmp2=${tmp[$i,$j]}
# awk performs the floating-point subtraction bash cannot do natively.
x_AC[$i,$j]=`awk "BEGIN {print $tmp1-$tmp2; exit}"`
done
done
rewrite
####Calculate x_AG#####
### Replace C with T###
for i in `ls -1 ref_dir`
do
sed -i 's/C/T/g' "ref_dir/$i"
#cat "ref_dir/$i"
done
skmer reference ref_dir
read_matrix<ref-dist-mat.txt
for((i=1;i<=count;i++))
do
for((j=1;j<=count;j++))
do
tmp1=${d_skmer[$i,$j]}
tmp2=${tmp[$i,$j]}
x_AG[$i,$j]=`awk "BEGIN {print $tmp1-$tmp2; exit}"`
done
done
rewrite
####Calculate x_AT#####
### Replace C with G###
for i in `ls -1 ref_dir`
do
sed -i 's/C/G/g' "ref_dir/$i"
#cat "ref_dir/$i"
done
skmer reference ref_dir
read_matrix<ref-dist-mat.txt
for((i=1;i<=count;i++))
do
for((j=1;j<=count;j++))
do
tmp1=${d_skmer[$i,$j]}
tmp2=${tmp[$i,$j]}
x_AT[$i,$j]=`awk "BEGIN {print $tmp1-$tmp2; exit}"`
done
done
rewrite
####Calculate x_CG#####
### Replace A with T###
for i in `ls -1 ref_dir`
do
sed -i 's/A/T/g' "ref_dir/$i"
#cat "ref_dir/$i"
done
skmer reference ref_dir
read_matrix<ref-dist-mat.txt
for((i=1;i<=count;i++))
do
for((j=1;j<=count;j++))
do
tmp1=${d_skmer[$i,$j]}
tmp2=${tmp[$i,$j]}
x_CG[$i,$j]=`awk "BEGIN {print $tmp1-$tmp2; exit}"`
done
done
rewrite
####Calculate x_CT#####
### Replace A with G###
for i in `ls -1 ref_dir`
do
sed -i 's/A/G/g' "ref_dir/$i"
#cat "ref_dir/$i"
done
skmer reference ref_dir
read_matrix<ref-dist-mat.txt
for((i=1;i<=count;i++))
do
for((j=1;j<=count;j++))
do
tmp1=${d_skmer[$i,$j]}
tmp2=${tmp[$i,$j]}
x_CT[$i,$j]=`awk "BEGIN {print $tmp1-$tmp2; exit}"`
done
done
rewrite
####Calculate x_GT#####
### Replace A with C###
for i in `ls -1 ref_dir`
do
sed -i 's/A/C/g' "ref_dir/$i"
#cat "ref_dir/$i"
done
skmer reference ref_dir
read_matrix<ref-dist-mat.txt
for((i=1;i<=count;i++))
do
for((j=1;j<=count;j++))
do
tmp1=${d_skmer[$i,$j]}
tmp2=${tmp[$i,$j]}
x_GT[$i,$j]=`awk "BEGIN {print $tmp1-$tmp2; exit}"`
done
done
rewrite
# Emit one 4x4 GTR-style matrix per unordered sample pair (i<j) into
# $GTRfile. idx1/idx2 map the 1-based matrix indices back to 0-based
# file_name entries. `echo -e "... \c"` suppresses the trailing newline
# (bash's echo -e supports \c) so each matrix row is written cell by
# cell; tmp_GTR mirrors the most recently written matrix in memory.
for((i=1;i<=count-1;i++))
do
for((j=i+1;j<=count;j++))
do
idx1=$((i-1))
idx2=$((j-1))
echo "GTR ARRAY $i (${file_name[idx1]}),$j (${file_name[idx2]})">>$GTRfile
echo -e "\n">>$GTRfile
echo -e "0 \c">>$GTRfile
echo -e "${x_AC[$i,$j]} \c">>$GTRfile
echo -e "${x_AG[$i,$j]} \c">>$GTRfile
echo -e "${x_AT[$i,$j]} \c">>$GTRfile
tmp_GTR[0,0]=0
tmp_GTR[0,1]=${x_AC[$i,$j]}
tmp_GTR[0,2]=${x_AG[$i,$j]}
tmp_GTR[0,3]=${x_AT[$i,$j]}
echo -e "\n">>$GTRfile
echo -e "${x_AC[$i,$j]} \c">>$GTRfile
echo -e "0 \c">>$GTRfile
echo -e "${x_CG[$i,$j]} \c">>$GTRfile
echo -e "${x_CT[$i,$j]} \c">>$GTRfile
tmp_GTR[1,0]=${x_AC[$i,$j]}
tmp_GTR[1,1]=0
tmp_GTR[1,2]=${x_CG[$i,$j]}
tmp_GTR[1,3]=${x_CT[$i,$j]}
echo -e "\n">>$GTRfile
echo -e "${x_AG[$i,$j]} \c">>$GTRfile
echo -e "${x_CG[$i,$j]} \c">>$GTRfile
echo -e "0 \c">>$GTRfile
echo -e "${x_GT[$i,$j]} \c">>$GTRfile
tmp_GTR[2,0]=${x_AG[$i,$j]}
tmp_GTR[2,1]=${x_CG[$i,$j]}
tmp_GTR[2,2]=0
tmp_GTR[2,3]=${x_GT[$i,$j]}
echo -e "\n">>$GTRfile
echo -e "${x_AT[$i,$j]} \c">>$GTRfile
echo -e "${x_CT[$i,$j]} \c">>$GTRfile
echo -e "${x_GT[$i,$j]} \c">>$GTRfile
echo -e "0 \c">>$GTRfile
tmp_GTR[3,0]=${x_AT[$i,$j]}
tmp_GTR[3,1]=${x_CT[$i,$j]}
tmp_GTR[3,2]=${x_GT[$i,$j]}
tmp_GTR[3,3]=0
echo -e "\n">>$GTRfile
gtr_index_count=$((gtr_index_count+1))
done
done
| true |
ee2bde34ad03bb642a9931077530bf43395eea79 | Shell | DaraUng/NTI-320 | /build-a | UTF-8 | 1,628 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Course exercise: build the helloworld RPM from source and install it.
yum -y install rpm-build make gcc git
# Gcc is a compiler system produced by the GNU Project supporting various programming languages.
mkdir -p /root/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
# Create the rpmbuild tree with its standard subdirectories:
# BUILD, RPMS, SOURCES, SPECS, SRPMS.
cd ~/
echo '%_topdir %(echo $HOME)/rpmbuild' > ~/.rpmmacros #Point rpmbuild's %_topdir at ~/rpmbuild via .rpmmacros.
cd ~/rpmbuild/SOURCES
git clone https://github.com/nic-instruction/NTI-320.git #Clone the course repo as the source. (Might need to change for my own source)
rm -rf nrpe/
#remove nrpe
cp NTI-320/rpm-info/hello_world_from_source/helloworld-0.1.tar.gz .
cp NTI-320/rpm-info/hello_world_from_source/helloworld.sh .
cp NTI-320/rpm-info/hello_world_from_source/hello.spec .
mv hello.spec ../SPECS/
# NOTE(review): the cwd here is ~/rpmbuild/SOURCES, so the relative path
# SPECS/hello.spec would not resolve from this directory — confirm this
# step is run from ~/rpmbuild.
rpmbuild -v -bb --clean SPECS/hello.spec
#Copy the tarball and script into SOURCES, move the spec into ../SPECS/, then build the binary RPM.
#copy the helloworld-0.1.tar.gz, helloworld.sh, hello.spec and move it to specs.
#ls -l RPMS/x86_64/helloworld-0.1-1.el7.x86_64.rpm to see if the helloworld file exists.
yum -y install RPMS/x86_64/helloworld-0.1-1.el7.x86_64.rpm
#install helloworld
#ls -l /etc/yum.repos.d/
#ls to see the repos
vim /etc/yum.repos.d/epel.repo
cp /root/rpmbuild/RPMS/x86_64/helloworld-0.1-1.el7.x86_64.rpm /home/dara_ung/
chown dara_ung /home/dara_ung/helloworld-0.1-1.el7.x86_64.rpm
#Copy the x86_64 RPM into the user's home and hand ownership to dara_ung.
| true |
0b7a99265133c3ae9018d90d35d7618c4cf7c8b4 | Shell | jmmcatee/redoctober | /testdata/ro-ssh-agent-demo.sh | UTF-8 | 1,315 | 2.84375 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | export RO_USER=alice
# Demo walkthrough: encrypt an SSH private key with Red October and
# serve it through the ro ssh-agent. Meant to be pasted step by step
# (some commands occupy dedicated terminals), not run as one script.
export RO_PASS=alice
go build github.com/cloudflare/redoctober/
go build github.com/cloudflare/redoctober/cmd/ro/
# Start Papa RO using a systemd socket (On dedicated terminal)
systemd-socket-activate -l 443 \
    ./redoctober -systemdfds -vaultpath testdata/diskrecord.json \
    -certs testdata/server.crt -keys testdata/server.pem
# Add admin and users (See README.md)
# Sign on enough delegates
curl --cacert testdata/server.crt https://localhost:443/delegate \
    -d '{"Name":"alice","Password":"alice","Time":"2h34m","Uses":10}'
curl --cacert testdata/server.crt https://localhost:443/delegate \
    -d '{"Name":"bob","Password":"bob","Time":"2h34m","Uses":10}'
# Consign a private key to Papa RO
./ro -server localhost:443 -ca testdata/server.crt \
    -minUsers 2 -owners alice,bob -usages ssh-sign-with \
    -in id_ed25519 -out id_ed25519.encrypted encrypt
# Start RO SSH Agent (On dedicated terminal)
./ro -server localhost:443 -ca testdata/server.crt ssh-agent
# Set the SSH_AUTH_SOCK Environment Variable
# ([random] is a placeholder: substitute the tmp dir the agent printed)
export SSH_AUTH_SOCK=/tmp/ro_ssh_[random]/roagent.sock
# Add the encrypted key to the RO SSH Agent
./ro -in testdata/ssh_key.encrypted -pubkey testdata/ssh_key.pub ssh-add
# List public keys available through RO SSH Agent
ssh-add -L
# Profit!
| true |
d86e78b89e768ea58a3b8f6b86296b2785f3b5ed | Shell | Jay4C/Bash_Scripts | /install_openssh.sh | UTF-8 | 721 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# follow these steps : https://devconnected.com/how-to-install-and-enable-ssh-server-on-ubuntu-20-04/
# Interactive walkthrough for installing and hardening the OpenSSH
# server on Ubuntu; intended to be run step by step, not as one batch.
sudo -l
groups
ssh -V
sudo apt-get update
sudo apt-get install openssh-server
sudo systemctl status sshd
# Check that something is listening on port 22. (The original pattern
# `grep 22^` matched the literal text "22^" and could never succeed.)
netstat -tulpn | grep ':22 '
sudo ufw allow ssh
sudo ufw status
sudo systemctl list-unit-files | grep enabled | grep ssh
sudo systemctl enable ssh
ll /etc/ssh/
# Edit sshd_config (e.g. change Port to 2222), then restart the daemon.
sudo nano /etc/ssh/sshd_config
sudo systemctl restart sshd
sudo systemctl status sshd
netstat -tulpn | grep 2222
# change the port, the username, the ip address
# NOTE: the next line uses <placeholders>; substitute real values before
# running (as written the shell treats the angle brackets as redirections).
ssh -p <port> <username>@<ip_address>
sudo ifconfig
# change the username
ssh -p 2222 <user>@127.0.0.1
logout
sudo systemctl stop sshd
sudo systemctl status sshd | true |
b20b84a78e88eb60e77d80dd9ca6b6e6de57c160 | Shell | despegar/check-cis | /ubuntu-18.04/4_2_2_1.sh | UTF-8 | 314 | 2.828125 | 3 | [] | no_license | cis_test_name="Ensure journald is configured to send logs to rsyslog (Scored)"
# Metadata consumed by the check-cis runner: applicable profiles and the
# server/workstation profile levels (both Level 1 for this benchmark).
cis_test_pa=(server workstation)
cis_test_spl=1
cis_test_wpl=1
# CIS Ubuntu 18.04 4.2.2.1: return 0 (pass) only when journald.conf has
# an uncommented ForwardToSyslog setting with the value "yes"; 1 otherwise.
function cis_test_run()
{
    cmd=$(grep -E -i "^\s*ForwardToSyslog" /etc/systemd/journald.conf | grep "ForwardToSyslog=yes")
    if [ -n "$cmd" ]; then
        return 0
    fi
    return 1
} | true |
151229709fc2002f6744422834fe852ca6f027c5 | Shell | ppc64le/build-scripts | /g/go-control-plane/go-control-plane_UBI_8.5.sh | UTF-8 | 2,004 | 3.3125 | 3 | [
"Apache-2.0"
] | permissive | # -----------------------------------------------------------------------------
#
# Package : github.com/envoyproxy/go-control-plane
# Version : v0.9.0, v0.9.7
# Source repo : https://github.com/envoyproxy/go-control-plane.git
# Tested on : UBI 8.5
# Language : GO
# Travis-Check : True
# Script License: Apache License, Version 2 or later
# Maintainer : Amit Baheti (aramswar@in.ibm.com)
#
# Disclaimer : This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
set -e
PACKAGE_NAME=github.com/envoyproxy/go-control-plane
#Setting the default version v0.9.7
PACKAGE_VERSION=${1:-v0.9.7}
PACKAGE_PATH=https://github.com/envoyproxy/go-control-plane.git
#Install golang if not found
if ! command -v go &> /dev/null
then
    yum install -y golang
fi
mkdir -p /root/output
OS_NAME=$(cat /etc/os-release | grep ^PRETTY_NAME | cut -d= -f2)
export GOPATH="$(go env GOPATH)"
export PATH=$GOPATH/bin:$PATH
export GO111MODULE=on
echo "Building $PACKAGE_PATH with $PACKAGE_VERSION"
# Fetch the module into the module cache, then run its tests in place.
# NOTE(review): when `go get` fails, the script falls through and exits 0
# without writing test_success — presumably the pipeline treats the
# missing marker file as a failure; confirm.
if go get -d -t $PACKAGE_NAME@$PACKAGE_VERSION; then
    cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME@$PACKAGE_VERSION)
    echo "Testing $PACKAGE_PATH with $PACKAGE_VERSION"
    # Ensure go.mod file exists
    [ ! -f go.mod ] && go mod init
    if ! go test ./...; then
        exit 1
    else
        # Record the result where the build pipeline collects it.
        echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
        echo "$PACKAGE_VERSION $PACKAGE_NAME" > /root/output/test_success
        echo "$PACKAGE_NAME | $PACKAGE_VERSION | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /root/output/version_tracker
        exit 0
    fi
fi
| true |
4b3a30aa73132a835bfc2f8162fb10a477a5f8a1 | Shell | bbxyard/bbxyard | /yard/grammar/shell/trap/trap_err.sh | UTF-8 | 616 | 3.203125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# trap error
# Installed via `trap ... ERR` below: reports the current line, its own
# name, and the exit status ($?) of the command that just failed.
function error_handler() {
    local last_error=$?
    printf '%s/%s Hi, I am the error handler and lasterror is: %s\n' "$LINENO" "${FUNCNAME[0]}" "$last_error"
}
# Print a timestamped greeting and propagate the numeric argument as the
# function's exit status (used below to trigger the ERR trap on demand).
function func1() {
    printf '%s %s hallo %s\n' "$(date +'%Y-%m-%d %H:%M:%S')" "${FUNCNAME[0]}" "$1"
    return "$1"
}
# Deliberately provoke the ERR trap from inside a function: the
# `mkdir dir1/dir2` fails (parent dir1/ does not exist and there is no
# -p), its status is captured in RET, and re-raised via `return` so the
# caller's ERR trap fires too.
function func2() {
    echo "*** $(date +'%Y-%m-%d %H:%M:%S') ${FUNCNAME[0]} hallo $1"
    echo "  ==> try to create dir and raise an error"
    mkdir dir1/dir2
    RET=$?
    echo "  ==> error must be printed. 实质处理,可以中断处理"
    echo "*** ${FUNCNAME[0]} done ***"
    return $RET
}
# Fire error_handler after every command that exits non-zero.
trap error_handler ERR
func1 0
func1 2
func1 22
# func2's failed mkdir triggers the trap from inside a function as well.
func2
func1 0
# Return values are truncated to 8 bits, so 511 is reported as 255.
func1 511
# mkdir /bin/haha
| true |
cbeb024706bec7f69baff9b7963ac87aad61a1ef | Shell | mateuszkiebala/master_thesis | /sources/src/spark/src/test/distributed/semi_join/run_semi_join.sh | UTF-8 | 1,696 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env bash
OKGREEN='\033[0;32m'
WARNING='\033[0;31m'
ORANGE='\033[0;35m'
CYAN='\033[1;36m'
COLOR_OFF='\033[0m'
echo "===== Creating semi_join user on HDFS ====="
hdfs dfs -mkdir -p /user/semi_join
USER_PATH="/user/semi_join"
INPUT_R_TEST="input_R"
INPUT_T_TEST="input_T"
INPUT_R_HDFS="$USER_PATH/$INPUT_R_TEST"
INPUT_T_HDFS="$USER_PATH/$INPUT_T_TEST"
echo "===== Creating input directory on HDFS ====="
hdfs dfs -rm -r $INPUT_R_HDFS
hdfs dfs -rm -r $INPUT_T_HDFS
hdfs dfs -mkdir $INPUT_R_HDFS
hdfs dfs -mkdir $INPUT_T_HDFS
echo "===== Copying test input directories to HDFS ====="
hdfs dfs -put $INPUT_R_TEST/* $INPUT_R_HDFS
hdfs dfs -put $INPUT_T_TEST/* $INPUT_T_HDFS
HDFS="hdfs://192.168.0.199:9000"
OUTPUT_HDFS="$USER_PATH/output"
hdfs dfs -rm -r $OUTPUT_HDFS
spark-submit --class minimal_algorithms.spark.examples.semi_join.ExampleSemiJoin --master yarn ../../../../target/spark-1.0.0-SNAPSHOT.jar 10 "$HDFS/$INPUT_R_HDFS" "$HDFS/$INPUT_T_HDFS" "$HDFS/$OUTPUT_HDFS"
# Fetch the job output from HDFS into tmp/ and compare each part-* file
# with the matching expected file output_<n>.txt; a colored PASS/FAIL
# line per file plus a summary is appended to ./result.
run() {
  LOGS="result"
  CORRECT_OUT_DIR="output"
  mkdir -p tmp
  rm -rf "tmp/$CORRECT_OUT_DIR"
  hdfs dfs -get $OUTPUT_HDFS tmp
  rm -rf $LOGS
  PASSED=0
  ALL=0
  for file in tmp/$CORRECT_OUT_DIR/part-*
  do
    correct_output="$CORRECT_OUT_DIR/output_$ALL.txt"
    # NOTE(review): $file is used as the printf FORMAT string, so a '%'
    # in a file name would be misinterpreted — harmless for part-*
    # names, but printf '%s' would be safer.
    printf "$file <-> $correct_output" >> $LOGS
    # -B/-b: ignore blank-line and whitespace-only differences.
    if diff -Bb -c $file $correct_output >/dev/null ; then
      PASSED=$((PASSED+1))
      printf "${OKGREEN} OK ${COLOR_OFF}\n" >> $LOGS
    else
      printf "${WARNING} FAIL ${COLOR_OFF}\n" >> $LOGS
    fi
    ALL=$((ALL+1))
  done
  printf "RESULT: ${CYAN}$PASSED${COLOR_OFF} / ${ORANGE}$ALL${COLOR_OFF}\n" >> $LOGS
}
run
| true |
78f0e1a5269e6f66b65dd5a93952b97f0789b2d1 | Shell | daiaji/filebrowser-get | /get.sh | UTF-8 | 856 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env bash
# Bash3 Boilerplate. Copyright (c) 2014, kvz.io
set -o errexit
set -o pipefail
set -o nounset

# Optional extra wget arguments (e.g. a proxy option). A default is
# required: with `nounset` active, an undefined $WGETPROXY aborted the
# script on its first use below.
WGETPROXY="${WGETPROXY:-}"

# Installing into /usr/local/bin requires root.
if [[ $(id -u) -ne 0 ]]; then
  echo "This script must be run as root"
  exit 1
fi

BINNAME=filebrowser
BINPATH=/usr/local/bin
REPOSITORYNAME=filebrowser/filebrowser

# Map `uname -m` machine names onto the release-asset suffixes used by
# the filebrowser project.
OSARCH=$(uname -m)
case $OSARCH in
x86_64)
  BINTAG=linux-amd64
  ;;
i*86)
  BINTAG=linux-386
  ;;
arm64|aarch64)
  BINTAG=linux-arm64
  ;;
arm*)
  BINTAG=linux-arm
  ;;
*)
  echo "unsupported OSARCH: $OSARCH"
  exit 1
  ;;
esac

# Ask the GitHub API for the latest release, pick the asset matching
# this architecture, stream it down, and unpack just the binary.
# $WGETPROXY is deliberately unquoted so an empty value vanishes and a
# multi-word value splits into separate arguments.
# NOTE: -C must precede the member name — GNU tar applies -C
# positionally, so the original order (member name first) extracted the
# binary into the current directory instead of $BINPATH.
wget $WGETPROXY -qO- "https://api.github.com/repos/$REPOSITORYNAME/releases/latest" \
  | grep browser_download_url | grep "$BINTAG" | cut -d '"' -f 4 \
  | wget $WGETPROXY --no-verbose -i- -O- | tar -xzf - -C "$BINPATH" "$BINNAME"
chmod 0755 "$BINPATH/$BINNAME"
| true |
46afb8f2d369a909f921b623bb37f2f0c11a4d13 | Shell | yzzhanga/thingsboard | /msa/tb/docker-cassandra/start-db.sh | UTF-8 | 1,627 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright © 2016-2020 The Thingsboard Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Marker checked to decide one-time initialisation (database creation).
# NOTE(review): nothing in this script creates the marker — presumably
# done elsewhere in the image; confirm.
firstlaunch=${DATA_FOLDER}/.firstlaunch
# Locate pg_ctl regardless of the installed PostgreSQL version.
export PG_CTL=$(find /usr/lib/postgresql/ -name pg_ctl)
# Initialise the PostgreSQL cluster on first run ($PGDATA missing).
# ${PG_CTL} is kept in single quotes so it is expanded by the postgres
# user's shell — it is exported above for that reason.
if [ ! -d ${PGDATA} ]; then
    mkdir -p ${PGDATA}
    chown -R postgres:postgres ${PGDATA}
    su postgres -c '${PG_CTL} initdb -U postgres'
fi
su postgres -c '${PG_CTL} -l /var/log/postgres/postgres.log -w start'
if [ ! -f ${firstlaunch} ]; then
    su postgres -c 'psql -U postgres -d postgres -c "CREATE DATABASE thingsboard"'
fi
# Make /var/lib/cassandra a symlink onto the $CASSANDRA_DATA volume,
# replacing any plain directory left behind by the package install.
cassandra_data_dir=${CASSANDRA_DATA}
cassandra_data_link=/var/lib/cassandra
if [ ! -L ${cassandra_data_link} ]; then
    if [ -d ${cassandra_data_link} ]; then
        rm -rf ${cassandra_data_link}
    fi
    if [ ! -d ${cassandra_data_dir} ]; then
        mkdir -p ${cassandra_data_dir}
        chown -R cassandra:cassandra ${cassandra_data_dir}
    fi
    ln -s ${cassandra_data_dir} ${cassandra_data_link}
fi
service cassandra start
# Block until nmap reports the configured Cassandra port open.
until nmap $CASSANDRA_HOST -p $CASSANDRA_PORT | grep "$CASSANDRA_PORT/tcp open"
do
    echo "Wait for cassandra db to start..."
    sleep 5
done
| true |
9f7db1b09491b88d24a6539a3063e73bd9672de9 | Shell | Freccia/fastone | /script/disk/benchmark_disk.sh | UTF-8 | 916 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# Sample sequential-read throughput at evenly spaced zones across a
# whole block device using dd (page cache dropped first).
if [ -z "$1" ];then
    echo "usage example: $0 /dev/sda"
    exit 1
fi
dev="$1"
if [ ! -b "$dev" ];then
    echo "invalide block device $dev"
    exit 1
fi
#set -x
device=$(basename ${dev})
# bsm: read size per sample in MiB; count: dd blocks read per sample.
bsm=32
count=1
#sample=50
sample=100
# Device size from /proc/partitions ($4 = name, $3 = 1 KiB blocks).
partsize=$(cat /proc/partitions |awk '{print $4,$3}'|egrep "^${device} [0-9]+"|awk '{print $2}')
# pm: size in MiB; sk: how many ${bsm}M blocks to skip between samples
# so the $sample reads are spread evenly over the device.
pm=$(echo "${partsize} / 1024" |bc )
sk=$(echo "$pm / ${bsm} / $sample" |bc)
echo "Device: ${device}"
echo "Part-size: ${partsize} bytes"
echo "PM: ${pm}"
echo "BS: ${bsm}M"
echo "Skip: ${sk}"
# Drop the page cache so reads hit the disk (requires root).
echo 3 > /proc/sys/vm/drop_caches
dd --help > /dev/null 2>&1
sleep 2
skip=0
set +x
# dd's final status line is "bytes copied, seconds, rate"; keep only the
# rate (third comma-separated field).
for i in $(seq 0 $sample)
do
    echo -n "Zone ${i} $(echo ${skip} |bc)-$(echo ${skip}+${bsm} |bc) "
    dd if=${dev} of=/dev/null iflag=nocache bs=${bsm}M count=${count} skip=${skip} 2>&1 |tail -n 1 |awk -F, '{print $3}'
    sleep 0.05
    skip=$(( skip + sk ))
done
| true |
a8b82414dc6db194e8ff322eabb15c0ec1c5abbf | Shell | brocaar/lora-channel-manager | /packaging/package.sh | UTF-8 | 2,063 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Build a Debian package for the pre-built lora-channel-manager release
# tarball using fpm. Expects GOARCH and TARGET in the environment plus
# the dist tarball produced by the build step.
# map GOARCH to ARCH
case $GOARCH in
'amd64') ARCH="x86_64"
;;
'386') ARCH="i386"
;;
'arm') ARCH="armhf"
;;
*)
echo "Unknown target $GOARCH"
exit 1
;;
esac
# validate TARGET
case $TARGET in
'deb') DEB_WANTED="deb"
;;
*)
echo "Unknown target distribution $TARGET"
exit 1
;;
esac
NAME=lora-channel-manager
BIN_DIR=/usr/bin
SCRIPT_DIR=/usr/lib/$NAME/scripts
TMP_WORK_DIR=`mktemp -d`
LOGROTATE_DIR=/etc/logrotate.d
POSTINSTALL_SCRIPT=$TARGET/post-install.sh
PREINSTALL_SCRIPT=$TARGET/pre-install.sh
POSTUNINSTALL_SCRIPT=$TARGET/post-uninstall.sh
LICENSE=MIT
VERSION=`git describe --always`
URL=https://docs.loraserver.io/$NAME/
MAINTAINER=info@brocaar.com
VENDOR="LoRa Server project"
DESCRIPTION="LoRa Channel Manager fetches channel configuration from LoRa Server and updates & restarts the packet-forwarder"
DIST_FILE_PATH="../dist/tar/${NAME//-/_}_${VERSION}_linux_${GOARCH}.tar.gz"
# NOTE(review): DEB_FILE_PATH is defined but the hard-coded ../dist/deb
# is used below instead — confirm which is intended.
DEB_FILE_PATH="../dist/deb"
# Arguments shared by every fpm invocation; kept as a single string and
# deliberately expanded unquoted below.
COMMON_FPM_ARGS="\
--log error \
-C $TMP_WORK_DIR \
--url $URL \
--license $LICENSE \
--maintainer $MAINTAINER \
--after-install $POSTINSTALL_SCRIPT \
--before-install $PREINSTALL_SCRIPT \
--after-remove $POSTUNINSTALL_SCRIPT \
--architecture $ARCH \
--name $NAME \
--version $VERSION"
if [ ! -f $DIST_FILE_PATH ]; then
echo "Dist file $DIST_FILE_PATH does not exist"
exit 1
fi
# make temp dirs
mkdir -p $TMP_WORK_DIR/$BIN_DIR
mkdir -p $TMP_WORK_DIR/$SCRIPT_DIR
mkdir -p $TMP_WORK_DIR/$LOGROTATE_DIR
# unpack pre-compiled binary
tar -zxf $DIST_FILE_PATH -C $TMP_WORK_DIR/$BIN_DIR
# copy scripts
cp $TARGET/init.sh $TMP_WORK_DIR/$SCRIPT_DIR
cp $TARGET/$NAME.service $TMP_WORK_DIR/$SCRIPT_DIR
cp $TARGET/default $TMP_WORK_DIR/$SCRIPT_DIR
cp $TARGET/logrotate $TMP_WORK_DIR/$LOGROTATE_DIR/$NAME
if [ -n "$DEB_WANTED" ]; then
fpm -s dir -t deb $COMMON_FPM_ARGS --vendor "$VENDOR" --description "$DESCRIPTION" .
if [ $? -ne 0 ]; then
echo "Failed to create Debian package -- aborting."
exit 1
fi
mkdir -p ../dist/deb
mv *.deb ../dist/deb
echo "Debian package created successfully."
fi
| true |
6b10224f090e62403818e11b03917803b673c589 | Shell | sevki/9hd | /bin/osxvers | UTF-8 | 177 | 2.984375 | 3 | [
"LPL-1.02",
"dtoa"
] | permissive | #!/bin/sh
# Print a CFLAGS fragment defining OSX_VERSION (e.g. 10.9.5 -> 100905)
# on Darwin; prints nothing on any other system.
u=`uname`
case "$u" in
Darwin)
	sw_vers | awk '$1 == "ProductVersion:" {print $2}' | awk -F. '{printf("CFLAGS=$CFLAGS -DOSX_VERSION=%d%02d%02d\n", $1, $2, $3)}'
esac
| true |
cc812dfce4a07bf44c34955954a4f8a7ded4f011 | Shell | vanyasem/Halium-Arch | /drihybris-git/PKGBUILD | UTF-8 | 979 | 2.578125 | 3 | [] | no_license | # Maintainer: Ivan Semkin (ivan at semkin dot ru)
pkgname=drihybris-git
_pkgname=drihybris
pkgver=r1.fbaf21e
pkgrel=1
pkgdesc='DRIHYBRIS extension (based on DRI3) for buffer sharing on libhybris-based adaptations'
url='https://github.com/NotKit/drihybris'
arch=(i686 x86_64 armv7h aarch64)
license=()
conflicts=(drihybris)
provides=(drihybris)
depends=(xorg-server xproto fontsproto randrproto renderproto)
makedepends=(xorg-server-devel git)
groups=(xorg-drivers xorg)
source=('git+https://github.com/NotKit/drihybris.git')
sha256sums=('SKIP')
# Version string for the -git package: r<commit count>.<short hash>.
pkgver() {
  cd ${_pkgname}
  echo "r$(git rev-list --count HEAD).$(git describe --always)"
}
# Regenerate the autotools build system; NOCONFIGURE defers ./configure.
prepare() {
  cd ${_pkgname}
  NOCONFIGURE=1 ./autogen.sh
}
build() {
  cd ${_pkgname}
  # libhybris/Android headers live outside the default include path.
  export CPLUS_INCLUDE_PATH=/opt/android/include:/opt/android/hybris/include
  export C_INCLUDE_PATH=/opt/android/include:/opt/android/hybris/include
  ./configure --prefix=/usr
  make
}
package() {
  cd ${_pkgname}
  make DESTDIR="${pkgdir}" install
}
| true |
dfee52cc1312f66033f6d7229c08946c81f02478 | Shell | Twistedben/Ossemble-Demo | /entrypoint.sh | UTF-8 | 395 | 2.703125 | 3 | [] | no_license | #!/bin/sh
# https://stackoverflow.com/a/38732187/1935918
# Container entrypoint for the Rails app; ends by exec'ing the CMD.
set -e
# Remove a potentially pre-existing server.pid for Rails.
rm -f /tmp/pids/server.pid
#RAILS_ENV=development NODE_ENV=development bundle exec rails assets:precompile
#bundle exec rake db:schema:load 2>/dev/null || bundle exec rake db:setup
# Then exec the container's main process (what's set as CMD in the Dockerfile).
exec "$@" | true |
8782a318de58daed56d2a40cb7c0b595fc8d6abc | Shell | gangjian0917/home | /hotplug.sh | UTF-8 | 609 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Start conky on both displays
function connect() {
  killall conky
  conky -d -c /home/raphael/.conky/main-monitor.rc
  conky -d -c /home/raphael/.conky/external-monitor.rc
  logger 'Conky restarted in dual display mode'
  echo 'Conky restarted in dual display mode'
}
# Start conky on main display
function disconnect() {
  killall conky
  conky -d -c /home/raphael/.conky/main-monitor.rc
  logger 'Conky restarted in single display mode'
  echo 'Conky restarted in single display mode'
}
# Path of the DP-2 connector's status node, if one exists.
# NOTE(review): this only tests for the *existence* of the DP-2 status
# file, not its content ("connected"/"disconnected") — presumably
# sufficient on this machine, but grepping the file content is the
# usual check; confirm.
DP=$(find /sys/class/drm/*/status | grep DP-2)
if [ x$DP == "x" ]
then
  disconnect
else
  connect
fi
| true |
8a9190cd669e6ab01fba2ecfe4e2c49b3d45a043 | Shell | uk-gov-mirror/UKHomeOffice.docker-mysql-client | /docker-entrypoint.sh | UTF-8 | 2,768 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e
# Defaults for every configurable knob; each can be overridden from the
# container environment. The *_SECRET variables are FILE PATHS (secret
# mounts), not the secret values themselves.
export DEFAULT_PW=${DEFAULT_PW:-changeme}
export ROOT_PASS_SECRET=${ROOT_PASS_SECRET:-/etc/db/db-root-pw}
export APP_DB_NAME_SECRET=${APP_DB_NAME_SECRET:-/etc/db/db-name}
export APP_DB_USER_SECRET=${APP_DB_USER_SECRET:-/etc/db/db-username}
export APP_DB_PASS_SECRET=${APP_DB_PASS_SECRET:-/etc/db/db-password}
export ENABLE_SSL=${ENABLE_SSL:-TRUE}
export DROP_DB=${DROP_DB:-FALSE}
export MYSQL_PORT="${MYSQL_PORT:-3306}"
# With SSL on, every mysql call pins the AWS RDS CA bundle (downloaded
# below) and every grant created requires TLS client connections.
if [ "${ENABLE_SSL}" == "TRUE" ]; then
  export SSL_OPTS="--ssl=true --ssl-ca=/root/rds-combined-ca-bundle.pem"
  export REQUIRE_SSL="REQUIRE SSL"
fi
# Seed a secret file with a fallback value from the environment.
# $1 - path of the secret file; $2 - fallback value.
# An existing file is left untouched; otherwise $2 is written to it.
function make_secret_from_env {
    local target="$1"
    local default_value="$2"
    # Nothing to do when the secret has already been provisioned.
    [ -f "${target}" ] && return 0
    echo "${default_value}" > "${target}"
}
# Probe the server with the root credentials from ~/.my.cnf (written by
# the main flow below); when access is denied (mysql error 1045), assume
# the server still has the vendor default password and reset root's
# password to the managed secret. Must be invoked under `set +e`: with
# errexit active, the failing probe assignment would abort the script
# before the handler could run.
function check_root {
  echo "Check default PW access..."
  err_txt=$(echo "SELECT 1+1;" | mysql --host=${MYSQL_HOST} --port=${MYSQL_PORT} ${SSL_OPTS} 2>/dev/stdout)
  if [ $? -ne 0 ]; then
    echo $err_txt | grep "ERROR 1045"
    if [ $? -eq 0 ]; then
      echo "Detected, Access denied error, now attempting reset FROM default password..."
      echo "SET PASSWORD FOR 'root'@'%' = PASSWORD('$(cat ${ROOT_PASS_SECRET} )');" | \
        mysql --host=${MYSQL_HOST} --port=${MYSQL_PORT} --user="root" --password="${DEFAULT_PW}" ${SSL_OPTS}
      if [ $? -ne 0 ]; then
        echo "ERROR resetting default password. Home time..."
        exit 1
      fi
    fi
  fi
}
mkdir -p /etc/db
# Seed any missing secret files from their env-var fallbacks.
make_secret_from_env "${ROOT_PASS_SECRET}" "${ROOT_PASS}"
make_secret_from_env "${APP_DB_NAME_SECRET}" "${APP_DB_NAME}"
make_secret_from_env "${APP_DB_USER_SECRET}" "${APP_DB_USER}"
make_secret_from_env "${APP_DB_PASS_SECRET}" "${APP_DB_PASS}"
# Resolve one level of indirection, e.g. MYSQL_HOST='$SOME_OTHER_VAR'.
MYSQL_HOST=$(eval echo ${MYSQL_HOST})
MYSQL_PORT=$(eval echo ${MYSQL_PORT})
echo "Downloading CA cert for mysql access"
# NOTE(review): curl parses "-fail" as the clustered short options
# -f -a -i -l, not --fail; -i would prepend HTTP headers to the saved
# CA file — confirm and switch to "--fail".
curl -fail http://s3.amazonaws.com/rds-downloads/rds-combined-ca-bundle.pem -o /root/rds-combined-ca-bundle.pem
# Allow for no passwords when running mysql as root...
echo "[client]
user=root
password='$(cat ${ROOT_PASS_SECRET})'
">~/.my.cnf
# check_root handles failures itself; disable errexit around the call.
set +e
check_root
set -e
# NOTE(review): refresh_sql is appended to (>>) and never truncated, so
# repeated container starts re-apply earlier statements — confirm this
# is intended.
refresh_sql=/tmp/refresh_users_and_db.sql
# APP_DB_NAME may hold a comma-separated list of databases.
DB_NAMES=$(cat ${APP_DB_NAME_SECRET})
IFS=',' read -a DB_ARRAY <<< "$DB_NAMES"
for DB_NAME in "${DB_ARRAY[@]}"; do
  if [ "${DROP_DB}" == "TRUE" ]; then
    DROP_STATEMENT="DROP DATABASE IF EXISTS ${DB_NAME};"
  fi
  cat >> "${refresh_sql}" <<-EOSQL
    ${DROP_STATEMENT}
    CREATE DATABASE IF NOT EXISTS ${DB_NAME};
    grant all on ${DB_NAME}.* to
      '$(cat ${APP_DB_USER_SECRET})'@'%' identified by '$(cat ${APP_DB_PASS_SECRET})' ${REQUIRE_SSL};
EOSQL
done
cat >> "${refresh_sql}" <<-EOSQL2
GRANT USAGE ON *.* TO 'root'@'%' ${REQUIRE_SSL};
FLUSH PRIVILEGES;
EOSQL2
echo "Update / create any database users..."
mysql --host=${MYSQL_HOST} --port=${MYSQL_PORT} ${SSL_OPTS} < ${refresh_sql}
| true |
3202d6c4f9fac21ad73536b49c88ed8952e617c4 | Shell | IgorMinar/angular | /.circleci/env-helpers.inc.sh | UTF-8 | 1,451 | 4.21875 | 4 | [
"MIT"
] | permissive | ####################################################################################################
# Helpers for defining environment variables for CircleCI.
#
# In CircleCI, each step runs in a new shell. The way to share ENV variables across steps is to
# export them from `$BASH_ENV`, which is automatically sourced at the beginning of every step (for
# the default `bash` shell).
#
# See also https://circleci.com/docs/2.0/env-vars/#using-bash_env-to-set-environment-variables.
####################################################################################################
# Define an environment variable for subsequent CI steps and echo the
# assignment so the value shows up in the step's log output. Only use
# this for values that are safe to expose publicly.
#
# Usage: `setPublicVar <name> <value>`
function setPublicVar() {
  setSecretVar "$1" "$2"
  printf '%s=%s\n' "$1" "$2"
}
# Define an environment variable for subsequent CI steps WITHOUT echoing
# its value. Use this for secrets that must not appear in CI logs.
# The export line is appended to $BASH_ENV, which CircleCI sources at
# the start of every later step.
#
# Usage: `setSecretVar <name> <value>`
function setSecretVar() {
  # WARNING: Secrets (e.g. passwords, access tokens) should NOT be printed.
  # Capture the current shell options so they can be restored afterwards.
  local -r savedOptions=$(set +o)
  # +x prevents tracing from leaking the value; -eu/-o pipefail for safety.
  set +x -eu -o pipefail
  printf 'export %s="%s";\n' "$1" "${2:-}" >> "$BASH_ENV"
  eval "$savedOptions"
}
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.