blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a69370138dc762b20ae5a3262a060bd49f29f5b7 | Shell | mateusza-szkolenia/2021-05-alx-linux | /skrypt16.sh | UTF-8 | 194 | 2.875 | 3 | [] | no_license | #!/bin/bash
# Prompt for a name and print a matching greeting.
read -p 'Podaj imie: ' imie
# BUG FIX / portability: POSIX test(1) only defines '=' for string equality;
# '==' is an unportable extension. Bash's [[ ]] supports '==' natively and
# does not word-split the variable.
if [[ "$imie" == "Mateusz" ]]; then
    echo "Witaj nauczycielu"
elif [[ -z "$imie" ]]; then
    # Empty input (e.g. the user just pressed Enter).
    echo "Witaj nieznajomy"
else
    echo "Witaj $imie"
fi
| true |
b154164a2ddc27dff229a00eca7a7a40d70f8b92 | Shell | paul-b-manning/epiphany | /core/core/src/scripts/azure/run_build.sh | UTF-8 | 9,183 | 3.546875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright 2019 ABB. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Summary:
# This script builds out the required terraform, scripts, json, yaml, etc. files and executes
# them. The goal is to create the required Azure infrastructure using Terraform, collect
# cluster information using 'az' where needed and finally build out the required Ansible
# inventory and other files required to bootstrap the environment.
#
# The bootstrap data files are used regardless of platform. If on-premise the customer
# provides IPs, gateways, firewall data, MAC Addresses (if needed) and more. This data is then
# used to create the bootstrap data files and then Ansible loads all of the required files,
# executes any scripts required and preps for the next layer.
# NOTE: Other scripts may call this script so do not change parameters unless you know the impact!
# MUST HAVE SUDO RIGHTS FOR YOUR ACCOUNT! DON'T USE 'sudo gen_helper.sh' but have sudo rights.
# Can use 'basename' to extract name from parameter 1 and assign it to parameter 2. This can be done
# in a higher level helper script if desired.
# run_build.sh just makes it easier to call gen_templates_azure.sh for Azure
# Exit immediately if something goes wrong.
set -e

# BUG FIX (SC2155): 'export VAR=$(cmd)' masks a failing cmd under 'set -e'
# because 'export' itself succeeds; split assignment and export so a failing
# 'git rev-parse' aborts the script.
REPO_ROOT=$(git rev-parse --show-toplevel)/core
export REPO_ROOT

# Repo-relative locations of the shared and Azure-specific scripts/templates.
COMMON_SCRIPTS_BASE=core/src/scripts/common
COMMON_TEMPLATES_BASE=core/src/templates/common
SCRIPTS_BASE=core/src/scripts/azure
TEMPLATES_BASE=core/src/templates/azure

# Positional parameters (see the header comment for their semantics).
DATA_DIR=$1
OUTPUT_DIR=$2
EPIPHANY_DATA_DIR=$3
DATA=$4

# Set in var name in the event we want to override or allow for passing in a different name.
if [[ -z $DATA ]]; then
    DATA=data.yaml
fi

# Pulls in echo_red/echo_yellow used for status output below.
source "$REPO_ROOT/$COMMON_SCRIPTS_BASE/base_colors.sh"

if [[ -z $DATA_DIR ]]; then
    echo_red '====> ERROR: MUST specify a valid DATA directory! <===='
    exit 1
fi

if [[ -z $OUTPUT_DIR ]]; then
    echo_red '====> ERROR: MUST specify a valid OUTPUT directory! <===='
    exit 1
fi

# This extracts the last portion of the path or the word if no path. This is the final Terraform file name.
OUTPUT_TF_FILE=${DATA_DIR##*/}
if [[ -z $OUTPUT_TF_FILE ]]; then
    echo_red '====> ERROR: MUST specify the Terraform output file name! <===='
    exit 1
fi

# Make sure the output directory exists.
# FIX (SC2086): expansions are quoted throughout this section so paths
# containing whitespace do not word-split or glob.
mkdir -p "$OUTPUT_DIR"

export TF_IN_AUTOMATION=1

# Render version.sh from its template, then load the version variables.
"$REPO_ROOT/bin/template_engine" -d "$DATA_DIR/$DATA" -i "$REPO_ROOT/$TEMPLATES_BASE/version.sh.j2" -o "$OUTPUT_DIR/version.sh"
chmod +x "$OUTPUT_DIR/version.sh"
source "$OUTPUT_DIR/version.sh"

echo
echo_yellow '====> Creating gen_sp.sh...'
# If not enabled then gen_sp.sh will only be a stub
"$REPO_ROOT/bin/template_engine" -d "$DATA_DIR/$DATA" -i "$REPO_ROOT/$TEMPLATES_BASE/gen_sp.sh.j2" -o "$OUTPUT_DIR/gen_sp.sh"
chmod +x "$OUTPUT_DIR/gen_sp.sh"

# Generate the script to delete the resource group
"$REPO_ROOT/bin/template_engine" -d "$DATA_DIR/$DATA" -i "$REPO_ROOT/$TEMPLATES_BASE/del_rg.sh.j2" -o "$OUTPUT_DIR/del_rg.sh"
chmod +x "$OUTPUT_DIR/del_rg.sh"
# NOTE:
# IF you want to delete the resource group and service principal then do so in the following order:
# 1. ./del_sp.sh
# 2. ./del_rg.sh
# Verify the resource group has been deleted or is still being purged via the Azure Portal. You can check for the existence of
# the resource group via Azure CLI but we don't do that.
echo_yellow '====> Check for service principal being enabled...'

# BUG FIX: this used to read 'az logout &2>/dev/null || echo ...'. The stray
# '&' backgrounded 'az logout', the redirection then applied to an *empty*
# command (which always succeeds), so the '|| echo' fallback could never run.
az logout 2>/dev/null || echo "No session to remove."

if [[ -f "$OUTPUT_DIR/az_ad_sp.json" ]]; then
    echo "File az_ad_sp.json exists."
    # BUG FIX: the original ran 'cat ... | grep appId' and then inspected $?.
    # Under 'set -e' a non-matching grep terminated the whole script before
    # the corruption check could execute. Testing grep directly in the 'if'
    # is set -e safe and still echoes the matching line like before.
    if grep 'appId' "$OUTPUT_DIR/az_ad_sp.json"; then
        # (typo fix: message said "from from")
        echo "Logging with Service Principal from az_ad_sp.json."
        SP_CLIENT_ID=$(grep -i appId "$OUTPUT_DIR/az_ad_sp.json" | awk '{print $2}' | tr -d "\"" | tr -d ",")
        SP_CLIENT_SECRET=$(grep -i password "$OUTPUT_DIR/az_ad_sp.json" | awk '{print $2}' | tr -d "\"" | tr -d ",")
        SP_TENANT_ID=$(grep -i tenant "$OUTPUT_DIR/az_ad_sp.json" | awk '{print $2}' | tr -d "\"" | tr -d ",")
        az login --service-principal -u "$SP_CLIENT_ID" -p "$SP_CLIENT_SECRET" --tenant "$SP_TENANT_ID"
    else
        echo "File corrupted. Removing. Please login manually."
        rm -f "$OUTPUT_DIR/az_ad_sp.json"
    fi
fi

# Make sure to force a login with enough rights to create service principals. Could create a process later that
# creates certs to use for all 'az' commands...
# testing...
if [[ ! -f "$OUTPUT_DIR/az_ad_sp.json" ]]; then
    if [[ -f "$OUTPUT_DIR/../epiphan_azure_cert.pem" ]]; then
        #WIP az login --service-principal ...
        echo 'wip'
    else
        # For security - login each time unless a service principal is used
        az login
    fi
fi

echo "Running gen_sp.sh script."
"$OUTPUT_DIR/gen_sp.sh" "$DATA_DIR" "$OUTPUT_DIR" "$OUTPUT_TF_FILE" "$DATA"

echo "Logging to Azure."
"$OUTPUT_DIR/login.sh" "$DATA_DIR" "$OUTPUT_DIR" "$OUTPUT_TF_FILE" "$DATA"

source "$OUTPUT_DIR/env.sh" "$DATA_DIR"
# NOTE: Check for terraform backends first! If using backend then create resource group, storage account and container first.
# Then get key1 for access_key, generate config.tfvars, backend.tf and run 'terraform init -backend-conf=config.tfvars -backend-config="access_key=<value>"'
# Create resources now
echo_yellow '====> Creating base.tf...'
$REPO_ROOT/bin/template_engine -d $DATA_DIR/$DATA -i $REPO_ROOT/$TEMPLATES_BASE/base.tf.j2 -o $OUTPUT_DIR/base.tf
# terraform.init is a sentinel file: its presence means 'terraform init' and
# the bootstrap apply already ran for this output directory, so both are skipped.
if [[ ! -f $OUTPUT_DIR/terraform.init ]]; then
# This means Terraform is starting over so we need to check ./terraform directory for terraform.tfstate and remove it
if [[ -f $OUTPUT_DIR/.terraform/terraform.tfstate ]]; then
rm -f $OUTPUT_DIR/.terraform/terraform.tfstate
fi
# Terraform must run from the directory holding the *.tf files, hence the subshell cd.
(cd $OUTPUT_DIR && terraform init $OUTPUT_DIR)
# NOTE:
# If you receive an error from Terraform like the following:
# Error: Error running plan: 1 error(s) occurred:
# * provider.azurerm: Unable to list provider registration status
#
# This is most likely due to NOT being logged into Azure. Call `az login` and it will give you a device key. Copy that and load 'https://microsoft.com/devicelogin' and then paste it into the prompt and apply. It will then log you in and give your CLI a token
# to use for a short time.
# Create the resources
(cd $OUTPUT_DIR && terraform apply -auto-approve $OUTPUT_DIR)
echo "# Terraform init has been completed - ONLY remove this file if you want it to run again!" > $OUTPUT_DIR/terraform.init
fi
echo_yellow '====> Creating backend.tf...'
# Render and run the helper that configures Terraform backend storage in Azure.
$REPO_ROOT/bin/template_engine -d $DATA_DIR/$DATA -i $REPO_ROOT/$TEMPLATES_BASE/backend.sh.j2 -o $OUTPUT_DIR/backend.sh
chmod +x $OUTPUT_DIR/backend.sh
$OUTPUT_DIR/backend.sh $DATA_DIR $OUTPUT_DIR $OUTPUT_TF_FILE $DATA
echo_yellow "====> Calling ==> ${REPO_ROOT}/${SCRIPTS_BASE}/gen_templates_azure.sh ${DATA_DIR} ${OUTPUT_DIR} ${OUTPUT_TF_FILE} ${DATA} <=="
# Generate the remaining Azure Terraform templates, then reload env.sh in case it changed.
$REPO_ROOT/$SCRIPTS_BASE/gen_templates_azure.sh $DATA_DIR $OUTPUT_DIR $OUTPUT_TF_FILE $DATA
source $OUTPUT_DIR/env.sh $DATA_DIR
# Create the resources
echo_yellow "====> Applying plan to resources..."
# Terraform requires you to run in the directory of the *.tf files.
(cd $OUTPUT_DIR && terraform apply -auto-approve $OUTPUT_DIR)
# Extract the output variables and build the terraform.json file that is used to build Ansible inventory data
echo_yellow '====> Gathering Terraform IP addresses...'
(cd "$OUTPUT_DIR" && terraform output -json > "$OUTPUT_DIR/terraform.json")

# Gather IP and host names from Azure
echo_yellow '====> Gathering Azure IP addresses...'
"$REPO_ROOT/bin/template_engine" -d "$DATA_DIR/$DATA" -i "$REPO_ROOT/$TEMPLATES_BASE/az_get_ips.sh.j2" -o "$OUTPUT_DIR/az_get_ips.sh"
chmod +x "$OUTPUT_DIR/az_get_ips.sh"
"$OUTPUT_DIR/az_get_ips.sh" "$OUTPUT_DIR"

# This will generate the data file that will be common to all platforms, not just terraform
echo_yellow '====> Generating manifest.yaml...'
#cat $DATA_DIR/$DATA > $OUTPUT_DIR/data_with_ips.yaml
"$REPO_ROOT/bin/template_engine" -d "$OUTPUT_DIR/az_vm_ips.json" -y >> "$OUTPUT_DIR/azure_hosts.yaml"
"$REPO_ROOT/bin/template_engine" -d "$OUTPUT_DIR/terraform.json" -y >> "$OUTPUT_DIR/azure_storage_keys.yaml"
chmod +x "$REPO_ROOT/$SCRIPTS_BASE/fill_in_manifest.py"
"$REPO_ROOT/$SCRIPTS_BASE/fill_in_manifest.py" -d "$DATA_DIR/$DATA" -a "$OUTPUT_DIR/azure_hosts.yaml" -k "$OUTPUT_DIR/azure_storage_keys.yaml" -t "$REPO_ROOT/$COMMON_TEMPLATES_BASE/manifest.yaml.j2" -o "$EPIPHANY_DATA_DIR/data/manifest.yaml"

echo_yellow '====> Generating infrastructure release...'
"$REPO_ROOT/bin/template_engine" -d "$DATA_DIR/$DATA" -i "$REPO_ROOT/$TEMPLATES_BASE/release.sh.j2" -o "$OUTPUT_DIR/release.sh"
chmod +x "$OUTPUT_DIR/release.sh"
"$OUTPUT_DIR/release.sh" "$DATA_DIR" "$OUTPUT_DIR" "$OUTPUT_TF_FILE" "$DATA"

# Make sure you're logged out
# BUG FIX: was 'az logout &2>/dev/null' — the '&' backgrounded the logout and
# the redirection applied to an empty command. '|| true' keeps the script's
# exit status 0 under 'set -e' when there is no active session to log out of
# (the old backgrounded form also never failed the script).
az logout 2>/dev/null || true
| true |
adf183947f57c687c19077b40b7ebdc0983f3d18 | Shell | cteplovs/bigramCount | /run.sh | UTF-8 | 307 | 2.609375 | 3 | [] | no_license | echo "Building Spark Docker Image..."
# Small helper so every status line goes through one code path.
log() { printf '%s\n' "$*"; }

# Build the Spark image from the Dockerfile in the current directory.
docker build .

log "Initializing Master Node..."
sh start-master.sh
sleep 3

log "Initializing Worker Node..."
sh start-worker.sh

# Launch the BigramCount job detached inside the master container, then give
# it time to finish before reporting completion.
log "Running BigramCount..."
docker exec -d spark_master sh -c "cd /root/src; /root/src/run.sh"
sleep 30
log "BigramCount finishes"
| true |
87730d647abb5840d9aa1a9de18b9d3983a92862 | Shell | razzlefratz/MotleyTools | /bash/set-runjobs-perms.sh | UTF-8 | 717 | 3.359375 | 3 | [
"ISC"
] | permissive | #!/bin/bash
# file: /usr/local/sbin/cmassoc/set-runjobs-perms.sh
# Published 2005 by Charles Maier Associates Limited for internal use;
# ====================================================================
# set runjobs script ownerships and permissions;
# --------------------------------------------------------------------
echo "setting runjob file permissions ..."
for file in /etc/runjobs.d/*; do
    # BUG FIX: when the glob matches nothing the literal pattern is iterated;
    # skip non-existent entries instead of running chown/chmod on them.
    [ -e "$file" ] || continue
    if [ -d "$file" ]; then
        # Directories: lock down the contents recursively (r-x for root,
        # read for group), then make the directory itself traversable.
        # FIX (SC2086): "$file" is quoted; options are placed before the
        # operand and '--' guards against names starting with '-'.
        chown -R root:root -- "$file"
        chmod -R 0540 -- "$file"
        chmod 0750 -- "$file"
    else
        chown root:root -- "$file"
        chmod 0540 -- "$file"
    fi
done
# ====================================================================
#
# --------------------------------------------------------------------
exit 0
| true |
8d823cf17088acb8ca5a817ffc3904ae8a72d8e3 | Shell | ATrump/hiveos-linux | /hive/bin/lolminer | UTF-8 | 1,891 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env bash
# Identifier used for install paths, log rotation and status messages below.
THIS_MINER_NAME="lolminer"
# Load color variables (RED/YELLOW/NOCOLOR) only when stdout is a terminal.
[ -t 1 ] && . colors
# Refuse to start a second copy: count live lolMiner processes while
# excluding this wrapper script and the grep pipeline itself.
[[ `ps aux | grep "\./lolMiner-mnx" | grep -v bash | grep -v grep | wc -l` != 0 ]] &&
echo -e "${RED}$THIS_MINER_NAME miner is already running${NOCOLOR}" &&
exit 1
# Render the miner pool configuration ($MINER_CONFIG) by layering, in order:
# global defaults, the user's config override, and the individual
# server/port/user/pass settings from the rig and wallet configs.
# Returns 1 (without writing $MINER_CONFIG) when either config file is missing.
function config_gen() {
    RIG_CONF="/hive-config/rig.conf"
    WALLET_CONF="/hive-config/wallet.conf"
    MINER_CONFIG="/hive/$THIS_MINER_NAME/pool.cfg"
    GLOBAL_CONFIG="/hive/$THIS_MINER_NAME/pool-global.cfg"

    # Start from the global defaults, followed by a blank separator line.
    # FIX: backticks replaced with $( ), path quoted (SC2086).
    conf=$(cat "$GLOBAL_CONFIG")$'\n'$'\n'

    [ ! -f "$RIG_CONF" ] && echo -e "${RED}No rig config $RIG_CONF${NOCOLOR}" && return 1
    [ ! -f "$WALLET_CONF" ] && echo -e "${RED}No wallet config $WALLET_CONF${NOCOLOR}" && return 1
    . "$RIG_CONF"
    . "$WALLET_CONF"

    # Per-user overrides win over the global defaults.
    if [[ ! -z $LOLMINER_USER_CONFIG ]]; then
        conf+="#User config overrides global"$'\n'$LOLMINER_USER_CONFIG$'\n'$'\n'
    fi
    [[ ! -z $LOLMINER_SERVER ]] && conf+="--server $LOLMINER_SERVER"$'\n'
    [[ ! -z $LOLMINER_PORT ]] && conf+="--port $LOLMINER_PORT"$'\n'
    [[ ! -z $LOLMINER_TEMPLATE ]] && conf+="--user $LOLMINER_TEMPLATE"$'\n'
    [[ ! -z $LOLMINER_PASS ]] && conf+="--pass $LOLMINER_PASS"$'\n'
    conf+=$'\n'

    # Replace %TPL% placeholders in the whole assembled file.
    # NOTE(review): wallet/email values containing '/' or '&' would break
    # these sed substitutions — assumed not to occur in practice.
    [[ -z $EWAL && -z $ZWAL && -z $DWAL ]] && echo -e "${RED}No WAL address is set${NOCOLOR}"
    [[ ! -z $EWAL ]] && conf=$(sed "s/%EWAL%/$EWAL/g" <<< "$conf")
    [[ ! -z $ZWAL ]] && conf=$(sed "s/%ZWAL%/$ZWAL/g" <<< "$conf")
    [[ ! -z $DWAL ]] && conf=$(sed "s/%DWAL%/$DWAL/g" <<< "$conf")
    [[ ! -z $EMAIL ]] && conf=$(sed "s/%EMAIL%/$EMAIL/g" <<< "$conf")
    [[ ! -z $WORKER_NAME ]] && conf=$(sed "s/%WORKER_NAME%/$WORKER_NAME/g" <<< "$conf") #|| echo "${RED}WORKER_NAME not set${NOCOLOR}"

    echo "$conf" > "$MINER_CONFIG"
}
# Build the pool config once before starting the supervision loop.
config_gen
# Work from the miner's install directory.
cd /hive/$THIS_MINER_NAME
# Supervision loop: rotate logs, run the miner in the foreground, and when it
# exits (crash or stop) wait a short cooldown before restarting it.
while true
do
miner logrotate $THIS_MINER_NAME
/hive/$THIS_MINER_NAME/lolminer.sh
echo ""
echo -e "${YELLOW}$THIS_MINER_NAME exited, waiting to cooldown a bit${NOCOLOR}"
echo ""
sleep 3
done
| true |
a98d0b1af380666c986f3d8e9792294ae955af13 | Shell | ColdenCullen/d2dl | /build/ddl/sdk.sh | UTF-8 | 342 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# sdk.sh
# Build the DDL SDK zip for the current source version. Expects ver.sh to
# define $ddlSrcVersion and the $everything find(1) regex.
source build/ddl/ver.sh "$@"

packageName="../downloads/ddl.sdk.$ddlSrcVersion.zip"

echo "Building DDL SDK"

# BUG FIX: plain 'rm' fails noisily on the very first build when the archive
# does not exist yet; -f makes the removal idempotent and '--' guards odd names.
rm -f -- "$packageName"

# $everything is quoted so the regex reaches find as a single argument.
# NOTE(review): paths containing whitespace would be split in $packageFiles;
# the project layout is assumed to contain none.
packageFiles="$(find ddl meta utils etc test examples lib doc/ddl doc/meta build/ddl -regex "$everything")"

# shellcheck disable=SC2086 -- deliberate word splitting of the file list
zip -v9 "$packageName" $packageFiles

ls -alF -- "$packageName"
echo "Done"
| true |
92bd9979348223814da8f6dd20c336837fc7e6c9 | Shell | JayjeetAtGithub/seissol-workflows | /workflows/tpv33/scripts/execute.sh | UTF-8 | 1,089 | 3.71875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Abort on the first failing command.
set -e

# --- Required environment -------------------------------------------------
if [ -z "$SEISSOL_SRC_DIR" ]; then
  echo "Expecting SEISSOL_SRC_DIR variable"
  exit 1
fi

if [ -z "$OMP_NUM_THREADS" ]; then
  echo "No OMP_NUM_THREADS variable defined"
  exit 1
fi

if [ -z "$MPI_NUM_PROCESSES" ]; then
  # BUG FIX: the message said "MPI_NUM_ROCESSES" and, worse, the bare 'exit'
  # returned the status of the preceding echo (0), so the missing variable
  # looked like success to callers.
  echo "No MPI_NUM_PROCESSES variable defined"
  exit 1
fi

if [ -z "$SEISSOL_END_TIME" ]; then
  echo "No SEISSOL_END_TIME variable defined"
  exit 1
fi

EXECUTION_DIR="$PWD/workflows/tpv33/execution/"

# TODO: we assume that there's only one binary in the build/ folder. Instead, we
# can look for a SEISSOL_BIN variable and use that if defined; otherwise we can
# try to copy whatever binary is there but throw an error if there's more than
# one available
# NOTE(review): parsing 'ls' output breaks on paths with whitespace; a glob
# plus a count check would be safer here.
SEISSOL_BIN="$(ls $GITHUB_WORKSPACE/$SEISSOL_SRC_DIR/build/SeisSol_*)"
cp "$SEISSOL_BIN" "$EXECUTION_DIR"

mkdir -p "$EXECUTION_DIR/output"

# run: patch the simulation end time into the parameter file, then launch.
cd "$EXECUTION_DIR"
sed -i "s#EndTime = .*#EndTime = $SEISSOL_END_TIME#" parameters_tpv33_master.par
mpirun \
  --allow-run-as-root \
  --oversubscribe \
  -np "$MPI_NUM_PROCESSES" \
  "$SEISSOL_BIN" \
  parameters_tpv33_master.par
| true |
c21d161758960e5c573c2e8c2a4288900fb30224 | Shell | lorenzocomotti/terraform-modules | /vmware/wordpress/files/install.sh | UTF-8 | 956 | 3 | 3 | [] | no_license | export COMPLETED=false
# Retry the whole provisioning sequence until one pass succeeds end to end.
# ('==' replaced with portable '=' for test(1).)
while [ "$COMPLETED" = "false" ]; do
    (
        # BUG FIX: this was 'set -e errexit', which enables -e but also sets
        # the subshell's $1 to the literal word "errexit". Enable the two
        # intended options explicitly.
        set -o errexit
        set -o pipefail
        # workaround https://github.com/ansible/ansible/issues/21562
        export HOME=/root
        cd /tmp/
        echo "nameserver 8.8.8.8" >> /etc/resolv.conf
        # Clear any leftovers from a previous (failed) virtualenv attempt.
        rm -rf bin local share roles include lib || true
        # Install build prerequisites only when they are missing.
        dpkg-query -l libffi-dev || ( apt update -y && apt install libffi-dev -y )
        dpkg-query -l libssl-dev || ( apt update -y && apt install libssl-dev -y )
        test -e /usr/bin/python || ( apt update -y && apt install python-minimal -y )
        test -e /usr/bin/pip || ( apt update -y && apt install python-pip -y )
        test -e /usr/bin/virtualenv || ( apt update -y && apt install virtualenv -y )
        apt install docker.io -y
        pip install -r /tmp/requirements.txt
        # Re-install the requirements inside a virtualenv and run the
        # playbook against the local machine.
        virtualenv .
        source bin/activate
        pip install -r /tmp/requirements.txt
        ansible-playbook -e ansible_python_interpreter=/usr/bin/python --connection=local playbook.yml
    )
    # BUG FIX: '==' inside [ ] is a bashism; use the numeric comparison. The
    # subshell's exit status decides whether to stop retrying.
    if [ $? -eq 0 ]; then
        COMPLETED=true
    fi
    sleep 1
done
| true |
8b13673e742fd8fcb94b811096cf25031c0f2d56 | Shell | friendyogi/Linux-Montirong-Scripts | /MySQL/check_mysql_replication_status.sh | UTF-8 | 1,471 | 3.65625 | 4 | [
"LicenseRef-scancode-philippe-de-muyter"
] | permissive | #!/bin/bash
#----------------------------------------
# MySQL replication status script
# Date 11th Oct 2014
# Author: friendyogi@gmail.com
#----------------------------------------
# This script will send a replication status report to mail
# Place this script in MySQL master server
DONE=0
SUBJECT=''
NOTIFY=me@gmail.com
# BUG FIX: HOSTNAME was assigned *after* BODY referenced it — it only worked
# because bash happens to pre-set $HOSTNAME. Resolve it first, explicitly.
HOSTNAME=$(hostname)
BODY="Please find $HOSTNAME MySQL Master DB and Slave DB sync status attached with this mail"
MYIP=$(hostname --all-ip-addresses)
SLAVEIP="IP of remote slave server"
DBSLAVE="hostname of remote slave server"
DBMASTER=$HOSTNAME
# NOTE(review): predictable report path in /tmp; also the MySQL password is
# hardcoded on the command line (visible in ps) — consider ~/.my.cnf.
HTML=/tmp/replication_status.html
# Replication is healthy when Slave_SQL_Running (12th tab-separated column
# of 'show slave status') is "Yes".
DONE=$( mysql -uroot -psecret -h "$DBSLAVE" -Be 'show slave status;' | tail -1 | cut -f12 | grep Yes | wc -l )
# BUG FIX: both SUBJECT assignments used single quotes, so the literal text
# '$HOSTNAME' was mailed instead of the actual host name.
if [ "$DONE" -eq 0 ]; then
    SUBJECT="$HOSTNAME: MySQL Master and Slave DB Replication Stopped"
else
    SUBJECT="$HOSTNAME: MySQL Master and Slave DB Replication Running"
fi
echo "<TABLE BORDER=1><TR><TH>$HOSTNAME MySQL DB replication running status:</TH></TR></TABLE>" > "$HTML"
echo "<p> </p>" >> "$HTML"
echo "<TABLE BORDER=1><TR><TH>Master Server ($DBMASTER/$MYIP)</TH></TR></TABLE>" >> "$HTML"
mysql -uroot -psecret -h "$DBMASTER" -H mysql -s -e \
"show master status; " >> "$HTML"
echo "<p> </p>" >> "$HTML"
echo "<TABLE BORDER=1><TR><TH>Slave Server ($DBSLAVE/$SLAVEIP)</TH></TR></TABLE>" >> "$HTML"
mysql -uroot -psecret -h "$DBSLAVE" -H mysql -s -e \
"show slave status \G; " >> "$HTML"
echo "<p> </p>" >> "$HTML"
# BUG FIX: mutt requires '--' before recipient addresses when -a is used;
# without it the recipient can be parsed as another attachment.
echo "$BODY" | mutt -a "$HTML" -s "$SUBJECT" -- "$NOTIFY"
rm $HTML | true |
cebfeaa9f3a7985e9b0ac48434904780af5b74ea | Shell | yoya/x.org | /X11R5/contrib/lib/Xcu/clients/xcell/Perturb/collect | UTF-8 | 1,529 | 2.65625 | 3 | [] | no_license | #!/bin/ksh
#
# Copyright 1991 Cornell University
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted, provided
# that the above copyright notice appear in all copies and that both that
# copyright notice and this permission notice appear in supporting
# documentation, and that the name of Cornell U. not be used in advertising
# or publicity pertaining to distribution of the software without specific,
# written prior permission. Cornell U. makes no representations about the
# suitability of this software for any purpose. It is provided "as is"
# without express or implied warranty.
#
# CORNELL UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
# EVENT SHALL CORNELL UNIVERSITY BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
# USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
# Author: Gene W. Dykes, Program of Computer Graphics
# 580 Theory Center, Cornell University, Ithaca, NY 14853
# (607) 255-6713 gwd@graphics.cornell.edu
#
# Merge every generated rule fragment (p?, p??, p???) into a single sorted,
# de-duplicated rule file; missing fragments are silently ignored.
/bin/cat p? p?? p??? 2>/dev/null | sort | uniq > p.collect
# Count the collected rules.
# NOTE(review): 'gwc' and 'ggrid' appear to be project-local filter helpers,
# not standard utilities — TODO confirm what fields they extract.
g=`wc p.collect | gwc`
# Clamp the rule count to the largest supported grid size (144).
if [ $g -gt 144 ] ; then g=144; fi
# Look up the grid geometry matching the (clamped) rule count.
grid=`grep g$g grid | ggrid`
# Launch xcell with the saved options, the merged rules and the cull list.
xcell `/bin/cat options` \
	-rules p.collect \
	-pick p.cull \
	$grid
| true |
bc1beb83731bc0c79132a3503baf5aa9d31e06e6 | Shell | stjordanis/gdscript-docs-maker | /generate_reference | UTF-8 | 4,634 | 4.125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env sh
# First positional argument: path to the Godot project to document.
project_directory="$1"
# Defaults; each can be overridden by the command-line options parsed below.
output_directory="export"
directories_override=""
format="markdown"
author="developer"
# Print the usage text to stdout and exit 0. The quoted heredoc delimiter
# keeps $project_directory and friends literal (no expansion).
echo_help() {
cat <<'EOT'
Generate a code reference from GDScript
Usage:
generate_reference $project_directory [options]
Required arguments:
$project_directory -- path to your Godot project directory.
This directory or one of its subdirectories should contain a
project.godot file.
Options:
-h/--help -- Display this help message.
-o/--output-directory -- directory path to output the documentation into.
-d/--directory -- Name of a directory to find files and generate the code reference in the Godot project.
You can use the option multiple times to generate a reference for multiple directories.
-f/--format -- Either `markdown` or `hugo`. If `hugo`, the output document includes a TOML front-matter
at the top. Default: `markdown`.
-a/--author -- If --format is `hugo`, controls the author property in the TOML front-matter.
Usage example:
generate_reference ~/Repositories/other/nakama-godot/project/ -o export-nakama -d addons
This command walks files in the res://addons directory of the Godot Nakama project, and converts it
to markdown files output in ./export-nakama.
EOT
exit 0
}
# Interpret arguments
# BUG FIX: the short-option spec was "h,o:,d:,f:,a:" — getopt(1)'s short
# string has no comma separators, so ',' itself became an accepted option.
# Also, the long-option list declared 'directories:' while the case arm only
# matched '--directory': getopt canonicalises the '--directory' abbreviation
# to '--directories', which then fell into the error arm. Both spellings are
# accepted now, and the documented --format/--author long options are
# registered as well.
arguments=$(getopt --name "generate_reference" \
  -o "ho:d:f:a:" \
  -l "help,output-directory:,directory:,directories:,format:,author:" -- "$@")
eval set -- "$arguments"
while true; do
  case "$1" in
  -h | --help)
    echo_help
    shift
    ;;
  -o | --output-directory)
    output_directory=$2
    shift 2
    ;;
  -d | --directory | --directories)
    # May be given multiple times; values accumulate space-separated.
    directories_override="$directories_override $2"
    shift 2
    ;;
  -f | --format)
    format=$2
    shift 2
    ;;
  -a | --author)
    author=$2
    shift 2
    ;;
  --)
    shift
    break
    ;;
  *)
    echo "Missing arguments. Try 'generate_reference --help' for more information"
    exit 1
    ;;
  esac
done
echo "Checking parameters"
# A project directory is mandatory and must exist.
if test -z "$project_directory"; then
echo "Missing first parameter: project_directory."
exit 1
fi
if ! test -d "$project_directory"; then
echo "Directory $project_directory does not exist, exiting."
exit 1
fi
# Locate the first project.godot anywhere under the given directory; its
# parent directory is the real Godot project root.
godot_project_file=$(find "$project_directory" -iname project.godot -print -quit)
if ! test -f "$godot_project_file"; then
echo "Could not find a project.godot file in $project_directory. This program needs a Godot project to work."
exit 1
fi
godot_project_dir=$(dirname "$godot_project_file")
# GDScript collectors shipped with this tool, relative to the CWD.
path_ref_collector="godot-scripts/ReferenceCollectorCLI.gd"
path_collector="godot-scripts/Collector.gd"
# Override the content of the directories variable in ReferenceCollectorCLI.gd if we got --directory arguments
file_ref_collector=$(mktemp)
cat $path_ref_collector > "$file_ref_collector"
if test "$directories_override" != ""; then
echo "Setting directories"
# Wrap each directory name as a quoted "res://<dir>" entry and strip the
# trailing comma, producing a GDScript array literal.
# NOTE(review): 'sed -r'/'-ri' are GNU extensions — assumed GNU sed here.
args=$(echo "$directories_override" | sed -r 's#([-/._a-zA-Z0-9]+)#"res://\1",#g' | sed -r 's/,$//')
sed -ri "s#^var directories.+#var directories := [$args]#" "$file_ref_collector"
fi
echo "Copying collectors to project directory"
cp "$file_ref_collector" "$godot_project_dir/$(basename $path_ref_collector)" >/dev/null
cp $path_collector "$godot_project_dir" >/dev/null
echo "Generating reference json data..."
ERROR_LOG=$(mktemp)
# Run Godot headless; the collector script writes reference.json next to
# project.godot. stderr is captured so it can be replayed on failure.
if ! godot --editor --quit --no-window --script ReferenceCollectorCLI.gd \
--path "$godot_project_dir" 2>"$ERROR_LOG" >/dev/null
then
ERRORS=$(cat "$ERROR_LOG")
cat <<EOT
There was an error running 'godot'.
The program 'godot' must be available on the system '\$PATH' variable for this program to work.
For more information, see https://en.wikipedia.org/wiki/PATH_(variable).
This was the error log:
$ERRORS
EOT
rm "$ERROR_LOG"
exit 1
fi
echo "Done."
if ! [ -f "$godot_project_dir/reference.json" ]
then
echo "There was an error generating the reference from Godot. The file $godot_project_dir/reference.json was not found."
exit 1
fi
echo "Generating markdown files in $output_directory"
if [ ! -d "$output_directory" ]
then
mkdir -v "$output_directory" >/dev/null
fi
# Convert the JSON reference into markdown (or hugo) documents.
if ! python3 -m gdscript_docs_maker "$godot_project_dir/reference.json" --path "$output_directory" --format "$format" \
--author "$author" 2>"$ERROR_LOG"
then
echo "Error running gdscript_docs_maker. This is the log:"
cat "$ERROR_LOG"
exit 1
fi
# Remove the temp log, the collectors copied into the project, and the
# intermediate JSON so the Godot project is left as it was found.
echo "Cleaning up..."
rm "$ERROR_LOG" >/dev/null
rm "$godot_project_dir/$(basename $path_ref_collector)" >/dev/null
rm "$godot_project_dir/$(basename $path_collector)" >/dev/null
rm "$godot_project_dir/reference.json" >/dev/null
exit 0
| true |
9e17861233b17e1276ac3e0458bdd692f52d1ef2 | Shell | agross/dotfiles | /git/tools/diffmerge | UTF-8 | 1,242 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env sh
#
# export GIT_DIFFMERGE_VERBOSE=1 to enable logging
# Git diff/merge tool wrapper for SourceGear DiffMerge. Shared argument
# handling (tool_path, diff_args, merge_args and the variables they set)
# comes from the sourced support script next to this one.
script_path="${0%/*}"
. "$script_path/diff-and-merge-support"
# Pick the first DiffMerge binary that exists, covering Git-for-Windows,
# WSL, PATH installs and the macOS app bundle.
tool="$(tool_path '/c/Tools/DiffMerge/sgdm.exe' \
'/c/Tools/DiffMerge/diffmerge.exe' \
'/mnt/c/Tools/DiffMerge/sgdm.exe' \
'/mnt/c/Tools/DiffMerge/diffmerge.exe' \
'sgdm' \
'diffmerge' \
'/Applications/DiffMerge.app/Contents/Resources/diffmerge.sh')" || exit $?
# First argument selects the mode; remaining arguments are the file paths
# Git passes for that mode.
op="${1?Need operation (diff or merge) as the first argument}"
shift
case "$op" in
diff)
# diff_args sets $left/$right and their window titles.
diff_args "$1" "$2" || exit $?
exec "$tool" "$left" \
"$right" \
--title1="$left_title" \
--title2="$right_title" \
> /dev/null 2>&1
;;
merge)
# merge_args sets $local/$base/$remote/$result plus the descriptions.
merge_args "$1" "$2" "$3" "$4" || exit $?
exec "$tool" --merge \
--result="$result" \
"$local" \
"$base" \
"$remote" \
--title1="Ours: $local_desc" \
--title2="Merged: $4" \
--title3="Theirs: $remote_desc"
;;
*)
printf 'Unknown operation: %s\n' "$op" >&2
exit 1
;;
esac
| true |
d2c84b731a278c90666a319cc74cd13fcaeb9ddb | Shell | capbash/capbash-redis | /install | UTF-8 | 2,393 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Provides notify/debug logging helpers used throughout this installer.
source ./bits/bootstrap/logging
#-----------
# Configurations
#-----------
# ${VAR-default} keeps a caller-supplied value (even an empty one) and only
# falls back to the default when the variable is unset.
export LAUNCHER_OWNER=${LAUNCHER_OWNER-$USER}
export LAUNCHER_DIR=${LAUNCHER_DIR-/var/local}
export REDIS_LAUNCHER_DIR=${REDIS_LAUNCHER_DIR-$LAUNCHER_DIR/redis}
export LOG_DIR=${LOG_DIR-/var/log}
export REDIS_CONFIG_DIR=${REDIS_CONFIG_DIR-$REDIS_LAUNCHER_DIR/config}
export DATA_DIR=${DATA_DIR-/var/local/data}
export REDIS_DATA_DIR=${REDIS_DATA_DIR-$DATA_DIR/redis}
export REDIS_PORT=${REDIS_PORT-6379}
export REDIS_HOST=${REDIS_HOST-"127.0.0.1:"}
export REDIS_VERSION=${REDIS_VERSION-latest}
REDIS_DISPLAY=${REDIS_DISPLAY-standalone} # or internal
#-----------
# Install Script
#-----------
# "standalone" vs "internal" only changes the log message wording.
if [[ "$REDIS_DISPLAY" == "standalone" ]]; then
notify "Installing REDIS ($REDIS_VERSION)..."
else
notify " -- Install REDIS ($REDIS_VERSION)..."
fi
# The ./bits helpers are driven by environment variables set per invocation
# (VAR=value ./script), not by positional arguments.
OWNER=$LAUNCHER_OWNER ./bits/bootstrap/mkdir \
LAUNCHER_DIR \
REDIS_LAUNCHER_DIR \
REDIS_DATA_DIR \
REDIS_CONFIG_DIR \
${REDIS_LAUNCHER_DIR}/src \
${REDIS_LAUNCHER_DIR}/bin
notify " -- Copying config files"
TEMPLATE=./bits/redis/files/config LOCATION=${REDIS_LAUNCHER_DIR}/config ./bits/docker/copyallif
debug " -- Making logger available on node"
TEMPLATE=./bits/bootstrap/logging LOCATION=$REDIS_LAUNCHER_DIR/bin/logging ./bits/docker/copyif
TEMPLATE=./bits/bootstrap/failonerrors LOCATION=$REDIS_LAUNCHER_DIR/bin/failonerrors ./bits/docker/copyif
OWNER=$LAUNCHER_OWNER TEMPLATE=./bits/redis/files/bin LOCATION=$REDIS_LAUNCHER_DIR/bin \
./bits/docker/copyallif
# Pinned version: download the matching Redis tarball and render a
# version-specific Dockerfile; otherwise use the default "latest" Dockerfile.
if [[ "$REDIS_VERSION" != "latest" ]]; then
SRC_NAME=redis-${REDIS_VERSION}.tar.gz SRC_DIR=${REDIS_LAUNCHER_DIR}/src \
NAME="Redis ${REDIS_VERSION}" \
REMOTE_URL=http://download.redis.io/releases \
./bits/bootstrap/wget
TEMPLATE=./bits/redis/files/redis_version.dockerfile LOCATION=$REDIS_LAUNCHER_DIR/Dockerfile \
./bits/docker/copyif \
@REDIS_VERSION@ $REDIS_VERSION
else
TEMPLATE=./bits/redis/files/redis_default.dockerfile LOCATION=$REDIS_LAUNCHER_DIR/Dockerfile ./bits/docker/copyif
fi
# Build the image, generate the docker helper scripts, then clean up.
LAUNCHER_DIR=$REDIS_LAUNCHER_DIR NAME=redis VERSION=$REDIS_VERSION ./bits/docker/build
NAME=redis DIR=$REDIS_LAUNCHER_DIR BIT=redis VERSION=$REDIS_VERSION ./bits/docker/helpers
DIR=./bits/redis ./bits/bootstrap/cleanup
if [[ "$REDIS_DISPLAY" == "standalone" ]]; then
notify "DONE, Installing REDIS."
else
notify " -- DONE, Installing REDIS."
fi
| true |
67df2a0eed90de7305512326e331dc49a9facaba | Shell | xpmotors/origin-aggregated-logging | /test/remote-syslog.sh | UTF-8 | 24,347 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# This is a test suite for the fluent-plugin-remote-syslog settings.
# These tests verify that the configuration files are properly generated based
# on the values of the environment variables.
source "$(dirname "${BASH_SOURCE[0]}" )/../hack/lib/init.sh"
source "${OS_O_A_L_DIR}/hack/testing/util.sh"
os::util::environment::use_sudo
FLUENTD_WAIT_TIME=${FLUENTD_WAIT_TIME:-$(( 2 * minute ))}
MUX_WAIT_TIME=$(( 10 * minute ))
ALTPORT=601
os::test::junit::declare_suite_start "Remote Syslog Configuration Tests"
# save daemonset
saveds=$( mktemp )
oc get --export ds/logging-fluentd -o yaml > $saveds
# switch pods type depending on the mux configuration
fluentdtype="fluentd"
mpod=$( get_running_pod mux )
if [ -n "${mpod:-}" ]; then
# mux is configured; make sure mux client fluentd runs as maximal mode.
oc label node --all logging-infra-fluentd- 2>&1 | artifact_out
os::cmd::try_until_text "oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'" '^0$' $FLUENTD_WAIT_TIME
oc set env ds/logging-fluentd MUX_CLIENT_MODE=maximal 2>&1 | artifact_out
oc label node --all logging-infra-fluentd=true --overwrite=true 2>&1 | artifact_out
os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
fluentdtype="mux"
# save mux config
savemuxdc=$( mktemp )
oc get --export dc/logging-mux -o yaml > $savemuxdc
fi
os::log::info Starting fluentd-plugin-remote-syslog tests at $( date )
# clear the journal
sudo journalctl --vacuum-size=$( expr 1024 \* 1024 \* 2 ) 2>&1 | artifact_out
sudo systemctl restart systemd-journald 2>&1 | artifact_out
# EXIT handler: on failure, dump extensive diagnostics to ARTIFACT_DIR;
# always restore the saved fluentd daemonset (and mux dc, if present) and
# revert the node's rsyslog configuration.
cleanup() {
    local return_code="$?"
    set +e
    if [ $return_code -ne 0 ]; then
        artifact_log "oc get pods"
        oc get pods 2>&1 | artifact_out
        fpod=$( oc get pods --selector component=fluentd -o name | awk -F'/' '{print $2}' )
        oc logs $fpod > $ARTIFACT_DIR/remote-syslog-${fpod}.log 2>&1
        mpod=$( oc get pods --selector component=mux -o name | awk -F'/' '{print $2}' )
        if [ -n "${mpod}" ] ; then
            oc logs $mpod > $ARTIFACT_DIR/remote-syslog-$mpod.log 2>&1
        fi
        oc get events > $ARTIFACT_DIR/remote-syslog-events.txt 2>&1
        sudo journalctl | grep fluentd | tail -n 30 > $ARTIFACT_DIR/remote-syslog-journal-fluentd.log 2>&1
        sudo grep rsyslog /var/log/audit/audit.log > $ARTIFACT_DIR/remote-syslog-audit-rsyslog.log 2>&1
        artifact_log "/var/log/messages files"
        sudo ls -ltZ /var/log/messages* 2>&1 | artifact_out
        sudo tail -n 200 /var/log/messages > $ARTIFACT_DIR/remote-syslog-messages.log 2>&1
        # teststart is set just before rsyslog is reconfigured in test 6.
        if [ -n "${teststart-:}" ] ; then
            sudo journalctl -S "$teststart" -u rsyslog > $ARTIFACT_DIR/remote-syslog-journal-rsyslog.log 2>&1
            sudo journalctl -S "$teststart" -u systemd-journald > $ARTIFACT_DIR/remote-syslog-journal-journald.log 2>&1
            sudo journalctl -S "$teststart" > $ARTIFACT_DIR/remote-syslog-journal.log 2>&1
        fi
    fi
    # Restore the original fluentd daemonset definition.
    oc label node --all logging-infra-fluentd- 2>&1 | artifact_out
    os::cmd::try_until_text "oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'" '^0$' $FLUENTD_WAIT_TIME
    if [ -n "${saveds:-}" -a -f "${saveds:-}" ] ; then
        oc replace --force -f $saveds 2>&1 | artifact_out
    fi
    oc label node --all logging-infra-fluentd=true --overwrite=true 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    # Restore the original mux deployment config, if mux was in play.
    if [ "$fluentdtype" = "mux" ] ; then
        oc scale --replicas=0 dc logging-mux 2>&1 | artifact_out
        os::cmd::try_until_text "oc get dc logging-mux -o jsonpath='{ .status.replicas }'" '^0$' $MUX_WAIT_TIME
        if [ -n "${savemuxdc:-}" -a -f "${savemuxdc:-}" ] ; then
            oc apply --force -f $savemuxdc 2>&1 | artifact_out
        fi
        oc scale --replicas=1 dc logging-mux 2>&1 | artifact_out
        os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux-.* Running " $MUX_WAIT_TIME
    fi
    # Resetting rsyslogd
    # Provides TCP syslog reception
    # $ModLoad imtcp
    # $InputTCPServerRun 514
    if [ -n "${rsyslogconfbakup:-}" -a -f "${rsyslogconfbakup:-}" ]; then
        sudo cp $rsyslogconfbakup /etc/rsyslog.conf
    fi
    if [ -n "${rsyslogconfbakup2:-}" -a -f "${rsyslogconfbakup2:-}" ]; then
        sudo mv $rsyslogconfbakup2 /etc/rsyslog.d
    fi
    os::cmd::expect_success "sudo service rsyslog restart"
    os::test::junit::reconcile_output
    exit $return_code
}
trap "cleanup" EXIT
# Read any pre-existing REMOTE_SYSLOG_HOST from the collector's environment;
# Test 0 only runs when the user configured one before the suite started.
if [ "$fluentdtype" = "fluentd" ] ; then
    my_remote_syslog_host=$( oc set env ds/logging-fluentd --list | awk -F'=' '/^REMOTE_SYSLOG_HOST=/ {print $2}' || : )
else
    my_remote_syslog_host=$( oc set env dc/logging-mux --list | awk -F'=' '/^REMOTE_SYSLOG_HOST=/ {print $2}' || : )
fi
if [ -n "$my_remote_syslog_host" ]; then
    title="Test 0, checking user configured REMOTE_SYSLOG_HOST is respected"
    os::log::info $title
    # Enable the plugin and verify the generated config targets the
    # user-supplied host rather than a default.
    if [ "$fluentdtype" = "fluentd" ] ; then
        oc label node --all logging-infra-fluentd- 2>&1 | artifact_out
        os::cmd::try_until_text "oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'" '^0$' $FLUENTD_WAIT_TIME
        oc set env ds/logging-fluentd USE_REMOTE_SYSLOG=true 2>&1 | artifact_out
        oc label node --all logging-infra-fluentd=true --overwrite=true 2>&1 | artifact_out
        os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
        mypod=$( get_running_pod fluentd )
    else
        # make sure mux is running after previous test
        os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux.* Running "
        oc scale --replicas=0 dc logging-mux 2>&1 | artifact_out
        os::cmd::try_until_text "oc get dc logging-mux -o jsonpath='{ .status.replicas }'" '^0$' $MUX_WAIT_TIME
        oc get pods | grep mux 2>&1 | artifact_out || :
        oc get dc 2>&1 | artifact_out
        oc set env dc/logging-mux USE_REMOTE_SYSLOG=true 2>&1 | artifact_out
        oc get pods | grep mux 2>&1 | artifact_out || :
        oc get dc 2>&1 | artifact_out
        oc scale --replicas=1 dc logging-mux 2>&1 | artifact_out
        oc get pods | grep mux 2>&1 | artifact_out || :
        oc get dc 2>&1 | artifact_out
        os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux-.* Running " $MUX_WAIT_TIME
        mypod=$( get_running_pod mux )
    fi
    os::cmd::try_until_success "oc exec $mypod find /etc/fluent/configs.d/dynamic/output-remote-syslog.conf"
    os::cmd::expect_success_and_text "oc exec $mypod grep 'remote_syslog' /etc/fluent/configs.d/dynamic/output-remote-syslog.conf" "remote_syslog ${my_remote_syslog_host}"
    artifact_log $title $mypod
fi
# Test 1: with USE_REMOTE_SYSLOG=true and a (dummy) host configured, the
# generate_syslog_config.rb run inside the pod must create the dynamic
# output config file.
title="Test 1, expecting generate_syslog_config.rb to have created configuration file"
os::log::info $title
if [ "$fluentdtype" = "fluentd" ] ; then
    # make sure fluentd is running after previous test
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    oc label node --all logging-infra-fluentd- 2>&1 | artifact_out
    os::cmd::try_until_text "oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'" '^0$' $FLUENTD_WAIT_TIME
    # choosing an unrealistic REMOTE_SYSLOG_HOST
    oc set env daemonset/logging-fluentd USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST=111.222.111.222 2>&1 | artifact_out
    oc label node --all logging-infra-fluentd=true --overwrite=true 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    mypod=$( get_running_pod fluentd )
else
    # make sure mux is running after previous test
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux.* Running "
    oc scale --replicas=0 dc logging-mux 2>&1 | artifact_out
    os::cmd::try_until_text "oc get dc logging-mux -o jsonpath='{ .status.replicas }'" '^0$' $MUX_WAIT_TIME
    # choosing an unrealistic REMOTE_SYSLOG_HOST
    oc get pods | grep mux 2>&1 | artifact_out || :
    oc get dc 2>&1 | artifact_out
    oc set env dc/logging-mux USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST=111.222.111.222 2>&1 | artifact_out
    oc get pods | grep mux 2>&1 | artifact_out || :
    oc get dc 2>&1 | artifact_out
    oc scale --replicas=1 dc logging-mux 2>&1 | artifact_out
    oc get pods | grep mux 2>&1 | artifact_out || :
    oc get dc 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux-.* Running " $MUX_WAIT_TIME
    mypod=$( get_running_pod mux )
fi
os::cmd::try_until_success "oc exec $mypod find /etc/fluent/configs.d/dynamic/output-remote-syslog.conf" $MUX_WAIT_TIME
artifact_log $title $mypod
# Test 2: removing REMOTE_SYSLOG_HOST (the trailing '-' unsets the env var)
# must result in NO dynamic output config being generated.
title="Test 2, expecting generate_syslog_config.rb to not create a configuration file"
os::log::info $title
if [ "$fluentdtype" = "fluentd" ] ; then
    oc label node --all logging-infra-fluentd- 2>&1 | artifact_out
    os::cmd::try_until_text "oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'" '^0$' $FLUENTD_WAIT_TIME
    oc set env daemonset/logging-fluentd USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST- 2>&1 | artifact_out
    oc label node --all logging-infra-fluentd=true --overwrite=true 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    mypod=$( get_running_pod fluentd )
else
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux.* Running "
    oc scale --replicas=0 dc logging-mux 2>&1 | artifact_out
    os::cmd::try_until_text "oc get dc logging-mux -o jsonpath='{ .status.replicas }'" '^0$' $MUX_WAIT_TIME
    oc set env dc/logging-mux USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST- 2>&1 | artifact_out
    oc scale --replicas=1 dc logging-mux 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux-.* Running " $MUX_WAIT_TIME
    mypod=$( get_running_pod mux )
fi
os::cmd::try_until_failure "oc exec $mypod find /etc/fluent/configs.d/dynamic/output-remote-syslog.conf" $MUX_WAIT_TIME
artifact_log $title $mypod
# Test 3: configuring REMOTE_SYSLOG_HOST and REMOTE_SYSLOG_HOST2 must
# generate two <store> sections in the dynamic output config.
title="Test 3, expecting generate_syslog_config.rb to generate multiple stores"
os::log::info $title
if [ "$fluentdtype" = "fluentd" ] ; then
    oc label node --all logging-infra-fluentd- 2>&1 | artifact_out
    os::cmd::try_until_text "oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'" '^0$' $FLUENTD_WAIT_TIME
    oc set env daemonset/logging-fluentd USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST=127.0.0.1 REMOTE_SYSLOG_HOST2=127.0.0.1 2>&1 | artifact_out
    oc label node --all logging-infra-fluentd=true --overwrite=true 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    mypod=$( get_running_pod fluentd )
else
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux.* Running "
    oc scale --replicas=0 dc logging-mux 2>&1 | artifact_out
    os::cmd::try_until_text "oc get dc logging-mux -o jsonpath='{ .status.replicas }'" '^0$' $MUX_WAIT_TIME
    oc set env dc/logging-mux USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST=127.0.0.1 REMOTE_SYSLOG_HOST2=127.0.0.1 2>&1 | artifact_out
    oc scale --replicas=1 dc logging-mux 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux-.* Running " $MUX_WAIT_TIME
    mypod=$( get_running_pod mux )
fi
os::cmd::try_until_text "oc exec $mypod grep '<store>' /etc/fluent/configs.d/dynamic/output-remote-syslog.conf | wc -l" '^2$' $MUX_WAIT_TIME
artifact_log $title $mypod
# Test 4: REMOTE_SYSLOG_TAG_KEY=message must be written into the config
# and must not crash the plugin (checked via NoMethodError in the pod log).
title="Test 4, making sure tag_key=message does not cause remote-syslog plugin crash"
os::log::info $title
if [ "$fluentdtype" = "fluentd" ] ; then
    oc label node --all logging-infra-fluentd- 2>&1 | artifact_out
    os::cmd::try_until_text "oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'" '^0$' $FLUENTD_WAIT_TIME
    oc set env daemonset/logging-fluentd USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST=127.0.0.1 REMOTE_SYSLOG_TAG_KEY=message REMOTE_SYSLOG_HOST2- 2>&1 | artifact_out
    oc label node --all logging-infra-fluentd=true --overwrite=true 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    mypod=$( get_running_pod fluentd )
else
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux.* Running "
    oc scale --replicas=0 dc logging-mux 2>&1 | artifact_out
    os::cmd::try_until_text "oc get dc logging-mux -o jsonpath='{ .status.replicas }'" '^0$' $MUX_WAIT_TIME
    oc set env dc/logging-mux USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST=127.0.0.1 REMOTE_SYSLOG_TAG_KEY=message REMOTE_SYSLOG_HOST2- 2>&1 | artifact_out
    oc scale --replicas=1 dc logging-mux 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux-.* Running " $MUX_WAIT_TIME
    mypod=$( get_running_pod mux )
fi
os::cmd::try_until_success "oc exec $mypod find /etc/fluent/configs.d/dynamic/output-remote-syslog.conf" $MUX_WAIT_TIME
os::cmd::expect_success "oc exec $mypod grep 'tag_key message' /etc/fluent/configs.d/dynamic/output-remote-syslog.conf"
os::cmd::expect_success_and_not_text "oc logs $mypod" "nil:NilClass"
artifact_log $title $mypod
# Test 5: same as test 4 but with a tag_key that does not exist in the
# records ("bogus") — plugin must still not crash.
title="Test 5, making sure tag_key=bogus does not cause remote-syslog plugin crash"
os::log::info $title
if [ "$fluentdtype" = "fluentd" ] ; then
    oc label node --all logging-infra-fluentd- 2>&1 | artifact_out
    os::cmd::try_until_text "oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'" '^0$' $FLUENTD_WAIT_TIME
    oc set env daemonset/logging-fluentd USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST=127.0.0.1 REMOTE_SYSLOG_TAG_KEY=bogus 2>&1 | artifact_out
    oc label node --all logging-infra-fluentd=true --overwrite=true 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    mypod=$( get_running_pod fluentd )
else
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux.* Running "
    oc scale --replicas=0 dc logging-mux 2>&1 | artifact_out
    os::cmd::try_until_text "oc get dc logging-mux -o jsonpath='{ .status.replicas }'" '^0$' $MUX_WAIT_TIME
    oc set env dc/logging-mux USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST=127.0.0.1 REMOTE_SYSLOG_TAG_KEY=bogus 2>&1 | artifact_out
    oc scale --replicas=1 dc logging-mux 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux-.* Running " $MUX_WAIT_TIME
    mypod=$( get_running_pod mux )
fi
os::cmd::try_until_success "oc exec $mypod find /etc/fluent/configs.d/dynamic/output-remote-syslog.conf" $MUX_WAIT_TIME
os::cmd::expect_success "oc exec $mypod grep 'tag_key bogus' /etc/fluent/configs.d/dynamic/output-remote-syslog.conf"
os::cmd::expect_success_and_not_text "oc logs $mypod" "nil:NilClass"
artifact_log $title $mypod
# Test 6: end-to-end — reconfigure the node's rsyslogd to accept TCP
# syslog on ALTPORT, point the collector at it, and verify real app/ops
# records land in /var/log/messages.
title="Test 6, use rsyslogd on the node"
os::log::info $title
artifact_log iptables ACCEPT ${ALTPORT}
sudo iptables -A INPUT -m tcp -p tcp --dport ${ALTPORT} -j ACCEPT 2>&1 | artifact_out || :
sudo iptables -L 2>&1 | artifact_out || :
# Make sure rsyslogd is listening on port 514 up and running
# Provides TCP syslog reception
# $ModLoad imtcp
# $InputTCPServerRun 514 -> 601
rsyslogconfbakup=$( mktemp )
cat /etc/rsyslog.conf > $ARTIFACT_DIR/remote-syslog-rsyslog.conf.orig
cp /etc/rsyslog.conf $rsyslogconfbakup
# Edit rsyslog.conf in place: enable imtcp on ALTPORT, disable the local
# imuxsock/imjournal inputs, add a "precise" CSV-ish template, and route
# all messages to /var/log/messages using that template.
sudo sed -i -e 's/^#*\(\$ModLoad imtcp\)/\1/' -e "s/^#*\(\$InputTCPServerRun\) 514/\1 ${ALTPORT}/" \
         -e 's/\(\$ModLoad imuxsock\)/#\1/' -e 's/\(\$ModLoad imjournal\)/#\1/' -e 's/\(\$OmitLocalLogging\)/#\1/' \
         -e 's/\(\$IMJournalStateFile imjournal.state\)/#\1/' -e 's/\(\$ActionFileEnableSync\)/#\1/' \
         -e 's/\(#### RULES .*\)/\1\n\$template precise,"%syslogpriority%,%syslogfacility%,%timegenerated%,%HOSTNAME%,%syslogtag%,%msg%\\n"/' \
         -e 's/^*.info;mail.none;authpriv.none;cron.none *\(\/var\/log\/messages\)/*.* \1;precise/' \
         /etc/rsyslog.conf
sudo ls -l /etc/rsyslog.d | artifact_out || :
# listen.conf (if any) would conflict with our TCP listener; stash it.
rsyslogconfbakup2=/tmp/listen.conf
if [ -f /etc/rsyslog.d/listen.conf ]; then
    sudo mv /etc/rsyslog.d/listen.conf $rsyslogconfbakup2
fi
cat /etc/rsyslog.conf > $ARTIFACT_DIR/remote-syslog-rsyslog.conf.modified
# date in journalctl -S format
teststart=$( date "+%Y-%m-%d %H:%M:%S" )
artifact_log Before restarting rsyslog
sudo service rsyslog status 2>&1 | artifact_out || :
os::cmd::expect_success "sudo service rsyslog stop"
# Rotate /var/log/messages so the assertions below only see fresh records.
sudo mv /var/log/messages /var/log/messages."$( date +%Y%m%d-%H%M%S )" || :
sudo touch /var/log/messages || :
sudo chmod 600 /var/log/messages || :
sudo semanage fcontext -a -t var_log_t -s system_u /var/log/messages 2>&1 | artifact_out || :
sudo restorecon -vF /var/log/messages 2>&1 | artifact_out || :
os::cmd::expect_success "sudo service rsyslog start"
artifact_log After restarted rsyslog
sudo service rsyslog status 2>&1 | artifact_out || :
sudo cat /etc/systemd/journald.conf > $ARTIFACT_DIR/remote-syslog-journald.conf
myhost=$( hostname )
# Point the collector (fluentd directly, or mux with fluentd as client) at
# the node's rsyslogd on ALTPORT and restart it with a clean position/state.
if [ "$fluentdtype" = "fluentd" ] ; then
    # make sure fluentd is running after previous test
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    oc label node --all logging-infra-fluentd- 2>&1 | artifact_out
    os::cmd::try_until_text "oc get daemonset/logging-fluentd -o jsonpath='{ .status.numberReady }'" '^0$' $FLUENTD_WAIT_TIME
    oc set env daemonset/logging-fluentd USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST=$myhost REMOTE_SYSLOG_PORT=${ALTPORT} \
        REMOTE_SYSLOG_USE_RECORD=true REMOTE_SYSLOG_SEVERITY=info \
        REMOTE_SYSLOG_TAG_KEY='ident,systemd.u.SYSLOG_IDENTIFIER,local1.err' 2>&1 | artifact_out
    # Drop the journal read position and fluentd buffers so log collection
    # restarts from a known state.
    sudo rm -f /var/log/journal.pos
    sudo rm -rf /var/lib/fluentd/*
    oc label node --all logging-infra-fluentd=true --overwrite=true 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    mypod=$( get_running_pod fluentd )
else
    # make sure mux is running after previous test
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux.* Running "
    oc scale --replicas=0 dc logging-mux 2>&1 | artifact_out
    os::cmd::try_until_text "oc get dc logging-mux -o jsonpath='{ .status.replicas }'" '^0$' $MUX_WAIT_TIME
    oc set env dc/logging-mux FORWARD_INPUT_LOG_LEVEL=info USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST=$myhost \
        REMOTE_SYSLOG_PORT=${ALTPORT} REMOTE_SYSLOG_USE_RECORD=true \
        REMOTE_SYSLOG_SEVERITY=info REMOTE_SYSLOG_TAG_KEY='ident,systemd.u.SYSLOG_IDENTIFIER,local1.err' 2>&1 | artifact_out
    oc scale --replicas=1 dc logging-mux 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux-.* Running " $MUX_WAIT_TIME
    mypod=$( get_running_pod mux )
    # make sure fluentd is running after previous test
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    oc label node --all logging-infra-fluentd- 2>&1 | artifact_out
    os::cmd::try_until_text "oc get daemonset/logging-fluentd -o jsonpath='{ .status.numberReady }'" '^0$' $FLUENTD_WAIT_TIME
    sudo rm -f /var/log/journal.pos
    sudo rm -rf /var/lib/fluentd/*
    oc set env daemonset/logging-fluentd FORWARD_INPUT_LOG_LEVEL=info 2>&1 | artifact_out
    oc label node --all logging-infra-fluentd=true --overwrite=true 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
fi
os::cmd::try_until_success "oc exec $mypod find /etc/fluent/configs.d/dynamic/output-remote-syslog.conf" $MUX_WAIT_TIME
oc logs $mypod > $ARTIFACT_DIR/remote-syslog-$mypod.log 2>&1
oc exec $mypod -- head -n 60 /etc/fluent/fluent.conf /etc/fluent/configs.d/openshift/output-operations.conf \
    /etc/fluent/configs.d/openshift/output-applications.conf /etc/fluent/configs.d/dynamic/output-remote-syslog.conf | artifact_out || :
artifact_log ping $myhost from $mypod
oc exec $mypod -- ping $myhost -c 3 | artifact_out || :
# wait for the precise formatted logs are found in /var/log/messages
# os::cmd::try_until_text "sudo egrep \"^[0-6],[0-9]*,\" /var/log/messages" "[0-6],[0-9]*,.*" $MUX_WAIT_TIME
# sudo egrep \"^[0-6],[0-9]*,\" /var/log/messages | tail -n 5 | artifact_out || :
artifact_log docker info
docker info | artifact_out || :
# Callback handed to wait_for_fluentd_to_catch_up: records the generated
# application-log message (arg 1) in the global "appsmessage" so later
# assertions can grep /var/log/messages for it.
getappsmsg() {
    # $2 (unused) is the file containing the search output.
    appsmessage="$1"
}
# Callback handed to wait_for_fluentd_to_catch_up: records the generated
# operations-log message (arg 1) in the global "opsmessage" so later
# assertions can grep /var/log/messages for it.
getopsmsg() {
    # $2 (unused) is the file containing the search output.
    opsmessage="$1"
}
# Test 6 verification: generate one ops and one apps message, then wait
# until both show up in the node's /var/log/messages via remote syslog.
rc=0
if ! wait_for_fluentd_to_catch_up getappsmsg getopsmsg ; then
    rc=1
fi
if ! os::cmd::try_until_success "sudo egrep -q '${opsmessage}\$' /var/log/messages" $MUX_WAIT_TIME ; then
    rc=1
fi
sudo egrep "${opsmessage}$" /var/log/messages 2>&1 | artifact_out || :
if ! os::cmd::try_until_success "sudo egrep -q '${appsmessage}' /var/log/messages" $MUX_WAIT_TIME ; then
    rc=1
fi
sudo egrep "/${appsmessage}" /var/log/messages 2>&1 | artifact_out || :
if [ $rc -eq 1 ] ; then
    exit 1
fi
# Test 7: same end-to-end flow as test 6 but with REMOTE_SYSLOG_TAG_KEY
# removed (the trailing '-' unsets it), i.e. exercising the default tag.
title="Test 7, no tag_key"
os::log::info $title
myhost=$( hostname )
if [ "$fluentdtype" = "fluentd" ] ; then
    # make sure fluentd is running after previous test
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    oc label node --all logging-infra-fluentd- 2>&1 | artifact_out
    os::cmd::try_until_text "oc get daemonset/logging-fluentd -o jsonpath='{ .status.numberReady }'" "0" $FLUENTD_WAIT_TIME
    oc set env daemonset/logging-fluentd USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST=$myhost REMOTE_SYSLOG_PORT=${ALTPORT} REMOTE_SYSLOG_USE_RECORD=true REMOTE_SYSLOG_SEVERITY=info REMOTE_SYSLOG_TAG_KEY- 2>&1 | artifact_out
    oc label node --all logging-infra-fluentd=true --overwrite=true 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    mypod=$( get_running_pod fluentd )
else
    # make sure fluentd is running after previous test
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    oc label node --all logging-infra-fluentd- 2>&1 | artifact_out
    os::cmd::try_until_text "oc get daemonset/logging-fluentd -o jsonpath='{ .status.numberReady }'" "0" $FLUENTD_WAIT_TIME
    oc set env daemonset/logging-fluentd FORWARD_INPUT_LOG_LEVEL=info 2>&1 | artifact_out
    oc label node --all logging-infra-fluentd=true --overwrite=true 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=fluentd" "^logging-fluentd-.* Running "
    # make sure mux is running after previous test
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux.* Running "
    oc scale --replicas=0 dc logging-mux 2>&1 | artifact_out
    os::cmd::try_until_text "oc get dc logging-mux -o jsonpath='{ .status.replicas }'" "0" $MUX_WAIT_TIME
    oc set env dc/logging-mux FORWARD_INPUT_LOG_LEVEL=info USE_REMOTE_SYSLOG=true REMOTE_SYSLOG_HOST=$myhost REMOTE_SYSLOG_PORT=${ALTPORT} REMOTE_SYSLOG_USE_RECORD=true REMOTE_SYSLOG_SEVERITY=info REMOTE_SYSLOG_TAG_KEY- 2>&1 | artifact_out
    oc scale --replicas=1 dc logging-mux 2>&1 | artifact_out
    os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux-.* Running " $MUX_WAIT_TIME
    mypod=$( get_running_pod mux )
fi
os::cmd::try_until_success "oc exec $mypod find /etc/fluent/configs.d/dynamic/output-remote-syslog.conf" $MUX_WAIT_TIME
artifact_log $title $mypod
# Same verification as test 6: both messages must reach /var/log/messages.
if ! wait_for_fluentd_to_catch_up getappsmsg getopsmsg ; then
    rc=1
fi
if ! os::cmd::try_until_success "sudo egrep -q '${opsmessage}\$' /var/log/messages" $MUX_WAIT_TIME ; then
    rc=1
fi
sudo egrep "${opsmessage}$" /var/log/messages 2>&1 | artifact_out || :
if ! os::cmd::try_until_success "sudo egrep -q '${appsmessage}' /var/log/messages" $MUX_WAIT_TIME ; then
    rc=1
fi
sudo egrep "/${appsmessage}" /var/log/messages 2>&1 | artifact_out || :
if [ $rc -eq 1 ] ; then
    exit 1
fi
# Check the collector pod log for ruby NoMethodError backtraces, which
# indicate the remote-syslog plugin crashed in the no-tag_key case.
# Returns 0 when the log is clean, 1 when a NoMethodError is present.
# (Previous version leaked a mktemp file on every call and buffered the
# whole log in a shell variable; grep -q on the stream avoids both.)
hasNoMethodError()
{
    # Preserve the full pod log in the artifacts for later inspection.
    oc logs $mypod 2>&1 | artifact_out || :
    if oc logs $mypod 2>&1 | grep -q NoMethodError; then
        artifact_log "failed - NoMethodError found in the no tag_key case"
        return 1
    fi
    artifact_log "good - no NoMethodError in the no tag_key case"
    return 0
}
hasNoMethodError
| true |
34b21cc9148a93aa5dc0efb9d0b0933891bfe742 | Shell | xiongqi1/web | /db_apps/sms_tools/extra_commands_vdf/sms_wakeup | UTF-8 | 1,879 | 3.921875 | 4 | [] | no_license | #!/bin/sh
#---------------------------------------------------------------------------
# For help text
#---------------------------------------------------------------------------
if [ "$1" = "--help" -o "$1" = "-h" ]; then
echo "This shell script is used by SMS tools to wakeup an inactive WAN"
echo "connection, when dial-on-demand is in use."
echo "WARNING: Using this command may cause loss of network connectivity, and may ruin internal system state."
echo ""
echo "This command is only intended for use by SMS tools, but by"
echo "running it without arguments, it could be used to wakeup the"
echo "WAN connection."
exit 0
fi
DOD_EN=`rdb_get dialondemand.enable`
DEFAULT_PF=1
for i in 1 2 3 4 5 6; do
if [ "`rdb_get link.profile.$i.defaultroute`" = "1" ]; then
DEFAULT_PF=$i
break;
fi
done
# search the activated profile
PROFILE_ENABLED="0"
for i in 1 2 3 4 5 6; do
if [ `rdb_get "link.profile.$i.enable"` = "1" ]; then
PROFILE_ENABLED=$i
break;
fi
done
# enable the default profile when no profile is activated
if [ "$PROFILE_ENABLED" = "0" ]; then
rdb_set "link.profile.$DEFAULT_PF.enable" 1
echo "Enabling Profile-$DEFAULT_PF..."
logger "SMS wakeup: Enabling Auto-APN and Profile-$DEFAULT_PF"
fi
# make it online if dial-on-demand is configured
if [ "$DOD_EN" = "1" ]; then
i=0
res=1
while [ $i -lt 5 ]; do
diald-ctrl.sh up
res=$?
if [ $res = 0 ]; then
break
fi
i=$(( $i + 1 ))
sleep 5
done
if [ $res = 0 ]; then
echo "Set dial-on-demand up command successful"
logger "SMS wakeup: Set dial-on-demand up command successful."
else
echo "Set dial-on-demand up command has failed"
logger "SMS wakeup: Set dial-on-demand up command has failed."
fi
else
if [ "$PROFILE_ENABLED" != "0" ]; then
logger "SMS wakeup: no effect, the WAN connection is already up."
fi
fi
| true |
d09fa90bac594a8fe1495a159a6bcedc7c4cb140 | Shell | ralevn/shell_scripts | /AIX/hmcerr.sh | UTF-8 | 1,564 | 2.734375 | 3 | [] | no_license | #!/bin/bash
out_dir="$(pwd)"
out_html_filename="hmc_errs.html"
hmcCmd="sudo ssh hscroot@slphmc73 -i /root/shell/crontab/HMCscanner/id_dsa_hmcauth"
cat <<'EOF' > ${out_dir}/${out_html_filename}
<!doctype html>
<html>
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
padding: 5px;
}
</style>
</head>
<body>
<h3>LPAR List</h3>
EOF
#for frame in $($hmcCmd lssyscfg -r sys -F name 2>/dev/null);
# do
# cat <<EOF >> ${out_dir}/${out_html_filename}
# <h4> ${frame}</h4>
# <table>
# <tr><th>LPAR name</th><th>LPAR</th><th>Environment</th><th>State</th><th>OS</th><th>IP</th></tr>
#
#EOF
cat <<EOF>> ${out_dir}/${out_html_filename}
<table>
<tr><th>Problem #</th><th>hmc</th><th>refcode</th><th>status</th><th>create time</th><th>severity</th><th>text</th></tr>
EOF
i=1
maxn='$hmcCmd lssvcevents -t hardware --filter status=Open -F problem_num" 2>/dev/null|wc -l'
while [$i -le $maxn]
do
$hmcCmd lssvcevents -t hardware --filter status=Open -F problem_num,analyzing_hmc,refcode,status,created_time,event_severity,text 2>/dev/null|while IFS=, read prb hmc refc status time sev txt
do
cat <<EOF >> ${out_dir}/${out_html_filename}
<tr><td>${prb}</td><td>${hmc}</td><td>${refc}</td><td>${status}</td><td>${time}</td><td>${sev}</td><td>txt</td></tr>
EOF
done
cat <<EOF >> ${out_dir}/${out_html_filename}
</table>
EOF
done
cat <<'EOF' >> ${out_dir}/${out_html_filename}
</div>
</body>
</html>
EOF
| true |
e24cb374ad322afefe073cf02ba72e2e4c06f357 | Shell | dancor/nudzh | /init | UTF-8 | 231 | 2.5625 | 3 | [] | no_license | #!/bin/sh
set -e
if [ ! -e corpus ]
then
mkdir corpus
cd corpus
wget http://dzl.no-ip.org/l/zw/2474.zip
unzip 2474.zip
cd ..
fi
if [ ! -e corpus/z.mem ]
then
cd corpus
wget http://dzl.no-ip.org/l/zw/z.mem
cd ..
fi
| true |
a0b5617913ddc0d5cdffcca249f441f7bd6de4b3 | Shell | sidkashyap/scripts | /cosmos_results_posix/IOR_DiRAC_POSIX/NS.FPS.HDF5/test.qsub | UTF-8 | 740 | 3 | 3 | [] | no_license | #!/bin/bash
# ior.pbs
# Invoke ior with typical arguments and default environment
# vlaues
#PBS -l nodes=2:ppn=16
#PBS -l walltime=04:00:00
#PBS -A dr002
#PBS -N NS_FPS
#PBS -m bea
#PBS -M sid.kashyap@ed.ac.uk
module load gcc/5.1.0
module load openmpi/gcc/1.8.3
TARGET="/data/admin/IOR_DiRAC_POSIX/NS.FPS"
IOR="/scratch/admin/ior/build/bin/ior"
IOR_SCRIPT="/scratch/admin/IOR_DiRAC_POSIX/NS.FPS/con.ior"
date '+%Y%m%d%H%M%S'
pushd $TARGET
rm -rf testDir
mkdir testDir
ls $TARGET/testDir
numNodes=( 2 )
blockSize=( 1 )
for test in `seq 1`
do
test=$(( $test - 1 ))
cmd="$IOR -vvv -b ${blockSize[$test]}g -f $IOR_SCRIPT"
mpirun -np ${numNodes[$test]} -pernode $IOR -vvv -b ${blockSize[$test]}g -f $IOR_SCRIPT
done
popd
| true |
2da5586425af145d7d597fe0a834a9b764ff9a3c | Shell | marvinzh/cyclegan-ivec | /ivec-cyclegan-pytorch/src/plda_scoring/local/scoring_common_1.sh | UTF-8 | 703 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# Copyright 2015 David Snyder
# Apache 2.0.
#
if [ $# != 6 ]; then
echo "Usage: $0 <plda-data-dir> <enroll-data-dir> <test-data-dir> <plda-ivec-dir> <enroll-ivec-dir> <test-ivec-dir>"
fi
plda_data_dir=${1%/}
enroll_data_dir=${2%/}
test_data_dir=${3%/}
plda_ivec_dir=${4%/}
enroll_ivec_dir=${5%/}
test_ivec_dir=${6%/}
if [ ! -f ${test_data_dir}/trials ]; then
echo "${test_data_dir} needs a trial file."
exit;
fi
mkdir -p local/.tmp
# Compute gender independent and dependent i-vector means.
run.pl ${plda_ivec_dir}/log/compute_mean.log \
ivector-normalize-length scp:${plda_ivec_dir}/ivector.scp \
ark:- \| ivector-mean ark:- ${plda_ivec_dir}/mean.vec || exit 1;
rm -rf local/.tmp
| true |
1a55659587be2de72a705d2313acd6d83e1fadfe | Shell | dulovic/dell-xps-9370 | /home/ali/.bash_aliases | UTF-8 | 1,002 | 2.65625 | 3 | [] | no_license | # Start DE
alias xx='startx'
# Network aliases
alias wanip='curl ipecho.net/plain ; echo'
alias lanip='ip -o addr show up primary scope global | while read -r num dev fam addr rest; do echo ${addr%/*}; done'
# System aliases
alias rmorph='sudo pacman -Rs $(pacman -Qqtd)'
alias update='sudo pacman -Syyuu'
alias systeminfo='inxi -c 5 -b'
alias archey='archey3'
alias h='history'
alias df='df -kTh'
alias sudo='sudo '
alias ipa='ip -br -c a'
alias lock='/home/ali/scripts/i3lock-fancy-rapid 5 3'
alias sleep='/home/ali/scripts/i3lock-fancy-rapid 5 3; systemctl suspend'
# Folders and Files aliases
alias ls='ls --color=auto'
alias ll='ls -al --color=auto'
alias lsl='ls -l --color=auto'
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
alias mkdir='mkdir -p'
# Directory navigation aliases
alias cd..='cd ..'
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias .....='cd ../../../..'
# Other aliases
alias cow='fortune | cowsay'
alias matrix='cmatrix -C cyan'
| true |
f8bff1eab3789f544c75c9955b276670c56c0588 | Shell | zhasutonggg/LVS-DR- | /lvs_DR_realserver.sh | UTF-8 | 1,622 | 3.453125 | 3 | [] | no_license | #!/bin/bash
#chkconfig: - 28 71
#desription: LVS FOR DR is real server
LOCK=/var/lock/ipvsadm.lock
VIP=192.168.132.80
. /etc/init.d/functions
# Bind the virtual IP (VIP) to loopback alias lo:10 and suppress ARP for it,
# so this real server can accept packets forwarded by the LVS-DR director
# without answering ARP requests for the VIP. Idempotent: no-op if lo:10 is up.
start() {
# A non-zero match count means lo:10 (and therefore the VIP) already exists.
PID=`ifconfig |grep lo:10|wc -l`
if [ $PID -ne 0 ];then
echo "The LVS-DR-RIP is already running"
else
# Bring the VIP up on the loopback alias with a host netmask and route it locally.
/sbin/ifconfig lo:10 $VIP netmask 255.255.255.255 broadcast $VIP up
/sbin/route add -host $VIP dev lo:10
# arp_ignore=1 / arp_announce=2 on lo, the real NIC (ens33) and "all":
# do not reply to or advertise ARP for the VIP (LVS-DR real-server tuning).
echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "1" >/proc/sys/net/ipv4/conf/ens33/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/ens33/arp_announce
echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
# The lock file marks the service as running; status() checks for it.
/bin/touch $LOCK
echo "Start LVS-DR-RIP server is OK"
fi
}
# Tear the VIP down: remove the loopback route and alias, restore the
# kernel-default ARP behaviour (all tunables back to 0), and remove the
# lock file created by start().
stop() {
/sbin/route del -host $VIP dev lo:10
/sbin/ifconfig lo:10 down >/dev/null
echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "0" >/proc/sys/net/ipv4/conf/ens33/arp_ignore
echo "0" >/proc/sys/net/ipv4/conf/ens33/arp_announce
echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
rm -rf $LOCK
echo "Stopping LVS-DR-RIP is OK"
}
# Report whether the service is running. The judgement is based solely on
# the lock file's existence, not on inspecting lo:10 itself.
status() {
if [ -e $LOCK ];
then
echo "The LVS-DR-RIP is running"
else
echo "The LVS-DR-RIP is not running"
fi
}
# Interactive menu. select stores the raw number the user typed in $REPLY:
# 1=start 2=stop 3=status 4=restart (stop then start) 5=quit.
select chance in start stop status restart quit;do
case "$REPLY" in
1)
start
exit 0
;;
3)
status
exit 0
;;
4)
# Restart: full stop followed by start.
stop
start
exit 0
;;
2)
stop
exit 0
;;
5)
echo "bye"
break
;;
*)
# Any other input: the (Chinese) message asks the user to choose 1-5 again.
echo "选择错误,请重新选择(1,2,3,4,5)"
;;
esac
done
| true |
e85b1197d44d606ac7e53167e8f32934eda25243 | Shell | rodolpheche/raspberry-archlinux-packer | /test.sh | UTF-8 | 607 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Boot a built Arch Linux ARM disk image under QEMU (aarch64) for testing.
# Usage: test.sh [image-name]   (defaults to archlinux-vm)
# A scratch copy of the image is kept under test/ so runs never mutate dist/.
mkdir -p test
# Always discard the previous scratch copy of the default image.
rm -f test/archlinux-vm

# Image to boot; ${1:-...} falls back to the default when no argument is given.
NAME=${1:-archlinux-vm}

# Seed the scratch copy from dist/ the first time this name is used.
# Quoting guards against names containing whitespace.
if [ ! -f "test/$NAME" ]
then
    cp "dist/$NAME" "test/$NAME"
fi

qemu-system-aarch64 -M virt -cpu cortex-a53 -smp 4 -m 1024M -serial stdio \
    -kernel dist/Image \
    -initrd dist/initramfs-linux-fallback.img \
    -drive "if=none,file=test/$NAME,format=raw,id=hd" \
    -device virtio-blk-device,drive=hd \
    -netdev user,id=mynet \
    -device virtio-net-device,netdev=mynet \
    -append "root=/dev/vda2 rw loglevel=3 elevator=deadline fsck.repair=yes net.ifnames=0 rootwait" # TODO same loglevel inside cmdline
| true |
94df42068b4483b7b0e528fe7d2d0bda5968e9e0 | Shell | albertito/chasquid | /test/t-19-dkimpy/config/hooks/post-data | UTF-8 | 1,380 | 3.421875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# If authenticated, sign; otherwise, verify.
#
# It is not recommended that we fail delivery on dkim verification failures,
# but leave it to the MUA to handle verifications.
# https://tools.ietf.org/html/rfc6376#section-2.2
#
# We do a verification here so we have a stronger integration test (check
# encodings/dot-stuffing/etc. works ok), but it's not recommended for general
# purposes.
set -e
TF="$(mktemp --tmpdir post-data-XXXXXXXXXX)"
trap 'rm "$TF"' EXIT
# Save the message to the temporary file.
cat > "$TF"
if [ "$AUTH_AS" != "" ]; then
DOMAIN=$( echo "$MAIL_FROM" | cut -d '@' -f 2 )
# Call /usr/bin/dkimsign directly to prevent a conflict with
# driusan/dkim, which the integration tests install in ~/go/bin.
/usr/bin/dkimsign \
"$(cat "domains/$DOMAIN/dkim_selector")" \
"$DOMAIN" \
"../.dkimcerts/private.key" \
< "$TF" > "$TF.dkimout"
# dkimpy doesn't provide a way to just show the new headers, so we
# have to compute the difference.
# ALSOCHANGE(etc/chasquid/hooks/post-data)
diff --changed-group-format='%>' \
--unchanged-group-format='' \
"$TF" "$TF.dkimout" && exit 1
rm "$TF.dkimout"
else
# NOTE: This is using driusan/dkim instead of dkimpy, because dkimpy can't be
# overriden to get the DNS information from anywhere else (text file or custom
# DNS server).
dkimverify -txt ../.dkimcerts/private.dns < "$TF"
fi
| true |
d318615b191172c5946365f0700723bb6cad3527 | Shell | sdmccabe/poq-constructing-samples-replication | /voter_file_panel/histograms.sh | UTF-8 | 3,371 | 3.203125 | 3 | [] | no_license | #!/bin/bash
INPUT_DATA_FOLDER="/net/data-backedup/twitter-voters/voter-data/targetsmart_oct2017/unzipped/"
NUM_CORES=16
RESERVE_MEM="33%"
OUTPUT_FOLDER="/net/data/twitter-voters/pew_collab_data/external/vf_histograms"
# histogram takes a sample awk call (passed through GNU parallel), runs the awk script in parallel,
# sorts the output, and outputs the counts.
# the GNU parallel bit makes the interpretation a little trickier. note that the awk call must end with "%",
# representing the input file. symbols (including the dollar sign) must be escaped, unlike with normal awk.
# Build a frequency table over every CSV under $INPUT_DATA_FOLDER.
# $1 is an awk one-liner template in which "%" stands for one input file;
# GNU parallel runs one copy per file (-j $NUM_CORES, line-buffered output).
# The combined output is sorted, counted with uniq -c, and the leading
# whitespace uniq emits is stripped, yielding lines of "<count> <value>".
function histogram () {
find $INPUT_DATA_FOLDER -name "*.csv" | parallel -I% -j $NUM_CORES --max-args 1 --lb $1 | sort -S $RESERVE_MEM --parallel=$NUM_CORES | uniq -c | sed "s/^\s*//"
}
# Same as histogram, but restricted (by filename) to the closed-primary
# states CT, DE, FL, KS, KY, ME, MD, DC, NE, NM, NY, PA, WY.
# NOTE(review): "(?:...)" is a PCRE idiom; find's posix-egrep regextype may
# not accept it -- verify this pattern actually matches the intended files.
function histogram_closed_primary_states () {
find $INPUT_DATA_FOLDER -regextype posix-egrep -regex ".*(?:CT|DE|FL|KS|KY|ME|MD|DC|NE|NM|NY|PA|WY)\.csv" -name "*.csv" | parallel -I% -j $NUM_CORES --max-args 1 --lb $1 | sort -S $RESERVE_MEM --parallel=$NUM_CORES | uniq -c | sed "s/^\s*//"
}
[ ! -f $OUTPUT_FOLDER/county.txt ] && histogram "awk -F '\t' 'FNR>1{print \$8 \" \" \$25}' %" > $OUTPUT_FOLDER/county.txt
[ ! -f $OUTPUT_FOLDER/age_reg.txt ] && histogram "awk -F '\t' 'FNR>1{print \$8}' %" > $OUTPUT_FOLDER/state.txt
[ ! -f $OUTPUT_FOLDER/partisan_score.txt ] && histogram "awk -F '\t' 'FNR>1{print \$83}' %" > $OUTPUT_FOLDER/partisan_score.txt
[ ! -f $OUTPUT_FOLDER/party.txt ] && histogram "awk -F '\t' 'FNR>1{print \$23}' %" > $OUTPUT_FOLDER/party.txt
[ ! -f $OUTPUT_FOLDER/age.txt ] && histogram "awk -F '\t' 'FNR>1{print \$20}' %" > $OUTPUT_FOLDER/age.txt
[ ! -f $OUTPUT_FOLDER/race.txt ] && histogram "awk -F '\t' 'FNR>1{print \$22}' %" > $OUTPUT_FOLDER/race.txt
[ ! -f $OUTPUT_FOLDER/gender.txt ] && histogram "awk -F '\t' 'FNR>1{print \$21}' %" > $OUTPUT_FOLDER/gender.txt
[ ! -f $OUTPUT_FOLDER/registration_status.txt ] && histogram "awk -F '\t' 'FNR>1{print \$18}' %" > $OUTPUT_FOLDER/registration_status.txt
[ ! -f $OUTPUT_FOLDER/county_RV.txt ] && histogram "awk -F '\t' 'FNR>1{if (\$18==\"Registered\") print \$8 \" \" \$25}' %" > $OUTPUT_FOLDER/county_RV.txt
[ ! -f $OUTPUT_FOLDER/age_RV.txt ] && histogram "awk -F '\t' 'FNR>1{if (\$18==\"Registered\") print \$20}' %" > $OUTPUT_FOLDER/age_RV.txt
[ ! -f $OUTPUT_FOLDER/race_RV.txt ] && histogram "awk -F '\t' 'FNR>1{if (\$18==\"Registered\") print \$22}' %" > $OUTPUT_FOLDER/race_RV.txt
[ ! -f $OUTPUT_FOLDER/gender_RV.txt ] && histogram "awk -F '\t' 'FNR>1{if (\$18==\"Registered\") print \$21}' %" > $OUTPUT_FOLDER/gender_RV.txt
[ ! -f $OUTPUT_FOLDER/state_RV.txt ] && histogram "awk -F '\t' 'FNR>1{if (\$18==\"Registered\") print \$8}' %" > $OUTPUT_FOLDER/state_RV.txt
[ ! -f $OUTPUT_FOLDER/partisan_score_RV.txt ] && histogram "awk -F '\t' 'FNR>1{if (\$18==\"Registered\") print \$83}' %" > $OUTPUT_FOLDER/partisan_score_RV.txt
[ ! -f $OUTPUT_FOLDER/party_RV.txt ] && histogram "awk -F '\t' 'FNR>1{if (\$18==\"Registered\") print \$23}' %" > $OUTPUT_FOLDER/party_RV.txt
[ ! -f $OUTPUT_FOLDER/party_closed_pimary_RV.txt ] && histogram_closed_primary_states "awk -F '\t' 'FNR>1{if (\$18==\"Registered\") print \$23}' %" > $OUTPUT_FOLDER/party_closed_primary_RV.txt
[ ! -f $OUTPUT_FOLDER/party_closed_primary.txt ] && histogram_closed_primary_states "awk -F '\t' 'FNR>1{print \$23}' %" > $OUTPUT_FOLDER/party_closed_primary.txt
| true |
62142ee03197121f1ff9a5747da89606c4d98627 | Shell | vlttnv/Tophat | /tests/all_tests.sh | UTF-8 | 428 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# To run the script:
# chmod a+x all_test.sh
# ./all_test.sh
# Note: ports 6000, 5000, 5001, 5002 will be used
printf 'Make all test scripts executable.\n'
chmod a+x test_one.sh
chmod a+x test_two.sh
chmod a+x test_three.sh
chmod a+x test_four.sh
chmod a+x test_five.sh
chmod a+x test_six.sh
printf 'Run all test scripts.\n'
./test_one.sh
./test_two.sh
./test_three.sh
./test_four.sh
./test_five.sh
./test_six.sh | true |
9fc42868bb5dc2969c9d26ae0dd8e483bb2ab41e | Shell | eo4929/Docker-hadoop | /docker-hadoop/namenode/run.sh | UTF-8 | 582 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Start an HDFS namenode, formatting its name directory on first run.
# Requires HDFS_CONF_dfs_namenode_name_dir, CLUSTER_NAME, HADOOP_HOME and
# HADOOP_CONF_DIR to be set in the environment.

# Strip the file:// scheme to obtain the local filesystem path.
namedir=$(echo "$HDFS_CONF_dfs_namenode_name_dir" | perl -pe 's#file://##')
if [ ! -d "$namedir" ]; then
  echo "Namenode name directory not found: $namedir"
  exit 2
fi

if [ -z "$CLUSTER_NAME" ]; then
  echo "Cluster name not specified"
  exit 2
fi

echo "remove lost+found from $namedir"
# -f keeps this quiet when lost+found does not exist; -- guards the path.
rm -rf -- "$namedir/lost+found"

# An empty name directory means the namenode has never been formatted.
if [ -z "$(ls -A "$namedir")" ]; then
  echo "Formatting namenode name directory: $namedir"
  "$HADOOP_HOME/bin/hdfs" --config "$HADOOP_CONF_DIR" namenode -format "$CLUSTER_NAME"
fi

"$HADOOP_HOME/bin/hdfs" --config "$HADOOP_CONF_DIR" namenode
| true |
199c42934228d3d48df151c442a214ef1ec35a3b | Shell | rjosest/WTC | /bsubScripts/WTCAirwayParticlesFromPoints.sh | UTF-8 | 1,960 | 3.3125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
#BSUB -L /bin/bash
#BSUB -J my_job[1-4025]%100
#BSUB -M 4000
#BSUB -o job_out
#BSUB -e job_err
#BSUB -q short
#Other options commented
#number of processors
##BSUB -n 5
#number of threads
##BSUB -T 10
#send email for each job
##BSUB -N
# Process one scan: fetch the CT volume and airway seed points from the
# remote data host, run the CIP airway-particle extraction, push the
# resulting VTK files back, and clean up the scratch space.
# Arguments: $1 = case id (cid), $2 = scan name, $3 = scratch directory.
function run {
cid=$1
name=$2
tmpDir=$3
#Set up variables
cip=/PHShome/rs117/projects/code/cip
cipBuild=/PHShome/rs117/projects/code/cip-build/
#Delay to prevent overflow
# NOTE(review): MADWait.pl (args 100 10 15) appears to throttle job
# start-up -- confirm its exact semantics before tuning.
perl /PHShome/rs117/projects/code/acil/Scripts/MADWait.pl 100 10 15
# run the analysis command
mkdir $tmpDir/$name
cd $tmpDir
# Pull the image volume and the airway region/type points for this scan.
scp "mad-replicated1.research.partners.org:Processed/WTC/$cid/$name/$name.*" .
scp "mad-replicated1.research.partners.org:Processed/WTC/$cid/$name/${name}_airwayRegionAndTypePoints.csv" .
python $cip/Scripts/ExtractAirwayParticlesFromPoints.py -c $name --cipPython $cip --cipBuildDir $cipBuild --tmpDir $tmpDir/$name --dataDir $tmpDir
# Upload the derived VTK files, then delete scratch data
# (\rm bypasses any interactive rm alias).
scp $tmpDir/${name}_*Airway*.vtk mad-replicated1.research.partners.org:Processed/WTC/$cid/$name/
\rm -rf $tmpDir/${name}/
\rm $tmpDir/${name}.nhdr
\rm $tmpDir/${name}.raw.gz
}
# move to the directory where the data files locate
#cd /PHShome/rs117/Databases/COPDGene
# set input file to be processed
#set name=(`sed -n "$LSB_JOBINDEX"p /PHShome/rs117/Databases/COPDGene/20CaseListFromReston.txt`)
name=(`sed -n "$LSB_JOBINDEX"p /PHShome/rs117/Databases/WTC/WTNewINSPCaseList.txt`)
#name=(`sed -n "$LSB_JOBINDEX"p /PHShome/rs117/Databases/WTC/WTPriorityList.txt`)
if [ -z "$name" ]
then
echo "No case name"
exit
fi
cid=`echo $name | cut -f1 -d "_"`
study="WTC"
tmpDir=/data/acil/tmp/WTCAirwayAnalysis
#if [ -e $tmpDir/${name}_AirwayParticlesSubset.vtk ]
#then
# exit
#fi
testfile=Processed/$study/$cid/$name/${name}_airwayRegionAndTypePoints.csv
ssh mad-replicated1.research.partners.org "test -s $testfile" && run $cid $name $tmpDir || echo "$name: airway point file does not exists"
#echo "Job: $LSB_JOBINDEX"
| true |
037c5c3ddb582b1b4e7db3ab5c43628aa055f893 | Shell | excursive/configfiles | /vim/vim-cheatsheet.sh | UTF-8 | 4,066 | 2.734375 | 3 | [] | no_license | #!/bin/bash
printf '\e[35mmagenta: buffers/files/windows/tabs
\e[32mgreen: actions\e[0m
\e[36mcyan: movement
\e[34mblue: visual mode
\e[33myellow: insert
\e[31mred: delete/copy/paste\e[0m
can put numbers in front of nearly any command, e.g. \e[1;36m5h\e[0m to move 5 chars left
vim \e[1;35m-R\e[0;35m [files...]\e[0m open files in read-only mode
\e[1;35m:e[dit]\e[0;35m[!] [file]\e[0m open file (! discards changes to current file)
\e[1;35m:sav[eas]\e[0m save as \e[1;35mCtrl-^\e[0m swap to last edited file
\e[1;35m:n[ext]\e[0m / \e[1;35m:prev[ious]\e[0m / \e[1;35m:fir[st]\e[0m / \e[1;35m:la[st]\e[0m switch files in \e[1margs\e[0m list
\e[1;35m:bn[ext]\e[0m / \e[1;35m:bp[revious]\e[0m / \e[1;35m:bf[irst]\e[0m / \e[1;35m:bl[ast]\e[0m switch buffer in \e[1mbuffer\e[0m list
\e[1;35m:n[ext]\e[0m / \e[1;35m:prev[ious]\e[0m [save current and] go to next/prev file in \e[1margs\e[0m list
\e[1;35m:args\e[0;35m [file1.txt] [file2.txt] [*.txt]\e[0m open new list of files
\e[1;35m:buffers\e[0;35m[!]\e[0m / \e[1;35m:files\e[0;35m[!]\e[0m / \e[1;35m:ls\e[0;35m[!]\e[0m list all [include unlisted] files in buffer list
\e[0;35m:[N]\e[1;35m[v]sp[lit]\e[0m \e[35m[file.txt]\e[0m / \e[0;35m:[N]\e[1;35m[v]new\e[0m split window [N lines high/wide]
\e[1;35mCtrl-W h\e[0m/\e[1;35mj\e[0m/\e[1;35mk\e[0m/\e[1;35ml\e[0m switch windows \e[1;35mCtrl-W H\e[0m/\e[1;35mJ\e[0m/\e[1;35mK\e[0m/\e[1;35mL\e[0m move window to far left/bottom/...
\e[1;35mCtrl-W [+/-/_]\e[0m increase/decrease/set size of window
\e[1;35m:clo[se]\e[0m / \e[1;35mCtrl-W c\e[0m close window \e[1;35m:on[ly]\e[0m / \e[1;35mCtrl-W o\e[0m close all other windows
\e[1;35m:\e[0;35m[w]\e[1;35mqa[ll]\e[0;35m[!]\e[0m close all windows and quit
vim \e[1;35m-p[N]\e[0m [files...] open up to N tab pages, one for each file
\e[1;35m:tabe[dit]\e[0;35m [file]\e[0m / \e[1;35m:tabnew\e[0m edit file/create new file in a new tab page
\e[1;35mgt\e[0m / \e[1;35mgT\e[0m go to next/previous tab \e[1;35m:tabs\e[0m list tabs
\e[1;32mu\e[0m undo \e[1;32mCtrl-R\e[0m redo
\e[1;32m.\e[0m repeat last command
go to line: \e[1;36mgg\e[0m line 1 \e[1;36m:n\e[0m / \e[1;36mnG\e[0m line n \e[1;36mG\e[0m last line
\e[1;36mCtrl-U\e[0m up half screen of text \e[1;36mCtrl-D\e[0m down half screen of text
\e[1;36mzz\e[0m center cursor line \e[1;36mzt\e[0m cursor line at top \e[1;36mzb\e[0m cursor line at bottom
\e[1;36m{\e[0m / \e[1;36m}\e[0m code block
\e[1;36m0\e[0m \e[1;36m^\e[0mmove to first/first non-blank/last char in line\e[1;36m$\e[0m
\e[1;36mgm\e[0m/\e[1;36mgM\e[0m to middle of line / screen line
\e[1;36mf\e[0m/\e[1;36mF{char}\e[0m to [N]th occurrence of {char} to the right/left
\e[1;36mt\e[0m/\e[1;36mT{char}\e[0m till before/after the [N]th occurrence of {char} to the right/left
\e[1;36m``\e[0m previous cursor position \e[1;36m`.\e[0m position of last change
\e[1;36m`"\e[0m cursor position when last editing file
\e[1;36mm[a-zA-Z]\e[0m place mark \e[1;36m`[a-zA-Z]\e[0m jump \e[1;36m'\''[a-zA-Z]\e[0m beginning of line with mark
capital letter marks are global and can be used to jump to different a file
\e[1;36m:marks\e[0m list marks
\e[1;36m/string\e[0m search (must escape \^.*[]%%~/?$ chars) \e[1;36mn\e[0m next / \e[1;36mN\e[0m previous match
\e[1;36m^\e[0mline\e[1;36m$\e[0m \e[1;36m\<\e[0mword\e[1;36m\>\e[0m \e[1;36m(\e[0mgroup as atom\e[1;36m)\e[0m any#\e[1;36m*\e[0m 0or1\e[1;36m\?\e[0m NtoM\e[1;36m\{n,m}\e[0m
\e[1;36m\zs\e[0mset start/end of match\e[1;36m\ze\e[0m
\e[1;36m.\e[0m char(no EOL) \e[1;36m\_.\e[0m char \e[1;36m\s\e[0m whitespace \e[1;36m\S\e[0m non-ws \e[1;36m\d\e[0m digit \e[1;36m[\e[0mchar set\e[1;36m]\e[0m
\e[1;36m*\e[0m / \e[1;36m#\e[0m shortcut to search forward/backward for word under cursor
visual mode: \e[1;34mv\e[0m character \e[1;34mV\e[0m line \e[1;34mCtrl-V\e[0m block
\e[1;31mdd\e[0m cut line \e[1;31myy\e[0m copy line
\e[1;33mp\e[0m paste \e[1;33mP\e[0m paste before
delete: \e[1;31md\e[0m[\e[1;36mmotion\e[0m]\n'
| true |
0f2ce664dbbba3fe3af29eef3fa2b05d36059994 | Shell | xiaoxiaoh16/Linux-study-log | /shell-script-ex10.sh | UTF-8 | 901 | 3.234375 | 3 | [] | no_license | #/*************************************************************************
# > File Name: shell-script-ex5.sh
# > Author: xiaoxiaoh
# > Mail: xiaoxxhao@gmail.com
# > Created Time: Fri Apr 7 15:36:56 2017
# ************************************************************************/
# 设计一个shell程序,添加一个新组为class1,然后添加属于这个组的30个用户,用户名的形式为stdxx,其中xx从01到30。
#!/bin/bash
#groupadd class1
#i=1
#while [ $i -le 30 ]
#do
# if [ $i -le 9 ]
# then
# username=stud0$i
# else
# username=stud$i
# fi
# i=$((i+1))
# useradd -g class1 $username
#done
#!/bin/bash
# Create group "class1" and 30 users named stud01..stud30. Each user gets a
# home directory owned by the user and group-owned by class1.
# Fixes: the original `[ $i -le 9]` was missing a space before `]` (a runtime
# error on every iteration, so names were never zero-padded), and
# `mkdir /home/username` used the literal string instead of "$username".
sudo groupadd class1
i=1
while [ "$i" -le 30 ]
do
	# Zero-pad single-digit numbers: stud01..stud09, then stud10..stud30.
	printf -v username 'stud%02d' "$i"
	i=$((i+1))
	sudo useradd "$username"
	sudo mkdir "/home/$username"
	sudo chown -R "$username" "/home/$username"
	sudo chgrp -R class1 "/home/$username"
done
| true |
fcd09d8be448944bb599fef85059db71b56903a1 | Shell | TomJKono/Maize_Tandem_Evolution | /Scripts/Data_Handling/Setup_PAML_Files.sh | UTF-8 | 1,581 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Sanitize files for PAML - replace long species names with short ones in both
# the tree and alignment files
# Set up paths
SP_NAME="/Users/tomkono/Dropbox/GitHub/Maize_Tandem_Evolution/Scripts/Data_Handling/Shorten_Species.sed"
PAML_IN_DIR="/Volumes/LaCie/Maize_Tandem_Evolution/Orthofinder/PAML_Comp_Tests"
SEL_OGS="/Users/tomkono/Dropbox/GitHub/Maize_Tandem_Evolution/Results/Orthofinder/Selected_OGs_Compensated_TandemIDs.txt"
BKTFLT="/Volumes/LaCie/Maize_Tandem_Evolution/Orthofinder/25gap_FLT"
MARK_SCRIPT="/Users/tomkono/Dropbox/GitHub/Maize_Tandem_Evolution/Scripts/Data_Handling/Mark_Trees_For_CodeML.py"
TREES="/Volumes/LaCie/Maize_Tandem_Evolution/Orthofinder/Tree_25"
# First, make the PAML directory structure
~/anaconda_ete/bin/python ${MARK_SCRIPT} ${TREES} ${SEL_OGS} ${PAML_IN_DIR}
# For each selected orthogroup, modify its alignment input and tree input
# for short names
for i in $(cut -f 1 ${SEL_OGS})
do
sed -i.bak -f ${SP_NAME} ${PAML_IN_DIR}/${i}/Null/${i}_Marked.tree
sed -i.bak -f ${SP_NAME} ${PAML_IN_DIR}/${i}/Ha1/${i}_Marked.tree
sed -i.bak -f ${SP_NAME} ${PAML_IN_DIR}/${i}/Ha2/${i}_Marked.tree
sed -i.bak -f ${SP_NAME} ${PAML_IN_DIR}/${i}/Ha3/${i}_Marked.tree
sed -f ${SP_NAME} ${BKTFLT}/${i}_BKT_25Flt.fa > ${PAML_IN_DIR}/${i}/Null/${i}_BKT.fa
sed -f ${SP_NAME} ${BKTFLT}/${i}_BKT_25Flt.fa > ${PAML_IN_DIR}/${i}/Ha1/${i}_BKT.fa
sed -f ${SP_NAME} ${BKTFLT}/${i}_BKT_25Flt.fa > ${PAML_IN_DIR}/${i}/Ha2/${i}_BKT.fa
sed -f ${SP_NAME} ${BKTFLT}/${i}_BKT_25Flt.fa > ${PAML_IN_DIR}/${i}/Ha3/${i}_BKT.fa
done
| true |
7c408242b703383df16a8b1d127d86b31b43d1f0 | Shell | qixin5/debloating_study | /expt/debaug/benchmark/rm-8.4/rdsfuzz/basetestscript/I0/4 | UTF-8 | 117 | 2.53125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Fuzz-test driver: exercise the candidate `rm` binary ($BIN) on a
# read-only file copied from the input directory.
# NOTE(review): $OUTDIR and $TIMEOUT are accepted (presumably for interface
# parity with sibling test scripts) but are unused here -- confirm.
BIN=$1
OUTDIR=$2
TIMEOUT=$3
INDIR=$4
cp $INDIR/e.txt ./
chmod 444 e.txt #Made read only
# -f forces removal without prompting despite the read-only mode.
$BIN -f e.txt
| true |
fe030fed4688ca5a89669387a7a35709307fe5c6 | Shell | boreycutts/linux-config | /bgslideshow.sh | UTF-8 | 478 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# until [ 1 -eq 2 ]; do
# Script to randomly set Background from files in a directory
# Directory Containing Pictures
DIR="/home/borey/Git/linux-config/wallpapers"
# Command to Select a random jpg file from directory
# Delete the *.jpg to select any file but it may return a folder
# NOTE(review): parsing `ls` output breaks on filenames with whitespace;
# `shuf -e -n1 "$DIR"/*.png` would be safer -- left as-is here.
PIC=$(ls $DIR/*.png | shuf -n1)
# Command to set Background Image
# gconftool -t string -s /desktop/gnome/background/picture_filename $PIC
feh --bg-scale $PIC
# sleep 60s
# done
| true |
85b49987fac0a9a1685acc85e9b1eb487f7be1cc | Shell | tpbtools/jenkins-dind | /devcontrol/global/startup.sh | UTF-8 | 646 | 4.09375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# @file devcontrol/global/startup.sh
# @brief devcontrol startup script and functions
echo "Jenkins DinD (c) TIC para Bien 2019 - 2020"
echo
# @description Check presence of docker-related pieces in the system
# The function aborts the execution if the system dont have docker installed
#
# @example
# checkDocker
#
# @noargs
#
# @exitcode 0 If docker exist, abort execution if other case and return 1 to the system
#
# @stdout Show "Docker not present. Exiting -" message if missing docker
#
function checkDocker() {
  # Return non-zero (with a message) when the docker CLI is not on PATH.
  # Fix: the previous `bash -c 'echo ...; exit 1'` only exited a throw-away
  # subshell -- the message printed, but nothing was aborted. A command
  # group keeps the message and makes the failure status explicit.
  command -v docker > /dev/null 2>&1 || { echo "Missing docker: aborting"; return 1; }
}
export -f checkDocker
| true |
6d7e8637f6ae4429b2b0779521df008da0bd51d8 | Shell | foodoon-guda/guda | /monitor/deploy/start-agent.sh | UTF-8 | 400 | 2.890625 | 3 | [] | no_license | if [ ! -n "$JAVA_HOME" ]; then
echo "JAVA_HOME IS NULL,use default JAVA_HOME"
JAVA_HOME=/home/admin/jre
else
echo "NOT NULL"
fi
_RUNJAVA="$JAVA_HOME"/bin/java
WORK_HOME=`pwd`
MAIN="com.foodoon.monitor.agent.start.Main"
nohup $_RUNJAVA -Djava.ext.dirs=$WORK_HOME/sigar-bin/lib:$WORK_HOME/lib $MAIN >$WORK_HOME/nohup.log 2>&1 &
echo $! > $WORK_HOME/agent.pid
echo agent start successful.
| true |
961825039dfb55c82eb54bd51a892fd006157472 | Shell | smola/ree-demanda | /generate_graphs.bash | UTF-8 | 143 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Render the gnuplot output, then convert each PostScript plot in graphs/
# into a rotated, white-background PNG next to it.
gnuplot graphs.gnuplot
pushd graphs
for psfile in *.ps ; do
    # "${psfile%.ps}.png" strips only the trailing .ps; the old ${f/.ps/.png}
    # form would also rewrite a ".ps" occurring mid-name. Quoting guards
    # against filenames containing whitespace.
    convert "$psfile" -rotate 90 -background white -flatten "${psfile%.ps}.png"
done
popd
| true |
dc69fe649d94c43f29d566dd6ef5fbfe7cac8494 | Shell | bladedancer/agent-tracing | /setup-vapi.sh | UTF-8 | 857 | 2.65625 | 3 | [] | no_license | #!/bin/bash
. ./env.sh
axway central delete deployment webhooksite -s $CLUSTER
axway central delete virtualapi webhooksite
axway central apply -f vapi/vapi.yaml
axway central apply -f vapi/releasetag.yaml
sleep 10
cat << EOF > vapi/deployment.yaml
apiVersion: v1alpha1
group: management
kind: Deployment
name: webhooksite
metadata:
scope:
kind: Environment
name: $CLUSTER
tags:
- v1
spec:
virtualAPIRelease: webhooksite-1.0.0
virtualHost: "$CLUSTER.ampgw.sandbox.axwaytest.net"
EOF
axway central apply -f vapi/deployment.yaml
echo =========
echo = Test =
echo =========
K8_INGRESS=$(kubectl describe -n kube-system service/traefik | grep "LoadBalancer Ingress" | awk "{print \$3}" | sed "s/,//")
echo curl -kv --resolve $CLUSTER.ampgw.sandbox.axwaytest.net:8443:$K8_INGRESS https://$CLUSTER.ampgw.sandbox.axwaytest.net:8443/hook/demo
| true |
e032f6770b910ceb9f0e99323e5bc680476d9a77 | Shell | kapliy/hepcode | /ana/trunk/scripts/update_metadata_files.sh | UTF-8 | 1,132 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# This a script to be used whenever you change the directory location of your xml output. It updates the metadata.xml.bz2
# file to reflect the current directory location
# When sourced it will run over all of the ntuple_*.aen folders in your current directory and update the metadata
# corresponding to each folder. It also backs up your original metadata.xml.bz2 file in metadata.xml.bz2.bak
# loop over all ntuple directories in current directory
for adir in ntuple_*.aen; do
cd $adir
# make backup
cp metadata.xml.bz2 metadata.xml.bz2.bak
# unzip the metadata so it can be edited
bunzip2 metadata.xml.bz2
# determine the old and new directory locations, and use perl to replace the old with the new
newdir=`pwd`
olddir=""; cat metadata.xml | while read line; do
if [ "${line:0:12}" == "<_directory>" ]; then
olddir=`echo $line | sed -e 's#<_directory>##g' | sed -e 's#</_directory>##g'`;
echo -e "old directory: $olddir\nnew directory: $newdir\n;
perl -p -i -e "s#${olddir}#${newdir}#g" metadata.xml; break;
fi;
done;
# rezip
bzip2 metadata.xml
cd ..
done
| true |
9151990f8bf4c7903719fdfb71f41bc3f138df7d | Shell | johony/shell | /Commandforms/Command forms.sh | UTF-8 | 975 | 3.671875 | 4 | [] | no_license | cmd $ #Excute cmd in background
cmd1; cmd2 #Command sequence; excute multiple cmds on the same line
{cmd1;cmd2} #Excute commands as a group in the current shell
(cmd1;cmd2) #Excute commands as a group in a subshell
cmd1 | cmd2 #Pipe; use output from cmd1 as input to cmd2
cmd1 `cmd2` #Command substitution; use cmd2 output as argument to cmd1
cmd1 $(cmd2) #POSIX shell command substitution; nesting is allowed
cmd $((expression)) #POSIX shell arithmetic substitution. Use the result of expression as argument to cmd
cmd1 && cmd2 #AND; excute cmd1 and then(if cmd1 succeeds)cmd2. This is a "short circuit":cmd2 is never executed if cmd1 fails.
cmd1 || cmd2 #OR; execute either cmd1 or (if cmd1 fails)cmd2. This is a "short ciruit" operation; cmd2 is never executed if cmd1 succeeds.
! cmd #NOT; execute cmd, and produce a zero exit status if cmd exits with a nonzero status. Oherwise, produce a nonzero statrus when cmd exits with a zero startus. | true |
d3337e822dbc95f2aa8764519c0ff9ce2fca1e06 | Shell | alexbeatnik/elegant-git | /libexec/git-elegant | UTF-8 | 3,737 | 3.828125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e
[ -n "$GED" ] && set -x
# Update PATH for a round of command execution.
# It registers all "libexec" scripts.
BINS=$(dirname ${0})
export PATH=${BINS}:${PATH}
source ${BINS}/plugins/pipe
source ${BINS}/plugins/text
__site="https://elegant-git.bees-hive.org"
git-verbose() {
    # Prints a command to be executed and executes it using `git` executable.
    # usage: git-verbose [arg]...
    # Note: inside quotes "git $@" flattens the args into one display string
    # (command-text is provided by plugins/text sourced above); the actual
    # execution passes them as separate words via "$@".
    command-text "git $@"
    git "$@"
}
git-verbose-op() {
    # Prints a command to be executed, executes it using `git` executable,
    # and processes the output by a given function instead of printing it.
    # usage: git-verbose-op <function> [arg]...
    local processor=${1}; shift
    command-text "git $@"
    # stderr is folded into stdout so the processor sees all git output.
    ${processor} "$(git "$@" 2>&1)"
}
MASTER="master"
REMOTE_NAME="origin"
RMASTER="${REMOTE_NAME}/master"
_error-if-empty() {
# _error-if-empty <a value to check> <error message>
if [[ -z "$1" ]]; then
error-text "$2"
exit 45
fi
}
__loop_ask() {
local c="$1"; shift
local m="$1"; shift
[ -z "$1" ] && return 0
for i in $@; do
question-text "$m [$i] (y/n):"
read answer
if [[ "${answer}" == "y" ]]; then
eval "$c $i"
fi
done
}
__loop() {
local c="$1"; shift
[ -z "$1" ] && return 0
for i in $@; do
eval "$c $i"
done
}
__batch() {
local MM="$1"; shift
local AM="$1"; shift
local CM="$1"; shift
question-text "$MM (y/n): "
read answer
if [[ "${answer}" == "y" ]]; then
__loop "$CM" $@
else
__loop_ask "$CM" "$AM" $@
fi
}
branch-from-remote-reference() {
    # Strip a leading remote name: "origin/feature/x" -> "feature/x".
    # usage: branch-from-remote-reference <full reference name>
    # Quoting "${1}" prevents word-splitting/globbing on unusual names;
    # the old trailing `g` flag was a no-op on this ^-anchored pattern.
    echo "${1}" | sed "s|^[a-zA-Z0-9_-]*/||"
}
remove-file() {
    # Delete the given path if (and only if) it is a regular file.
    # Returns non-zero when the file does not exist (unchanged behaviour).
    # Quoting and `--` guard against spaces and dash-prefixed names.
    [[ -f "${1}" ]] && rm -- "${1}"
}
--print-command-in-usage() { (
source "${BINS}/git-elegant-${1}"
printf " %-20s %s\n" "${1}" "$(command-purpose)"
) }
--usage() {
cat <<MESSAGE
An assistant who carefully makes routine work with Git.
usage: git elegant [-h | --help | help]
or: git elegant [-v | --version | version]
or: git elegant <command> [args]
or: git elegant <command> [-h | --help | help]
There are commands used in various situations such as
act with a repository
$(--print-command-in-usage clone-repository)
$(--print-command-in-usage init-repository)
$(--print-command-in-usage acquire-repository)
$(--print-command-in-usage clear-local)
manage a personal work
$(--print-command-in-usage start-work)
$(--print-command-in-usage save-work)
$(--print-command-in-usage amend-work)
$(--print-command-in-usage deliver-work)
operate a flow of work management
$(--print-command-in-usage obtain-work)
$(--print-command-in-usage accept-work)
release new versions
$(--print-command-in-usage show-release-notes)
$(--print-command-in-usage release-work)
and others
$(--print-command-in-usage commands)
Please visit ${__site} to find out more.
MESSAGE
}
--run-command() {
# usage: <command name> [arg]...
local COMMAND=${1}; shift
. "${BINS}/git-elegant-${COMMAND}" 2>/dev/null || {
echo "Unknown command: git elegant $COMMAND" && --usage && exit 46
}
case "${1}" in
-h|--help|help)
echo ""
command-synopsis
echo ""
command-description
echo ""
;;
*) default "$@" ;;
esac
}
main() {
local COMMAND="none"
[[ -n "$1" ]] && COMMAND="$1" && shift
case "${COMMAND}" in
none|-h|--help|help) --usage ;;
-v|--version|version) cat "${BINS}/../version" ;;
*) --run-command ${COMMAND} "$@" ;;
esac
}
main "$@"
| true |
75d3066201bb4b7e81a659fc525cae0af2486b76 | Shell | dlainhart/python-validity | /snap/local/snap-launcher.sh | UTF-8 | 865 | 3.359375 | 3 | [] | no_license | #!/bin/bash
export PYTHONPATH=$SNAP/usr/lib/python3/dist-packages:$PYTHONPATH
for p in $(ls -1d $SNAP/lib/python3*/site-packages); do
PYTHONPATH=$PYTHONPATH:$p
done
if ! $(command -v lsusb) -d 138a: &> /dev/null; then
echo "Unable to access to USB devices"
echo " $SNAP_NAME is installed as a snap."
echo " To allow it to function correctly you may need to run:"
echo " sudo snap connect $SNAP_NAME:raw-usb"
echo " sudo snap connect $SNAP_NAME:hardware-observe"
exit 1
fi
# Invoke the packaged validity-sensors-tools binary, prepending
# "--tool $VFS_TOOL" when that environment variable is set; all other
# arguments are forwarded unchanged.
run_tool() {
	# When VFS_TOOL is empty, args stays unset and "${args[@]}" expands to
	# nothing, so only the caller's arguments are passed.
	[ -n "$VFS_TOOL" ] && \
		local args=(--tool "$VFS_TOOL")
	$SNAP/vfs-tools/validity-sensors-tools "${args[@]}" "$@"
}
run_tool "$@"
ret=$?
if [ "$ret" -eq 0 ] && [[ "$VFS_TOOL" == 'initializer' ]]; then
unset VFS_TOOL
echo "May the leds be with you...!"
(run_tool "$@" --tool=led-dance &> /dev/null) &
fi
exit $ret
| true |
a9c110fc4b651acd5d88df9fcaa492f0e3254729 | Shell | hellosign/dropbox-sign-php | /bin/check-clean-git-status | UTF-8 | 345 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Verify the git working tree is clean. Exits 0 when there is nothing to
# commit; otherwise prints guidance, shows `git status`, and exits 1 so a
# CI gate can reject a stale build.
porcelain=$(git status --porcelain)
if [[ -z "$porcelain" ]]; then
    echo "Working directory is clean!"
    exit 0
fi
printf "Working directory is dirty.\n"
printf "Please check that you committed any changes after running.\n"
printf "You may need to rebuild the SDK by running:\n"
printf "\t./run-build\n"
git status
exit 1
| true |
77a8e544950810f1508e20b1ca4a1878eed2708e | Shell | gunzy83/dotfiles | /dot_zsh/rc/10-functions-general.zsh | UTF-8 | 301 | 3.8125 | 4 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | #!/bin/zsh
# Get information about an IP address. When left blank, information about current public IP is returned
# Look up details for an IP address via ipinfo.io; with no argument the
# service reports on the caller's current public IP.
ipinfo() {
    local base='http://ipinfo.io'
    curl "${base}/$@"
}
# Generate a password. Length is 20 unless specified.
# Emit one random alphanumeric password followed by a newline.
# The length defaults to 20 and may be overridden by the first argument.
passwordgen() {
    local length=${1:-20}
    tr -cd '[:alnum:]' < /dev/urandom | head -c "$length"
    echo
}
| true |
1ca4f1406b6bcfd202a60464ebe63aa34f4af169 | Shell | KevinMcK100/binance-bot-websocket | /scripts/health_check.sh | UTF-8 | 434 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Liveness check for the bot process: if it is not running, restart it via
# start.sh and record the event in status.log.
TIMESTAMP=`date +%Y-%m-%d_%H-%M-%S`
cd /home/ubuntu/binance-bot-websocket/
SERVICE="python binance-bot-websocket.pyz"
# NOTE(review): $SERVICE is unquoted, so pidof receives two names
# ("python" and "binance-bot-websocket.pyz") and succeeds if either has a
# running process -- verify this matching is intended.
if pidof $SERVICE >/dev/null
then
    # Log to stdout and append the same line to status.log.
    echo "[$TIMESTAMP] $SERVICE is running"
    echo "[$TIMESTAMP] $SERVICE is running" >> "status.log"
else
    echo "[$TIMESTAMP] $SERVICE was not running. Starting..."
    echo "[$TIMESTAMP] $SERVICE was not running. Starting..." >> "status.log"
    ./start.sh
fi
69163bdc72fba317e80b194fed8de65c7c8de43e | Shell | csren001/vote | /scripts/test_example01.sh | UTF-8 | 2,515 | 2.734375 | 3 | [] | no_license | . scripts/utils.sh
echo '######## - (COMMON) setup variables - ########'
setupCommonENV
export CC_NAME=mycc
if [[ $# -ge 1 ]]; then
export CC_NAME=$1
fi
echo "'CHAINCODE_NAME' set to '$CC_NAME'"
echo "'CHAINCODE_LANG' set to '$CC_LANG'"
echo "'CHAINCODE_PATH' set to '$CC_PATH'"
echo '######## - (ORG1) init chaincode - ########'
setupPeerENV1
set -x
if [[ "$CORE_PEER_TLS_ENABLED" == "true" ]]; then
peer chaincode invoke \
-o ${ORDERER_ADDRESS} --ordererTLSHostnameOverride orderer.example.com --tls $CORE_PEER_TLS_ENABLED --cafile $ORDERER_CA \
-C $CHANNEL_NAME -n ${CC_NAME} \
--isInit -c '{"Function":"Init","Args":[]}'
else
peer chaincode invoke \
-o ${ORDERER_ADDRESS} \
-C $CHANNEL_NAME -n ${CC_NAME} \
--isInit -c '{"Function":"Init","Args":[]}'
fi
set +x
sleep 10
echo '######## - (ORG1) Invoke SetValue - ########'
set -x
if [[ "$CORE_PEER_TLS_ENABLED" == "true" ]]; then
peer chaincode invoke \
-o ${ORDERER_ADDRESS} --ordererTLSHostnameOverride orderer.example.com --tls $CORE_PEER_TLS_ENABLED --cafile $ORDERER_CA \
-C $CHANNEL_NAME -n ${CC_NAME} \
-c '{"Function":"CreateVote","Args":["vote01","{\"csren\":\"0\",\"stwan\":\"0\"}"]}'
else
peer chaincode invoke \
-o ${ORDERER_ADDRESS} \
-C $CHANNEL_NAME -n ${CC_NAME} \
-c '{"Function":"CreateVote","Args":["vote01","{\"csren\":\"0\",\"stwan\":\"0\"}"]}'
fi
set +x
sleep 5
echo '######## - (ORG1) query chaincode - ########'
setupPeerENV1
set -x
peer chaincode query -C $CHANNEL_NAME -n $CC_NAME -c '{"Function":"Query", "Args":["vote01"]}'
set +x
echo '######## - (ORG2) query chaincode - ########'
setupPeerENV2
set -x
peer chaincode query -C $CHANNEL_NAME -n $CC_NAME -c '{"Function":"Query", "Args":["vote01"]}'
set +x
echo '############# END ###############'
echo '######## - (ORG2) Invoke chaincode - ########'
set -x
if [[ "$CORE_PEER_TLS_ENABLED" == "true" ]]; then
peer chaincode invoke \
-o ${ORDERER_ADDRESS} --ordererTLSHostnameOverride orderer.example.com --tls $CORE_PEER_TLS_ENABLED --cafile $ORDERER_CA \
-C $CHANNEL_NAME -n ${CC_NAME} \
-c '{"Function":"Vote","Args":["vote01","csren"]}'
else
peer chaincode invoke \
-o ${ORDERER_ADDRESS} \
-C $CHANNEL_NAME -n ${CC_NAME} \
-c '{"Function":"Vote","Args":["vote01","csren"]}'
fi
set +x
sleep 5
echo '######## - (ORG1) query chaincode - ########'
setupPeerENV1
set -x
peer chaincode query -C $CHANNEL_NAME -n $CC_NAME -c '{"Function":"Query", "Args":["vote01"]}'
set +x
| true |
ae259b25ebfcfb64b40db2c786d499395d36789a | Shell | solarkennedy/puppet-on-openwrt | /scripts/add-repo.sh | UTF-8 | 263 | 2.546875 | 3 | [] | no_license | . /etc/openwrt_release
VERSION=`echo $DISTRIB_RELEASE | cut -f 1 -d -`
TARGET=`echo $DISTRIB_TARGET | cut -f 1 -d /`
LINE="src/gz puppet-packages http://download.xkyle.com/openwrt/$VERSION/$TARGET"
grep -q "$LINE" /etc/opkg.conf || echo "$LINE" >> /etc/opkg.conf
| true |
f3cc47fd928c83c4b4911c85d1f159075e26ff89 | Shell | benhe119/service-dependencies | /examples-data/push_example_data.sh | UTF-8 | 1,605 | 2.953125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Inject all example data
# Requires curl and jq https://stedolan.github.io/jq/
set -e # fail on error
set -x # echo on
echo "Declare the applications"
exterior_id=$(curl -H "Content-Type: application/json" -X POST -d '{"name": "exterior"}' http://localhost:8080/services/applications | jq '.id')
frontend1_id=$(curl -H "Content-Type: application/json" -X POST -d '{"name": "frontend1"}' http://localhost:8080/services/applications | jq '.id')
frontend2_id=$(curl -H "Content-Type: application/json" -X POST -d '{"name": "frontend2"}' http://localhost:8080/services/applications | jq '.id')
backend_id=$(curl -H "Content-Type: application/json" -X POST -d '{"name": "backend"}' http://localhost:8080/services/applications | jq '.id')
echo "Upload the swagger declarations"
curl -X POST -F "file=@./swagger/frontend1.json" "http://localhost:8080/services/applications/$frontend1_id/swagger"
curl -X POST -F "file=@./swagger/frontend2.json" "http://localhost:8080/services/applications/$frontend2_id/swagger"
curl -X POST -F "file=@./swagger/backend.json" "http://localhost:8080/services/applications/$backend_id/swagger"
echo "Upload the logs"
curl -X POST -F "file=@./apache/frontend1.log" "http://localhost:8080/services/applications/$frontend1_id/logs/apache"
curl -X POST -F "file=@./apache/frontend2.log" "http://localhost:8080/services/applications/$frontend2_id/logs/apache"
curl -X POST -F "file=@./apache/backend.log" "http://localhost:8080/services/applications/$backend_id/logs/apache"
echo "Check the result"
curl "http://localhost:8080/services/dependencies" | jq '.'
| true |
8c861d418cdfbff2f8bcec5e9bf4a64515f423ed | Shell | onlinekid/my | /dotfiles/.zshrc | UTF-8 | 4,500 | 3.046875 | 3 | [] | no_license | # prezto
if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
fi
# change prompt
prompt_context() {
# local user=`whoami`
# if [[ "$user" != "$DEFAULT_USER" || -n "$SSH_CONNECTION" ]]; then
# prompt_segment $PRIMARY_FG default " %(!.%{%F{yellow}%}.)$user@%m "
# fi
prompt_segment $PRIMARY_FG default " 🌈 "
}
prompt_dir() {
prompt_segment blue $PRIMARY_FG ' %c '
}
# autojump
[ -f /usr/local/etc/profile.d/autojump.sh ] && . /usr/local/etc/profile.d/autojump.sh
autoload -U compinit && compinit
# zmv
autoload zmv
# z
. /usr/local/etc/profile.d/z.sh
# functions
## set the tab title to current dir
# function precmd() {
# echo -ne "\e]1;${PWD##*/}\a"
# }
## create a folder and go in it
function mcd() {
mkdir -p "$1" && cd "$1";
}
# export MY
export MY=$HOME/my
# export TMP
export TMP=$HOME/__tmp
# colours
export TERM=xterm-256color
export GREP_OPTIONS='--color=auto' GREP_COLOR='1;32'
export CLICOLOR=1
# display
DISPLAY=:0.0; export DISPLAY
# british and utf-8
export LANG="en_GB"
export LC_ALL="en_GB.UTF-8"
# increase opened files size
ulimit -n 1024
# bind
bindkey '^R' history-incremental-search-backward
# global variable
# export FIREFOXNIGHTLY_BIN="/Applications/FirefoxNightly.app/Contents/MacOS/firefox"
# export BROWSER=$FIREFOXNIGHTLY_BIN # bug with python
export GIT_EDITOR="vim"
export VISUAL="subl"
export EDITOR="subl"
# aliases
. ~/.aliases
# node
export NODE_PATH=/usr/local/share/npm/lib/node_modules
###-begin-npm-completion-###
#
# npm command completion script
#
# Installation: npm completion >> ~/.bashrc (or ~/.zshrc)
# Or, maybe: npm completion > /usr/local/etc/bash_completion.d/npm
#
if type complete &>/dev/null; then
_npm_completion () {
local words cword
if type _get_comp_words_by_ref &>/dev/null; then
_get_comp_words_by_ref -n = -n @ -n : -w words -i cword
else
cword="$COMP_CWORD"
words=("${COMP_WORDS[@]}")
fi
local si="$IFS"
IFS=$'\n' COMPREPLY=($(COMP_CWORD="$cword" \
COMP_LINE="$COMP_LINE" \
COMP_POINT="$COMP_POINT" \
npm completion -- "${words[@]}" \
2>/dev/null)) || return $?
IFS="$si"
if type __ltrim_colon_completions &>/dev/null; then
__ltrim_colon_completions "${words[cword]}"
fi
}
complete -o default -F _npm_completion npm
elif type compdef &>/dev/null; then
_npm_completion() {
local si=$IFS
compadd -- $(COMP_CWORD=$((CURRENT-1)) \
COMP_LINE=$BUFFER \
COMP_POINT=0 \
npm completion -- "${words[@]}" \
2>/dev/null)
IFS=$si
}
compdef _npm_completion npm
elif type compctl &>/dev/null; then
_npm_completion () {
local cword line point words si
read -Ac words
read -cn cword
let cword-=1
read -l line
read -ln point
si="$IFS"
IFS=$'\n' reply=($(COMP_CWORD="$cword" \
COMP_LINE="$line" \
COMP_POINT="$point" \
npm completion -- "${words[@]}" \
2>/dev/null)) || return $?
IFS="$si"
}
compctl -K _npm_completion npm
fi
###-end-npm-completion-###
# android
export ANDROID_HOME=/usr/local/opt/android-sdk
# java
export JAVA_HOME=$(/usr/libexec/java_home -v15)
export JAVA_11_HOME=$(/usr/libexec/java_home -v11)
export JAVA_14_HOME=$(/usr/libexec/java_home -v14)
export JAVA_15_HOME=$(/usr/libexec/java_home -v15)
# python
if which pyenv > /dev/null; then eval "$(pyenv init -)"; fi
if which pyenv-virtualenv-init > /dev/null; then eval "$(pyenv virtualenv-init -)"; fi
export LDFLAGS="-L/usr/local/opt/openssl/lib"
export CPPFLAGS="-I/usr/local/opt/openssl/include"
# homebrew cask
export HOMEBREW_CASK_OPTS=--appdir=/Applications
# babel
export BABEL_CACHE_PATH=/tmp/babel.cache.json
# PATH - must be in the end
export PATH=$PATH:$MY/bin/shims # add commands to open applications
export PATH=/usr/local/Cellar/:$PATH # brew
export PATH=/usr/local/lib/node_modules:$PATH # npm
export PATH=/usr/local/opt/ruby/bin:$PATH # ruby
# export PATH="/usr/local/opt/openjdk/bin:$PATH" # java
export PATH=/usr/local/opt/gnu-sed/libexec/gnubin:$PATH # gnu-sed
export PATH=$MY/bin/git:$PATH # git commands
export PATH=$MY/bin/_:$PATH # own commands
# local
. ~/.zshrc_local
# add or override commands by via profiled ones
export PATH=$MY/profiles/$OS_PROFILE/bin:$PATH
| true |
ffe615b4d10601b4a475e8168f1c897ec9799848 | Shell | joagre/anond | /lib/test/lab/create_node.sh | UTF-8 | 848 | 3.625 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
# Clone the VirtualBox "node" template VM and re-address the clone.
# $1 - host-only ip for the new node, $2 - ip of its firewall/gateway.
if [ $# -ne 2 ]; then
echo "Usage: $0 <host-only-ip> <firewall-ip>"
echo "Example: $0 11.0.0.3 11.0.1.3"
exit 1
fi
# Addresses/credentials of the template image -- presumably baked into the
# "node" VM this script clones (TODO confirm).
ROOT_PASSWD=mortuta42
ORIGIN_HOST_ONLY_IP=11.0.0.2
ORIGIN_FIREWALL_IP=11.0.1.2
HOST_ONLY_IP=${1}
FIREWALL_IP=${2}
# Clone node
# Distinct exit codes (100/200/300) identify which step failed.
VBoxManage clonevm --register --name node-${HOST_ONLY_IP} node || exit 100
# Start node
VBoxManage startvm node-${HOST_ONLY_IP} --type gui || exit 200
# Change the ip-address on the host-only network adapter
# (ssh in via the template's address, rewrite /etc/network/interfaces from
# its .in template, then halt so the new address takes effect on next boot)
sshpass -p ${ROOT_PASSWD} ssh -o ConnectTimeout=120 -o ConnectionAttempts=120 -o StrictHostKeyChecking=no root@${ORIGIN_HOST_ONLY_IP} "sh -c 'sed -e \"s/address ${ORIGIN_HOST_ONLY_IP}/address ${HOST_ONLY_IP}/\" -e \"s/gateway ${ORIGIN_FIREWALL_IP}/gateway ${FIREWALL_IP}/\" /etc/network/interfaces.in > /etc/network/interfaces; /sbin/halt'" || exit 300
| true |
ec53e7c7a5691326464cb311e5b1393ad778e4b9 | Shell | ewamarciniak/deploy | /tests/isApacheRunning.sh | UTF-8 | 575 | 3.171875 | 3 | [] | no_license |
#!/bin/bash
# Unit test for the isApacheRunning helper in functions.sh:
# it should return 1 while Apache is up and 0 once Apache is stopped.
ERRORCOUNT_1=0
ERRORCOUNT_2=0
# Case 1: Apache running -> isApacheRunning must return 1.
sudo /etc/init.d/apache2 start
source /home/testuser/project/functions.sh
isApacheRunning
if [ "$?" -ne 1 ]; then
ERRORCOUNT_1=$((ERRORCOUNT_1+1))
fi
# Case 2: Apache stopped -> isApacheRunning must return 0.
sudo /etc/init.d/apache2 stop
source /home/testuser/project/functions.sh
isApacheRunning
if [ "$?" -ne 0 ]; then
ERRORCOUNT_2=$((ERRORCOUNT_2+1))
fi
ERRORCOUNT=$(($ERRORCOUNT_1+ERRORCOUNT_2))
# Both cases must have succeeded (error count 0) for the test to pass.
if [ $ERRORCOUNT -eq 0 ] ; then
echo "Unit test for isApacheRunning passed."
else
# BUGFIX: this branch previously printed "passed." as well, so a failing
# run was indistinguishable from a passing one.
echo "Unit test for isApacheRunning failed."
fi
| true |
682c1e3dbaec1096694f0bc5798da9f5d4b5197e | Shell | dstrctrng/puddle | /libexec/_puddle | UTF-8 | 163 | 3.171875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Resolve the puddle root: the first CLI argument wins; otherwise fall back
# to the parent directory of this script (symlinks resolved via cd -P/pwd -P).
if (( $# > 0 )); then
  PUDDLE=$1
  shift
else
  PUDDLE="$(cd -P -- "$(dirname -- "${BASH_SOURCE}")/.." && pwd -P)"
fi
# Prepend the puddle's bin directory so its commands take precedence.
PATH="$PUDDLE/bin:$PATH"
| true |
9ebeae8d45c0ee96e7e216ecf05df96d39519bf9 | Shell | eth-cscs/scops | /slurm_helper/relocate.sbatch | UTF-8 | 149 | 2.5625 | 3 | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Slurm batch helper: relocate/extend reservation $1 onto this job's nodes.
NAME="$1"
# Make the helper tools (rsvmgt) reachable for this job.
# NOTE(review): the leading slash means REPLAY_USER is expected to be a
# top-level directory name -- confirm against the deployment layout.
export PATH="/$REPLAY_USER/data/slurm_helper":$PATH
# Build the command as a string and echo it before eval-ing it, so the
# sbatch output log records exactly what was run.
# NOTE(review): presumably "rsvmgt <name> + <nodes>" adds SLURM_NODELIST
# (set by Slurm) to the reservation -- confirm against rsvmgt's usage.
cmd="rsvmgt $NAME + ${SLURM_NODELIST}"
echo "$cmd"
eval "$cmd"
# Give the reservation change time to settle before the job ends.
sleep 10
| true |
8b0b89e9c3164e81c7e41344130449e4cc4a1cf2 | Shell | jneis-baseline/mongodb-js | /import.sh | UTF-8 | 470 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Import the MongoDB "primer" restaurants dataset into a dockerised mongod.
# expects one argument: the host's absolute path where to save the dataset
if [ $# -eq 0 ]
then
# BUGFIX: the usage message previously referred to a nonexistent setup.sh.
echo "sh import.sh <dataset-destination>"
exit 1
fi
volume=$1
# requirements: wget, docker
# Download the dataset into the directory that will be bind-mounted below.
# BUGFIX: $volume was unquoted here and below, breaking on paths with spaces.
wget -P "$volume" https://raw.githubusercontent.com/mongodb/docs-assets/primer-dataset/dataset.json
# Run mongod with the dataset directory mounted at /workspace.
docker run --name db -v "$volume":/workspace -d mongo
# Load the JSON into test.restaurants, dropping any previous collection.
docker exec -it db mongoimport --db test --collection restaurants --drop --file /workspace/dataset.json
| true |
7f8e566b5e39085427e84e9777615dec6d1adad0 | Shell | xuyuegit/SoftwareCarpentry | /simple_while.sh | UTF-8 | 170 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Purpose: count from 0 to 9, announcing each value, using shell arithmetic.
COUNTER=0
until (( COUNTER >= 10 )); do
  echo "The counter is at $COUNTER"
  COUNTER=$(( COUNTER + 1 ))
done
| true |
fe0ccd6a3dda9d58adac7b17c4ddce630db9ca23 | Shell | bopopescu/archive-utils | /chef-old/cookbooks/mysql/files/default/sql/create_ebs_stripe | UTF-8 | 2,954 | 4.375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Interactive tool: create and attach six EBS volumes of the requested size,
# then (second half) assemble them into a raid0 stripe for MySQL data.
# Usage and warnings for non-root:
if [ $USER != "root" ]; then
echo "You must use 'sudo' or be root to use this tool."
exit
fi
if [ -z "$1" ]; then
echo "Usage: create_ebs_stripe [volume size in gigabytes]"
exit
fi
# Check for 'aws' and '.awssecret' program and conf file:
#SECFILE=`ls ~/.awssecret` | wc -l`
#if [ $SECFILE == 0 ]; then
# echo "No ~/.awssecret config file present. Exiting..."
# exit
#fi
#
#PROGFILE=`ls /usr/local/bin/aws | wc -l`
#if [ $PROGFILE == 0 ]; then
# echo "No 'aws' in /usr/local/bin. Exiting..."
# exit
#fi
# Define variables:
VOLSIZE=$1
# Fixed set of six device slots used for the stripe members.
DEVLIST=`echo "/dev/sdb
/dev/sdc
/dev/sdd
/dev/sde
/dev/sdf
/dev/sdg"`
echo "Attaches 6 $VOLSIZE gigabyte volumes in a raid0. Do you wish to proceed?
yes/no"
read CHOICE
if [ $CHOICE = "yes" ]; then
# Get our IPv4 address:
IP=`ifconfig eth0 | grep "inet addr:" | awk 'BEGIN { FS = ":" }{ print $2 }' \
| awk '{print $1}'`
# Get our AWS availability zone:
# NOTE(review): the IP 10.248.181.219 is hard-coded here and below instead
# of using $IP computed above -- this ties the script to one instance.
AVZONE=`aws din | grep 10.248.181.219 | grep -v + \
| awk 'BEGIN {FS = "availabilityZone="}{print $2}' | awk '{print $1}'`
# Get our AWS instance ID:
IID=`aws din | grep 10.248.181.219 | grep -v + | awk '{print $2}'`
# Create and attach EBS volumes:
for DEVICE in $DEVLIST;
do
echo "Attaching $DEVICE..."
VOLUME=`aws cvol --size $VOLSIZE --zone $AVZONE | grep "vol-" | awk '{print $4}'`
echo "Attaching volume $VOLUME..."
aws attvol $VOLUME -i $IID -d $DEVICE
sleep 10
echo "...done"
done
elif [ $CHOICE != yes ]; then
echo "Exiting..."
exit
fi
# NOTE(review): second shebang below -- this file looks like two scripts
# concatenated; the remainder formats and mounts the stripe.
#!/bin/bash
# Get some tools for our host
echo "Installing tools for formatting and installing volumes."
aptitude update
apt-get -y install mdadm xfsdump
# Get our list of newly added EBS volumes
EBSVOLS=$DEVLIST
# Get number of EBS volumes
echo "$DEVLIST" > /tmp/devlist
NUMBEROFVOLS=`cat /tmp/devlist | wc -l`
# Make our partition Linux Raid Autodetect
# (sfdisk heredoc: one partition spanning the disk, type fd)
for VOL in $EBSVOLS
do
sfdisk $VOL << EOF
,,fd
EOF
done
# Get a string to feed to mdadm
STRIPEDEVS=`echo $EBSVOLS | tr -s '\n' ' '`
# Build our raid stripe
mdadm --create --verbose /dev/md0 --level=raid0 --raid-devices=$NUMBEROFVOLS $STRIPEDEVS
# Configure fstab
echo "/dev/md0 /ebs_raid xfs defaults 0 0" >> /etc/fstab
# Make our mountpoint
mkdir -p /ebs_raid
# Backup up /etc/fstab and edit out the default lvm vols
mv /etc/fstab /etc/fstab.backup
grep -v "STORAGE" /etc/fstab.backup > /etc/fstab
# Stop MySQL
/etc/init.d/mysql stop
# Unmount the old lvm volumes
for D in `grep STORAGE /etc/fstab.backup | awk '{ print $1 }'`
do
umount $D
done
# Remove our improper default mysql directories
rm -rf /mnt/mysql-data /mnt/mysql-misc
# Make an XFS filesystem on our raid stripe
mkfs.xfs -f /dev/md0
# Mount our new device
mount -a
# Set up symlinks to mountpoint for our sql data directories
# mkdir -p /mnt/sql_ebs/mysql-data /mnt/sql_ebs/mysql-misc
# ln -s /mnt/sql_ebs/mysql-misc /mysql-misc
# ln -s /mnt/sql_ebs/mysql-data /mysql-data
| true |
81c0d9c5daa5f6caf0018e0d2e36e7e467b37e43 | Shell | 7956968/sys-bin | /parted/build_hi3536.sh | UTF-8 | 510 | 2.84375 | 3 | [] | no_license | #!/bin/bash -e
# Cross-compile parted 3.1 for the HI3536 target.
# The env include is expected to define CROSS_HOST, OUTDIR and tmn
# (all referenced below) -- TODO confirm.
. ../one_time_build_env_include
VERSION=3.1
pushd parted-${VERSION}
# Configure against the previously cross-built util-linux tree.
# NOTE(review): the hard-coded /home/gjyang path ties this to one machine.
./configure --host=${CROSS_HOST} -disable-device-mapper --without-readline \
LDFLAGS="-L/home/gjyang/$tmn/sys-bin/util-linux/usr/lib" \
CFLAGS="-I/home/gjyang/$tmn/sys-bin/util-linux/usr/include" \
--enable-static=parted
make CC=${CROSS_HOST}-gcc
# NOTE(review): with -e in the shebang a failing make exits immediately, so
# this explicit check only fires when the script is run as "bash script.sh".
if [ $? -ne 0 ]; then
echo "Fail(make):$? [`pwd`]"
exit 1
fi
# Stage the install tree under $OUTDIR/parted.
make DESTDIR=${OUTDIR}/parted install
popd
# make uninstall
# Record the built version in the system build log.
echo "parted-${VERSION}" >> ../sysbuild.log
| true |
55e2aac2c2066681391db3fc449f056ec1b129a2 | Shell | mluszczyk/bsk | /bsk1112-openvpn-iptables/setup_server.sh | UTF-8 | 948 | 2.796875 | 3 | [] | no_license | set -xe
# Provision an OpenVPN server CA with easy-rsa (apparently inside a Vagrant
# VM -- keys land in the shared /vagrant folder).
sudo apt-get update
sudo apt-get install openvpn easy-rsa
# http://stackoverflow.com/questions/24255205/error-loading-extension-section-usr-cert/26078472#26078472
# Comment out subjectAltName to work around the openssl.cnf issue above.
sudo perl -p -i -e 's|^(subjectAltName=)|#$1|;' /usr/share/easy-rsa/openssl-1.0.0.cnf
EASY_RSA_DIR=/usr/share/easy-rsa
# Pull in easy-rsa's defaults, then override the identity fields below.
. ${EASY_RSA_DIR}/vars
export EASY_RSA=${EASY_RSA_DIR}
export KEY_CONFIG=`$EASY_RSA/whichopensslcnf $EASY_RSA`
export KEY_COUNTRY="PL"
export KEY_PROVINCE="mazowieckie"
export KEY_CITY="Warsaw"
export KEY_ORG="MIMUW"
export KEY_EMAIL="bsk@mimuw.edu.pl"
export KEY_OU="BSK"
export KEY_DIR="/vagrant/keys/"
# Wipe previous keys, then build CA cert, DH params and the server key.
${EASY_RSA_DIR}/clean-all
${EASY_RSA_DIR}/build-ca --batch
${EASY_RSA_DIR}/build-dh
${EASY_RSA_DIR}/build-key-server --batch vpn-server
export KEY_CN="CRL"
# NOTE(review): $OPENSSL is expected to come from the sourced vars file --
# confirm, otherwise this line tries to run "ca ..." as a command.
$OPENSSL ca -gencrl -out $KEY_DIR/crl.pem -config "$KEY_CONFIG"
# Allow OpenVPN only from the host-only subnet; drop all other TCP on eth1.
sudo iptables -A INPUT -i eth1 -s 172.28.128.1/24 -p tcp --dport openvpn -j ACCEPT
sudo iptables -A INPUT -i eth1 -p tcp -j DROP
| true |
85a002dced744868bd3043c6ca42cff6c7f9a2be | Shell | freeman2004/wvdial | /X86/ubuntu10.04/auto.sh | UTF-8 | 4,057 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Build wvdial and its dependencies (zlib, openssl, wvstreams, ppp) from
# source: download into PACKAGE, unpack/build in TARGET, install into OBJ.
# Each section is skipped when its TARGET directory already exists.
CURDIR=$PWD
PACKAGE=$CURDIR/PACKAGE
TARGET=$CURDIR/TARGET
OBJ=$CURDIR/OBJ
mkdir $PACKAGE
mkdir $TARGET
mkdir $OBJ
echo "************ zlib-1.2.5 ***************************"
if [ ! -d "$TARGET/zlib-1.2.5" ]; then
if [ ! -f $PACKAGE/zlib-1.2.5.tar.bz2 ];
then
cd $PACKAGE
wget "http://enterprise-storage-os.googlecode.com/files/zlib-1.2.5.tar.bz2"
tar xvf zlib-1.2.5.tar.bz2 -C $TARGET
fi
cd "$TARGET/zlib-1.2.5"
echo "***************************************"
./configure --prefix=$OBJ
echo "***************************************"
make
echo "***************************************"
make install
cd $CURDIR
fi
## echo "************ openssl-0.9.8n ************************"
## if [ ! -d "$TARGET/openssl-0.9.8n" ]; then
## if [ ! -f "$PACKAGE/openssl-0.9.8n.tar.gz" ]
## then
## cd $PACKAGE
## wget "http://www.openssl.org/source/openssl-0.9.8n.tar.gz"
## tar xvf openssl-0.9.8n.tar.gz -C $TARGET
## fi
##
## if [ ! -f "$PACKAGE/openssl-0.9.8n-fix_manpages-1.patch.1" ]
## then
## cd $PACKAGE
## wget "ftp://ftp.yellowdoglinux.com/.1/blfs/conglomeration/openssl/openssl-0.9.8n-fix_manpages-1.patch"
## fi
##
## cd "$TARGET/openssl-0.9.8n"
## patch -Np1 -i $PACKAGE/openssl-0.9.8n-fix_manpages-1.patch
## ./config --prefix=$OBJ
## make
## make install
## cd $CURDIR
## fi
echo "************ openssl-1.0.0l.tar.gz ************************"
if [ ! -d "$TARGET/openssl-1.0.0l" ]; then
if [ ! -f "$PACKAGE/openssl-1.0.0l.tar.gz" ]
then
cd $PACKAGE
wget "http://www.openssl.org/source/openssl-1.0.0l.tar.gz"
tar xvf "openssl-1.0.0l.tar.gz" -C $TARGET
fi
cd "$TARGET/openssl-1.0.0l"
./config --prefix=$OBJ shared
make
make install
cd $CURDIR
fi
echo "************ wvstream-4.6.1 ***********************"
if [ ! -d $TARGET/wvstreams-4.6.1 ]; then
if [ ! -f "$PACKAGE/wvstreams-4.6.1.tar.gz" ]; then
cd $PACKAGE
wget "http://wvstreams.googlecode.com/files/wvstreams-4.6.1.tar.gz"
tar xvf "wvstreams-4.6.1.tar.gz" -C $TARGET
fi
cd $TARGET/wvstreams-4.6.1
./configure \
--prefix=$OBJ/ \
--without-dbus \
--with-pam=no \
--with-tcl=no \
--with-qt=no \
--disable-testgui \
LDFLAGS=-L$OBJ/lib/ \
CFLAGS=-I$OBJ/include/ \
--with-openssl=$OBJ/ \
--with-zlib=$OBJ/ \
--disable-debug
patch -Np1 -d $TARGET/wvstreams-4.6.1 < $CURDIR/PATCH/wvstreams-4.6.1.patch
make
make install
fi
## echo "************ wvdial-1.60.3 ***********************"
## if [ ! -d $TARGET/wvdial-1.60.3 ]; then
## cd $PACKAGE
## tar xvf "wvdial_1.60.3.tar.gz" -C $TARGET
##
## cp $CURDIR/PATCH/config.h.in $TARGET/wvdial-1.60.3/
## cp $CURDIR/PATCH/configure $TARGET/wvdial-1.60.3/
## cp $CURDIR/PATCH/configure.ac $TARGET/wvdial-1.60.3/
## cp $CURDIR/PATCH/install-sh $TARGET/wvdial-1.60.3/
## cp $CURDIR/PATCH/Makefile.in $TARGET/wvdial-1.60.3/
##
## cd $TARGET/wvdial-1.60.3/
##
## ./configure \
## --prefix=$OBJ \
## CPPFLAGS=-I$OBJ/include/wvstreams \
## LDFLAGS=-L$OBJ/lib
##
## make
## make install
## fi
echo "************ wvdial-1.61 ***********************"
if [ ! -d $TARGET/wvdial-1.61 ]; then
# BUGFIX: the existence check previously tested "$PACKAGE/wvdial-1.61"
# (no .tar.gz suffix), so the tarball was re-downloaded on every run.
if [ ! -f "$PACKAGE/wvdial-1.61.tar.gz" ]; then
cd $PACKAGE
wget "http://wvstreams.googlecode.com/files/wvdial-1.61.tar.gz"
tar xvf "wvdial-1.61.tar.gz" -C $TARGET
fi
# Overlay the patched autotools files before configuring.
cp $CURDIR/PATCH/config.h.in $TARGET/wvdial-1.61/
cp $CURDIR/PATCH/configure $TARGET/wvdial-1.61/
cp $CURDIR/PATCH/configure.ac $TARGET/wvdial-1.61/
cp $CURDIR/PATCH/install-sh $TARGET/wvdial-1.61/
cp $CURDIR/PATCH/Makefile.in $TARGET/wvdial-1.61/
cd $TARGET/wvdial-1.61/
./configure \
--prefix=$OBJ \
CPPFLAGS=-I$OBJ/include/wvstreams \
LDFLAGS=-L$OBJ/lib
make
make install
fi
echo "************ ppp-2.4.5 ***********************"
if [ ! -d $TARGET/ppp-2.4.5 ]; then
if [ ! -f "$PACKAGE/ppp-2.4.5.tar.gz" ]; then
cd $PACKAGE
wget "ftp://ftp.samba.org/pub/ppp/ppp-2.4.5.tar.gz"
tar xvf "ppp-2.4.5.tar.gz" -C $TARGET
fi
cd $TARGET/ppp-2.4.5/
# BUGFIX: a stray trailing backslash after --prefix turned the following
# "make" line into a configure argument, so the build step never ran.
./configure \
--prefix=$OBJ
make
make install
fi
| true |
4f492a8606f6f5c81a32a436bfe7c01178eaf1f8 | Shell | bgiesing/gappsintegrator | /000gappsintegrator | UTF-8 | 5,943 | 3.625 | 4 | [] | no_license | #!/system/bin/sh
# chmod -R 755 /system/etc/init.d /system/su.d
#
# ROM GApps Auto-Integration
# osm0sis @ xda-developers
#
# Integrates updated Google Apps APKs from /data/app back into /system so
# the updates survive as system apps; handles KitKat, Lollipop and
# Marshmallow+ /data/app layouts differently.
#
# logbuff: run the given command and append its stdout to the shell variable
# of the same name (the log is buffered until /sdcard becomes writable).
logbuff() { logbuff+="$($*)"; }
# writable: print 0 if a file can be created in directory $1, else non-zero.
writable() { touch $1/tmpfile 2>/dev/null; ret=$?; rm $1/tmpfile 2>/dev/null; echo $ret; }
# wait for /system to become remountable/writable
until [ "$(mount -o remount,rw /system >/dev/null; writable /system; mount -o remount,ro /system >/dev/null)" == 0 ]; do
sleep 1;
done;
logbuff echo -n `date`;
gtmp=/data/local/tmp/gapp;
# get SDK version to perform different actions due to /data/app layout changes
sdkver=`getprop ro.build.version.sdk`;
# find new unintegrated Google Apps APKs in /data
for i in $(ls /data/app/ | grep -E 'com.android|com.google.android'); do
# find equivalent /system APK name and only process if it exists
xml=/data/system/packages.xml;
package=`echo $i | cut -d- -f1`;
sysapk=`grep "updated-package name=\"$package\"" $xml | grep -o 'codePath=.*$' | cut -d\" -f2`;
logbuff echo -ne "\n/data/app/$i $sysapk";
if [ "$sysapk" ]; then
# compare /data and /system APK versions and only integrate if /data is newer (necessary on Lollipop and above)
datver=$(grep "codePath=\"/data/app/$i" $xml | grep -o 'version=.*$' | cut -d\" -f2);
sysver=$(grep "codePath=\"$sysapk" $xml | grep -o 'version=.*$' | cut -d\" -f2);
if [ "$datver" -gt "$sysver" ]; then
logbuff echo -ne "\t ($datver > $sysver)";
mkdir -p $gtmp;
# KitKat (and below) support
if [ "$sdkver" -le 20 ]; then
# remove libraries from copied APK and zipalign if the binaries exist
datapk=/data/app/$i;
if [ "$(zip --help)" -a "$(zipalign --help 2>&1)" ]; then
cp -fp $datapk $gtmp/preopt-$i;
zip -d $gtmp/preopt-$i lib/*;
zipalign -v 4 $gtmp/preopt-$i $gtmp/$i;
datapk=$gtmp/$i;
fi;
# extract and force copy libraries to /system
unzip /data/app/$i -d $gtmp;
chmod 644 $gtmp/lib/arm*/*;
mount -o remount,rw /system;
cp -fp $gtmp/lib/arm*/* /system/lib/;
# overwrite /system APK with new /data APK then fix permissions
cp -f $datapk $sysapk;
chown root.root $sysapk;
chmod 644 $sysapk;
# Lollipop support
elif [ "$sdkver" -le 22 ]; then
# save time on boots after APK work has been completed but apps are awaiting optimization
if [ ! -f /data/app/$i/integrated ]; then
# remove libraries from copied APK and zipalign if the binaries exist
datapk=/data/app/$i/base.apk;
if [ "$(zip --help)" -a "$(zipalign --help 2>&1)" ]; then
# workaround for Chrome not playing by the usual rules (per usual)
case $(basename $sysapk) in
*Chrome*)
mount -o remount,rw /system;
rm -f $sysapk/lib/arm*/libchrome.so;;
*)
cp -fp $datapk $gtmp/preopt-$i.apk;
zip -d $gtmp/preopt-$i.apk lib/*;
zipalign -v 4 $gtmp/preopt-$i.apk $gtmp/$i.apk;
datapk=$gtmp/$i.apk;;
esac;
fi;
# force copy libraries to /system respecting symlinks then clean up empty files
mount -o remount,rw /system;
cp -RLf /data/app/$i/lib $sysapk;
for j in `ls $sysapk/lib/arm*/*`; do
test ! -s $j && rm -f $j;
done;
# overwrite /system APK with new /data APK then fix permissions
cp -fp $datapk $sysapk/`basename $sysapk`.apk;
chown -R root.root $sysapk;
chmod 644 $sysapk/*.apk $sysapk/lib/arm*/*;
# flag for cleanup on reboot following optimization
touch /data/app/$i/integrated;
fi;
# remove packages.xml entry for /data APK
sed -i -e "/<updated-package name=\"${package}/,/<\/updated-package>/d" $xml;
# Marshmallow (and above) support
elif [ "$sdkver" -ge 23 ]; then
# save time on boots after APK work has been completed but apps are awaiting optimization
if [ ! -f /data/app/$i/integrated ]; then
# if necessary force copy libraries to /system respecting symlinks then clean up empty files
mount -o remount,rw /system;
if [ -d $sysapk/lib ]; then
cp -RLf /data/app/$i/lib $sysapk;
for j in `ls $sysapk/lib/arm*/*`; do
test ! -s $j && rm -f $j;
done;
fi;
# if necessary force copy APK odex file to /system
if [ -d $sysapk/oat ]; then
cp -fp /data/app/$i/oat/arm*/base.odex $sysapk/oat/arm*/`basename $sysapk`.odex;
fi;
# overwrite /system APK with new /data APK then fix permissions
cp -fp /data/app/$i/base.apk $sysapk/`basename $sysapk`.apk;
chown -R root.root $sysapk;
chmod 644 $sysapk/lib/arm*/* $sysapk/oat/arm*/*;
# flag for cleanup on reboot following optimization
touch /data/app/$i/integrated;
fi;
# remove packages.xml entry for /data APK
sed -i -e "/<updated-package name=\"${package}/,/<\/updated-package>/d" $xml;
fi;
mount -o remount,ro /system;
rm -rf $gtmp;
fi;
elif [ -f /data/app/$i/integrated ]; then
# clean up to mimic pre-Lollipop (AOSP) behavior
rm -rf /data/app/$i;
fi;
done;
# global cleanups required on Lollipop (and above)
if [ "$sdkver" -ge 21 ]; then
# fix /system/lib permissions to ensure libs copied via symlink are correct
mount -o remount,rw /system;
chown root.root /system/lib/*.so;
chmod 644 /system/lib/*.so;
mount -o remount,ro /system;
fi;
logbuff echo -e "\n---";
# write buffered log once /sdcard is available
until [ "$(writable /sdcard)" == 0 ]; do
sleep 1;
done;
log=/sdcard/gapps-integrator.log;
test ! -f $log && echo -e "## GApps Auto-Integration Script Log\n" > $log;
echo -e "$logbuff\n" >> $log;
| true |
8a546d0a0a0094fd042a0f7ceb9d1867c085576d | Shell | gsnowman/nba | /catchup.sh | UTF-8 | 198 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Re-fetch game data for the dates that were missed, appending to the log.
. etc/set_ruby_lib.sh
catchup_dates=("2016-03-09" "2016-03-10")
for d in "${catchup_dates[@]}"; do
  printf 'Fetching date %s...' "$d"
  # Redirection order matters: stderr goes to the console (old stdout),
  # while stdout is appended to the log file.
  ruby ruby/fetch_games.rb "$d" 2>&1 >> catchup.log
  echo "Done"
done
| true |
66f9fb9c8fe0c9c5a8e59bc2afe74528303522fc | Shell | ludwig778/lab-infra | /certbot/scripts/clean_certs.sh | UTF-8 | 122 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# Load CERTBOT_DIR (and friends) from the sibling config file.
. ./config

# BUGFIX: the test was previously unquoted, so with CERTBOT_DIR unset it
# collapsed to `[ -d ]` (true) and the script printed success without
# removing anything.
if [ -d "$CERTBOT_DIR" ]
then
    # ${CERTBOT_DIR:?} aborts instead of expanding empty (rm -rf safety net).
    rm -rf -- "${CERTBOT_DIR:?}"
    echo "Successfully cleaned certs"
    exit 0
fi
| true |
c43759ca74d7a302f8174abebc1a9bc1d2f16a2f | Shell | aur-archive/shellex | /PKGBUILD | UTF-8 | 825 | 2.6875 | 3 | [] | no_license | #Maintainer: Johannes Visintini <arch at joker234.de>
# Arch Linux PKGBUILD for shellex -- a shell-based launcher (urxvt + zsh).
pkgname=shellex
pkgver=0.1 # don't forget to change sha1sums (with makepkg -g) if you update this
pkgrel=1
pkgdesc="shell-based launcher"
arch=('i686' 'x86_64')
url='https://github.com/Merovius/shellex'
license=('BSD')
depends=('rxvt-unicode' 'zsh' 'perl-x11-protocol' 'xorg-xrandr')
makedepends=('wget' 'asciidoc' 'docbook-xsl' 'tar')
source=(https://github.com/Merovius/$pkgname/archive/$pkgver.tar.gz)
sha1sums=('074f6869a95b7bbdbf2e494c0600ab4b6978e66d')
conflicts=('shellex-git')
# Build the binary and the asciidoc man page.
build() {
cd "$pkgname-$pkgver"
make
make -C doc/man
}
# Install binary, man page and licence into the package root ($pkgdir).
package() {
cd "$pkgname-$pkgver"
make DESTDIR="$pkgdir/" install
install -Dm644 doc/man/shellex.1 \
${pkgdir}/usr/share/man/man1/shellex.1
install -Dm644 LICENSE \
${pkgdir}/usr/share/licenses/${pkgname}/LICENSE
make clean
}
| true |
b0a4eff2fbfd797ed159418631cce8c95ae59fd6 | Shell | joemulray/265 | /Assignments/A1/prob1 | UTF-8 | 407 | 3.75 | 4 | [] | no_license | #!/bin/bash
#Joseph Mulray
#The purpose of this program is to determine the number of directories containing problems
#Archive used for testing
#ARCHIVE="/home/kschmidt/public_html/Files/DrMathArchive"
#Number of directories in the folder
# (expects $ARCHIVE to point at the archive root, one subdirectory per topic;
# $ARCHIVE is unquoted throughout, so paths with spaces will break)
directory=$(ls -d $ARCHIVE/*/ | wc -l)
#Number of problems
# NOTE(review): as an ERE, "prob*" means "pro" followed by zero or more "b"s,
# so names containing just "pro" also match; "prob" (or find -name 'prob*')
# is probably what was intended -- confirm against the archive layout.
problems=$(ls -R -l $ARCHIVE/*/ | egrep -n "prob*" | wc -l )
#Printing output
# Tab-separated: <directory count>\t<problem count>
echo -e "$directory\t$problems"
| true |
bc765f6bcb87f2f2455f1a2c0012f2b1be3d4ef3 | Shell | mattiaponza/kubernetes-projects | /curly | UTF-8 | 239 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Poll the test web app (NodePort 30305 on host $1) until MAXCURL non-404
# responses have been collected.
COUNTER=0
MAXCURL=15
while [ $COUNTER -lt $MAXCURL ]; do
# NOTE(review): "http:/$1" has a single slash -- verify curl normalises
# this; "http://$1" was presumably intended.
OUTPUT="$(curl http:/$1:30305/testwebapp/)"
# Only non-404 responses are printed and counted; while the app keeps
# answering "404 page not found" the loop spins without sleeping and never
# terminates. NOTE(review): confirm that busy-waiting here is intended.
if [ "$OUTPUT" != "404 page not found" ]; then
echo $OUTPUT
let COUNTER=COUNTER+1
sleep 1
fi
done
| true |
967d41e2cd0c3ff1bb77146055706177f4e2c236 | Shell | xsxusheng/agent | /agent/script/diskalarm | UTF-8 | 1,218 | 3.203125 | 3 | [] | no_license | #!/bin/sh
#name:disk
#function:fetch disk info
# Enumerate physical disk devices (md software-raid excluded), read each
# disk's temperature via smartctl, and flag any disk above a fixed threshold.
# Output lists are "]"-separated, one entry per disk.
diskname=`fdisk -l 2>/dev/null | grep "Disk /dev" | grep -v /dev/md | awk '{print $2}' | sed -e 's/:/\ /g'`
flag=1
for name in $diskname;do
tt=$tt$name']'
# ATA disks report Temperature_Celsius (field 10); SCSI/SAS disks report
# "Current Drive Temperature" (field 4); defaults to 0 if neither matches.
diskTemperature=`smartctl -A $name | awk 'BEGIN {temp=0} /Temperature_Celsius/ {temp=$10} /Current Drive Temperature/ { temp=$4} END {print temp}'`
# diskTemperature=`smartctl -A $name |awk /'Temperature_Celsius/ {print $10}'`
# if test $diskTemperature='';
# then
# diskTemperature=`smartctl -A $name |awk /'Current Drive Temperature/ {print $4}'`
# fi
tt1=$tt1$diskTemperature']'
#diskTempeValue=`smartctl -A $name |awk /'Temperature_Celsius/ {print $4}'`
#diskTempeThrod=`smartctl -A $name |awk /'Temperature_Celsius/ {print $6}'`
#diskTempeValue=$diskTempeValue'1'
#diskTempeThrod=$diskTempeThrod'1'
# Fixed alarm threshold in degrees Celsius.
diskTempeThrod=50
if test $diskTemperature -gt $diskTempeThrod;
then
flag=1
tt2=$tt2$flag']'
else
flag=0
tt2=$tt2$flag']'
fi
done
diskname=$tt
diskTemperature=$tt1
isTemprAlarm=$tt2
# Emit one machine-parseable line: key=value pairs separated by ";".
echo -e "diskname=$diskname;isTemprAlarm=$isTemprAlarm;diskTemperature=$diskTemperature"
exit 0
| true |
49ceacb37173e6ad32e359dea62a9c05f3c39b75 | Shell | xchem/XChemExplorer | /test_build.sh | UTF-8 | 809 | 2.640625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Fetch the latest nightly CCP4 suite for this OS, unpack it into ./ccp4,
# run its setup, then reinstall panddas with the bundled ccp4-python.
if [[ "$OSTYPE" == "linux-gnu" ]]; then
wget http://devtools.fg.oisin.rc-harwell.ac.uk/nightly/ccp4-linux64-latest.tar.bz2
bunzip2 ccp4-linux64-latest.tar.bz2
mkdir ./ccp4
# --strip-components=1 drops the versioned top-level directory.
tar -xf ccp4-linux64-latest.tar -C ./ccp4 --strip-components=1
elif [[ "$OSTYPE" == "darwin"* ]]; then
wget http://devtools.fg.oisin.rc-harwell.ac.uk/nightly/ccp4-osx-clang-latest.tar.gz
gunzip ccp4-osx-clang-latest.tar.gz
mkdir ./ccp4
tar -xf ccp4-osx-clang-latest.tar -C ./ccp4 --strip-components=1
fi
cd ccp4
echo "changed directory to ccp4"
echo "running setup"
# "yes y" answers the setup's interactive prompts; output is discarded.
yes y | ./BINARY.setup > /dev/null 2>&1
echo "finishing some other stuff..."
# Activate the CCP4 environment (presumably puts ccp4-python on PATH --
# TODO confirm against the setup script).
source bin/ccp4.setup-sh
# Reinstall panddas from scratch inside the CCP4 python environment.
yes y | ccp4-python -m pip uninstall panddas
ccp4-python -m pip install panddas
#git clone https://www.github.com/xchem/XChemExplorer
| true |
65bdf2369beb0737c513c4fce0ffcf415facb645 | Shell | antonellocaroli/bubba | /app-admin/bubba-networkmanager/files/lan-bridge.nm-dispatcher | UTF-8 | 437 | 3.359375 | 3 | [] | no_license | #!/bin/sh
# NetworkManager dispatcher hook: bring the hostapd-backed LAN bridge (br0)
# up or down together with the interface.
# NOTE(review): uses the bash-only "function" keyword although the shebang
# is /bin/sh -- fine where sh is bash, confirm for dash-based systems.

# Re-activate the bridged ethernet port and start the access point daemon.
function bridge_up {
/usr/bin/nmcli connection up ifname eth1
/usr/bin/systemctl start hostapd
}

# Stop the access point and, if the br0 bridge still exists, remove it.
function bridge_down {
/usr/bin/systemctl stop hostapd
# BUGFIX: this was "brctl show | | grep ..." -- the doubled pipe is a shell
# syntax error that made the whole script fail to parse.
MASTER=$(/sbin/brctl show | grep "^br0\s")
if [ "${MASTER}" != "" ];then
brctl delbr br0
fi
}

# Dispatcher entry point: $1 = interface name, $2 = action.
if [ "$1" = "br0" ]; then
if [ "$2" = "up" ]; then
bridge_up
fi
if [ "$2" = "down" ]; then
bridge_down
fi
fi
| true |
659a5662b12d1746c6c8bbb02ac0a94baf51e7b5 | Shell | closescreen/metki | /50_rep_03 | UTF-8 | 1,833 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env bash
#> Joins the per-lead/channel sum columns from 50_rep_01 onto the right of
#> the transition sums from 50_rep_02 (30/total).
#>> Data for a single accounting day.
set -u
set +x
set -o pipefail
# Positional arguments (paths follow the RESULT tree layout):
src_30=$1 # ../RESULT/30/<day>/total.gz
res=$2 # ../RESULT/50/<day>/50_rep_03.gz
# NOTE(review): src_30 is assigned but never referenced below -- confirm it
# is still needed.
day=`fn2days "$res"`
# leads_channels sums:
rep_01="../RESULT/50/$day/50_rep_01.gz"
chk "$rep_01" "($0 line: $LINENO) rep_01 file" "-s" "exists and filled" nopr || exit 3
rep_01_format="sid dom channel mark uniq cnt checksum"
# visits_cannels sums:
rep_02="../RESULT/50/$day/50_rep_02.gz"
chk "$rep_02" "($0 line: $LINENO) rep_02 file" "-s" "exists and filled" nopr || exit 3
#>: walk the transition sums and join the lead-channel sums onto them:
# (the single-quoted program below is handed to lae verbatim; its internal
# comments are part of the program text and are left untouched)
zcat "$rep_02" | lae -sw -lb="sid dom channel mvaldom uniq cnt" '
my $leads_channels_sum_file = shift @ARGV or die "file!";
open my $lcfh, "zcat $leads_channels_sum_file |" or die "$leads_channels_sum_file: $!";
my %lech;
while (<$lcfh>){
chomp;
my ( $sid, $dom, $channel, $mark, $uniq, $cnt, $chsum ) = split /\*/;
#> из лидов берем только строки с LEAD/ORDER
next if $mark eq "CONFIRM";
$lech{ $sid }{ $dom }{ $channel } = [ $uniq, $cnt, $chsum ];
}
_{
p @F, @{ delete( $lech{ &Sid }{ &Dom }{ &Channel } ) || [undef,undef,undef] };
};
# распечатать оставшиеся lead-channel sums:
for my $sid ( keys %lech ){
for my $dom ( keys %{ $lech{$sid} } ){
for my $channel ( keys %{ $lech{$sid}{$dom} } ){
p $sid, $dom, $channel, "", 0, 0, @{ $lech{$sid}{$dom}{$channel} };
}
}
}
' "$rep_01" | sort -T. -t\* -k1,1n -k2,2 -k3,3 -S 333M
#>> OUT: sid dom channel mvaldom uniq uids uniq_leads leads chsum
| true |
6f4d64343c2edc3d7326c31c745aaff83fa0a1a3 | Shell | AliThari/Nek5000 | /tools/amg_setup/hypre/install | UTF-8 | 320 | 2.8125 | 3 | [] | no_license | #!/bin/bash
set -e
# Skip the build entirely if the library is already installed.
if [ -f ./lib/libHYPRE.a ]; then
  exit 0
fi
# Download the release tarball into the 3rd_party cache when it is missing.
# BUGFIX: the download previously landed in the CWD as v2.14.0.tar.gz, so
# the tar step below (which only reads 3rd_party/hypre) still failed on a
# fresh checkout.
if [ ! -f ../../../3rd_party/hypre/*.tar.gz ]; then
  mkdir -p ../../../3rd_party/hypre
  wget -O ../../../3rd_party/hypre/v2.14.0.tar.gz https://github.com/LLNL/hypre/archive/v2.14.0.tar.gz
fi
# Unpack, configure a serial (no-MPI) build and install into this tool's
# own prefix (two levels above the hypre src directory).
tar -zxf ../../../3rd_party/hypre/*.tar.gz
cd hypre*/src
./configure --prefix=`pwd`/../.. --without-MPI
make -j4 install
| true |
7333bf8054c77db9b1577aadca4240f76e2c5274 | Shell | MalawiGeospatialTools/MASDAP20_bck | /geonode_bck.sh | UTF-8 | 3,842 | 3.609375 | 4 | [] | no_license | #!/bin/bash
# geonode backupper
# Nightly backup of the geonode stack onto the /dev/xvdg volume:
#   1. pg_dump of the geonode and geonode_imports databases (daily, pruned)
#   2. weekly full + incremental tar of the geoserver data directory
#   3. weekly rsync-style copy of the geonode static files
echo "*****************************************************************"
echo "-----------------------------------------------------------------"
echo "*****************************************************************"
echo
echo $(date) "--> starting bck for geonode dbs"
sudo mount /dev/xvdg /mnt/auto_bck
today_dir=/mnt/auto_bck/pg_dumps/$(date +"%Y%m%d")_dump
echo "creating backup folder -->" $today_dir
mkdir $today_dir
echo $(date) "--> dumping geonode db"
pg_dump -Fc -U geonode geonode > $today_dir/geonode.dump
echo $(date) "--> geonode db dumped"
echo $(date) "--> dumping geonode_imports db"
pg_dump -Fc -b -U geonode geonode_imports > $today_dir/geonode_imports.dump
echo $(date) "--> geonode_imports db dumped"
# removing old folders
# Retention: keep the newest 7 dumps plus, within the current and previous
# month, the dumps taken on the 01/08/15/23.
bck_dir=/mnt/auto_bck/pg_dumps
# get current month
cur_m=$(date +%m)
# get previous month
prev_m=$(expr $cur_m - 1)
# get previous month with leading 0
prev_m2=$(printf "%02d" $prev_m)
# NOTE(review): in January prev_m becomes 0 -> "00", so December's dumps are
# not protected by the month regex below -- confirm this is intended.
# build re that matches current and previous month backups
re=$(echo "201[6-9]("$cur_m"|"$prev_m2")")
# loop on backups in days other than 01, 08, 15, 23
for dir in $(ls $bck_dir | head -n -7 | grep -E $re | grep -Ev \
'(01|08|15|23)_dump')
do
echo removing $dir
rm -r $(echo $bck_dir"/"$dir)
done
echo "end of dumping procedure"
echo "-----------------------------------------------------------------"
echo $(date) "--> starting bck for geoserver data folder"
this_week_dir=/mnt/auto_bck/geoserver_bck/$(date +"%Yweek%W")
echo "checking backup folder -->" $this_week_dir
# First run of the week makes a level-0 archive; later runs are incremental
# against the saved tar snapshot file.
if [ -e $this_week_dir ]
then
echo folder exists
echo $(date) "--> creating incremental archive"
sudo tar -cpzf $this_week_dir/incr_dump.tgz -C /mnt -g $this_week_dir/tarlog.snap --backup=numbered geoserver_data
sudo cp $this_week_dir/tarlog_lev0.snap $this_week_dir/tarlog.snap
echo $(date) "--> incremental archive created"
else
echo creating folder
mkdir $this_week_dir
echo $(date) "--> creating archive"
sudo tar -cpzf $this_week_dir/full_dump.tgz -C /mnt -g $this_week_dir/tarlog.snap geoserver_data
sudo cp $this_week_dir/tarlog.snap $this_week_dir/tarlog_lev0.snap
echo $(date) "--> archive created"
fi
# SHORT NOTE ON HOW-TO RESTORE: firstly restore the full dump, then restore an incremental dump on top of it
# restoring the full dump (example)
# sudo tar xpzf full_dump.tgz -C /destination/folder
# restoring incremental dump (example)
# sudo tar xpzf incr_dump.tgz -C /destination/folder --listed-incremental=/dev/null
# restoring a single file (example): include the full path to the file on the archive, as it is printed with tar --list
# sudo tar xpzf full_dump.tgz -C /destination/folder geoserver-data/workspaces/geonode/osm_extracts/schools_point_osm/layer.xml
# now deleting old folders
# Retention: keep the newest 3 weekly folders plus week 01 and every 4th
# week (05, 09, ... 49) matched by the regex built below.
bck_dir=/mnt/auto_bck/geoserver_bck
ws=$(echo 01)
for w in $(seq -w 5 4 52)
do
ws=$(echo $ws"|"$w)
done
wre=$(echo "201[6-9]week("$ws")")
for dir in $(ls $bck_dir | head -n -3 | grep -Ev $wre)
do
echo removing $dir
sudo rm -r $(echo $bck_dir"/"$dir)
done
echo "-----------------------------------------------------------------"
echo $(date) "--> starting bck static folder"
sthis_week_dir=/mnt/auto_bck/geonode_statics/$(date +"%Yweek%W")
echo "checking backup folder -->" $sthis_week_dir
if [ -e $sthis_week_dir ]
then
echo folder exists
else
echo creating folder
mkdir $sthis_week_dir
fi
echo $(date) "--> starting copy"
# Dereference symlinks, keep attributes, only copy newer files, and keep
# numbered backups of files that changed since last week.
cp -L -R --preserve=all -u --backup=numbered /var/www/geonode/ $sthis_week_dir
echo $(date) "--> copy complete"
# Same weekly retention scheme as for the geoserver backups above.
bck_dir=/mnt/auto_bck/geonode_statics
ws=$(echo 01)
for w in $(seq -w 5 4 52)
do
ws=$(echo $ws"|"$w)
done
wre=$(echo "201[6-9]week("$ws")")
for dir in $(ls $bck_dir | head -n -3 | grep -Ev $wre)
do
echo removing $dir
rm -r $(echo $bck_dir"/"$dir)
done
sudo umount -d /dev/xvdg
echo "end of procedure"
| true |
36c56e26186e5f1f5d904f78b07d235f1cee03d0 | Shell | dgvncsz0f/dot | /roles/bash/files/.bash_profile | UTF-8 | 724 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Interactive login-shell setup: history behaviour, globbing options,
# prompt, locale, and PATH/profile assembly from ~/.bash.d.
shopt -s histappend
shopt -s extglob
shopt -s extquote
shopt -s cdspell
shopt -s dirspell
shopt -s globstar
shopt -s checkjobs
shopt -s checkwinsize
export PS1='\u@\H [\w]\n\$ '
export LANG=en_US.UTF-8
export LC_ALL=en_US.UTF-8
export HISTFILE=$HOME/.bash_history
export HISTFILESIZE=10000
export HISTSIZE=7500
export HISTCONTROL=ignoredups:erasedups
export HISTIGNORE=" *"
if [ -r /etc/profile ]
then . /etc/profile; fi
if [ -e "$HOME/.bash.d/functions" ]
then . "$HOME/.bash.d/functions"; fi
# BUGFIX: these loops previously iterated over unquoted $(find ...) output,
# which word-split any path containing spaces; read line by line instead
# (process substitution keeps the PATH assignments in this shell).
while IFS= read -r d
do PATH="$PATH:$(realpath "$d")"; done < <(find "$HOME/.bash.d/bin" -type d -o -type l)
export PATH
while IFS= read -r f
do . "$f"; done < <(find "$HOME/.bash.d/profile.d" -type f)
if [ -r "$HOME/.bashrc" ]
then . "$HOME/.bashrc"; fi
| true |
472d10630b72202f98770b8ea92ffcff870885f0 | Shell | dstrctrng/offline-install | /libexec/build-cache-apt | UTF-8 | 1,188 | 3.625 | 4 | [] | no_license | #!/usr/bin/env bash
# Build/refresh a partial local mirror of the Ubuntu apt archive under
# $shome/var/apt/ubuntu.  Positional arguments are distribution codenames
# (e.g. "trusty"); only those dists and the pool entries for packages that
# are installed locally or already mirrored are synced.
set -efu
# Resolve the project root (parent of this script's directory), immune to
# CDPATH and symlinks.
shome="$(unset CDPATH; cd -P -- "$(dirname -- "$BASH_SOURCE")/.." && pwd -P)"
url_ubuntu="rsync://archive.ubuntu.com/ubuntu"
mkdir -p $shome/var/apt/ubuntu
cd $shome/var/apt/ubuntu
mkdir -p $shome/var/apt/ubuntu/{dists,pool}
# Temp file capturing the rsync itemized output; removed on exit.
tmp_updates="$(mktemp -t XXXXXXXXX)"
trap "rm -f $tmp_updates" EXIT
{
# main mirror except for debugging trace, symlink ubuntu, and very large pool, dists
rsync -iaO --delete --exclude mirror --exclude trace --exclude ubuntu --exclude pool --exclude dists $url_ubuntu/. $shome/var/apt/ubuntu/
# mirror interesting distributions
for nm_distro in "$@"; do
for pth_dist in ubuntu/dists/$nm_distro{,-{backports,security,updates}}; do
# skip installer, not relevant for aptitude or debootstrap
rsync -iaO --delete --exclude 'installer-*' $url_ubuntu/$pth_dist/. $shome/var/apt/$pth_dist/
done
done
# mirror select and existing packages
# The embedded $( ... ) builds one --include '<pkg>_*' per package name,
# taking the union of locally installed packages (dpkg -l, state "i...")
# and .deb files already present in the mirrored pool.
rsync -iaO \
--include '*/' \
$({ dpkg -l 2>&- | grep ^i | awk '{print $2}'; find pool -type f -name '*.deb' | cut -d/ -f4; } | sort -u | while read -r p; do echo --include "${p}_*"; done) \
--exclude '*' $url_ubuntu/pool/. $shome/var/apt/ubuntu/pool/
} | tee "$tmp_updates"
| true |
7ffba392dfa4973d75113b50da96e73d2b904756 | Shell | devel0/docker-nas | /first_setup.sh | UTF-8 | 459 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# First-time Samba / Active Directory domain-join for the NAS container:
# validate smb.conf, obtain a Kerberos ticket, join the AD domain, restart
# Samba and wait until winbind can enumerate domain users and groups.

# Sanity-check the Samba configuration before doing anything else.
testparm
echo -n "use follow itadmin credential: "
cat /security/dc01/itadmin
echo
echo "---> kerberos init"
echo
kinit itadmin
# to enable debug add -d 10
echo
echo "---> join domain"
echo
net ads join -U itadmin
echo
echo "---> restarting samba services"
echo
restart_samba
# Marker file consulted on subsequent container starts.
touch /root/initialized
# Poll winbind until the domain user/group listing succeeds.
# Fix: test the exit status of wbinfo directly instead of the fragile
# `wbinfo -ug; if [ "$?" == "0" ]` pattern (`==` is also non-POSIX in `[`).
# The listing output is still printed on every attempt, as before.
while true; do
    echo "listing user, groups..."
    if wbinfo -ug; then
        break
    fi
    sleep 1
done
#service supervisor start
| true |
74acb83e9652e50c7aee0d2bf5cc0d498a92571e | Shell | makefu/archive-tarballs | /add-mirror.sh | UTF-8 | 596 | 3.765625 | 4 | [] | no_license | #!/bin/sh
## mirrors a nixpkgs url like mirror: , http:, ftp: to archive.org
# usage: add-mirror.sh URL [ITEM-IDENTIFIER]
set -euf
# Pull in the info/error logging helpers (slog.sh must be on PATH).
. $(command -v slog.sh)
# The internet-archive CLI (`ia`) needs credentials configured up front.
if test ! -e $HOME/.config/ia.ini ;then
  error "please run 'ia configure' first"
  exit 1
fi
# Fetch the URL into the nix store; the store path is the last output line.
p=$(nix-prefetch-url --print-path ${1?please provide url to mirror}| tail -n1)
ident=${2:-nixos-stockholm-tarballs}
base=$(basename $p)
# Upload only if a file of this name is not already in the archive.org item.
if ia list $ident | grep "^$base$"; then
  info "$base already mirrored"
else
  ia upload $ident $p --retries=10 || error "$base upload failed to $ident"
fi
# Print the stable download URL of the mirrored file (the script's result).
echo "https://archive.org/download/$ident/$base"
| true |
d2b21ed9e356f058a37858db5611e1d06fa4f232 | Shell | shivshav/jenkins-docker | /jenkins-setup.sh | UTF-8 | 2,899 | 3.546875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# One-shot Jenkins bootstrap (run inside the container, self-deletes at the
# end): fills templated config files (LDAP, Gerrit trigger, Jenkins URL)
# from linked-container environment variables and, when admin credentials
# are available, registers a "jenkins" service user in Gerrit.
CI_ADMIN_UID=$OPENLDAP_ENV_CI_ADMIN_UID
CI_ADMIN_PWD=$OPENLDAP_ENV_CI_ADMIN_PWD
GERRIT_NAME=gerrit
GERRIT_WEBURL=$GERRIT_ENV_WEBURL
#NEXUS_REPO=$4
SLAPD_DOMAIN=$OPENLDAP_ENV_SLAPD_DOMAIN
LDAP_ACCOUNTBASE=$OPENLDAP_ENV_LDAP_ACCOUNTBASE
LDAP_NAME=$LDAP_SERVER
DEFAULT_CONFIG_XML=config.xml.override
REFS_DIR=/usr/share/jenkins/ref
echo "Running first-time setup..."
# Replace '/' in url to '\/'
# (escapes the URL so it can be used literally inside a sed s/// below;
# the first line guarantees a trailing slash so the loop terminates)
[ "${GERRIT_WEBURL%/}" = "${GERRIT_WEBURL}" ] && GERRIT_WEBURL="${GERRIT_WEBURL}/"
while [ -n "${GERRIT_WEBURL}" ]; do
    GERRIT_URL="${GERRIT_URL}${GERRIT_WEBURL%%/*}\/"
    GERRIT_WEBURL="${GERRIT_WEBURL#*/}"
done
#Convert FQDN to LDAP base DN
# e.g. "example.com" -> "dc=example,dc=com"
SLAPD_TMP_DN=".${SLAPD_DOMAIN}"
while [ -n "${SLAPD_TMP_DN}" ]; do
    SLAPD_DN=",dc=${SLAPD_TMP_DN##*.}${SLAPD_DN}"
    SLAPD_TMP_DN="${SLAPD_TMP_DN%.*}"
done
SLAPD_DN="${SLAPD_DN#,}"
# Keep only the first RDN (text before the first comma) of the account base.
LDAP_ACCOUNTBASE="$( cut -d ',' -f 1 <<< "$LDAP_ACCOUNTBASE" )"
echo "Setting up templated files..."
#Create config.xml
sed -i "s/{SLAPD_DN}/${SLAPD_DN}/g" ${REFS_DIR}/${DEFAULT_CONFIG_XML}
sed -i "s/{LDAP_NAME}/${LDAP_NAME}/g" ${REFS_DIR}/${DEFAULT_CONFIG_XML}
sed -i "s/{LDAP_ACCOUNTBASE}/${LDAP_ACCOUNTBASE}/g" ${REFS_DIR}/${DEFAULT_CONFIG_XML}
# Setup gerrit-trigger.xml
sed -i "s/{GERRIT_NAME}/${GERRIT_NAME}/g" ${REFS_DIR}/gerrit-trigger.xml
sed -i "s/{GERRIT_URL}/${GERRIT_URL}/g" ${REFS_DIR}/gerrit-trigger.xml
# Replace '/' in url to '\/'
# (same sed-escaping loop as above, for the Jenkins URL)
[ "${JENKINS_WEBURL%/}" = "${JENKINS_WEBURL}" ] && JENKINS_WEBURL="${JENKINS_WEBURL}/"
while [ -n "${JENKINS_WEBURL}" ]; do
    JENKINS_URL="${JENKINS_URL}${JENKINS_WEBURL%%/*}\/"
    JENKINS_WEBURL="${JENKINS_WEBURL#*/}"
done
# Setup Jenkins url and system admin e-mail
sed -i "s/{JENKINS_URL}/${JENKINS_URL}/g" ${REFS_DIR}/jenkins.model.JenkinsLocationConfiguration.xml
if [[ -n $CI_ADMIN_UID && -n $CI_ADMIN_PWD ]]; then
    #create ssh key.
    echo "Creating jenkins user's ssh key..."
    mkdir -p /var/jenkins_home/.ssh/
    ssh-keygen -q -N '' -t rsa -f /var/jenkins_home/.ssh/id_rsa
    ### Not sure if this is necessary!
    # Creating the jenkins user in gerrit?
    echo "Waiting for Gerrit to be ready..."
    until $(curl --output /dev/null --silent --head --fail http://${CI_ADMIN_UID}:${CI_ADMIN_PWD}@gerrit:8080/gerrit); do
        printf '.'
        sleep 5
    done
    echo "Creating jenkins user in Gerrit..."
    JENKINS_USER_POST_DATA="{
    \"name\": \"Jenkins User\",
    \"ssh_key\": \"$(cat /var/jenkins_home/.ssh/id_rsa.pub)\",
    \"http_password\": \"TestPassword\",
    \"groups\": [
      \"Non-Interactive Users\"
    ]
  }"
    echo $JENKINS_USER_POST_DATA >> $(dirname $0)/jenkins-user.json
    set -x
    # Register the "jenkins" account via Gerrit's REST API.
    curl -H Content-Type:application/json \
      -X PUT \
      --data "${JENKINS_USER_POST_DATA}" \
      --user ${CI_ADMIN_UID}:${CI_ADMIN_PWD} \
      http://gerrit:8080/gerrit/a/accounts/jenkins
    set +x
fi
echo "First-time setup complete."
# This bootstrap is single-use: delete the script itself.
rm "$0"
| true |
b2de3ce417282b4c58d211c8426aeb3649309ba8 | Shell | maysrp/Ttomp4 | /head.sh | UTF-8 | 84 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Exit quietly when the machine is busy: parse the load figure printed by
# `uptime` (field 8, truncated to an integer) and bail out if it is above 2.
# NOTE(review): which load average lands in field 8 depends on uptime's
# output format (uptime length, user count) — verify on the target host.
load=$(uptime | awk '{print int($8)}')
if [ "$load" -gt 2 ]; then
  exit
fi
| true |
73461360ef758fabbd3e6eef46d2b5855c4bec70 | Shell | FrancescoBellino97/isa_gr02_lab03 | /RISC_V/innovus/scripts/innovus.sh | UTF-8 | 335 | 2.53125 | 3 | [] | no_license | #!/bin/bash
# Launch Cadence Innovus for the design flow: verify the required input
# files exist, clear logs from a previous run, source the Innovus 17.11
# environment and start the tool.

# Required input files; abort if any is missing.
# Fix: the error paths previously exited with status 0, so callers could
# not detect the failure; errors now go to stderr and exit with status 1.
for required in design.globals mmm_design.tcl; do
    if ! [ -e "$required" ]; then
        echo "ERRORE: file $required non trovato" >&2
        exit 1
    fi
done
# Remove artifacts of a previous run.
rm -f innovus.cmd innovus.log innovus.logv
#inizializzo ambiente innovus
source /software/scripts/init_innovus17.11
innovus
| true |
f56d4b530655fefc7025e034db5d0609b4002c57 | Shell | hepcat72/bioconda-recipes | /recipes/plink2/build.sh | UTF-8 | 583 | 2.90625 | 3 | [] | permissive | # Portable sha1 sums across linux and os x
# conda-build recipe script for plink (installed as "plink2").
# $PREFIX and $OSX_ARCH are provided by conda-build.
# Patch the bundled bootstrap script: use openssl instead of shasum for
# portable sha1 sums across Linux and macOS.
sed -i.bak -e "s/shasum/openssl sha1 -r/g" plink_first_compile
# Remove "make plink" so we can call it with overrides
sed -i.bak -e "s/make plink//g" plink_first_compile
# This downloads and builds a local zlib-1.2.8
./plink_first_compile
# Build using Makefile.std as recommended in the README
# On Linux link against the conda-provided OpenBLAS; on macOS ($OSX_ARCH
# set) the Makefile defaults are used.
if [[ -z "$OSX_ARCH" ]]; then
make CFLAGS="-Wall -O2 -I$PREFIX/include" BLASFLAGS="-L$PREFIX/lib -lopenblas" -f Makefile.std plink
else
make -f Makefile.std plink
fi
# Install as plink2
mkdir -p $PREFIX/bin
cp plink $PREFIX/bin/plink2
| true |
a89df6b418c0650cf7f2b5494932e14a21783b48 | Shell | emuchogu/docker | /qcadoo/entrypoint.sh | UTF-8 | 1,601 | 4 | 4 | [] | no_license | #!/bin/bash
#
# Qcadoo Docker Entrypoint
# Copyright (c) 2021 Asymworks, LLC
# Database connection settings, taken from QCADOO_DB_* with defaults and
# re-exported under the PGDB_* names consumed by envsubst/psql below.
export PGDB_HOST=${QCADOO_DB_HOST:-db}
export PGDB_PORT=${QCADOO_DB_PORT:-5432}
export PGDB_NAME=${QCADOO_DB_NAME:-mes}
export PGDB_USERNAME=${QCADOO_DB_USERNAME:-qcadoo}
export PGDB_PASSWORD=${QCADOO_DB_PASSWORD:-qcadoo}
# Print a red [ABORT] message and exit with the given code (default 1).
#   $1 - message, $2 - optional exit code
abort() {
    local msg=$1
    shift
    local code=${1:-1}
    echo -e "\033[31m\033[1m[ABORT]\033[0m ${msg}"
    exit ${code}
}
# Print a green [INFO] message.
info() {
    echo -e "\033[32m[INFO]\033[0m $1"
}
# Block until PostgreSQL accepts connections; up to 10 retries, 3s apart.
pg_wait() {
    # Wait for the PostgreSQL connection to be available
    pg_isready -q -h ${PGDB_HOST} -p ${PGDB_PORT} && return
    info "Waiting for PostgreSQL connection"
    for run in {1..10} ; do
        sleep 3
        pg_isready -q -h ${PGDB_HOST} -p ${PGDB_PORT} && break
    done
    # $? here is the status of the last pg_isready executed in the loop:
    # zero if the break was taken, non-zero if all attempts failed.
    [ $? -eq 0 ] || abort "Timed out waiting for PostgreSQL connection"
}
# "start": render db.properties from the environment and exec the server.
if [ "$1" == "start" ] ; then
    # Write environment variables into Qcadoo Configuration
    [ -f db.properties.conf ] || abort "Missing db.properties.conf file"
    info "Updating database connection information in db.properties"
    envsubst < db.properties.conf > $HOME/mes-application/qcadoo/db.properties
    # Start the Qcadoo Server
    pg_wait
    info "Starting Qcadoo"
    exec $HOME/mes-application/bin/startup.sh
fi
# "create-schema": load the bundled demo SQL schema into the database.
if [ "$1" == "create-schema" ]; then
    pg_wait
    info "Loading SQL Schema"
    SQL_FILE="${HOME}/mes-application/webapps/ROOT/WEB-INF/classes/schema/demo_db_en.sql"
    PSQL="psql -h ${PGDB_HOST} -p ${PGDB_PORT} -U ${PGDB_USERNAME} ${PGDB_NAME}"
    PGPASSWORD="${PGDB_PASSWORD}" exec $PSQL < $SQL_FILE
fi
# Any other command line is executed verbatim (standard entrypoint pattern).
exec "$@"
| true |
cb28d34e2f1b627f35936508860647a5a8920e84 | Shell | Sergong/selfsigned-certs | /make_ca.sh | UTF-8 | 1,279 | 3.765625 | 4 | [] | no_license | #!/bin/bash
#
# This script creates a 'mini CA' that can be used to create self-signed certs.
#
# The certs will be 'recognised' on your device if you import and trust the myCA.pem certificate
#
#
# iOS installation instructions, printed at the end of the run.
# (Unquoted heredoc delimiter, but the text contains no $-expansions.)
exittext=$(cat <<SETVAR
------------------------------------------------------------------------------------------------------
Done!
To use this on an iOS device, do the following:
1. Email the root certificate to yourself so you can access it on your iOS device
2. Click on the attachment in the email on your iOS device
3. Go to the settings app and click ‘Profile Downloaded’ near the top
4. Click install in the top right
5. Once installed, hit close and go back to the main Settings page
6. Go to “General” > “About”
7. Scroll to the bottom and click on “Certificate Trust Settings”
8. Enable your root certificate under “ENABLE FULL TRUST FOR ROOT CERTIFICATES”
SETVAR
)
# CA private key (DES3-encrypted; openssl prompts for a passphrase).
echo "Generating myCA private key..."
openssl genrsa -des3 -out myCA.key 2048
# Self-signed root certificate, valid for 5 years (1825 days).
echo "Generating myCA root certificate..."
openssl req -x509 -new -nodes -key myCA.key -sha256 -days 1825 -out myCA.pem
# Trust the new root CA system-wide on macOS (requires sudo).
echo "Adding the Root Certificate to macOS Keychain..."
sudo security add-trusted-cert -d -r trustRoot -k "/Library/Keychains/System.keychain" myCA.pem
echo "$exittext"
| true |
4dee82e5e59fcfbf321bcbcbc09d8eecf7c0763b | Shell | infinitio/infinit | /oracles/apertus/server/etc/init.d/apertus | UTF-8 | 2,144 | 3.609375 | 4 | [] | no_license | #! /bin/sh
# LSB init script for the apertus daemon.  Configuration (APERTUS binary
# path, META, PORT_TCP, PORT_SSL, USER) comes from /etc/apertus.conf.
. /etc/apertus.conf
DAEMON="$APERTUS"
PIDFILE=/var/run/apertus.pid
ARGUMENTS="--meta ${META} --port-tcp ${PORT_TCP} --port-ssl ${PORT_SSL} --syslog"
. /lib/lsb/init-functions
# Print the full daemon command line (action "command" below).
# NOTE(review): this shadows the `command` shell builtin for the rest of
# the script; the builtin is not used here, so it is harmless.
command()
{
    echo "$DAEMON" $ARGUMENTS
}
# Start the daemon in the background and poll up to ~3s for it to come up.
start()
{
    # Enable core dumps
    ulimit -c 300000
    if status
    then
        log_success_msg "Apertus already started"
        return 0
    fi
    log_daemon_msg "Starting apertus" "apertus"
    start-stop-daemon \
        --start \
        --exec "$DAEMON" \
        --pidfile "$PIDFILE" --make-pidfile \
        --user "$USER" \
        --chdir ${TMPDIR:-/tmp} \
        --background \
        -- ${ARGUMENTS}
    ret=$?
    # Poll status (30 x 0.1s); stop early if the initial start failed.
    for i in $(seq 30); do
        test $ret -ne 0 && break
        sleep 0.1
        status
        ret=$?
    done
    log_end_msg $ret
}
# Stop the daemon with SIGINT, retrying for up to 5 seconds, then remove
# the stale pidfile on success.
stop()
{
    log_daemon_msg "Stopping apertus" "apertus"
    start-stop-daemon \
        --stop \
        --signal INT \
        --exec "$DAEMON" \
        --retry 5 \
        --pidfile "$PIDFILE"
    ret=$?
    log_end_msg $ret
    if test $ret -eq 0; then
        rm "$PIDFILE"
    fi
}
# Return 0 if the daemon is running; clean up a stray pidfile otherwise.
status()
{
    if start-stop-daemon \
        --status \
        --exec "$DAEMON" \
        --pidfile "$PIDFILE"
    then
        return 0
    else
        if test -e "$PIDFILE"
        then
            log_success_msg "Remove stray PID file"
            rm "$PIDFILE"
        fi
        return 1
    fi
}
# Standard init-script action dispatch.
case "$1" in
    start)
        start || exit $?
        ;;
    stop)
        stop || exit $?
        ;;
    force-reload|restart)
        stop || exit $?
        start || exit $?
        ;;
    status)
        status || exit $?
        ;;
    command)
        command || exit $?
        ;;
    *)
        echo "Usage: $0 {start|stop|restart|force-reload|status}"
        exit 1
        ;;
esac
| true |
bf15fa04dcfc8593296ed90b5fbed47430b9e70e | Shell | svs14/dotfiles | /install.sh | UTF-8 | 2,616 | 4.34375 | 4 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!/bin/bash
#
# Installs env-custom
#
# Backs up and then
# replaces ~ dot files with those found in env-custom/home
#
# Shell rc files are appended with env-custom details
# such that it is sourced appropriately.
#
# Vim is setup with Vundle, if it does not exist then it is installed.
#
### Set bash environment for script
# Set bash to exit immediately on failure
set -e
# Set bash statements to return failure if any piped command within it fails
set -o pipefail
# Use this file's directory as current working directory
dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $dir
# Set old dotfiles home
old_dotfiles_home=./.old-home
# Set env custom dotfiles home
env_dotfiles_home=./home
# If already installed, exit
# (the presence of the backup directory is the "installed" marker)
if [ -d "$old_dotfiles_home" ]; then
    echo "Environment already installed. Run uninstall.sh to uninstall." 1>&2
    exit 1
fi
### Do a backup of dot files to be replaced by env-custom's
### env-custom/home files, then replace
# For file found in env_custom/home directory
for file in $(find $env_dotfiles_home/ -maxdepth 1 -type l -or -type f); do
    # Retrieve base filename
    filename="$(basename $file)"
    # Create .old-home backup directory if not exists
    mkdir -p $old_dotfiles_home
    # Backup file with same filename found in home directory
    # (cp -d preserves symlinks as links; if the home file does not exist,
    # an empty marker file is created so uninstall knows it was absent)
    cp -d ~/$filename $old_dotfiles_home/ 2>/dev/null \
        || touch $old_dotfiles_home/$filename
    # Copy file to home directory based on type
    if [ -h "$file" ]; then
        ln -fs $(readlink -f $file) ~/$filename
    else
        cp -f $file ~/$filename
    fi
done
### Append env-custom details to shell rc files
# Build shell statements to enable env custom
# (<<- strips the leading tabs; the escaped \$ keeps ENV_CUSTOM_DIR literal
# so it is expanded when the rc file is sourced, not now)
source_env_custom_str=$( cat <<- EOF
	: ENV_CUSTOM && export ENV_CUSTOM_DIR=$( pwd )
	: ENV_CUSTOM && . \$ENV_CUSTOM_DIR/shell-settings/.shellrc
EOF
)
# Enable env-custom for bash
if [ -f ~/.bashrc ]; then
    echo "$source_env_custom_str" >> ~/.bashrc
fi
# Enable env-custom for zsh
if [ -f ~/.zshrc ]; then
    echo "$source_env_custom_str" >> ~/.zshrc
fi
### Setup vim
# If vundle does not exist
if [ ! -d ~/.vim/bundle ]; then
    # Install vundle
    git clone https://github.com/gmarik/vundle.git ~/.vim/bundle/vundle
    # Setup vundle in new bash shell (so it has updated changes)
    bash -ic 'vim +BundleInstall +qall'
fi
# If using YouCompleteMe vim plugin
if [ -d ~/.vim/bundle/YouCompleteMe ]; then
    # Compile YouCompleteMe
    ( cd ~/.vim/bundle/YouCompleteMe && ./install.sh --clang-completer )
fi
# Print completion message
echo "Installation complete. Restart or source the rc of your shell to take
effect."
| true |
2a5cbfdf6e66016458e2023d7f5aa54be27f34b5 | Shell | D31m05z/szoftlab | /convert.sh | UTF-8 | 134 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Strip non-ASCII characters from every .java file under the current
# directory, in place (iconv -c drops characters unrepresentable in ASCII).
#
# Fixes: without `shopt -s globstar`, bash treats `**` like `*`, so the
# original loop silently missed files in deeper directories; nullglob
# avoids iterating over the literal pattern when nothing matches; all
# expansions are quoted so paths containing spaces survive.
shopt -s globstar nullglob
for file in **/*.java; do
    echo "$file"
    iconv -c -f utf-8 -t ascii "$file" > "$file.tmp"
    mv -f "$file.tmp" "$file"
done
| true |
0095ef1e2a1ed5ae86eda107bede3acb133c94db | Shell | Croxed/dotfiles | /.zshrc | UTF-8 | 4,532 | 3.046875 | 3 | [
"CC0-1.0"
] | permissive | #
#
# ██
# ░██
# ██████ ██████░██ ██████ █████
# ░░░░██ ██░░░░ ░██████ ░░██░░█ ██░░░██
# ██ ░░█████ ░██░░░██ ░██ ░ ░██ ░░
# ██ ██ ░░░░░██░██ ░██ ░██ ░██ ██
# ░██ ██████ ██████ ░██ ░██░███ ░░█████
# ░░ ░░░░░░ ░░░░░░ ░░ ░░ ░░░ ░░░░░
#
# Personal Zsh configuration file. It is strongly recommended to keep all
# shell customization and configuration (including exported environment
# variables such as PATH) in this file or in files source by it.
#
# Documentation: https://github.com/romkatv/zsh4humans/blob/v5/README.md.
# Periodic auto-update on Zsh startup: 'ask' or 'no'.
# You can manually run `z4h update` to update everything.
zstyle ':z4h:' auto-update 'ask'
# Ask whether to auto-update this often; has no effect if auto-update is 'no'.
zstyle ':z4h:' auto-update-days '28'
# Automaticaly wrap TTY with a transparent tmux ('integrated'), or start a
# full-fledged tmux ('system'), or disable features that require tmux ('no').
zstyle ':z4h:' start-tmux 'no'
# Move prompt to the bottom when zsh starts up so that it's always in the
# same position. Has no effect if start-tmux is 'no'.
zstyle ':z4h:' prompt-at-bottom 'yes'
# Keyboard type: 'mac' or 'pc'.
zstyle ':z4h:bindkey' keyboard 'mac'
# Right-arrow key accepts one character ('partial-accept') from
# command autosuggestions or the whole thing ('accept')?
zstyle ':z4h:autosuggestions' forward-char 'accept'
# Enable ('yes') or disable ('no') automatic teleportation of z4h over
# ssh when connecting to these hosts.
zstyle ':z4h:ssh:example-hostname1' enable 'yes'
zstyle ':z4h:ssh:*.example-hostname2' enable 'no'
# The default value if none of the overrides above match the hostname.
zstyle ':z4h:ssh:*' enable 'no'
# Send these files over to the remote host when connecting over ssh to the
# enabled hosts.
zstyle ':z4h:ssh:*' send-extra-files '~/.nanorc' '~/.env.zsh'
# Clone additional Git repositories from GitHub.
#
# This doesn't do anything apart from cloning the repository and keeping it
# up-to-date. Cloned files can be used after `z4h init`. This is just an
# example. If you don't plan to use Oh My Zsh, delete this line.
z4h install laggardkernel/git-ignore || return
z4h install hlissner/zsh-autopair || return
z4h install peterhurford/up.zsh || return
z4h install romkatv/zsh-defer || return
# Install or update core components (fzf, zsh-autosuggestions, etc.) and
# initialize Zsh. After this point console I/O is unavailable until Zsh
# is fully initialized. Everything that requires user interaction or can
# perform network I/O must be done above. Everything else is best done below.
z4h init || return
# Extend PATH.
path=(~/bin $path)
# Export environment variables.
export GPG_TTY=$TTY
# Source additional local files if they exist.
z4h source ~/.env.zsh
# Use additional Git repositories pulled in with `z4h install`.
#
# This is just an example that you should delete. It does nothing useful.
# Define key bindings.
z4h bindkey undo Ctrl+/ # undo the last command line change
z4h bindkey redo Alt+/ # redo the last undone command line change
z4h bindkey z4h-cd-back Shift+Left # cd into the previous directory
z4h bindkey z4h-cd-forward Shift+Right # cd into the next directory
z4h bindkey z4h-cd-up Shift+Up # cd into the parent directory
z4h bindkey z4h-cd-down Shift+Down # cd into a child directory
# Autoload functions.
autoload -Uz zmv
# Define functions and completions.
# md: create a directory (with parents) and cd into it.
function md() { [[ $# == 1 ]] && mkdir -p -- "$1" && cd -- "$1" }
compdef _directories md
# Define named directories: ~w <=> Windows home directory on WSL.
[[ -n $z4h_win_home ]] && hash -d w=$z4h_win_home
# Define aliases.
alias tree='tree -a -I .git'
# Add flags to existing aliases.
alias ls="${aliases[ls]:-ls} -A"
# Set shell options: http://zsh.sourceforge.net/Doc/Release/Options.html.
setopt glob_dots # no special treatment for file names with a leading dot
setopt no_auto_menu # require an extra TAB press to open the completion menu
# Load the "simpl" prompt/framework from its checkout directory.
# path to the framework root directory
SIMPL_ZSH_DIR=${HOME}/.zsh/.zsh-config
. "${SIMPL_ZSH_DIR}/init.zsh"
| true |
7c5b4c949adaf4c108070c800a06cefb9f633e25 | Shell | vvbka/jtools | /bin/jtest | UTF-8 | 980 | 3.71875 | 4 | [] | no_license | #!/bin/bash
# jtest — compile a Java source file plus a JUnit 4 test class and run the
# tests using the junit/hamcrest jars bundled with jtools.
#
# usage: jtest <file>[.java] <test_class>[.java]
file="$1"
test="$2"
cwd="$(dirname "$(dirname "$0")")/lib/node_modules/jtools/bin"
if [ -z "$file" ]; then
  echo "Please provide a file to test."
  echo "usage: jtest <file>.java <test_class>.java"
  exit 1
fi
if [ -z "$test" ]; then
  echo "Please provide a test class to use."
  echo "usage: jtest <file>.java <test_class>.java"
  exit 1
fi
## the input file should always have .java on it
if [ "${file: -5}" != ".java" ]; then
  file="${file}.java"
fi
## the test file should never have .java on it
# Fix: strip only a trailing ".java" — the old `sed 's/\.java//'` removed
# the FIRST ".java" anywhere in the name (breaking e.g. "My.javaTest.java").
test="${test%.java}"
# copy over the important jar files; the trap guarantees they are removed
# again even when compilation or the test run fails
cp "${cwd}/junit-4.12.jar" ./
cp "${cwd}/hamcrest-core-1.3.jar" ./
trap 'rm -f junit-4.12.jar hamcrest-core-1.3.jar' EXIT
# compile your file; abort on compile errors instead of running stale classes
javac "${file}" || exit 1
# compile your unit tests
javac -cp .:junit-4.12.jar "${test}.java" || exit 1
# run your unit tests
java -cp .:junit-4.12.jar:hamcrest-core-1.3.jar org.junit.runner.JUnitCore "${test}"
| true |
972c2c0f64903eca8f030dcb0c2482736bae77d5 | Shell | ecomcoders/magento2-deployscripts | /ecomcoders-update-manager.sh | UTF-8 | 1,488 | 4.09375 | 4 | [] | no_license | #!/usr/bin/env bash
set -euo pipefail

# Maintenance helper for an EC2 instance: trigger an on-demand AWS Backup
# job for the instance, optionally install OS updates interactively, and
# offer a reboot when the system requires one.
#
# usage: ecomcoders-update-manager.sh -r <resource-arn> -i <iam-role-arn>

# Start an AWS Backup job (Default vault, 7-day retention) for RESOURCE_ARN
# using IAM_ROLE_ARN, and echo the job details returned by the CLI.
create_backup()
{
    RESULT=$(aws backup start-backup-job --backup-vault-name Default --resource-arn $RESOURCE_ARN --lifecycle DeleteAfterDays=7 --iam-role-arn $IAM_ROLE_ARN)
    echo "BACKUP TRIGGERED: $RESULT"
}

# Ask the operator whether to run apt updates; anything other than "Y" skips.
install_updates()
{
    echo -n "Install OS Updates? (Y/n): "
    read -r INSTALL_UPDATES    # -r: do not mangle backslashes in the answer
    if [[ "$INSTALL_UPDATES" == "Y" ]]; then
        sudo apt-get -qq update
        sudo apt-get autoremove
        sudo apt upgrade
        sudo apt-get clean
    else
        echo "SKIPPED: Install OS Updates"
    fi
}

# If /var/run/reboot-required exists, show it and offer an immediate reboot.
check_reboot()
{
    if [ -f /var/run/reboot-required ]; then
        echo "----------------------------------------------------"
        cat /var/run/reboot-required
        echo "SYSTEM MUST BE REBOOTED!!! REBOOT NOW? (Y/n): "
        read -r DO_REBOOT
        if [[ "$DO_REBOOT" == "Y" ]]; then
            sudo reboot
        else
            echo "SKIPPED: System reboot"
        fi
    else
        echo "----------------------------------------------------"
        echo "NO REBOOT REQUIRED AT THIS TIME"
        echo "----------------------------------------------------"
    fi
}

#######################################
# Main programm
while getopts ':r:i:' OPTION; do
    case "${OPTION}" in
        r)
            RESOURCE_ARN="${OPTARG}"
            ;;
        i)
            IAM_ROLE_ARN="${OPTARG}"
            ;;
        *)
            # Fix: unknown options were silently ignored before.
            echo "usage: $0 -r <resource-arn> -i <iam-role-arn>" >&2
            exit 2
            ;;
    esac
done

# Fail early with a clear message instead of an opaque `set -u` error
# deep inside create_backup when -r/-i were not supplied.
: "${RESOURCE_ARN:?missing -r <resource-arn>}"
: "${IAM_ROLE_ARN:?missing -i <iam-role-arn>}"

create_backup
install_updates
check_reboot
| true |
8e7a3b92dc86b347cd52f5eda9649238692967b2 | Shell | lamemate/dotfiles | /aliases.zsh | UTF-8 | 1,061 | 2.625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | # CLI Shortcuts
# CLI shortcuts (macOS-oriented: pbcopy, brew, ipconfig, mDNSResponder).
alias zshconfig="vim ~/.zshrc"
alias copyssh="pbcopy < $HOME/.ssh/id_dsa.pub"
alias reloadcli="source $HOME/.zshrc"
# Flush the macOS DNS cache.
alias reloaddns="dscacheutil -flushcache && sudo killall -HUP mDNSResponder"
# GNU ls from coreutils (supports --group-directories-first).
alias ll="$(brew --prefix coreutils)/libexec/gnubin/ls -ahlF --color --group-directories-first"
alias weather="curl -4 http://wttr.in"
# Public IP (via OpenDNS) vs. local interface address.
alias ip="dig +short myip.opendns.com @resolver1.opendns.com"
alias localip="ipconfig getifaddr en1"
# One-shot updater for macOS, Homebrew (+casks), npm and RubyGems.
alias update="sudo softwareupdate -i -a; brew update; brew upgrade; brew cleanup; brew cu; brew cask cleanup; npm update npm -g; npm update -g; sudo gem update --system; sudo gem update"
alias cask="brew cask"
# NOTE(review): -A forwards the ssh agent on every connection — convenient
# but a security trade-off on untrusted hosts.
alias ssh="ssh -A"
# Directories
alias dotfiles="cd $DOTFILES"
alias library="cd $HOME/Library"
alias sites="cd $HOME/Sites"
alias db="cd ~/Dropbox"
alias dw="cd ~/Downloads"
# Git
alias glog="git log --graph --pretty=format:'%Cred%h%Creset %an: %s - %Creset %C(yellow)%d%Creset %Cgreen(%cr)%Creset' --abbrev-commit --date=relative"
# Colored diff with the leading +/-/space markers stripped.
alias gd='git diff --color | sed "s/^\([^-+ ]*\)[-+ ]/\\1/" | less -r'
| true |
156953ba48051cbb08e59197554fdaa788ef63a1 | Shell | xiang-123352/code_snippets | /bash/win-resize.sh | UTF-8 | 166 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Using xwininfo and wmctrl to resize a window
# Written by Yu-Jie Lin
# Public Domain
#
# usage: win-resize.sh WIDTH HEIGHT [X [Y]]
# After invocation, click the target window (wmctrl's :SELECT: mode).

# Width and height are mandatory; without this check wmctrl received an
# empty geometry string and failed obscurely.
if [ $# -lt 2 ]; then
  echo "usage: ${0##*/} WIDTH HEIGHT [X [Y]]" >&2
  exit 1
fi
W=$1
H=$2
X=${3:--1}   # -1 keeps the window's current X position
Y=${4:--1}   # -1 keeps the window's current Y position
wmctrl -r :SELECT: -e 0,$X,$Y,$W,$H
| true |
e29e3c55a4d4838f68d74f017d5d1681335b5022 | Shell | remodoy/conflused-updater | /crowd/update_crowd.sh | UTF-8 | 2,108 | 3.890625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# Update CROWD to most recent version.
#
# Expects a config file argument; helper functions (backup_crowd,
# latest_version, vercomp, servicemanager, restore_file, info, ...) come
# from crowd_common.sh / helpers.sh sourced below.
if [ -z "$1" ]
then
    echo "Usage $0 path/to/config.sh"
    exit 1
fi
export CONFIG_FILE="$1"
set -e
export THIS=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)
# Include commons
. ${THIS}/crowd_common.sh
# Include helpers
. ${THIS}/../helpers.sh
# Temp path for the downloaded tarball (-u: name only, file created by wget).
CROWD_TGZ="$(mktemp -u --suffix=.tar.gz)"
function post_cleanup() {
    rm $CROWD_TGZ || true
}
trap post_cleanup SIGINT SIGTERM
# Download newest
CROWD_NEW_VERSION="$(latest_version crowd)"
CROWD_DOWNLOAD_URL="$(latest_version_url crowd)"
# vercomp signals its result through the exit status, so suspend -e around it.
set +e
vercomp "$CROWD_VERSION" "$CROWD_NEW_VERSION" '<='
RES=$?
set -e
# RES < 2 means current <= newest did NOT indicate an upgrade is needed
# (per the vercomp protocol in helpers.sh) — nothing to do.
if [ $RES -lt 2 ]
then
    info "Current CROWD versio $CROWD_VERSION is up-to-date"
    exit 0
fi
CROWD_NEW="${CROWD_BASE}/crowd-${CROWD_NEW_VERSION}"
info "Downloading new CROWD"
wget -O "$CROWD_TGZ" "$CROWD_DOWNLOAD_URL"
# Do initial backup
backup_crowd
if [ "${CROWD_SERVICE_NAME}" != "disable" ]
then
    # Stop CROWD
    servicemanager "${CROWD_SERVICE_NAME}" stop
    # wait for CROWD to stop
    sleep 60
fi
# Backup CROWD again
backup_crowd
#Unzip new CROWD
mkdir "$CROWD_NEW"
info "Unzipping new CROWD"
tar --strip-components=1 -xf "$CROWD_TGZ" -C "$CROWD_NEW"
# Remove tempdir
rm "$CROWD_TGZ"
# Restore some files from previous version
info "Restoring some config files"
restore_file crowd-webapp/WEB-INF/classes/crowd-init.properties "${CROWD_PREVIOUS}" "${CROWD_NEW}"
restore_file apache-tomcat/bin/setenv.sh "${CROWD_PREVIOUS}" "${CROWD_NEW}"
restore_file apache-tomcat/conf/server.xml "${CROWD_PREVIOUS}" "${CROWD_NEW}"
info "Setting permissions..."
chown -R "$CROWD_USER" "${CROWD_NEW}/apache-tomcat/temp"
chown -R "$CROWD_USER" "${CROWD_NEW}/apache-tomcat/logs"
chown -R "$CROWD_USER" "${CROWD_NEW}/apache-tomcat/work"
# TODO: version specific stuff here!!
# Atomically switch the "current" symlink to the new install.
info "Updating current symlink"
rm ${CROWD_CURRENT}
ln -s ${CROWD_NEW} ${CROWD_CURRENT}
info "CROWD is now updated!"
if [ "${CROWD_SERVICE_NAME}" != "disable" ]
then
    info "Starting CROWD"
    servicemanager "${CROWD_SERVICE_NAME}" start
    info "Be patient, CROWD is starting up"
fi
| true |
e83f3f815e97b19e2d044e8fc1c7b381bcfbc146 | Shell | griddynamics/osc-robot-openstack-dashboard | /run_tests.sh | UTF-8 | 416 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Run both test suites of the project and combine their exit statuses.
cd django-openstack
python bootstrap.py
bin/buildout
bin/test
# get results of the django-openstack tests
OPENSTACK_RESULT=$?
cd ../openstack-dashboard
python tools/install_venv.py
cp local/local_settings.py.example local/local_settings.py
tools/with_venv.sh dashboard/manage.py test
# get results of the openstack-dashboard tests
DASHBOARD_RESULT=$?
# Arithmetic || yields 1 if either status is non-zero, 0 only when both
# suites passed — i.e. the script fails if any suite failed.
exit $(($OPENSTACK_RESULT || $DASHBOARD_RESULT))
| true |
ca70177b7abeb359c51fb68ac261210d65ed3239 | Shell | OCEANOFANYTHINGOFFICIAL/git-lfs | /t/t-checkout.sh | UTF-8 | 6,728 | 3.609375 | 4 | [
"MIT",
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
# Integration tests for `git lfs checkout`; helpers (begin_test/end_test,
# setup_remote_repo, clone_repo, assert_pointer, calc_oid, pointer, ...)
# come from testlib.sh.
. "$(dirname "$0")/testlib.sh"
# Basic checkout behaviour: restores LFS content for all files, for
# explicit paths/globs/folders, from a subdirectory context, and leaves
# pointer files in place when the local object store is missing.
begin_test "checkout"
(
  set -e
  reponame="$(basename "$0" ".sh")"
  setup_remote_repo "$reponame"
  clone_repo "$reponame" repo
  git lfs track "*.dat" 2>&1 | tee track.log
  grep "Tracking \"\*.dat\"" track.log
  contents="something something"
  contentsize=19
  contents_oid=$(calc_oid "$contents")
  # Same content everywhere is ok, just one object in lfs db
  printf "%s" "$contents" > file1.dat
  printf "%s" "$contents" > file2.dat
  printf "%s" "$contents" > file3.dat
  mkdir folder1 folder2
  printf "%s" "$contents" > folder1/nested.dat
  printf "%s" "$contents" > folder2/nested.dat
  git add file1.dat file2.dat file3.dat folder1/nested.dat folder2/nested.dat
  git add .gitattributes
  git commit -m "add files"
  [ "$contents" = "$(cat file1.dat)" ]
  [ "$contents" = "$(cat file2.dat)" ]
  [ "$contents" = "$(cat file3.dat)" ]
  [ "$contents" = "$(cat folder1/nested.dat)" ]
  [ "$contents" = "$(cat folder2/nested.dat)" ]
  assert_pointer "main" "file1.dat" "$contents_oid" $contentsize
  # Remove the working directory
  rm -rf file1.dat file2.dat file3.dat folder1/nested.dat folder2/nested.dat
  echo "checkout should replace all"
  GIT_TRACE=1 git lfs checkout 2>&1 | tee checkout.log
  [ "$contents" = "$(cat file1.dat)" ]
  [ "$contents" = "$(cat file2.dat)" ]
  [ "$contents" = "$(cat file3.dat)" ]
  [ "$contents" = "$(cat folder1/nested.dat)" ]
  [ "$contents" = "$(cat folder2/nested.dat)" ]
  grep "Checking out LFS objects: 100% (5/5), 95 B" checkout.log
  grep 'accepting "file1.dat"' checkout.log
  ! grep 'rejecting "file1.dat"' checkout.log
  # Remove the working directory
  rm -rf file1.dat file2.dat file3.dat folder1/nested.dat folder2/nested.dat
  echo "checkout with filters"
  git lfs checkout file2.dat
  [ "$contents" = "$(cat file2.dat)" ]
  [ ! -f file1.dat ]
  [ ! -f file3.dat ]
  [ ! -f folder1/nested.dat ]
  [ ! -f folder2/nested.dat ]
  echo "quotes to avoid shell globbing"
  git lfs checkout "file*.dat"
  [ "$contents" = "$(cat file1.dat)" ]
  [ "$contents" = "$(cat file3.dat)" ]
  [ ! -f folder1/nested.dat ]
  [ ! -f folder2/nested.dat ]
  echo "test subdir context"
  pushd folder1
  git lfs checkout nested.dat
  [ "$contents" = "$(cat nested.dat)" ]
  [ ! -f ../folder2/nested.dat ]
  # test '.' in current dir
  rm nested.dat
  git lfs checkout . 2>&1 | tee checkout.log
  [ "$contents" = "$(cat nested.dat)" ]
  popd
  echo "test folder param"
  git lfs checkout folder2
  [ "$contents" = "$(cat folder2/nested.dat)" ]
  echo "test '.' in current dir"
  rm -rf file1.dat file2.dat file3.dat folder1/nested.dat folder2/nested.dat
  git lfs checkout .
  [ "$contents" = "$(cat file1.dat)" ]
  [ "$contents" = "$(cat file2.dat)" ]
  [ "$contents" = "$(cat file3.dat)" ]
  [ "$contents" = "$(cat folder1/nested.dat)" ]
  [ "$contents" = "$(cat folder2/nested.dat)" ]
  echo "test checkout with missing data doesn't fail"
  # With the local object store gone, checkout must leave pointer files
  # for the missing objects and still restore what it can.
  git push origin main
  rm -rf .git/lfs/objects
  rm file*.dat
  git lfs checkout
  [ "$(pointer $contents_oid $contentsize)" = "$(cat file1.dat)" ]
  [ "$(pointer $contents_oid $contentsize)" = "$(cat file2.dat)" ]
  [ "$(pointer $contents_oid $contentsize)" = "$(cat file3.dat)" ]
  [ "$contents" = "$(cat folder1/nested.dat)" ]
  [ "$contents" = "$(cat folder2/nested.dat)" ]
)
end_test
# When Git LFS is uninstalled (no clean/smudge filters configured),
# `git lfs checkout` must still succeed but only warn and leave pointer
# files in the working tree.
begin_test "checkout: without clean filter"
(
  set -e
  reponame="$(basename "$0" ".sh")"
  git lfs uninstall
  git clone "$GITSERVER/$reponame" checkout-without-clean
  cd checkout-without-clean
  echo "checkout without clean filter"
  git lfs uninstall
  git config --list > config.txt
  grep "filter.lfs.clean" config.txt && {
    echo "clean filter still configured:"
    cat config.txt
    exit 1
  }
  ls -al
  git lfs checkout | tee checkout.txt
  grep "Git LFS is not installed" checkout.txt
  # PIPESTATUS[0] is the exit status of `git lfs checkout` itself
  # (tee's status would mask it otherwise).
  if [ "0" -ne "${PIPESTATUS[0]}" ]; then
    echo >&2 "fatal: expected checkout to succeed ..."
    exit 1
  fi
  contentsize=19
  contents_oid=$(calc_oid "something something")
  [ "$(pointer $contents_oid $contentsize)" = "$(cat file1.dat)" ]
  [ "$(pointer $contents_oid $contentsize)" = "$(cat file2.dat)" ]
  [ "$(pointer $contents_oid $contentsize)" = "$(cat file3.dat)" ]
  [ "$(pointer $contents_oid $contentsize)" = "$(cat folder1/nested.dat)" ]
  [ "$(pointer $contents_oid $contentsize)" = "$(cat folder2/nested.dat)" ]
)
end_test
# Running `git lfs checkout` outside any git repository must fail with
# git's standard exit code 128 and a clear message.
begin_test "checkout: outside git repository"
(
  set +e
  git lfs checkout 2>&1 > checkout.log
  res=$?
  set -e
  if [ "$res" = "0" ]; then
    echo "Passes because $GIT_LFS_TEST_DIR is unset."
    exit 0
  fi
  [ "$res" = "128" ]
  grep "Not in a git repository" checkout.log
)
end_test
# Checkout must replace the content of a read-only (write-protected)
# pointer file without altering its permission bits.
begin_test "checkout: write-only file"
(
  set -e
  reponame="checkout-locked"
  filename="a.txt"
  setup_remote_repo_with_file "$reponame" "$filename"
  pushd "$TRASHDIR" > /dev/null
    GIT_LFS_SKIP_SMUDGE=1 clone_repo "$reponame" "${reponame}_checkout"
    chmod -w "$filename"
    refute_file_writeable "$filename"
    assert_pointer "refs/heads/main" "$filename" "$(calc_oid "$filename\n")" 6
    git lfs fetch
    git lfs checkout "$filename"
    refute_file_writeable "$filename"
    [ "$filename" = "$(cat "$filename")" ]
  popd > /dev/null
)
end_test
# During a merge conflict, `git lfs checkout --base/--ours/--theirs --to`
# must extract the corresponding conflict stage of an LFS file; outside a
# merge it must refuse with an explanatory error.
begin_test "checkout: conflicts"
(
  set -e
  reponame="checkout-conflicts"
  filename="file1.dat"
  setup_remote_repo_with_file "$reponame" "$filename"
  pushd "$TRASHDIR" > /dev/null
    clone_repo "$reponame" "${reponame}_checkout"
    git tag base
    git checkout -b first
    echo "abc123" > file1.dat
    git add -u
    git commit -m "first"
    git lfs checkout --to base.txt --base file1.dat 2>&1 | tee output.txt
    grep 'Could not checkout.*not in the middle of a merge' output.txt
    git checkout -b second main
    echo "def456" > file1.dat
    git add -u
    git commit -m "second"
    # This will cause a conflict.
    ! git merge first
    git lfs checkout --to base.txt --base file1.dat
    git lfs checkout --to ours.txt --ours file1.dat
    git lfs checkout --to theirs.txt --theirs file1.dat
    # base = original content, ours = "second" branch, theirs = "first".
    echo "file1.dat" | cmp - base.txt
    echo "abc123" | cmp - theirs.txt
    echo "def456" | cmp - ours.txt
  popd > /dev/null
)
end_test
# Checkout must honor a split GIT_WORK_TREE / GIT_DIR layout (worktree and
# .git directory living in separate locations).
begin_test "checkout: GIT_WORK_TREE"
(
  set -e
  reponame="checkout-work-tree"
  remotename="$(basename "$0" ".sh")"
  export GIT_WORK_TREE="$reponame" GIT_DIR="$reponame-git"
  mkdir "$GIT_WORK_TREE" "$GIT_DIR"
  git init
  git remote add origin "$GITSERVER/$remotename"
  # Skip smudging on fetch/checkout so we can exercise `git lfs checkout`.
  git lfs uninstall --skip-repo
  git fetch origin
  git checkout -B main origin/main
  git lfs install
  git lfs fetch
  git lfs checkout
  contents="something something"
  [ "$contents" = "$(cat "$reponame/file1.dat")" ]
)
end_test
| true |
c0127e5439151f9df5ddf50ade1a058d4bcd775f | Shell | the-dilo/micrometre-software-development-stack | /ruby development Vagrant/bootstrap.sh | UTF-8 | 1,598 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env bash
# Provision a development VM: build dependencies, Python 3, Node.js 6.x
# with a few global npm packages, then quiet-of-life and locale cleanup.
# dependencies for building
apt-get update
apt-get install -y git-core curl zlib1g-dev build-essential checkinstall automake libssl-dev libreadline-dev libyaml-dev cmake wget unzip openssh-server bash-completion
# (duplicate libssl-dev/libffi-dev entries removed from the original list)
apt-get install -y libsqlite3-dev sqlite3 libxml2-dev libxslt1-dev libcurl4-openssl-dev python-software-properties libffi-dev
# Python, interactive object oriented language, for network programming,
# system administration, etc.
apt-get install -y python3.5 python-dev python3-dev python3-pip
apt-get update
# install nodejs (the NodeSource setup script registers the apt repo)
cd ~
curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash -
apt-get update
apt-get install -y nodejs
node -v
# install global npm packages (-g)
npm install -g express
# NOTE(review): "jason" is a real npm package, but confirm it is not a
# typo for "json".
npm install -g jason
npm install -g npm-check-updates
apt-get update
###############################################################################
# Get rid of annoyances and extraneous error messages
###############################################################################
echo "remove \"stdin is not a tty\" error message"
sed -i 's/^mesg n$//g' /root/.profile
echo "set locale to en_US"
# http://serverfault.com/questions/500764/dpkg-reconfigure-unable-to-re-open-stdin-no-file-or-directory
# Set the LC_CTYPE so that auto-completion works and such.
export LANGUAGE=en_US.UTF-8
export LANG=en_US.UTF-8
export LC_ALL=en_US.UTF-8
# Fixed: the original ran `locale-gen en_US.UTF_8 en_US.UTF-8`;
# "en_US.UTF_8" (underscore) is not a valid locale name.
locale-gen en_US.UTF-8
dpkg-reconfigure locales
update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
echo -e "LC_ALL=\"en_US.UTF-8\"\nLANG=\"en_US.UTF-8\"" > /etc/default/locale
| true |
d69b8ebad91693e6ff2333f6da27536aaa1c43a1 | Shell | KeepPositive/Tad-OS | /build_scripts/extra/py_setuptools.sh | UTF-8 | 344 | 3.109375 | 3 | [
"MIT"
] | permissive | #! /bin/bash
## Start variables
PACKAGE="setuptools"
# Fail fast with a usage hint when the version argument is missing;
# otherwise FOLD_NAME would be "setuptools-" and tar/rm below would act
# on a bogus path.
VERSION=${1:?"usage: $0 <version>"}
FOLD_NAME="$PACKAGE-$VERSION"
# CORES mirrors the sibling build scripts; this package never invokes
# make, so the value is unused here.
if [ -z "$CORES" ]
then
CORES=4
fi
## End variables

## Start script
# Abort on the first failed command so a bad extract cannot lead to
# installing from — or deleting — the wrong directory.
set -e
tar xf "$PACKAGE_DIR/$FOLD_NAME.tar.gz"
pushd "$FOLD_NAME"
# Install for both interpreters; --optimize=1 byte-compiles with -O.
python2 setup.py install --optimize=1
python3 setup.py install --optimize=1
popd
# Remove the extracted tree; the tarball in $PACKAGE_DIR is kept.
rm -rf "$FOLD_NAME"
## End script
| true |
7ffb7cf53d443ef424aaf45fb8a13c5ab1ca813d | Shell | delkyd/alfheim_linux-PKGBUILDS | /python2-humanize/PKGBUILD | UTF-8 | 1,093 | 2.75 | 3 | [] | no_license | # Maintainer: Morten Linderud <morten@linderud.pw>
# Contributor: Javier Tia <javier dot tia at gmail dot com>

# Upstream project name; the Arch package adds the python2- prefix.
_pkgname=humanize
pkgname="python2-${_pkgname}"
pkgver=0.5.1
pkgrel=7
pkgdesc='a modest package contains various common humanization utilities'
url='https://github.com/jmoiron/humanize'
# Pure Python package, hence arch 'any'.
arch=('any')
license=('MIT')
depends=('python2')
# sphinx renders the HTML manual in build(); setuptools drives install.
makedepends=('python2-setuptools' 'python2-sphinx')
# python2-tox provides the tox2 entry point used by check().
checkdepends=('python2-tox')
# Tagged GitHub release tarball for $pkgver.
source=("$pkgname-$pkgver.tar.gz::https://github.com/jmoiron/humanize/archive/$pkgver.tar.gz")
sha256sums=('e3f3ac75e647f75eec48c3950385ab7585c1c1cde2a6c1479c1f58e055a3e868')
# Run the upstream test suite under Python 2.7, using the tox2 entry
# point supplied by the python2-tox checkdepends.
check() {
cd "${srcdir}/${_pkgname}-${pkgver}"
tox2 -e py27
}
# Compile the Python 2 modules and render the Sphinx HTML manual.
build() {
  local _builddir="${srcdir}/${_pkgname}-${pkgver}"
  cd "${_builddir}"
  python2 setup.py build
  make -C ./docs html
}
# Install into $pkgdir, ship the rendered HTML docs, and install the
# license text under the path Arch expects.
package() {
  local _docdir="${pkgdir}/usr/share/doc/${pkgname}"
  cd "${srcdir}/${_pkgname}-${pkgver}"
  python2 setup.py install --root="${pkgdir}/" --optimize=1 --skip-build
  install -d "${_docdir}"
  mv ./docs/_build/html/* "${_docdir}"
  # Upstream spells the file "LICENCE"; install it as LICENSE.
  install -Dm 644 LICENCE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
# vim:set ft=sh ts=2 sw=2 et:
| true |
e6a054eafd85541c63977bf76fd5f0bcad4ca9b9 | Shell | ardan09/Database_KIB_2021 | /Week1/solutions/hw1_dorzhiev_ardan/coffeshop.sh | UTF-8 | 1,427 | 3.640625 | 4 | [] | no_license | echo 'Welcome to the Dave Coffeeshop!'
# Interactive order logger.  LOG appends "barista;drink;price" records to
# davecoffeeshop.db, EOD prints per-barista revenue totals (highest
# first), EOS ends the session.
if [ -f davecoffeeshop.db ]; then
  echo 'davecoffeeshop.db exists, using existing database'
else
  echo 'davecoffeeshop.db not exists => creating db file'
  touch davecoffeeshop.db
  echo 'success'
fi
echo 'To log order, enter LOG
To end session, enter EOS
To end business day, enter EOD'
# read -r keeps backslashes literal; quoting "$command" below prevents a
# test-expression error when the user enters an empty line.
while read -r command
do
  if [ "$command" = 'LOG' ]; then
    echo 'Enter Barista'
    read -r barista
    echo 'Enter Drink'
    read -r in_drink
    echo "Barista name: $barista"
    echo "Drinking: $in_drink"
    # Price list: unknown drink codes fall through to price=0 and the
    # order is rejected below.
    case "$in_drink" in
      "AME")
        price=100
        ;;
      "ESP")
        price=120
        ;;
      "CAP")
        price=150
        ;;
      "LAT")
        price=170
        ;;
      "RAF")
        price=200
        ;;
      *)
        price=0
        ;;
    esac
    if (( price > 0 )); then
      echo "$barista;$in_drink;$price" >> davecoffeeshop.db
      echo 'Order was saved!'
    else
      echo Invalid drink c, ignoring
    fi
  elif [ "$command" = 'EOS' ]; then
    echo 'End of Session'
    exit
  elif [ "$command" = 'EOD' ]; then
    echo 'End of Day!'
    # Group adjacent records per barista and sum column 3 (price), then
    # order baristas by revenue, highest first.  sort now reads the file
    # directly (the previous `cat | sort` was redundant).
    sort -nk1 -t ";" davecoffeeshop.db | awk -F";" 'BEGIN {OFS=";" }\
($1 == last || last == "") {sum += $3} ($1 != last && last != "")\
{print last, sum; sum = $3} {last = $1} END {print last, sum}' | sort -nrk2 -t ";"
  else
    echo 'WRONG COMMAND. USE ONLY LOG, EOD, EOS'
  fi
done
| true |
aab3339b9ac133e35a87c46c615ac31f3ebc9d17 | Shell | attornatus/code4tuba | /venda/src/main/docker/entrypoint.sh | UTF-8 | 222 | 3.296875 | 3 | [] | no_license | #!/bin/sh
# Docker entrypoint: when CONFIG_SERVICE is set, block until the config
# service answers (or give up), then hand control to the container
# command.
if [ -n "${CONFIG_SERVICE}" ]; then
    echo "Checking if config service is up. Timeout in 60s"
    # curl retries up to 20 times with a 3s connect timeout per attempt;
    # a final failure aborts the container start.
    if ! curl --retry 20 --connect-timeout 3 -s "${CONFIG_SERVICE}"; then
        exit 1
    fi
fi
exec "$@"
| true |
2cf7bcf8868a657e3ab0926d83e9c2167a05452d | Shell | Ankits-lab/build_soong | /cuj/run_cuj_tests.sh | UTF-8 | 595 | 3.109375 | 3 | [] | no_license | #!/bin/bash -e
# Build and run Soong's CUJ tests for the host OS, then copy their logs
# into DIST_DIR when one is provided.
readonly UNAME="$(uname)"
case "$UNAME" in
  Linux)
    readonly OS='linux'
    ;;
  Darwin)
    readonly OS='darwin'
    ;;
  *)
    # Diagnostics go to stderr so they cannot be mistaken for output.
    echo "Unsupported OS '$UNAME'" >&2
    exit 1
    ;;
esac

# The tree root is three levels above this script; dirname/$0 are quoted
# so a checkout path containing spaces cannot break word-splitting.
readonly ANDROID_TOP="$(cd "$(dirname "$0")"/../../..; pwd)"
cd "$ANDROID_TOP"

export OUT_DIR="${OUT_DIR:-out}"
readonly SOONG_OUT="${OUT_DIR}/soong"

build/soong/soong_ui.bash --make-mode "${SOONG_OUT}/host/${OS}-x86/bin/cuj_tests"
# Best effort: the log copy below still runs when the tests fail
# (the script executes under `bash -e`, see the shebang).
"${SOONG_OUT}/host/${OS}-x86/bin/cuj_tests" || true

if [ -n "${DIST_DIR}" ]; then
  cp -r "${OUT_DIR}/cuj_tests/logs" "${DIST_DIR}"
fi
| true |
c122ae605c7a3f84efa76247883be3c2338d83aa | Shell | gdemir/pro-lang | /shell/odev2.sh | UTF-8 | 206 | 3.28125 | 3 | [] | no_license | #!/bin/sh
# Prints every entry under /usr/bin whose (base)name is shorter than
# 3 characters.  (Comments translated from the original Turkish.)
path=/usr/bin/*
# $path stores the literal pattern; the unquoted expansion below is what
# performs the glob when the loop runs.
for i in $path; do
son=$(basename "$i")
[ "${#son}" -lt 3 ] && echo "3 den kucuk $son"
done
| true |
e7a848a27e2007d55f15e811824c6be23a292454 | Shell | aur-archive/bitstream-git | /PKGBUILD | UTF-8 | 698 | 2.984375 | 3 | [] | no_license | # Contributor: Huei-Horng Yo <hiroshiyui@gmail.com>
pkgname=bitstream-git
# NOTE(review): static pkgver on a -git package; VCS packages normally
# derive the version in a pkgver() function — confirm this is intended.
pkgver=1.0
pkgrel=1
pkgdesc='biTStream is a set of C headers allowing a simpler access to binary structures such as specified by MPEG, DVB, IETF, etc.'
license=('MIT')
arch=('i686' 'x86_64')
url="http://www.videolan.org/developers/bitstream.html"
# Replaces/conflicts with the non-VCS package of the same project.
conflicts=('bitstream')
# Header-only (see pkgdesc): nothing is required at runtime.
depends=()
# git is needed to clone/pull the sources in build().
makedepends=('git')
# Fetch or refresh the upstream sources: update an existing clone inside
# $srcdir, otherwise clone it fresh.  ${pkgname} is quoted throughout as
# a matter of hygiene.
build() {
  cd "$srcdir"
  if [ -e "${pkgname}" ]; then
    cd "${pkgname}"
    git pull
  else
    # NOTE(review): the unauthenticated git:// protocol is widely
    # retired; verify the VideoLAN server still serves it.
    git clone git://git.videolan.org/bitstream.git "${pkgname}"
    cd "${pkgname}"
  fi
}
# Install the headers via the upstream Makefile and ship the MIT license
# text under the standard Arch path.
package() {
  cd "$srcdir/${pkgname}"
  make PREFIX=usr DESTDIR="$pkgdir" install
  install -Dm644 COPYING "$pkgdir/usr/share/licenses/$pkgname/COPYING"
}
| true |
11a6f9956abfb398be68608e1f7931f1a20838b9 | Shell | Tieske/deck | /.ci/setup_kong_ee.sh | UTF-8 | 1,911 | 3.1875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Abort the whole setup on the first failed command.
set -e

# Self-signed certificate/key pair used purely as a test fixture: both
# values are handed to the Kong container below via the MY_SECRET_CERT /
# MY_SECRET_KEY environment variables.  The '''...''' quoting is
# effectively single quoting: the outer pairs of '' concatenate empty
# strings around one single-quoted block, so the PEM text is literal.
MY_SECRET_CERT='''-----BEGIN CERTIFICATE-----
MIIEczCCAlugAwIBAgIJAMw8/GAiHIFBMA0GCSqGSIb3DQEBCwUAMDYxCzAJBgNV
BAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQDDAlsb2NhbGhvc3Qw
HhcNMjIxMDA0MTg1MjI5WhcNMjcxMDAzMTg1MjI5WjA2MQswCQYDVQQGEwJVUzET
MBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkq
hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3LUv6RauFfFn4a2BNSTE5oQNhASBh2Lk
0Gd0tfPcmTzJbohFwyAGskYj0NBRxnRVZdLPeoZIQyYSaiPWyeDITnXyKk3Nh9Zk
xQ03YsbZCk9jIsp78/ECdnYCCS4dpYGswu8b37dxUta+6AhEEte73ezrAhc+ZIy5
2yjcix4P5+vfhBf0EzBT8D7z+wZjji3F/A969EqphFwPz3KudkTOR6d0bQEVbN3x
cg4lcj49RzwS4sPbq6ub52QrKcx8s+d9bqC/nhHLn1HM/eef+cxROedcIWZs5RvG
mk/H+K2lcKL33gIcXgzSeunobV+8xwwoYk4GZroXjavUkgelKKjQBQIDAQABo4GD
MIGAMFAGA1UdIwRJMEehOqQ4MDYxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxp
Zm9ybmlhMRIwEAYDVQQDDAlsb2NhbGhvc3SCCQCjgi452nKnUDAJBgNVHRMEAjAA
MAsGA1UdDwQEAwIE8DAUBgNVHREEDTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQEL
BQADggIBAJiKfxuh2g0fYcR7s6iiiOMhT1ZZoXhyDhoHUUMlyN9Lm549PBQHOX6V
f/g+jqVawIrGLaQbTbPoq0XPTZohk4aYwSgU2Th3Ku8Q73FfO0MM1jjop3WCatNF
VZj/GBc9uVunOV5aMcMIs2dFU2fwH4ZjOwnv7rJRldoW1Qlk3uWeIktbmv4yZKvu
FWPuo3ks+7B+BniqKXuYkNcuhlE+iVr3kJ55qRgX1RxKo4CB3Tynkp7sikp4al7x
jlHSM9YAqvPFFMOhlU2U3SxE4CLasL99zP0ChINKp9XqzW/qo+F0/Jd4rZmddU2f
M9Cx62cc0L5IlsHLVJj3zwHZzc/ifpBUeebB98IjoQAfiRkbX0Oe/c2TxtR4o/RH
GWNeKCThdliZkXaLiOPswOV1BYfA00etorcY0CIy0aTaZgfvrYsJe4aT/hkF/JvF
tHJ/iD67m8RhLysRL/w50+quVMluUDqJps0HhKrB9wzNJWrddWhvplVeuOXEJfTM
i80W1JE4OApdISMEn56vRi+BMQMgIeYWznfyQnI4G3rUJQFMI5KzLxkvfYNTF3Ci
3Am0KaJ7X2oLOq4Qc6CM3qDkvvId61COlfJb2Fo3ETnoT66mxcb6bjtz1WTWOopm
UcmBKErRUKksINUxwuvP/VW007tXOjZH7wmiM2IW5LUZVkbhB1iE
-----END CERTIFICATE-----'''
# Matching private key for the fixture certificate above (test-only; do
# not reuse outside this test environment).
MY_SECRET_KEY='''-----BEGIN RSA PRIVATE KEY-----
MIIEpgIBAAKCAQEA3LUv6RauFfFn4a2BNSTE5oQNhASBh2Lk0Gd0tfPcmTzJbohF
wyAGskYj0NBRxnRVZdLPeoZIQyYSaiPWyeDITnXyKk3Nh9ZkxQ03YsbZCk9jIsp7
8/ECdnYCCS4dpYGswu8b37dxUta+6AhEEte73ezrAhc+ZIy52yjcix4P5+vfhBf0
EzBT8D7z+wZjji3F/A969EqphFwPz3KudkTOR6d0bQEVbN3xcg4lcj49RzwS4sPb
q6ub52QrKcx8s+d9bqC/nhHLn1HM/eef+cxROedcIWZs5RvGmk/H+K2lcKL33gIc
XgzSeunobV+8xwwoYk4GZroXjavUkgelKKjQBQIDAQABAoIBAQDcd/nmAwvfS4iT
vTgGmDZAdsTxjXa+gSFEtTO21mUUhc5JpcLaSdGmn74DRzWI4oiz8EPlhuIEgbF/
aVGT1AEDr3o6nAGloZqD5NHgz/XbALZs+IudgLEPGI6sEO74d3LWPvg/IAYJ1A5b
xnYJxIscAyA2tHVVB+ZYcJbuORd2eSKeZSjfEfX8/DN8sKD+4DK9g/GiVlOJBG/d
bSoZmcRv3HXpnSKTvCydkxbBliD/8H7jRvkCOi2VcYT9+08rucwXc6v+q9wiQ/b7
hPdBn6KqDKRO9HPZYVkztlsdHXnthq16QyNPOk2umggfyXMIPhYBcW/dZ5xNqxBD
KiInqjbBAoGBAP3s/FS8GvFJ80pwtA0AUtB7Lo3ZASSs9dq+Dr8binPpp/1qz3hJ
Q/gRC9EP63MOWA2PK22D4qsjTfrBpqxLaalZCbCJGDGT+2knTN+qsOJ3//qI5zjj
cFEcnWcJ3bI5eLAU/2GKViyXzdGlZxBbc4zKBUSyxMAUewr3PsqEO0SJAoGBAN6C
vEYAbNuCZagtDrhhGYb+8rbSKZA7K4QjJcLTyZZ+z4ohWRW9tVyEXkszIwCRrs3y
rhHJU+z1bJOXxIin1i5tCzVlG6iNLct9dm2Z5v4hbDGNw4HhIV0l+tXrVGhkM/1v
vbRhldQA0H9iwWx+bKNS3lVLeUvYu4udmzrY74idAoGBAJ/8zQ9mZWNZwJxKXmdC
qOsKcc6Vx46gG1dzID9wzs8xjNKylX2oS9bkhpl2elbH1trUNfyOeCZz3BH+KVGt
QimdG+nKtx+lqWYbiOfz1/cYvIPR9j11r7KrYNEm+jPs2gm3cSC31IvMKbXJjSJV
PHycXK1oJWcQgGXsWfenUOBhAoGBAKezvRa9Z04h/2A7ZWbNuCGosWHdD/pmvit/
Ggy29q54sQ8Yhz39l109Xpwq1GyvYCJUj6FULe7gIo8yyat9Y83l3ZbGt4vXq/Y8
fy+n2RMcOaE3iWywMyczYtQr45gyPYT73OzAx93bJ0l7MvEEb/jAklWS5r6lgOR/
SumVayN5AoGBALLaG16NDrV2L3u/xzxw7uy5b3prpEi4wgZd4i72XaK+dMqtNVYy
KlBs7O9y+fc4AIIn6JD+9tymB1TWEn1B+3Vv6jmtzbztuCQTbJ6rTT3CFcE6TdyJ
8rYuG3/p2VkcG29TWbQARtj5ewv9p5QNfaecUzN+tps89YzawWQBanwI
-----END RSA PRIVATE KEY-----'''

# KONG_IMAGE must arrive via the environment; the self-assignment just
# documents that expectation (note: `set -u` is not enabled here).
KONG_IMAGE=${KONG_IMAGE}
NETWORK_NAME=deck-test
PG_CONTAINER_NAME=pg
DATABASE_USER=kong
DATABASE_NAME=kong
KONG_DB_PASSWORD=kong
# Resolves to the postgres container ($PG_CONTAINER_NAME) on the network.
KONG_PG_HOST=pg
GATEWAY_CONTAINER_NAME=kong
# create docker network
docker network create $NETWORK_NAME
# Poll a TCP port on localhost until it accepts a connection, for up to
# 100 attempts.
#   $1 - human-readable service name (log output only)
#   $2 - TCP port on localhost to probe
#   $3 - seconds to sleep between attempts
# NOTE(review): nc is used without -z, so a successful connect only
# returns promptly when stdin is at EOF (e.g. /dev/null in CI) —
# consider `nc -z` if this ever runs interactively.
waitContainer() {
for try in {1..100}; do
echo "waiting for $1.."
nc localhost "$2" && break;
sleep "$3"
done
}
# Start a PostgreSQL container
# (detached, auto-removed on stop; credentials match the KONG_* settings
# defined above)
docker run --rm -d --name $PG_CONTAINER_NAME \
--network=$NETWORK_NAME \
-p 5432:5432 \
-e "POSTGRES_USER=$DATABASE_USER" \
-e "POSTGRES_DB=$DATABASE_NAME" \
-e "POSTGRES_PASSWORD=$KONG_DB_PASSWORD" \
postgres:9.6
waitContainer "PostgreSQL" 5432 0.2
# Prepare the Kong database
# (one-shot container: runs the schema migrations against Postgres and
# then exits; KONG_LICENSE_DATA must be supplied via the environment)
docker run --rm --network=$NETWORK_NAME \
-e "KONG_DATABASE=postgres" \
-e "KONG_PG_HOST=$KONG_PG_HOST" \
-e "KONG_PG_PASSWORD=$KONG_DB_PASSWORD" \
-e "KONG_PASSWORD=$KONG_DB_PASSWORD" \
-e "KONG_LICENSE_DATA=$KONG_LICENSE_DATA" \
$KONG_IMAGE kong migrations bootstrap
# Start Kong Gateway EE
# Published ports: 8001/8444 admin API (see KONG_ADMIN_LISTEN), 8002/8445
# admin GUI, 8003/8004 portal; 8000/8443 are presumably the proxy
# listeners (Kong defaults).  MY_SECRET_CERT/KEY expose the test fixture
# certificate and key defined above to the container.
docker run -d --name $GATEWAY_CONTAINER_NAME \
--network=$NETWORK_NAME \
-e "KONG_DATABASE=postgres" \
-e "KONG_PG_HOST=$KONG_PG_HOST" \
-e "KONG_PG_USER=$DATABASE_USER" \
-e "KONG_PG_PASSWORD=$KONG_DB_PASSWORD" \
-e "KONG_PROXY_ACCESS_LOG=/dev/stdout" \
-e "KONG_ADMIN_ACCESS_LOG=/dev/stdout" \
-e "KONG_PROXY_ERROR_LOG=/dev/stderr" \
-e "KONG_ADMIN_ERROR_LOG=/dev/stderr" \
-e "KONG_ADMIN_LISTEN=0.0.0.0:8001" \
-e "KONG_PORTAL_GUI_URI=127.0.0.1:8003" \
-e "KONG_ADMIN_GUI_URL=http://127.0.0.1:8002" \
-e "KONG_LICENSE_DATA=$KONG_LICENSE_DATA" \
-e "MY_SECRET_CERT=$MY_SECRET_CERT" \
-e "MY_SECRET_KEY=$MY_SECRET_KEY" \
-p 8000:8000 \
-p 8443:8443 \
-p 8001:8001 \
-p 8444:8444 \
-p 8002:8002 \
-p 8445:8445 \
-p 8003:8003 \
-p 8004:8004 \
$KONG_IMAGE
# Block until the admin API answers on 8001.
waitContainer "Kong" 8001 0.2
| true |
4eb9166371c30bf91d40f2b176f3f6ece9bebf5b | Shell | thinetic-systems/thindistro | /scripts/thindistro-bottom/30user | UTF-8 | 2,171 | 3.234375 | 3 | [] | no_license | #!/bin/sh
#
# initramfs "bottom" hook: writes an init script into the target root
# (${rootmnt}) that creates the live-session user on first boot.
quiet=n
. /conf/initramfs.conf
# Unquoted EOF delimiter: ${rootmnt} (and any other unescaped $...) is
# expanded NOW, while every \$-escaped expansion survives verbatim into
# the generated /etc/rc2.d/S10adduser.sh.  Do not add comment lines
# inside the here-doc — they would become part of the generated file.
# NB: in the second group loop below the generated script adds the user
# to "admin" on every iteration rather than to \$group — preserved as-is.
cat <<EOF > ${rootmnt}/etc/rc2.d/S10adduser.sh
#!/bin/sh
#
# adduser.sh Add a new user for the live session.
#
# Version: @(#)adduser.sh 1.0 15-Oct-2005 juanje@interactors.coop
# Based in casper script
#
VERBOSE=yes
TMPTIME=0
[ -f /etc/default/rcS ] && . /etc/default/rcS
. /lib/lsb/init-functions
#. /usr/share/debconf/confmodule
#
# Add the user for the live session
#
# load meta settings
[ -f /.dirs/dev/system/settings.conf ] && . /.dirs/dev/system/settings.conf
USERNAME=\${conf_username}
# Avoid empty liveuser
if [ "\$USERNAME" = "" ] ; then
USERNAME=thinetic
fi
#log_begin_msg "Adding user \"\$USERNAME\"..."
# Comment for testing better
#for question in username user-fullname user-password user-password-again; do
# db_set passwd/\$question $USERNAME
# db_fset passwd/\$question seen true
#done
#
#db_set passwd/md5 true
#db_fset passwd/md5 seen true
#
##DEBCONF_RECONFIGURE=1 dpkg-reconfigure -fpassthrough passwd
#version="\$(dpkg --status passwd | grep ^Version: | sed 's/^Version: //')"
#CODE=0
#DEBCONF_RECONFIGURE=1 /var/lib/dpkg/info/passwd.config reconfigure "\$version"
# Ugly way, but necessary by now, and anyway is the way as internally passwd package does....
# Create the user
if test -x /usr/sbin/adduser; then
adduser --disabled-password --gecos "\$USERNAME" "\$USERNAME" >/dev/null 2>&1
else
useradd -c "\$USERNAME" -m "\$USERNAME" >/dev/null 2>&1
fi
# Setup the password
echo "\$USERNAME:\$USERNAME" | chpasswd --md5 >/dev/null 2>&1
# Add to the groups
if test -x /usr/sbin/adduser; then
for group in adm audio cdrom dialout floppy video plugdev dip fuse; do
adduser "\$USERNAME" \$group >/dev/null 2>&1
done
for group in lpadmin scanner admin ; do
addgroup --system \$group >/dev/null 2>&1
adduser "\$USERNAME" admin >/dev/null 2>&1
done
else
log_end_msg 1
exit 1
fi
if [ -f /etc/sudoers ]; then
if grep -q \%admin /etc/sudoers; then
sed -i -e '/^%admin/s/ALL$/NOPASSWD: ALL/' /etc/sudoers
else
echo "%admin ALL=NOPASSWD: ALL" >> /etc/sudoers
fi
fi
#log_end_msg 0
exit 0
EOF
# The generated script must be executable so rc2.d runs it at boot.
chmod +x ${rootmnt}/etc/rc2.d/S10adduser.sh
exit 0
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.