blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ee0b67c3a1e03c24a3187148db0b722e9ced179b
|
Shell
|
yfyau/flutter_utils
|
/certificates/openssl_cer_to_pem.command
|
UTF-8
| 217
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
# Convert every DER-encoded .cer certificate in this script's directory to
# PEM format (same basename, .pem extension) using openssl.

# Work from the directory the script lives in, not the caller's cwd.
CURR_PATH="$(dirname "$0")"
cd "$CURR_PATH" || exit 1
echo "Script Path - $CURR_PATH"

for cer in *.cer; do
  # If nothing matches, the literal pattern '*.cer' remains — skip it.
  [ -e "$cer" ] || continue
  echo "Convert file - $cer"
  # Run openssl directly (the original wrapped it in `echo $(...)`, which
  # discarded the exit status); report failures to stderr.
  openssl x509 -inform der -in "$cer" -out "${cer%.*}.pem" \
    || echo "Failed to convert $cer" >&2
done
| true
|
a56efdb20c0ed6267a38165977d97d004cd85306
|
Shell
|
nadlabak/android_device_motorola_umts_sholes
|
/prebuilt/bin/init_prep_keypad.sh
|
UTF-8
| 880
| 2.65625
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Boot-time setup for the Motorola sholes keypad: remounts /system
# read-write, links the generic qwerty keymaps for the touchscreen and CPCAP
# keys, then points the sholes-keypad links at the variant named by the
# device tree and the persist.sys.* properties.

export PATH=/system/bin:/system/xbin:$PATH

mount -o remount,rw /system

# Generic maps shared by all variants.
ln -s /system/usr/keychars/qwerty.kcm.bin /system/usr/keychars/qtouch-touchscreen.kcm.bin
ln -s /system/usr/keychars/qwerty.kcm.bin /system/usr/keychars/cpcap-key.kcm.bin
ln -s /system/usr/keylayout/qwerty.kl /system/usr/keylayout/qtouch-touchscreen.kl

if [ -e /proc/device-tree/System@0/Keypad@0/name ]; then
  # The device tree exposes the physical keypad variant name.
  keypad_name=$(cat /proc/device-tree/System@0/Keypad@0/name)
  keypad_type=$(getprop persist.sys.keypad_type)
  keylayout=$(getprop persist.sys.keylayout_alt)
  if [ "x$keypad_name" != "x" ]; then
    # Quote the composed paths so empty/odd property values cannot split the
    # ln arguments; -f replaces any stale links.
    ln -fs "/system/usr/keylayout/$keypad_name$keylayout.kl" /system/usr/keylayout/sholes-keypad.kl
    rm -f /system/usr/keychars/sholes-keypad.kcm.bin
    ln -fs "/system/usr/keychars/$keypad_name-$keypad_type.kcm.bin" /system/usr/keychars/sholes-keypad.kcm.bin
  fi
fi

mount -o remount,ro /system
| true
|
59e647d01371098186fbd4d2f69aa0589449bea5
|
Shell
|
lovepocky/dotfiles
|
/.zshrc
|
UTF-8
| 843
| 2.640625
| 3
|
[] |
no_license
|
################################################### * envs
export HOMEBREW_BOTTLE_DOMAIN=https://mirrors.tuna.tsinghua.edu.cn/homebrew-bottles
export NODE_BUILD_MIRROR_URL="https://npm.taobao.org/dist"
################################################### * inits
eval "$(nodenv init -)"
eval "$(pyenv init -)"
# BUG FIX: a tilde inside double quotes is NOT expanded, so the original
# [ -f "~/.fzf.zsh" ] checks were always false and these files never loaded.
# $HOME expands inside quotes.
[ -f "$HOME/.fzf.zsh" ] && source "$HOME/.fzf.zsh"
[ -f "$HOME/.zsh/zsh-autosuggestions/zsh-autosuggestions.zsh" ] && source "$HOME/.zsh/zsh-autosuggestions/zsh-autosuggestions.zsh"
################################################### * alias
alias pc='proxychains4'
[ "$(uname)" = "Darwin" ] && alias dc='docker-compose'
alias re='source ~/.zshrc'
# Download a CPython source tarball from the taobao mirror into pyenv's
# cache, so `pyenv install <version>` does not have to reach python.org.
pyenv_download () {
    # local: do not leak $v into the interactive shell.
    local v=$1
    echo downloading from "https://npm.taobao.org/mirrors/python/$v/Python-$v.tar.xz"
    wget "https://npm.taobao.org/mirrors/python/$v/Python-$v.tar.xz" -P ~/.pyenv/cache/
}
| true
|
4b0513740a918b0e65c90ac567592828b943c722
|
Shell
|
epiclongstone/server-setup
|
/serversetup.sh
|
UTF-8
| 2,744
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a management host: install prerequisites, Ansible and Terraform,
# clone the server-setup repository, then run the Ansible playbooks and
# Terraform against the Longstone server.
# Progress lines go to the terminal AND ${FULLLOG}; command output goes to
# ${FULLLOG} only.
set -e

FULLLOG=/tmp/serversetup.log
echo "Full logfile: /tmp/serversetup.log"

echo " - Installing required components" | tee -a "${FULLLOG}"
sudo apt-get update >> "${FULLLOG}"
sudo apt-get -y install \
  software-properties-common \
  wget \
  git >> "${FULLLOG}"

# Install Ansible from the upstream PPA only when it is not already present.
if ! command -v ansible >/dev/null ; then
  echo " - Installing Ansible" | tee -a "${FULLLOG}"
  sudo apt-add-repository ppa:ansible/ansible -y >> "${FULLLOG}" 2>&1
  sudo apt-get update >> "${FULLLOG}" 2>&1
  sudo apt-get install -y ansible >> "${FULLLOG}" 2>&1
fi

# Install a pinned Terraform release only when it is not already present.
if ! command -v terraform >/dev/null ; then
  echo " - Install Terraform" | tee -a "${FULLLOG}"
  cd /tmp
  wget -O terraform.zip https://releases.hashicorp.com/terraform/0.11.8/terraform_0.11.8_linux_amd64.zip >> "${FULLLOG}" 2>&1
  unzip terraform.zip >> "${FULLLOG}"
  sudo mv terraform /usr/local/bin/terraform
  rm terraform.zip
fi

if ! [ -d /tmp/server-setup ]; then
  echo " - Cloning Server Setup repository" | tee -a "${FULLLOG}" 2>&1
  cd /tmp
  git clone https://github.com/epiclongstone/server-setup >> "${FULLLOG}"
fi

echo " - Running Ansible" | tee -a "${FULLLOG}"
cd /tmp/server-setup/ansible

# The vault password is cached in ~/.vault_pass; prompt for it once.
VAULTFILE=${HOME}/.vault_pass
if ! [ -s "${VAULTFILE}" ]; then
  echo "Ansible vault password not found in ~/.vault_pass" | tee -a "${FULLLOG}"
  echo "Please enter Ansible Vault password to continue" | tee -a "${FULLLOG}"
  read -rs VAULTPASSWD
  echo    # read -s suppresses the user's newline
  echo "Storing Ansible Vault password in to ${VAULTFILE}" | tee -a "${FULLLOG}"
  # SECURITY FIX: the file holds a secret — create it owner-only BEFORE
  # writing, so the password is never world-readable.
  touch "${VAULTFILE}"
  chmod 600 "${VAULTFILE}"
  echo "${VAULTPASSWD}" > "${VAULTFILE}"
fi

echo -n "Enter IP Address for longstone Server: "
read -r LONGSTONE_IP

# Decrypt the inventory, substitute the target IP, then re-encrypt it.
echo "Decrypting inventory file" | tee -a "${FULLLOG}"
ansible-vault decrypt inventory/longstone.yml --vault-password-file "${VAULTFILE}"
echo "Setting Longstone server IP to ${LONGSTONE_IP} in Ansible inventory" | tee -a "${FULLLOG}"
sed -i "s/{{ longstone_IP }}/${LONGSTONE_IP}/" inventory/longstone.yml
echo "Encrypting inventory file" | tee -a "${FULLLOG}"
ansible-vault encrypt inventory/longstone.yml --vault-password-file "${VAULTFILE}"

echo " - Running Playbook for all hosts" | tee -a "${FULLLOG}"
#{
ansible-playbook -i inventory/longstone.yml ./all.yml --vault-password-file ~/.vault_pass
echo " - Running Playbook for Rancher Server hosts" | tee -a "${FULLLOG}"
ansible-playbook -i inventory/longstone.yml ./rancher-server.yml --vault-password-file ~/.vault_pass

echo " - Running Terraform"
cd /tmp/server-setup/terraform
echo " - Updating terraform IP Address"
sed -i "s/{{ longstone_IP }}/${LONGSTONE_IP}/g" longstone.tf
echo " - Initialising Terraform"
terraform init
echo " - Running Terraform plan"
terraform plan
echo " - Applying Terraform plan"
terraform apply --auto-approve
#} | tee -a ${FULLLOG}
| true
|
4ba4f4d05e2c968464bec725affd2af138b36b38
|
Shell
|
rafritts/bunit
|
/bunit_unit_tests/UnitTestsWithIntentionalFailures.ut
|
UTF-8
| 3,583
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Test suite for bunit's assertion helpers.
# NOTE: the file is named UnitTestsWithIntentionalFailures — every function
# whose name contains "Failure" or "WrongArgsNum" is EXPECTED to fail, so
# that bunit's failure reporting can itself be exercised.  Do not "fix"
# those assertions.
# shellcheck disable=SC1091
source ../bunit.shl
# testSetup / testTeardown: bunit hooks — presumably run around the suite to
# manage the scratch file used by the -f assertions; confirm in bunit.shl.
testSetup () {
touch testFile
}
# --- Passing assertions ------------------------------------------------
testAssertEqualsWithStrings () {
assertEquals "something" "something"
}
testAssertEqualsWithIntegers () {
assertEquals 10 10
}
testAssertEqualsWithIntegers2 () {
assertEquals 5 $((10 - 5))
}
testAssertEqualsWithIntegers3 () {
assertEquals 1 1
}
testAssertEqualsWithDoubles () {
assertEquals 5.5 5.5
}
# 5.5 vs 5.50: checks textual/numeric equality handling of trailing zeros.
testAssertEqualsWithDoubles2 () {
assertEquals 5.5 5.50
}
# Arrays are compared via their "${arr[*]}" joined form.
testAssertEqualsWithArrays () {
local array1=(1 2 3)
local array2=(1 2 3)
assertEquals "${array1[*]}" "${array2[*]}"
}
testAssertNotEqualsWithStrings () {
assertNotEquals "something" "something else"
}
testAssertNotEqualsWithIntegers () {
assertNotEquals 5 6
}
testAssertNotEqualsWithArrays () {
local array1=(1 2 3)
local array2=(4 5 6)
assertNotEquals "${array1[*]}" "${array2[*]}"
}
testAssertNull () {
nullString=""
assertNull "$nullString"
}
# assertTrue/assertFalse receive a `test`-style expression as a string.
testAssertTrueWithFilename () {
assertTrue "-f \"testFile\""
}
testAssertTrueWithIntegers () {
assertTrue "10 = 10"
}
testAssertFalseWithIntegers () {
assertFalse "10 = 5"
}
testAssertFalseWithFilename () {
assertFalse "-f \"testFile2\""
}
testAssertFalseWithArray () {
local array1=(1 2 3)
local array2=(4 5 6)
# shellcheck disable=SC2128
# We are comparing the actual arrays, not just the elements
assertFalse "${array1} = ${array2}"
}
testAssertContainsWithString () {
local string="FooBar"
local substring="Bar"
assertContains $substring $string
}
testAssertContainsWithArray () {
local array1=(1 2 3)
local num2=2
assertContains $num2 "${array1[*]}"
}
# --- Intentional failures below: these MUST fail when the suite runs ----
testAssertEqualsWithStringsFailure () {
assertEquals "something" "something else"
}
testAssertEqualsWithIntegersFailure () {
assertEquals 5 10
}
testAssertEqualsWithArraysFailure () {
local array1=(1 2 3)
local array2=(4 5 6)
assertEquals "${array1[*]}" "${array2[*]}"
}
testAssertNotEqualsWithIntegersFailure () {
assertNotEquals 5 5
}
testAssertNotEqualsWithStringsFailure () {
assertNotEquals "same" "same"
}
testAssertNotEqualsWithArrayFailure () {
local array1=(1 2 3)
local array2=(1 2 3)
assertNotEquals "${array1[*]}" "${array2[*]}"
}
testAssertNullWithIntegerFailure () {
assertNull 5
}
testAssertNullWithStringFailure () {
assertNull "something"
}
testAssertNullWithArrayFailure () {
local array1=(1 2 3)
assertNull "${array1[*]}"
}
testAssertTrueWithFilenameFailure () {
assertTrue "-f \"testFile2\""
}
testAssertTrueWithIntegersFailure () {
assertTrue "10 = 5"
}
testAssertFalseWithFilenameFailure () {
assertFalse "-f \"testFile\""
}
testAssertFalseWithIntegersFailure () {
assertFalse "10 = 10"
}
testAssertContainsWithStringsFailure () {
local string="FooBar"
local substring="BarBar"
assertContains $substring $string
}
testAssertContainsWithArrayFailure () {
local array1=(4 5 6)
local num2=7
assertContains $num2 "${array1[*]}"
}
# --- Wrong-arity calls: exercise bunit's argument-count diagnostics -----
testAssertEqualsWrongArgsNum() {
assertEquals "something"
}
testAssertNotEqualsWrongArgsNum() {
assertNotEquals 20
}
testAssertNullWrongArgsNum() {
assertNull
}
testAssertTrueWrongArgsNum() {
assertTrue
}
testAssertFalseWrongArgsNum() {
assertFalse
}
testAssertContainsWrongArgsNum() {
assertContains "Foo"
}
# Declared with the `function` keyword: checks that bunit also discovers
# this definition style.  Intentionally failing (NotEquals name, Equals body).
function testAssertNotEqualsWithStringsFunction() {
assertEquals "something" "something else"
}
testTeardown () {
rm testFile
}
# Provided by bunit.shl: presumably discovers and runs the test* functions.
runUnitTests
| true
|
34e5aa128306b32eb53114dde2365f5b36e541a2
|
Shell
|
JohnMahowald/timer
|
/_timer_backup.sh
|
UTF-8
| 729
| 3.78125
| 4
|
[] |
no_license
|
#! /bin/bash
# Render the current time (HH:MM:SS:AM/PM) as ASCII art assembled from
# per-character font files, redrawing the screen in an endless loop.
TIMER_SRC_DIR="/Users/johnmahowald/Desktop/day/07.31.2018/timer"
NUMBER_DIR="$TIMER_SRC_DIR/numbers"

while true; do
  now=$(date +"%H:%M:%S:%p")
  output_display=""
  for idx in $(seq 7); do # there are 7 lines in each font file
    output_line=$idx
    # Build one display row: take line $output_line from the font file of
    # every character in the timestamp and join them.
    for l in $(seq ${#now}); do
      char=${now:l-1:1}
      lower_char=$(awk '{ print tolower($0) }' <<< "$char")
      filename="$lower_char.txt"
      # sed 'Nq;d' prints only line N of the file.
      content=$(sed "${output_line}q;d" "$NUMBER_DIR/$filename")
      output_display="$output_display $content"
    done
    output_display="$output_display\n"
  done
  # BUG FIX: the original ran `clear screen`, passing a bogus argument;
  # plain `clear` is the correct invocation.
  clear
  # BUG FIX: use a fixed %b format so the \n sequences are expanded but any
  # '%' characters in the font art are not treated as printf directives.
  printf '%b' "$output_display"
done
| true
|
2fc6e7fbf2c861885d6c38bf0594095a40218226
|
Shell
|
ravibjj/NetworkSecurityExamples
|
/client.py
|
UTF-8
| 325
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# ****** Ravi Nori
# ****** 12/11/2020
# ****** Purpose: create a client socket
# NOTE: this file is Python, not shell — the original '#!/bin/bash' shebang
# would hand it to bash, which cannot run it.
import socket
import time

# Target listener address, named so the status message below can report it
# (the original print referenced undefined `host`/`port` -> NameError).
host = "127.0.0.1"
port = 1424

# connect to the listener
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))

# Hold the connection open so the listener can observe it.
time.sleep(30)

# test host/port
print("you are connecting to {}:{}".format(host, port))
s.close()
| true
|
97e62e5de4c04c74df1080bd529ddef46c73c1ce
|
Shell
|
mcievents/docker-weblogic
|
/run-weblogic.sh
|
UTF-8
| 2,181
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entry point: on first run, create the WebLogic domain
# $WEBLOGIC_DOMAIN (resolving the admin password from env, Docker secret, or
# a generated random value), run any user setup scripts, then exec the
# admin-server JVM in the foreground.
set -e
WL_HOME=/opt/weblogic/wlserver
# See if WebLogic domain already exists.
if [ ! -d "/srv/weblogic/$WEBLOGIC_DOMAIN/config" ]; then
# It does not. Create it.
# Find or generate domain admin password.
if [ -z "$WEBLOGIC_PWD" ]; then
if [ -f /run/secrets/weblogic_admin_password ]; then
export WEBLOGIC_PWD=`cat /run/secrets/weblogic_admin_password`
echo "Weblogic domain $WEBLOGIC_DOMAIN admin password read from Docker secret."
else
# No env var and no secret: generate a random password and print it once.
export WEBLOGIC_PWD=${WEBLOGIC_PWD:-"`openssl rand -base64 12`"}
echo "Generated random admin password for Weblogic domain $WEBLOGIC_DOMAIN."
echo "WEBLOGIC DOMAIN $WEBLOGIC_DOMAIN ADMIN PASSWORD: $WEBLOGIC_PWD"
fi
else
echo "Weblogic domain $WEBLOGIC_DOMAIN admin password read from environment."
fi
# Drive WLST with an inline script: instantiate the stock domain template
# with the chosen name/password and write it under /srv/weblogic.  The
# here-doc delimiter is deliberately unquoted so $WEBLOGIC_* and $WL_HOME
# expand before WLST sees the script.
$WL_HOME/common/bin/wlst.sh -skipWLSModuleScanning <<EOF
readTemplate("$WL_HOME/common/templates/domains/wls.jar")
set('Name', '$WEBLOGIC_DOMAIN')
setOption('DomainName', '$WEBLOGIC_DOMAIN')
cd('/Security/$WEBLOGIC_DOMAIN/User/weblogic')
cmo.setPassword('$WEBLOGIC_PWD')
setOption('OverwriteDomain', 'true')
writeDomain('/srv/weblogic/$WEBLOGIC_DOMAIN')
closeTemplate()
exit()
EOF
# Execute custom user setup scripts
SETUP_SCRIPT_DIR=/opt/weblogic/scripts/setup
if [ -d $SETUP_SCRIPT_DIR ] && [ -n "$(ls -A $SETUP_SCRIPT_DIR)" ]; then
echo ""
echo "Executing user-defined setup scripts..."
for f in $SETUP_SCRIPT_DIR/*; do
case "$f" in
# *.sh is sourced (can modify this shell's env); *.py goes to WLST.
*.sh) echo "running $f"; . "$f" ;;
*.py) echo "running $f"; $WL_HOME/common/bin/wlst.sh "$f" ;;
*) echo "ignoring $f" ;;
esac
done
echo ""
fi
fi
cd /srv/weblogic/$WEBLOGIC_DOMAIN
# setDomainEnv.sh reads USER_MEM_ARGS / PRE_CLASSPATH, so map our knobs in.
USER_MEM_ARGS=$WEBLOGIC_MEM_ARGS
PRE_CLASSPATH=$WEBLOGIC_PRE_CLASSPATH
. bin/setDomainEnv.sh
# exec: the JVM replaces this shell and receives container signals directly.
exec $JAVA_HOME/bin/java $JAVA_VM $MEM_ARGS \
-Dweblogic.Name=$SERVER_NAME \
-Djava.security.policy=$WL_HOME/server/lib/weblogic.policy \
$JAVA_OPTIONS $PROXY_SETTINGS $SERVER_CLASS
| true
|
da3d31f2dc98bf9bd4f8b3a998d8656a3584c2d5
|
Shell
|
hoglet67/BeebFpga
|
/release.sh
|
UTF-8
| 501
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Assemble a BeebFPGA release: rebuild the ROM images, collect the DE1 and
# Papilio Duo artefacts into releases/<name>/, then zip the tree and list
# the archive contents as a sanity check.
mkdir -p releases
name=beebfpga_$(date +"%Y%m%d_%H%M")
release="releases/${name}"
echo "WARNING: Altera and Xilinx projects need manual compilation before release"

# Regenerate the merged Duo bitstream / ROM images first.
pushd roms
./make_duo_bitstream.sh
popd

# DE1 artefacts: programming files plus the ROM image.
# (the [ps]of glob is left unquoted on purpose so it expands)
mkdir -p "${release}/de1"
cp -a altera/output_files/bbc_micro_de1.[ps]of "${release}/de1"
cp -a roms/tmp/rom_image.bin "${release}/de1"

# Papilio Duo artefact: the merged bitstream, renamed for the release.
mkdir -p "${release}/duo"
cp -a roms/tmp/merged.bit "${release}/duo/bbc_micro_duo.bit"

pushd releases
zip -qr "${name}.zip" "${name}"
unzip -l "${name}"
popd
| true
|
ccdf0c6e187f85eb489edc80c3a86f8e62cd3b2f
|
Shell
|
Braedencraig/create-react-app-blueprint
|
/install.sh
|
UTF-8
| 2,404
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Installer for create-react-app-blueprint: verifies that locales are
# configured, downloads the crabp helper scripts into ~/bin, and appends a
# PATH snippet to the user's shell profile.

ERROR="🛑"
RED='\e[31m'
NC='\e[0m' # No Color

# True when "$1" names a runnable command.
function command_exists {
  command -v "$1" &> /dev/null
}

# The blueprint requires a configured UTF-8 locale; bail out with per-OS
# instructions when LC_ALL is unset.
if [ -z "$LC_ALL" ]; then
  if command_exists apt-get; then
    sudo apt-get install locales
  fi
  printf "\r${2-$ERROR} ${RED}ERROR Locales are not found.${NC}
🐧 Linux OS:
You must to include those variable in your shell profile:
export LANGUAGE=en_US.UTF-8
export LANG=en_US.UTF-8
export LC_ALL=\"en_US.UTF-8\"
After that, run in your terminal this:
sudo locale-gen en_US.UTF-8
🍏 MAC OS:
You must to include those variable in your shell profile:
export LC_CTYPE=en_US.UTF-8
export LC_ALL=en_US.UTF-8
For any cases, please refresh your terminal (e.g. source ~/.bashrc) or open a new tab and try again the installation process.\n"
  exit 1;
fi

# Render a spinner while the process with PID $1 is still running.
show_spinner()
{
  local -r pid="${1}"
  local -r delay='0.75'
  local spinstr='\|/-'
  local temp
  while ps a | awk '{print $1}' | grep -q "${pid}"; do
    temp="${spinstr#?}"
    printf " [%c] " "${spinstr}"
    spinstr=${temp}${spinstr%"${temp}"}
    sleep "${delay}"
    printf "\b\b\b\b\b\b"
  done
  printf " \b\b\b\b"
}

# Concatenate the three blueprint scripts into ~/bin/crabp (wget -O with
# several URLs appends them all to one file), spinning while it downloads.
(mkdir -p $HOME/bin && wget -O $HOME/bin/crabp https://raw.githubusercontent.com/charly-palencia/create-react-app-blueprint/master/helper.sh https://raw.githubusercontent.com/charly-palencia/create-react-app-blueprint/master/main.sh https://raw.githubusercontent.com/charly-palencia/create-react-app-blueprint/master/crabp.sh ) &
show_spinner "$!"
chmod +x $HOME/bin/crabp

# Snippet appended to the shell profile so ~/bin ends up on PATH.
local_crabp_setting='
# ------- crabp command settings ----
if [ -d "$HOME/bin" ] ; then
PATH="$HOME/bin:$PATH"
fi
# ------- crabp command settings ----
'

# BUG FIX: the original appended ${local_nvm_setting} — an undefined
# variable — so only a blank line was ever written.  Append the snippet
# defined above instead.
if [ $SHELL == "/bin/sh" ]; then
  [ -f ~/.bash_profile ] && echo "${local_crabp_setting}" >> ~/.bash_profile
  [ -f ~/.bashrc ] && echo "${local_crabp_setting}" >> ~/.bashrc
else
  [ -f ~/.zshrc ] && echo "${local_crabp_setting}" >> ~/.zshrc
fi
| true
|
2f3ac9252d904a1554628d153a305eeaf3aac6e4
|
Shell
|
fabioespinosa/CMSKubernetes
|
/docker/sqoop/scripts/phedex-file-catalog.sh
|
UTF-8
| 2,507
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Daily Sqoop import of the PhEDEx file catalog from Oracle into HDFS.
# Reads JDBC credentials from ./cmsr_cstring, imports the file-level rows
# created in [START_DATE, START_DATE + 1 day) into $BASE_PATH/diff/date=<d>,
# and appends the delta to the merged catalog on success.  Failures are
# mailed to the operators via sendMail (from sqoop_utils.sh).
. $(dirname $0)/sqoop_utils.sh
setJava

BASE_PATH="/project/awg/cms/phedex/catalog/csv"
#BASE_PATH="cms-catalog"

# cmsr_cstring: line 1 = JDBC URL, line 2 = username, line 3 = password.
JDBC_URL=$(sed '1q;d' cmsr_cstring)
USERNAME=$(sed '2q;d' cmsr_cstring)
PASSWORD=$(sed '3q;d' cmsr_cstring)

me=$(basename $0)_$$

# Import window defaults to yesterday when no start date argument is given.
if [ -n "$1" ]; then
  START_DATE=$1
else
  START_DATE=$(date +'%F' -d "1 day ago")
fi

year=$(date +'%Y' -d "$START_DATE")
month=$(date +'%-m' -d "$START_DATE")
day=$(date +'%-d' -d "$START_DATE")
END_DATE=$(date +'%F' -d "$START_DATE + 1 day")

# Epoch-second bounds used in the SQL predicate below.
START_DATE_S=$(date +'%s' -d "$START_DATE")
END_DATE_S=$(date +'%s' -d "$END_DATE")

# BUG FIX: the log timestamp used %H%m%S — %m is the MONTH, not minutes —
# so runs within the same hour collided on the same log name; %M is the
# minute specifier.  Also make sure the log directory exists.
mkdir -p log
LOG_FILE=log/$(date +'%F_%H%M%S')_$(basename $0)

OUTPUT_FOLDER=$BASE_PATH/diff/date=$START_DATE
MERGED_FOLDER=$BASE_PATH/merged

echo "Timerange: $START_DATE to $END_DATE" >> $LOG_FILE.cron
echo "Folder: $OUTPUT_FOLDER" >> $LOG_FILE.cron
echo "quering..." >> $LOG_FILE.cron
#exit;

# Single-mapper direct import; \$CONDITIONS is required by sqoop --query.
sqoop import -Dmapreduce.job.user.classpath.first=true -Ddfs.client.socket-timeout=120000 --direct --connect $JDBC_URL --fetch-size 10000 --username $USERNAME --password $PASSWORD --target-dir $OUTPUT_FOLDER -m 1 \
--query "select ds.name as dataset_name, ds.id as dataset_id, ds.is_open as dataset_is_open, ds.time_create as dataset_time_create, bk.name as block_name, bk.id as block_id, bk.time_create as block_time_create, bk.is_open as block_is_open, f.logical_name as file_lfn, f.id as file_id, f.filesize, f.checksum, f.time_create as file_time_create from cms_transfermgmt.t_dps_dataset ds join cms_transfermgmt.t_dps_block bk on bk.dataset=ds.id join cms_transfermgmt.t_dps_file f on f.inblock=bk.id where f.time_create >= ${START_DATE_S} and f.time_create < ${END_DATE_S} and \$CONDITIONS" \
--fields-terminated-by , --escaped-by \\ --optionally-enclosed-by '\"' \
1>$LOG_FILE.stdout 2>$LOG_FILE.stderr

# Success is judged from the sqoop log: no ImportTool error AND a transfer
# summary line must both hold.
OUTPUT_ERROR=$(cat $LOG_FILE.stderr | egrep "ERROR tool.ImportTool: Error during import: Import job failed!")
TRANSF_INFO=$(cat $LOG_FILE.stderr | egrep "INFO mapreduce.ImportJobBase: Transferred")

if [[ $OUTPUT_ERROR == *"ERROR"* || ! $TRANSF_INFO == *"INFO"* ]]; then
  echo "Error occured, check $LOG_FILE"
  sendMail $LOG_FILE.stdout cms-transfermgmt-catalog $START_DATE
  sendMail $LOG_FILE.stderr cms-transfermgmt-catalog $START_DATE
else
  # Append the day's delta to the merged catalog file in HDFS.
  hdfs dfs -cat $OUTPUT_FOLDER/part-m-00000 | hdfs dfs -appendToFile - $MERGED_FOLDER/part-m-00000
fi

#hdfs dfs -put /tmp/$me.stdout $OUTPUT_FOLDER/sqoop.stdout && rm /tmp/$me.stdout
#hdfs dfs -put /tmp/$me.stderr $OUTPUT_FOLDER/sqoop.stderr && rm /tmp/$me.stderr
#rm /tmp/$$.stdout /tmp/$$.stderr
| true
|
8d24e476f11b834a3e275070d4e8dd54e2db6fd1
|
Shell
|
yzrobot/ENRICHME_Releases
|
/ais/openhab/scripts/mongodb_status.sh
|
UTF-8
| 295
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Report whether mongod appears to be running by looking for its lock file.
# The UoL-specific database location is checked first, then the standard
# Debian path; prints "ON" when either lock file exists, "OFF" otherwise.

standard_mongo_file="/var/lib/mongodb/mongod.lock"
uol_mongo_file="/home/mongodb/database/mongod.lock"

if [ -f "$uol_mongo_file" ] || [ -f "$standard_mongo_file" ]; then
  echo "ON"
else
  echo "OFF"
fi
| true
|
5e94edfa5703dade80e7ab33a0f48ce0a9da1fec
|
Shell
|
memphis-iis/iis-sign
|
/test.sh
|
UTF-8
| 361
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Start caddy in the background, open the sign page in kiosk-mode Chrome
# (which blocks until it exits), then stop caddy.  An optional first
# argument is appended to the page URL as a querystring.

caddy&
CADDY_PID=$!
echo "caddy is running as $CADDY_PID"

# Build the optional querystring from $1.
QS=""
if [ -n "$1" ]; then
  QS="?$1"
  echo "Using querystring $QS"
fi

echo "Debug at http://localhost:9222"
# Chrome runs in the foreground; the script resumes once it is closed.
/opt/google/chrome/chrome --kiosk --disable-session-crashed-bubble --remote-debugging-port=9222 "http://localhost:2020/sign.html$QS"

echo "Killing caddy"
kill $CADDY_PID
| true
|
d4bd5494d4dc55f21294d4db6503d26375f8a6ce
|
Shell
|
aps337/wlan-ap
|
/feeds/realtek/rtl83xx-poe/files/etc/init.d/poe
|
UTF-8
| 459
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh /etc/rc.common
# OpenWrt procd init script for the PoE controller on rtl83xx switches.
# rc.common supplies the start/stop/reload plumbing; this file only declares
# the service and how to launch /bin/poe.lua.
START=40
USE_PROCD=1
PROG=/bin/poe.lua
# Reload the service whenever the 'poe' UCI configuration changes.
service_triggers() {
procd_add_reload_trigger poe
}
# Start poe.lua with the power budget followed by the eight per-port values
# from UCI (poe.poe.port1..port8), defaulting to 65 and 0 respectively.
# Does nothing unless poe.poe.enable is 1.
start_service() {
[ "$(uci get poe.poe.enable)" -eq 1 ] || return 0
local budget=$(uci get poe.poe.budget)
procd_open_instance
procd_set_param command "$PROG"
procd_append_param command ${budget:-65}
for p in `seq 1 8`; do
local pwr=$(uci get poe.poe.port$p)
procd_append_param command ${pwr:-0}
done
procd_close_instance
}
| true
|
94b819685ad20c4e5762be44dfbd62a0e9421286
|
Shell
|
cloudfoundry/bosh-azure-cpi-release
|
/ci/tasks/run-integration.sh
|
UTF-8
| 3,470
| 2.9375
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# CI task: run the bosh-azure-cpi integration spec suite.
# Azure credentials come from the environment (validated below); the target
# infrastructure names come from the terraform metadata JSON file.
set -e

: ${AZURE_ENVIRONMENT:?}
: ${AZURE_TENANT_ID:?}
: ${AZURE_SUBSCRIPTION_ID:?}
: ${AZURE_CLIENT_ID:?}
: ${AZURE_CLIENT_SECRET:?}
: ${AZURE_CERTIFICATE:?}
: ${SSH_PUBLIC_KEY:?}
: ${METADATA_FILE:=environment/metadata}

metadata=$(cat ${METADATA_FILE})

# Extract one field from the metadata JSON.  -e makes jq exit non-zero when
# the key is missing instead of silently yielding "null"; quoting the JSON
# also preserves its original whitespace (the original echoed it unquoted).
meta() {
  echo "${metadata}" | jq -e --raw-output "$1"
}

export BOSH_AZURE_ENVIRONMENT=${AZURE_ENVIRONMENT}
export BOSH_AZURE_TENANT_ID=${AZURE_TENANT_ID}
export BOSH_AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID}
export BOSH_AZURE_CLIENT_ID=${AZURE_CLIENT_ID}
export BOSH_AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET}
export BOSH_AZURE_CERTIFICATE=${AZURE_CERTIFICATE}
export BOSH_AZURE_LOCATION=$(meta ".location")
export BOSH_AZURE_DEFAULT_RESOURCE_GROUP_NAME=$(meta ".default_resource_group_name")
export BOSH_AZURE_ADDITIONAL_RESOURCE_GROUP_NAME=$(meta ".additional_resource_group_name")
export BOSH_AZURE_STORAGE_ACCOUNT_NAME=$(meta ".storage_account_name")
export BOSH_AZURE_EXTRA_STORAGE_ACCOUNT_NAME=$(meta ".extra_storage_account_name")
export BOSH_AZURE_VNET_NAME=$(meta ".vnet_name")
export BOSH_AZURE_SUBNET_NAME=$(meta ".subnet_1_name")
export BOSH_AZURE_SECOND_SUBNET_NAME=$(meta ".subnet_2_name")
export BOSH_AZURE_DEFAULT_SECURITY_GROUP=$(meta ".default_security_group")
export BOSH_AZURE_PRIMARY_PUBLIC_IP=$(meta ".public_ip_in_default_rg")
export BOSH_AZURE_SECONDARY_PUBLIC_IP=$(meta ".public_ip_in_additional_rg")
export BOSH_AZURE_APPLICATION_SECURITY_GROUP=$(meta ".asg_name")
export BOSH_AZURE_APPLICATION_GATEWAY_NAME=$(meta ".application_gateway_name")
export BOSH_AZURE_DEFAULT_USER_ASSIGNED_IDENTITY_NAME=$(meta ".default_user_assigned_identity_name")
export BOSH_AZURE_USER_ASSIGNED_IDENTITY_NAME=$(meta ".user_assigned_identity_name")
export BOSH_AZURE_SSH_PUBLIC_KEY=${SSH_PUBLIC_KEY}
export BOSH_AZURE_STEMCELL_PATH=$(realpath stemcell/*.tgz)

source stemcell-state/stemcell.env
source /etc/profile.d/chruby.sh
chruby ${RUBY_VERSION}

pushd bosh-cpi-src/src/bosh_azure_cpi > /dev/null
bundle install

# Light-stemcell and migration specs are always excluded from the main run;
# one disk-mode-specific exclusion is added below.
tags="--tag ~light_stemcell --tag ~migration"

export BOSH_AZURE_USE_MANAGED_DISKS=${AZURE_USE_MANAGED_DISKS}
if [ "${AZURE_USE_MANAGED_DISKS}" == "true" ]; then
tags+=" --tag ~unmanaged_disks"
else
tags+=" --tag ~availability_zone"
fi
bundle exec rspec spec/integration/ ${tags} --format documentation

# migration: unmanged disk -> managed disk
# Only run migration test when AZURE_USE_MANAGED_DISKS is set to false initially
if [ "${AZURE_USE_MANAGED_DISKS}" == "false" ]; then
unset BOSH_AZURE_USE_MANAGED_DISKS
bundle exec rspec spec/integration/migrations/managed_disks_migration_spec.rb --format documentation
fi

# migration: regional resource -> zonal resource
# Only run migration test when AZURE_USE_MANAGED_DISKS is set to true initially
if [ "${AZURE_USE_MANAGED_DISKS}" == "true" ]; then
export BOSH_AZURE_USE_MANAGED_DISKS=true
bundle exec rspec spec/integration/migrations/availability_zone_migration_spec.rb --format documentation
fi
popd > /dev/null
| true
|
be984778d19c4d4f2cef0b67d8693fed315062ca
|
Shell
|
GoldsteinE/binaries
|
/generate.sh
|
UTF-8
| 548
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
# Build the binaries-pack Docker image with pinned tool versions, then run
# it so the packed binaries land in ./out on the host.
set -ex

# BUG FIX: the original ran `cd "$(basename "$(dirname "$0")")"`, which
# strips the directory path down to its last component and therefore only
# works when invoked from the script's parent directory.  cd to the
# directory itself.
cd "$(dirname "$0")"

CONTAINER_TAG='binaries-pack'

# Pinned versions: commit hashes for neovim/yadm, release tags otherwise.
NEOVIM_VERSION='7685fc9ecd2a1a0b6c62fe56a14de8441d9f3a58'
RIPGREP_VERSION='13.0.0'
FD_VERSION='v8.2.1'
YADM_VERSION='a4d39c75045bfca9277284ff0513eb022237a88e'

docker build -t "$CONTAINER_TAG" \
  --build-arg "NEOVIM_VERSION=${NEOVIM_VERSION}" \
  --build-arg "RIPGREP_VERSION=${RIPGREP_VERSION}" \
  --build-arg "FD_VERSION=${FD_VERSION}" \
  --build-arg "YADM_VERSION=${YADM_VERSION}" \
  .

mkdir -p out
# Mount ./out so the container can write the binaries back to the host.
docker run --rm -it -v "$(pwd)/out:/binaries" "$CONTAINER_TAG"
| true
|
9b75db4073b159de5c25d1d9d30f33e98bbbf623
|
Shell
|
l29ah/w3crapcli
|
/bitfinex.com/bfx_hist_plot
|
UTF-8
| 680
| 3.21875
| 3
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# Plot a Bitfinex balance-history CSV as three gnuplot time series, one per
# wallet (EXCHANGE / TRADING / DEPOSIT).
# Usage: bfx_hist_plot <history.csv>
# NOTE(review): the input appears to be newest-first (it is reversed with
# tac before processing), with field 2 ending in "on wallet <NAME>", field 4
# the balance and field 5 the timestamp — inferred from the awk program
# below; verify against an actual bitfinex.com export.
[[ -f "$1" ]] || {
echo "$1 not found"
exit 1
}
# Temp file holding the reshaped CSV that gnuplot will read.
t=$(mktemp)
# Track the latest balance seen per wallet and emit one
# "timestamp,exchange,trading,deposit" row per input line.
tac "$1" |
awk -F, '
BEGIN {
OFS=",";
v["EXCHANGE"] = 0.0;
v["TRADING"] = 0.0;
v["DEPOSIT"] = 0.0;
}
{
sub(/^.* on wallet /, "", $2);
v[toupper($2)] = $4;
print $5, v["EXCHANGE"], v["TRADING"], v["DEPOSIT"]
}
' > "$t"
# Generated gnuplot script: time-formatted x axis, one line per wallet;
# "pause -1" keeps the plot window open until a key is pressed.
gp=$(mktemp)
cat >$gp <<EOF
set datafile separator ","
set xdata time
set timefmt "%Y-%m-%d %H:%M:%S"
set format x "%m.%y"
set style data lines
set grid
#set term png size 2000,600
#set output 'bfx_hist_usd.png'
plot \
'$t' using 1:2 title 'Exchange', \
'$t' using 1:3 title 'Trading', \
'$t' using 1:4 title 'Deposit'
pause -1
EOF
gnuplot "$gp"
# Clean up both temp files once gnuplot exits.
rm "$t" "$gp"
| true
|
7ea1a207934b3d56f0f0db01b61662feec1e4f07
|
Shell
|
qiaojunfeng/texlive-fontconfig
|
/PKGBUILD
|
UTF-8
| 733
| 2.609375
| 3
|
[] |
no_license
|
# Maintainer: Junfeng Qiao <qiaojunfeng at outlook.com>
# Contributor:
# https://github.com/qiaojunfeng/texlive-fontconfig
pkgname=texlive-fontconfig
pkgdesc=""
pkgver=20190225
pkgrel=1
arch=(any)
url='https://github.com/qiaojunfeng/texlive-fontconfig'
license=(custom)
#depends=(fontconfig xorg-fonts-encodings xorg-mkfontscale xorg-mkfontdir)
depends=(texlive-bin)
#provides=(ttf-fangzheng)

# Install a copy of the system TeX Live fontconfig file into TEXMFSYSVAR,
# inserting the fontawesome opentype directory as an extra <dir> entry.
package() {
  SRC=/etc/fonts/conf.avail/09-texlive-fonts.conf
  DEST=$(kpsewhich -var-value TEXMFSYSVAR)/fonts/conf/texlive-fontconfig.conf
  # Quote every $pkgdir/$DEST path: the build root may contain spaces.
  mkdir -p "$pkgdir/$(dirname "$DEST")"
  #ln -s $SRC $pkgdir/$DEST
  cp "$SRC" "$pkgdir/$DEST"
  sed -i '4i <dir>/usr/share/texmf-dist/fonts/opentype/public/fontawesome/</dir>' "$pkgdir/$DEST"
}
| true
|
769c9acb4a51b5998524833b6c24d6234dd95f59
|
Shell
|
ividic/dotfiles
|
/shell/bash_functions
|
UTF-8
| 8,573
| 3.875
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash

# CDF
# ---------------------------
# Change working directory to the top-most Finder window location
# From: https://github.com/mathiasbynens/dotfiles/blob/master/.functions
function cdf() { # short for `cdfinder`
	cd "$(osascript -e 'tell app "Finder" to POSIX path of (insertion location as alias)')";
}

# CleanUp
# ---------------------------
# Recursively delete files that match a certain pattern
# (by default delete all `.DS_Store` files)
# From: https://github.com/necolas/dotfiles/blob/master/shell/functions/cleanup
cleanup() {
	local q="${1:-*.DS_Store}"
	find . -type f -name "$q" -ls -delete
}

# CP_P
# ---------------------------
# Copy w/ progress
# From: https://github.com/paulirish/dotfiles/blob/master/.functions
cp_p () {
	# BUG FIX: quote the source/destination so paths containing spaces or
	# glob characters survive word-splitting.
	rsync -WavP --human-readable --progress "$1" "$2"
}

# F
# ---------------------------
# find shorthand: search the current tree for files named "$1",
# hiding permission errors.
# From: https://github.com/paulirish/dotfiles/blob/master/.functions
function f() {
	find . -name "$1" 2>&1 | grep -v 'Permission denied'
}

# FCD
# ---------------------------
# Open current directory in Finder
function fcd() { # short for `findercd`
	open -a Finder ./
}
# FS
# ---------------------------
# Determine size of a file or total size of a directory; with no arguments,
# report the sizes of everything in the current directory (dotfiles too).
# From: https://github.com/mathiasbynens/dotfiles/blob/master/.functions
function fs() {
	# GNU du supports -b (bytes); fall back to plain -h elsewhere (BSD/macOS).
	if du -b /dev/null > /dev/null 2>&1; then
		local arg=-sbh;
	else
		local arg=-sh;
	fi
	# BUG FIX: the original tested [[ -n "$@" ]], which misbehaves with
	# multiple arguments (ShellCheck SC2199); test the argument count.
	if (( $# )); then
		du $arg -- "$@";
	else
		du $arg .[^.]* *;
	fi;
}
# GCM
# ---------------------------
# Git commit with message and default prefix.
# Prefixes the commit message with the JIRA card id taken from the current
# branch name (its first two '-'-separated fields); prompts for the message
# when none is given.  Refuses to run off-branch or on master/develop.
gcm() {
	# Get git branch name and attempt to retrieve the JIRA card id from its prefix
	local branch_name=$(git rev-parse --symbolic-full-name --abbrev-ref HEAD)
	local card_id=$(echo ${branch_name} | cut -d "-" -f 1-2)

	# BUG FIX: this file is sourced into the interactive shell, so `exit 1`
	# here closed the user's terminal session; use `return`, as gitup does.
	if [ -z "${branch_name}" ] ; then
		echo "Error! Cannot read branch name"
		return 1
	fi

	if [ "${branch_name}" = "HEAD" ] ; then
		# https://stackoverflow.com/a/11958481
		# User is not on a branch
		return 1
	fi

	if [ "${branch_name}" = "master" ] || [ "${branch_name}" = "develop" ] ; then
		# More care should be given to these types of branches
		return 1
	fi

	if [ $# -eq 0 ] ; then
		echo "Commit message:"
		read commit_message
	else
		local commit_message=$@
	fi

	git commit -m "${card_id}: ${commit_message}"
}
# gitup
# ---------------------------
# Update repo and delete current branch after it has been merged
# Usage: gitup <parent_branch> — checks out <parent_branch>, pulls it, then
# deletes the branch you were on.  Relies on the print_info/print_error
# helpers defined elsewhere in these dotfiles.
function gitup() {
if [ -z "$1" ]; then
print_info "Usage: gitup [parent_branch]"
return 1;
else
local parent="$1";
fi;
# Refuse to run outside a git work tree, and never operate on master.
if [ "$(git rev-parse --is-inside-work-tree 2>/dev/null)" ]; then
local branch=$(git rev-parse --abbrev-ref HEAD);
if [ "$branch" = "master" ]; then
print_error "Cannot perform action on master branch, exiting...";
return 1;
fi
else
print_error "Not in a git repo, exiting...";
return 1;
fi
# The parent must differ from the current branch, since we delete the latter.
if [ "$parent" = "$branch" ]; then
print_error "Select a different parent branch, exiting...";
print_info "Usage: gitup [parent_branch]"
return 1;
fi
# -d (not -D): git only deletes the branch if it has been merged.
git checkout $parent && git pull && git branch -d "$branch"
}
# GZcompare
# ---------------------------
# Compare original and gzipped file size
# From: https://github.com/necolas/dotfiles/blob/master/shell/functions/gz
# Advanced version: https://github.com/alrra/dotfiles/blob/master/shell/bash_functions
gz() {
	local origsize=$(wc -c < "$1")
	local gzipsize=$(gzip -c "$1" | wc -c)
	# bc -l does the floating-point percentage (bash arithmetic is integer-only).
	local ratio=$(echo "$gzipsize * 100 / $origsize" | bc -l)
	printf "orig: %d bytes\n" "$origsize"
	printf "gzip: %d bytes (%2.2f%%)\n" "$gzipsize" "$ratio"
}

# MKD
# ---------------------------
# Create new directories and enter the first one
# From: https://github.com/alrra/dotfiles/blob/master/shell/bash_functions
# Alternative: https://github.com/mathiasbynens/dotfiles/blob/master/.functions
mkd() {
	# BUG FIX: the original ended with `cd "$@"`, which fails when several
	# directories are given (cd accepts one); enter the first, as documented.
	[ -n "$*" ] && mkdir -p "$@" && cd "$1"
	#                └─ make parent directories if needed
}
# QH
# ---------------------------
# Search history
# From: https://github.com/alrra/dotfiles/blob/master/shell/bash_functions
# NOTE(review): relies on $HISTFILE, which is only set in interactive shells.
qh() {
# ┌─ enable colors for pipe
# │ ("--color=auto" enables colors only if
# │ the output is in the terminal)
grep --color=always "$*" "$HISTFILE" | less -RX
# display ANSI color escape sequences in raw form ─┘│
# don't clear the screen after quitting less ─┘
}
# QT
# ---------------------------
# Search for text within the current directory
# (skips .git and node_modules; pages the matches through less)
qt() {
grep -ir --color=always "$*" --exclude-dir=".git" --exclude-dir="node_modules" . | less -RX
# │└─ search all files under each directory, recursively
# └─ ignore case
}
# WH
# ---------------------------
# Perform a Whois and Host lookup on a domain
# (also resolves the conventional mail. subdomain)
wh() {
whois $1
host $1
host mail.$1
}
# Misc
# ---------------------------
# Add tab completion for SSH hostnames based on ~/.ssh/config, ignoring wildcards
# From: https://github.com/mathiasbynens/dotfiles/blob/master/.bash_profile
[ -e "$HOME/.ssh/config" ] && complete -o "default" -o "nospace" -W "$(grep "^Host" ~/.ssh/config | grep -v "[?*]" | cut -d " " -f2- | tr ' ' '\n')" scp sftp ssh;
# Create a Self-Signed SSL Certificate for a local dev domain
#
# Make sure SSL is enabled in Apache config (httpd.conf)
# Also make sure the reference to httpd-ssl.conf in httpd.conf is un-commented
function create-ssl-cert() {
# Check that we're not in the wwwroot folder
if [ ${PWD##*/} = "wwwroot" ]; then
print_error "Run this command from the project folder, not wwwroot"
return 1
fi
if [ -f "/etc/resolver/localhost" ]; then
TLD="localhost"
elif [ -f "/etc/resolver/test" ]; then
TLD="test"
else
TLD="dev"
fi
ask_for_confirmation "Create SSL Certificate for ${PWD}?"
if answer_is_yes; then
# Set up SSL config
cat > openssl.cnf <<-EOF
[req]
distinguished_name = req_distinguished_name
x509_extensions = v3_req
prompt = no
[req_distinguished_name]
CN = ${PWD##*/}.$TLD
[v3_req]
keyUsage = keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = *.${PWD##*/}.$TLD
DNS.2 = ${PWD##*/}.$TLD
EOF
# Generate Certificate
openssl req \
-new \
-newkey rsa:2048 \
-sha1 \
-days 3650 \
-nodes \
-x509 \
-keyout ssl.key \
-out ssl.crt \
-config openssl.cnf
# Delete config
rm -f openssl.cnf
# Set up the Virtual Host
ask_for_confirmation "Add Apache vhost entry and restart Apache?"
if answer_is_yes; then
# Make sure the current user owns the log files
touch access_log
touch error_log
if [ "$(which httpd)" == "$(brew --prefix)/bin/httpd" ]; then
FILE="$(brew --prefix)/etc/httpd/extra/httpd-ssl.conf"
else
FILE="/private/etc/apache2/extra/httpd-ssl.conf"
fi
cat << EOF | sudo tee -a $FILE
<VirtualHost ${PWD##*/}.$TLD:443>
# General setup for the virtual host
VirtualDocumentRoot "${PWD}/wwwroot"
ServerName ${PWD##*/}.$TLD
ServerAlias *.${PWD##*/}.$TLD
UseCanonicalName Off
ErrorLog "${PWD}/error_log"
CustomLog "${PWD}/access_log" common
# SSL Engine Switch:
# Enable/Disable SSL for this virtual host.
SSLEngine on
SSLCipherSuite ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP:+eNULL
# Site Certificate:
SSLCertificateFile "${PWD}/ssl.crt"
# Site Private Key:
SSLCertificateKeyFile "${PWD}/ssl.key"
</VirtualHost>
EOF
# Restart Apache
print_info "Testing Apache Config and Restarting Apache"
sudo apachectl configtest
sudo apachectl restart
fi
# Add SSL Certificate to Keychain Access
# This will bypass the browser CA error notification
ask_for_confirmation "Add Certificate to Keychain Access?"
if answer_is_yes; then
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ssl.crt
fi
print_success "Done."
else
print_error "Exiting..."
fi
}
| true
|
40d6fbfe196680d67f47683109661aa3246b14ea
|
Shell
|
oncoapop/data_reporting
|
/beast_scripts/FLD-barcode_lookup.sh
|
UTF-8
| 919
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/sh
# Script to convert lookup unknown barcodes from FLD list
# Version 1.0
# by Damian Yap, Molecular Oncology, BCCRC
# on beast.cluster.bccrc.ca
# 10 Jun 2013
# This is the path and name of the file that we want to identify the barcodes in
qpath="/home/dyap/Projects/MiSeq_Data/MiSeq_QC"
query=$qpath/"Matched_BC2.csv"
# Path and name of file that contains all the barcode mappings
inpath="/home/dyap/Projects/MiSeq_Data/MiSeq_QC"
source=$inpath/"FLD_barcodes.txt"
# Output file
out=$qpath/"Matched_Identified_BC2.csv"
echo "Barcode,Name,FLD identity,Count,," > $out
# Read the barcodes from one file and match them with source mapping file
for i in `cat $query | awk -F, '{print $3}'`
do
FLD=`grep $i $source | awk -F"\t" '{print $1}'`
BC=$i
NAME=`grep $i $query | awk -F, '{print $2}'`
COUNT=`grep $i $query | awk -F, '{print $6}'`
echo $BC"," $NAME"," $FLD"," $COUNT",," >> $out
done
exit
| true
|
0f3581a1d189984d634f62bec9139a4174acd0ae
|
Shell
|
vsoftco/t-depth
|
/scripts/algo2_space_stats.sh
|
UTF-8
| 436
| 3.46875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# $@ - List of result files produced by algo2
# Computes the average maximum number of nodes per hyper-tree level and its
# standard deviation from the list of result files produced by algo2.
current_dir="$(cd "$(dirname "${BASH_SOURCE[0]:-$0}")" >/dev/null 2>&1 && pwd)"
tmp_file=$(mktemp)
bash "$current_dir"/algo2_max_nodes.sh "$@" | tee "$tmp_file"
bash "$current_dir"/avg_std.sh <"$tmp_file"
rm "$tmp_file"
| true
|
a501d3af3f3643cb140510d298a7038f6e32c73b
|
Shell
|
jonasabreu/rgit
|
/recursive-git.sh
|
UTF-8
| 2,336
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
function column-err {
printf "\033[41m%-10s\033[0m" "$1"
}
function column-ok {
printf "\033[32m%-10s\033[0m" "$1"
}
function column-neutral {
printf "\033[33m%-10s\033[0m" "$1"
}
function column-heading {
printf "\e[48;5;237m%-${HEADER_SIZE}s\e[0m" "$1"
}
function proj_name() {
column-heading $(pwd | sed 's,^.*/,,')
}
function get_curr_branch() {
GIT_CURRENT_BRANCH=`git branch | sed -n '/^*/s/^* //p'`
}
function curr_branch() {
get_curr_branch;
if [ "$GIT_CURRENT_BRANCH" == "master" ]
then
column-ok $GIT_CURRENT_BRANCH
else
column-neutral $GIT_CURRENT_BRANCH
fi
}
function dirty_state() {
if ( ! git diff --no-ext-diff --quiet --exit-code ) ||
git ls-files --others --exclude-standard --error-unmatch -- '*' >/dev/null 2>/dev/null
then
column-err "dirty"
else
column-ok "clean"
fi
}
function ahead_of_upstream() {
count=$(git rev-list --count "@{upstream}"..HEAD 2>/dev/null) || return 100
(( $count != "0"))
}
function behind_upstream() {
count=$(git rev-list --count HEAD.."@{upstream}" 2>/dev/null) || return 100
(( $count != "0"))
}
function local_state() {
ahead_of_upstream && column-err "To push" || column-ok "push ok"
}
function remote_state() {
behind_upstream && column-err "To pull" || column-ok "pull ok"
}
function project_status() {
pushd $1/.. > /dev/null
proj_name;
curr_branch
git fetch 1> /dev/null 2> /dev/null
dirty_state; local_state; remote_state;
echo
popd > /dev/null
}
export -f ahead_of_upstream
export -f behind_upstream
export -f column-err
export -f column-ok
export -f column-neutral
export -f column-heading
export -f proj_name
export -f curr_branch
export -f dirty_state
export -f local_state
export -f remote_state
export -f project_status
export -f get_curr_branch
function define_header_size() {
local size=$(find . -type d -name .git -exec sh -c "echo {} | sed -e s,/.git,, -e s,^./,, | wc -c" \; | sort -n | tail -1)
export HEADER_SIZE=${size:=10}
}
function rgit_status() {
define_header_size
find . -type d -name '.git' -exec bash -c "project_status {}" \;
}
function rgit_pull() {
find . -type d -name '.git' -exec sh -c 'pushd {}/..; pwd; git pull --ff-only; popd' \;
}
function rgit() {
case $1 in
"pull")
rgit_pull
;;
*)
rgit_status
;;
esac
}
| true
|
274cdcb53bb0d221796c01f788952fbc0a86b95a
|
Shell
|
benbalter/dotfiles
|
/lib/aliases
|
UTF-8
| 1,325
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Alias all the things!
# Git
alias gs="git status"
alias ga="git add"
alias gad="git add ."
alias gco="git checkout"
alias gc="git commit"
alias gcm="git commit -m"
alias gl="git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit --date=relative"
alias gp="git push"
alias gd="git diff"
alias gb="gh repo view --web"
alias gr="gh repo view"
alias reset="git reset HEAD --hard; git clean -f -d"
alias clean="git branch --merged | grep -v '\*' | xargs -n 1 git branch -d"
alias ggb="git commit -m ':gem: bump'"
gccd(){ gh repo clone "$1"; cd "$(basename "$1")" || exit; }
alias branches="git branch -v"
# fasd
alias j='z'
# Script/*
alias sb="script/bootstrap"
alias sc="script/cibuild"
alias console="script/console"
#simple server
alias server="python -m SimpleHTTPServer"
#ip
alias ip="curl icanhazip.com"
alias flushdns="sudo dscacheutil -flushcache;sudo killall -HUP mDNSResponder; echo 'Flushed DNS, btw here is your hosts file just in case:'; cat /etc/hosts"
# .files
alias up="~/.files/script/update"
alias setup="~/.files/script/setup"
# Misc
alias p="cd ~/projects"
alias gg="cd ~/github/github"
alias afk="/System/Library/CoreServices/Menu\ Extras/User.menu/Contents/Resources/CGSession -suspend"
alias lc='colorls -lA --sd'
| true
|
ec2241249ffb8e6b0a108cd747020e4934179be0
|
Shell
|
sirech/shelf2-backend
|
/go
|
UTF-8
| 914
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
goal_containerize() {
docker build . -t shelf2-backend
}
goal_run() {
rails s -p 3001
}
goal_outdated() {
bundle outdated
}
goal_linter() {
bundle exec rubocop
}
goal_test-unit() {
bin/rake spec:all
}
goal_test-pact() {
bin/rake pact:verify
}
goal_test-container() {
bin/rake spec:infra
}
goal_help() {
echo "usage: $0 <goal>
goal:
containerize -- Build the docker container for the app
linter -- Run the linter
run -- Start the backend application
outdated -- Check which dependencies are outdated
test-unit -- Run unit tests
test-pact -- Test the pact
test-container -- Test the container
"
exit 1
}
main() {
TARGET=${1:-}
if [ -n "${TARGET}" ] && type -t "goal_$TARGET" &>/dev/null; then
"goal_$TARGET" "${@:2}"
else
goal_help
fi
}
main "$@"
| true
|
ba4ce9f1636b7d68a897218bfa71af2303898cf8
|
Shell
|
jonnyparris/ansible-role-deploy-with-githooks
|
/templates/post-receive_githook.j2
|
UTF-8
| 748
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
GIT_DIR=/home/{{ ansible_user }}/deployment_listeners/{{ app_name }}_{{ item }}
WORK_TREE=/home/{{ ansible_user }}/{{ app_name }}_{{ item }}
while read oldrev newrev ref
do
if [[ $ref =~ .*/master$ ]];
then
echo "Master ref received. Deploying master branch to {{ item }}..."
mkdir -p $WORK_TREE
git --work-tree=$WORK_TREE --git-dir=$GIT_DIR checkout -f
mkdir -p $WORK_TREE/shared/pids $WORK_TREE/shared/sockets $WORK_TREE/shared/log
# start deploy tasks
# end deploy tasks
echo "Git hooks {{ item }} deploy complete. Godspeed!"
else
echo "Ref $ref successfully received. Doing nothing: only the master branch may be deployed on this server."
fi
done
| true
|
7a5cf60352f56ec02d5cc9cbc719458ff21ec7ef
|
Shell
|
Svennito/gsaslanguage
|
/gsas_simulate_histogram
|
UTF-8
| 4,348
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# echo the call so we can see what's going on
echo "$0 $1 $2 $3 $4 $5 $6"
# Check syntax
if [ "$1" = "" ]; then
echo "$0 called without instrument parameter file for histogram!"
exit 0
fi
if [ "$2" = "" ]; then
echo "$0 called without bank number for histogram, assuming bank 1."
bank=1
else
bank=$2
fi
if [ "$3" = "" ]; then
echo "$0 called without minimum d-spacing for histogram, assuming 1 Angstrom!"
min_d=1
else
min_d=$3
fi
if [ "$4" = "" ]; then
echo "$0 called without maximum d-spacing for histogram, assuming 5 Angstrom!"
max_d=5
else
max_d=$4
fi
if [ "$5" = "" ]; then
echo "$0 called without histogram scale, assuming scale of 1000!"
scale=1000
else
scale=$5
fi
# Add the dummy histogram
echo "k p h j" > temp.txt
echo "$1" >> temp.txt
echo "$bank" >> temp.txt
# now we need to know if we are dealing with CW or TOF to find out which region to modify...
grep HTYPE $1 | head -c 17 > htype.txt
if [ "`tail -c 1 htype.txt`" == "C" ]; then
echo "We have a constant wavelength instrument, give range limits in 2theta"
# give dummy values since we will overwrite them anyways
echo "0.5 160 " >> temp.txt
echo "0.02" >> temp.txt
else
if [ "`tail -c 1 htype.txt`" == "T" ]; then
echo "We have a time-of-flight instrument, give range limits in TOF."
echo "l" >> temp.txt
grep "INS ${bank}DCALIB" $1 | tail -c 36 > temp_log.txt
echo `cat temp_log.txt` >> temp.txt
echo "/" >> temp.txt
rm temp_log.txt
else
echo "We have a unknown type of instrument - need debugging!"
read -p "Break here with Ctrl-c and debug!"
fi
fi
echo "d" >> temp.txt
echo "$max_d" >> temp.txt
echo "$min_d" >> temp.txt
echo "/" >> temp.txt
echo "x" >> temp.txt
echo "0" >> temp.txt
echo "x" >> temp.txt
echo "x" >> temp.txt
if [ "$scale" != "" ]; then
echo "l o h c" >> temp.txt
echo "$scale" >> temp.txt
echo "x" >> temp.txt
echo "x" >> temp.txt
echo "x" >> temp.txt
fi
echo "x" >> temp.txt
expedt `cat GSAS_EXP` < temp.txt > out.txt
# read the TOF/2theta values from the otherwise meaningless d-exercise
if [ `echo "$max_d < 10" | bc ` ]; then
grep "d-minimum" out.txt | grep " of $max_d" > maxtof.txt
else
grep "d-minimum" out.txt | grep " of $max_d" > maxtof.txt
fi
tail -c 15 maxtof.txt > maxtof2.txt
head -c 8 maxtof2.txt > maxtof.txt
rm maxtof2.txt
if [ `echo "$min_d < 10" | bc ` ]; then
grep "d-minimum" out.txt | grep " of $min_d" > mintof.txt
else
grep "d-minimum" out.txt | grep " of $min_d" > mintof.txt
fi
tail -c 15 mintof.txt > mintof2.txt
head -c 8 mintof2.txt > mintof.txt
rm mintof2.txt
echo "k p h" > temp.txt
echo "e 1" >> temp.txt
# now we need to know if we are dealing with CW or TOF to find out which region to modify...
grep HTYPE $1 | head -c 17 > htype.txt
if [ "`tail -c 1 htype.txt`" == "C" ]; then
echo "We have a constant wavelength instrument, need to swap range limits"
cat maxtof.txt >> temp.txt
echo " " >> temp.txt
cat mintof.txt >> temp.txt
echo " " >> temp.txt
echo "0.02" >> temp.txt
else
if [ "`tail -c 1 htype.txt`" == "T" ]; then
echo "We have a time-of-flight instrument, give min and max range same as in d-spacing."
echo "l" >> temp.txt
echo "/" >> temp.txt
cat mintof.txt >> temp.txt
echo " " >> temp.txt
cat maxtof.txt >> temp.txt
echo " " >> temp.txt
else
echo "We have a unknown type of instrument - need debugging!"
read -p "Break here with Ctrl-c and debug!"
fi
fi
echo "x" >> temp.txt
echo "x" >> temp.txt
echo "x" >> temp.txt
echo "x" >> temp.txt
echo "x" >> temp.txt
echo "x" >> temp.txt
expedt `cat GSAS_EXP` < temp.txt > out.txt
rm htype.txt
# Test the EXP file
grep "$1" `cat GSAS_EXP`.EXP
#echo "grep returned $?"
if [ "$?" = "1" ]; then
echo "Instrument parameter file name is NOT in the EXP file"
exit 0
fi
if [ $bank -lt 10 ]; then
grep "BANK $2" `cat GSAS_EXP`.EXP
else
grep "BANK $2" `cat GSAS_EXP`.EXP
fi
#echo "grep returned $?"
if [ "$?" = "1" ]; then
echo "Entry with this bank is NOT in the EXP file"
exit 0
fi
# Which histogram in the EXP file did we add?
grep "Histogram no." out.txt > temp.txt
head -c 18 temp.txt > temp2.txt
tail -c 2 temp2.txt > hist.txt
echo "Added dummy histogram with instrument parameter file \verb=$2= (bank $2, min. d-spacing $3A, max. d-spacing $4A) as histogram `cat hist.txt` to EXP file.\newline" >> `cat GSAS_EXP`.tex
| true
|
1405150e7d2084255d98f6bd107bd9c03bcaaef7
|
Shell
|
Andrea-anliz2613/INSITOME
|
/pipeline/ancientDNA_qc_phase_impute.sh
|
UTF-8
| 18,255
| 2.953125
| 3
|
[] |
no_license
|
## must change directories towards end; not set up like other scripts
ROOT_DIR=./
INSITOME_DATA_DIR=00_data/insitomedata/
QC_DIR=${ROOT_DIR}01_QC/ancientDNA_qc/
PHASE_DIR=00_data/ancientDNA_impute/phased/
IMPUTE_DIR=00_data/ancientDNA_impute/imputeresults/
ALIGNMENTS_DIR=00_data/ancientDNA_impute/alignments/
# executable
# PLINK_EXEC=${ROOT_DIR}bin/plink
# SHAPEIT_EXEC=${ROOT_DIR}bin/shapeit
# reference data files
GENMAP_FILE=${ROOT_DIR}00_data/1000GP_Phase3/genetic_map_chr1_combined_b37.txt
# insitome files
list_files=("Europe" "FuQ0.9" "FuQ2.2M" "Haak" "Mathfull" "ScythianSarmatian" "Sko" "StMarys")
# remove chromosome 90 information from Europe DNA
for i in "${list_files[@]}"; do
# declare bed, bim, fam names
GWASDATA_BED=${INSITOME_DATA_DIR}$i.bed
GWASDATA_BIM=${INSITOME_DATA_DIR}$i.bim
GWASDATA_FAM=${INSITOME_DATA_DIR}$i.fam
# echo $GWASDATA_BED
# echo $GWASDATA_BIM
# echo $GWASDATA_FAM
# rename chromosomes x, y, xy, and mitochondrial to 23, 24, 25, and 26
sed -i '' -- 's/X/23/g' ${GWASDATA_BIM}
sed -i '' -- 's/Y/24/g' ${GWASDATA_BIM}
sed -i '' -- 's/XY/25/g' ${GWASDATA_BIM}
sed -i '' -- 's/90/26/g' ${GWASDATA_BIM}
# echo ${GWASDATA_BIM}
# STEP 1: QUALITY CONTROL
# discordant sex information
plink --bed ${GWASDATA_BED} --bim ${GWASDATA_BIM} --fam ${GWASDATA_FAM} --check-sex --out ${QC_DIR}$i
## if [ -f ${QC_DIR}$i.sexcheck ]; then
## grep "PROBLEM" ${QC_DIR}$i.sexcheck > ${QC_DIR}$i_problem
## fi
## Haak and FuQ0.9only has one gender, so was excluded
## StMarys didn't have gender issues
grep "PROBLEM" 01_QC/ancientDNA_qc/Europe.sexcheck > 01_QC/ancientDNA_qc/Europe_problem
grep "PROBLEM" 01_QC/ancientDNA_qc/FuQ2.2M.sexcheck > 01_QC/ancientDNA_qc/FuQ2.2M_problem
grep "PROBLEM" 01_QC/ancientDNA_qc/Mathfull.sexcheck > 01_QC/ancientDNA_qc/Mathfull_problem
grep "PROBLEM" 01_QC/ancientDNA_qc/ScythianSarmatian.sexcheck > 01_QC/ancientDNA_qc/ScythianSarmatian_problem
grep "PROBLEM" 01_QC/ancientDNA_qc/Sko.sexcheck > 01_QC/ancientDNA_qc/Sko_problem
## Identification of individuals with elevated missing data rates or outlying heterozygosity rate
## find missing SNP; output = hgdp.imiss and hgdp.lmiss file
plink --bfile ${INSITOME_DATA_DIR}$i --missing --out ${QC_DIR}$i
# find heterozygous genotypes; output = hgdp.het file
plink --bfile ${INSITOME_DATA_DIR}$i --het --out ${QC_DIR}$i
# calculate heterozygosity rate per individual and save as pdf
# investigate plot to see if any individuals need to be removed
# did not loop in R file
R CMD BATCH script/imiss-vs-het.Rscript ${QC_DIR}$i
# identify duplicated or replicated individuals
# skipping to keep high LD
plink --file --exclude 00_data/high-LD-regions.txt --range --indep-pairwise 50 5 0.2 --out
## generate pair-wise IBS identity by state - detect individuals who looked more different than
## they would in a random, homogenous sample
## did not include extract argument because skipped removing high LD
plink --bfile ${INSITOME_DATA_DIR}$i -genome --out ${QC_DIR}$i
# identify all pairs of individuals with IBD > 0.185 to remove individual with lowest call rate
perl script/run-IBD-QC-ancientDNA.pl ${QC_DIR}$i
# skip steps 14-19 of paper
# note: couldnt figure out how to rename the files appropriately in perl so must rename now for next steps to work
# remove individuals failing QC
cat ${QC_DIR}fail-*-$i.txt | sort -k1 | uniq > ${QC_DIR}fail-$i-qc-inds.txt
# ISSUE: Error: Line 1 of --remove file has fewer tokens than expected.
# proceeding with only Sko bed format files
plink --bfile --bed ${GWASDATA_BED} --bim ${GWASDATA_BIM} --fam ${GWASDATA_FAM} \
--no-fid --no-parents --no-sex --no-pheno \
--remove ${QC_DIR}fail-$i-qc-inds.txt \
--make-bed \
--out ${QC_DIR}clean-inds-$i-data
done
# Sko bed format files
# identify markers with excessive missing data rate
plink --bfile ${QC_DIR}clean-inds-Sko-data --missing --out ${QC_DIR}clean-inds-Sko-data-excess-missing
# Plot a histogram of the missing genotype rate to identify a threshold for extreme
# genotype failure rate
# paper used call-rate threshold of 3%
R CMD BATCH script/lmiss-hist.Rscript ${QC_DIR}Sko-R-miss-geno
# SKIPPED: Test markers for different genotype call rates between cases and contols
# Remove all markers failing QC
# geno = filters out samples exceeding missing genotype rate of 3%
# maf = filters out below minor allele frequency threshold
# hwe = filters out below Hardy-Weinberg equilibrium exact test p-value
#Error: All variants excluded due to missing genotype data (--geno).
# ERROR FOUND IN Mathfull, FUQ2.2M and FUQ0.9
plink --bed ${QC_DIR}clean-inds-Sko-data.bed --bim ${QC_DIR}clean-inds-Sko-data.bim --fam ${QC_DIR}clean-inds-Sko-data.fam \
-maf 0.01 --geno 0.03 --hwe 0.00001 --make-bed --out ${QC_DIR}clean-Sko-data
plink --bed ${INSITOME_DATA_DIR}StMarys.bed --bim ${INSITOME_DATA_DIR}StMarys.bim --fam ${INSITOME_DATA_DIR}StMarys.fam \
-maf 0.01 --geno 0.03 --hwe 0.00001 --make-bed --out ${QC_DIR}clean_QC_marker_StMarys
plink --bed ${INSITOME_DATA_DIR}Europe.bed --bim ${INSITOME_DATA_DIR}Europe.bim --fam ${INSITOME_DATA_DIR}Europe.fam \
-maf 0.01 --geno 0.03 --hwe 0.00001 --make-bed --out ${QC_DIR}clean_QC_marker_Europe
plink --bed ${INSITOME_DATA_DIR}FuQ0.9.bed --bim ${INSITOME_DATA_DIR}FuQ0.9.bim --fam ${INSITOME_DATA_DIR}FuQ0.9.fam \
-maf 0.01 --geno 0.03 --hwe 0.00001 --make-bed --out ${QC_DIR}clean_QC_marker_FuQ0.9
plink --bed ${INSITOME_DATA_DIR}FuQ2.2M.bed --bim ${INSITOME_DATA_DIR}FuQ2.2M.bim --fam ${INSITOME_DATA_DIR}FuQ2.2M.fam \
-maf 0.01 --geno 0.03 --hwe 0.00001 --make-bed --out ${QC_DIR}clean_QC_marker_FuQ2.2M
plink --bed ${INSITOME_DATA_DIR}ScythianSarmatian.bed --bim ${INSITOME_DATA_DIR}ScythianSarmatian.bim --fam ${INSITOME_DATA_DIR}ScythianSarmatian.fam \
-maf 0.01 --geno 0.03 --hwe 0.00001 --make-bed --out ${QC_DIR}clean_QC_marker_ScythianSarmatian
plink --bed ${INSITOME_DATA_DIR}Haak.bed --bim ${INSITOME_DATA_DIR}Haak.bim --fam ${INSITOME_DATA_DIR}Haak.fam \
-maf 0.01 --geno 0.03 --hwe 0.00001 --make-bed --out ${QC_DIR}clean_QC_marker_Haak
plink --bed ${INSITOME_DATA_DIR}Mathfull.bed --bim ${INSITOME_DATA_DIR}Mathfull.bim --fam ${INSITOME_DATA_DIR}Mathfull.fam \
-maf 0.01 --geno 0.03 --hwe 0.00001 --make-bed \
--out ${QC_DIR}clean_QC_marker_Mathfull
# no lifting over necessary for sko, stmarys, FUQ, haak, and europe
# unknown versions for ScythianSarmatian and mathfull
# phasing
# extract chromosome 1 data
# make ped and map files including only chr 1
plink --bfile ${QC_DIR}clean_QC_marker_Europe --chr 1 --recode --out ${INSITOME_DATA_DIR}clean_QC_marker_Europe_chr1
# make bed, bim, fam files including only chr 1
plink --file ${INSITOME_DATA_DIR}clean_QC_marker_Europe_chr1 --make-bed --out ${INSITOME_DATA_DIR}clean_QC_marker_Europe_chr1
plink --bfile ${QC_DIR}clean-Sko-data --chr 1 --recode --out ${INSITOME_DATA_DIR}clean-Sko-data_chr1
plink --file ${INSITOME_DATA_DIR}clean-Sko-data_chr1 --make-bed --out ${INSITOME_DATA_DIR}clean-Sko-data_chr1
plink --bfile ${QC_DIR}clean_QC_marker_Haak --chr 1 --recode --out ${INSITOME_DATA_DIR}clean_QC_marker_Haak_chr1
plink --file ${INSITOME_DATA_DIR}clean_QC_marker_Haak_chr1 --make-bed --out ${INSITOME_DATA_DIR}clean_QC_marker_Haak_chr1
plink --bfile ${QC_DIR}clean_QC_marker_ScythianSarmatian --chr 1 --recode --out ${INSITOME_DATA_DIR}clean_QC_marker_ScythianSarmatian_chr1
plink --file ${INSITOME_DATA_DIR}clean_QC_marker_ScythianSarmatian_chr1 --make-bed --out ${INSITOME_DATA_DIR}clean_QC_marker_ScythianSarmatian_chr1
plink --bfile ${QC_DIR}clean_QC_marker_StMarys --chr 1 --recode --out ${INSITOME_DATA_DIR}clean_QC_marker_StMarys_chr1
plink --file ${INSITOME_DATA_DIR}clean_QC_marker_StMarys_chr1 --make-bed --out ${INSITOME_DATA_DIR}clean_QC_marker_StMarys_chr1
# check strand alignment before pre-phasing
# -P = ped format
shapeit \
-check \
-P ${INSITOME_DATA_DIR}clean_QC_marker_Europe_chr1 \
--input-ref 00_data/1000GP_Phase3/1000GP_Phase3_chr1.hap.gz 00_data/1000GP_Phase3/1000GP_Phase3_chr1.legend.gz 00_data/1000GP_Phase3/1000GP_Phase3.sample \
--output-log ${ALIGNMENTS_DIR}unphased-clean_Europe_chr1.alignments
shapeit \
-check \
-P ${INSITOME_DATA_DIR}clean_QC_marker_Haak_chr1 \
--input-ref 00_data/1000GP_Phase3/1000GP_Phase3_chr1.hap.gz 00_data/1000GP_Phase3/1000GP_Phase3_chr1.legend.gz 00_data/1000GP_Phase3/1000GP_Phase3.sample \
--output-log ${ALIGNMENTS_DIR}unphased-clean_Haak_chr1.alignments
shapeit \
-check \
-P ${INSITOME_DATA_DIR}clean_QC_marker_ScythianSarmatian_chr1 \
--input-ref 00_data/1000GP_Phase3/1000GP_Phase3_chr1.hap.gz 00_data/1000GP_Phase3/1000GP_Phase3_chr1.legend.gz 00_data/1000GP_Phase3/1000GP_Phase3.sample \
--output-log ${ALIGNMENTS_DIR}unphased-clean_ScythianSarmatian.alignments
shapeit \
-check \
-P ${INSITOME_DATA_DIR}clean_QC_marker_StMarys_chr1 \
--input-ref 00_data/1000GP_Phase3/1000GP_Phase3_chr1.hap.gz 00_data/1000GP_Phase3/1000GP_Phase3_chr1.legend.gz 00_data/1000GP_Phase3/1000GP_Phase3.sample \
--output-log ${ALIGNMENTS_DIR}unphased-clean_StMarys_chr1.alignments
shapeit \
-check \
-P ${INSITOME_DATA_DIR}clean-Sko-data_chr1 \
--input-ref 00_data/1000GP_Phase3/1000GP_Phase3_chr1.hap.gz 00_data/1000GP_Phase3/1000GP_Phase3_chr1.legend.gz 00_data/1000GP_Phase3/1000GP_Phase3.sample \
--output-log ${ALIGNMENTS_DIR}unphased-clean_Sko_chr1.alignments
# "Europe"
# new_list_files=("Haak" "ScythianSarmatian" "Sko" "StMarys")
# for i in "${new_list_files[@]}"; do
# # declare bed, bim, fam names
# GWASDATA_BED=${INSITOME_DATA_DIR}$i.bed
# GWASDATA_BIM=${INSITOME_DATA_DIR}$i.bim
# GWASDATA_FAM=${INSITOME_DATA_DIR}$i.fam
# # echo $GWASDATA_BED
# # echo $GWASDATA_BIM
# # echo $GWASDATA_FAM
# shapeit \
# -check \
# -P ${INSITOME_DATA_DIR}clean*$i*_chr1 \
# --input-ref 00_data/1000GP_Phase3/1000GP_Phase3_chr1.hap.gz 00_data/1000GP_Phase3/1000GP_Phase3_chr1.legend.gz 00_data/1000GP_Phase3/1000GP_Phase3.sample \
# --output-log ${ALIGNMENTS_DIR}unphased-clean_$i_chr1.alignments
# done
## make a list of sites to flip and exclude
# grab the main id position in the 3rd field
# remove duplicates from final list
cat ${ALIGNMENTS_DIR}unphased-clean_Europe_chr1.alignments.snp.strand | grep "Strand" | cut -f 4 | uniq > ${ALIGNMENTS_DIR}unphased-clean_Europe_chr_1_strand_flip.txt
cat ${ALIGNMENTS_DIR}unphased-clean_Europe_chr1.alignments.snp.strand | grep "Missing" | cut -f 4 | uniq > ${ALIGNMENTS_DIR}unphased-clean_Europe_chr_1_missing.txt
cat ${ALIGNMENTS_DIR}unphased-clean_ScythianSarmatian_chr1.alignments.snp.strand | grep "Strand" | cut -f 4 | uniq > ${ALIGNMENTS_DIR}unphased-clean_ScythianSarmatian_chr_1_strand_flip.txt
cat ${ALIGNMENTS_DIR}unphased-clean_ScythianSarmatian_chr1.alignments.snp.strand | grep "Missing" | cut -f 4 | uniq > ${ALIGNMENTS_DIR}unphased-clean_ScythianSarmatian_chr_1_missing.txt
cat ${ALIGNMENTS_DIR}unphased-clean_Haak_chr1.alignments.snp.strand | grep "Strand" | cut -f 4 | uniq > ${ALIGNMENTS_DIR}unphased-clean_Haak_chr_1_strand_flip.txt
cat ${ALIGNMENTS_DIR}unphased-clean_Haak_chr1.alignments.snp.strand | grep "Missing" | cut -f 4 | uniq > ${ALIGNMENTS_DIR}unphased-clean_Haak_chr_1_missing.txt
cat ${ALIGNMENTS_DIR}unphased-clean_StMarys_chr1.alignments.snp.strand | grep "Strand" | cut -f 4 | uniq > ${ALIGNMENTS_DIR}unphased-clean_StMarys_chr_1_strand_flip.txt
cat ${ALIGNMENTS_DIR}unphased-clean_StMarys_chr1.alignments.snp.strand | grep "Missing" | cut -f 4 | uniq > ${ALIGNMENTS_DIR}unphased-clean_StMarys_chr_1_missing.txt
cat ${ALIGNMENTS_DIR}unphased-clean_Sko_chr1.alignments.snp.strand | grep "Strand" | cut -f 4 | uniq > ${ALIGNMENTS_DIR}unphased-clean_Sko_chr_1_strand_flip.txt
cat ${ALIGNMENTS_DIR}unphased-clean_Sko_chr1.alignments.snp.strand | grep "Missing" | cut -f 4 | uniq > ${ALIGNMENTS_DIR}unphased-clean_Sko_chr_1_missing.txt
## flip strand and exclude SNPs missing in reference panel using plink
## make new bed files
## Europe: ERROR
## Duplicate id error in map file: Error: Duplicate ID 'Aff23-50009726'
## tried removing duplicate; ERROR: Error: Line 1 of --remove file has fewer tokens than expected.
plink \
--file ${INSITOME_DATA_DIR}clean_QC_marker_Europe_chr1 \
--remove ${ALIGNMENTS_DIR}remove_indiv_Europe.txt \
--recode \
--out ${ALIGNMENTS_DIR}no_dup_unphased_Europe_chr_1
plink \
--file ${INSITOME_DATA_DIR}clean_QC_marker_Europe_chr1 \
--flip ${ALIGNMENTS_DIR}unphased-clean_Europe_chr_1_strand_flip.txt \
--exclude ${ALIGNMENTS_DIR}unphased-clean_Europe_chr_1_missing.txt \
--recode \
--out ${ALIGNMENTS_DIR}aligned_unphased_Europe_chr_1
# Scythian
plink \
--file ${INSITOME_DATA_DIR}clean_QC_marker_ScythianSarmatian_chr1 \
--exclude ${ALIGNMENTS_DIR}unphased-clean_ScythianSarmatian_chr_1_missing.txt \
--recode \
--out ${ALIGNMENTS_DIR}aligned_unphased_ScythianSarmatian_chr_1
# Haak
# plink \
# --file ${INSITOME_DATA_DIR}clean_QC_marker_Haak_chr1 \
# --flip ${ALIGNMENTS_DIR}unphased-clean_Haak_chr_1_strand_flip.txt \
# --exclude ${ALIGNMENTS_DIR}unphased-clean_Haak_chr_1_missing.txt \
# --recode \
# --out ${ALIGNMENTS_DIR}aligned_unphased_Haak_chr_1
plink \
--file ${INSITOME_DATA_DIR}clean_QC_marker_Haak_chr1 \
--exclude ${ALIGNMENTS_DIR}combined_unphased-clean_Haak_chr_1_missing.txt \
--recode \
--out ${ALIGNMENTS_DIR}aligned_unphased_Haak_chr_1
# StMarys ERROR
# Same error as Europe
# Error: Duplicate ID 'rs10737260'.
# Error: Line 1 of --remove file has fewer tokens than expected.
plink \
--file ${INSITOME_DATA_DIR}clean_QC_marker_StMarys_chr1 \
--remove ${ALIGNMENTS_DIR}remove_indiv_StMarys.txt \
--recode \
--out ${ALIGNMENTS_DIR}no_dup_unphased_StMarys_chr_1
# Error: Duplicate ID 'rs10737260'.
plink \
--file ${INSITOME_DATA_DIR}clean_QC_marker_StMarys_chr1 \
--flip ${ALIGNMENTS_DIR}unphased-clean_StMarys_chr_1_strand_flip.txt \
--exclude ${ALIGNMENTS_DIR}unphased-clean_StMarys_chr_1_missing.txt \
--recode \
--out ${ALIGNMENTS_DIR}aligned_unphased_StMarys_chr_1
# Sko
# plink \
# --file ${INSITOME_DATA_DIR}clean-Sko-data_chr1 \
# --flip ${ALIGNMENTS_DIR}unphased-clean_Sko_chr_1_strand_flip.txt \
# --exclude ${ALIGNMENTS_DIR}unphased-clean_Sko_chr_1_missing.txt \
# --recode \
# --out ${ALIGNMENTS_DIR}aligned_unphased_Sko_chr_1
plink \
--file ${INSITOME_DATA_DIR}clean-Sko-data_chr1 \
--flip ${ALIGNMENTS_DIR}unphased-clean_Sko_chr_1_strand_flip.txt \
--exclude ${ALIGNMENTS_DIR}combined_unphased-clean_Sko_chr_1_missing.txt \
--recode \
--out ${ALIGNMENTS_DIR}aligned_unphased_Sko_chr_1
# phase using SHAPEIT
## ScythianSarmatian
shapeit \
--input-ped ${ALIGNMENTS_DIR}aligned_unphased_ScythianSarmatian_chr_1.ped ${ALIGNMENTS_DIR}aligned_unphased_ScythianSarmatian_chr_1.map \
--input-map 00_data/1000GP_Phase3/genetic_map_chr1_combined_b37.txt \
-R 00_data/1000GP_Phase3/1000GP_Phase3_chr1.hap.gz 00_data/1000GP_Phase3/1000GP_Phase3_chr1.legend.gz 00_data/1000GP_Phase3/1000GP_Phase3.sample \
--output-max ${PHASE_DIR}aligned_ScythianSarmatian_chr_1.phased \
--thread 4 \
--output-log ${PHASE_DIR}aligned_ScythianSarmatian_chr_1_LOG.phased
## Haak
shapeit \
--input-ped ${ALIGNMENTS_DIR}aligned_unphased_Haak_chr_1.ped ${ALIGNMENTS_DIR}aligned_unphased_Haak_chr_1.map \
--input-map 00_data/1000GP_Phase3/genetic_map_chr1_combined_b37.txt \
-R 00_data/1000GP_Phase3/1000GP_Phase3_chr1.hap.gz 00_data/1000GP_Phase3/1000GP_Phase3_chr1.legend.gz 00_data/1000GP_Phase3/1000GP_Phase3.sample \
--output-max ${PHASE_DIR}aligned_Haak_chr_1.phased \
--thread 4 \
--output-log ${PHASE_DIR}aligned_Haak_chr_1_LOG.phased
# ERROR
# ERROR: Reference and Main panels are not well aligned:
# * #Missing sites in reference panel = 0
# * #Misaligned sites between panels = 13
# * #Multiple alignments between panels = 0
# will include the flipped strands into the mssing file and remove; file combined_unphased-clean_Haak_chr_1_missing.txt
# Aff23-15487538
# Aff23-10183760
# Aff23-10986945
# Aff23-6298600
# Aff23-5505842
# Aff23-9585960
# Aff23-13063949
# Aff23-16273477
# Aff23-50008367
# Aff23-5274155
# Aff23-5372333
# Aff23-5527576
# Aff23-5796562
# Aff23-6575511
# Aff23-7319400
# Aff23-7322716
# Aff23-7406976
# Aff23-7507973
# Aff23-50020381
## Sko
## ERROR: Reference and Main panels are not well aligned: combined_unphased-clean_Sko_chr_1_missing.txt
# * #Missing sites in reference panel = 0
# * #Misaligned sites between panels = 9
# * #Multiple alignments between panels = 0
# ADDED the following to new missing file to remove
# rs476527
# rs4655836
# rs7532826
# rs10912657
# rs12031354
# rs1795244
# rs10798410
# rs2609351
# rs12405469
# rs2502408
# rs637280
# rs570661
shapeit \
--input-ped ${ALIGNMENTS_DIR}aligned_unphased_Sko_chr_1.ped ${ALIGNMENTS_DIR}aligned_unphased_Sko_chr_1.map \
--input-map 00_data/1000GP_Phase3/genetic_map_chr1_combined_b37.txt \
-R 00_data/1000GP_Phase3/1000GP_Phase3_chr1.hap.gz 00_data/1000GP_Phase3/1000GP_Phase3_chr1.legend.gz 00_data/1000GP_Phase3/1000GP_Phase3.sample \
--output-max ${PHASE_DIR}aligned_Sko_chr_1.phased \
--thread 4 \
--output-log ${PHASE_DIR}aligned_Sko_chr_1_LOG.phased
# Imputation
## Sko
impute2 \
-use_prephased_g \
-known_haps_g ${PHASE_DIR}aligned_Sko_chr_1.phased.haps \
-m 00_data/1000GP_Phase3/genetic_map_chr1_combined_b37.txt \
-h 00_data/1000GP_Phase3/1000GP_Phase3_chr1.hap.gz \
-l 00_data/1000GP_Phase3/1000GP_Phase3_chr1.legend.gz \
-int 168549811 170549811\
-Ne 20000 \
-allow_large_regions \
-seed 367946 \
-phase \
-o ${IMPUTE_DIR}Sko_imputed.gen
## Haak
impute2 \
-use_prephased_g \
-known_haps_g ${PHASE_DIR}aligned_Haak_chr_1.phased.haps \
-m 00_data/1000GP_Phase3/genetic_map_chr1_combined_b37.txt \
-h 00_data/1000GP_Phase3/1000GP_Phase3_chr1.hap.gz \
-l 00_data/1000GP_Phase3/1000GP_Phase3_chr1.legend.gz \
-int 168549811 170549811\
-Ne 20000 \
-allow_large_regions \
-seed 367946 \
-phase \
-o ${IMPUTE_DIR}Haak_imputed.gen
## ScythianSarmatian
impute2 \
-use_prephased_g \
-known_haps_g ${PHASE_DIR}aligned_ScythianSarmatian_chr_1.phased.haps \
-m 00_data/1000GP_Phase3/genetic_map_chr1_combined_b37.txt \
-h 00_data/1000GP_Phase3/1000GP_Phase3_chr1.hap.gz \
-l 00_data/1000GP_Phase3/1000GP_Phase3_chr1.legend.gz \
-int 168549811 170549811\
-Ne 20000 \
-allow_large_regions \
-seed 367946 \
-phase \
-o ${IMPUTE_DIR}ScythianSarmatian_imputed.gen
| true
|
6819c9cfccb56d7280821da0d1ec95ee1d0b71c1
|
Shell
|
prinschristo4/UNIX-2018-CMR-
|
/KISHAN-LS5.sh
|
UTF-8
| 203
| 3.4375
| 3
|
[] |
no_license
|
count=0
echo "enter the number"
read n
for (( i=1;i<=n;i++))
do
if [ `expr $n % $i` -eq 0 ]
then
((count++))
fi
done
if [ $count -eq 2 ]
then
echo "its a prime number"
else
echo "not prime"
fi
| true
|
df6819e0064a2d01271ab7c3199f49d08f9421b2
|
Shell
|
fac21/E-commerce-app-CSCS
|
/scripts/db:setup
|
UTF-8
| 336
| 2.859375
| 3
|
[] |
no_license
|
#! /bin/bash
# stop script when an error occurs
set -e
psql -q -c "CREATE USER username SUPERUSER PASSWORD '123'"
echo "Created Postgres user 'username'"
psql -q -c "CREATE DATABASE projectcscs WITH OWNER username"
echo "Created Postgres database 'projectcscs'"
# cp -r example.env .env
# echo "Created .env containing DATABASE_URL"
| true
|
f8441c8bc4e39f63234afb0102fd16559ef7bd45
|
Shell
|
skiv71/BMSLink-core
|
/com/bms-sql
|
UTF-8
| 202
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# args
sql=$@
# initialise
db=bmslink
user=root
pass=
# user/pass
[ -n "$db" ] && [ -n "$user" ] || exit 1
[ -n "$pass" ] && pass=-p$pass
# main
mysql -u $user $pass $db -sNe "$sql"
| true
|
4a6ae9b5c048839297a46697ef636c5c8cdbe9dd
|
Shell
|
otus-devops-2019-02/v1k3ng_infra
|
/packer/scripts/install_mongodb.sh
|
UTF-8
| 418
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
# install Mongo-DB
# add key
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
# add repo
echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" > /etc/apt/sources.list.d/mongodb-org-3.2.list
# update info about repo
apt update
# install package
apt install -y mongodb-org
# enable and start mongodb
systemctl enable mongod
systemctl start mongod
| true
|
3d2913937316db2a240374c3f93dad954556fd33
|
Shell
|
lrakai/quickstart-linux-bastion
|
/scripts/backend_bootstrap.sh
|
UTF-8
| 14,300
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Bastion Bootstrapping
# authors: tonynv@amazon.com, sancard@amazon.com, ianhill@amazon.com, logan.rakai@cloudacademy.com
# NOTE: This requires GNU getopt. On Mac OS X and FreeBSD you must install GNU getopt and mod the checkos function so that it's supported
# Configuration
PROGRAM='Backend'
##################################### Functions Definitions
# Abort unless this script is running on Linux; trace the function name
# on success so the bootstrap log shows progress.
function checkos () {
    platform='unknown'
    unamestr=$(uname)
    case "${unamestr}" in
        Linux)
            platform='linux'
            ;;
        *)
            echo "[WARNING] This script is not supported on MacOS or freebsd"
            exit 1
            ;;
    esac
    echo "${FUNCNAME[0]} Ended"
}
# Discover EC2 instance metadata (region, eth0 MAC, instance id, private
# IP), read the CLOUDWATCHGROUP setting out of the cloud-init user data,
# and create the session-log layout under /var/log/backend.  Everything
# gathered here is exported for the per-distro setup functions below.
function setup_environment_variables() {
REGION=$(curl -sq http://169.254.169.254/latest/meta-data/placement/availability-zone/)
#ex: us-east-1a => us-east-1
REGION=${REGION: :-1}
# MAC address of eth0; used to query this interface's private IP from
# the metadata service below.
ETH0_MAC=$(/sbin/ip link show dev eth0 | /bin/egrep -o -i 'link/ether\ ([0-9a-z]{2}:){5}[0-9a-z]{2}' | /bin/sed -e 's,link/ether\ ,,g')
_userdata_file="/var/lib/cloud/instance/user-data.txt"
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
LOCAL_IP_ADDRESS=$(curl -sq 169.254.169.254/latest/meta-data/network/interfaces/macs/${ETH0_MAC}/local-ipv4s/)
# CloudWatch log group name, supplied in user data as CLOUDWATCHGROUP=<name>.
CWG=$(grep CLOUDWATCHGROUP ${_userdata_file} | sed 's/CLOUDWATCHGROUP=//g')
# LOGGING CONFIGURATION
BACKEND_MNT="/var/log/backend"
BACKEND_LOG="backend.log"
echo "Setting up backend session log in ${BACKEND_MNT}/${BACKEND_LOG}"
mkdir -p ${BACKEND_MNT}
BACKEND_LOGFILE="${BACKEND_MNT}/${BACKEND_LOG}"
BACKEND_LOGFILE_SHADOW="${BACKEND_MNT}/.${BACKEND_LOG}"
touch ${BACKEND_LOGFILE}
# Hard link: the hidden "shadow" file is what the CloudWatch agent tails
# (see the per-distro functions).
ln ${BACKEND_LOGFILE} ${BACKEND_LOGFILE_SHADOW}
mkdir -p /usr/bin/backend
touch /tmp/messages
chmod 770 /tmp/messages
export REGION ETH0_MAC CWG BACKEND_MNT BACKEND_LOG BACKEND_LOGFILE BACKEND_LOGFILE_SHADOW \
LOCAL_IP_ADDRESS INSTANCE_ID
}
# Install the AWS CLI via pip when it is not already on PATH.
function verify_dependencies(){
    # 'command -v' is the portable, silent replacement for the original
    # '[[ "a$(which aws)" == "a" ]]' trick ('which' is an external tool
    # whose presence and output format are not guaranteed).
    if ! command -v aws >/dev/null 2>&1; then
        pip install awscli
    fi
    echo "${FUNCNAME[0]} Ended"
}
# Print a short usage synopsis for this script to stdout.
function usage() {
    printf '%s <usage>\n' "$0"
    printf '%s\n' " "
    printf '%s\n' "options:"
    # %b interprets the \t escape, matching the original 'echo -e'.
    printf '%b\n' '--help \t Show options for this script'
}
# Report PASS/FAIL based on the exit status of whatever command ran
# immediately before this function was invoked; abort the script on
# failure.  Must be called directly after the command being checked.
function chkstatus () {
    local rv=$?
    if [ "$rv" -eq 0 ]; then
        echo "Script [PASS]"
    else
        echo "Script [FAILED]" >&2
        exit 1
    fi
}
# Print a short OS identifier on stdout — "Ubuntu", "AMZN", "CentOS", or
# "Operating System Not Found" — derived from the NAME= field of
# /etc/os-release.  Callers capture stdout, so the function's trace line
# is appended to the cfn-init log instead of being printed.
function osrelease () {
    # grep reads the file directly (no 'cat |' pipeline, no backticks);
    # tr strips the quotes and sed drops the NAME= prefix.  The original
    # also ran "sed 's/\n//g'", which is a no-op (sed never sees the
    # trailing newline in its pattern space) and has been removed.
    OS=$(grep '^NAME=' /etc/os-release | tr -d '"' | sed 's/NAME=//g')
    if [ "${OS}" == "Ubuntu" ]; then
        echo "Ubuntu"
    elif [ "${OS}" == "Amazon Linux AMI" ] || [ "${OS}" == "Amazon Linux" ]; then
        echo "AMZN"
    elif [ "${OS}" == "CentOS Linux" ]; then
        echo "CentOS"
    else
        echo "Operating System Not Found"
    fi
    echo "${FUNCNAME[0]} Ended" >> /var/log/cfn-init.log
}
# Force every SSH login through a custom shell wrapper that only permits
# interactive sessions (plus plain ssh/scp commands) and appends each
# supplied command to the backend session log.  Writes the wrapper to
# /usr/bin/backend/shell and registers it via sshd's ForceCommand.
function harden_ssh_security () {
# Allow ec2-user only to access this folder and its content
#chmod -R 770 /var/log/backend
#setfacl -Rdm other:0 /var/log/backend
# Make OpenSSH execute a custom script on logins
echo -e "\nForceCommand /usr/bin/backend/shell" >> /etc/ssh/sshd_config
# The heredoc below is the wrapper's literal source (quoted 'EOF', so
# nothing is expanded at write time).
cat <<'EOF' >> /usr/bin/backend/shell
BACKEND_mnt="/var/log/backend"
BACKEND_log="backend.log"
# Check that the SSH client did not supply a command. Only SSH to instance should be allowed.
export Allow_SSH="ssh"
export Allow_SCP="scp"
if [[ -z $SSH_ORIGINAL_COMMAND ]] || [[ $SSH_ORIGINAL_COMMAND =~ ^$Allow_SSH ]] || [[ $SSH_ORIGINAL_COMMAND =~ ^$Allow_SCP ]]; then
#Allow ssh to instance and log connection
if [ -z "$SSH_ORIGINAL_COMMAND" ]; then
/bin/bash
exit 0
else
$SSH_ORIGINAL_COMMAND
fi
log_shadow_file_location="${BACKEND_mnt}/.${BACKEND_log}"
log_file=`echo "$log_shadow_file_location"`
DATE_TIME_WHOAMI="`whoami`:`date "+%Y-%m-%d %H:%M:%S"`"
LOG_ORIGINAL_COMMAND=`echo "$DATE_TIME_WHOAMI:$SSH_ORIGINAL_COMMAND"`
echo "$LOG_ORIGINAL_COMMAND" >> "${BACKEND_mnt}/${BACKEND_log}"
log_dir="/var/log/backend/"
else
# The "script" program could be circumvented with some commands
# (e.g. bash, nc). Therefore, I intentionally prevent users
# from supplying commands.
echo "This backend supports interactive sessions only. Do not supply a command"
exit 1
fi
EOF
# Make the custom script executable
chmod a+x /usr/bin/backend/shell
# SELinux (CentOS): label the wrapper so sshd may execute it.
release=$(osrelease)
if [ "${release}" == "CentOS" ]; then
semanage fcontext -a -t ssh_exec_t /usr/bin/backend/shell
fi
echo "${FUNCNAME[0]} Ended"
}
# Amazon Linux (1 and 2) specific setup: log every interactive command
# via PROMPT_COMMAND/logger, lock down the session-log files, install
# and configure the CloudWatch Logs agent, and schedule nightly yum
# security updates.
function amazon_os () {
echo "${FUNCNAME[0]} Started"
chown root:ec2-user /usr/bin/script
service sshd restart
echo -e "\nDefaults env_keep += \"SSH_CLIENT\"" >>/etc/sudoers
# Quoted 'EOF': written to /etc/bashrc literally, expanded per login.
cat <<'EOF' >> /etc/bashrc
#Added by linux backend bootstrap
declare -rx IP=$(echo $SSH_CLIENT | awk '{print $1}')
EOF
echo " declare -rx BACKEND_LOG=${BACKEND_MNT}/${BACKEND_LOG}" >> /etc/bashrc
cat <<'EOF' >> /etc/bashrc
declare -rx PROMPT_COMMAND='history -a >(logger -t "ON: $(date) [FROM]:${IP} [USER]:${USER} [PWD]:${PWD}" -s 2>>${BACKEND_LOG})'
EOF
# Group-writable + append-only: users can add log lines but not rewrite
# or truncate the session log.
chown root:ec2-user ${BACKEND_MNT}
chown root:ec2-user ${BACKEND_LOGFILE}
chown root:ec2-user ${BACKEND_LOGFILE_SHADOW}
chmod 662 ${BACKEND_LOGFILE}
chmod 662 ${BACKEND_LOGFILE_SHADOW}
chattr +a ${BACKEND_LOGFILE}
chattr +a ${BACKEND_LOGFILE_SHADOW}
touch /tmp/messages
chown root:ec2-user /tmp/messages
#Install CloudWatch Log service on AMZN
yum update -y
yum install -y awslogs
echo "file = ${BACKEND_LOGFILE_SHADOW}" >> /tmp/groupname.txt
echo "log_group_name = ${CWG}" >> /tmp/groupname.txt
cat <<'EOF' >> ~/cloudwatchlog.conf
[/var/log/backend]
datetime_format = %b %d %H:%M:%S
buffer_duration = 5000
log_stream_name = {instance_id}
initial_position = start_of_file
EOF
# Splice our section into awslogs.conf: keep everything above the stock
# [/var/log/messages] section, then append our config and group name.
LINE=$(cat -n /etc/awslogs/awslogs.conf | grep '\[\/var\/log\/messages\]' | awk '{print $1}')
END_LINE=$(echo $((${LINE}-1)))
head -${END_LINE} /etc/awslogs/awslogs.conf > /tmp/awslogs.conf
cat /tmp/awslogs.conf > /etc/awslogs/awslogs.conf
cat ~/cloudwatchlog.conf >> /etc/awslogs/awslogs.conf
cat /tmp/groupname.txt >> /etc/awslogs/awslogs.conf
export TMPREGION=$(grep region /etc/awslogs/awscli.conf)
sed -i.back "s/${TMPREGION}/region = ${REGION}/g" /etc/awslogs/awscli.conf
#Restart awslogs service
local OS=`cat /etc/os-release | grep '^NAME=' | tr -d \" | sed 's/\n//g' | sed 's/NAME=//g'`
if [ "$OS" == "Amazon Linux" ]; then # amazon linux 2
systemctl start awslogsd.service
systemctl enable awslogsd.service
else
service awslogs restart
chkconfig awslogs on
fi
#Run security updates
cat <<'EOF' >> ~/mycron
0 0 * * * yum -y update --security
EOF
crontab ~/mycron
rm ~/mycron
echo "${FUNCNAME[0]} Ended"
}
# Ubuntu-specific setup: mirror of amazon_os for Debian-based hosts —
# per-login command logging, locked-down log files, CloudWatch Logs
# agent (python installer), and nightly unattended security upgrades.
function ubuntu_os () {
chown syslog:adm /var/log/backend
chown root:ubuntu /usr/bin/script
# Quoted 'EOF': written to /etc/bash.bashrc literally, expanded per login.
cat <<'EOF' >> /etc/bash.bashrc
#Added by linux backend bootstrap
declare -rx IP=$(who am i --ips|awk '{print $5}')
EOF
echo " declare -rx BACKEND_LOG=${BACKEND_MNT}/${BACKEND_LOG}" >> /etc/bash.bashrc
cat <<'EOF' >> /etc/bash.bashrc
declare -rx PROMPT_COMMAND='history -a >(logger -t "ON: $(date) [FROM]:${IP} [USER]:${USER} [PWD]:${PWD}" -s 2>>${BACKEND_LOG})'
EOF
# Group-writable + append-only session logs (see amazon_os).
chown root:ubuntu ${BACKEND_MNT}
chown root:ubuntu ${BACKEND_LOGFILE}
chown root:ubuntu ${BACKEND_LOGFILE_SHADOW}
chmod 662 ${BACKEND_LOGFILE}
chmod 662 ${BACKEND_LOGFILE_SHADOW}
chattr +a ${BACKEND_LOGFILE}
chattr +a ${BACKEND_LOGFILE_SHADOW}
touch /tmp/messages
chown root:ubuntu /tmp/messages
#Install CloudWatch logs on Ubuntu
echo "file = ${BACKEND_LOGFILE_SHADOW}" >> /tmp/groupname.txt
echo "log_group_name = ${CWG}" >> /tmp/groupname.txt
cat <<'EOF' >> ~/cloudwatchlog.conf
[general]
state_file = /var/awslogs/state/agent-state
[/var/log/backend]
log_stream_name = {instance_id}
datetime_format = %b %d %H:%M:%S
EOF
cat /tmp/groupname.txt >> ~/cloudwatchlog.conf
curl https://s3.amazonaws.com/aws-cloudwatch/downloads/latest/awslogs-agent-setup.py -O
export DEBIAN_FRONTEND=noninteractive
apt-get install -y python
chmod +x ./awslogs-agent-setup.py
./awslogs-agent-setup.py -n -r ${REGION} -c ~/cloudwatchlog.conf
#Install Unit file for Ubuntu 16.04
ubuntu=`cat /etc/os-release | grep VERSION_ID | tr -d \VERSION_ID=\"`
if [ "${ubuntu}" == "16.04" ]; then
# Quoted 'EOF' keeps $PIDFILE literal in the generated unit file.
cat <<'EOF' >> /etc/systemd/system/awslogs.service
[Unit]
Description=The CloudWatch Logs agent
After=rc-local.service
[Service]
Type=simple
Restart=always
KillMode=process
TimeoutSec=infinity
PIDFile=/var/awslogs/state/awslogs.pid
ExecStart=/var/awslogs/bin/awslogs-agent-launcher.sh --start --background --pidfile $PIDFILE --user awslogs --chuid awslogs &
[Install]
WantedBy=multi-user.target
EOF
fi
#Restart awslogs service
service awslogs restart
export DEBIAN_FRONTEND=noninteractive
apt-get install sysv-rc-conf -y
sysv-rc-conf awslogs on
#Restart SSH
service ssh stop
service ssh start
#Run security updates
apt-get install unattended-upgrades
echo "0 0 * * * unattended-upgrades -d" >> ~/mycron
crontab ~/mycron
rm ~/mycron
echo "${FUNCNAME[0]} Ended"
}
# CentOS-specific setup: per-login command logging, SELinux relabel of
# sshd_config, and CloudWatch Logs agent installation (python installer
# on CentOS 7, yum awslogs package otherwise), plus nightly yum security
# updates.
function cent_os () {
echo -e "\nDefaults env_keep += \"SSH_CLIENT\"" >>/etc/sudoers
echo -e "#Added by the Linux Bastion Bootstrap\ndeclare -rx IP=$(echo ${SSH_CLIENT} | awk '{print $1}')" >> /etc/bashrc
echo "declare -rx BACKEND_LOG=${BACKEND_MNT}/${BACKEND_LOG}" >> /etc/bashrc
# NOTE(review): unlike amazon_os/ubuntu_os this heredoc delimiter is
# UNQUOTED, so $(date), ${IP}, ${USER}, ${PWD} and ${BACKEND_LOG} are
# expanded NOW (at bootstrap time) rather than per login — confirm
# whether that divergence is intentional.
cat <<- EOF >> /etc/bashrc
declare -rx PROMPT_COMMAND='history -a >(logger -t "ON: $(date) [FROM]:${IP} [USER]:${USER} [PWD]:${PWD}" -s 2>>${BACKEND_LOG})'
EOF
chown root:centos ${BACKEND_MNT}
chown root:centos /usr/bin/script
chown root:centos /var/log/backend/backend.log
chmod 770 /var/log/backend/backend.log
touch /tmp/messages
chown root:centos /tmp/messages
restorecon -v /etc/ssh/sshd_config
/bin/systemctl restart sshd.service
# Install CloudWatch Log service on Centos Linux
centos=`cat /etc/os-release | grep VERSION_ID | tr -d \VERSION_ID=\"`
if [ "${centos}" == "7" ]; then
echo "file = ${BACKEND_LOGFILE_SHADOW}" >> /tmp/groupname.txt
echo "log_group_name = ${CWG}" >> /tmp/groupname.txt
cat <<EOF >> ~/cloudwatchlog.conf
[general]
state_file = /var/awslogs/state/agent-state
use_gzip_http_content_encoding = true
logging_config_file = /var/awslogs/etc/awslogs.conf
[/var/log/backend]
datetime_format = %Y-%m-%d %H:%M:%S
file = /var/log/messages
buffer_duration = 5000
log_stream_name = {instance_id}
initial_position = start_of_file
EOF
cat /tmp/groupname.txt >> ~/cloudwatchlog.conf
curl https://s3.amazonaws.com/aws-cloudwatch/downloads/latest/awslogs-agent-setup.py -O
chmod +x ./awslogs-agent-setup.py
./awslogs-agent-setup.py -n -r ${REGION} -c ~/cloudwatchlog.conf
# NOTE(review): delimiter is unquoted here, so $PIDFILE expands (to
# empty, presumably) while being written; the ubuntu_os variant quotes
# 'EOF' and keeps it literal — confirm which is intended.
cat << EOF >> /etc/systemd/system/awslogs.service
[Unit]
Description=The CloudWatch Logs agent
After=rc-local.service
[Service]
Type=simple
Restart=always
KillMode=process
TimeoutSec=infinity
PIDFile=/var/awslogs/state/awslogs.pid
ExecStart=/var/awslogs/bin/awslogs-agent-launcher.sh --start --background --pidfile $PIDFILE --user awslogs --chuid awslogs &
[Install]
WantedBy=multi-user.target
EOF
service awslogs restart
chkconfig awslogs on
else
chown root:centos /var/log/backend
yum update -y
yum install -y awslogs
export TMPREGION=`cat /etc/awslogs/awscli.conf | grep region`
sed -i.back "s/${TMPREGION}/region = ${REGION}/g" /etc/awslogs/awscli.conf
echo "file = ${BACKEND_LOGFILE_SHADOW}" >> /tmp/groupname.txt
echo "log_group_name = ${CWG}" >> /tmp/groupname.txt
cat <<EOF >> ~/cloudwatchlog.conf
[/var/log/backend]
datetime_format = %b %d %H:%M:%S
buffer_duration = 5000
log_stream_name = {instance_id}
initial_position = start_of_file
EOF
# Escape slashes in the existing group name so it is usable inside the
# sed substitution below.
export TMPGROUP=`cat /etc/awslogs/awslogs.conf | grep ^log_group_name`
export TMPGROUP=`echo ${TMPGROUP} | sed 's/\//\\\\\//g'`
sed -i.back "s/${TMPGROUP}/log_group_name = ${CWG}/g" /etc/awslogs/awslogs.conf
cat ~/cloudwatchlog.conf >> /etc/awslogs/awslogs.conf
cat /tmp/groupname.txt >> /etc/awslogs/awslogs.conf
yum install ec2-metadata -y
export TMPREGION=`cat /etc/awslogs/awscli.conf | grep region`
sed -i.back "s/${TMPREGION}/region = ${REGION}/g" /etc/awslogs/awscli.conf
sleep 3
service awslogs stop
sleep 3
service awslogs start
chkconfig awslogs on
fi
#Run security updates
echo "0 0 * * * yum -y update --security" > ~/mycron
crontab ~/mycron
rm ~/mycron
echo "${FUNCNAME[0]} Ended"
}
# Hide other users' processes: remount /proc with hidepid=2 now and make
# the setting permanent via /etc/fstab.
function prevent_process_snooping() {
    # Prevent backend host users from viewing processes owned by other users.
    mount -o remount,rw,hidepid=2 /proc
    # Rewrite fstab without the existing /proc entry.  The original used
    # awk '!/proc/', which also dropped any unrelated line that merely
    # contained the substring "proc", and wrote to ./temp in the current
    # directory; match the mount-point field and use a private temp file
    # instead.
    local tmp
    tmp=$(mktemp) || return 1
    awk '$2 != "/proc"' /etc/fstab > "$tmp" && mv "$tmp" /etc/fstab
    echo "proc /proc proc defaults,hidepid=2 0 0" >> /etc/fstab
    echo "${FUNCNAME[0]} Ended"
}
##################################### End Function Definitions
# Call checkos to ensure platform is Linux
checkos
# Verify dependencies are installed.
verify_dependencies
# Assuming it is, setup environment variables.
setup_environment_variables
# Read the options from cli input.
# Fix: the original spec was '-o h:' — the colon wrongly declared -h as
# requiring an argument even though it is a plain flag paired with
# --help; '-o h' matches the case statement below.
TEMP=$(getopt -o h --long help -n "$0" -- "$@")
eval set -- "${TEMP}"
# extract options and their arguments into variables.
while true; do
    case "$1" in
        -h | --help)
            usage
            exit 1
            ;;
        --)
            break
            ;;
        *)
            break
            ;;
    esac
done
# Dispatch to the distro-specific setup routine.
release=$(osrelease)
# Ubuntu Linux
if [ "${release}" == "Ubuntu" ]; then
    #Call function for Ubuntu
    ubuntu_os
# AMZN Linux
elif [ "${release}" == "AMZN" ]; then
    #Call function for AMZN
    amazon_os
# CentOS Linux
elif [ "${release}" == "CentOS" ]; then
    #Call function for CentOS
    cent_os
else
    echo "[ERROR] Unsupported Linux Bastion OS"
    exit 1
fi
prevent_process_snooping
echo "Bootstrap complete."
| true
|
5007eceb9e8bc030c62265ca099d5045d6e1eeb6
|
Shell
|
TejaAmbati-02/talks
|
/frp/fig2pdf.sh
|
UTF-8
| 5,224
| 4.03125
| 4
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/sh
###############################################################################
#                                                                             #
# Converts a .fig file into an .eps file suitable for including               #
# in a latex document via, say, the graphicx package.                         #
#                                                                             #
# This program typesets any latex commands (as "specially flagged" text)      #
# within foo.fig                                                              #
#                                                                             #
# The argument of this program is the name of                                 #
# a fig file "foo.fig", or simply "foo"                                       #
#                                                                             #
# The program looks for an optional file foo.preamble in the same             #
# directory as foo.fig, where additional                                      #
# latex preamble commands such as "\newcommand{\es}{\emptyset}" can be        #
# placed.                                                                     #
# This script relies on fig2dev, latex, dvips, ps2eps, and eps2pdf            #
# These are common in UNIX (LINUX, MAC OS X)                                  #
# It assumes your temp directory is /tmp, but it cleans up after itself.      #
#                                                                             #
# Error handling and help have not been implemented.                          #
#                                                                             #
# This was written by Luis Goddyn August 2004                                 #
# and updated November 2011.                                                  #
# It may be freely distributed or modified with comment                       #
# provided this header remains intact.                                        #
#                                                                             #
# It comes without any guarantee, warantee etc.                               #
#                                                                             #
###############################################################################
#echo "Starting shell script [$0]."
if [ $# -eq 0 ]
then
    echo "Syntax: $0 file.fig, or simply $0 file"
    exit 1
fi
# Extract the base filename whether or not a .fig extension was supplied.
# ($() replaces backticks; "$1" is quoted against spaces.)
fileBase=$(echo "$1" | sed 's/\.fig$//')
fileName=${fileBase}.fig
curDir=$(pwd)
if [ ! -f "$fileName" ]
then
    echo "Input file [${fileName}] not found - Aborting"
    exit 1
fi
# All intermediate files share this unique, PID-based prefix.
tempBase="/tmp/a_$$_fig2pdf"
echo "Temporary files start with ${tempBase}"
#if [ -f ${fileBase}.eps ]
if [ -f "${fileBase}.pdf" ]
then
    # echo "Output file [${fileBase}.eps] already exists."
    echo "Output file [${fileBase}.pdf] already exists."
    # printf is the portable way to emit a prompt without a trailing
    # newline; the original echo "... \c" only works with SysV-style
    # echo and prints a literal \c under bash's /bin/sh.
    printf 'Okay to overwrite? ( y/n ) : '
    read answer
    # echo ""
    if [ "$answer" != "y" ] && [ "$answer" != "Y" ] && [ "$answer" != "yes" ] && [ "$answer" != "Yes" ]
    then
        echo "Aborting"
        exit 1
    fi
fi
unset noclobber;
echo "Generating base .ps from .fig"
fig2dev -L pstex "${fileBase}.fig" > "${tempBase}.pstex_t"
#generate .tex commands from .fig using "pens specification in the .ps file
echo "Generating .tex commands"
fig2dev -L pstex_t -p "${tempBase}.pstex_t" "${fileBase}.fig" > "${tempBase}.temptex"
# Build a minimal wrapper document around the generated TeX fragment.
echo "Generating latex file"
printf '%s\n' '\documentclass{article}' >> ${tempBase}.tex
printf '%s\n' '\usepackage{graphicx,epsfig,color}' >> ${tempBase}.tex
printf '%s\n' '\pagestyle{empty}' >> ${tempBase}.tex
if [ -f "${fileBase}.preamble" ]
then
    echo "Including preamble commands in [${fileBase}.preamble]"
    printf '%s\n' "\input{${curDir}/${fileBase}.preamble}" >> ${tempBase}.tex
fi
printf '%s\n' '\begin{document}' >> ${tempBase}.tex
printf '%s\n' "\input{${tempBase}.temptex}" >> ${tempBase}.tex
printf '%s\n' '\end{document}' >> ${tempBase}.tex
echo "Starting latex"
#(cd /tmp; latex ${tempBase}.tex ; )
(cd /tmp; latex ${tempBase}.tex > /dev/null ; ) #Makes things less verbose
echo "Starting dvips"
dvips -E -q -o ${tempBase}.ps ${tempBase}.dvi
echo "Starting ps2eps"
#ps2eps ${tempBase}.ps
ps2eps -B -l ${tempBase}.ps #Slightly extends BoundingBox
##The following may be uncommented if you would like a .pdf file made (for pdflatex)
##You should also uncomment the /bin/rm below
##BoundingBox info may be corrupted: See
## http://phaseportrait.blogspot.com/2007/06/bounding-boxes-and-eps-to-pdf.html
## if you have trouble.
#echo "Starting ps2pdf"
#ps2pdf -dEPSCrop ${tempBase}.eps ${tempBase}.pdf
echo "Starting epstopdf"
epstopdf -outfile=${tempBase}.pdf ${tempBase}.eps
echo "Writing to ${fileBase}.pdf"
#cp ${tempBase}.eps ${fileBase}.eps
#cp ${tempBase}.ps ${fileBase}.ps
cp ${tempBase}.pdf "${fileBase}.pdf"
set noclobber;
echo "Cleaning up"
/bin/rm ${tempBase}.temptex ${tempBase}.tex ${tempBase}.pstex_t
/bin/rm ${tempBase}.dvi ${tempBase}.log ${tempBase}.aux
/bin/rm ${tempBase}.ps
/bin/rm ${tempBase}.eps
/bin/rm ${tempBase}.pdf
| true
|
ca640c14d30c92d403439982fbdbc79272f50a14
|
Shell
|
jasheppa5/SRI-Monitor
|
/deploy.sh
|
UTF-8
| 5,430
| 3.46875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Deploy the SubResourceIntegrity monitor: create/reuse an srimonitor-*
# S3 bucket, upload the URL lists, package and deploy the CloudFormation
# stack, and (on first run) create an access key and optionally kick off
# a baseline scan.  All AWS command output is appended to srimonitor.log.
AWS_PROFILE=
AWS_REGION=
BATCH_COMPUTE_EC2_SUBNET="172.31.0.0/20"
BATCH_COMPUTE_VPC_CIDR="172.31.0.0/16"
STACK_NAME=SubResourceIntegrityMonitor
# Build optional --region/--profile argument strings only when set.
if [ "$AWS_REGION" != "" ]
then
AWS_REGION_STR="--region ${AWS_REGION}"
else
AWS_REGION_STR=""
fi
if [ "$AWS_PROFILE" != "" ]
then
AWS_PROFILE_STR="--profile ${AWS_PROFILE}"
else
AWS_PROFILE_STR=""
fi
# First run is detected by the absence of any srimonitor* bucket.
FIRST_RUN=0
BUCKET_NAME="$(aws ${AWS_PROFILE_STR} ${AWS_REGION_STR} s3api list-buckets --query 'Buckets[?starts_with(Name, `srimonitor`) == `true`].Name' --output text)"
if [ "$BUCKET_NAME" == "" ]
then
FIRST_RUN=1
BUCKET_NAME="srimonitor-$(date +%s)"
fi
# Terminal colours for status output.
INFO=$(tput setaf 3)
FAILURE=$(tput setaf 1)
SUCCESS=$(tput setaf 2)
WARNING=$(tput setaf 4)
END=$(tput sgr0)
#################################################################################
if [ $FIRST_RUN -eq 1 ]; then
printf "${INFO}Mounting S3 Bucket${END}\n"
printf "${INFO}....Please wait.${END}\n"
aws ${AWS_PROFILE_STR} ${AWS_REGION_STR} s3 mb s3://${BUCKET_NAME} >> srimonitor.log 2>&1
if [ $? -ne 0 ]
then
printf "${FAILURE}....Failed to mount ${BUCKET_NAME} S3 Bucket! See srimonitor.log for details.${END}\n"
exit
else
printf "${SUCCESS}....Successfully mounted ${BUCKET_NAME} S3 Bucket!${END}\n"
fi
#################################################################################
printf "${INFO}Adding S3 LifeCycle Policy${END}\n"
printf "${INFO}....Please wait.${END}\n"
#3 Create 30 day expiration for results
aws ${AWS_PROFILE_STR} ${AWS_REGION_STR} s3api put-bucket-lifecycle \
--bucket ${BUCKET_NAME} \
--lifecycle-configuration '{"Rules":[{"ID":"PurgeAfter30Days","Prefix":"alerts/","Status":"Enabled","Expiration":{"Days":30}}]}' >> srimonitor.log 2>&1
if [ $? -ne 0 ]
then
printf "${FAILURE}....Failed to add S3 Bucket LifeCycle Policy! See srimonitor.log for details.${END}\n"
exit
else
printf "${SUCCESS}....Successfully added S3 Bucket LifeCycle Policy!${END}\n"
fi
fi
#################################################################################
printf "${INFO}Uploading URLs to S3${END}\n"
printf "${INFO}....Please wait.${END}\n"
aws ${AWS_PROFILE_STR} ${AWS_REGION_STR} s3 cp urls/ s3://${BUCKET_NAME}/urls --recursive --exclude '*' --include "*.csv" >> srimonitor.log 2>&1
if [ $? -ne 0 ]
then
printf "${FAILURE}....Failed to upload urls (to monitor) to the S3 Bucket! See srimonitor.log for details.${END}\n"
exit
else
printf "${SUCCESS}....Successfully uploaded urls (to monitor) to the S3 Bucket!${END}\n"
fi
#################################################################################
printf "${INFO}Create Packaged CloudFormation Template${END}\n"
printf "${INFO}....Please wait.${END}\n"
aws ${AWS_PROFILE_STR} ${AWS_REGION_STR} cloudformation package \
--template-file template.yaml \
--s3-bucket ${BUCKET_NAME} \
--s3-prefix src \
--output-template-file packaged-template.yaml >> srimonitor.log 2>&1
if [ $? -ne 0 ]
then
printf "${FAILURE}....Failed to create packaged CloudFormation template! See srimonitor.log for details.${END}\n"
exit
else
printf "${SUCCESS}....Successfully created packaged CloudFormation template!${END}\n"
fi
#################################################################################
printf "${INFO}Deploying AWS CloudFormation Template${END}\n"
printf "${INFO}....Please wait.${END}\n"
aws ${AWS_PROFILE_STR} ${AWS_REGION_STR} cloudformation deploy \
--stack-name $STACK_NAME \
--template-file packaged-template.yaml \
--capabilities CAPABILITY_NAMED_IAM \
--parameter-overrides EC2SUBNET=$BATCH_COMPUTE_EC2_SUBNET VPCRANGE=$BATCH_COMPUTE_VPC_CIDR BUCKETNAME=$BUCKET_NAME >> srimonitor.log 2>&1
# NOTE(review): only exit code 1 is treated as failure here, unlike the
# '-ne 0' checks above — presumably to tolerate deploy's non-1 "no
# changes to deploy" exit code; confirm before normalising.
if [ $? -eq 1 ]
then
printf "${FAILURE}....Failed to deploy CloudFormation template! See srimonitor.log for details.${END}\n"
exit
else
printf "${SUCCESS}....Successfully deployed CloudFormation template!${END}\n"
fi
#################################################################################
if [ $FIRST_RUN -eq 1 ]; then
printf "${INFO}Generating Access Key${END}\n"
printf "${INFO}....Please wait.${END}\n"
# The new key pair is printed to the terminal on purpose (see the
# warning below); it is not logged to srimonitor.log.
aws ${AWS_PROFILE_STR} ${AWS_REGION_STR} iam create-access-key --user-name SRIMonitorUser
if [ $? -ne 0 ]
then
exit
else
printf "${SUCCESS}....Successfully created access key for SRIMonitorUser!${END}\n"
printf "${WARNING}Please safely store the SecretAccessKey and AccessKeyID output above. You'll use this key to programmatically access query results stored in S3.${END}\n"
fi
#################################################################################
printf "\n\n${INFO}This appears to be your first time setting up SRIMonitor - Would you like to go ahead and run an initial scan of the URLs to create a baseline?.${END}\n"
read -p "y/n: " answer
if [ "$answer" == "Y" ] || [ "$answer" == "y" ]
then
printf "${INFO}....Please wait.${END}\n"
aws ${AWS_PROFILE_STR} ${AWS_REGION_STR} lambda invoke --function-name 'SRIMonitor-Dispatch' - >> srimonitor.log 2>&1
if [ $? -ne 0 ]
then
printf "${FAILURE}....Failed to invoke 'SRIMonitor-Dispatch' lambda function! Please see srimonitor.log for details.${END}\n"
exit
else
printf "${SUCCESS}....Successfully added URLs to the download and analysis queue!${END}\n"
fi
fi
fi
printf "\n${SUCCESS}....Deployment Complete!${END}\n"
| true
|
2047e4ce2239c8fd459da377fbb6e854b6cfcf2e
|
Shell
|
QuitosMM/NGINX-Demos
|
/zookeeper-demo/provision.sh
|
UTF-8
| 278
| 3.109375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Determine this host's IP — prefer the HOST_IP env var, otherwise parse
# eth0's address out of ifconfig — persist it as HOST_IP in
# ~/.bash_aliases, then bring the docker-compose stack up detached.

# -n with quotes is robust where the original bare '[ ${HOST_IP} ]'
# broke (word-splitting) if the value ever contained whitespace.
if [ -n "${HOST_IP}" ]; then
    ipaddr=${HOST_IP}
else
    # "inet addr:x.y.z.w" → split on ':' then take the first word.
    ipaddr=$(/sbin/ifconfig eth0 | grep "inet addr" | awk -F: '{print $2}' | awk '{print $1}')
fi
echo "export HOST_IP=$ipaddr" | tee -a ~/.bash_aliases
. ~/.bash_aliases
/usr/local/bin/docker-compose up -d
| true
|
ef4c9df03d62a9ef049ac434efde3fba12a5c685
|
Shell
|
panchohumeres/superJupyter
|
/bi-init.sh
|
UTF-8
| 1,139
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Prepare host directories (certs, certbot, redis, postgres, superset,
# ETLcache) for the BI docker stack and hand ownership to the invoking
# user.  Reads HOST_DATA_PATH / CERTBOT_PATH from ./.env.

# Export every variable assigned while sourcing .env.
set -a
. ./.env
set +a

# mkdir -p makes re-runs idempotent (the original bare mkdir errored
# when a directory already existed); all expansions are quoted.
echo "creating certs dir: ${HOST_DATA_PATH}/certs"
mkdir -p "${HOST_DATA_PATH}/certs"
echo "creating certbot dir"
mkdir -p "${CERTBOT_PATH}"
echo "changing ownership of certbot dir (for docker execution)"
sudo chmod -R g+rwx "${CERTBOT_PATH}"
# NOTE(review): the group is set to the numeric UID — confirm a group
# with that id is what the containers expect.
sudo chgrp -R "${UID}" "${CERTBOT_PATH}"
sudo chown -R "${UID}" "${CERTBOT_PATH}"
#CREATE CERTIFICATES FOR INTERNAL ELASTICSEARCH-KIBANA COMMUNICATION (TSL LAYER)
#echo "creating certificates for TSL internal layer of stack"
#docker-compose -f ./certs/create-certs.yml run --rm create_certs
#echo "changing ownership of certificates files (for docker execution)"
echo "creating redis dir: ${HOST_DATA_PATH}/redis"
mkdir -p "${HOST_DATA_PATH}/redis"
# NOTE(review): postgres/superset/ETLcache are created under
# CERTBOT_PATH, not HOST_DATA_PATH — looks odd but kept as-is; confirm.
echo "creating postgres dir"
mkdir -p "${CERTBOT_PATH}/postgres"
echo "creating superset dir"
mkdir -p "${CERTBOT_PATH}/superset"
echo "changing ownership of superset dirs (for docker execution)"
echo "creating ETLcache dir"
mkdir -p "${CERTBOT_PATH}/ETLcache"
echo "changing ownership of ETLcache dirs (for docker execution)"
sudo chmod -R g+rwx "${HOST_DATA_PATH}"
sudo chgrp -R "${UID}" "${HOST_DATA_PATH}"
sudo chown -R "${UID}" "${HOST_DATA_PATH}"
| true
|
d8a9d71dcd75b4959397f99a206a46f5d403464d
|
Shell
|
smalnha/oldscripts
|
/mail/printEmail.sh
|
UTF-8
| 221
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Format an email (read from stdin by a2ps -Email) for printing/preview.
# NOTE(review): '[ "" ]' is always false, so the 2-up/gv preview branch
# below can never run and the 1-up branch always writes ~/email.ps —
# presumably a deliberate toggle left by the author; confirm before
# "fixing" it.
echo "printEmail.sh called: $0 $* in directory $PWD"
if [ "" ] && which gv; then
# 2-up
a2ps -Email --medium=Letter --output=- | gv -
else
# 1-up
a2ps -Email -1 --medium=Letter --output=$HOME/email.ps
fi
| true
|
063b742e4cc49158687ed8949398f31c16c5c80b
|
Shell
|
daisukei777/tf-aks-frontdoor-cluster
|
/cluster/cleanup.sh
|
UTF-8
| 833
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Tear down the cluster: destroy the Terraform-managed resources, then
# remove the remote-state resource group and the per-location AKS
# service principals.

# Re-initialise the backend so destroy targets the right state file.
terraform init \
    -backend-config="storage_account_name=${TF_VAR_rp_prefix}tfstate" \
    -backend-config="container_name=tfstate-cluster" \
    -backend-config="key=terraform.tfstate" \
    -reconfigure

terraform destroy -auto-approve
RET=$?

# Only drop the remote-state resource group when the destroy succeeded.
RESOURCE_GROUP_NAME=${TF_VAR_rp_prefix}-rp-tfstate-rg
[ ${RET} -eq 0 ] && az group delete -n $RESOURCE_GROUP_NAME -y

# Remove the service principal created for each AKS location.
for location in 1 2 3; do
    az ad sp delete --id $(az ad sp show --id https://${TF_VAR_rp_prefix}-rp-sp-aks-location-${location} --query appId --output tsv)
done
| true
|
2b39cb9462b0006a18df73a105879cc15867599f
|
Shell
|
gtraines/robot1
|
/scripts/install_arduino.sh
|
UTF-8
| 1,123
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# Download the Arduino 1.8.5 sources plus the prebuilt ARM toolchain
# pieces its build script expects, then run the ARM build.
# Abort on the first failure: without this, a failed 'cd' or download
# let every later command run in the wrong directory.
set -e

echo "/*========================*/"
echo "Downloading Arduino 1.8.5"
echo "/*========================*/"

cd ~/Source
wget https://github.com/arduino/Arduino/releases/download/1.8.5/Arduino-1.8.5.tar.xz
tar --xz -xf Arduino-1.8.5.tar.xz
cd Arduino-1.8.5/build/
cp ~/Source/configure-it-oot/build_arduino_arm.sh .
# Tooling archives the build script looks for locally.
wget https://downloads.arduino.cc/libastylej-2.05.1-3.zip
wget https://downloads.arduino.cc/liblistSerials/liblistSerials-1.4.0.zip
wget https://downloads.arduino.cc/tools/arduino-builder-linuxarm-1.3.25.tar.bz2
cd shared/
# Offline documentation bundles.
wget https://downloads.arduino.cc/reference-1.6.6-3.zip
wget https://downloads.arduino.cc/Galileo_help_files-1.6.2.zip
wget https://downloads.arduino.cc/Edison_help_files-1.6.2.zip
cd ~/Source/Arduino-1.8.5/build/linux/
# AVR toolchain binaries for ARM hosts.
wget https://downloads.arduino.cc/tools/avr-gcc-4.9.2-atmel3.5.4-arduino2-armhf-pc-linux-gnu.tar.bz2
wget https://downloads.arduino.cc/tools/avrdude-6.3.0-arduino9-armhf-pc-linux-gnu.tar.bz2
cd ~/Source/Arduino-1.8.5/build/
chmod +x ~/Source/Arduino-1.8.5/build/build_arduino_arm.sh
bash ~/Source/Arduino-1.8.5/build/build_arduino_arm.sh
| true
|
2cfffcf0e7c75a916385f69f6d64b2c0cb569402
|
Shell
|
luckypoem/tup
|
/test/t7027-move-dir-out.sh
|
UTF-8
| 337
| 2.765625
| 3
|
[] |
no_license
|
#! /bin/sh -e
# Make sure we can move a directory outside of tup and have it be deleted.
# Test harness: tup.sh provides re_init, update, stop_monitor,
# tup_object_no_exist, eotup, etc.
. ./tup.sh
mkdir tuptest
cd tuptest
re_init
tup monitor
# Build a one-file C project under the monitored tree.
mkdir foo
cd foo
echo 'int main(void) {return 0;}' > foo.c
echo ': foreach *.c |> gcc %f -o %o |> %B' > Tupfile
cd ..
update
# Moving foo out of the tup hierarchy should make tup forget about it
# on the next update.
mv foo ..
stop_monitor
update
tup_object_no_exist . foo
eotup
| true
|
cb6466c274f6d95332fe91b6750fa51ba1802097
|
Shell
|
bneff84/pop-os-bootstrap
|
/helpers/mageclean
|
UTF-8
| 1,163
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Detect the Magento version rooted in the current directory and clean
# its caches (plus generated static assets for Magento 2).  If redis-cli
# is installed, the Redis cache backend is flushed as well.

PWD="$(pwd)"
SCRIPT=$(readlink -f "$0")
BASEDIR="$( cd "$(dirname "$SCRIPT")" ; pwd -P )"
REDIS="$(command -v redis-cli)"

# Flush Redis when the client is installed.  Extracted into a helper:
# the original duplicated this stanza in both version branches.
flush_redis() {
    if [ -x "$REDIS" ]; then
        echo "Detected Redis is installed, issuing flushall"
        "$REDIS" flushall
    fi
}

if [ -f "$PWD/app/etc/local.xml" ]; then
    #Magento 1
    echo "Detected Magento Version 1.x"
    # Message fixed: the command actually executed is cache:clean,
    # not cache:flush as the original echo claimed.
    echo "Flushing cache via magerun cache:clean"
    "$BASEDIR/magerun" cache:clean
    flush_redis
elif [ -f "$PWD/app/etc/env.php" ]; then
    #Magento 2
    echo "Detected Magento Version 2.x"
    echo "Flushing cache via magerun2 cache:clean"
    "$BASEDIR/magerun2" cache:clean
    # Remove generated assets so they are rebuilt on next request.
    if [ -d "$PWD/var/view_preprocessed" ]; then
        echo "Removing $PWD/var/view_preprocessed"
        rm -rf "$PWD/var/view_preprocessed"
    fi
    if [ -d "$PWD/pub/static" ]; then
        echo "Removing $PWD/pub/static"
        rm -rf "$PWD/pub/static"
    fi
    flush_redis
else
    echo "Could not detect a Magento version."
    echo "Ensure that you run the mageclean command from the Magento site root."
fi
echo "Done!"
| true
|
5fa215091f04c51256ab496081200fe407df6f95
|
Shell
|
aponsero/454meta_simulator
|
/scripts/config.sh
|
UTF-8
| 964
| 2.671875
| 3
|
[] |
no_license
|
# Pipeline configuration, sourced by the worker scripts.  Every value is
# exported so the qsub'd jobs inherit it.
export CWD=$PWD
# where programs are
export BIN_DIR="/rsgrps/bhurwitz/hurwitzlab/bin"
# user configs
export OUT_DIR="/rsgrps/bhurwitz/alise/my_scripts/454meta_simulator/test_pipeline"
export NB_READ=100000
export PROFILE="/rsgrps/bhurwitz/alise/my_scripts/454meta_simulator/test_pipeline/test_uniq.txt"
export DB_DIR="/rsgrps/bhurwitz/alise/my_scripts/454meta_simulator/test_pipeline/ref"
export MODEL_CHOICE=3 #Choose error model 1, 2 or 3
# scripts configs
export SCRIPT_DIR="$PWD/scripts"
export WORKER_DIR="$SCRIPT_DIR/workers"
# user info (scheduler notification address, events, accounting group,
# and queue name)
export MAIL_USER="aponsero@email.arizona.edu"
export MAIL_TYPE="bea"
export GROUP="bhurwitz"
export QUEUE="standard"
#
# Ensure each argument exists as an empty directory: existing dirs are
# emptied, missing ones are created (with parents).
function init_dir {
    local dir
    # "$@" (not the original bare $*) keeps paths containing spaces as
    # single arguments.
    for dir in "$@"; do
        if [ -d "$dir" ]; then
            # ${dir:?} aborts instead of expanding to '/*' if dir is
            # ever empty; '--' guards against names starting with '-'.
            rm -rf -- "${dir:?}"/*
        else
            mkdir -p "$dir"
        fi
    done
}
# --------------------------------------------------
# Print the number of lines in file $1 (bare count, no filename).
function lc() {
    # Reading via stdin stops wc from echoing the filename, so no
    # cut-based parsing is needed; tr strips the leading padding some
    # wc implementations emit.
    wc -l < "$1" | tr -d ' \t'
}
| true
|
8833e9abd8e675cc84491babdd22d16694ad0949
|
Shell
|
mwichmann/lsb-checkers
|
/libchk/test_libchk
|
UTF-8
| 765
| 3.59375
| 4
|
[
"Artistic-1.0"
] |
permissive
|
#!/bin/sh
# Regression tests for libchk
# Copyright (C) 2001 The Free Standards Group Inc.
# Chris Yeoh <cyeoh@au1.ibm.com>
# Try runing lsblibchk
printf "Running lsblibchk..."
./lsblibchk > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "lsblibchk failed to run properly"
exit 1
else
# We can't really expect that all of the tests pass, as we may
# build on systems which are not lsb compliant themselves
# So check that the journal file at least has an end marker
# obviously, sanity-testing could be improved beyond this...
LAST_CODE=`tail -1 journal.libchk | cut -f1 -d'|'`
if [ "$LAST_CODE" -ne "900" ]; then
echo Journal file is incomplete
exit 1
else
rm -f $JOURNAL_FILE
echo OK
fi
fi
echo
echo All tests succeeded
exit 0
| true
|
8ec25a57ac46801c489b605c9f942388785816e7
|
Shell
|
jayp16-cuete/alprazolam
|
/wireguard/gce.sh
|
UTF-8
| 2,971
| 3.671875
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# currently only supports Google Compute Engine
# Pick a random listen port in [30000, 60000] that is not already in use
# according to `ss -lau`; print the chosen port on stdout. Retries with a
# fresh random candidate until a free one is found.
function generate_port
{
    local port
    while :; do
        port=$(shuf -i 30000-60000 -n 1)
        # grep -q: only the exit status matters. If the port string does
        # not appear in the socket listing, it is considered free.
        # (BUG FIX: was `break 2` inside a single loop.)
        if ! ss -lau | grep -q -- "$port"; then
            echo "$port"
            break
        fi
    done
}
# --- host checks -----------------------------------------------------------
# Package installs, /etc/wireguard and sysctl all require root.
if [ "$EUID" -ne 0 ]; then
    echo "you must run this as root"
    exit 1
fi

if [[ -e /etc/debian_version ]]; then
    source /etc/os-release
    OS=$ID # debian-based
else
    echo "currently only debian-based system is supported, sorry"
    exit 1
fi

# --- configuration ---------------------------------------------------------
# Interface holding the default route; VPN traffic is masqueraded out of it.
WG_INTERFACE="$(ip -4 route ls | grep default | grep -Po '(?<=dev )(\S+)' | head -1)"
WG_CONFIG_NAME="wg0"
WG_IPV4_ADDR="192.71.0.1/16"
WG_LOCAL_PORT=$(generate_port)
CLIENT_IPV4_ADDR="192.71.0.2/24"
CLIENT_WG_IPV4="192.71.0.2"
DNS="1.1.1.1, 1.0.0.1"
# currently only support Google Compute Engine because we rely on this secure way to fetch machine's public IP
PUBLIC_IP=$(curl -H "Metadata-Flavor: Google" http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
ENDPOINT="$PUBLIC_IP:$WG_LOCAL_PORT"

# --- package installation --------------------------------------------------
DEBIAN_FRONTEND=noninteractive add-apt-repository ppa:wireguard/wireguard -y
apt-get update
apt-get install -y "linux-headers-$(uname -r)" wireguard iptables qrencode

# Make sure the directory exists (this does not seem the be the case on fedora)
mkdir /etc/wireguard > /dev/null 2>&1

# Generate key pair for the server
PRIVATE_KEY=$(wg genkey)
PUBLIC_KEY=$(echo "$PRIVATE_KEY" | wg pubkey)

# Generate key pair for the client
CLIENT_PRIVATE_KEY=$(wg genkey)
CLIENT_PUBLIC_KEY=$(echo "$CLIENT_PRIVATE_KEY" | wg pubkey)

# Add server interface
# PostUp/PostDown add and remove the forwarding + NAT rules together with
# the tunnel (%i expands to the interface name inside wg-quick).
echo "[Interface]
Address = $WG_IPV4_ADDR
ListenPort = $WG_LOCAL_PORT
PrivateKey = $PRIVATE_KEY
SaveConfig = true
MTU = 1360
PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o $WG_INTERFACE -j MASQUERADE
PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o $WG_INTERFACE -j MASQUERADE" > "/etc/wireguard/$WG_CONFIG_NAME.conf"

# Add the client as a peer to the server
echo "[Peer]
PublicKey = $CLIENT_PUBLIC_KEY
AllowedIPs = 192.71.0.0/24" >> "/etc/wireguard/$WG_CONFIG_NAME.conf"

# Create client file with interface
echo "[Interface]
PrivateKey = $CLIENT_PRIVATE_KEY
Address = $CLIENT_IPV4_ADDR
DNS = $DNS" > "$HOME/$WG_CONFIG_NAME-client.conf"

# Add the server as a peer to the client
# AllowedIPs 0.0.0.0/0 routes *all* client traffic through the tunnel.
echo "[Peer]
PublicKey = $PUBLIC_KEY
Endpoint = $ENDPOINT
AllowedIPs = 0.0.0.0/0
MTU = 1360
PersistentKeepalive = 16" >> "$HOME/$WG_CONFIG_NAME-client.conf"

# Config files contain private keys — keep them owner-only.
chmod 600 -R /etc/wireguard/

# Enable routing on the server
echo "net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1" > /etc/sysctl.d/wg.conf
sysctl --system

systemctl start "wg-quick@$WG_CONFIG_NAME"
systemctl enable "wg-quick@$WG_CONFIG_NAME"

echo "here is the QR-code for you client (e.g. iPhone)"
qrencode -t ansiutf8 < "$HOME/$WG_CONFIG_NAME-client.conf"
| true
|
fffbfc1b7c03d132a6d71b7983896452641283e5
|
Shell
|
yenalp/vagrant_demo
|
/scripts/app/commands/create_test_symlinks/extensions/symlinks.sh
|
UTF-8
| 2,088
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash

# Registers the symlink helpers for the create_test_symlinks command.
# Calling ...init defines, in the current shell:
#   create_test_symlinks.symlinks.create <test_dir> <component>
#   create_test_symlinks.symlinks.removeSymlinks <dir>
# Both rely on externally-provided logger.* functions; create() also reads
# the associative array __features (key -> feature path), which is assumed
# to be declared by the caller — TODO confirm.
create_test_symlinks.symlinks.init() {

    # Re-create the *.feature symlinks for one component: wipe existing
    # links under <test_dir>/<component>, then link each __features entry
    # into place as <key>.feature.
    create_test_symlinks.symlinks.create() {
        local __test_dir="$1"
        local __component="$2"
        local __feature_length="${#__features[@]}"

        logger.beginTask --message \
            "Creating Symlinks"

        logger.step --message \
            "Removing Symlinks" \
            --number "1" \
            --total "2"

        # remove symlinks in component directory
        create_test_symlinks.symlinks.removeSymlinks \
            "${__test_dir}/${__component}"

        logger.step --message \
            "Adding features" \
            --number "2" \
            --total "2"

        local __i
        local __feature_counter=1
        for __i in "${!__features[@]}"; do
            local __key="${__i}"
            local __feature_path="${__features[${__key}]}"
            local __source_path="${__test_dir}/${__feature_path}"
            local __dest_path="${__test_dir}/${__component}/${__key}.feature"

            logger.step --message \
                "Adding feature \"${__key}\"" \
                --number "2-${__feature_counter}" \
                --total "${__feature_length}"

            logger.info --message \
                "Source Path: ${__source_path}"
            logger.info --message \
                "Dest Path: ${__dest_path}"

            # A failed link is reported but does not abort the loop.
            ln -s "${__source_path}" "${__dest_path}" || {
                logger.warning --message \
                    "Not good - could not create symbolic link"
            }

            logger.info --message \
                "Dest Path: ${__dest_path}"

            ((__feature_counter++))
        done

        logger.endTask --message \
            "Creating Symlinks"
    }

    # Delete every *.feature symlink below directory $1.
    create_test_symlinks.symlinks.removeSymlinks() {
        local __line
        # -r so backslashes in paths are not mangled by read.
        find "$1" -name '*.feature' -type l | while read -r __line
        do
            rm -v "${__line}" || {
                # BUG FIX: the original message used ${$i}, which is a bash
                # "bad substitution" runtime error; report the failing path.
                logger.warning --message \
                    "Could not remove symbolic link ${__line}"
            }
        done
    }
}
| true
|
88965cf38baa1fe1aea2a7db1c00da92c15b34ec
|
Shell
|
svmheroku/cj4svm
|
/predictions/fx_past/fx_past_png.bash
|
UTF-8
| 716
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# fx_past_png.bash
# I use this script to create a png file for the splash page.
# The png file shows a weekly summary of fx-DanBot performance.
# Assume that fx_past.sql (via index_spec.bash) has been run and CSV data landed here:
# /tmp/fx_sunday_l.txt
# /tmp/fx_sunday_s.txt

# -x: trace each command for easier cron debugging.
set -x
cd /pt/s/rl/cj/predictions/fx_past/

# Write the CSV header, then append only data rows (rows beginning with
# "201", i.e. 201x dates) from the raw query output.
echo 'WK, WEEK_OF, RROWNUM, PREDICTION_COUNT, SUM_G5N, CUM_SUM' > /tmp/fx_sunday_l.csv
echo 'WK, WEEK_OF, RROWNUM, PREDICTION_COUNT, SUM_G5N, CUM_SUM' > /tmp/fx_sunday_s.csv

grep '^201' /tmp/fx_sunday_l.txt >> /tmp/fx_sunday_l.csv
grep '^201' /tmp/fx_sunday_s.txt >> /tmp/fx_sunday_s.csv

# Render the PNG with R, then publish it to the web tree.
/usr/bin/R -f fx_past_png.r
cp /tmp/fx_sunday.png /pt/s/rl/svm/public/images/

exit 0
| true
|
44063862a34557d6eefd3aa4c78f8d5d949217c4
|
Shell
|
SaltyGrandpa/Shell-Scripts
|
/ClamAV-cron/clamav.sh
|
UTF-8
| 1,434
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
# Run a full ClamAV scan of the filesystem; on infection, keep a timestamped
# log and email it to $email_address. Exits with clamscan's return code.

#Update this variable to be your email address
email_address="youremail@domain.com"
return_code=0

# Check for root privileges
if [[ $EUID -ne 0 ]]; then
   echo "Virus scan: only root can do that" 1>&2
   exit 1
fi

# Display starting message
echo "Starting ClamAV automated scan..." ; echo ""

# Dump old ClamAV log (a permanent one is created if infections are found)
echo "Cleaning old log files..."
clamlog="/var/log/virus-scan.log"
sudo rm -f "$clamlog"
sudo touch "$clamlog"
sudo chmod 640 "$clamlog"

echo "Starting system scan..."
sudo clamscan -r / --log="$clamlog"
return_code=$?

# Echo scan results
# clamscan exits 0 when clean and 1 when infections were found; anything
# else indicates the scan itself failed.
if [ $return_code -ne 0 ] && [ $return_code -ne 1 ]; then
	echo "Failed to complete virus scan"
else
	echo ""; echo "";
	echo -n "Virus scan completed successfully, "
	#Determine if any infections were found
	if sudo grep -rl 'Infected files: 0' "$clamlog" > /dev/null 2>&1; then
		echo "No infections detected."; echo ""
	else
		#If detections are found, create a permanent log file and email the results
		echo "INFECTIONS DETECTED!"
		virus_results="/var/log/virus-scan-results_$(date +%F_%R).log"
		sudo touch "$virus_results"
		sudo chmod 640 "$virus_results"
		# Summary (last 10 lines) plus every "... FOUND" detection line.
		tail -n 10 "$clamlog" >> "$virus_results"
		echo "" >> "$virus_results"
		grep -i ' FOUND' "$clamlog" >> "$virus_results"
		mail -s 'ClamAV: Infections Detected!' "$email_address" < "$virus_results"
	fi
fi
exit $return_code
| true
|
2fed9cfc5143a01f6e32504b3bc2f4070eb30337
|
Shell
|
cnlubo/auto_lnmp
|
/function/install/Tengine.sh
|
UTF-8
| 8,252
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# shellcheck disable=SC2164
#---------------------------------------------------------------------------
# @Author: ak47(454331202@qq.com)
# @file_name: Tengine.sh
# @Desc
#----------------------------------------------------------------------------
# Placeholder for Tengine-specific dependency preparation. The OpenSSL
# unpack steps are currently disabled (OpenSSL source is consumed via
# --with-openssl in Install_Tengine); only an empty line is printed.
Tengine_Dep_Install(){
   # echo -e "${CMSG}[ Openssl-${openssl_latest_version:?} ]***********************************>>${CEND}\n"
   # # openssl
   # # shellcheck disable=SC2034
   # cd ${script_dir:?}/src
   # src_url=https://www.openssl.org/source/openssl-${openssl_latest_version:?}.tar.gz
   # [ ! -f openssl-${openssl_latest_version:?}.tar.gz ] && Download_src
   # [ -d openssl-${openssl_latest_version:?} ] && rm -rf openssl-${openssl_latest_version:?}
   # tar xf openssl-${openssl_latest_version:?}.tar.gz
    echo ""
}
# Download, configure, build and install Tengine from source.
# Relies on variables prepared elsewhere (versions, ${tengine_install_dir},
# ${run_user}, ${script_dir}, ${CpuProNum}, color codes) and on helpers
# Download_src / Config_Tengine defined in sibling scripts.
Install_Tengine(){

   # echo -e "${CMSG}[create user and group ]***********************************>>${CEND}\n"
   #
   # grep ${run_user:?} /etc/group >/dev/null 2>&1
   # if [ ! $? -eq 0 ]; then
   #     groupadd $run_user
   # fi
   # id $run_user >/dev/null 2>&1
   # if [ ! $? -eq 0 ]; then
   #     useradd -g $run_user -M -s /sbin/nologin $run_user
   # fi

    echo -e "${CMSG}[prepare Tengine install ]***********************************>>${CEND}\n"
    # Fresh install: wipe any previous target dir and source tree.
    [ -d ${tengine_install_dir:?} ] && rm -rf ${tengine_install_dir:?}
    cd ${script_dir:?}/src
    # shellcheck disable=SC2034
    src_url=http://tengine.taobao.org/download/tengine-${tengine_install_version:?}.tar.gz
    [ ! -f nginx-${tengine_install_version:?}.tar.gz ] && Download_src
    [ -d tengine-${tengine_install_version:?} ] && rm -rf tengine-${tengine_install_version:?}
    tar xf tengine-${tengine_install_version:?}.tar.gz
    cd tengine-${tengine_install_version:?}
    # http_stub_status_module: built-in status page, disabled by default
    if [ ${lua_install:?} = 'y' ]; then
        export LUAJIT_LIB=/usr/local/luajit/lib
        export LUAJIT_INC=/usr/local/luajit/include/luajit-2.1
        nginx_modules_options="--with-ld-opt='-Wl,-rpath,/usr/local/luajit/lib'"
        nginx_modules_options=$nginx_modules_options" --with-http_lua_module"
        nginx_modules_options=$nginx_modules_options" --add-module=../ngx_devel_kit-${ngx_devel_kit_version:?}"
        # Replace the bundled ngx_http_lua_module sources with the newer
        # lua-nginx-module release unpacked under ${script_dir}/src.
        mv modules/ngx_http_lua_module modules/ngx_http_lua_module_old
        cp -ar ${script_dir:?}/src/lua-nginx-module-${lua_nginx_module_version:?} modules/ngx_http_lua_module
    else
        nginx_modules_options=''
    fi
    ./configure --prefix=${tengine_install_dir:?} \
                --sbin-path=${tengine_install_dir:?}/sbin/nginx \
                --conf-path=${tengine_install_dir:?}/conf/nginx.conf \
                --error-log-path=${tengine_install_dir:?}/logs/error.log \
                --http-log-path=${tengine_install_dir:?}/logs/access.log \
                --pid-path=${tengine_install_dir:?}/run/nginx.pid  \
                --lock-path=${tengine_install_dir:?}/run/nginx.lock \
                --user=${run_user:?} --group=$run_user \
                --with-file-aio \
                --with-http_v2_module \
                --with-http_realip_module \
                --with-http_gzip_static_module \
                --with-http_degradation_module \
                --with-http_ssl_module \
                --with-http_addition_module=shared \
                --with-http_sub_module=shared \
                --with-http_concat_module=shared \
                --with-http_random_index_module=shared \
                --with-http_sysguard_module=shared \
                --http-client-body-temp-path=${tengine_install_dir:?}/tmp/client/ \
                --http-proxy-temp-path=${tengine_install_dir:?}/tmp/proxy/ \
                --http-fastcgi-temp-path=${tengine_install_dir:?}/tmp/fcgi/ \
                --http-uwsgi-temp-path=${tengine_install_dir:?}/tmp/uwsgi \
                --http-scgi-temp-path=${tengine_install_dir:?}/tmp/scgi \
                --with-openssl=../openssl-${openssl_latest_version:?} \
                --with-pcre=../pcre-${pcre_version:?} --with-pcre-jit \
                --with-jemalloc \
                --with-zlib=../zlib-${zlib_version:?} \
                --add-module=../incubator-pagespeed-ngx-${pagespeed_version:?} $nginx_modules_options
    # close debug
    sed -i 's@CFLAGS="$CFLAGS -g"@#CFLAGS="$CFLAGS -g"@' auto/cc/gcc
    # Enable UTF-8 support in the bundled PCRE (currently disabled)
   # sed -i 's@./configure --disable-shared@./configure --disable-shared --enable-utf8 --enable-unicode-properties@' objs/Makefile
    echo -e "${CMSG}[step4 Tengine install ........ ]***********************************>>${CEND}\n"
    make -j${CpuProNum:?} && make install
    # A generated nginx.conf is the success marker for the build.
    if [ -e "$tengine_install_dir/conf/nginx.conf" ]; then
        echo -e "${CMSG}[Tengine installed successfully !!!]***********************************>>${CEND}\n"
        mkdir -p ${tengine_install_dir:?}/tmp/client
        # install echo-nginx-module
        $tengine_install_dir/sbin/dso_tool --add-module=${script_dir:?}/src/echo-nginx-module
        Config_Tengine
    else
        echo -e "${CFAILURE}[Tengine install failed, Please Contact the author !!!]*************>>${CEND}\n"
        # Kill the whole installer shell, not just this function.
        kill -9 $$
    fi
}
# Post-install configuration: install a templated nginx.conf, set up
# logrotate, an init script, and start Tengine via systemd or SysV
# depending on the distribution/version.
Config_Tengine(){
    if [ -e $tengine_install_dir/conf/nginx.conf ]; then
        echo -e "${CMSG}[Step5 configure Tengine]***********************************>>${CEND}\n"
        mkdir -p ${tengine_install_dir:?}/conf.d
        mv $tengine_install_dir/conf/nginx.conf $tengine_install_dir/conf/nginx.conf_bak
        if [ ${lua_install:?} = 'y' ]; then
            cp ${script_dir:?}/template/nginx/tengine_lua_template.conf $tengine_install_dir/conf/nginx.conf
        else
            cp ${script_dir:?}/template/nginx/tengine_template.conf $tengine_install_dir/conf/nginx.conf
        fi
        # Fill in the @placeholder@ tokens of the template
        sed -i "s#@run_user#${run_user:?}#g" $tengine_install_dir/conf/nginx.conf
       # sed -i "s#@worker_processes#2#g" $tengine_install_dir/conf/nginx.conf
        sed -i "s#@tengine_install_dir#$tengine_install_dir#g" $tengine_install_dir/conf/nginx.conf
        # logrotate nginx log
        [ -f /etc/logrotate.d/tengine ] && rm -rf /etc/logrotate.d/tengine
        cat > /etc/logrotate.d/tengine << EOF
$tengine_install_dir/logs/*.log {
daily
rotate 5
missingok
dateext
compress
notifempty
sharedscripts
postrotate
    [ -e $tengine_install_dir/run/nginx.pid ] && kill -USR1 \`cat $tengine_install_dir/run/nginx.pid\`
endscript
}
EOF
        # Init/startup script (CentOS-style template)
        mkdir -p ${tengine_install_dir:?}/init.d
        cp $script_dir/template/init.d/tengine.centos ${tengine_install_dir:?}/init.d/tengine
        chmod 775 ${tengine_install_dir:?}/init.d/tengine
        sed -i "s#^nginx_basedir=.*#nginx_basedir=${tengine_install_dir:?}#1" ${tengine_install_dir:?}/init.d/tengine
        #
        #systemd
        if ( [ $OS == "Ubuntu" ] && [ ${Ubuntu_version:?} -ge 15 ] ) || ( [ $OS == "CentOS" ] && [ ${CentOS_RHEL_version:?} -ge 7 ] );then
            [ -L /lib/systemd/system/tengine.service ] && systemctl disable tengine.service && rm -f /lib/systemd/system/tengine.service
            cp $script_dir/template/systemd/tengine.service  /lib/systemd/system/tengine.service
            sed -i "s#@nginx_basedir#${tengine_install_dir:?}#g" /lib/systemd/system/tengine.service
            systemctl enable tengine.service
            echo -e "${CMSG}[starting Tengine ] **************************************************>>${CEND}\n"
            systemctl start tengine.service
            echo -e "${CMSG}[start  Tengine OK ] **************************************************>>${CEND}\n"
        else
            [ -L /etc/init.d/tengine ] && rm -f /etc/init.d/tengine
            ln -s ${nginx_install_dir:?}/init.d/tengine /etc/init.d/tengine
            echo -e "${CMSG}[starting Tengine ] **************************************************>>${CEND}\n"
            # NOTE(review): argument order looks reversed — SysV usage is
            # normally `service tengine start`; confirm.
            service start tengine
            echo -e "${CMSG}[start  Tengine OK ] **************************************************>>${CEND}\n"
        fi
    else
        echo -e "${CFAILURE}[Tengine install failed, Please Contact the author !!!]*************>>${CEND}\n"
        # Abort the whole installer shell.
        kill -9 $$
    fi
}
# Entry point: resolve nginx variables, install the shared build
# dependencies, then prepare and build Tengine itself. Each step only
# runs if the previous one succeeded (&& chain).
Tengine_Install_Main() {
    Nginx_Var && Nginx_Base_Dep_Install && Tengine_Dep_Install && Install_Tengine
}
| true
|
e15bda5cf790da88bbe635d8831ea77a273a7af5
|
Shell
|
perng1220/script
|
/2.sh
|
UTF-8
| 3,081
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Deduplicate company records against MySQL table $db_name.$table_name.
# Input ($1) is a semicolon-separated CSV; each line is consumed (first
# line read, then deleted with `sed -i 1d`) and appended to either
# <file>_match or <file>_mismatch depending on whether a row with the same
# company name or phone number already exists in the database.
# NOTE(review): field values are interpolated directly into the SQL text —
# acceptable only for trusted input; not injection-safe.
export PATH=/bin:/sbin:/usr/bin:/usr/sbin

csv_file=$1
db_name=phone
table_name=ten

# Loop until the input file is empty (exit 2) or a mysql call fails (exit 1).
while (true)
do
	data=`head -n 1 $csv_file`
	if [ -n "$data" ]; then
		# Split the semicolon-separated fields of the current record.
#		vat_number=`echo $data | cut -d ";" -f 1`
		company=`echo $data | cut -d ";" -f 1`
#		person_in_charge=`echo $data | cut -d ";" -f 3`
		address=`echo $data | cut -d ";" -f 2`
		company_tel1=`echo $data | cut -d ";" -f 3`
#		company_tel1_ext=`echo $data | cut -d ";" -f 6`
		company_tel2=`echo $data | cut -d ";" -f 4`
#		company_tel2_ext=`echo $data | cut -d ";" -f 8`
		fax=`echo $data | cut -d ";" -f 5`
		website=`echo $data | cut -d ";" -f 6`
#		contact=`echo $data | cut -d ";" -f 10`
#		contact_tel=`echo $data | cut -d ";" -f 11`
#		contact_tel_ext=`echo $data | cut -d ";" -f 12`
#		contact_mobile=`echo $data | cut -d ";" -f 13`
		mail=`echo $data | cut -d ";" -f 7`
#		sales=`echo $data | cut -d ";" -f 15`
		# Only include tel2 in the lookup when the record actually has one.
		if [ -z "$company_tel2" ]; then
			query=`mysql -uroot -e "use $db_name;select company,company_tel1 from $table_name where company='$company' or company_tel1='$company_tel1'" || exit 1`
			if [ -n "$query" ]; then
#				echo "$vat_number;$company;$person_in_charge;$address;$company_tel1;$company_tel1_ext;$company_tel2;$company_tel2_ext;$fax;$contact;$contact_tel;$contact_tel_ext;$contact_mobile;$mail;$sales" >> ${1}_match.csv
				echo "$company;$address;$company_tel1;$company_tel2;$fax;$website;$mail" >> ${1}_match
				sed -i '1d' $csv_file
			else
#				echo "$vat_number;$company;$person_in_charge;$address;$company_tel1;$company_tel1_ext;$company_tel2;$company_tel2_ext;$fax;$contact;$contact_tel;$contact_tel_ext;$contact_mobile;$mail;$sales" >> ${1}_mismatch.csv
				echo "$company;$address;$company_tel1;$company_tel2;$fax;$website;$mail" >> ${1}_mismatch
				sed -i '1d' $csv_file
			fi
		else
			query=`mysql -uroot -e "use $db_name;select company,company_tel1,company_tel2 from $table_name where company='$company' or company_tel1='$company_tel1' or company_tel2='$company_tel2'" || exit 1`
			if [ -n "$query" ]; then
#				echo "$vat_number;$company;$person_in_charge;$address;$company_tel1;$company_tel1_ext;$company_tel2;$company_tel2_ext;$fax;$contact;$contact_tel;$contact_tel_ext;$contact_mobile;$mail;$sales" >> ${1}_match.csv
				echo "$company;$address;$company_tel1;$company_tel2;$fax;$website;$mail" >> ${1}_match
				sed -i '1d' $csv_file
			else
#				echo "$vat_number;$company;$person_in_charge;$address;$company_tel1;$company_tel1_ext;$company_tel2;$company_tel2_ext;$fax;$contact;$contact_tel;$contact_tel_ext;$contact_mobile;$mail;$sales" >> ${1}_mismatch.csv
				echo "$company;$address;$company_tel1;$company_tel2;$fax;$website;$mail" >> ${1}_mismatch
				sed -i '1d' $csv_file
			fi
		fi
	else
		# Input exhausted: done.
		exit 2
	fi
done
| true
|
d0b5d45adb04381f5695080ab3a44146533f3546
|
Shell
|
marcosmunis/myCV
|
/outros_arquivos/push_to_start.bash
|
UTF-8
| 1,465
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a Django/MySQL development box (subcommand `install`) or toggle
# the firewall helpers (`fw-start` / `fw-stop`).
#
# set -e: abort on the first failing command.
# set -u: treat unset variables as errors — note this makes a missing
# subcommand ($1) abort before reaching the usage message.
set -e
set -u

# Install all OS packages and Python dependencies in one pass.
install_packs() {
	echo "install_packs"
	apt update
	apt full-upgrade -y
	apt-get install python -y
	apt-get install idle-python -y
	apt-get install python-pip -y
	apt-get install libmysqlclient-dev -y
	apt-get install python-dev -y
	apt-get install default-jdk -y
	apt-get install mysql-server -y
	apt-get install python-mysqldb -y
	apt-get install openssh-server -y
	apt-get install apache2 -y
	apt-get install phpmyadmin -y
	# NOTE(review): unlike the others, no -y here — with set -e an
	# interactive prompt declined would abort the script; confirm.
	apt-get install git
	pip install --upgrade pip
	pip install --upgrade setuptools
	pip install MySQL-python
	pip install pymysql
	pip install django
	pip install django-secure
	pip install django-sslserver
	pip install setuptools
	pip install django-extensions
	python -V
	django-admin --version
}

#
# MAIN
#
# Dispatch on the first argument.
case "${1}" in
	'install')
		install_packs
	;;
	'fw-start')
		echo "firewall-start"
		/bin/fw.start
	;;
	'fw-stop')
		echo "firewall-stop"
		/bin/fw.stop
	;;
	*)
		echo ""
		echo "Usage: "
		echo "  push_to_install install | fw.start | fw.stop "
		echo ""
		exit 1
	;;
esac
exit 0
| true
|
7cb6a741a87783a8e957567b309a385925dc673d
|
Shell
|
AndrewCEmil/YCSBScripts
|
/achilleSetup.sh
|
UTF-8
| 1,297
| 2.84375
| 3
|
[] |
no_license
|
# YCSB Prep
# Bootstrap a build host for YCSB: remove old toolchains, install git,
# Apache Maven 3.0.5 and Oracle JDK 6, register them in ~/.bashrc and
# `alternatives`, then clone and build the YCSB fork.
sudo rm -rf /usr/lib/maven jdk* /usr/lib/jvm/jdk1.6.0_33
sudo yum -y update
sudo yum -y install git
wget http://mirror.cc.columbia.edu/pub/software/apache/maven/maven-3/3.0.5/binaries/apache-maven-3.0.5-bin.tar.gz
chmod 700 apache-maven*.tar.gz
tar xzf apache-maven*.tar.gz
rm -f apache-maven*.tar.gz
sudo mv ./apache-maven*/ maven
sudo mv maven /usr/lib
#note this is the oracle version of java, replace this line
wget http://dl.dropbox.com/u/14846803/jdk-6u33-linux-x64.bin
chmod a=rwx ./jdk-6u33-linux-x64.bin
# Feed blanks to the installer's license prompts.
yes " " | ./jdk-6u33-linux-x64.bin
rm ./jdk-6u33-linux-x64.bin
sudo mv ./jdk1.6.0_33/ /usr/lib/jvm/jdk1.6.0_33
# BUG FIX: these lines were double-quoted, so $PATH/$M2_HOME/$JAVA_HOME were
# expanded *now* (while M2_HOME/JAVA_HOME were still empty) and the stale
# values were baked into ~/.bashrc. Single quotes write the variable
# references literally so they expand when ~/.bashrc is sourced.
echo 'export M2_HOME=/usr/lib/maven' >> ~/.bashrc
echo 'export JAVA_HOME=/usr/lib/jvm/jdk1.6.0_33' >> ~/.bashrc
echo 'export PATH=$PATH:$M2_HOME/bin:$JAVA_HOME/bin' >> ~/.bashrc
echo 'export PATH=/usr/lib/maven/bin/:$PATH' >> ~/.bashrc
source ~/.bashrc
# Re-login and configure alternatives
sudo alternatives --install /usr/bin/java java $JAVA_HOME/bin/java 2
for i in /usr/lib/jvm/*openjdk*/bin/java; do
    # NOTE(review): the leading `echo` makes this a dry run — it only
    # prints the removal command. Drop it if the OpenJDK alternatives
    # should actually be removed.
    echo sudo alternatives --remove java $i;
done
#sudo alternatives --config java
#clone ycsb
git clone https://github.com/achille/YCSB.git
cd YCSB
mvn package
#./bin/ycsb load mongodb -s -P workloads/workloada -threads 10 | egrep -v " [0]$"
| true
|
8bb9c713b70e480c405afc6d339d5a413c95a3b4
|
Shell
|
Bnei-Baruch/archive-backend
|
/misc/release.sh
|
UTF-8
| 821
| 3.859375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Usage: misc/release.sh
# Build package, tag a commit, push it to origin, and then deploy the
# package on production server.
set -e
echo "Building..."
make build
version="$(./archive-backend version | awk '{print $NF}')"
[ -n "$version" ] || exit 1
echo $version
echo "Tagging commit and pushing to remote repo"
git commit --allow-empty -a -m "Release $version"
git tag "v$version"
git push origin master
git push origin "v$version"
echo "Uploading executable to server"
scp archive-backend archive@app.archive.bbdomain.org:/sites/archive-backend/"archive-backend-$version"
ssh archive@app.archive.bbdomain.org "ln -sf /sites/archive-backend/archive-backend-$version /sites/archive-backend/archive-backend"
echo "Restarting application"
ssh archive@app.archive.bbdomain.org "supervisorctl restart archive"
ssh archive@app.archive.bbdomain.org "supervisorctl restart events"
| true
|
9c0b378292d8d42d59203a3beb7d1770809ff2a9
|
Shell
|
xpzouying/dotfiles
|
/neovim_config/utils/pypi_conf/config.sh
|
UTF-8
| 222
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Ensure ~/.pip exists and contains a symlink to the pip.conf that lives in
# the current working directory (run this from the directory holding
# pip.conf). BUG FIX: the shebang was `#/bin/bash` (missing '!'), i.e. just
# a comment, so the kernel fell back to whatever shell invoked the script.

if [ ! -d "$HOME/.pip" ]; then
    echo "make $HOME/.pip"
    mkdir "$HOME/.pip"
fi

if [ ! -f "$HOME/.pip/pip.conf" ]; then
    echo "pip.conf not exists. link it."
    ln -s "$(pwd)/pip.conf" "$HOME/.pip/pip.conf"
fi
| true
|
4dc6a225dc14211ebce1258d7f30aa64d63dac70
|
Shell
|
adorn331/CompetitionPlatform
|
/docker/run_django.sh
|
UTF-8
| 1,648
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: wait for the database, apply Django migrations,
# collect static files, ensure a default superuser, then serve the app
# with gunicorn.

echo "Waiting for database connection..."
# Poll until the database accepts TCP connections (30s timeout per attempt).
until netcat -z -v -w30 $DB_HOST $DB_PORT
do
  sleep 1
done
echo "WEB IS RUNNING"

# # Static files
# npm cache clean
# npm install .
# npm install -g less
# npm run build-css
# python manage.py collectstatic --noinput

pip install -r /app/CompetitionPlatform/requirements/requirements.txt
python manage.py makemigrations
python manage.py migrate
python manage.py collectstatic --noinput

# Automatically create superuser when start
# (idempotent: skipped if a user named admin already exists)
USER="admin"
PASS="admin"
MAIL="admin@admin.com"
script="
from apps.authenz.models import User;

username = '$USER';
password = '$PASS';
email = '$MAIL';

if User.objects.filter(username=username).count()==0:
    User.objects.create_superuser(username, email, password, isactivate=True);
    print('Superuser created.');
else:
    print('Superuser creation skipped.');
"
printf "$script" | python manage.py shell

# If the above migrations are failing upgrade an older database like so:
# # Unsure why I had to specially migrate this one
# $ python manage.py migrate oauth2_provider --fake
# $ python manage.py migrate --fake-initial

# Insert initial data into the database
# python scripts/initialize.py

# start development server on public ip interface, on port 8000
PYTHONUNBUFFERED=TRUE gunicorn CompetitionPlatform.wsgi \
    --bind django:$DJANGO_PORT \
    --access-logfile=/var/log/django/access.log \
    --error-logfile=/var/log/django/error.log \
    --log-level $DJANGO_LOG_LEVEL \
    --reload \
    --timeout 4096 \
    --enable-stdio-inheritance \
    --workers=${GUNICORN_CONCURRENCY:-1} \
    --worker-class eventlet
| true
|
e8ea86ce330677c8e7bb412485c3ce7045662ebb
|
Shell
|
nemethf/lsp-adapter
|
/dockerfiles/bash/populate-explainshell-db.sh
|
UTF-8
| 1,651
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# This script is intended to be used during the build of the bash language
# server to initialize the explainshell database with a few thousand man pages.
# This is a pretty slow process which takes ~30 minutes.
#
# This script assumes:
#
# - The current working directory is explainshell
# - Everything that explainshell needs is installed (mongo, python, etc.)
# - The mongo db is at /data/db2 (instead of /data/db, because that's read-only
#   in the mongo image)

# Poll until mongod accepts TCP connections on its default port.
wait_for_mongo_up() {
    while true; do
        nc -zvv localhost 27017 && return
        sleep 1
    done
}

# Poll until the explainshell web server is listening on port 5000.
wait_for_server_up() {
    while true; do
        nc -zvv localhost 5000 && return
        sleep 1
    done
}

# Start mongo and the explainshell web server in the background
mongod --dbpath /data/db2 &
make serve &
wait_for_mongo_up
wait_for_server_up

# Load the classifiers for flags in man pages
mongorestore -d explainshell dump/explainshell && mongorestore -d explainshell_tests dump/explainshell

# Sanity check
make tests

# Download a few thousand man pages
git clone https://github.com/idank/explainshell-manpages
cd explainshell-manpages
# Pin to a known commit for reproducible builds.
git checkout bb7f4dfb037b890de58e0541b369cad1eb6ae07f
# Avoid busting the cache if newer commits get pushed
rm -rf .git
cd ..

# Actually load the man pages. Last I checked there were ~26,000 man pages and
# it took ~30 minutes to load. It loads in batches for speed, and the batch size
# is capped at 1000 to avoid hitting the limit on number of shell arguments.
find . -name "*.gz" | xargs -n 1000 env PYTHONPATH=. python explainshell/manager.py

# Done loading, can stop mongo and the explainshell web server.
jobs -p | xargs kill
| true
|
915277e131f25e9806b73faae9410dc4be50d7ed
|
Shell
|
mstniy/safepm
|
/benchmarks/crash_consistency/run-variants.sh
|
UTF-8
| 323
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build and run the crash-consistency benchmark in Docker.
# BENCHMARK_PM_PATH must point at a directory (the persistent-memory mount)
# that is bind-mounted into the container; results land in ./results.

if ! [ -d "$BENCHMARK_PM_PATH" ]
then
	# BUG FIX: was `2>&1`, which merges stderr into stdout; an error
	# message belongs on stderr, which is `>&2`.
	echo "Please set BENCHMARK_PM_PATH to a valid directory." >&2
	exit 1
fi

mkdir -p results
docker build -t safepm-crash_consistency .
docker run -v "$BENCHMARK_PM_PATH:/mnt/crash_consistency" -v "$(pwd)/results":/results -t safepm-crash_consistency bash inner_run.sh
| true
|
b64a36a1e968b67711be1f0ea4e475d68f96700b
|
Shell
|
vilas20-10-98/codeinclub
|
/empWAGE.sh
|
UTF-8
| 161
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Toy wage exercise: flip a coin (RANDOM % 2) and, only when it matches the
# "present" flag, compute the day's salary as rate * hours.
isPrasent=1
randomcheck=$(( RANDOM % 2 ))
if (( isPrasent == randomcheck )); then
    emprateperhr=20
    emphrs=8
    salary=$(( emprateperhr * emphrs ))
fi
| true
|
d7a182fa5cd189f67d5c3aa24862a0cf2454f838
|
Shell
|
scriptzteam/SCENE-SCRiPTS
|
/Others/Other/glftpd_scripts/turranius/trialgroup.cron_0-2.sh
|
UTF-8
| 4,012
| 3.875
| 4
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
############################################################################
# Group Trial Cron Script 0.2 by Turranius                                 #
# This script will check how much users in a group uploaded this week and  #
# add it togheter.                                                         #
# This is the script ment to be run as a cron job to create a log file.    #
# Otherwise, you would have to use the trialgroup.sh before midnight at    #
# saturday night. With this you can just check the logfile. It basically   #
# runs trialgroup.sh for you.                                              #
# Copy this to /glftpd/bin and set crontab to                              #
# 55 23 * * 6 /glftpd/bin/trialgroup.cron.sh                               #
# This will run it at 23:55 on Saturday.                                   #
# Setup the parameters below. If you have any folders in your             #
# users folder, add them to the EXLUDE line, | delimited. You can also     #
# add any users you do not want to count, even if they are in a group with #
# trial.                                                                   #
############################################################################
# Changelog                                                                #
# 0.1 - Initial script.                                                    #
# 0.2 - Added TRiAL-2 which is to be used if they are up for trial next    #
#       week. Have to change it to TRiAL manually when its time for them   #
#       to start. Nice to know how they are doing.                         #
#       Changed how the script checks if they are on trial. Instead of     #
#       reading a file, it checks the groups NFO. To set groups on trial   #
#       change the groups nfo (with site grpnfo) to either TRiAL or        #
#       TRiAL-2.                                                           #
############################################################################

USERPATH=/glftpd/ftp-data/users
GROUPFILE=/glftpd/etc/group
LOGFILE=/glftpd/ftp-data/logs/grouptrial.log
# Non-user entries in $USERPATH, egrep-alternation ('|') delimited.
EXCLUDE="default.user|glftpd|backup|disabled"

## Dont change anything below here unless you know what you are doing.

UPPED="0"
COUNT="0"
TOTAL="0"
EXISTS="NO"

cd $USERPATH
touch $LOGFILE
echo "" >> $LOGFILE
echo "----------------------------------------------------" >> $LOGFILE
date >> $LOGFILE

# Pass 1: groups whose NFO (field 2 of the colon-separated group file) is
# "TRiAL". For every user file whose GROUP line mentions the group, take
# the weekly-upload field (WKUP, in KB), convert to MB and accumulate.
for i in `cat $GROUPFILE`
do
  NAME="$(echo $i | awk -F":" '{print $1}')"
  TRIAL="$(echo $i | awk -F":" '{print $2}')"
  if [ "$TRIAL" = "TRiAL" ]; then
    for u in `ls -f -A | /bin/egrep -v $EXCLUDE`
    do
      # Reset the running total when a new group starts.
      if [ "$LAST" != $i ]; then
        TOTAL="0"
        echo " -----[ New group found: $i ]----- "
      fi
      GROUP="$(cat $u | grep GROUP | awk -F" " '{print $2}')"
      INIT="$( echo $GROUP | grep $NAME )"
      if [ "$INIT" != "" ]; then
        UPPED="$(cat $u | grep WKUP | awk -F" " '{print $3}')"
        UPPED="$(expr $UPPED \/ 1024)"
        COUNT="$(expr $COUNT \+ 1)"
        TOTAL="$(expr $UPPED \+ $TOTAL)"
        echo "Group: $NAME. User: $u - $UPPED MB this week. Total: $TOTAL" >> $LOGFILE
      fi
      LAST=$i
    done
  fi
done

## NEXT WEEKS TRIAL
# Pass 2: identical logic for groups marked "TRiAL-2" (starting next week).
i=""
u=""
for i in `cat $GROUPFILE`
do
  NAME="$(echo $i | awk -F":" '{print $1}')"
  TRIAL2="$(echo $i | awk -F":" '{print $2}')"
  if [ "$TRIAL2" = "TRiAL-2" ]; then
    for u in `ls -f -A | /bin/egrep -v $EXCLUDE`
    do
      if [ "$LAST" != $i ]; then
        TOTAL="0"
        echo " -----[ Found group for next week: $i ]----- "
      fi
      GROUP="$(cat $u | grep GROUP | awk -F" " '{print $2}')"
      INIT="$( echo $GROUP | grep $NAME )"
      if [ "$INIT" != "" ]; then
        UPPED="$(cat $u | grep WKUP | awk -F" " '{print $3}')"
        UPPED="$(expr $UPPED \/ 1024)"
        COUNT="$(expr $COUNT \+ 1)"
        TOTAL="$(expr $UPPED \+ $TOTAL)"
        echo "Group: $NAME. User: $u - $UPPED MB this week. Total: $TOTAL" >> $LOGFILE
      fi
      LAST=$i
    done
  fi
done
exit 0
| true
|
07195173d1759f6880107bea12201bcc2b1d853f
|
Shell
|
tectronics/portaboom
|
/lack/portaboom/scripts/make_release_files.sh
|
UTF-8
| 2,990
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/sh

# script to create the release files for XXXX release;
# script will be run from the top level project directory
# Expects FILELIST, TEMP_DIR, PROJECT_DIR, PROJECT_NAME, BUILD_BASE and the
# version variables (KERNEL_VER, RELEASE_VER, LACK_PASS) in the environment.

# program locations
CAT=$(which cat)
SED=$(which sed)
GIT=$(which git)
PERL_GTK2_SRC="/tmp/perl-Gtk2"

# Required environment sanity checks.
if [ -z $FILELIST ]; then
    echo "ERROR: FILELIST variable empty!"
    exit 1
fi # if [ -z $FILELIST ]; then

if [ -z $TEMP_DIR ]; then
    echo "ERROR: TEMP_DIR variable empty!"
    exit 1
fi # if [ -z $FILELIST ]; then

# grab a copy of the gtk2-Perl source via git
if [ ! -d $PERL_GTK2_SRC ]; then
    if [ -z $GIT ]; then
        echo "ERROR: 'git' command not found!"
        echo "ERROR: git is required to sync perl-Gtk2 examples"
        exit 1
    fi # if [ -z $GIT ]
    echo "Cloning perl-Gtk2 source for 'examples' and 'gtk-demo'..."
    git clone git://git.gnome.org/perl-Gtk2 $PERL_GTK2_SRC
fi # if [ ! -d "/tmp/gtk2-perl-examples" ];

# copy the combined SSL key/cert for shellinabox
if [ -f ~/stuff_tars/lack.googlecode.com.key-cert.pem ]; then
    cp ~/stuff_tars/lack.googlecode.com.key-cert.pem $TEMP_DIR/certificate.pem
else
    echo "ERROR: missing SSL key/cert for shellinabox"
    echo "ERROR checked ${HOME}/stuff_tars"
    exit 1
fi # if [ -f ~/stuff_tars/lack.googlecode.com.key-cert.pem ]

# any files in this list get enumerated over and the substitutions below are
# performed on them
INPUT_FILES="issue.${PROJECT_NAME} issue.${PROJECT_NAME}.nogetty"

# verify the base file exists
if [ ! -e $PROJECT_DIR/${PROJECT_NAME}.base.txt ]; then
    echo "ERROR: ${PROJECT_DIR}/${PROJECT_NAME}.base.txt file does not exist"
    exit 1
fi # if [ $PROJECT_DIR/${PROJECT_NAME}.base.txt ]

### create the initramfs filelist
# base filelist + per-kernel filelist -> initramfs-filelist.txt
if [ -e $PROJECT_DIR/kernel_configs/linux-image-$1.txt ]; then
    cat $PROJECT_DIR/${PROJECT_NAME}.base.txt \
        $PROJECT_DIR/kernel_configs/linux-image-$1.txt \
        > $PROJECT_DIR/initramfs-filelist.txt
else 
    echo "make_release_files.sh: linux-image-$1.txt file does not exist"
    echo "make_release_files.sh: in ${PROJECT_DIR}/kernel_configs directory"
    exit 1
fi

### create the hostname file
echo "${PROJECT_NAME}" > $TEMP_DIR/hostname

# build the file with the correct substitutions performed
# below variables are set in the initramfs.cfg file
# (:TOKEN: placeholders are replaced with the current values)
for SEDFILE in $(echo $INPUT_FILES);
do
    $CAT $PROJECT_DIR/etcfiles/$SEDFILE \
    | $SED "{
        s!:KERNEL_VER:!${KERNEL_VER}!g;
        s!:RELEASE_VER:!${RELEASE_VER}!g;
        s!:LACK_PASS:!${LACK_PASS}!g;
    }" \
    > $TEMP_DIR/$SEDFILE
done

# create the new init.sh script, which will be appended to
#$TOUCH $TEMP_DIR/init.sh
$CAT $BUILD_BASE/common/initscripts/_initramfs_init.sh | $SED \
    "{
        s!:PROJECT_NAME:!${PROJECT_NAME}!g;
        s!:PROJECT_DIR:!${PROJECT_DIR}!g;
        s!:BUILD_BASE:!${BUILD_BASE}!g;
        s!:VERSION:!${KERNEL_VER}!g;
    }" >> $TEMP_DIR/init.sh

# add the init script to the filelist
echo "file /init /${TEMP_DIR}/init.sh 0755 0 0" >> $TEMP_DIR/$FILELIST

# vi: set sw=4 ts=4 paste:
| true
|
e89491d3c3150f7e3cfbff193636fdc00ad85f0b
|
Shell
|
BrandonBielicki/dotconfigs
|
/scripts/package_installs.sh
|
UTF-8
| 3,893
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# BUG FIX: was `#! /bin/sh` — the script uses bash-only features such as
# ${BASH_SOURCE[0]}, so it must run under bash.

# Resolve the script's own directory so log paths work from any cwd.
SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
LOGFILE=$SRCDIR/../logs/log
PACKAGESLOG=$SRCDIR/../logs/packages
echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" >> "$LOGFILE"
echo "	Entered $0" >> "$LOGFILE"

# First positional argument is the target user; installs run as that user.
USER=$1
HOMEDIR="$(eval echo ~$USER)"
echo "	Running as user $USER" >> "$LOGFILE"
echo "	$USER's home directory: $HOMEDIR" >> "$LOGFILE"

# -r: force reinstallation of already-present packages.
while getopts ":r" OPTION; do
	case $OPTION in
		r)
			echo "	r flag detected" >> "$LOGFILE"
			reinstall=true
			;;
		\?)
			echo "pass r to reinstall"
			exit
			;;
	esac
done

# NOTE(review): this unconditionally forces reinstall mode and makes the
# -r flag a no-op — looks like a leftover debug override; confirm whether
# it should be removed.
reinstall=true
# Build and install a single binary from a git repository into $HOMEDIR/bin.
# $1 = package name (also the resulting binary name)
# $2 = git url
# $3 = makefile location (path inside the checkout; may be empty)
function gitInstall {
	if [ ! -f "$HOMEDIR/bin/$1" ]; then
		RETDIR=$PWD
		echo "	Installing $1..." >> "$LOGFILE"
		cd "$HOMEDIR/bin"
		su "$USER" -c "git clone $2 $1.d"
		cd "$1.d/$3"
		echo "	Installing to $PWD" >> "$LOGFILE"
		su "$USER" -c "make"
		su "$USER" -c "cp $HOMEDIR/bin/$1.d/$3/$1 $HOMEDIR/bin/"
		# Presence of the binary is the success marker.
		if [ -f "$HOMEDIR/bin/$1" ]; then
			echo "$1 Installed" >> "$PACKAGESLOG"
		else
			echo "$1 FAILED" >> "$PACKAGESLOG"
		fi
		rm -r -f "$HOMEDIR/bin/$1.d"
		cd "$RETDIR"
	else
		echo "	$1 already installed" >> "$LOGFILE"
		if [ "$reinstall" = true ]; then
			echo "	Reinstalling $1" >> "$LOGFILE"
			rm "$HOMEDIR/bin/$1"
			rm -r -f "$HOMEDIR/bin/$1.d"
			# BUG FIX: previously called `install`, which resolves to
			# /usr/bin/install rather than recursing into this function.
			gitInstall "$1" "$2" "$3"
		fi
	fi
}
# Build and install an AUR package via makepkg inside $HOMEDIR/bin/src.
# $1 = package name
# $2 = git url of the AUR repository
# Globals read: USER, HOMEDIR, LOGFILE, PACKAGESLOG, reinstall
function aurInstall {
    if [ ! -d "$HOMEDIR/bin/src/$1.d" ]; then
        RETDIR=$PWD
        echo "	Installing $1..." >> "$LOGFILE"
        cd "$HOMEDIR/bin/src"
        su "$USER" -c "git clone $2 $1.d"
        cd "$1.d"
        echo "	Installing to $PWD" >> "$LOGFILE"
        # -s: resolve deps, -r: remove build deps, -i: install the package.
        su "$USER" -c "makepkg -sri --needed --noconfirm"
        if [ $? = 0 ]; then
            echo "$1 Installed" >> "$PACKAGESLOG"
        else
            echo "$1 FAILED" >> "$PACKAGESLOG"
        fi
        rm -r -f "$1.d"
        cd "$RETDIR"
    else
        echo "	$1 already installed" >> "$LOGFILE"
        if [ "$reinstall" = true ]; then
            echo "	Reinstalling $1" >> "$LOGFILE"
            sudo rm -r -f "$HOMEDIR/bin/src/$1.d"
            # BUG FIX: the original called `install $1 $2` (coreutils
            # /usr/bin/install), not this function. Recurse properly.
            aurInstall "$1" "$2"
        fi
    fi
}
# Install every package listed in a file, one entry per line.
# Lines of the form "[AUR] name url" are dispatched to aurInstall,
# "[GIT] name url subdir" to gitInstall; anything else goes to the system
# package manager command in $PM.
# $1 = path to the package list file
# Globals read: PM (command prefix, intentionally word-split), LOGFILE, PACKAGESLOG
function fileByLine {
    echo "	Reading from file: $1" >> "$LOGFILE"
    # `|| [ -n "$PACKAGE" ]` keeps a final line that lacks a trailing newline.
    while read -r PACKAGE || [ -n "$PACKAGE" ]; do
        case "$PACKAGE" in
            # BUG FIX: the original patterns `[\AUR\]*` / `[\GIT\]*` were
            # single-character bracket expressions, so tagged lines never
            # matched and were passed to $PM instead. Match the literal
            # "[AUR]" / "[GIT]" prefixes.
            "[AUR]"*)
                # Strip the 5-char tag; word splitting yields name + url.
                aurInstall ${PACKAGE:5}
                ;;
            "[GIT]"*)
                gitInstall ${PACKAGE:5}
                ;;
            *)
                # $PM is deliberately unquoted: it holds a command plus flags.
                if $PM $PACKAGE; then
                    echo "$PACKAGE Installed" >> "$PACKAGESLOG"
                else
                    echo "$PACKAGE FAILED" >> "$PACKAGESLOG"
                fi
                ;;
        esac
    done < "$1"
}
# Select the package manager for the detected distribution and refresh its
# package index before installing anything.
if [ -f /etc/debian_version ]; then
PM="sudo apt-get -y install"
PACKAGE_LIST="apt_packages"
GROUPDIR="apt"
echo "	package manager command: $PM" >> $LOGFILE
sudo apt-get -y update
elif [ -f /etc/arch-release ]; then
PM="sudo pacman -S --noconfirm --needed"
echo "	package manager command: $PM" >> $LOGFILE
PACKAGE_LIST="pacman_packages"
GROUPDIR="pacman"
sudo pacman -Syu --noconfirm
fi
# Distribution-specific list first, then packages common to all distros.
fileByLine ../setup/package_lists/$PACKAGE_LIST
fileByLine ../setup/package_lists/universal_packages
# Each named group has optional pre/post hooks around its own package list.
while read -r ITEM || [ -n "$ITEM" ]; do
echo "	Installing packages from group $ITEM" >> $LOGFILE
# Lines starting with '#' in group_packages are comments; skip them.
case "$ITEM" in \#*) continue ;; esac
../setup/package_lists/.groups/$GROUPDIR/$ITEM/pre_$ITEM.sh
fileByLine ../setup/package_lists/.groups/$GROUPDIR/$ITEM/$ITEM
../setup/package_lists/.groups/$GROUPDIR/$ITEM/post_$ITEM.sh
done < ../setup/package_lists/group_packages
cd $SRCDIR
# Marker file so the surrounding setup knows this stage completed.
touch ../work/package_installs
|
a6b3722bdad7e173df5813e55489930c0f814e19
|
Shell
|
ericaenjoy3/bioconda-recipes
|
/recipes/biobambam/build.sh
|
UTF-8
| 316
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# conda-build script for biobambam: point the toolchain at the conda
# $PREFIX and build against the libmaus2 installed there.
set -eu
mkdir -p "$PREFIX/bin"
# Header search paths for the various compiler front-ends.
# (Quoted — the original left every $PREFIX expansion unquoted.)
export CPP_INCLUDE_PATH="${PREFIX}/include"
export CPLUS_INCLUDE_PATH="${PREFIX}/include"
export CXX_INCLUDE_PATH="${PREFIX}/include"
# Library search paths for link time and run time.
export LD_LIBRARY_PATH="${PREFIX}/lib"
export LIBRARY_PATH="${PREFIX}/lib"
./configure --with-libmaus2="${PREFIX}/lib" --prefix="${PREFIX}"
make install
| true
|
32357d311857fe670b67f5f849de06b07f0f7264
|
Shell
|
gcfavorites/umonkey-tools
|
/misc/sayhours/debian/usr/bin/sayhours
|
UTF-8
| 123
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/sh
# Play one randomly chosen audio sample for the current hour.
SAMPLES=/usr/share/sayhours
HOURS=$(date +'%H')
# Pick a random file from the hour's directory. Uses find instead of
# parsing `ls` output (which breaks on unusual filenames); sort -R
# shuffles, head takes one.
FILE=$(find "$SAMPLES/$HOURS" -maxdepth 1 -type f | sort -R | head -n 1)
# Only play if a sample was actually found (the original invoked
# `play ""` when the directory was empty).
[ -n "$FILE" ] && play -q "$FILE"
| true
|
41087758849949b3e618d8b17ef0f90044b0b3fd
|
Shell
|
alanzhaonys/aws-ebs-deployment-boilerplate
|
/aws.sh
|
UTF-8
| 10,639
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Begin functions
#
# Print the banner that opens every deployment run (blank line + header).
function begin() {
  printf '\n%s\n' "+++++++++++++++++++++++++++ AWS DEPLOYMENT ++++++++++++++++++++++++++++++"
}
# Print the closing banner (header + blank line) and stop the script.
function end() {
  printf '%s\n\n' "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
  exit
}
# Create a new Elastic Beanstalk application version from the bundle already
# uploaded to S3, then spin up a fresh environment running it.
# $1 = environment name (also used as the CNAME prefix)
# Globals read: APP_NAME, APP_FILE_VERSIONED, S3_BUCKET, S3_BUCKET_FILE,
#               STACK, INSTANCE_TYPE, SECURITY_GROUP, EC2_KEY_NAME
# NOTE(review): `readonly` here means a second call in the same shell with a
# different argument would fail; the script only calls this once per run.
function create_environment() {
readonly ENV_TO_CREATE=$1
# Create new Application version
aws elasticbeanstalk create-application-version --application-name $APP_NAME \
--version-label $APP_FILE_VERSIONED --description $ENV_TO_CREATE \
--source-bundle S3Bucket="$S3_BUCKET",S3Key="$S3_BUCKET_FILE" \
>/dev/null 2>&1
# Create new environment
aws elasticbeanstalk create-environment --cname-prefix $ENV_TO_CREATE \
--application-name $APP_NAME --version-label $APP_FILE_VERSIONED \
--environment-name $ENV_TO_CREATE --solution-stack-name "$STACK" \
--option-settings "[
{
\"Namespace\": \"aws:autoscaling:launchconfiguration\",
\"OptionName\": \"InstanceType\",
\"Value\": \"${INSTANCE_TYPE}\"
},
{
\"Namespace\": \"aws:autoscaling:launchconfiguration\",
\"OptionName\": \"SecurityGroups\",
\"Value\": \"${SECURITY_GROUP}\"
},
{
\"Namespace\": \"aws:autoscaling:launchconfiguration\",
\"OptionName\": \"EC2KeyName\",
\"Value\": \"${EC2_KEY_NAME}\"
}
]" >/dev/null 2>&1
}
# Register a new application version and deploy it to an existing
# environment (in place — causes a rolling update of that environment).
# $1 = environment name to update
# Globals read: APP_NAME, APP_FILE_VERSIONED, S3_BUCKET, S3_BUCKET_FILE
function update_environment() {
readonly ENV_TO_UPDATE=$1
aws elasticbeanstalk create-application-version --application-name $APP_NAME \
--version-label $APP_FILE_VERSIONED --description $ENV_TO_UPDATE \
--source-bundle S3Bucket="$S3_BUCKET",S3Key="$S3_BUCKET_FILE" \
>/dev/null 2>&1
# Resolve the environment name to its ID, then point it at the new version.
ENV_ID=($(aws elasticbeanstalk describe-environments \
--environment-names $ENV_TO_UPDATE | jq -r '.Environments[].EnvironmentId'))
aws elasticbeanstalk update-environment --environment-id $ENV_ID \
--version-label "$APP_FILE_VERSIONED" >/dev/null 2>&1
}
# Poll an environment until its health is Green, then swap CNAMEs with a
# second environment (blue/green cut-over). Polls up to 10 times, 30s apart.
# $1 = environment to wait on (becomes the new production)
# $2 = environment to swap with (current production)
function swap_environment() {
readonly ENV_TO_WAIT=$1
readonly ENV_TO_SWAP=$2
# Wait for it to complete
try=10
i="0"
while [ $i -lt $try ]; do
echo "WAIT FOR SECONDARY ENVIRONMENT TO BE READY, DON'T QUIT"
# Give it a min
sleep 30
((i++))
ENV_TO_WAIT_HEALTH=($(aws elasticbeanstalk describe-environments \
--environment-names $ENV_TO_WAIT | jq -r '.Environments[].Health'))
if [ "$ENV_TO_WAIT_HEALTH" == "Green" ]; then
aws elasticbeanstalk swap-environment-cnames \
--source-environment-name $ENV_TO_SWAP \
--destination-environment-name $ENV_TO_WAIT
echo "SUCCESSFULLY SWAPPED ENVIRONMENTS"
break
fi
# On the final iteration without success, report the failure.
if [ $i -eq $(( $try - 1 )) ]; then
echo "UNABLE TO SWAPPED ENVIRONMENT"
fi
done
}
#
# End functions
#
begin
# Usage
if [ "${1}" != "deploy" ] && [ "${1}" != "terminate" ]; then
echo "Usage: ./aws.sh deploy | terminate | terminate app"
end
fi
# Get platform
PLATFORM=$(uname)
# Check platform
if [ "$PLATFORM" != "Linux" ] && [ "$PLATFORM" != "Darwin" ]; then
echo Your platform \"$PLATFORM\" is not supported
end
fi
# Check awscli installation
if ! hash aws 2>/dev/null; then
echo awscli is not installed
end
fi
# Check jq installation
if ! hash jq 2>/dev/null; then
echo jq is not installed
end
fi
# Check gdate (macOS only)
if [ "$PLATFORM" == "Darwin" ]; then
if ! hash gdate 2>/dev/null; then
echo gdate is not installed
end
fi
fi
# Check awscli configurations
if [ ! -f ~/.aws/config ] || [ ! -f ~/.aws/credentials ]; then
echo awscli is not configured
end
fi
########################
# Start configurations #
########################
# AWS application name
readonly APP_NAME="MUST CHANGE"
# Detect git branch
readonly APP_BRANCH=$(git branch | sed -n -e 's/^\* \(.*\)/\1/p')
# Application file name
readonly APP_FILE=${APP_NAME}-${APP_BRANCH}
# Environment name (AWS Elastic Beanstalk CNAME)
readonly ENV_NAME=${APP_FILE}
# Use timestamp as unique build number
readonly BUILD_NUMBER=$(date '+%Y%m%d-%H%M%S')
# Unique file name used for versioning
readonly APP_FILE_VERSIONED=${APP_FILE}-${BUILD_NUMBER}
# Public web directory
readonly PUBLIC_WEB_DIR="public_html"
# Platform stack
readonly STACK="64bit Amazon Linux 2017.09 v2.6.4 running PHP 7.1"
# EC2 instance type
readonly INSTANCE_TYPE="t2.micro"
# Security group
readonly SECURITY_GROUP="MUST CHANGE"
# EC2 key pair name
readonly EC2_KEY_NAME="MUST CHANGE"
# S3 bucket name
readonly S3_BUCKET="MUST CHANGE"
# S3 directory
readonly S3_BUCKET_DIR="apps/${APP_NAME}/${APP_BRANCH}"
# S3 file name
readonly S3_BUCKET_FILE=${S3_BUCKET_DIR}/${APP_FILE_VERSIONED}.zip
# Delete S3 file?
readonly S3_DELETE=1
# Delete S3 file "n" days old
readonly S3_DELETE_DAYS_OLD=7
# Open environment in browser after update
readonly OPEN_IN_BROWSER_AFTER_UPDATE=1
######################
# End configurations #
######################
# Whether or not anything has been updated
UPDATED=0
# Check if app exists
APP_EXISTS=($(aws elasticbeanstalk describe-application-versions \
--application-name $APP_NAME | jq -r '.ApplicationVersions[].ApplicationName'))
# Check if environment available
# (true means the CNAME is unused, i.e. the environment does NOT exist yet)
ENV_AVAILABLE=($(aws elasticbeanstalk check-dns-availability \
--cname-prefix $ENV_NAME | jq -r '.Available'))
# Check environment health
ENV_HEALTH=($(aws elasticbeanstalk describe-environments \
--environment-names $ENV_NAME | jq -r '.Environments[].Health'))
# Terminate
if [ "${1}" == "terminate" ]; then
if [ "$APP_EXISTS" == "" ]; then
echo "APPLICATION DOESN'T EXIST"
end
fi
# Terminate application
if [ "${2}" == "app" ]; then
echo "APPLICATION AND ALL IT'S RUNNING ENVIRONMENTS ARE TERMINATING..."
aws elasticbeanstalk delete-application --application-name $APP_NAME \
--terminate-env-by-force >/dev/null 2>&1
end
elif [ "$ENV_AVAILABLE" == "false" ]; then
# Terminate environment
if [ "$ENV_HEALTH" == "Green" ]; then
echo "EVIRONMENT IS TERMINATING..."
aws elasticbeanstalk terminate-environment --environment-name $ENV_NAME \
>/dev/null 2>&1
end
else
echo "ENVIRONMENT IS NOT READY, TRY AGAIN LATER"
end
fi
else
echo "ENVIRONMENT NOT FOUND"
end
fi
fi
# Continue with deployment
#####################################################
# BEGIN - BUILD YOUR WEB CONTENT (public_html) HERE #
#####################################################
touch ./public_html/build.txt
echo $BUILD_NUMBER >> ./public_html/build.txt
#####################################################
# END                                               #
#####################################################
# Remove previous build
rm -f /tmp/$APP_FILE.zip
# Zip up web content
echo ZIPPING UP WEB CONTENT IN $PUBLIC_WEB_DIR
cd ./$PUBLIC_WEB_DIR
zip -qr /tmp/$APP_FILE.zip .
cd - >/dev/null 2>&1
echo "BUILT APP LOCALLY ON /tmp/${APP_FILE}.zip"
# Send app to S3
echo "SENDING APP TO S3: s3://${S3_BUCKET}/${S3_BUCKET_FILE}"
aws s3 cp --quiet /tmp/${APP_FILE}.zip s3://${S3_BUCKET}/${S3_BUCKET_FILE}
echo "DEPLOYING..."
# App doesn't exists
if [ "$APP_EXISTS" == "" ]; then
# Environment CNAME available
if [ "$ENV_AVAILABLE" == "true" ]; then
# Create NEW application and environment
aws elasticbeanstalk create-application --application-name $APP_NAME \
--description "$APP_NAME" >/dev/null 2>&1
create_environment $ENV_NAME
UPDATED=1
echo "SUCCESSFULLY CREATED APPLICATION AND ENVIRONMENT"
else
# Can't create
echo "ENVIRONMENT NAME $APP_NAME IS NOT AVAILABLE"
# Clean up
aws s3 rm s3://${S3_BUCKET}/$S3_BUCKET_FILE >/dev/null 2>&1
fi
else
# App already exists
# Environment CNAME available
if [ "$ENV_AVAILABLE" == "true" ]; then
# Create environment
create_environment $ENV_NAME
UPDATED=1
echo "SUCCESSFULLY CREATED ENVIRONMENT"
else
# Update environment
if [ "$ENV_HEALTH" == "Green" ]; then
# Deploying to production and environment exists:
# - We don't want to update exsting environment as it will create down time.
# - Instead we create a secondary environment and swap CNAME when it's ready.
# - This secondary environment will stay there for future use, don't delete.
if [ "$APP_BRANCH" == "master" ]; then
echo "YOU'RE PUSHING TO PRODUCTION..."
readonly MASTER_ALT_ENV=${ENV_NAME}-alt
readonly MASTER_ALT_ENV_AVAILABLE=($(aws elasticbeanstalk \
check-dns-availability --cname-prefix $MASTER_ALT_ENV | jq -r '.Available'))
if [ "$MASTER_ALT_ENV_AVAILABLE" == "true" ]; then
echo "LET'S MAKE A SECONDARY ENVIRONMENT TO SWAP OVER TO AVOID DOWNTIME"
# Create a secondary master environment
create_environment $MASTER_ALT_ENV
swap_environment $MASTER_ALT_ENV $ENV_NAME
else
# If secondary environment has already been used as production,
# use the main environment for swapping
ENV_URL=($(aws elasticbeanstalk describe-environments \
--environment-names $ENV_NAME | jq -r '.Environments[].CNAME'))
if [[ $ENV_URL == "${ENV_NAME}."* ]]; then
echo "UPDATING SECONDARY ENVIRONMENT (${MASTER_ALT_ENV})"
update_environment $MASTER_ALT_ENV
swap_environment $MASTER_ALT_ENV $ENV_NAME
else
echo "UPDATING SECONDARY ENVIRONMENT (${ENV_NAME})"
update_environment $ENV_NAME
swap_environment $ENV_NAME $MASTER_ALT_ENV
fi
fi
else
# Not production, just update
update_environment $ENV_NAME
fi
UPDATED=1
echo "SUCCESSFULLY UPDATED ENVIRONMENT"
else
echo "ENVIRONMENT IS NOT READY, TRY AGAIN LATER"
# Clean up
aws s3 rm s3://${S3_BUCKET}/$S3_BUCKET_FILE >/dev/null 2>&1
fi
fi
fi
# Clean up old app files
if [ "$S3_DELETE" -eq 1 ] && [ "$UPDATED" -eq 1 ]; then
echo "TRY TO DELETE OLD S3 FILES($S3_DELETE_DAYS_OLD days old) at s3://${S3_BUCKET}/${S3_BUCKET_DIR}"
./delete-s3.sh "s3://${S3_BUCKET}/$S3_BUCKET_DIR" "${S3_DELETE_DAYS_OLD} days"
fi
if [ "$UPDATED" -eq 1 ]; then
# Get environment URL
ENV_URL=($(aws elasticbeanstalk describe-environments \
--environment-names $ENV_NAME | jq -r '.Environments[].CNAME'))
echo "LATEST BUILD NUMBER IS: ${BUILD_NUMBER}"
echo "ENVIRONMENT WILL BE SHORTLY AT: http://${ENV_URL}"
# Open in browser
if [ "$OPEN_IN_BROWSER_AFTER_UPDATE" -eq 1 ]; then
open http://$ENV_URL
fi
fi
end
| true
|
0cf562c738de725b4bb26a3cc7d94826e0b2231f
|
Shell
|
sdey-sag/sagdevops-ansible-roles
|
/provision-connx-from-s3/templates/install_products.sh.j2
|
UTF-8
| 1,485
| 3.75
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# Ansible-templated installer driver: fills the vendor response file with
# concrete paths and pipes it into the webMethods install client.
echo "Begin $0"
# Script name and its absolute directory (resolved via cd;pwd).
THIS=`basename $0`
THISDIR=`dirname $0`; THISDIR=`cd $THISDIR;pwd`
THIS_HOSTNAME=`hostname --long`
echo "Current hostname: $THIS_HOSTNAME"
echo "Current folder: $THISDIR"
## load utils functions
# In-place global search & replace within a file.
# $1 = pattern, $2 = replacement, $3 = file to edit.
# Uses '#' as the sed delimiter so '/' in paths needs no escaping;
# consequently pattern/replacement must not contain '#'.
function replace_global {
    local pattern=$1;
    local replacement=$2;
    local file=$3;
    # Quote the file operand (the original left it unquoted, which breaks
    # on paths containing whitespace).
    sed -i 's#'"$pattern"'#'"$replacement"'#g' "$file"
}
## set params from ansible variable values
__work_dir="{{ webmethods_localrepo_target_dir }}"
if [ "x$__work_dir" == "x" ]; then
    echo "[ERROR!! __work_dir value is not set ]"
    exit 2;
fi
__installer_target_path="{{ webmethods_install_target_dir | default('/opt/softwareag', true) }}"
__installer_script_path="${__work_dir}/responseFile"
__installer_script_path_runtime="${__installer_script_path}.runtime"
#create a new __installer_script_path file before the actual in-text replacement so we always re-create a new file before execution
cp -rf ${__installer_script_path} ${__installer_script_path_runtime}
## replace values in the webMethods Installer script
# The {{ '{{' }} / {{ '}}' }} constructs emit literal {{ and }} so the
# placeholder survives Jinja rendering and is substituted here by sed.
replace_global "{{ '{{' }} __installer_target_path {{ '}}' }}" "${__installer_target_path}" "${__installer_script_path_runtime}"
echo "Before install - target install dir: $__installer_target_path"
ls -al $__installer_target_path
## run installer
# The response file is fed to the install client on stdin (unattended mode).
cat ${__installer_script_path_runtime} | /bin/sh ${__work_dir}/installclient
echo "After install - target install dir: $__installer_target_path"
ls -al $__installer_target_path
echo DONE!!
| true
|
a367dc1476159ad6f27dabf46f2751a58c6672f1
|
Shell
|
intelfx/bin
|
/misc/style-devel/colors
|
UTF-8
| 1,221
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Source: http://crunchbang.org/forums/viewtopic.php?pid=137566#p137566
# Print the terminal's 16-color palette (normal and bold variants) as
# labelled swatches using ANSI SGR escape sequences.
colors=($(xrdb -query | sed -n 's/.*color\([0-9]\)/\1/p' | sort -nu | cut -f2))
echo -e "\e[1;37m
             BLK     RED     GRN     YEL     BLU     MAG     CYN     WHT
────────────────────────────────────────────────────────────────────────────────────────
\e[0m"
# Normal-intensity colors: SGR 30-37 (colors 0-7) and 90-97 (colors 8-15).
for i in {0..7}; do echo -en "\e[$((30+i))m █ $(printf "%7s" "color$i") \e[0m"; done; echo
for i in {8..15}; do echo -en "\e[$((90+i-8))m █ $(printf "%7s" "color$i") \e[0m"; done; echo
echo ""
# Bold variants: same codes prefixed with the bold attribute (1;).
for i in {0..7}; do echo -en "\e[1;$((30+i))m █ $(printf "%7s" "bold$i") \e[0m"; done; echo
for i in {8..15}; do echo -en "\e[1;$((90+i-8))m █ $(printf "%7s" "bold$i") \e[0m"; done; echo
echo -e "\e[1;37m
────────────────────────────────────────────────────────────────────────────────────────\e[0m"
| true
|
fde50d5b4be7c9fec41de570588025acb4289ea3
|
Shell
|
awcip/docker-autodl-irssi
|
/download.sh
|
UTF-8
| 935
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Download a torrent file announced by autodl-irssi, sanity-check it, and
# hand it to autotorrent/deluge.
# $1 = filter name, $2 = category, $3 = tracker short name,
# $4 = torrent name, $5 = torrent URL
filterName=$1
Category=$2
TrackerShort=$3
TorrentName=$4
TorrentUrl=$5
echo "$filterName $Category $TrackerShort $TorrentName $TorrentUrl" \
  >> "/home/user/download.log"
file="/autotorrent/$Category/$TorrentName-$TrackerShort.torrent"
# BUG FIX: quote the URL — the original passed it unquoted, which breaks
# on URLs containing '&', '?' query strings with spaces, or glob characters.
wget "$TorrentUrl" \
  --output-document="$file" \
  --append-output="/home/user/download.log" \
  --tries 3 \
  --no-verbose
# Bencoded .torrent files typically start with "d8:announce..."; anything
# else (e.g. an HTML error page from the tracker) is rejected and removed.
filehead=$(head -c 15 "$file")
if [[ "$filehead" == *"announce"* ]]; then
  echo "Good filehead: $filehead" >> "/home/user/download.log"
else
  echo "Bad filehead: $filehead" >> "/home/user/download.log"
  echo "Removing $file" >> "/home/user/download.log"
  rm -f "$file"
  exit
fi
# Shared lock so concurrent announces don't run autotorrent simultaneously.
flock -s /autotorrent/autotorrent.lock \
  autotorrent-env/bin/autotorrent \
  --config /autotorrent/autotorrent.conf --client deluge \
  -d -a "$file" \
  >> /home/user/download.log
| true
|
fbd748a3cfdac1a54a2f3c8df026efb1a00784a0
|
Shell
|
booleangate/dotfiles
|
/configure.sh
|
UTF-8
| 1,695
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Relies on helpers from the user's dotfiles (is_mac, has_bin).
. ~/.bash_functions
# Install base tooling on macOS: Homebrew itself, node, and GNU coreutils.
# No-op on other platforms (is_mac is defined in ~/.bash_functions).
configure_deps() {
	if is_mac; then
		if ! has_bin 'brew'; then
			/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
		fi
		has_bin 'node' || brew install node
		# Homebrew installs coreutils under this keg path when present.
		if [ ! -d /usr/local/opt/coreutils ]; then
			brew install coreutils
		fi
	fi
}
# Load the GitHub SSH key into a fresh ssh-agent.
# Requires ~/.ssh/id_github to already exist; aborts otherwise.
configure_git() {
	if [ ! -e ~/.ssh/id_github ]; then
		echo 'Install ~/.ssh/id_github first'
		# BUG FIX: bare `exit` propagated the echo's status (0), so a
		# missing key looked like success to any caller of this script.
		exit 1
	fi

	# $(...) instead of backticks; exports SSH_AUTH_SOCK/SSH_AGENT_PID.
	eval "$(ssh-agent -s)"
	ssh-add -k ~/.ssh/id_github
}
# Install the global npm tooling this setup depends on (needs sudo).
configure_npm_packages() {
	echo 'sudo npm install -g ...'
	local packages=(eslint omnivore-io/eslint-config.git gulp prettyjson)
	sudo npm install -g "${packages[@]}"
}
# Prepare vim's support directories, install Vundle-managed plugins, then
# finish the YouCompleteMe native build.
configure_vim() {
	local subdir
	for subdir in tmp bundle colors; do
		mkdir -p ~/.vim/"$subdir"
	done
	install_vim_vundle
	vim +PluginInstall +qall
	install_vim_ycm
}
# Clone Vundle if it is missing, then bring it up to date.
install_vim_vundle() {
	local dest=~/.vim/bundle/Vundle.vim
	[ ! -d "$dest" ] && git clone https://github.com/gmarik/Vundle.vim.git "$dest"
	cd "$dest"
	git pull
	cd -
}
# Finish YouCompleteMe's native build step with the JavaScript (tern)
# completer. C-family completion support is left commented out below.
install_vim_ycm() {
	## see https://github.com/Valloric/YouCompleteMe#full-installation-guide
	# Uncomment this business if you need C-family auto-completion
	# cmake is required by YCM's build; install it per-platform if absent.
	if ! command -v cmake >/dev/null; then
		if is_mac; then
			brew install cmake
		else
			sudo apt-get install cmake
		fi
	fi
	#mkdir /tmp/ycm_build
	#cd /tmp/ycm_build
	#cmake -G "Unix Makefiles" . ~/.vim/bundle/YouCompleteMe/third_party/ycmd/cpp
	# JavaScript
	~/.vim/bundle/YouCompleteMe/install.py --tern-completer
	cd ~
}
# Run the configuration steps in dependency order.
configure_deps
configure_git
configure_npm_packages
configure_vim
| true
|
03dc158f6e2749c6d73e8d9e75efbe6396d67c66
|
Shell
|
fredlim/canvas
|
/tools/scripts/canvas-build-debug.sh
|
UTF-8
| 2,342
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the CanvasNative iOS framework (Debug) for simulator + device,
# merge both into an XCFramework, and package a universal dSYM.
cd ../../packages/canvas/src-native/canvas-ios
set -e
DEV_TEAM=${DEVELOPMENT_TEAM:-}
DIST="CanvasNative/Dist"
mkdir -p $DIST
echo "Cleanup"
xcodebuild -project CanvasNative.xcodeproj -target "CanvasNative" -configuration Debug clean
echo "Building for iphone simulator"
xcodebuild archive -project CanvasNative.xcodeproj \
-scheme "CanvasNative" \
-configuration Debug \
-arch x86_64 \
-sdk iphonesimulator \
-quiet \
DEVELOPMENT_TEAM=$DEV_TEAM \
SKIP_INSTALL=NO \
BUILD_LIBRARY_FOR_DISTRIBUTION=YES \
-archivePath $DIST/CanvasNative.iphonesimulator.xcarchive
echo "Building for ARM64 device"
xcodebuild archive -project CanvasNative.xcodeproj \
-scheme "CanvasNative" \
-configuration Debug \
-arch arm64 \
-sdk iphoneos \
-quiet \
DEVELOPMENT_TEAM=$DEV_TEAM \
SKIP_INSTALL=NO \
BUILD_LIBRARY_FOR_DISTRIBUTION=YES \
-archivePath $DIST/CanvasNative.iphoneos.xcarchive
echo "Creating CanvasNative.xcframework"
OUTPUT_DIR="$DIST/CanvasNative.xcframework"
rm -rf $OUTPUT_DIR
xcodebuild -create-xcframework \
-framework "$DIST/CanvasNative.iphonesimulator.xcarchive/Products/Library/Frameworks/CanvasNative.framework" \
-framework "$DIST/CanvasNative.iphoneos.xcarchive/Products/Library/Frameworks/CanvasNative.framework" \
-output "$OUTPUT_DIR"
# Merge the per-slice dSYM DWARF binaries into one universal debug-symbol
# bundle, starting from the device archive's dSYM as the template.
DSYM_OUTPUT_DIR="$DIST/CanvasNative.framework.dSYM"
cp -r "$DIST/CanvasNative.iphoneos.xcarchive/dSYMs/CanvasNative.framework.dSYM/" $DSYM_OUTPUT_DIR
lipo -create \
"$DIST/CanvasNative.iphonesimulator.xcarchive/dSYMs/CanvasNative.framework.dSYM/Contents/Resources/DWARF/CanvasNative" \
"$DIST/CanvasNative.iphoneos.xcarchive/dSYMs/CanvasNative.framework.dSYM/Contents/Resources/DWARF/CanvasNative" \
-output "$DSYM_OUTPUT_DIR/Contents/Resources/DWARF/CanvasNative"
# Ship the dSYM zipped; remove the intermediate archives afterwards.
pushd $DIST
zip -qr "CanvasNative.framework.dSYM.zip" "CanvasNative.framework.dSYM"
rm -rf "CanvasNative.framework.dSYM"
popd
rm -rf "$DIST/CanvasNative.iphonesimulator.xcarchive"
rm -rf "$DIST/CanvasNative.iphoneos.xcarchive"
| true
|
487bccd541fb6fb8a95b88986fa585dd58a7ad15
|
Shell
|
Rainynite25/playbook
|
/ssh_scripts.sh
|
UTF-8
| 602
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate an SSH key pair (if absent), push the public key to a fixed set
# of hosts with sshpass, then verify passwordless login works.

#1 generate the key pair
if [ -f /root/.ssh/id_rsa ]; then
	echo "密钥已存在"
else
	ssh-keygen -t dsa -f /root/.ssh/id_rsa -N '' &>/dev/null
	echo "密钥已创建"
fi

for ip in {31,41,7,8}; do
	echo “=====================================”
	#2 distribute the public key
	# BUG FIX: the original tested `[ $?==0 ]`, a single-word string test
	# that is always true, so failures were reported as successes. Test
	# the command's exit status directly instead.
	if sshpass -p '1' ssh-copy-id 172.16.1.${ip} -o StrictHostKeyChecking=no &>/dev/null; then
		echo "密钥分发完成:172.16.1.${ip}"
	else
		echo "密钥分发失败"
	fi
	#3 verify key-based login
	if ssh 172.16.1.${ip} hostname; then
		echo "success!"
	else
		echo "fail!"
	fi
done
| true
|
6e04e39668482e90c98a830988c2e4d36a3b19e8
|
Shell
|
cinaeco/dotfiles
|
/shell/common/proxy.sh
|
UTF-8
| 365
| 3.53125
| 4
|
[] |
no_license
|
# Export (or clear) the standard proxy environment variables, in both the
# upper- and lower-case spellings tools expect.
function setproxy() {
  proxy_address=${1:-} # no argument clears the proxy
  local var
  for var in HTTP_PROXY HTTPS_PROXY FTP_PROXY http_proxy https_proxy ftp_proxy; do
    export "$var=$proxy_address"
  done
  echo "Proxy envvars set to '$proxy_address'"
}
| true
|
00b1b02ca37156c37a2d4dd3bc44b86f95b998ef
|
Shell
|
kamigaito/SLAHAN
|
/scripts/predict/google/predict.sh
|
UTF-8
| 10,314
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -u
# Evaluate each trained compression model: pick the best epoch on dev,
# decode the test set, and collect P/R/F/CR(/EM) metrics into CSV files.
DATADIR=${PWD}/dataset/conv
MODELDIR=${PWD}/models
BINDIR=${PWD}/build_compressor
SCRIPTSDIR=${PWD}/scripts/tools
RESULTSDIR=${PWD}/results
if [ ! -d ${RESULTSDIR} ]; then
mkdir ${RESULTSDIR}
fi
# models
# Three parallel arrays, indexed together: display name, model directory
# prefix (three runs each: _0.._2), and decoder binary.
NAMES=("lstm")
ROOTDIRS=("${MODELDIR}/lstm")
BINS=(${BINDIR}/lstm)
NAMES=("${NAMES[@]}" "lstm-dep")
ROOTDIRS=("${ROOTDIRS[@]}" "${MODELDIR}/lstm-dep")
BINS=("${BINS[@]}" ${BINDIR}/lstm)
NAMES=("${NAMES[@]}" "attn")
ROOTDIRS=("${ROOTDIRS[@]}" "${MODELDIR}/attn")
BINS=("${BINS[@]}" ${BINDIR}/attn)
NAMES=("${NAMES[@]}" "tagger")
ROOTDIRS=("${ROOTDIRS[@]}" "${MODELDIR}/tagger")
BINS=("${BINS[@]}" ${BINDIR}/tagger)
NAMES=("${NAMES[@]}" "base")
ROOTDIRS=("${ROOTDIRS[@]}" "${MODELDIR}/base")
BINS=("${BINS[@]}" ${BINDIR}/base)
NAMES=("${NAMES[@]}" "slahan_w_syn")
ROOTDIRS=("${ROOTDIRS[@]}" "${MODELDIR}/slahan_w_syn")
BINS=("${BINS[@]}" ${BINDIR}/slahan)
NAMES=("${NAMES[@]}" "parent_w_syn")
ROOTDIRS=("${ROOTDIRS[@]}" "${MODELDIR}/parent_w_syn")
BINS=("${BINS[@]}" ${BINDIR}/slahan)
NAMES=("${NAMES[@]}" "child_w_syn")
ROOTDIRS=("${ROOTDIRS[@]}" "${MODELDIR}/child_w_syn")
BINS=("${BINS[@]}" ${BINDIR}/slahan)
NAMES=("${NAMES[@]}" "slahan_wo_syn")
ROOTDIRS=("${ROOTDIRS[@]}" "${MODELDIR}/slahan_wo_syn")
BINS=("${BINS[@]}" ${BINDIR}/slahan)
NAMES=("${NAMES[@]}" "parent_wo_syn")
ROOTDIRS=("${ROOTDIRS[@]}" "${MODELDIR}/parent_wo_syn")
BINS=("${BINS[@]}" ${BINDIR}/slahan)
NAMES=("${NAMES[@]}" "child_wo_syn")
ROOTDIRS=("${ROOTDIRS[@]}" "${MODELDIR}/child_wo_syn")
BINS=("${BINS[@]}" ${BINDIR}/slahan)
# CSV result files: one row per model, columns = runs 1-3 plus their average.
echo "Model,1,2,3,AVG" > ${RESULTSDIR}/dev_google_p.csv
echo "Model,1,2,3,AVG" > ${RESULTSDIR}/dev_google_r.csv
echo "Model,1,2,3,AVG" > ${RESULTSDIR}/dev_google_f.csv
echo "Model,1,2,3,AVG" > ${RESULTSDIR}/dev_google_cr.csv
echo "Model,1,2,3,AVG" > ${RESULTSDIR}/dev_google_em.csv
echo "Model,1,2,3,AVG" > ${RESULTSDIR}/test_google_p.csv
echo "Model,1,2,3,AVG" > ${RESULTSDIR}/test_google_r.csv
echo "Model,1,2,3,AVG" > ${RESULTSDIR}/test_google_f.csv
echo "Model,1,2,3,AVG" > ${RESULTSDIR}/test_google_cr.csv
# For all sentences
i=0
for name in "${NAMES[@]}"; do
NAME=${NAMES[$i]}
BIN=${BINS[$i]}
echo ${NAME}
echo ${BIN}
# Skip models that were never trained (no run-0 directory).
if [ ! -d ${ROOTDIRS[$i]}_0 ]; then
let i++
continue
fi
for model_id in `seq 0 2`; do
ROOTDIR=${ROOTDIRS[$i]}_${model_id}
echo ${ROOTDIR}
# Model selection: pick the epoch (1..20) with the best exact-match
# score on the dev set; bc compares the floating-point scores.
max_em=0
max_id=0
for cur_id in `seq 1 20`; do
if [ ! -e ${ROOTDIR}/dev_${cur_id}.txt ]; then
continue
fi
cur_em=$(python ${SCRIPTSDIR}/eval_overlap_macro.py -i ${DATADIR}/dev.cln.sent -r ${DATADIR}/dev.cln.label -s ${ROOTDIR}/dev_${cur_id}.txt |grep ^Exact |sed 's/ //g'|cut -d':' -f 2)
gt=`echo ${cur_em}" > "${max_em} |bc -l`
#echo ${cur_id}
#echo ${cur_em}" > "${max_em}
if [ ${gt} == 1 ]; then
max_id=${cur_id}
max_em=${cur_em}
fi
done
# output
cp ${ROOTDIR}/dev_${max_id}.txt ${ROOTDIR}/dev.label
echo ${max_id} > ${ROOTDIR}/dev.info
python ${SCRIPTSDIR}/eval_overlap_macro.py -i ${DATADIR}/dev.cln.sent -r ${DATADIR}/dev.cln.label -s ${ROOTDIR}/dev_${max_id}.txt >> ${ROOTDIR}/dev.info
# decode
# Decode the test set with the selected epoch's checkpoint (greedy search).
time ${BIN} \
--mode predict \
--rootdir ${ROOTDIR} \
--modelfile <(gzip -dc ${ROOTDIR}/save_epoch_${max_id}.model.gz) \
--srcfile ${DATADIR}/test.cln.sent \
--trgfile ${ROOTDIR}/test_result_greedy \
--alignfile ${DATADIR}/test.cln.dep \
--elmo_hdf5_files ${DATADIR}/test.cln.strip.sent.glove.hdf5,${DATADIR}/test.cln.strip.sent.elmo.hdf5,${DATADIR}/test.cln.strip.sent.bert.hdf5 \
--elmo_hdf5_dims 300,1024,768 \
--elmo_hdf5_layers 1,3,12 \
--guided_alignment 1 \
--max_batch_pred 16 \
--sort_sent_type_pred "sort_default" \
--batch_type_pred "same_length" \
--shuffle_batch_type_pred "default" \
--decoder_type "greedy" \
--beam_size 1
done
# Score the three runs' test decodes in parallel, then gather results.
for model_id in `seq 0 2`; do
ROOTDIR=${ROOTDIRS[$i]}_${model_id}
python ${SCRIPTSDIR}/eval_overlap_macro.py -i ${DATADIR}/test.cln.sent -r ${DATADIR}/test.cln.label -s ${ROOTDIR}/test_result_greedy.sents > ${ROOTDIR}/test.info &
python ${SCRIPTSDIR}/eval_overlap_macro.py -i ${DATADIR}/test.cln.sent -r ${DATADIR}/test.cln.label -s ${ROOTDIR}/test_result_greedy.sents -a > ${ROOTDIR}/test.all &
done
wait
echo -n ${NAME} >> ${RESULTSDIR}/dev_google_p.csv
echo -n ${NAME} >> ${RESULTSDIR}/dev_google_r.csv
echo -n ${NAME} >> ${RESULTSDIR}/dev_google_f.csv
echo -n ${NAME} >> ${RESULTSDIR}/dev_google_cr.csv
echo -n ${NAME} >> ${RESULTSDIR}/dev_google_em.csv
echo -n ${NAME} >> ${RESULTSDIR}/test_google_p.csv
echo -n ${NAME} >> ${RESULTSDIR}/test_google_r.csv
echo -n ${NAME} >> ${RESULTSDIR}/test_google_f.csv
echo -n ${NAME} >> ${RESULTSDIR}/test_google_cr.csv
# Append per-run scores, parsed from the eval scripts' "Key : value" lines.
for model_id in `seq 0 2`; do
ROOTDIR=${ROOTDIRS[$i]}_${model_id}
dev_p=$(cat ${ROOTDIR}/dev.info |grep '^P' |sed 's/ //g'|cut -d':' -f 2)
dev_r=$(cat ${ROOTDIR}/dev.info |grep '^R' |sed 's/ //g'|cut -d':' -f 2)
dev_f=$(cat ${ROOTDIR}/dev.info |grep '^F' |sed 's/ //g'|cut -d':' -f 2)
dev_cr=$(cat ${ROOTDIR}/dev.info |grep '^C' |sed 's/ //g'|cut -d':' -f 2)
dev_em=$(cat ${ROOTDIR}/dev.info |grep '^E' |sed 's/ //g'|cut -d':' -f 2)
test_p=$(cat ${ROOTDIR}/test.info |grep '^P' |sed 's/ //g'|cut -d':' -f 2)
test_r=$(cat ${ROOTDIR}/test.info |grep '^R' |sed 's/ //g'|cut -d':' -f 2)
test_f=$(cat ${ROOTDIR}/test.info |grep '^F' |sed 's/ //g'|cut -d':' -f 2)
test_cr=$(cat ${ROOTDIR}/test.info |grep '^C' |sed 's/ //g'|cut -d':' -f 2)
echo -n ",${dev_p}" >> ${RESULTSDIR}/dev_google_p.csv
echo -n ",${dev_r}" >> ${RESULTSDIR}/dev_google_r.csv
echo -n ",${dev_f}" >> ${RESULTSDIR}/dev_google_f.csv
echo -n ",${dev_cr}" >> ${RESULTSDIR}/dev_google_cr.csv
echo -n ",${dev_em}" >> ${RESULTSDIR}/dev_google_em.csv
echo -n ",${test_p}" >> ${RESULTSDIR}/test_google_p.csv
echo -n ",${test_r}" >> ${RESULTSDIR}/test_google_r.csv
echo -n ",${test_f}" >> ${RESULTSDIR}/test_google_f.csv
echo -n ",${test_cr}" >> ${RESULTSDIR}/test_google_cr.csv
done
# Final column: mean of the three runs, computed by awk over the row so far.
dev_p=$(tail -n 1 ${RESULTSDIR}/dev_google_p.csv |awk -F"," '{print ($2+$3+$4) / 3}')
dev_r=$(tail -n 1 ${RESULTSDIR}/dev_google_r.csv |awk -F"," '{print ($2+$3+$4) / 3}')
dev_f=$(tail -n 1 ${RESULTSDIR}/dev_google_f.csv |awk -F"," '{print ($2+$3+$4) / 3}')
dev_cr=$(tail -n 1 ${RESULTSDIR}/dev_google_cr.csv |awk -F"," '{print ($2+$3+$4) / 3}')
dev_em=$(tail -n 1 ${RESULTSDIR}/dev_google_em.csv |awk -F"," '{print ($2+$3+$4) / 3}')
test_p=$(tail -n 1 ${RESULTSDIR}/test_google_p.csv |awk -F"," '{print ($2+$3+$4) / 3}')
test_r=$(tail -n 1 ${RESULTSDIR}/test_google_r.csv |awk -F"," '{print ($2+$3+$4) / 3}')
test_f=$(tail -n 1 ${RESULTSDIR}/test_google_f.csv |awk -F"," '{print ($2+$3+$4) / 3}')
test_cr=$(tail -n 1 ${RESULTSDIR}/test_google_cr.csv |awk -F"," '{print ($2+$3+$4) / 3}')
echo ",${dev_p}" >> ${RESULTSDIR}/dev_google_p.csv
echo ",${dev_r}" >> ${RESULTSDIR}/dev_google_r.csv
echo ",${dev_f}" >> ${RESULTSDIR}/dev_google_f.csv
echo ",${dev_cr}" >> ${RESULTSDIR}/dev_google_cr.csv
echo ",${dev_em}" >> ${RESULTSDIR}/dev_google_em.csv
echo ",${test_p}" >> ${RESULTSDIR}/test_google_p.csv
echo ",${test_r}" >> ${RESULTSDIR}/test_google_r.csv
echo ",${test_f}" >> ${RESULTSDIR}/test_google_f.csv
echo ",${test_cr}" >> ${RESULTSDIR}/test_google_cr.csv
let i++
done
# For long sentences
# Re-score the already-decoded test outputs, restricted (via -l 30) to
# sentences longer than 30 tokens; no new decoding happens here.
echo "Model,1,2,3,AVG" > ${RESULTSDIR}/test_google_long_p.csv
echo "Model,1,2,3,AVG" > ${RESULTSDIR}/test_google_long_r.csv
echo "Model,1,2,3,AVG" > ${RESULTSDIR}/test_google_long_f.csv
echo "Model,1,2,3,AVG" > ${RESULTSDIR}/test_google_long_cr.csv
i=0
for name in "${NAMES[@]}"; do
NAME=${NAMES[$i]}
BIN=${BINS[$i]}
echo ${NAME}
echo ${BIN}
# NOTE(review): this uses `-e` while the first loop used `-d` for the
# same existence check — confirm whether the difference is intentional.
if [ ! -e ${ROOTDIRS[$i]}_0 ]; then
let i++
continue
fi
for model_id in `seq 0 2`; do
ROOTDIR=${ROOTDIRS[$i]}_${model_id}
python ${SCRIPTSDIR}/eval_overlap_macro.py -l 30 -i ${DATADIR}/test.cln.sent -r ${DATADIR}/test.cln.label -s ${ROOTDIR}/test_result_greedy.sents > ${ROOTDIR}/test.long.info &
python ${SCRIPTSDIR}/eval_overlap_macro.py -l 30 -i ${DATADIR}/test.cln.sent -r ${DATADIR}/test.cln.label -s ${ROOTDIR}/test_result_greedy.sents -a > ${ROOTDIR}/test.long.all &
done
wait
echo -n ${NAME} >> ${RESULTSDIR}/test_google_long_p.csv
echo -n ${NAME} >> ${RESULTSDIR}/test_google_long_r.csv
echo -n ${NAME} >> ${RESULTSDIR}/test_google_long_f.csv
echo -n ${NAME} >> ${RESULTSDIR}/test_google_long_cr.csv
for model_id in `seq 0 2`; do
ROOTDIR=${ROOTDIRS[$i]}_${model_id}
test_p=$(cat ${ROOTDIR}/test.long.info |grep '^P' |sed 's/ //g'|cut -d':' -f 2)
test_r=$(cat ${ROOTDIR}/test.long.info |grep '^R' |sed 's/ //g'|cut -d':' -f 2)
test_f=$(cat ${ROOTDIR}/test.long.info |grep '^F' |sed 's/ //g'|cut -d':' -f 2)
test_cr=$(cat ${ROOTDIR}/test.long.info |grep '^C' |sed 's/ //g'|cut -d':' -f 2)
echo -n ",${test_p}" >> ${RESULTSDIR}/test_google_long_p.csv
echo -n ",${test_r}" >> ${RESULTSDIR}/test_google_long_r.csv
echo -n ",${test_f}" >> ${RESULTSDIR}/test_google_long_f.csv
echo -n ",${test_cr}" >> ${RESULTSDIR}/test_google_long_cr.csv
done
# Final column: mean of the three runs.
test_p=$(tail -n 1 ${RESULTSDIR}/test_google_long_p.csv |awk -F"," '{print ($2+$3+$4) / 3}')
test_r=$(tail -n 1 ${RESULTSDIR}/test_google_long_r.csv |awk -F"," '{print ($2+$3+$4) / 3}')
test_f=$(tail -n 1 ${RESULTSDIR}/test_google_long_f.csv |awk -F"," '{print ($2+$3+$4) / 3}')
test_cr=$(tail -n 1 ${RESULTSDIR}/test_google_long_cr.csv |awk -F"," '{print ($2+$3+$4) / 3}')
echo ",${test_p}" >> ${RESULTSDIR}/test_google_long_p.csv
echo ",${test_r}" >> ${RESULTSDIR}/test_google_long_r.csv
echo ",${test_f}" >> ${RESULTSDIR}/test_google_long_f.csv
echo ",${test_cr}" >> ${RESULTSDIR}/test_google_long_cr.csv
let i++
done
| true
|
d4d922b9b11374e674377cd5001cd6a56ae73cf2
|
Shell
|
eugenestarchenko/automation-for-the-people
|
/infrastructure/init.sh
|
UTF-8
| 696
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Bootstrap the demo pipeline: create the S3 artifacts bucket, then launch the
# CodePipeline CloudFormation stack that consumes it. Prompts interactively
# for the bucket name and a GitHub token.
echo "enter the name of the artifacts bucket that is created for this application. Name must be globally unique (e.g. michael-aftp)"
# -r: keep backslashes literal — tokens/names must not be mangled
read -r ARTIFACT_BUCKET_NAME
echo "enter a personal GitHub access token. Generate one here: https://github.com/settings/tokens (only public_repo access needed)"
read -r GITHUB_OAUTH_TOKEN
# The bucket must exist before the stack references it.
aws --region eu-west-1 s3 mb "s3://${ARTIFACT_BUCKET_NAME}"
aws --region eu-west-1 cloudformation create-stack --stack-name "aftp-pipeline" --template-body file://pipeline.yaml --parameters "ParameterKey=ArtifactsBucketName,ParameterValue=${ARTIFACT_BUCKET_NAME}" "ParameterKey=GitHubOAuthToken,ParameterValue=${GITHUB_OAUTH_TOKEN}" --capabilities CAPABILITY_IAM
| true
|
481bc324f836c0ae2c3717ec32ec25afb98e119b
|
Shell
|
SrinivasPithani/devops
|
/bash/find_files.sh
|
UTF-8
| 116
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo: create a temporary work dir, drop a marker file named after this
# shell's PID, and idle. On Ctrl-C the temp dir is cleaned up.
LOGS=$(mktemp -d /tmp/outputXXXX)
# Register the INT cleanup BEFORE the sleeps: in the original it was set on
# the very last line, after all the waiting, so it could never actually fire.
trap 'rm -rf -- "$LOGS/"' INT
touch "$LOGS/$$"
ls -l "$LOGS"
sleep 10
sleep 5
| true
|
29840749dc927d6f4aab12cb4b975cb35cb18ff0
|
Shell
|
Nokorot/MyConfigs
|
/home/.scripts/i3cmds/i3resize
|
UTF-8
| 576
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# This script was made by `goferito` on Github.
# Resize the focused i3 window toward a direction; if i3 rejects the first
# grow/shrink attempt (e.g. the window touches a screen edge), fall back to
# the opposite operation.
# usage: i3resize <up|down|left|right> [distance]
[ -z "$1" ] && echo "No direction provided" && exit 1
# Default resize step. The original line had two bugs: `[ -z "$2"]` (missing
# space before `]`) and a missing `echo`, which tried to *execute* the string
# "50px or 2ppt" as a command, so distanceStr always ended up empty.
distanceStr=$( [ -z "$2" ] && echo "50 px or 2 ppt" || echo "$2" )

moveChoice() {
  # Try resize "$1 $2"; on success also apply "$1 $4", otherwise "$3 $4".
  # NOTE(review): issuing a second resize ("$1 $4") after a success differs
  # from goferito's upstream script (which only runs the fallback) — the
  # branch structure is preserved as found; confirm intent.
  if i3-msg resize "$1" "$2" "$distanceStr" | grep -q '"success":true'; then
    i3-msg resize "$1" "$4" "$distanceStr"
  else
    i3-msg resize "$3" "$4" "$distanceStr"
  fi
}

case $1 in
  up) moveChoice grow up shrink down ;;
  down) moveChoice shrink up grow down ;;
  left) moveChoice grow left shrink right ;;
  right) moveChoice shrink left grow right ;;
esac
| true
|
e559cc8ccde9675debb9bc93fa778c7e2e24fe86
|
Shell
|
i2p/i2p.scripts
|
/source/git-reviewmissing.sh
|
UTF-8
| 408
| 2.875
| 3
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#! /usr/bin/env sh
#
# view the changes in the current workspace for files NOT listed
# in filelist.txt; this is a good way to make sure you're not
# forgetting something that should be in filelist.txt before you
# check it in
# zzz 2008-10
#
# Re-written for git.
# idk 2020-10
git diff `cat filelist.txt` > out.diff
git diff > all.diff
diff all.diff out.diff | cut -c3- > missing.diff
$EDITOR missing.diff
| true
|
2c8b4311b16455f8ec9ef2896e6699d4b63515f6
|
Shell
|
leobudima/custom-ubuntu-setup
|
/install-defaults.sh
|
UTF-8
| 4,637
| 2.8125
| 3
|
[] |
no_license
|
# Fresh-Ubuntu workstation setup: switch the apt mirror, add third-party
# repos, install CLI/desktop tooling, docker + docker-compose, and extras.
# Needs sudo; several steps (update-alternatives, add-apt-repository) are
# interactive.
bold=$(tput bold)
normal=$(tput sgr0)
echo ""
echo "${bold}Set ubuntu mirror to fi${normal}"
echo ""
sudo sed -i 's/us.archive/fi.archive/' /etc/apt/sources.list
echo ""
echo "${bold}Install nodejs 12 repo${normal}"
echo ""
curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -
##
echo ""
# Prime the sudo credential cache up front (the original used `sudo echo`,
# which only worked as a side effect).
sudo -v
echo "${bold}Install defaults${normal}"
echo ""
sudo apt-get install -y zsh \
git \
apt-transport-https \
ca-certificates \
curl \
gnupg-agent \
python3 \
python3-pip \
neovim \
python-dev \
build-essential \
cmake \
xsel \
rlwrap \
gnome-tweak-tool \
tilix \
code \
shellcheck \
redshift \
snapd \
stow \
nodejs \
chrome-gnome-shell \
xclip \
copyq \
flameshot \
silversearcher-ag \
powerline \
tmux \
ranger \
libxext-dev \
atool \
caca-utils \
highlight \
w3m \
poppler-utils \
mediainfo \
ripgrep \
zathura
##
echo ""
echo "${bold}Install go${normal}"
echo ""
sudo snap install --classic go
##
echo ""
echo "${bold}Setup tilix as default and get default config${normal}"
echo ""
sudo update-alternatives --config x-terminal-emulator
echo ""
echo "${bold}Install cheat${normal}"
echo ""
curl https://cht.sh/:cht.sh | sudo tee /usr/local/bin/cheat
sudo chmod +x /usr/local/bin/cheat
##
echo ""
echo "${bold}Install google-chrome${normal}"
echo ""
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb -O ~/chrome.deb
sudo dpkg -i ~/chrome.deb
rm ~/chrome.deb
##
echo ""
echo "${bold}Install discord${normal}"
echo ""
wget "https://discordapp.com/api/download?platform=linux&format=deb" -O ~/discord.deb
sudo dpkg -i ~/discord.deb
rm ~/discord.deb
##
echo ""
echo "${bold}Install docker + docker-compose${normal}"
echo ""
# BUGFIX: `apt-key add` needs `-` to read the key from stdin.
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository \
   "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
   bionic \
   stable"
sudo apt-get install -y docker-ce docker-ce-cli containerd.io
##
echo ""
echo "${bold}Check latest version at https://docs.docker.com/compose/install/${normal}"
echo ""
sudo curl -L "https://github.com/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
#sudo chmod +x /usr/local/bin/docker-compose
#sudo usermod -aG docker $USER
#newgrp docker
##
echo ""
echo "${bold}Install albert${normal}"
echo ""
sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/manuelschneid3r/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/home:manuelschneid3r.list"
wget -nv https://download.opensuse.org/repositories/home:manuelschneid3r/xUbuntu_20.04/Release.key -O Release.key
sudo apt-key add - < Release.key
rm Release.key
sudo apt-get update
# BUGFIX: was "instal" — the typo made this step a hard failure.
sudo apt-get install -y albert
##
echo ""
echo "${bold}Install gnome templates${normal}"
echo ""
cp ./files/Templates.tar.gz ~/Templates.tar.gz
# NOTE(review): extracts into the current directory, not $HOME — confirm
# whether `-C ~` was intended.
tar -xzvf ~/Templates.tar.gz
rm ~/Templates.tar.gz
##
echo ""
echo "${bold}Install tmux plugin manager${normal}"
echo ""
git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm
##
echo ""
echo "${bold}Install ueberzug (plugin for image preview in ranger)${normal}"
echo ""
sudo pip3 install ueberzug
##
echo ""
echo "${bold}Install ranger icons${normal}"
echo ""
git clone https://github.com/alexanderjeurissen/ranger_devicons ~/.config/ranger/plugins/ranger_devicons
##
echo ""
echo "${bold}Install lazygit${normal}"
echo ""
sudo add-apt-repository ppa:lazygit-team/release -y
sudo apt install -y lazygit
##
echo ""
echo "${bold}Set cursor repeat rate (xset r rate 210 30) (add also to .profile)${normal}"
echo ""
xset r rate 210 30
##
echo ""
echo "${bold}(copied to clipboard) run manually sudo chmod +x /usr/local/bin/docker-compose && sudo usermod -aG docker $USER && newgrp docker${normal}"
echo ""
echo "sudo chmod +x /usr/local/bin/docker-compose && sudo usermod -aG docker \$USER && newgrp docker" | xclip -sel clip
| true
|
1270f30514e37eaee094b17e874cb205f9d31955
|
Shell
|
luca992/sqldelight
|
/.buildscript/before_linux.sh
|
UTF-8
| 570
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# CI bootstrap (Linux): prepare the Android SDK, accept its licenses, and
# start a headless emulator so connected checks can run against it.
set -ex

# Update tools so that --licenses works
yes | sdkmanager tools
# Install SDK license so Android Gradle plugin can install deps.
mkdir "$ANDROID_HOME/licenses" || true
yes | sdkmanager --licenses
# Install the system image
sdkmanager "system-images;android-18;default;armeabi-v7a"
# Create and start emulator for the script. Meant to race the install task.
echo no | avdmanager create avd --force -n test -k "system-images;android-18;default;armeabi-v7a"
# Backgrounded: boots while the build continues; headless, software GPU.
$ANDROID_HOME/emulator/emulator -avd test -no-audio -no-window -gpu swiftshader_indirect &
| true
|
5885083fc7f93e2e5ac4be50dfcc9ec2159b8ac7
|
Shell
|
kg2280/openvpn
|
/create_vpn_server.sh
|
UTF-8
| 3,396
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
## Script to create an openvpn server
## Coded by: Kevin Gagne
## Date: 21 Sept 2017
##
VPN_NAME=mastervpn
## Check if there is an argument passed. If so, use it as the VPN name.
if (( $# == 1 ))
then
	VPN_NAME=$1
	echo "VPN_NAME will be $VPN_NAME"
fi
## Refuse to clobber an existing config for this vpn.
if [ -d "/etc/openvpn/$VPN_NAME" ]
then
	echo "There is already a config under /etc/openvpn/$VPN_NAME. Delete the directory first. rm -rf /etc/openvpn/$VPN_NAME"
	echo "Or you can pass an other name to the vpn as an argument to the script"
	echo "Ex. $0 vpn2"
	exit
fi
## Check if openvpn is installed. If not, install it
if [[ $(dpkg -s openvpn | grep Status) =~ "Status: install ok installed" ]]
then
	echo "OpenVPN already installed"
else
	echo "Installing OpenVPN"
	## BUGFIX: the chain was "… && install openvpn …" — the leading
	## "apt-get" was missing, so the install could never run.
	apt-get update && apt-get dist-upgrade -y && apt-get install openvpn easy-rsa ntp ntpdate -y
fi
## Create vpn folder and install the server config template.
mkdir -p "/etc/openvpn/$VPN_NAME"
cp ./openvpn_server.conf "/etc/openvpn/$VPN_NAME.conf"
## Enable ip forward (runtime now, sysctl.conf for reboots)
echo 1 > /proc/sys/net/ipv4/ip_forward
sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g' /etc/sysctl.conf
## Create RSA working directory
make-cadir "/etc/openvpn/$VPN_NAME/easy-rsa" && cd "/etc/openvpn/$VPN_NAME/easy-rsa/"
ln -s "/etc/openvpn/$VPN_NAME/easy-rsa/openssl-1.0.0.cnf" "/etc/openvpn/$VPN_NAME/easy-rsa/openssl.cnf"
## Setup RSA vars
sed -i 's/export KEY_SIZE=.*/export KEY_SIZE=4096/g' "/etc/openvpn/$VPN_NAME/easy-rsa/vars"
sed -i 's/export KEY_COUNTRY=.*/export KEY_COUNTRY="CA"/g' "/etc/openvpn/$VPN_NAME/easy-rsa/vars"
sed -i 's/export KEY_PROVINCE=.*/export KEY_PROVINCE="QC"/g' "/etc/openvpn/$VPN_NAME/easy-rsa/vars"
sed -i 's/export KEY_CITY=.*/export KEY_CITY="MTL"/g' "/etc/openvpn/$VPN_NAME/easy-rsa/vars"
sed -i 's/export KEY_ORG=.*/export KEY_ORG="IT"/g' "/etc/openvpn/$VPN_NAME/easy-rsa/vars"
sed -i 's/export KEY_EMAIL=.*/export KEY_EMAIL="noc@cdmsfirst.com"/g' "/etc/openvpn/$VPN_NAME/easy-rsa/vars"
## Point the server conf (templated with mastervpn paths) at this VPN's
## key, cert, dh and tls-auth material.
sed -i "s|ca /etc/openvpn/mastervpn/easy-rsa/keys/ca.crt|ca /etc/openvpn/$VPN_NAME/easy-rsa/keys/ca.crt|g" "/etc/openvpn/$VPN_NAME.conf"
sed -i "s|cert /etc/openvpn/mastervpn/easy-rsa/keys/vpn.crt|cert /etc/openvpn/$VPN_NAME/easy-rsa/keys/server.crt|g" "/etc/openvpn/$VPN_NAME.conf"
## BUGFIX: the replacement kept the literal "mastervpn" path, so any
## non-default VPN_NAME pointed at the wrong private key.
sed -i "s|key /etc/openvpn/mastervpn/easy-rsa/keys/vpn.key|key /etc/openvpn/$VPN_NAME/easy-rsa/keys/server.key|g" "/etc/openvpn/$VPN_NAME.conf"
sed -i "s|dh /etc/openvpn/mastervpn/easy-rsa/keys/dh4096.pem|dh /etc/openvpn/$VPN_NAME/easy-rsa/keys/dh4096.pem|g" "/etc/openvpn/$VPN_NAME.conf"
sed -i "s|tls-auth /etc/openvpn/mastervpn/easy-rsa/keys/ta.key|tls-auth /etc/openvpn/$VPN_NAME/easy-rsa/keys/ta.key|g" "/etc/openvpn/$VPN_NAME.conf"
## Source the vars script
cd "/etc/openvpn/$VPN_NAME/easy-rsa" && source ./vars
## Build (reset) the keys directory
./clean-all
## Generate Diffie-Hellman
openssl dhparam 4096 > "/etc/openvpn/$VPN_NAME/easy-rsa/keys/dh4096.pem"
## Generate the HMAC key file
openvpn --genkey --secret "/etc/openvpn/$VPN_NAME/easy-rsa/keys/ta.key"
## Build CA
./build-ca
## Create server private key
./build-key-server server
## Create openvpn user so that openvpn does not run under root nor nobody
adduser --system --shell /usr/sbin/nologin --no-create-home openvpn_server
## Create client directory
mkdir -p /etc/openvpn/ccd-datacenter
## Restart openvpn service
service openvpn restart
| true
|
dcc1795ad18a3c9d91ba3dade8c73b6b18dd93f8
|
Shell
|
aligulle1/kuller
|
/pardus/playground/review/server/monitor/nagios-plugins/files/12_check_snmp_1.4.15_regression.patch
|
UTF-8
| 7,864
| 2.640625
| 3
|
[] |
no_license
|
#! /bin/sh /usr/share/dpatch/dpatch-run
## 12_check_snmp_1.4.15_regression.dpatch by
## Thomas Guyot-Sionnest <dermoth@aei.ca>
##
## Original patch to make Timeticks works as in check_snmp v1.4.14, it turns
## out is_numeric isn't so useful and treating all types as numeric works
## best for backwards-compatibility. This is how it used to work in 1.4.14.
##
## As a special case, I also make calculate_rate look up for numeric values
## as it would otherwise return the last value instead.
##
## DP: Remove that is_numeric madness
@DPATCH@
diff --git a/NEWS b/NEWS
index ff92401..e3e8f37 100644
--- a/NEWS
+++ b/NEWS
@@ -1,5 +1,10 @@
This file documents the major additions and syntax changes between releases.
+ ...
+
+ FIXES
+ Make check_snmp work more like v1.4.14 with regard to using special values (Timeticks, STRING) as numeric thresholds.
+
1.4.15 27th July 2010
ENHANCEMENTS
New check_ntp_peer -m and -n options to check the number of usable time sources ("truechimers")
diff --git a/plugins/check_snmp.c b/plugins/check_snmp.c
index f32a26e..d79da8c 100644
--- a/plugins/check_snmp.c
+++ b/plugins/check_snmp.c
@@ -169,7 +169,6 @@ main (int argc, char **argv)
char *state_string=NULL;
size_t response_length, current_length, string_length;
char *temp_string=NULL;
- int is_numeric=0;
time_t current_time;
double temp_double;
time_t duration;
@@ -335,29 +334,24 @@ main (int argc, char **argv)
/* We strip out the datatype indicator for PHBs */
if (strstr (response, "Gauge: ")) {
show = strstr (response, "Gauge: ") + 7;
- is_numeric++;
}
else if (strstr (response, "Gauge32: ")) {
show = strstr (response, "Gauge32: ") + 9;
- is_numeric++;
}
else if (strstr (response, "Counter32: ")) {
show = strstr (response, "Counter32: ") + 11;
- is_numeric++;
is_counter=1;
if(!calculate_rate)
strcpy(type, "c");
}
else if (strstr (response, "Counter64: ")) {
show = strstr (response, "Counter64: ") + 11;
- is_numeric++;
is_counter=1;
if(!calculate_rate)
strcpy(type, "c");
}
else if (strstr (response, "INTEGER: ")) {
show = strstr (response, "INTEGER: ") + 9;
- is_numeric++;
}
else if (strstr (response, "STRING: ")) {
show = strstr (response, "STRING: ") + 8;
@@ -396,15 +390,17 @@ main (int argc, char **argv)
}
}
- else if (strstr (response, "Timeticks: "))
+ else if (strstr (response, "Timeticks: ")) {
show = strstr (response, "Timeticks: ");
+ }
else
show = response;
iresult = STATE_DEPENDENT;
/* Process this block for numeric comparisons */
- if (is_numeric) {
+ /* Make some special values,like Timeticks numeric only if a threshold is defined */
+ if (thlds[i]->warning || thlds[i]->critical || calculate_rate) {
ptr = strpbrk (show, "0123456789");
if (ptr == NULL)
die (STATE_UNKNOWN,_("No valid data returned"));
diff --git a/plugins/t/check_snmp.t b/plugins/t/check_snmp.t
index 004ba1a..25a2999 100644
--- a/plugins/t/check_snmp.t
+++ b/plugins/t/check_snmp.t
@@ -8,7 +8,7 @@ use strict;
use Test::More;
use NPTest;
-my $tests = 8+38+2+2;
+my $tests = 8+42+2+2;
plan tests => $tests;
my $res;
@@ -124,6 +124,13 @@ SKIP: {
cmp_ok( $res->return_code, '==', 0, "Skipping all thresholds");
like($res->output, '/^SNMP OK - \d+ \w+ \d+\s.*$/', "Skipping all thresholds, result printed rather than parsed");
+ $res = NPTest->testCmd( "./check_snmp -H $host_snmp -C $snmp_community -o system.sysUpTime.0 -c 1000000000: -u '1/100 sec'");
+ cmp_ok( $res->return_code, '==', 2, "Timetick used as a threshold");
+ like($res->output, '/^SNMP CRITICAL - \*\d+\* 1\/100 sec.*$/', "Timetick used as a threshold, parsed as numeric");
+
+ $res = NPTest->testCmd( "./check_snmp -H $host_snmp -C $snmp_community -o system.sysUpTime.0");
+ cmp_ok( $res->return_code, '==', 0, "Timetick used as a string");
+ like($res->output, '/^SNMP OK - Timeticks:\s\(\d+\)\s+(?:\d+ days?,\s+)?\d+:\d+:\d+\.\d+\s.*$/', "Timetick used as a string, result printed rather than parsed");
}
# These checks need a complete command line. An invalid community is used so
diff --git a/plugins/tests/check_snmp.t b/plugins/tests/check_snmp.t
index e7ad192..c960f7b 100755
--- a/plugins/tests/check_snmp.t
+++ b/plugins/tests/check_snmp.t
@@ -51,7 +51,10 @@ if ($ARGV[0] && $ARGV[0] eq "-d") {
}
}
-my $tests = 33;
+# We should merge that with $ENV{'NPTEST_CACHE'}, use one dir for all test data
+$ENV{'NAGIOS_PLUGIN_STATE_DIRECTORY'} ||= "/var/tmp";
+
+my $tests = 39;
if (-x "./check_snmp") {
plan tests => $tests;
} else {
@@ -106,7 +109,7 @@ like($res->output, '/'.quotemeta('SNMP OK - And now have fun with with this: \"C
"And now have fun with with this: \"C:\\\\\"
because we\'re not done yet!"').'/m', "Attempt to confuse parser No.3");
-system("rm /usr/local/nagios/var/check_snmp/*");
+system("rm -f ".$ENV{'NAGIOS_PLUGIN_STATE_DIRECTORY'}."/check_snmp/*");
$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -w 600" );
is($res->return_code, 0, "Returns OK");
is($res->output, "No previous data to calculate rate - assume okay");
@@ -170,5 +173,16 @@ $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1
is($res->return_code, 0, "OK as string doesn't match but inverted" );
is($res->output, 'SNMP OK - "stringtests" | ', "OK as inverted string no match" );
+$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.12 -w 4:5" );
+is($res->return_code, 1, "Numeric in string test" );
+is($res->output, 'SNMP WARNING - *3.5* | iso.3.6.1.4.1.8072.3.2.67.12=3.5 ', "WARNING threshold checks for string masquerading as number" );
+
+$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.13" );
+is($res->return_code, 0, "Not really numeric test" );
+is($res->output, 'SNMP OK - "87.4startswithnumberbutshouldbestring" | ', "Check string with numeric start is still string" );
+
+$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.14" );
+is($res->return_code, 0, "Not really numeric test (trying best to fool it)" );
+is($res->output, 'SNMP OK - "555\"I said\"" | ', "Check string with a double quote following is still a string (looks like the perl routine will always escape though)" );
diff --git a/plugins/tests/check_snmp_agent.pl b/plugins/tests/check_snmp_agent.pl
index 8784ab1..2ad8516 100644
--- a/plugins/tests/check_snmp_agent.pl
+++ b/plugins/tests/check_snmp_agent.pl
@@ -33,9 +33,9 @@ ends with with this: C:\\';
my $multilin5 = 'And now have fun with with this: "C:\\"
because we\'re not done yet!';
-my @fields = (ASN_OCTET_STR, ASN_OCTET_STR, ASN_OCTET_STR, ASN_OCTET_STR, ASN_OCTET_STR, ASN_UNSIGNED, ASN_UNSIGNED, ASN_COUNTER, ASN_COUNTER64, ASN_UNSIGNED, ASN_COUNTER, ASN_OCTET_STR);
-my @values = ($multiline, $multilin2, $multilin3, $multilin4, $multilin5, 4294965296, 1000, 4294965296, uint64("18446744073709351616"), int(rand(2**32)), 64000, "stringtests");
-my @incrts = (undef, undef, undef, undef, undef, 1000, -500, 1000, 100000, undef, 666, undef);
+my @fields = (ASN_OCTET_STR, ASN_OCTET_STR, ASN_OCTET_STR, ASN_OCTET_STR, ASN_OCTET_STR, ASN_UNSIGNED, ASN_UNSIGNED, ASN_COUNTER, ASN_COUNTER64, ASN_UNSIGNED, ASN_COUNTER, ASN_OCTET_STR, ASN_OCTET_STR, ASN_OCTET_STR, ASN_OCTET_STR );
+my @values = ($multiline, $multilin2, $multilin3, $multilin4, $multilin5, 4294965296, 1000, 4294965296, uint64("18446744073709351616"), int(rand(2**32)), 64000, "stringtests", "3.5", "87.4startswithnumberbutshouldbestring", '555"I said"' );
+my @incrts = (undef, undef, undef, undef, undef, 1000, -500, 1000, 100000, undef, 666, undef, undef, undef, undef );
# Number of elements in our OID
my $oidelts;
| true
|
bf52db414f9c76a6cd6e1e6f2647cbddfca01e04
|
Shell
|
openanthem/nimbus-docs
|
/scripts/generate.sh
|
UTF-8
| 2,763
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the HTML documentation site (home page, subsites, release notes)
# from the AsciiDoc sources with asciidoctor.
# usage: generate.sh [-V version]

# Set variables
DOCUMENTATION_VERSION="latest"
SOURCE_PATH="src"
ROOT_ADOC_PATH="${SOURCE_PATH}"
ROOT_ADOC_FILENAME="index.adoc"
ROOT_ADOC="${ROOT_ADOC_PATH}/${ROOT_ADOC_FILENAME}"
SUBSITES_DIR="${SOURCE_PATH}/pages"
RELEASE_NOTES_DIR="${SOURCE_PATH}/pages/release-notes/pages"
RESOURCES_DIR="${SOURCE_PATH}/resources"
IMAGES_PATH="${RESOURCES_DIR}/images"

# Collect user args (-V <version>)
while getopts V: option
do
  case "${option}" in
    V) DOCUMENTATION_VERSION=${OPTARG};;
  esac
done

# Set variables
BUILD_DIR="dist/${DOCUMENTATION_VERSION}"

# Add an HTML Header: splice src/shared/header.html in after the generated
# page's <div id="header"> element.
# TODO Handle this in AsciiDoctor.
function addHTMLHeader {
  awk '/<div id="header">/{while(getline line<"src/shared/header.html"){print line}} //' "$1" > tmp
  mv tmp "$1"
}

#
## START SCRIPT
#

# Reset existing documentation build directory
if [ -d "$BUILD_DIR" ]; then
  echo "Cleaning existing build directory: $BUILD_DIR"
  rm -rf "$BUILD_DIR"
fi

# Copy dependencies
mkdir -p "${BUILD_DIR}"
cp -R "${RESOURCES_DIR}/tocbot-3.0.2" "$BUILD_DIR/"
cp -R "${IMAGES_PATH}" "$BUILD_DIR/"

# Create the home site
echo "Creating HTML output..."
echo " -> Building asciidoctor HTML to $BUILD_DIR/index.html..."
asciidoctor "$ROOT_ADOC" -D "$BUILD_DIR" -a toc=left -a nimbus-version="${DOCUMENTATION_VERSION}" -a revnumber="${DOCUMENTATION_VERSION}" -o "index.html"
addHTMLHeader "$BUILD_DIR/index.html"

## Create subsites — one HTML page per directory under pages/
## (all path expansions quoted so names with spaces survive)
for f in "${SUBSITES_DIR}"/*; do
  if [ -d "${f}" ]; then
    echo " -> Creating Subsite HTML for: $f..."
    f_name=${f##*"$SUBSITES_DIR/"}
    asciidoctor "$f/default.adoc" -D "$BUILD_DIR" -a toc=left -a nimbus-version="${DOCUMENTATION_VERSION}" -a revnumber="${DOCUMENTATION_VERSION}" -o "$f_name.html"
    addHTMLHeader "$BUILD_DIR/$f_name.html"
  fi
done

## Create HTML Release Notes
for f in "${RELEASE_NOTES_DIR}"/*; do
  if [ -d "${f}" ]; then
    echo " -> Creating Release Notes HTML for: $f..."
    f_name=${f##*"$RELEASE_NOTES_DIR/"}
    mkdir -p "${BUILD_DIR}/release-notes"
    asciidoctor "$f/default.adoc" -D "$BUILD_DIR/release-notes" -a nimbus-version="${DOCUMENTATION_VERSION}" -a revnumber="${DOCUMENTATION_VERSION}" -o "$f_name.html"
  fi
done

# Zip HTML contents
#echo " -> Compressing HTML files to $BUILD_DIR/html.zip"
#zip -q -r "$BUILD_DIR/$DOCUMENTATION_VERSION.zip" "$BUILD_DIR"/*

# Create the PDF
#echo "Creating PDF output..."
#echo " -> Building asciidoctor PDF to $BUILD_DIR/$DOCUMENTATION_VERSION.pdf..."
#asciidoctor-pdf -d book "$ROOT_ADOC" -D "$BUILD_DIR" -o "$DOCUMENTATION_VERSION.pdf" -a imagesdir="../images"
| true
|
0b7e29afb8ca473993b51aa30c65a6518d011ae8
|
Shell
|
Jpocas3212/aur
|
/clion-eap/PKGBUILD
|
UTF-8
| 2,067
| 2.84375
| 3
|
[] |
no_license
|
# Maintainer: Raphaël Doursenaud <rdoursenaud@gpcsolutions.fr>
# Arch PKGBUILD for the CLion EAP (1.0 RC) binary distribution.
pkgname=clion-eap
_pkgname=clion
pkgbuild=141.352.13
# This is a 1.0 release candidate
_pkgbuild=1.0
pkgver=${pkgbuild}
pkgrel=1
pkgdesc="C/C++ IDE. 30-day evaluation."
arch=('x86_64')
# Prebuilt JetBrains binaries: do not strip them.
options=(!strip)
url="http://www.jetbrains.com/${_pkgname}"
license=('custom')
optdepends=(
'gdb: native debugger'
'cmake: native build system'
'gcc: GNU compiler'
'clang: LLVM compiler'
'biicode: C/C++ dependency manager'
)
source=("http://download.jetbrains.com/cpp/${_pkgname}-${pkgver}.tar.gz")
sha512sums=('29bfd56e0ab5e97a0fffec6c521259d262cd4798226bdb2059c5cc8474717fe741ae029ab41b60c5ff9dbc6f790b36bc4d2874dea8a85b93b039ca37ec80283a')
# Install the unpacked IDE under /opt, prune the binaries that do not match
# the target architecture, and generate a .desktop launcher.
# NOTE(review): `|| return 1` is redundant under modern makepkg (functions
# already abort on error) — kept as-is.
package() {
cd ${srcdir}
mkdir -p ${pkgdir}/opt/${pkgname} || return 1
cp -R ${srcdir}/${_pkgname}-${_pkgbuild}/* ${pkgdir}/opt/${pkgname} || return 1
# Drop 64-bit helpers on i686 and 32-bit helpers on x86_64.
if [[ $CARCH = 'i686' ]]; then
rm -f ${pkgdir}/opt/${pkgname}/bin/libyjpagent-linux64.so
rm -f ${pkgdir}/opt/${pkgname}/bin/fsnotifier64
fi
if [[ $CARCH = 'x86_64' ]]; then
rm -f ${pkgdir}/opt/${pkgname}/bin/libyjpagent-linux.so
rm -f ${pkgdir}/opt/${pkgname}/bin/fsnotifier
fi
# Generate the desktop entry.
# NOTE(review): writing into ${startdir} from package() is discouraged by
# makepkg conventions (build artifacts belong in $srcdir) — confirm.
(
cat <<EOF
[Desktop Entry]
Version=${pkgver}
Type=Application
Name=${pkgname}
Exec="/usr/bin/${pkgname}" %f
Icon=${pkgname}
Comment=${pkgdesc}
GenericName=${_pkgname}
Categories=Development;IDE;
Terminal=false
StartupNotify=true
StartupWMClass=jetbrains-${_pkgname}
EOF
) > ${startdir}/${pkgname}.desktop
mkdir -p ${pkgdir}/usr/bin/ || return 1
mkdir -p ${pkgdir}/usr/share/applications/ || return 1
mkdir -p ${pkgdir}/usr/share/pixmaps/ || return 1
mkdir -p ${pkgdir}/usr/share/licenses/${pkgname} || return 1
install -m 644 ${startdir}/${pkgname}.desktop ${pkgdir}/usr/share/applications/
install -m 644 ${pkgdir}/opt/${pkgname}/bin/${_pkgname}.svg ${pkgdir}/usr/share/pixmaps/${pkgname}.svg
install -m 644 ${srcdir}/${_pkgname}-${_pkgbuild}/license/CLion_Preview_License.txt ${pkgdir}/usr/share/licenses/${pkgname}/${_pkgname}_license.txt
# Launcher symlink into PATH.
ln -s /opt/${pkgname}/bin/${_pkgname}.sh "$pkgdir/usr/bin/${pkgname}"
}
| true
|
c237525d8afa43fe464c8128457ef7ab785f636b
|
Shell
|
liurui-1/opentracing-tutorial
|
/java/run.sh
|
UTF-8
| 491
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the tutorial jar (plus copied dependencies) and run the given class.
# usage: run.sh qualified-class-name [args...]
if [ "$1" == "" ]; then
    echo "Usage: run.sh qualified-class-name [args]"
    exit 1
fi

className=$1
shift

set -e

mvn -q package dependency:copy-dependencies

# Assemble the runtime classpath. Globbing replaces the fragile
# `$(ls ...)` parsing; the leading ':' (empty entry) matches the original.
CLASSPATH=""
for jar in target/dependency/*.jar target/java-opentracing-tutorial-*.jar; do
    CLASSPATH=$CLASSPATH:$jar
done

# JDK 9/10 removed java.xml.bind from the default module set; add it back
# unless we are on a 1.7/1.8 JVM.
ADD_MODULES=""
if [ "$(java -version 2>&1 | head -1 | grep '\"1\.[78].\+\"')" = "" ]; then
    ADD_MODULES="--add-modules=java.xml.bind"
fi

# "$@" (not $*) so arguments containing spaces reach the JVM intact.
java $ADD_MODULES -cp "$CLASSPATH" "$className" "$@"
| true
|
760969bd978df1fccbbcf26fda66287a22852b1e
|
Shell
|
elvistache/Twister-OS-Patcher
|
/uninstall.sh
|
UTF-8
| 714
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Remove everything the Twister OS Patcher installed, then drop its cron job.
echo 'Uninstalling Twister OS Patcher by FlameKat53 and MobileGamesMotionYT...'
echo 'Thanks for using me! You can install me again at "https://bit.ly/patchtwist"'
cd ~

# Remove the patcher tree and its desktop launchers when present.
[ -d ~/patcher/ ] && rm -r ~/patcher/
[ -f ~/.local/share/applications/patcher.desktop ] && rm ~/.local/share/applications/patcher.desktop
[ -f ~/Desktop/patcher.desktop ] && rm ~/Desktop/patcher.desktop

# Drop the command-line entry points (root-owned).
sudo rm -f /usr/local/bin/twistpatch
sudo rm -f /usr/local/bin/twistpatch-uninstall
sudo rm -f /usr/local/bin/twistpatch-update

# Filter the hourly update-check entry out of the user's crontab.
crontab -l | sed -n '/0 \* \* \* \* \~\/patcher\/checkforupdates.sh/!p' | crontab -
echo 'Uninstalled.'
| true
|
346964bd842627467dfb8192e6993ea41b7b8bff
|
Shell
|
benamarfaiez/sentinel_loading
|
/collectData.sh
|
UTF-8
| 865
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Convert each downloaded Sentinel-2 product under products/ into a
# web-mercator GeoTIFF, then build a raster tile index for the web app.
for product in products/*; do
	name=${product#products/}
	# Skip outputs of previous runs (anything with "tif" in the name),
	# mirroring the original `grep "tif"` filter.
	case $name in
		*tif*) continue ;;
	esac
	# Locate the .SAFE directory inside the product folder.
	safe=$(ls "$product" | grep "SAFE")
	if [ -z "$safe" ]; then
		echo '---'
		continue
	fi
	# First: ask gdalinfo for the first subdataset name in the L2A metadata.
	subdataset=$(gdalinfo "$product/$safe/MTD_MSIL2A.xml" | grep 'SUBDATASET_1_NAME' | cut -d '=' -f 2)
	# Second: translate that subdataset to a temporary GeoTIFF (xml -> Tif).
	gdal_translate "$subdataset" "products/tmp.tif"
	# Third: reproject to web mercator.
	gdalwarp products/tmp.tif "products/${name}.tif" -t_srs EPSG:3857
	# Finally remove the temporary file.
	rm "products/tmp.tif"
done
# Builds a shapefile as a raster tileindex
gdaltindex web_application/data/drgidx.shp products/*.tif
| true
|
cae0282700b3a0f70d2428fb71a6e8cbe5a55e72
|
Shell
|
nbcrrolls/opalroll
|
/src/rocks/cron/save-http-logs
|
UTF-8
| 434
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Archive rotated Apache logs (*log-20*) into a per-day backup directory.
basedir=/share/backup/apache_logs
date=$(date +\%Y\%m\%d)
logs=$(ls /var/log/httpd/*log-20* 2>/dev/null)
HostName=$(hostname)

if [ "$logs" ]; then
	# Touch the (possibly automounted) backup share before writing to it.
	ls "$basedir" > /dev/null
	mkdir -p "$basedir/$date"
fi

#save all archived logs
for i in /var/log/httpd/*log-20*;
do
	if [ -f "$i" ] ; then
		mv "$i" "$basedir/$date/"
		echo "$HostName filesystem action: Moved apache logs $logs to $basedir/$date"
	fi
done
| true
|
2c7154f8496a805bbb278a14e09fd6743c3d2532
|
Shell
|
GaloisInc/pirate
|
/demos/camera_demo/scripts/camera_demo.sh
|
UTF-8
| 378
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# usage: ./camera_demo.sh [device] [args]
# Launch the camera demo with platform-specific flags (sudo + flips on ARM).

MACHINE=$(uname --machine)
# Arrays instead of flat strings so arguments containing spaces survive
# (the original "${@:2:$#}" collapsed everything into one word-split string).
PLATFORM_ARGS=()
USER_ARGS=("${@:2}")
VIDEO_DEVICE="/dev/video0"
PREFIX=""
APP="./camera_demo"

if [ $# -ge 1 ]; then
    VIDEO_DEVICE=$1
fi

if [ "${MACHINE}" = "armv7l" ]; then
    PREFIX="sudo"
    PLATFORM_ARGS=(-f v -f h)
fi

# ${PREFIX} intentionally unquoted: when empty it disappears entirely.
${PREFIX} ${APP} -d "${VIDEO_DEVICE}" "${PLATFORM_ARGS[@]}" "${USER_ARGS[@]}"
| true
|
89987e2d7a57fd3b35808b7bd4aa1abbb91b01bd
|
Shell
|
1054/DeepFields.SuperDeblending
|
/Softwares/read_galfit_output_fits_result
|
UTF-8
| 963
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Extract the component table from a GALFIT output FITS file ($1, ext 2)
# into two text files: "fit.header" (raw COMP_* header cards) and
# "fit.result" (one row per component: type, x, xerr, y, yerr, mag, magerr).
# Requires the external CrabFitsHeader tool.
if [[ $# -gt 0 ]]; then
readgresult_tmp="._.AstroGalfit._.ReadGalfitResult._."
# Keep only the header cards between the first COMP_ line and COMMENT.
CrabFitsHeader "$1" -ext 2 | sed -n -e '/COMP_/,/COMMENT/p' > "fit.header"
# Strip quoting, "[...]" fixed-parameter brackets, "+/-" markers and "*".
cat "fit.header" |cut -d\' -f2 |sed -E 's/\[(.*)\]/\1 0/g' |sed -e 's%+/-%%g' |sed -e 's%*%%g' > "$readgresult_tmp" #' #<20170210># |sed -e 's%*%%g'
echo "# ResultType ResultPosX ResultPosXErr ResultPosY ResultPosYErr ResultMags ResultMagsErr " > "fit.result"
echo "# " >> "fit.result"
# Join each component's values onto one line; a COMMENT card ends the row.
cat "$readgresult_tmp" |while read line;do if [[ "$line" != "COMMENT"* ]];then echo -n "$line ";else echo "";fi;done >> "fit.result"
#<20170613># fixing "nan" problem, which occurs when prior sources are too many (perhaps...)
if grep -i -w -q "nan" "fit.result"; then
sed -i.bak -e 's/ nan/ -99/g' "fit.result"
fi
#<20171206># fixing "-nan" problem
if grep -i -w -q "-nan" "fit.result"; then
sed -i.bak -e 's/-nan/ -99/g' "fit.result"
fi
fi
| true
|
fe86cbf10f97c4516902bb078f826dac8b55ad49
|
Shell
|
histograph/puppet-deploy
|
/testNDJSON.sh
|
UTF-8
| 777
| 3.296875
| 3
|
[] |
no_license
|
#! /bin/bash
# set -x
# Compare pits/relations NDJSON files between the upload tree and the import
# tree: diff files present on both sides, report files present on only one.
# (The old `unalias ls` + `$(ls ...)` parsing is replaced by globbing.)

UPLOADS="/uploads/datasets"
IMPORTS="/var/www/importeren/app/storage/exports"

for dir in "${UPLOADS}"/*; do
	i=${dir##*/}
	for ii in pits.ndjson relations.ndjson
	do
		j="${UPLOADS}/${i}/${ii}"
		k="${IMPORTS}/${i}/${ii}"
		if [ -f "${j}" ] && [ -f "${k}" ]
		then
			diff "${j}" "${k}"
		elif [ -f "${j}" ]
		then
			echo "${i}/${ii} exists only in ${UPLOADS}";
		elif [ -f "${k}" ]
		then
			echo "${i}/${ii} exists only in ${IMPORTS}"
		else
			echo "${i}/${ii} is not in either dir"
		fi
	done
done
# set +x
| true
|
784871978066b08c12d44d6bc7df3f67f0001781
|
Shell
|
jwestgard/scripts
|
/data-cleanup/ent-convert.sh
|
UTF-8
| 402
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
##################################################
# ent-convert.sh | Joshua Westgard | 2014-01-26  #
#------------------------------------------------#
# Converts the XML entities for greater than and #
# less than symbols into regular characters.     #
##################################################
# Edits each file given on the command line in place (BSD sed -i syntax),
# keeping a .bak copy. The substitutions were no-ops (s/>/>/) in the found
# copy — the entity names were apparently decoded somewhere upstream; they
# are restored here to match the stated purpose.
for file in "$@"; do
	sed -i '.bak' 's/\&gt;/>/g
	s/\&lt;/</g' "$file";
done
| true
|
7a62cfbd86907ba228f381053db15593f0b99773
|
Shell
|
charliephillips/netapp
|
/problem1.sh
|
UTF-8
| 2,397
| 3.625
| 4
|
[] |
no_license
|
#! /bin/bash
# Parse a NetApp sysconfig dump ($1) into a rough JSON summary ($1.json):
# system serial number, model, and per-PCI-slot port + HDD count.
# NOTE(review): many expansions are unquoted and the serial/model vars are
# never initialised (relies on them being unset == empty) — confirm inputs
# never contain whitespace-sensitive values.
config=$1
output=$config.json
prevSlot=0
primed=false
if [ -f $config ]
then
while read line
do
# spin until serial is found
while [ "$systemSerial" == "" -a "$line" != "" ]
do
systemSerial=`echo $line | grep "System Serial Number" | cut -d':' -f2 | cut -d'(' -f1`
read line
done
# find the model number
while [ "$model" == "" -a "$line" != "" ]
do
model=`echo $line | grep -e 'FAS[0-9][0-9][0-9][0-9]' | cut -d':' -f2`
read line
done
# open the JSON document (closed by the final "}]}" echo below)
echo "{\"system\":[{\"serialnumber\":\"$systemSerial\", \"model\":\"$model\"" > $output
# loop through the PCI cards by Slot
while read line
do
slot=`echo $line | grep "slot"`
# a previous inner loop may already have consumed the next slot line
if $primed
then
slot=$slotNew
fi
if [ "$slot" != "" ]
then
# get the port and slot number for this card
port=`echo $slot | awk '{print $6}' | grep -e '[0-9][a-z]'`
slotNumber=`echo $slot | awk '{print $2}' | cut -d':' -f1`
# check if this is a new slot number and only add it if it's new
if [ $slotNumber -ne $prevSlot ]
then
echo "},{\"slot\":\"$slot\"" >> $output
prevSlot=$slotNumber
fi
# ignore slot 0
if [ $slotNumber -ne 0 ]
then
count=0
counting=true
# count the number of HDD's
while $counting
do
read line
slotNew=`echo $line | grep "slot"`
# we have moved on to the next slot (or hit end of input)
if [ "$slotNew" != "" -o "$line" == "" ]
then
if [ $count -ne 0 ]
then
echo " ,\"port\":\"$port\", \"hddcount\":\"$count\"" >> $output
fi
counting=false
primed=true
# look for a HDD and count it
else
HDD=`echo $line | grep NETAPP`
if [ "$HDD" != "" ]
then
count=$(( $count + 1 ))
fi
fi
done
fi
fi
done
done <$config
echo "}]}" >> $output
fi
| true
|
1175ee9a89e4c342cd5f29440f5fa6fd45a8ca10
|
Shell
|
jsmatos/utility-scripts
|
/monitor_docker_instances/monitor_docker_instances.sh
|
UTF-8
| 1,327
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# To be used with an instance of generic_monitor plugin in xfce4
# Simple script that verifies if some processes are running inside docker and to
# allow some actions by clicking on icons.
#
# The functions below emit the XML-ish tags genmon parses: <txt>, <tool>,
# <click> and <img>. Empty tags clear the corresponding widget element.
SELF_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# text [words...] — label shown in the panel (empty clears it)
function text {
if [[ $# -eq 0 ]]
then
echo "<txt></txt>"
else
echo "<txt>$@</txt>"
fi
}
# tooltip [msg] — hover text; only the first argument is used
function tooltip {
if [[ $# -eq 0 ]]
then
echo "<tool></tool>"
else
echo "<tool>$1</tool>"
fi
}
# click [cmd...] — command genmon runs when the widget is clicked
function click {
if [[ $# -gt 0 ]]
then
echo "<click>$@</click>"
fi
}
# icon <path> — image shown in the panel
function icon {
if [[ $# -gt 0 ]]
then
echo "<img>$@</img>"
fi
}
# warn — shorthand for the red alert icon shipped next to this script
function warn {
icon "${SELF_DIR}/alert-triangle-red-16.png"
}
# Case 1: docker service not active -> warn, click starts it.
# NOTE(review): "stoped" typo is in the user-visible tooltip string.
docker_state=$(systemctl show --property ActiveState docker | grep "ActiveState=active")
if [ "x${docker_state}" != "xActiveState=active" ]
then
tooltip "docker service is stoped, click to start"
click "pkexec systemctl start docker"
warn
exit 0
fi
# Case 2: service up but no containers running -> warn as well.
number_of_instances=$(docker ps -q | wc -l)
if [ ${number_of_instances} -eq 0 ]
then
tooltip "dnsdock is not running, click to start"
click "pkexec systemctl start docker"
warn
exit 0
fi
# Case 3: all good — green icon, tooltip lists the running container names.
instance_names=$(docker ps --format '{{.Names}}')
tooltip "${number_of_instances} docker instances running
${instance_names}"
icon "${SELF_DIR}/check-circle-green-16.png"
click
| true
|
a5c0f3b70b6a5d5e82ceba971d6d7a55ccb3c89b
|
Shell
|
josephzhang8/schism
|
/conda.recipe/build_vl.sh
|
UTF-8
| 1,187
| 2.796875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
#!/usr/bin/env bash
set -xeuo pipefail
#export mpi=mpich
export CC=mpicc
export FC=mpif90
export F77=mpif77
export F90=mpif90
#Fix an error?
sed -i -e "s|\!bflux0,|bflux0,|g" src/Hydro/schism_step.F90
# build and install schism
mkdir build
cd build
CMAKE_PLATFORM_FLAGS+=(-DCMAKE_TOOLCHAIN_FILE="${RECIPE_DIR}/cross-linux.cmake")
cmake -DCMAKE_INSTALL_PREFIX=$PREFIX -DCMAKE_BUILD_TYPE="Release" \
-DCMAKE_Fortran_FLAGS_RELEASE_INIT="-O2 -ffree-line-length-none" \
-DCMAKE_Fortran_FLAGS_DEBUG_INIT="-g -ffree-line-length-none" \
-DCMAKE_Fortran_FLAGS_RELWITHDEBINFO_INIT="-O2 -g -ffree-line-length-none" \
-DC_PREPROCESS_FLAG="-cpp" \
-DNetCDF_FORTRAN_DIR="$PREFIX/lib" \
-DNetCDF_C_DIR="$PREFIX/lib" \
-DNetCDF_INCLUDE_DIR="$PREFIX/include" \
-DNetCDF_LIBRARIES="$PREFIX/lib/libnetcdff.so;$PREFIX/lib/libnetcdf.so" \
${CMAKE_PLATFORM_FLAGS[@]} \
-DTVD_LIM="VL" \
../src
#make -j${CPU_COUNT:-1}
# -DNetCDF_LIBRARIES="$PREFIX/lib/libnetcdff.dylib;$PREFIX/lib/libnetcdf.dylib" \
make
cp -r bin/* $PREFIX/bin/
cp -r include/* $PREFIX/include/
cp -r lib/* $PREFIX/lib/
ln -s $PREFIX/bin/pschism_TVD-VL $PREFIX/bin/schism
#cd $BUILD_PREFIX
| true
|
e3ef047e2937e110fa6561bccd7a9c4d71ed2d37
|
Shell
|
bioinfx/cvdc_scripts
|
/archive/promoterAnchoredInteraction/test/calBackGroundDistribution.sh
|
UTF-8
| 572
| 2.640625
| 3
|
[] |
no_license
|
pushd ../../analysis/promoterAnchoredInteractions
#name=D00_HiC_Rep1
for name in $(cat ../../data/hic/meta/names.txt); do
(
echo $name
chr=1
mkdir -p $name
for chr in {1..22} X; do
echo $chr
awk -v OFS='\t' '{dist=$2-$1;mat[dist] += $3;count[dist]++ }END {for (i in mat){print i,mat[i],count[i]} }' ../../data/hic/matrix/$name/${chr}_10000.txt |sort -k1,1n > $name/${chr}.dist.total
done
cat $name/{?,??}.dist.total | awk -v OFS='\t' '{mat[$1] += $2;count[$1]+= $3 }END {for (i in mat){print i,mat[i],count[i]} }' |sort -k1,1n > $name/all.dist.total
) &
done
popd
| true
|
b80811af7815955d1daa97f5b389f81b72dcef0e
|
Shell
|
Mudlet/installers
|
/shell-check.sh
|
UTF-8
| 287
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
function testFile {
echo "testing $1"
shellcheck "$1"
ret=$?
if [ "$ret" -ne 0 ]; then
echo "There are problems in $1"
fi
return "$ret"
}
result=0
for f in *.sh osx/*.sh generic-linux/*.sh
do
testFile "$f"
ret=$?
let result+=$ret
done
exit "$result"
| true
|
4b1cd5f0360f1b26cdd96f4c1f6fbb6eb0bc9554
|
Shell
|
okapies/travis-sandbox
|
/.travis/install-protobuf.sh
|
UTF-8
| 2,348
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# retrieve arguments
while [[ $# != 0 ]]; do
case $1 in
--)
shift
break
;;
--version)
ARG_VERSION="$2"
shift 2
;;
--download-dir)
ARG_DOWNLOAD_DIR="$2"
shift 2
;;
--build-dir)
ARG_BUILD_DIR="$2"
shift 2
;;
--install-dir)
ARG_INSTALL_DIR="$2"
shift 2
;;
--parallel)
ARG_PARALLEL="$2"
shift 2
;;
-*)
err Unknown option \"$1\"
exit
;;
*)
break
;;
esac
done
# validate the arguments
test -n "${ARG_VERSION}" || { echo "--version is not specified"; exit 1; }
test -n "${ARG_DOWNLOAD_DIR}" || { echo "--download-dir is not specified"; exit 1; }
test -n "${ARG_BUILD_DIR}" || { echo "--build-dir is not specified"; exit 1; }
test -n "${ARG_INSTALL_DIR}" || { echo "--install-dir is not specified"; exit 1; }
test -n "${ARG_PARALLEL}" || ARG_PARALLEL=1
# download (if it isn't cached)
if [ ! -e "${ARG_BUILD_DIR}/protobuf-${ARG_VERSION}/LICENSE" ]; then
echo -e "\e[33;1mDownloading libprotobuf\e[0m"
[ -d "${ARG_DOWNLOAD_DIR}" ] || mkdir -p ${ARG_DOWNLOAD_DIR}
cd ${ARG_DOWNLOAD_DIR}
if [ ! -e "protobuf-cpp-${ARG_VERSION}.tar.gz" ]; then
download_dir="https://github.com/protocolbuffers/protobuf/releases/download/v${ARG_VERSION}/protobuf-cpp-${ARG_VERSION}.tar.gz"
wget ${download_dir}
fi
tar -zxf protobuf-cpp-${ARG_VERSION}.tar.gz -C ${ARG_BUILD_DIR}
echo -e "\e[32;1mlibprotobuf was successfully downloaded.\e[0m"
else
echo -e "\e[32;1mlibprotobuf has been downloaded.\e[0m"
fi
# build (if it isn't cached)
if [ ! -e "${ARG_BUILD_DIR}/protobuf-${ARG_VERSION}/src/libprotobuf.la" ]; then
echo -e "\e[33;1mBuilding libprotobuf\e[0m"
cd ${ARG_BUILD_DIR}/protobuf-${ARG_VERSION}
./configure --prefix=${ARG_INSTALL_DIR} CFLAGS=-fPIC CXXFLAGS=-fPIC
make -j${ARG_PARALLEL}
echo -e "\e[32;1mlibprotobuf was successfully built.\e[0m"
else
echo -e "\e[32;1mlibprotobuf has been built.\e[0m"
fi
# install (always)
echo -e "\e[33;1mInstalling libprotobuf\e[0m"
cd ${ARG_BUILD_DIR}/protobuf-${ARG_VERSION}
make install
| true
|
f487d637bcee0b8c1bb7aef3b296be2f6ebfbce8
|
Shell
|
ilventu/aur-mirror
|
/scipoptsuite/PKGBUILD
|
UTF-8
| 4,703
| 2.765625
| 3
|
[] |
no_license
|
# Maintainer: Robert Schwarz <mail@rschwarz.net>
# Contributor: Johannes Schlatow <johannes.schlatow@googlemail.com>
# Contributor: Stephan Friedrichs <deduktionstheorem@googlemail.com>
pkgname='scipoptsuite'
pkgver='3.0.0'
pkgrel=7
pkgdesc="Tools for generating and solving optimization problems. Consists of ZIMPL, SoPlex, SCIP, GCG and UG"
arch=('i686' 'x86_64')
url='http://scip.zib.de/'
license=('LGPL3' 'custom:ZIB Academic License')
depends=('zlib' 'gmp' 'readline')
replaces=('ziboptsuite')
makedepends=('chrpath' 'doxygen' 'graphviz')
provides=('scip=3.0.0' 'soplex=1.7.0' 'zimpl=3.3.0' 'gcg=1.0.0' 'ug=0.7.0')
source=("http://scip.zib.de/download/release/${pkgname}-${pkgver}.tgz"
'soplex.dxy.patch')
sha256sums=('1ce8a351e92143e1d07d9aa5f9b0f259578f3cee82fcdd984e0024e0d4e3a548'
'49519d42fccb91806a3e62292c0af102b5748958eea34f552a4e21221990cf89')
build() {
# Extract directory names from the $provides array.
local _scip="${provides[0]//=/-}"
local _soplex="${provides[1]//=/-}"
local _zimpl="${provides[2]//=/-}"
local _gcg="${provides[3]//=/-}"
local _ug="${provides[4]//=/-}"
cd "${srcdir}/${pkgname}-${pkgver}"
make SHARED=true
make gcg
make ug
# @TODO: shared lib with ZIMPL seems to be broken
make scipoptlib ZIMPL=false SHARED=true
# @TODO: build docs in parallel?
cd "${srcdir}/${pkgname}-${pkgver}/${_scip}"
make doc
cd "${srcdir}/${pkgname}-${pkgver}/${_soplex}"
# fix soplex.dxy
# @FIXME: Remove this in the next version
patch -p1 < ${srcdir}/soplex.dxy.patch
make doc
cd "${srcdir}/${pkgname}-${pkgver}/${_gcg}"
make doc
# Some files have permission 640.
# @FIXME: Future versions might not require this line.
chmod -R a+r "${srcdir}/${pkgname}-${pkgver}"
}
check() {
cd "${srcdir}/${pkgname}-${pkgver}"
make test
}
package_scipoptsuite() {
# Extract directory names from the $provides array
local _scip="${provides[0]//=/-}"
local _soplex="${provides[1]//=/-}"
local _zimpl="${provides[2]//=/-}"
local _gcg="${provides[3]//=/-}"
local _ug="${provides[4]//=/-}"
# Note that, at least in ziboptsuite-2.1.0, the install targets of the
# scip/soplex/zimpl projects are utterly broken; manually copying
# everything where it belongs is absolutely necessary.
# @FIXME: Maybe make install will just work in future releases...
cd "${srcdir}/${pkgname}-${pkgver}"
# A local RPATH is set, get rid of it.
chrpath --delete ${_scip}/bin/scip
chrpath --delete ${_soplex}/bin/soplex
chrpath --delete ${_gcg}/bin/gcg
chrpath --delete ${_ug}/bin/fscip
#
# Binaries
#
install -D -m755 ${_scip}/bin/scip "${pkgdir}/usr/bin/scip"
install -D -m755 ${_soplex}/bin/soplex "${pkgdir}/usr/bin/soplex"
install -D -m755 ${_zimpl}/bin/zimpl "${pkgdir}/usr/bin/zimpl"
install -D -m755 ${_gcg}/bin/gcg "${pkgdir}/usr/bin/gcg"
install -D -m755 ${_ug}/bin/fscip "${pkgdir}/usr/bin/fscip"
#
# Includes
#
for dir in blockmemshell dijkstra nlpi objscip scip tclique xml; do
mkdir -p "${pkgdir}/usr/include/scip/${dir}"
cp ${_scip}/src/${dir}/*.h "${pkgdir}/usr/include/scip/${dir}"
done
mkdir -p "${pkgdir}/usr/include/"{soplex,zimpl}
cp ${_soplex}/src/*.h "${pkgdir}/usr/include/soplex"
cp ${_zimpl}/src/*.h "${pkgdir}/usr/include/zimpl"
#
# Libraries
#
mkdir -p "${pkgdir}/usr/lib"
cp -d ${_scip}/lib/liblpispx* "${pkgdir}/usr/lib"
cp -d ${_scip}/lib/libnlpi* "${pkgdir}/usr/lib"
cp -d ${_scip}/lib/libobjscip* "${pkgdir}/usr/lib"
cp -d ${_scip}/lib/libscip* "${pkgdir}/usr/lib"
cp -d ${_soplex}/lib/* "${pkgdir}/usr/lib"
cp -d ${_zimpl}/lib/* "${pkgdir}/usr/lib"
cp -d lib/libscipopt*.so "${pkgdir}/usr/lib/libscipopt.so"
# Repair "missing links"
# @FIXME: I hope this is not necessary in future versions!
cd "${pkgdir}/usr/lib"
ln -s -T libzimpl-*.a libzimpl.a
cd "${srcdir}/${pkgname}-${pkgver}"
#
# Documentation
#
mkdir -p "${pkgdir}/usr/share/doc/${pkgname}/"{scip,soplex,zimpl,gcg,ug}
cp -r ${_scip}/{CHANGELOG,release-notes,doc/html} "${pkgdir}/usr/share/doc/${pkgname}/scip/"
cp -r ${_soplex}/{CHANGELOG,doc/html} "${pkgdir}/usr/share/doc/${pkgname}/soplex/"
install -m644 ${_soplex}/src/simpleexample.cpp "${pkgdir}/usr/share/doc/${pkgname}/soplex/"
cp -r ${_zimpl}/{CHANGELOG,README,doc,example} "${pkgdir}/usr/share/doc/${pkgname}/zimpl/"
cp -r ${_gcg}/{CHANGELOG,README,doc/html} "${pkgdir}/usr/share/doc/${pkgname}/gcg/"
cp -r ${_ug}/README "${pkgdir}/usr/share/doc/${pkgname}/ug/"
#
# License
#
install -D -m644 COPYING "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
| true
|
83e854aef7d693901ddf4fcb0414b117e21946d6
|
Shell
|
andrewcampagnagit/quickstart-ibm-cloud-pak-for-security-1
|
/scripts/install.sh
|
UTF-8
| 4,711
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
exec > cp4s_install_logs.log
export ENTITLED_REGISTRY_PASSWORD=$1
export OCP_URL=$2
export LDAP_PASS=$3
export CLOUDCTL_TRACE=TRUE
cloudctl case save --case https://github.com/IBM/cloud-pak/raw/master/repo/case/ibm-cp-security-1.0.17.tgz --outputdir .
tar xvf ibm-cp-security-1.0.17.tgz
cat << EOF > ibm-cp-security/inventory/installProduct/files/values.conf
# Admin User ID (Required): The user that is to be assigned as an Administrator in the default account after the installation. The user must exist in an LDAP directory that will be connected to Foundational Services after deployment.
adminUserId="platform-admin"
# Cluster type (Required) should be one of the following: i.e. "aws", "ibmcloud", "azure", "ocp". This is a mandatory value, If not set it will be "ocp" by default.
cloudType="aws"
# Block storage (Required), see more details https://www.ibm.com/support/knowledgecenter/en/SSTDPP_1.7.0/docs/security-pak/persistent_storage.html
storageClass="gp2"
# Entitled by default (Required)
registryType="entitled"
# Only Required for online install
entitledRegistryUrl="cp.icr.io"
# Only Required for online install
entitledRegistryPassword="$ENTITLED_REGISTRY_PASSWORD"
# Only Required for online install
entitledRegistryUsername="cp"
# Only required for offline/airgap install
localDockerRegistry=""
# Only required for offline/airgap install
localDockerRegistryUsername=""
# Only required for offline/airgap install
localDockerRegistryPassword=""
# CP4S FQDN domain (Optional: Not required if your cloudType is set to "ibmcloud" or "aws")
cp4sapplicationDomain=""
# e.g ./path-to-cert/cert.crt (Optional: Not required if you are using ibmcloud or aws). See more details: https://www.ibm.com/support/knowledgecenter/en/SSTDPP_1.7.0/docs/security-pak/tls_certs.html.
cp4sdomainCertificatePath=""
# Path to domain certificate key ./path-to-key/cert.key (Optional: Not required if you using ibmcloud or aws). See more at https://www.ibm.com/support/knowledgecenter/en/SSTDPP_1.7.0/docs/security-pak/tls_certs.html.
cp4sdomainCertificateKeyPath=""
# Path to custom ca cert e.g <path-to-cert>/ca.crt (Only required if using custom/self signed certificate and optional on ibmcloud or aws). See more at https://www.ibm.com/support/knowledgecenter/en/SSTDPP_1.7.0/docs/security-pak/tls_certs.html.
cp4scustomcaFilepath=""
# Set image pullpolicy e.g Always,IfNotPresent, default is Always (Required)
cp4simagePullPolicy="Always"
# Set to "true" to enable Openshift authentication (Optional). Only supported for ROKS clusters, for more details, see https://www.ibm.com/support/knowledgecenter/en/SSHKN6/iam/3.x.x/roks_config.html
cp4sOpenshiftAuthentication="false"
# Default Account name, default is "Cloud Pak For Security" (Optional)
defaultAccountName="Cloud Pak For Security"
# set to "true" to enable CSA Adapter (Optional), see https://www.ibm.com/support/knowledgecenter/en/SSTDPP_1.7.0/docs/scp-core/csa-adapter-cases.html for more details
enableCloudSecurityAdvisor="false"
# Set storage fs group. Default is 26 (Optional)
storageClassFsGroup="26"
# Set storage class supplemental groups (Optional)
storageClassSupplementalGroups=""
# Set seperate storageclass for backup (Optional)
backupStorageClass=""
# Set custom storage size for backup, default is 100Gi (Optional)
backupStorageSize="100Gi"
EOF
cat patch.sh > /ibm/ibm-cp-security/inventory/installProduct/files/launch.sh
cloudctl case launch -t 1 --case ibm-cp-security --namespace cp4s --inventory installProduct --action install --args "--license accept --helm3 /usr/local/bin/helm3 --inputDir /ibm"
CP_RESULT=${?}
echo $CP_RESULT
CP_PASSWORD=$(oc get secret platform-auth-idp-credentials -o jsonpath='{.data.admin_password}' -n ibm-common-services | base64 -d)
CP_ROUTE=$(oc get route cp-console -n ibm-common-services|awk 'FNR == 2 {print $2}')
SERVER="$(cut -d':' -f1 <<<"$OCP_URL")"
PORT="$(cut -d':' -f2 <<<"$OCP_URL")"
cat << EOF > cp4s-openldap-master/playbook.yml
---
- hosts: local
gather_facts: true
any_errors_fatal: true
roles:
- roles/secops.ibm.icp.login
- roles/secops.ibm.icp.openldap.deploy
- roles/secops.ibm.icp.openldap.register
vars:
icp:
console_url: "$CP_ROUTE"
ibm_cloud_server: "" # Only Applicable for IBMCloud Deployment
ibm_cloud_port: "" # Only Applicable for IBMCloud Deployment
username: "admin"
password: "$CP_PASSWORD"
account: "id-mycluster-account"
namespace: "default"
openldap:
adminPassword: "$LDAP_PASS"
initialPassword: "$LDAP_PASS"
userlist: "isc-demo,isc-test,platform-admin"
EOF
cd cp4s-openldap-master
ansible-playbook -i hosts playbook.yml
| true
|
576cc7b26f21c39ff09389371dcb9c8c43f42022
|
Shell
|
symbooglix/gpu
|
/other/clean.sh
|
UTF-8
| 100
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
for ext in bc opt cbpl bpl loc gbpl; do
find . -iname "*.${ext}" -print -delete
done
| true
|
2ee71ba69641722058758913067664d93d786aae
|
Shell
|
smbrave/gosupervisor
|
/build.sh
|
UTF-8
| 283
| 2.75
| 3
|
[] |
no_license
|
#!/bin/sh
SVN_VERSION=`git rev-parse --short HEAD || echo "GitNotFound"`
APP_VERSION=`head -n 3 update.sh|grep LAST_VERSION|awk -F"=" '{print $2}'`
go build -ldflags "-X main.buildTime=`date +%Y%m%d-%H%M%S` -X main.binaryVersion=$APP_VERSION -X main.svnRevision=${SVN_VERSION}"
| true
|
c13000ea940ec6b7b9110f04feaddcd05a902d84
|
Shell
|
unkmc/RTK-Server
|
/rtk/map-server-auto
|
UTF-8
| 129
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
until ./map-server; do
echo "Server 'map-server' crashed with exit code $?. Respawning.." >&2
sleep 1
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.