blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
298486583f66fdd0dae83160ad135e89469411c1
|
Shell
|
haniokasai/docker-minecraft
|
/resources/run-PC-vanilla.sh
|
UTF-8
| 589
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Entry script for the vanilla Minecraft (Java/PC) server container.
# The actual server launch has been disabled: the script now only runs the
# permission/firewall helpers and prints a discontinuation notice.
echo "run vanilla...." >&1
# Apply filesystem permissions and the UDP-blocking firewall helper.
sh /minecraft/resources/setPerm.sh
sh /minecraft/resources/blockUDP.sh
cd /minecraft/server
# Ensure the server binaries are root-owned and world-readable/executable.
chmod 755 /minecraft/bin -R
chown root:root /minecraft/bin -R
echo "残念ながら、Java版の提供を停止します"
# Original startup sequence (EULA acceptance, heap sizing from `free`,
# cgroup-aware -XX:MaxRAM, launching mcpc.jar) kept below for reference:
#echo "if you do not agree eura, please stop now...." >&1
#echo "eula=true" > eula.txt
#i=(`awk '/^Mem/ {printf("%u", $7);}' <(free -m)`)
#su -l ${SRVID} -c "cd /minecraft/server ; java -Xmx$((i/10*9))m -XX:MaxRAM=`cat /sys/fs/cgroup/memory/memory.limit_in_bytes` -jar /minecraft/bin/mcpc.jar nogui"
#echo "run vanilla....done" >&1
| true
|
6dcce95be4f1ed5c1f7015e7ce3dc82ac22ccd63
|
Shell
|
isabella232/k8s-kpi-scripts
|
/src/files/thirdparty/k8s-live-stats
|
UTF-8
| 1,849
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the most recent anonymised api.jujucharms.com access logs from two
# Swift containers (machine-1 and machine-2), prune stale local copies, then
# run the k8s live-stats report over them.
set -ex

# Number of log files to download, and the slightly larger retention window
# used when pruning previously downloaded files.
ITEMS=180
MAXITEMS=183

# Print the Swift container name for a given machine number ($1).
container() {
  echo "production-juju-ps45-cdo-jujucharms-machine-${1}.canonical.com"
}

# List the anonymised, gzipped api.jujucharms.com log objects in a container.
# NOTE(review): 'grep 201' only matches filenames containing "201" (dates
# 201x) — confirm this still matches current file naming.
list_logs() {
  swift list "$(container "$1")" | grep 201 | grep api.jujucharms.com.log \
    | grep '\.anon\.gz$'
}

mkdir -p /var/tmp/logs/api/1
mkdir -p /var/tmp/logs/api/2

source ~/.nova

echo "Removing old log files"
for m in 1 2; do
  list_logs "$m" | tail -n "$MAXITEMS" > "/var/tmp/logs/api/max-logs${m}.list"
  cd "/var/tmp/logs/api/${m}"
  ls -1 *.gz > "../all-files${m}.list"
  # Lines appearing only in all-files (not in the keep-list) are stale.
  # Listing max-logs twice makes every kept name a duplicate, so `uniq -u`
  # emits only the stale ones.
  sort "../all-files${m}.list" "../max-logs${m}.list" "../max-logs${m}.list" \
    | uniq -u | xargs rm -f
done

echo "Downloading log files"
for m in 1 2; do
  list_logs "$m" | tail -n "$ITEMS" > "/var/tmp/logs/api/logs${m}.list"
done

echo "Downloading log files using get.sh"
for m in 1 2; do
  cd "/var/tmp/logs/api/${m}"
  while IFS= read -r f; do
    echo "swift download $f"
    if [ ! -f "$f" ]; then
      swift download "$(container "$m")" "$f"
    else
      echo "File already available: $f"
    fi
  done < "/var/tmp/logs/api/logs${m}.list"
done

cd /srv/k8s-kpi-scripts/parts/
python -u k8s-live-stats.py
| true
|
10d5fe4d0a5f95fea153456607bc0bc9692e5e2f
|
Shell
|
montxero/xero_configs
|
/.bash_profile
|
UTF-8
| 217
| 3.140625
| 3
|
[] |
no_license
|
# shellcheck source=/dev/null
# Source each of the user's shell startup fragments that exist, in order.
for rc_file in "$HOME/.bashrc" "$HOME/.bash_aliases" "$HOME/.profile"; do
  if [ -f "$rc_file" ]; then
    . "$rc_file"
  fi
done
| true
|
4d1ad2489d1861276d1819bbbe9a705a8faa2613
|
Shell
|
hochraldo/dotfiles
|
/home/.bash_profile
|
UTF-8
| 652
| 3.015625
| 3
|
[] |
no_license
|
# Load homeshick together with its bash completion when the checkout exists.
homeshick_sh="${HOME}/.homesick/repos/homeshick/homeshick.sh"
if [ -e "$homeshick_sh" ] ; then
  source "$homeshick_sh"
  source "${HOME}/.homesick/repos/homeshick/completions/homeshick-completion.bash"
fi
# Source the remaining per-user startup fragments that are present, in order.
for startup_file in \
  "${HOME}/.git-completion.bash" \
  "${HOME}/.bashrc" \
  "${HOME}/.bashrc_box_independant" \
  "${HOME}/.base16-default.dark.sh"
do
  if [ -e "$startup_file" ] ; then
    source "$startup_file"
  fi
done
# Create tmp folder for vi
if [ ! -d "${HOME}/.vitmp" ] ; then
  mkdir "${HOME}/.vitmp"
fi
| true
|
de5f64c64db242bd6ec86e088ca79bba80dad899
|
Shell
|
kamath/ANTsDoc
|
/Tex2/examplesandtests/testEx7.sh
|
UTF-8
| 139
| 3.15625
| 3
|
[] |
no_license
|
# $1 - filename prefix used by the test; files matching "${op}*" are removed.
op=$1
# now run a test and store its success/failure
testresult=0
# Only clean up when the prefix is longer than 2 characters, so an empty or
# near-empty prefix cannot expand to an overly broad glob.
if [[ ${#op} -gt 2 ]] ; then
  # cleanup — quote the prefix (prevents word-splitting on spaces) while
  # leaving the trailing * unquoted so the glob still expands; `--` stops
  # option parsing for prefixes that start with a dash.
  rm -- "${op}"*
fi
exit $testresult
| true
|
79dff566d3f20d3e5dc2d81aa73fc283f142dba5
|
Shell
|
JochenHayek/misc
|
/renaming_files_with_timestamps/PHOTO.rename.sh
|
UTF-8
| 329
| 2.875
| 3
|
[] |
no_license
|
:
# Rename timestamped photo/video files into a sortable naming scheme, e.g.
#
# PHOTO-2020-03-22-15-24-43.jpg -> 999990-000--20200322152443--___.jpg
shopt -s nullglob
set -x
# ~/bin/rename (perl-rename style): the /x-modified regex uses named captures
# to pick apart "PHOTO-YYYY-mm-dd-HH-MM-SS.(jpeg|jpg|mp4)" and the
# replacement reassembles the pieces into the target name; </dev/null keeps
# rename from reading stdin. Filenames to rename are passed through via "$@".
~/bin/rename -v </dev/null \
\
's/^ (PHOTO)- (?<YYYY>....)-(?<mm>..)-(?<dd>..)-(?<HH>..)-(?<MM>..)-(?<SS>..) \.(?<suffix>jpeg|jpg|mp4)
$/999990-000--$+{YYYY}$+{mm}$+{dd}$+{HH}$+{MM}$+{SS}--___.$+{suffix}/x' \
\
"$@"
exit 0
| true
|
aaacc5a929e5b75d385e2e81bc63b538a38b0fb2
|
Shell
|
b0ltn/PlexGuide.com-The-Awesome-Plex-Server
|
/scripts/menus/backup-restore/restore/restore.sh
|
UTF-8
| 2,389
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# PlexGuide restore menu: the user picks an application, its name is written
# to /tmp/program_var, and the ansible restore play is run for it.
#
# Fixes in this revision:
#  - the case pattern for option 9 is now "9)" to match the menu tag exactly
#    (it was "9 )", which never matched, so option 9 silently did nothing);
#  - menu entry 13 is labelled "NZBHydra2" (its handler writes nzbhydra2),
#    removing the duplicate "NZBHydra" label;
#  - menu entry 20 is spelled "Rutorrent" to match the value it writes.
clear
while true
do
CHOICE=$(
whiptail --title "Restore Menu" --menu "Make your choice" 19 25 12 \
"1 )" "CouchPotato" \
"2 )" "Deluge" \
"3 )" "Emby" \
"4 )" "Heimdall" \
"5 )" "HTPCManager" \
"6 )" "Jackett" \
"7 )" "Lidarr" \
"8 )" "Medusa" \
"9)" "Myler" \
"10)" "Muximux" \
"11)" "NZBGET" \
"12)" "NZBHydra" \
"13)" "NZBHydra2" \
"14)" "Ombi" \
"15)" "Organizr" \
"16)" "Plex" \
"17)" "Portainer" \
"18)" "Radarr" \
"19)" "Resilio" \
"20)" "Rutorrent" \
"21)" "SABNZBD" \
"22)" "Sonarr" \
"23)" "Tautulli" \
"24)" "Ubooquity" \
"25)" "Exit " 3>&2 2>&1 1>&3
)
# NOTE(review): result is assigned but never used in this script.
result=$(whoami)
# Map the chosen menu tag to the program name the ansible play expects.
case $CHOICE in
"1 )")
echo "couchpotato" > /tmp/program_var
;;
"2 )")
echo "deluge" > /tmp/program_var
;;
"3 )")
echo "emby" > /tmp/program_var
;;
"4 )")
echo "heimdall" > /tmp/program_var
;;
"5 )")
echo "htpcmanager" > /tmp/program_var
;;
"6 )")
echo "jackett" > /tmp/program_var
;;
"7 )")
echo "lidarr" > /tmp/program_var
;;
"8 )")
echo "medusa" > /tmp/program_var
;;
"9)")
echo "myler" > /tmp/program_var
;;
"10)")
echo "muximux" > /tmp/program_var
;;
"11)")
echo "nzbget" > /tmp/program_var
;;
"12)")
echo "nzbhydra" > /tmp/program_var
;;
"13)")
echo "nzbhydra2" > /tmp/program_var
;;
"14)")
echo "ombiv3" > /tmp/program_var
;;
"15)")
echo "organizr" > /tmp/program_var
;;
"16)")
echo "plex" > /tmp/program_var
;;
"17)")
echo "portainer" > /tmp/program_var
;;
"18)")
echo "radarr" > /tmp/program_var
;;
"19)")
echo "resilio" > /tmp/program_var
;;
"20)")
echo "rutorrent" > /tmp/program_var
;;
"21)")
echo "sabnzbd" > /tmp/program_var
;;
"22)")
echo "sonarr" > /tmp/program_var
;;
"23)")
echo "tautulli" > /tmp/program_var
;;
"24)")
echo "ubooquity" > /tmp/program_var
;;
"25)")
clear
exit 0
;;
esac
# Run the restore play for whatever was written to /tmp/program_var, then
# pause so the user can read the output before the menu redraws.
ansible-playbook /opt/plexguide/ansible/plexguide.yml --tags restore
read -n 1 -s -r -p "Press any key to continue "
done
exit
| true
|
bcce8e24d2c0b4823491a7ee8480cf62f7e49e6f
|
Shell
|
Anil1111/WebAPI
|
/pack.sh
|
UTF-8
| 298
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Package the WebAPI workspace into a timestamped tarball, excluding build
# output directories.
FNAME="$(date '+%Y%m%d_%H%M')_WebAPI.tgz"
# Build the exclude flags as an array so each --exclude stays one word even
# if paths ever contain spaces (the old string + unquoted-expansion approach
# relied on word-splitting).
EX=(--exclude=WebAPI/obj --exclude=WebAPI/bin)
#EX+=(--exclude=WebAPI.Tests/bin)
#EX+=(--exclude=WebAPI.Tests/obj)
set -x
tar -czf "$FNAME" "${EX[@]}" .vs WebAPI WebAPI.sln pack.sh browsing.sh .gitignore .gitattributes
set +x
| true
|
9a0bbf7f61898530919ba78fa730cbbd6fd07c00
|
Shell
|
cloudwm/installer
|
/tweaks/enable-ssl-ubuntu20
|
UTF-8
| 1,355
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Enable SSL on an Apache-based Ubuntu 20 image: try a Let's Encrypt
# certificate via certbot first and fall back to a self-signed certificate
# (in the same letsencrypt path layout) if certbot fails.
#
# NOTE(review): execSpecial, waitOrStop, log and tag/tagScript are helpers
# defined by include/startup.sh; their exact semantics are not visible here.
# Add this at the begining of all scripts.
if [ -f "include/startup.sh" ]; then
. include/startup.sh
elif [ -f "../include/startup.sh" ]; then
. ../include/startup.sh
fi
# Apache must release port 80 so certbot's standalone challenge can bind it.
systemctl stop apache2.service
# Request the cert non-interactively; ${CWM_DEBUG:+"--test-cert"} adds the
# staging flag only when CWM_DEBUG is set and non-empty.
execSpecial 3 '(fail|error)' certbot certonly -n --standalone --preferred-challenges http --agree-tos --email ${ADMINEMAIL} -d ${CWM_DOMAIN} ${CWM_DEBUG:+"--test-cert"}
###OREN CHANGES###
# Fallback path: certbot exited with status 1, so generate a self-signed
# certificate where the vhost config expects the letsencrypt files to live.
if [[ $? == 1 ]]
then
export certbot_failed=1
echo "Generating self-signed certificate" | log
#waitOrStop 0 "Failed certbot certificate generation"
mkdir /etc/letsencrypt/live
mkdir /etc/letsencrypt/live/${CWM_DOMAIN}
openssl req -x509 -sha256 -newkey rsa:2048 -keyout /etc/letsencrypt/live/${CWM_DOMAIN}/privkey.pem -out /etc/letsencrypt/live/${CWM_DOMAIN}/fullchain.pem -days 1024 -nodes -subj '/CN=localhost'
waitOrStop 0 "Certificate creation failed"
# Sending log to log server - 45-93-93-142.cloud-xip.com
curl -v -X POST -D -H "Content-Type: application/x-www-form-urlencoded" -d "vm_name='$CWM_NAME'&image_name='$CWM_OS'&vm_owner='$ADMINEMAIL'" http://45-93-93-142.cloud-xip.com
else
export certbot_failed=0
fi
###OREN CHANGES###
export CWM_DISPLAYED_ADDRESS="${CWM_DOMAIN}"
systemctl start apache2.service
waitOrStop 0 "Failed to start Apache service"
tag global-domain-set.success
tag ssl-ready.success
tagScript success
exit 0
| true
|
243ea8c161dfd8ef872c781ec982515612b8f00e
|
Shell
|
Diesmaster/smartchain-node-for-customers
|
/jobs.sh
|
UTF-8
| 19,157
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
rpcuser=$(env | grep IJUICE_KOMODO_NODE_USERNAME | cut -d '=' -f2-)
rpcpassword=$(env | grep IJUICE_KOMODO_NODE_PASSWORD | cut -d '=' -f2-)
rpcport=$(env | grep IJUICE_KOMODO_NODE_RPC_PORT | cut -d '=' -f2-)
komodo_node_ip=127.0.0.1
# TODO modulo 100 blocks
# echo "Using $komodo_node_ip:$rpcport with $rpcuser:$rpcpassword"
THIS_NODE_PUBKEY=$(env | grep THIS_NODE_PUBKEY | cut -d '=' -f2-)
THIS_NODE_WIF=$(env | grep THIS_NODE_WIF | cut -d '=' -f2-)
THIS_NODE_WALLET=$(env | grep THIS_NODE_WALLET | cut -d '=' -f2-)
# echo "Using node wallet ${THIS_NODE_WALLET}"
# TODO modulo 100 blocks
IS_MINE=$(curl -s --user $rpcuser:$rpcpassword --data-binary "{\"jsonrpc\": \"1.0\", \"id\": \"isminequery\", \"method\": \"validateaddress\", \"params\": [\"${THIS_NODE_WALLET}\"]}" -H 'content-type: text/plain;' http://127.0.0.1:$rpcport/ | jq -r '.result.ismine')
if [ "${IS_MINE}" == "false" ] ; then
curl -s --user $rpcuser:$rpcpassword --data-binary "{\"jsonrpc\": \"1.0\", \"id\": \"importwif\", \"method\": \"importprivkey\", \"params\": [\"${THIS_NODE_WIF}\"]}" -H 'content-type: text/plain;' http://127.0.0.1:$rpcport/
fi
BLOCKNOTIFY_CHAINSYNC_LIMIT=$(env | grep BLOCKNOTIFY_CHAINSYNC_LIMIT | cut -d '=' -f2-)
HOUSEKEEPING_ADDRESS=$(env | grep HOUSEKEEPING_ADDRESS | cut -d '=' -f2-)
# echo "Chain out-of-sync limit: ${BLOCKNOTIFY_CHAINSYNC_LIMIT}"
# TEST_DATA=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9+/=' | fold -w 100 | head -n 1)
###############################################################################
# START HOUSEKEEPING
# we send this amount to an address for housekeeping
# update by 0.0001 (manually, if can be done in CI/CD, nice-to-have not need-to-have) (MYLO)
# house keeping address is list.json last entry during dev
SCRIPT_VERSION=0.00010005
CHAIN_SYNC=$(curl -s --user $rpcuser:$rpcpassword --data-binary "{\"jsonrpc\": \"1.0\", \"id\": \"syncquery\", \"method\": \"getinfo\", \"params\": []}" -H 'content-type: text/plain;' http://127.0.0.1:$rpcport/ | jq -r '.result.longestchain - .result.blocks as $diff | $diff')
echo "Out of sync by ${CHAIN_SYNC} blocks"
if [ $CHAIN_SYNC -lt ${BLOCKNOTIFY_CHAINSYNC_LIMIT} ] ; then
echo "Chain sync ok. Working..."
else
echo "Chain out of sync by ${CHAIN_SYNC} blocks. If counting down, syncing. Try next block, goodbye..."
# TODO send alarm
exit
fi
# TODO modulo block number (MYLO)
# send a small amount (SCRIPT_VERSION) for HOUSEKEEPING_ADDRESS from each organization
#############################
# info: for external documentation then remove from here
# one explorer url to check is
# IJUICE http://seed.juicydev.coingateways.com:24711/address/RS7y4zjQtcNv7inZowb8M6bH3ytS1moj9A
# POS95 http://seed.juicydev.coingateways.com:54343/address/RS7y4zjQtcNv7inZowb8M6bH3ytS1moj9A
#############################
# send SCRIPT_VERSION, increment by 0.00000001 for each update
# curl -s --user $rpcuser:$rpcpassword --data-binary "{\"jsonrpc\": \"1.0\", \"id\":\"housekeeping1\", \"method\": \"sendtoaddress\", \"params\": [\"${HOUSEKEEPING_ADDRESS}\", ${SCRIPT_VERSION}, \"\", \"\"] }" -H "content-type: text/plain;" http://$komodo_node_ip:$rpcport/
#############################
# END OF HOUSEKEEPING
###############################################################################
###############################################################################
# START JCF IMPORT API INTEGRITY CHECKS
# JCF is the only part of script that refers to BATCH.
# development of new partners can use RAW_REFRESCO-like variables
###########################
# organization R-address = $1
# raw_json import data in base64 = $2
# batch record import database id(uuid) = $3
###########################
# Sign the raw import data with the organization wallet, derive a unique
# "integrity address" from the signature, register that address against the
# batch in the import API, then fund it with a SCRIPT_VERSION-sized
# "pre-process" transaction and record the tx id back on the record.
function import-jcf-batch-integrity-pre-process {
# integrity-before-processing , create blockchain-address for the import data from integration pipeline
# r_address has a database constraint for uniqueness. will fail if exists
# signmessage, genkomodo.php
# update batches-api with "import-address"
# send "pre-process" tx to "import-address"
local WALLET=$1
local DATA=$2
echo $DATA
local IMPORT_ID=$3
echo "Checking import id: ${IMPORT_ID}"
# no wrap base64 from https://superuser.com/a/1225139
# Sign DATA via the node's signmessage RPC, then base64 the signature
# (-w 0 disables line wrapping so the value stays a single token).
local SIGNED_DATA=$(curl -s --user $rpcuser:$rpcpassword --data-binary "{\"jsonrpc\": \"1.0\", \"id\":\"signrawjson\", \"method\": \"signmessage\", \"params\": [\"${WALLET}\", \"${DATA}\"] }" -H 'content-type: text/plain;' http://127.0.0.1:$rpcport/ | jq -r '.result' | base64 -w 0) # | sed 's/\=//')
echo "signed data: ${SIGNED_DATA}"
# genaddressonly.php deterministically maps the signature to an address.
local INTEGRITY_ADDRESS=$(php ${BLOCKNOTIFY_DIR}genaddressonly.php $SIGNED_DATA | jq -r '.address')
echo "INTEGRITY_ADDRESS will be ${INTEGRITY_ADDRESS}"
# IMPORTANT! this next POST will fail if the INTEGRITY_ADDRESS is not unique. The same data already has been used to create an address in the integrity table
echo curl -s -X POST -H "Content-Type: application/json" ${DEV_IMPORT_API_BASE_URL}${DEV_IMPORT_API_JCF_BATCH_INTEGRITY_PATH} --data "{\"integrity_address\": \"${INTEGRITY_ADDRESS}\", \"batch\": \"${IMPORT_ID}\"}"
local INTEGRITY_ID=$(curl -s -X POST -H "Content-Type: application/json" ${DEV_IMPORT_API_BASE_URL}${DEV_IMPORT_API_JCF_BATCH_INTEGRITY_PATH} --data "{\"integrity_address\": \"${INTEGRITY_ADDRESS}\", \"batch\": \"${IMPORT_ID}\"}" | jq -r '.id')
echo "integrity db id: ${INTEGRITY_ID}"
# curl sendtoaddress small amount
# NOTE(review): unlike the RAW_REFRESCO variant below, INTEGRITY_ID is not
# checked against "null" before the pre-process tx is sent — confirm intended.
local INTEGRITY_PRE_TX=$(curl -s --user $rpcuser:$rpcpassword --data-binary "{\"jsonrpc\": \"1.0\", \"id\":\"sendpretx\", \"method\": \"sendtoaddress\", \"params\": [\"${INTEGRITY_ADDRESS}\", ${SCRIPT_VERSION}, \"\", \"\"] }" -H "content-type: text/plain;" http://$komodo_node_ip:$rpcport/ | jq -r '.result')
echo "integrity pre tx: ${INTEGRITY_PRE_TX}"
# Persist address + pre-tx id back onto the integrity record.
curl -s -X PUT -H 'Content-Type: application/json' ${DEV_IMPORT_API_BASE_URL}${DEV_IMPORT_API_JCF_BATCH_INTEGRITY_PATH}${INTEGRITY_ID}/ --data "{\"integrity_address\": \"${INTEGRITY_ADDRESS}\", \"integrity_pre_tx\": \"${INTEGRITY_PRE_TX}\" }"
}
###########################
# organization wallet = $1
# raw_json import data = $2
# batch database id = $3
###########################
# RAW_REFRESCO variant of the integrity pre-process: sign the import data,
# derive the integrity address, register + fund it, and additionally send a
# sendmany transaction to addresses derived from the anfp/pon/bnfp fields of
# the decoded import payload.
function import-raw-refresco-batch-integrity-pre-process {
echo "#### RAW REFRESCO ####"
local WALLET=$1
local DATA=$2 # this is raw_json TODO needs to save in db
local IMPORT_ID=$3
echo "Checking import id: ${IMPORT_ID}"
# Extract a field ($1 = jq filter) from the base64-encoded DATA payload.
_jq() {
echo ${DATA} | base64 --decode | jq -r ${1}
}
# Sign an arbitrary value ($1) and print the address derived from the
# signature (same signmessage + genaddressonly.php mechanism as below).
_getaddress() {
SIGNED_ITEM=$(curl -s --user $rpcuser:$rpcpassword --data-binary "{\"jsonrpc\": \"1.0\", \"id\":\"signrawjson\", \"method\": \"signmessage\", \"params\": [\"${WALLET}\", \"${1}\"] }" -H 'content-type: text/plain;' http://127.0.0.1:$rpcport/ | jq -r '.result' | base64 -w 0) # | sed 's/\=//')
ITEM_ADDRESS=$(php ${BLOCKNOTIFY_DIR}genaddressonly.php $SIGNED_ITEM | jq -r '.address')
echo ${ITEM_ADDRESS}
}
# integrity-before-processing , create blockchain-address for the import data from integration pipeline
# blockchain-address has a database constraint for uniqueness. will fail if exists
# signmessage, genkomodo.php
# update batches-api with "import-address"
# send "pre-process" tx to "import-address"
# no wrap base64 from https://superuser.com/a/1225139
local SIGNED_DATA=$(curl -s --user $rpcuser:$rpcpassword --data-binary "{\"jsonrpc\": \"1.0\", \"id\":\"signrawjson\", \"method\": \"signmessage\", \"params\": [\"${WALLET}\", \"${DATA}\"] }" -H 'content-type: text/plain;' http://127.0.0.1:$rpcport/ | jq -r '.result' | base64 -w 0) # | sed 's/\=//')
echo "signed data: ${SIGNED_DATA}"
local INTEGRITY_ADDRESS=$(php ${BLOCKNOTIFY_DIR}genaddressonly.php $SIGNED_DATA | jq -r '.address')
echo "INTEGRITY_ADDRESS will be ${INTEGRITY_ADDRESS}"
# IMPORTANT! this next POST will fail if the INTEGRITY_ADDRESS is not unique. The same data already has been used to create an address in the integrity table
echo curl -s -X POST -H \"Content-Type: application/json\" ${DEV_IMPORT_API_BASE_URL}${DEV_IMPORT_API_RAW_REFRESCO_INTEGRITY_PATH} --data "{\"integrity_address\": \"${INTEGRITY_ADDRESS}\", \"batch\": \"${IMPORT_ID}\"}"
local INTEGRITY_ID=$(curl -s -X POST -H "Content-Type: application/json" ${DEV_IMPORT_API_BASE_URL}${DEV_IMPORT_API_RAW_REFRESCO_INTEGRITY_PATH} --data "{\"integrity_address\": \"${INTEGRITY_ADDRESS}\", \"batch\": \"${IMPORT_ID}\"}" | jq -r '.id')
echo "integrity db id: ${INTEGRITY_ID}"
# A "null" id means the POST was rejected (most likely a duplicate address).
if [ "${INTEGRITY_ID}" != "null" ] ; then
# curl sendtoaddress small amount
local INTEGRITY_PRE_TX=$(curl -s --user $rpcuser:$rpcpassword --data-binary "{\"jsonrpc\": \"1.0\", \"id\":\"sendpretx\", \"method\": \"sendtoaddress\", \"params\": [\"${INTEGRITY_ADDRESS}\", ${SCRIPT_VERSION}, \"\", \"\"] }" -H "content-type: text/plain;" http://$komodo_node_ip:$rpcport/ | jq -r '.result')
echo "integrity pre tx: ${INTEGRITY_PRE_TX}"
curl -s -X PUT -H 'Content-Type: application/json' ${DEV_IMPORT_API_BASE_URL}${DEV_IMPORT_API_RAW_REFRESCO_INTEGRITY_PATH}${INTEGRITY_ID}/ --data "{\"integrity_address\": \"${INTEGRITY_ADDRESS}\", \"integrity_pre_tx\": \"${INTEGRITY_PRE_TX}\" }"
# TODO JCF data model will use this mechanism
# GET THE PARTS OF IMPORT DATA THAT NEED A TX SENT
local ANFP=$(_jq '.anfp')
local PON=$(_jq '.pon')
local BNFP=$(_jq '.bnfp')
local ANFP_ADDRESS=$(_getaddress ${ANFP})
local PON_ADDRESS=$(_getaddress ${PON})
local BNFP_ADDRESS=$(_getaddress ${BNFP})
echo "IMPORT DATA TO SEND TO: ${ANFP} has ${ANFP_ADDRESS} & ${PON} has ${PON_ADDRESS} & ${BNFP} has ${BNFP_ADDRESS}"
# Fund all three field-derived addresses in a single sendmany (distinct
# amounts 0.001/0.002/0.003 distinguish the fields on-chain).
local SMTXID=$(curl -s --user $rpcuser:$rpcpassword --data-binary "{\"jsonrpc\": \"1.0\", \"id\":\"smbatchinputs\", \"method\": \"sendmany\", \"params\": [\"\", {\"${ANFP_ADDRESS}\":0.001,\"${PON_ADDRESS}\":0.002, \"${BNFP_ADDRESS}\": 0.003} ]} " -H 'content-type: text/plain;' http://$komodo_node_ip:$rpcport/ | jq -r '.result')
echo "${SMTXID} is the sendmany"
else
echo "Cannot complete integrity tx, likely cause is RAW_JSON is empty and/or already exists which creates duplicate integrity address, not allowed by db uniqueness constraint"
# TODO add duplicate flag to batch import, so it does not try again
fi
}
# general flow (high level)
#############################
# check for unprocessed imports (import api)
# check for imports with address but no pre-process tx (indicates something wrong with rpc to signmessage or php address-gen)
# check for imports with address but no post-process tx (indicates incomplete import, potential rpc error)
# check for unaddressed records { certificates, facilities, country etc. }
# generate address in subprocess
# fund new wallets that need funding
# wallet maintenance (e.g. consolidate utxos, make sure threshold minimum available for smooth operation)
#############################
# TODO IMPORTANT! integrity issue if batches funded by product journey inputs more than once
# batches getting funded by certificates, locations, dates, IMPORTANT! processing twice a problem. (MYLO)
# certificates are funded wallets, processing twice not ideal, but not an integrity problem.
# batches are paper wallets, processing twice not a problem IF only for updating address data for APIs
#############################
# variables v1
# IMPORTANT! can add, but do not change names until v2 is sanctioned by vic/CI/CD team (MYLO)
#############################
BLOCKHASH=${1}
EXPLORER_1_BASE_URL=
EXPLORER_2_BASE_URL=
INSIGHT_API_GET_ADDRESS_UTXO="insight-api-komodo/addrs/XX_CHECK_ADDRESS_XX/utxo"
INSIGHT_API_BROADCAST_TX="insight-api-komodo/tx/send"
IMPORT_API_BASE_URL=
IMPORT_API_INTEGRITY_PATH=integrity/
IMPORT_API_BATH_PATH=batch/
JUICYCHAIN_API_BASE_URL=
BLOCKNOTIFY_DIR=$(env | grep BLOCKNOTIFY_DIR | cut -d '=' -f2-)
BLOCKNOTIFY_CHAINSYNC_LIMIT=$(env | grep BLOCKNOTIFY_CHAINSYNC_LIMIT | cut -d '=' -f2-)
# dev v1 import-api
DEV_IMPORT_API_IP=$(env | grep IMPORT_API_IP | cut -d '=' -f2-)
DEV_IMPORT_API_PORT=$(env | grep IMPORT_API_PORT | cut -d '=' -f2-)
DEV_IMPORT_API_BASE_URL=http://${DEV_IMPORT_API_IP}:${DEV_IMPORT_API_PORT}/
DEV_IMPORT_API_JCF_BATCH_INTEGRITY_PATH=$(env | grep DEV_IMPORT_API_JCF_BATCH_INTEGRITY_PATH | cut -d '=' -f2-)
DEV_IMPORT_API_JCF_BATCH_PATH=$(env | grep DEV_IMPORT_API_JCF_BATCH_PATH | cut -d '=' -f2-)
DEV_IMPORT_API_JCF_BATCH_REQUIRE_INTEGRITY_PATH=$(env | grep DEV_IMPORT_API_JCF_BATCH_REQUIRE_INTEGRITY_PATH | cut -d '=' -f2-)
DEV_IMPORT_API_JCF_BATCH_NEW_PATH=$(env | grep DEV_IMPORT_API_BATCH_NEW_PATH | cut -d '=' -f2-)
DEV_IMPORT_API_RAW_REFRESCO_PATH=$(env | grep DEV_IMPORT_API_RAW_REFRESCO_PATH | cut -d '=' -f2-)
DEV_IMPORT_API_RAW_REFRESCO_REQUIRE_INTEGRITY_PATH=$(env | grep DEV_IMPORT_API_RAW_REFRESCO_REQUIRE_INTEGRITY_PATH | cut -d '=' -f2-)
DEV_IMPORT_API_RAW_REFRESCO_NEW_PATH=$(env | grep DEV_IMPORT_API_RAW_REFRESCO_NEW_PATH | cut -d '=' -f2-)
DEV_IMPORT_API_RAW_REFRESCO_INTEGRITY_PATH=$(env | grep DEV_IMPORT_API_RAW_REFRESCO_INTEGRITY_PATH | cut -d '=' -f2-)
# TODO after 15 Sept
################################
# dev v1 juicychain-api
DEV_JUICYCHAIN_API_BASE_URL=http://localhost:8888/
DEV_JUICYCHAIN_API_BATCH_PATH=batch/
DEV_JUICYCHAIN_API_CERTIFICATE_PATH=certificate/
DEV_JUICYCHAIN_API_LOCATION_PATH=location/
DEV_JUICYCHAIN_API_COUNTRY_PATH=country/
DEV_JUICYCHAIN_API_BLOCKCHAIN_ADDRESS_PATH=blockchain-address/
##############################
# note, var substitution for XX_CHECK_ADDRESS_XX
# ADDRESS_TO_CHECK="MYLO"
# out="${INSIGHT_API_GET_ADDRESS_UTXO/XX_CHECK_ADDRESS_XX/${ADDRESS_TO_CHECK}}"
# echo $out
#############################
# house keeping
#############################
# get the block height this blocknotify is running,send to api/db/reporting TODO finalize these vars with vic (MYLO)
#BLOCKHEIGHT=$(curl -s --user $rpcuser:$rpcpassword --data-binary "{\"jsonrpc\": \"1.0\", \"id\":\"curltest\", \"method\": \"getblock\", \"params\": [\"${BLOCKHASH}\"] }" -H 'content-type: text/plain;' http://$komodo_node_ip:$rpcport/ | jq -r '.result.height')
################################################################################
# for JCF, JCF_BATCH are the only "BATCH" we refer to as a var in this script
# to onboard new partners, copy & replace RAW_REFRESCO section below
#############################
# batch logic - jcf data model
#############################
# receive json responses
echo "poll import-api: batch/require_integrity/ result follows:"
echo curl -s -X GET ${DEV_IMPORT_API_BASE_URL}${DEV_IMPORT_API_JCF_BATCH_REQUIRE_INTEGRITY_PATH}
RES_DEV_IMPORT_API_JCF_BATCHES_NULL_INTEGRITY=$(curl -s -X GET ${DEV_IMPORT_API_BASE_URL}${DEV_IMPORT_API_JCF_BATCH_REQUIRE_INTEGRITY_PATH})
# from https://stackoverflow.com/a/46955018
if jq -e . >/dev/null 2>&1 <<<"${RES_DEV_IMPORT_API_JCF_BATCHES_NULL_INTEGRITY}"; then
echo "Parsed JSON successfully and got something other than false/null"
echo "Start JSON response"
echo ${RES_DEV_IMPORT_API_JCF_BATCHES_NULL_INTEGRITY}
echo "End JSON response"
else
echo "Failed to parse JSON, or got false/null"
fi
# DEV_IMPORT_API_JCF_BATCH_INTEGRITY_NO_POST_TX=$(curl -s -X GET ${DEV_IMPORT_API_BASE_URL}${DEV_IMPORT_API_INTEGRITY_PATH})
# TODO batches/integrity/missing_post_tx/
# echo ${DEV_IMPORT_API_JCF_BATCH_INTEGRITY_NO_POST_TX}
# integrity-before-processing , check / create address for the import data from integration pipeline
# signmessage, genkomodo.php
# update batches-api with "import-address"
# send "pre-process" tx to "import-address"
# Iterate over each JCF batch record; rows are base64-encoded so each JSON
# object survives word-splitting as a single token.
for row in $(echo "${RES_DEV_IMPORT_API_JCF_BATCHES_NULL_INTEGRITY}" | jq -r '.[] | @base64'); do
_jq() {
echo ${row} | base64 --decode | jq -r ${1}
}
# NOTE(review): the '> /dev/null' inside these command substitutions discards
# jq's stdout, so RAW_JSON and BATCH_DB_ID are always empty strings.
RAW_JSON=$(_jq '.raw_json? | .raw_json' > /dev/null)
BATCH_DB_ID=$(_jq '.id? | .id' > /dev/null)
echo "Checking if id exists"
# NOTE(review): after the assignment above BATCH_DB_ID is always *set* (even
# when empty), so "${BATCH_DB_ID+x}" is always "x" and this branch always
# takes the skip path — the JCF pre-process call below is effectively dead.
# Confirm whether this is an intentional kill-switch before "fixing" it.
if [ "${BATCH_DB_ID+x}" = "x" ] ; then
echo "No result, skipping...."
else
import-jcf-batch-integrity-pre-process ${THIS_NODE_WALLET} ${RAW_JSON} ${BATCH_DB_ID}
fi
done
################################################################################
# for new raw data checking, copy next ~30 lines
# replace RAW_REFRESCO with RAW_NEWCOMPANY
# make sure new vars are declared for RAW_NEWCOMPANY stuff
#############################
# batch logic - raw refresco
#############################
# receive json responses
echo "poll import-api: ${DEV_IMPORT_API_RAW_REFRESCO_REQUIRE_INTEGRITY_PATH} result follows:"
RES_DEV_IMPORT_API_RAW_REFRESCO_NULL_INTEGRITY=$(curl -s -X GET ${DEV_IMPORT_API_BASE_URL}${DEV_IMPORT_API_RAW_REFRESCO_REQUIRE_INTEGRITY_PATH})
echo ${RES_DEV_IMPORT_API_RAW_REFRESCO_NULL_INTEGRITY}
# DEV_IMPORT_API_RAW_REFRESCO_INTEGRITY_NO_POST_TX=$(curl -s -X GET ${DEV_IMPORT_API_BASE_URL}${DEV_IMPORT_API_RAW_REFRESCO_INTEGRITY_PATH})
echo "TODO raw-refresco/integrity/missing_post_tx/"
# echo ${DEV_IMPORT_API_RAW_REFRESCO_INTEGRITY_NO_POST_TX}
# integrity-before-processing , check / create address for the import data from integration pipeline
# signmessage, genkomodo.php
# update batches-api with "import-address"
# send "pre-process" tx to "import-address"
for row in $(echo "${RES_DEV_IMPORT_API_RAW_REFRESCO_NULL_INTEGRITY}" | jq -r '.[] | @base64'); do
_jq() {
echo ${row} | base64 --decode | jq -r ${1}
}
# TODO NOTE: if the RAW_JSON is the same as another import, during the pre-process, the address generation will be same as existing
# so it will not create a new batch tx, and always be in the list of new unprocessed imports
# CORRECT
RAW_JSON=$row
# TO FORCE {} FOR TESTING
# RAW_JSON=$(_jq '.raw_json')
# echo $RAW_JSON | base64 --decode
BATCH_DB_ID=$(_jq '.id')
import-raw-refresco-batch-integrity-pre-process ${THIS_NODE_WALLET} ${RAW_JSON} ${BATCH_DB_ID}
done
###############################################################################
#
# DONT LOOK PAST THIS LINE
#
###############################################################################
# TODO for loop with jq (for each batch with with pre-process tx (not conceived properly yet) (MYLO)
# for each input for this batch, generate tx
# electrum-komodo stuff
# signmessage of input
# genkomodo.php to get wif & address
# get utxo for input to send to batch INSIGHT_API_GET_ADDRESS_UTXO
# createrawtransaction funding batch address & sending change back to this (input) address
# use wif in signmessage.py with the utxo
# broadcast via explorer INSIGHT_API_BROADCAST_TX
# integrity-after-processing
# send "post-process" tx to "import-address"
# "import-address" with pre & post process tx
#############################
############################
# cert logic
# CERTIFICATES_NEW_NO_ADDRESS=$(curl -s -X GET ${CERTIFICATES_GET_UNADDRESSED_URL})
# CERTIFICATES_NO_FUNDING_TX=$(curl -s -X GET ${CERTIFICATES_GET_NO_FUNDING_TX_URL})
# for loop with jq (for each certificate with no address do this)
# signmessage(cert_identifier)
# genkomodo.php for address
# update juicychain-api with address
# certificates need funding, rpc sendtoaddress
# update juicychain-api with funding tx (separate to address-gen update, possibly no funds to send)
| true
|
33d0cb662fc2a63ea4b106b206fb3377b29ef980
|
Shell
|
nagyist/eivind88.master_code
|
/runAllPylearn2.sh
|
UTF-8
| 453
| 3.734375
| 4
|
[] |
permissive
|
#!/usr/bin/env bash
# Run the Pylearn2 experiment in every subdirectory of the folder given as $1,
# clearing each subdirectory's Results/Pylearn2 output first.

# Print usage and exit.
function message {
  echo "This script takes one arg (Converted), which is the folder in which it can find other folder containing .names and .files -files it will upload to the server."
  exit
}

# The old test `[[ !($1) ]]` relied on obscure parsing; -z states the intent.
if [[ -z "$1" ]]; then
  message
fi

# Strip any run of trailing slashes from the folder argument.
FOLDER=$(shopt -s extglob; echo "${1%%+(/)}")
# Abort if the folder cannot be entered — otherwise the rm -rf below would
# run in the wrong directory.
cd "$FOLDER" || exit 1

# Glob instead of parsing `ls` so directory names with spaces survive.
for D in */
do
  cd "$D"
  rm -rf Results/Pylearn2/*
  python runPylearn2.py
  cd ..
done
echo "ALL RUNS ARE FINISHED"
| true
|
f43b110888464823b47a26afb3ee22887427d8a0
|
Shell
|
Smk1989/dark_multitool1.0
|
/Cyanogenmod_scripts/import_libraries/import_deviceblobs
|
UTF-8
| 810
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive helper: ask for the device manufacturer and run that device
# tree's extract-files.sh to pull proprietary blobs from a connected phone.
#
# NOTE(review): $directory_name and $codename are read but never set here —
# presumably exported by the calling script/environment; confirm before
# running standalone.
clear
echo "=============================================================================================="
echo "Please connect your phone via USB to your computer"
echo "Enter the correct name of device manufacturer(for eg. for nozomi i.e xperia s its sony(No caps)"
echo "==============================================================================================="
read manufacturer_name
cd ~/$directory_name/device/
# Only proceed when a device tree exists for the entered manufacturer.
if [ -d "$manufacturer_name" ];
then
echo "Found $manufacturer_name under ~/$directory_name/device/$manufacturer_name"
cd ~/$directory_name/device/$manufacturer_name/$codename
./extract-files.sh
else
echo "Please Enter your manufacturer name correctly"
echo "If you are not sure about the spelling go to ~/$directory_name/device/ and check your manufacturer name there"
fi
| true
|
1e2cbb78b8fe021d3449739a55604b0f88b0f5db
|
Shell
|
andereric/skriptimine
|
/praks6/yl3
|
UTF-8
| 430
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Ask the user which OS they prefer and print a matching message.
osch=0
# Show the options and read the user's choice.
echo "1. Unix (Sun Os)"
echo "2. Linux (Red Hat)"
echo -n "Select your choice of OS (1 or 2)...: "
read osch
# Evaluate the answer; an elif chain replaces the nested if/else.
if [ $osch -eq 1 ]; then
echo "You picked Unix (Sun OS)."
elif [ $osch -eq 2 ]; then
echo "You picked Linux (Red Hat)"
else
echo "What? You don't like Unix/Linux OS?!?!?"
fi
# End of script
| true
|
c9eea65eca2bd9e539f17ad71d91f28731be463b
|
Shell
|
Mikej81/terraform-azure-f5-scca
|
/.envVarsHelperExample.sh
|
UTF-8
| 698
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
## Placeholder configuration — fill these in before sourcing this file.

# Azure subscription / service-principal details.
arm_resource_group=""
arm_client_id=""
arm_client_secret=""
arm_subscription_id=""
arm_tenant_id=""

# SSH credential locations.
# ssh_key_dir="$(echo $HOME)/.ssh"
# ssh_key_name="id_rsa"
ssh_key_dir=""
ssh_key_name=""
azure_ssh_key_name=""
azure_pub_key_name=""

# Publish the Azure values to the environment under the names the tooling
# (Terraform ARM provider) reads.
export ARM_RESOURCE_GROUP="${arm_resource_group}"
export ARM_SUBSCRIPTION_ID="${arm_subscription_id}"
export ARM_TENANT_ID="${arm_tenant_id}"
export ARM_CLIENT_ID="${arm_client_id}"
export ARM_CLIENT_SECRET="${arm_client_secret}"
export AZURE_SSH_KEY_NAME="${azure_ssh_key_name}"
export AZURE_PUB_KEY_NAME="${azure_pub_key_name}"

# Publish the credential locations.
export SSH_KEY_DIR="${ssh_key_dir}"
export SSH_KEY_NAME="${ssh_key_name}"

echo "env vars done"
| true
|
6cd56177d4afb0a8e0a59b72fc6ed2d77824da1f
|
Shell
|
preethihiremath/Unix
|
/loop.sh
|
UTF-8
| 212
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Prompt repeatedly for a name; accept it once it is at most 10 characters.

# validate_name NAME — print the result message; return 0 if NAME is accepted.
validate_name() {
  # ${#1} is the argument's character length (replaces the slow
  # `expr "$name" : '.*'` subprocess of the original).
  if [ ${#1} -gt 10 ] ; then
    echo “name too long”
    return 1
  else
    echo “name=$1”
    return 0
  fi
}

while echo “enter your name: \n” ; do
  # -r keeps backslashes literal; on EOF the name is empty and accepted,
  # matching the original's termination behavior.
  read -r name
  if validate_name "$name" ; then
    break
  fi
done
| true
|
6ebad379b7339403e1dc00725a3c0c9c55625493
|
Shell
|
mygorillacodeteam/kali-core
|
/tests/lib/queries/test_is_dir_exist_not_true.sh
|
UTF-8
| 111
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
# Verify that is_dir reports failure for a path that does not exist.
# `is_dir` and `expect_true` come from the surrounding test framework;
# the `|| echo true` converts is_dir's non-zero exit status into the
# literal word "true" that expect_true asserts on ($LINENO labels the
# failure location in the report).
test_is_dir_exist_not_true() {
expect_true $LINENO $(is_dir "a_directory_that_does_not_exist" || echo true)
}
| true
|
32a8186d6dde0acd0ee38d1e4419c10c0d69ec07
|
Shell
|
elsid/CodeWizards
|
/release.sh
|
UTF-8
| 2,507
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Package the CodeWizards strategy sources for VERSION into out/VERSION.zip
# and set up out/VERSION as a local build dir with symlinks to the runner.
# Usage: ./release.sh VERSION
set -ex

VERSION=${1:?usage: release.sh VERSION}
DIR=${PWD}/out/${VERSION}
ROOT=${PWD}
mkdir "${DIR}"

# Strategy sources and headers shipped in the release archive.
cd cpp-cgdk
release_files=(
    action.cpp base_strategy.cpp battle_mode.cpp circle.cpp graph.cpp
    master_strategy.cpp move_mode.cpp move_to_node.cpp move_to_position.cpp
    MyStrategy.cpp optimal_destination.cpp optimal_movement.cpp
    optimal_path.cpp optimal_position.cpp optimal_target.cpp retreat_mode.cpp
    skills.cpp stats.cpp time_limited_strategy.cpp world_graph.cpp
    abstract_strategy.hpp action.hpp base_strategy.hpp battle_mode.hpp
    cache.hpp circle.hpp common.hpp context.hpp damage.hpp golden_section.hpp
    graph.hpp helpers.hpp line.hpp master_strategy.hpp math.hpp minimize.hpp
    mode.hpp move_mode.hpp move_to_node.hpp move_to_position.hpp MyStrategy.h
    optimal_destination.hpp optimal_movement.hpp optimal_path.hpp
    optimal_position.hpp optimal_target.hpp point.hpp profiler.hpp
    retreat_mode.hpp skills.hpp stats.hpp target.hpp time_limited_strategy.hpp
    world_graph.hpp
)
cp -- "${release_files[@]}" "${DIR}"

# Bundled BOBYQA optimizer; rewrite the system include to a local one.
cd ../bobyqa-cpp/
sed 's/<bobyqa.h>/"bobyqa.h"/' src/bobyqa.cpp > "${DIR}/bobyqa.cpp"
cp -- include/bobyqa.h "${DIR}"
bobyqa_files=(
    src/altmov.cpp src/altmov.hpp src/bobyqb.hpp src/impl.hpp src/prelim.hpp
    src/rescue.hpp src/trsbox.cpp src/trsbox.hpp src/update.cpp
    src/update.hpp src/utils.hpp
)
cp -- "${bobyqa_files[@]}" "${DIR}"

cd "${DIR}"
zip "../${VERSION}.zip" *.hpp *.cpp *.h

# Symlink the contest runner sources next to the release for a local build.
runner_files=(
    Strategy.cpp Strategy.h Runner.cpp Runner.h
    RemoteProcessClient.cpp RemoteProcessClient.h
)
for f in "${runner_files[@]}"; do
    ln -s "${ROOT}/cpp-cgdk-origin/${f}" "${f}"
done
ln -s "${ROOT}/cpp-cgdk-origin/model/" model
ln -s "${ROOT}/cpp-cgdk-origin/csimplesocket" csimplesocket

echo "${DIR}/compilation.log"
# Compile check; on failure show the log instead of aborting (as before).
bash "${ROOT}/cpp-cgdk-origin/compile-g++14.sh" || cat "${DIR}/compilation.log"
echo 'done'
| true
|
4d8658a060dbfcac47eef55a494df22c64a62295
|
Shell
|
xskjs888/docker-web
|
/init.sh
|
UTF-8
| 355
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# Resolve this script's real directory (following symlinks), cd into it,
# create the data directories and install the bundled config into /data.

SOURCE="$0"
# Walk the symlink chain until SOURCE names a regular file.
while [ -h "$SOURCE" ]; do
  DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
  SOURCE="$(readlink "$SOURCE")"
  # POSIX replacement for the bash-only `[[ $SOURCE != /* ]]`:
  # a relative link target is resolved against the link's directory.
  case "$SOURCE" in
    /*) ;;
    *) SOURCE="$DIR/$SOURCE" ;;
  esac
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
cd "$DIR"

# Data layout expected by the web/mysql containers
# (options placed before operands for portability).
mkdir -p /data/www
mkdir -p /data/log/nginx
mkdir -p /data/mysql
cp -r ./conf /data
echo "Done!"
| true
|
4b1a94ca91de35ac883523bc50b1b693dd84508f
|
Shell
|
shiludeng/oh-my-zsh
|
/themes/azure.zsh-theme
|
UTF-8
| 11,271
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#color{{{
autoload colors
colors
for color in RED GREEN YELLOW BLUE MAGENTA CYAN WHITE; do
eval _$color='%{$terminfo[bold]$fg[${(L)color}]%}'
eval $color='%{$fg[${(L)color}]%}'
(( count = $count + 1 ))
done
FINISH="%{$terminfo[sgr0]%}"
#}}}
#命令提示符
#RPROMPT=$(echo "$RED%D %T$FINISH")
PROMPT=$(echo "$CYAN%n@$YELLOW%M:$GREEN%/$_YELLOW>$FINISH ")
#PROMPT=$(echo "$BLUE%M$GREEN%/
#$CYAN%n@$BLUE%M:$GREEN%/$_YELLOW>>>$FINISH ")
#标题栏、任务栏样式{{{
case $TERM in (*xterm*|*rxvt*|(dt|k|E)term)
precmd () { print -Pn "\e]0;%n@%M//%/\a" }
preexec () { print -Pn "\e]0;%n@%M//%/\ $1\a" }
;;
esac
#}}}
#编辑器
export EDITOR=vim
#输入法
export XMODIFIERS="@im=ibus"
export QT_MODULE=ibus
export GTK_MODULE=ibus
#关于历史纪录的配置 {{{
#历史纪录条目数量
export HISTSIZE=10000
#注销后保存的历史纪录条目数量
export SAVEHIST=10000
#历史纪录文件
export HISTFILE=~/.zhistory
#以附加的方式写入历史纪录
setopt INC_APPEND_HISTORY
#如果连续输入的命令相同,历史纪录中只保留一个
setopt HIST_IGNORE_DUPS
#为历史纪录中的命令添加时间戳
setopt EXTENDED_HISTORY
#启用 cd 命令的历史纪录,cd -[TAB]进入历史路径
setopt AUTO_PUSHD
#相同的历史路径只保留一个
setopt PUSHD_IGNORE_DUPS
#在命令前添加空格,不将此命令添加到纪录文件中
#setopt HIST_IGNORE_SPACE
#}}}
#每个目录使用独立的历史纪录{{{
# Per-directory shell history: every directory gets its own history file
# under ~/.zsh_history, mirroring the directory's absolute path. Wraps the
# builtin cd: flush the old history, switch HISTFILE, reload the new one.
cd() {
builtin cd "$@" # do actual cd
fc -W # write current history file
local HISTDIR="$HOME/.zsh_history$PWD" # use nested folders for history
if [ ! -d "$HISTDIR" ] ; then # create folder if needed
mkdir -p "$HISTDIR"
fi
export HISTFILE="$HISTDIR/zhistory" # set new history file
touch $HISTFILE
local ohistsize=$HISTSIZE
HISTSIZE=0 # Discard previous dir's history
HISTSIZE=$ohistsize # Prepare for new dir's history
fc -R #read from current histfile
}
mkdir -p $HOME/.zsh_history$PWD
export HISTFILE="$HOME/.zsh_history$PWD/zhistory"
# Concatenate every per-directory history file (see the cd wrapper above).
function allhistory { cat $(find $HOME/.zsh_history -name zhistory) }
# Deduplicate a zsh extended-history file and render each entry as
# "YYYY-mm-dd HH:MM:SS| command".
function convhistory {
sort $1 | uniq |
sed 's/^:\([ 0-9]*\):[0-9]*;\(.*\)/\1::::::\2/' |
awk -F"::::::" '{ $1=strftime("%Y-%m-%d %T",$1) "|"; print }'
}
# Use the histall command to view the complete history (blank line before cd entries).
function histall { convhistory =(allhistory) |
sed '/^.\{20\} *cd/i\\' }
# Use hist to view the current directory's history.
function hist { convhistory $HISTFILE }
# Top 50 most frequently used words across all history.
function top50 { allhistory | awk -F':[ 0-9]*:[0-9]*;' '{ $1="" ; print }' | sed 's/ /\n/g' | sed '/^$/d' | sort | uniq -c | sort -nr | head -n 50 }
#}}}
#杂项 {{{
#允许在交互模式中使用注释 例如:
#cmd #这是注释
setopt INTERACTIVE_COMMENTS
#启用自动 cd,输入目录名回车进入目录
#稍微有点混乱,不如 cd 补全实用
setopt AUTO_CD
#扩展路径
#/v/c/p/p => /var/cache/pacman/pkg
setopt complete_in_word
#禁用 core dumps
#limit coredumpsize 0
#Emacs风格 键绑定
bindkey -e
#bindkey -v
#设置 [DEL]键 为向后删除
#bindkey "\e[3~" delete-char
#以下字符视为单词的一部分
WORDCHARS='*?_-[]~=&;!#$%^(){}<>'
#}}}
#自动补全功能 {{{
setopt AUTO_LIST
setopt AUTO_MENU
#开启此选项,补全时会直接选中菜单项
#setopt MENU_COMPLETE
#autoload -U compinit
#compinit
autoload -U select-word-style
select-word-style whitespace
#自动补全缓存
#zstyle ':completion::complete:*' use-cache on
#zstyle ':completion::complete:*' cache-path .zcache
#zstyle ':completion:*:cd:*' ignore-parents parent pwd
#自动补全选项
zstyle ':completion:*' verbose yes
zstyle ':completion:*' menu select
zstyle ':completion:*:*:default' force-list always
zstyle ':completion:*' select-prompt '%SSelect: lines: %L matches: %M [%p]'
zstyle ':completion:*:match:*' original only
zstyle ':completion::prefix-1:*' completer _complete
zstyle ':completion:predict:*' completer _complete
zstyle ':completion:incremental:*' completer _complete _correct
zstyle ':completion:*' completer _complete _prefix _correct _prefix _match _approximate
#路径补全
zstyle ':completion:*' expand 'yes'
zstyle ':completion:*' squeeze-shlashes 'yes'
zstyle ':completion::complete:*' '\\'
#彩色补全菜单
eval $(dircolors -b)
export ZLSCOLORS="${LS_COLORS}"
zmodload zsh/complist
zstyle ':completion:*' list-colors ${(s.:.)LS_COLORS}
zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#)*=0=01;31'
#修正大小写
zstyle ':completion:*' matcher-list '' 'm:{a-zA-Z}={A-Za-z}'
#错误校正
zstyle ':completion:*' completer _complete _match _approximate
zstyle ':completion:*:match:*' original only
zstyle ':completion:*:approximate:*' max-errors 1 numeric
#kill 命令补全
compdef pkill=kill
compdef pkill=killall
zstyle ':completion:*:*:kill:*' menu yes select
zstyle ':completion:*:*:*:*:processes' force-list always
zstyle ':completion:*:processes' command 'ps -au$USER'
#补全类型提示分组
zstyle ':completion:*:matches' group 'yes'
zstyle ':completion:*' group-name ''
zstyle ':completion:*:options' description 'yes'
zstyle ':completion:*:options' auto-description '%d'
zstyle ':completion:*:descriptions' format $'\e[01;33m -- %d --\e[0m'
zstyle ':completion:*:messages' format $'\e[01;35m -- %d --\e[0m'
zstyle ':completion:*:warnings' format $'\e[01;31m -- No Matches Found --\e[0m'
zstyle ':completion:*:corrections' format $'\e[01;32m -- %d (errors: %e) --\e[0m'
# cd ~ 补全顺序
zstyle ':completion:*:-tilde-:*' group-order 'named-directories' 'path-directories' 'users' 'expand'
#}}}
##行编辑高亮模式 {{{
# Ctrl+@ 设置标记,标记和光标点之间为 region
zle_highlight=(region:bg=magenta #选中区域
special:bold #特殊字符
isearch:underline)#搜索时使用的关键字
#}}}
## Tab on an empty line (cursor at start) completes to "cd " {{{
# ZLE widget bound to Tab below: rewrites a few shorthand buffers before
# running normal expand-or-complete.
user-complete(){
case $BUFFER in
"" ) # empty line: insert "cd "
BUFFER="cd "
zle end-of-line
zle expand-or-complete
;;
"cd --" ) # replace "cd --" with "cd +" (directory-stack completion)
BUFFER="cd +"
zle end-of-line
zle expand-or-complete
;;
"cd +-" ) # replace "cd +-" with "cd -"
BUFFER="cd -"
zle end-of-line
zle expand-or-complete
;;
* )
zle expand-or-complete
;;
esac
}
zle -N user-complete
bindkey "\t" user-complete
#}}}
#命令别名 {{{
alias cp='cp -i'
alias mv='mv -i'
alias rm='rm -i'
alias ls='ls -F --color=auto'
alias ll='ls -al'
alias grep='grep --color=auto'
alias la='ls -a'
alias pacman='sudo pacman-color'
alias p='sudo pacman-color'
alias y='yaourt'
alias h='htop'
#git alias
alias g='git'
alias gb='git branch'
alias gst='git status'
alias gsh='git show'
alias gd='git diff'
alias gco='git checkout'
#adb alias
alias aks='adb kill-server'
alias ass='adb start-server'
alias ars='aks && ass'
alias alog='adb logcat -C'
#[Esc][h] man 当前命令时,显示简短说明
alias run-help >&/dev/null && unalias run-help
autoload run-help
#历史命令 top10
alias top10='print -l ${(o)history%% *} | uniq -c | sort -nr | head -n 10'
#}}}
#路径别名 {{{
#进入相应的路径时只要 cd ~xxx
#hash -d A="/media/ayu/dearest"
#hash -d H="/media/data/backup/ayu"
#hash -d E="/etc/"
#hash -d D="/home/ayumi/Documents"
hash -d weigou="/home/users/shiludeng/data/dev/app/ecom/weigou"
hash -d moviese="/home/users/shiludeng/data/dev/app/search/movie/user/se"
hash -d ub="/home/users/shiludeng/data/dev/public/ub"
hash -d example="/home/users/shiludeng/data/dev/com/tools/ubgen"
#}}}
#{{{自定义补全
#补全 ping
zstyle ':completion:*:ping:*' hosts 192.168.1.{1,50,51,100,101} www.google.com
#补全 ssh scp sftp 等
#zstyle -e ':completion::*:*:*:hosts' hosts 'reply=(${=${${(f)"$(cat {/etc/ssh_,~/.ssh/known_}hosts(|2)(N) /dev/null)"}%%[# ]*}//,/ })'
#}}}
#{{{ F1 计算器
arith-eval-echo() {
LBUFFER="${LBUFFER}echo \$(( "
RBUFFER=" ))$RBUFFER"
}
zle -N arith-eval-echo
bindkey "^[[11~" arith-eval-echo
#}}}
####{{{
function timeconv { date -d @$1 +"%Y-%m-%d %T" }
# }}}
zmodload zsh/mathfunc
autoload -U zsh-mime-setup
zsh-mime-setup
setopt EXTENDED_GLOB
#autoload -U promptinit
#promptinit
#prompt redhat
setopt correctall
autoload compinstall
#漂亮又实用的命令高亮界面
setopt extended_glob
# Tokens after which the next word is treated as a command (and recolored).
TOKENS_FOLLOWED_BY_COMMANDS=('|' '||' ';' '&' '&&' 'sudo' 'do' 'time' 'strace')
# Live command-line highlighting: walk the words of $BUFFER and color each
# command word by its type (reserved word / alias / builtin / function /
# external; sudo in red), writing ranges into ZLE's region_highlight.
recolor-cmd() {
region_highlight=()
colorize=true
start_pos=0
for arg in ${(z)BUFFER}; do
# advance past the whitespace preceding this word
((start_pos+=${#BUFFER[$start_pos+1,-1]}-${#${BUFFER[$start_pos+1,-1]## #}}))
((end_pos=$start_pos+${#arg}))
if $colorize; then
colorize=false
res=$(LC_ALL=C builtin type $arg 2>/dev/null)
case $res in
*'reserved word'*) style="fg=magenta,bold";;
*'alias for'*) style="fg=cyan,bold";;
*'shell builtin'*) style="fg=yellow,bold";;
*'shell function'*) style='fg=green,bold';;
*"$arg is"*)
[[ $arg = 'sudo' ]] && style="fg=red,bold" || style="fg=blue,bold";;
*) style='none,bold';;
esac
region_highlight+=("$start_pos $end_pos $style")
fi
# after a separator/prefix token, the following word is a command again
[[ ${${TOKENS_FOLLOWED_BY_COMMANDS[(r)${arg//|/\|}]}:+yes} = 'yes' ]] && colorize=true
start_pos=$end_pos
done
}
# Re-run the highlighter after every insertion/deletion.
check-cmd-self-insert() { zle .self-insert && recolor-cmd }
check-cmd-backward-delete-char() { zle .backward-delete-char && recolor-cmd }
zle -N self-insert check-cmd-self-insert
zle -N backward-delete-char check-cmd-backward-delete-char
# Initialize colors.
autoload -U colors
colors
# Allow for functions in the prompt.
setopt PROMPT_SUBST
# Autoload zsh functions.
fpath=(~/.zsh/functions $fpath)
autoload -U ~/.zsh/functions/*(:t)
# Set the prompt.
# Prompt pieces and git/svn theme settings.
local ret_status="%(?:%{$fg_bold[green]%}➜ :%{$fg_bold[red]%}➜ %s)"
export MODE_INDICATOR="<<<vi>>>"
RPS2="$(vi_mode_prompt_info)"
PROMPT='%{${fg[cyan]}%}%n@%{${fg[yellow]}%}%M:%{${fg[cyan]}%}%B%/%{$fg_bold[blue]%}$(git_prompt_info)%{$fg_bold[blue]%}$(svn_prompt_info)%{${fg[default]}%}%{$fg[cyan]%}$(vi_mode_prompt_info)
> '
ZSH_PROMPT_BASE_COLOR="%{$fg_bold[blue]%}"
# Fixed typo "yello" -> "yellow": the invalid key expanded to an empty
# escape, leaving the repo name uncolored. The exact duplicate assignment
# that followed was dropped.
ZSH_THEME_REPO_NAME_COLOR="%{$fg_bold[yellow]%}"
SVN_SHOW_BRANCH="true"
ZSH_THEME_GIT_PROMPT_PREFIX="%{$fg_bold[magenta]%} -git- %{$reset_color%}%{$fg_bold[yellow]%}"
ZSH_THEME_GIT_PROMPT_SUFFIX="%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_UNTRACKED="%{$fg[cyan]%} ?"
ZSH_THEME_GIT_PROMPT_DIRTY="%{$fg_bold[red]%} ±"
ZSH_THEME_GIT_PROMPT_CLEAN="%{$fg_bold[red]%} ♥"
ZSH_THEME_SVN_PROMPT_PREFIX="%{$fg_bold[magenta]%} -svn- %{$reset_color%}%{$fg_bold[yellow]%}"
ZSH_THEME_SVN_PROMPT_SUFFIX="%{$reset_color%}"
ZSH_THEME_SVN_PROMPT_UNTRACKED="%{$fg[cyan]%} ?"
ZSH_THEME_SVN_PROMPT_DIRTY="%{$fg_bold[red]%} ±"
ZSH_THEME_SVN_PROMPT_CLEAN="%{$fg_bold[red]%} ♥"
| true
|
6dca047f52109a97b596c4edcda0b1b667a06e74
|
Shell
|
CSCfi/hpc_biotools
|
/infoseq_summary.bash
|
UTF-8
| 1,877
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -f
# A script to fetch sequences from a sequence file or database
# Prints a summary (name, size, format, sequence statistics) for an
# EMBOSS-compatible sequence file or USA, using EMBOSS `infoseq`.
# 21.1. 2011 KM
export LC_ALL=C
#setenv PLPLOT_LIB /v/linux26_x86_64/appl/molbio/emboss/6.5.7/share/EMBOSS
export PATH=/appl/molbio/emboss/6.5.7_gnu/bin:${PATH}
#setenv LD_LIBRARY_PATH /v/linux26_x86_64/appl/molbio/emboss/libharu/2.0.8/lib
# NOTE(review): osformat is assigned but never used below.
osformat=("fasta")
# Require the sequence file / USA argument.
if [[ "$1" == "" ]]
then
echo "Syntax:"
echo "infoseq_summary sequence_file"
exit 1
fi
# Regular file: report name/size/format first; bail out early if the
# format checker rejects it. Otherwise assume $1 is a USA definition.
if [[ -e $1 ]]
then
infile=$1
printf "%s\t%s\n" " File name: " $1
size=$(du -sh "$1" | awk '{print $1}' )
printf "%s\t%s\n" " File size: " $size
sformat=$(sfcheck.bash "$1")
printf "%s\t%s\n" " File format: " $sformat
if [[ "$sformat" == "Not an EMBOSS compatible sequence file" ]]
then
exit 0
fi
else
# seqret "$1" $TMPDIR/infoseq_summary_tmp_$$
# set infile=$TMPDIR/infoseq_summary_tmp_$$
echo ' USA definition: '"\t""$1"
fi
# Stream per-sequence records ("::"-delimited) through awk, accumulating:
# total length (a), longest (l/ln) and shortest (s/sn) sequence, average
# (ka); the END blocks print the summary. Type comes from infoseq's
# -type column (N = nucleotide, P = protein).
infoseq -nowarning -nocolumn -delimiter "::" -nohead "$1" -only -usa -name -type -length -filter | awk -F "::" 'BEGIN { s = 10000000000} { a = a +$NF} \
{ if ( $NF > l) l = $NF } { if ( $NF == l) ln = $3 } { if ( $NF < s) s = $NF} { if ( $NF == s) sn = $3} {ka = a / NR} \
END { if ( $4 == "N") print " Sequence type: \tNucleotide"} \
END { if ( $4 == "P") print " Sequence type: \tProtein"} \
END { print " Number of sequences: \t" NR } \
END { print " Longest (or one of equally long): \t" ln "\t" l } \
END { print " Shortest (or one of equally short): \t" sn "\t"s } \
END { print " Average length: \t" ka } \
END {print " Total amount of nucleotides/residues:\t" a } \
END { if ( NF > 5) print " Note: Sequence names seem to contain douple-douple point (::) characters!"}'
| true
|
d6e77d9b9c0353785d1202495a6d5a0f4f1444cc
|
Shell
|
lindblom/node-postgres-vagrant-saltstack
|
/salt/srv/salt/nodejs/install.sh
|
UTF-8
| 286
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Download a pinned Node.js binary release and install it into /usr/local.
set -e

NODE_VERSION=v0.10.18
ARCHIVE="node-${NODE_VERSION}-linux-x64"

wget "http://nodejs.org/dist/${NODE_VERSION}/${ARCHIVE}.tar.gz"
tar -zxf "${ARCHIVE}.tar.gz"
cd "${ARCHIVE}"
# Merge binaries, libraries and shared data into the system prefix.
cp -prf bin/* /usr/local/bin/
cp -prf lib/* /usr/local/lib/
cp -prf share/* /usr/local/share/
| true
|
9bb21ab7fbbe3cecbd71b288ef14e9947db7bf1a
|
Shell
|
xdays/dockerfiles
|
/wireguard/scripts/run.sh
|
UTF-8
| 672
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint: install WireGuard against the host kernel, bring
# the configured interface up, and keep it up until the container stops.
set -e

# Install Wireguard. This has to be done dynamically since the kernel
# module depends on the host kernel version.
apt update
apt install -y "linux-headers-$(uname -r)"
apt install -y wireguard

# Find a Wireguard interface config (defaults to wg0).
interface=$(find /etc/wireguard -name "${INTERFACE:-wg0}.conf")
if [[ -z $interface ]]; then
    echo "$(date): Interface not found in /etc/wireguard" >&2
    exit 1
fi

echo "$(date): Starting Wireguard with $interface"
wg-quick up "$interface"

# Handle shutdown behavior: tear the interface down cleanly on SIGTERM etc.
finish () {
    echo "$(date): Shutting down Wireguard"
    wg-quick down "$interface"
    exit 0
}
trap finish SIGTERM SIGINT SIGQUIT

# Block forever (interruptibly) so the trap can run.
sleep infinity &
wait $!
| true
|
78b7e09bfa6ba9b9d9f979c1718273b46c6e6dd0
|
Shell
|
MarcosJRcwb/log2ram
|
/build-packages.sh
|
UTF-8
| 1,121
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Build the log2ram .deb package and publish it to the local apt repo.
# Exit the script if any of the commands fail.
set -e
set -u
set -o pipefail

# Work from the directory containing this script.
cd "$(dirname "${BASH_SOURCE[0]}")"
STARTDIR="$(pwd)"
DESTDIR="$STARTDIR/pkg"
OUTDIR="$STARTDIR/deb"

# Remove potential leftovers from a previous build.
rm -rf "$DESTDIR" "$OUTDIR"

## log2ram payload, one "mode source destination" triple per entry
## (systemd unit, main script, config, uninstaller, cron hook, logrotate).
payload=(
  "644 log2ram.service etc/systemd/system/log2ram.service"
  "755 log2ram usr/local/bin/log2ram"
  "644 log2ram.conf etc/log2ram.conf"
  "644 uninstall.sh usr/local/bin/uninstall-log2ram.sh"
  "755 log2ram.cron etc/cron.daily/log2ram"
  "644 log2ram.logrotate etc/logrotate.d/log2ram"
)
for entry in "${payload[@]}"; do
  set -- $entry
  install -Dm "$1" "$STARTDIR/$2" "$DESTDIR/$3"
done

# Assemble control files and build the .deb.
mkdir "$DESTDIR/DEBIAN" "$OUTDIR"
cp "$STARTDIR/debian/"* "$DESTDIR/DEBIAN/"
dpkg-deb --build "$DESTDIR" "$OUTDIR"

# Publish to both supported distributions.
reprepro -b /var/www/repos/apt/debian includedeb buster "$OUTDIR"/*.deb
reprepro -b /var/www/repos/apt/debian includedeb stretch "$OUTDIR"/*.deb
| true
|
0e55ef17e1b525ff8503752c8aeee1f3e4cedd6e
|
Shell
|
chrisi51/snapper
|
/snapper
|
UTF-8
| 6,297
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Automated LVM backup: for every active logical volume (minus exclusions)
# create a snapshot, compress it to an image with pigz, upload it via
# duplicity over scp, and mail a success/failure report.
# All settings (CMD_DUPLICITY, BACKUPPATH, TEMPLOGDATEI, LOGDATEI,
# PASSPHRASE, FTP_*, SAVETIME, EXCLUDE_LVS, KONTAKTADRESSE, CMD_CAT)
# come from /etc/backup.conf.
source /etc/backup.conf
if test -f $CMD_DUPLICITY;
then
# Ensure the local image staging directory exists.
if [ ! -d ${BACKUPPATH}images ];
then
echo "have to create the backup path: ${BACKUPPATH}images";
mkdir ${BACKUPPATH}images;
fi
# Start a fresh temporary log for this run.
echo "" > ${TEMPLOGDATEI}
OIFS=$IFS
# NOTE(review): `unset $EXCLUDE_GREP` unsets the variable NAMED by the
# (empty) value, not EXCLUDE_GREP itself — presumably `unset EXCLUDE_GREP`
# was intended; confirm before changing.
unset $EXCLUDE_GREP;
# -l PATTERN restricts the run to LVs matching PATTERN (snapshots excluded).
while getopts ':l:' OPTION ; do
case "$OPTION" in
l) EXCLUDE_GREP="| grep '$OPTARG' | grep -v '_snap'";;
esac
done
# Default filter: drop snapshot LVs and everything listed in EXCLUDE_LVS.
if [ -z "$EXCLUDE_GREP" ];
then
EXCLUDE_GREP="| grep -v 'Snapshot'"
for i in ${EXCLUDE_LVS[@]}; do
EXCLUDE_GREP="$EXCLUDE_GREP | grep -v '$i'"
done
fi
echo $EXCLUDE_GREP;
# List candidate LVs with the filter applied (eval expands the pipeline).
lvscantext=`eval /sbin/lvscan $EXCLUDE_GREP`
echo $lvscantext;
datum="$(date +%Y-%m-%d)"
echo $datum > ${BACKUPPATH}lastbackup.txt
datumzeit="$(date +%Y-%m-%d-%H-%M-%S)"
PROBLEM="false"
# Split lvscan output per line for the loop below.
IFS=$'\n'
i=0
export PASSPHRASE=$PASSPHRASE
export FTP_PASSWORD=$FTP_PASSWORD
echo "-------------------------------------------------" >> ${TEMPLOGDATEI} 2>&1
echo "-------------------------------------------------" >> ${TEMPLOGDATEI} 2>&1
echo "----- BACKUP-JOB $datumzeit ------------" >> ${TEMPLOGDATEI} 2>&1
echo "-------------------------------------------------" >> ${TEMPLOGDATEI} 2>&1
for zeile in $lvscantext
do
i=$[$i+1]
# echo $zeile
# Parse one lvscan line: state, LV device path, LV name, VG name, size.
ACTIVE=`echo $zeile | awk -v FS=" " '{print $1}'`
LV=`echo $zeile | awk -v FS="'" '{print $2}'`
LVname=`echo $LV | sed "s:/:_:g" | sed "s:_::"`
LVname=`echo $LV | awk -v FS="/" '{print $NF}'`
LVnamerev=`echo $LVname | rev`
VGname=`echo $LV | rev | sed "s:${LVnamerev}/::" | rev`
Kapazitaet=`echo $zeile | awk -v FS="[" '{print $2}' | awk -v FS=" " '{print $1}'`
# Snapshot size: a tenth of the LV size, but at least 1 GiB.
if [ $(echo "$Kapazitaet > 10" | bc) -eq 1 ];
then
Kapazitaet=$(echo "$Kapazitaet/10" | bc );
else
Kapazitaet="1";
fi
# Only back up active LVs that have no image for today yet.
if [ $ACTIVE == "ACTIVE" -a ! -e ${BACKUPPATH}images/${datum}-${LVname}.img.gz ];
then
echo "Backup $LVname:" >> ${TEMPLOGDATEI} 2>&1
echo "-------------------------------------------------" >> ${TEMPLOGDATEI} 2>&1
echo "=> Loeschen alter Backup-Images" >> ${TEMPLOGDATEI} 2>&1
# Delete any leftover old backup image archives for this LV
rm -f ${BACKUPPATH}images/*${LVname}.img.gz > /dev/null
echo "=> Anlegen des Snapshots ${LVname}_snap" >> ${TEMPLOGDATEI} 2>&1
# Create a snapshot of the current volume
echo "$i $ACTIVE --- > LV: $LV, Kapazitaet: $Kapazitaet, LVName: $LVname, VGName: $VGname"
`/sbin/lvcreate --size ${Kapazitaet}G --snapshot --name ${LVname}_snap $LV > /dev/null`
sync
sleep 1
echo "=> Image ${datum}-${LVname}.img.gz aus Snapshot ${LVname}_snap erstellen" >> ${TEMPLOGDATEI} 2>&1
# Write the snapshot into a compressed image (lowest CPU/IO priority)
nice -n19 ionice -c3 /usr/bin/pigz -c < ${VGname}/${LVname}_snap > ${BACKUPPATH}images/${datum}-${LVname}.img.gz
sync
sleep 3
echo "=> ${SAVETIME} alte Images vom FTP-Server entfernen" >> ${TEMPLOGDATEI} 2>&1
# Remove all remote backups older than ${SAVETIME}
$CMD_DUPLICITY remove-older-than ${SAVETIME} --force scp://${FTP_USER}@${FTP_SERVER}/vm-images/${LVname} >> ${TEMPLOGDATEI} 2>&1
echo "=> aktuelles Image ${datum}-${LVname}.img.gz auf FTP-Server ablegen" >> ${TEMPLOGDATEI} 2>&1
# Upload today's image. NOTE(review): the original comment promised a
# weekly full / daily incremental scheme, but this always runs `full`.
$CMD_DUPLICITY full --volsize 500 ${BACKUPPATH}images/${datum}-${LVname}.img.gz scp://${FTP_USER}@${FTP_SERVER}/vm-images/${LVname} >> ${TEMPLOGDATEI} 2>&1
echo "=> Snapshot ${LVname}_snap entfernen" >> ${TEMPLOGDATEI} 2>&1
# Drop the snapshot again to free the reserved space
/sbin/lvremove -f ${VGname}/${LVname}_snap > /dev/null
sync
sleep 1
# NOTE(review): $? here is the exit status of `sleep 1`, not of the
# backup/cleanup commands above — confirm whether this check ever fires.
if [ $? -ne 0 ]; then
# echo "ERROR eMail an $KONTAKTADRESSE geschickt" >> ${TEMPLOGDATEI} 2>&1
# $CMD_CAT ${TEMPLOGDATEI} | mail -s "Backup-Fehler auf `hostname`!" $KONTAKTADRESSE
PROBLEMVM=`echo ${PROBLEMVM} ${LVname}`
PROBLEM="true"
fi
echo "-------------------------------------------------" >> ${TEMPLOGDATEI} 2>&1
fi
done
# Show remote disk usage for reference.
echo "du -sm . " | lftp -u ${FTP_USER},${FTP_PASSWORD} ${FTP_SERVER}
# echo "df" | sftp ${FTP_USER}@${FTP_SERVER}
# echo "df -h" | sftp ${FTP_USER}@${FTP_SERVER}
# echo "df -hi" | sftp ${FTP_USER}@${FTP_SERVER}
unset PASSPHRASE
unset FTP_PASSWORD
echo "----- HDD-Use für einzelne Sicherung ------------" >> ${TEMPLOGDATEI} 2>&1
# Warn by mail when the local image store exceeds 48 GB.
HDDuseINT=`du -s -B G /srv/storage/backup/images/ | awk -v FS=" " '{print $1}' | sed "s:G::g" | sed "s:M::" | sed "s:B::"`
HDDuseREAL=`du -sh /srv/storage/backup/images/ | awk -v FS=" " '{print $1}' | sed "s:G::g" | sed "s:M::" | sed "s:B::"`
if [ $HDDuseINT -ge 48 ]; then
echo " WARNUNG --- BACKUPS WERDEN ZU GROSS ($HDDuseREAL)" >> ${TEMPLOGDATEI} 2>&1
# PROBLEM="true"
echo " WARNUNG --- BACKUPS WERDEN ZU GROSS ($HDDuseREAL)" | mail -s "Backups werden zu gross auf `hostname`!" $KONTAKTADRESSE
else
echo " $HDDuseREAL" >> ${TEMPLOGDATEI} 2>&1
fi
echo "----- DONE --------------------------------------" >> ${TEMPLOGDATEI} 2>&1
# Mail the run log with a success or failure subject.
if [ $PROBLEM != "true" ]; then
echo "SUCCESS eMail an $KONTAKTADRESSE geschickt" >> ${TEMPLOGDATEI} 2>&1
$CMD_CAT ${TEMPLOGDATEI} | mail -s "Backup Erfolgreich auf `hostname`!" $KONTAKTADRESSE
else
echo "ERROR eMail an $KONTAKTADRESSE geschickt" >> ${TEMPLOGDATEI} 2>&1
$CMD_CAT ${TEMPLOGDATEI} | mail -s "Backup-Fehler auf `hostname`!" $KONTAKTADRESSE
fi
# Append this run's log to the permanent log file.
$CMD_CAT ${TEMPLOGDATEI} >> ${LOGDATEI}
IFS=$OIFS
else
echo "duplicity was not found. please correct path-"
echo "duplicity was not found. please correct path-" | mail -s "duplicity not found @ `hostname`!" $KONTAKTADRESSE
fi
| true
|
9786f8038ce835fe687abfb7b90160accaf40bbe
|
Shell
|
mobomo/MobomoU
|
/bin/composer
|
UTF-8
| 437
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run composer inside the mobomo/composer container, mounting the
# project's composer files and directories from the current directory.

readonly PROGDIR="$(cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd)"
readonly PROJECTDIR="$(dirname "$PROGDIR")"

# Pass the script's arguments straight through with "$@".
# (The previous `readonly ARGS="$@"` collapsed all arguments into one
# string and then re-split it unquoted, breaking arguments with spaces.)
docker run \
    --rm \
    -t \
    -v "$(pwd)/composer.json:/app/composer.json" \
    -v "$(pwd)/composer.lock:/app/composer.lock" \
    -v "$(pwd)/web:/app/web" \
    -v "$(pwd)/vendor:/app/vendor" \
    -v "$(pwd)/scripts:/app/scripts" \
    -v composer-cache:/tmp/cache \
    mobomo/composer "$@"
| true
|
bdf1ca5fbe6350b00687dec9b302df899cc84228
|
Shell
|
trueman1990/VistaModels
|
/ARM/linux_qt_a9/sw/script/build_sdcard.sh
|
UTF-8
| 614
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the sdcard sysroot image: collect built kernel modules, patch the
# init script with the kernel version, and generate sysroot.ext2.
set -e

SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
SW_ROOT="$(dirname "$SCRIPT")"
# Provides build environment variables, including $VER_LINUX.
source "$SW_ROOT/setup.sh"

cd "$SW_ROOT/sdcard"
export SYSROOT="$SW_ROOT/sdcard/sysroot"

mkdir -p "$SYSROOT/lib/modules/$VER_LINUX"
# Copy every built kernel module. find -exec handles any number of files
# safely (the old backticks-inside-cp relied on word splitting).
find "$SW_ROOT/kernel_modules" -name "*.ko" -exec cp {} "$SYSROOT/lib/modules/$VER_LINUX" \;
# Substitute the placeholder with the actual kernel version.
sed -i 's/VER_LINUX/'"$VER_LINUX"'/' "$SYSROOT/etc/init.d/rcS"
#sed -i 's/.*LCD_CONSOLE.*/tty1\:\:once\:\/root\/watch \-platform linuxfb/' $SYSROOT/etc/inittab

rm -rf sysroot.ext2
echo "Generating sysroot.ext2..."
genext2fs -b 500000 -d "$SYSROOT" "$SW_ROOT/sdcard/sysroot.ext2"
echo "Completed..."
| true
|
05aaa35fbba9bf8e9f0d5b384d16a490342e015d
|
Shell
|
everpeace/docker-kafka
|
/start-kafka.sh
|
UTF-8
| 2,578
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Configure $KAFKA_HOME/config/server.properties from environment
# variables, then start the broker in the foreground.
#
# Optional ENV variables:
# * ADVERTISED_LISTENERS: the listner address to be advertized to clients for the container, e.g. "PLAINTEXT://`docker-machine ip \`docker-machine active\``:9092"
# * ZK_CONNECT: the zookeeper.connect value, e.g "zk1:2181,zk2:2181,zk3:2181/kafka"
# * LOG_RETENTION_HOURS: the minimum age of a log file in hours to be eligible for deletion (default is 168, for 1 week)
# * LOG_RETENTION_BYTES: configure the size at which segments are pruned from the log, (default is 1073741824, for 1GB)
# * NUM_PARTITIONS: configure the default number of log partitions per topic
# * AUTO_CREATE_TOPICS: enable/disable automatic topic creation
# * BROKER_ID: broker id for the container; if unset, a unique broker id
#   will be assigned by using zookeeper (the broker.id line is commented out).

PROPS=$KAFKA_HOME/config/server.properties

# Advertised listeners (slashes escaped for use inside the sed pattern).
if [ -n "$ADVERTISED_LISTENERS" ]; then
    echo "advertised listeners: $ADVERTISED_LISTENERS"
    export ESCAPED_ADVERTISED_LISTENERS=$(echo $ADVERTISED_LISTENERS | sed 's/\//\\\//g');
    sed -r -i "s/#(advertised.listeners)=(.*)/\1=$ESCAPED_ADVERTISED_LISTENERS/g" $PROPS
fi

# Zookeeper connection string.
if [ -n "$ZK_CONNECT" ]; then
    echo "zookeeper.connect: $ZK_CONNECT"
    sed -r -i "s/zookeeper.connect=localhost:2181/zookeeper.connect=$ZK_CONNECT/g" $PROPS
fi

# Log retention policies.
if [ -n "$LOG_RETENTION_HOURS" ]; then
    echo "log retention hours: $LOG_RETENTION_HOURS"
    sed -r -i "s/(log.retention.hours)=(.*)/\1=$LOG_RETENTION_HOURS/g" $PROPS
fi
if [ -n "$LOG_RETENTION_BYTES" ]; then
    echo "log retention bytes: $LOG_RETENTION_BYTES"
    sed -r -i "s/#(log.retention.bytes)=(.*)/\1=$LOG_RETENTION_BYTES/g" $PROPS
fi

# Default number of log partitions per topic.
if [ -n "$NUM_PARTITIONS" ]; then
    echo "default number of partition: $NUM_PARTITIONS"
    sed -r -i "s/(num.partitions)=(.*)/\1=$NUM_PARTITIONS/g" $PROPS
fi

# Automatic topic creation (appended; not present in the stock file).
if [ -n "$AUTO_CREATE_TOPICS" ]; then
    echo "auto.create.topics.enable: $AUTO_CREATE_TOPICS"
    echo "auto.create.topics.enable=$AUTO_CREATE_TOPICS" >> $PROPS
fi

# Fixed broker id, or comment the line out so zookeeper assigns one.
if [ -n "$BROKER_ID" ]; then
    echo "broker.id: $BROKER_ID"
    sed -r -i "s/broker.id=(.*)/broker.id=$BROKER_ID/g" $PROPS
else
    sed -r -i "s/(broker.id=.*)/#\1/g" $PROPS
fi

# Show the effective configuration, then run Kafka.
cat $PROPS
$KAFKA_HOME/bin/kafka-server-start.sh $PROPS
| true
|
6ead8cdb1019e81dad86c2402b101a1228cf77c6
|
Shell
|
RobertAudi/.dotfiles
|
/zsh/.config/zsh/plugins/spectrum/bin/spectrum.pacman
|
UTF-8
| 4,911
| 2.71875
| 3
|
[
"WTFPL"
] |
permissive
|
#!/usr/bin/env zsh
# Print the pacman-ghosts color test banner in normal and bold variants.

# ANSI escape helpers (foreground, background, attributes).
local esc="$(echo -en '\e')"

local blackf="${esc}[30m"; local redf="${esc}[31m"; local greenf="${esc}[32m"
local yellowf="${esc}[33m"; local bluef="${esc}[34m"; local purplef="${esc}[35m"
local cyanf="${esc}[36m"; local whitef="${esc}[37m"

# Fixed a missing ';' after yellowb: the original
#   local yellowb="..." local blueb="...";
# passed the word "local" to `local`, declaring a spurious variable
# named "local" instead of terminating the statement.
local blackb="${esc}[40m"; local redb="${esc}[41m"; local greenb="${esc}[42m"
local yellowb="${esc}[43m"; local blueb="${esc}[44m"; local purpleb="${esc}[45m"
local cyanb="${esc}[46m"; local whiteb="${esc}[47m"

local boldon="${esc}[1m"; local boldoff="${esc}[22m"
local italicson="${esc}[3m"; local italicsoff="${esc}[23m"
local ulon="${esc}[4m"; local uloff="${esc}[24m"
local invon="${esc}[7m"; local invoff="${esc}[27m"

local reset="${esc}[0m"

# The banner itself; variables expand inside the unquoted heredoc.
cat << EOF
${yellowf} ▄███████▄${reset}  ${redf} ▄██████▄${reset}  ${greenf} ▄██████▄${reset}  ${bluef} ▄██████▄${reset}  ${purplef} ▄██████▄${reset}  ${cyanf} ▄██████▄${reset}
${yellowf}▄█████████▀▀${reset} ${redf}▄${whitef}█▀█${redf}██${whitef}█▀█${redf}██▄${reset} ${greenf}▄${whitef}█▀█${greenf}██${whitef}█▀█${greenf}██▄${reset} ${bluef}▄${whitef}█▀█${bluef}██${whitef}█▀█${bluef}██▄${reset} ${purplef}▄${whitef}█▀█${purplef}██${whitef}█▀█${purplef}██▄${reset} ${cyanf}▄${whitef}█▀█${cyanf}██${whitef}█▀█${cyanf}██▄${reset}
${yellowf}███████▀${reset}     ${redf}█${whitef}▄▄█${redf}██${whitef}▄▄█${redf}███${reset} ${greenf}█${whitef}▄▄█${greenf}██${whitef}▄▄█${greenf}███${reset} ${bluef}█${whitef}▄▄█${bluef}██${whitef}▄▄█${bluef}███${reset} ${purplef}█${whitef}▄▄█${purplef}██${whitef}▄▄█${purplef}███${reset} ${cyanf}█${whitef}▄▄█${cyanf}██${whitef}▄▄█${cyanf}███${reset}
${yellowf}███████▄${reset}     ${redf}████████████${reset} ${greenf}████████████${reset} ${bluef}████████████${reset} ${purplef}████████████${reset} ${cyanf}████████████${reset}
${yellowf}▀█████████▄▄${reset} ${redf}██▀██▀▀██▀██${reset} ${greenf}██▀██▀▀██▀██${reset} ${bluef}██▀██▀▀██▀██${reset} ${purplef}██▀██▀▀██▀██${reset} ${cyanf}██▀██▀▀██▀██${reset}
${yellowf} ▀███████▀${reset}   ${redf}▀   ▀  ▀   ▀${reset} ${greenf}▀   ▀  ▀   ▀${reset} ${bluef}▀   ▀  ▀   ▀${reset} ${purplef}▀   ▀  ▀   ▀${reset} ${cyanf}▀   ▀  ▀   ▀${reset}

${boldon}${yellowf} ▄███████▄  ${redf} ▄██████▄  ${greenf} ▄██████▄  ${bluef} ▄██████▄  ${purplef} ▄██████▄  ${cyanf} ▄██████▄${reset}
${boldon}${yellowf}▄█████████▀▀ ${redf}▄${whitef}█▀█${redf}██${whitef}█▀█${redf}██▄ ${greenf}▄${whitef}█▀█${greenf}██${whitef}█▀█${greenf}██▄ ${bluef}▄${whitef}█▀█${bluef}██${whitef}█▀█${bluef}██▄ ${purplef}▄${whitef}█▀█${purplef}██${whitef}█▀█${purplef}██▄ ${cyanf}▄${whitef}█▀█${cyanf}██${whitef}█▀█${cyanf}██▄${reset}
${boldon}${yellowf}███████▀     ${redf}█${whitef}▄▄█${redf}██${whitef}▄▄█${redf}███ ${greenf}█${whitef}▄▄█${greenf}██${whitef}▄▄█${greenf}███ ${bluef}█${whitef}▄▄█${bluef}██${whitef}▄▄█${bluef}███ ${purplef}█${whitef}▄▄█${purplef}██${whitef}▄▄█${purplef}███ ${cyanf}█${whitef}▄▄█${cyanf}██${whitef}▄▄█${cyanf}███${reset}
${boldon}${yellowf}███████▄     ${redf}████████████ ${greenf}████████████ ${bluef}████████████ ${purplef}████████████ ${cyanf}████████████${reset}
${boldon}${yellowf}▀█████████▄▄ ${redf}██▀██▀▀██▀██ ${greenf}██▀██▀▀██▀██ ${bluef}██▀██▀▀██▀██ ${purplef}██▀██▀▀██▀██ ${cyanf}██▀██▀▀██▀██${reset}
${boldon}${yellowf} ▀███████▀   ${redf}▀   ▀  ▀   ▀ ${greenf}▀   ▀  ▀   ▀ ${bluef}▀   ▀  ▀   ▀ ${purplef}▀   ▀  ▀   ▀ ${cyanf}▀   ▀  ▀   ▀${reset}
EOF
| true
|
58208cdf9249465c0186e2b31c8bd2276a8b9795
|
Shell
|
nuagenetworks/nuage-metroae-config
|
/metroae-config
|
UTF-8
| 2,329
| 4.09375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run metroae_config inside its docker container. On first run, download
# and unpack the MetroAE Config templates and the VSD API specifications.
# Shebang corrected from /bin/sh to /bin/bash: the script relies on bash
# features (the `function` keyword, arrays, [[ ]], and the &> redirection).
cd "$(dirname "$0")"

TEMPLATE_TAR_LOCATION="https://metroae-config-templates.s3.amazonaws.com/metroae_config.tar"
VSD_SPECIFICATIONS_LOCATION="https://vsd-api-specifications.s3.us-east-2.amazonaws.com/specifications.tar"

SPEC_FOLDER=./vsd-api-specifications
TEMPLATE_FOLDER=./levistate-templates

TAG=$(git rev-parse --short HEAD)

# Allocate a TTY for docker only when we are attached to one.
TI=""
[ -t 0 ] && TI="-ti"

# Fetch and unpack the template and spec tarballs into their folders.
# Prints manual instructions and exits gracefully when offline.
download_container_templates() {
    if ! command -v curl &> /dev/null; then
        echo "The command 'curl' was not found in PATH"
        echo "This is a small utility for downloading files from the Internet"
        echo "It is needed for downloading templates from Amazon S3"
        echo "Please install this utility with:"
        echo ""
        echo "sudo yum install curl"
        echo ""
        exit 1
    fi

    {
        curl -O $TEMPLATE_TAR_LOCATION &> /dev/null &&
        curl -O $VSD_SPECIFICATIONS_LOCATION &> /dev/null
    } || {
        echo "Warning: Internet connection not detected from the container."
        echo "MetroAE Config Templates and VSD Specification files were not downloaded."
        echo "Please exit the container first and download the tar files from the provided URLs below."
        echo "Download the MetroAE Config Template files from the following URL:"
        echo "$TEMPLATE_TAR_LOCATION"
        echo ""
        echo "Download the VSD Specification files from the following URL:"
        echo "$VSD_SPECIFICATIONS_LOCATION"
        echo "Upon successful download of the two tar files"
        echo "untar the files to the current directory"
        echo ""
        exit 0
    }

    template_tar_file=$(basename $TEMPLATE_TAR_LOCATION)
    specs_tar_file=$(basename $VSD_SPECIFICATIONS_LOCATION)

    mkdir -p $TEMPLATE_FOLDER
    mkdir -p $SPEC_FOLDER

    tar -xf $specs_tar_file -C $SPEC_FOLDER
    tar -xf $template_tar_file -C $TEMPLATE_FOLDER

    rm -f $template_tar_file
    rm -f $specs_tar_file
}

. ./metroae-config.in

# Run the command inside docker
args=( "$@" )
[[ ! -d "$SPEC_FOLDER" || ! -d "$TEMPLATE_FOLDER" ]] && download_container_templates

docker run --network=host --rm $TI -v "`pwd`:/metroaeconfig" metroaeconfig:$TAG python3 metroae_config.py "${args[@]}" -tp "$TEMPLATE_FOLDER/templates" -sp "$SPEC_FOLDER"
| true
|
68ba44ca7b0e22d0384c8173accc370d371209f9
|
Shell
|
AviSoomirtee/sysAdmin
|
/trash/backup_script_auto/auto_inventaire.sh
|
UTF-8
| 8,937
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
#############################################################################################
###### This is an automated script made for correcting errors for Unix Inventory. #####
###### The list of servers DNS and name should be placed in host.txt #####
###### Logs will be created with output of errors if failed or with success. #####
###### For any modification to this script please consult the author first. #####
###### Author: Diresh Soomirtee. #####
#############################################################################################
CONF="/usr/local/linkbynet/script/inventaire/etc/ocsinventory-agent.cfg"
FILE="/var/lib/lbn-ocsinventory-agent/https:__gate.linkbynet.com_ec99b813fa064f7f7cfa1d35bc7cc3d743c61fd1_/cacert.pem"
#chk_file="[[ -f "'/var/lib/lbn-ocsinventory-agent/https:__gate.linkbynet.com_ec99b813fa064f7f7cfa1d35bc7cc3d743c61fd1_/cacert.pem'" ]] ; echo $?"
STATE='0'
PROX_STATE='0'
SSL_STATE='0'
TRY='0'
EXIT='0'
#HOST='uxfrlb-bckapp1p.sodexo.lbn.eq3.std.linkbynet.com'
#HOST='lvmh-mhis-dynacollec01.lvmh.lbn.ie1.std.linkbynet.com'
#HOST="fdf-pp-bdd-02.fdf.lbn.ie1.std.linkbynet.com"
#HOST='fdf-bdd-02.fdf.lbn.ie1.std.linkbynet.com'
#HOST='lvmh-mhisdocker-pp-dock01.lvmh.lbn.ie1.std.linkbynet.com'
#HOST='uxfrlb-zabmon4p.sodexo.lbn.eq3.std.linkbynet.com'
#HOST='lvmh-chaumet-linbdd01.lvmh.lbn.ie1.std.linkbynet.com'
#HOST='zzxlbndc2ux.ppr.th2.par.linkbynet.com'
#HOST='edf-partage-web-01.edf.lbn.ie1.std.linkbynet.com'
#HOST='idg-jac-bdd-p-02.idgroup.lbn.ix1.aub.linkbynet.com'
#HOST='idg-jac-solr-p-01.idgroup.lbn.ix1.aub.linkbynet.com'
#HOST='pointpeco-bo-rct-1.outiz.lbn.ix1.aub.linkbynet.com'
#HOST='pcis-lvmhbdd1d.pcis.lbn.ie2.std.linkbynet.com'
#HOST='casyope-consult-p-01.casyope.lbn.ie2.std.linkbynet.com'
#HOST='pano-web1.panoranet.lbn.ie1.std.linkbynet.com'
#HOST='pano-web2.panoranet.lbn.ie1.std.linkbynet.com'
HOST='eram-prod-bdd01-adm.eram.lbn.ie2.std.linkbynet.com'
BASH='execute.bash'
CMD='all_cmds.bash'
CRT='add_crt.bash'
SSL_B='b_ssl.bash'
# Known agent error strings matched verbatim against the remote log output.
# Do NOT reformat/translate them (ERR_COM3 is a French agent message).
ERR_COM="Cannot establish communication : 500 Can't connect to gate.linkbynet.com:443 (connect: Connection timed out)"
ERR_COM2="Cannot establish communication : 500 Can't connect to gate.linkbynet.com:443"
ERR_COM3="Cannot establish communication : 500 Can't connect to gate.linkbynet.com:443 (connect: Connexion terminée par expiration du délai d'attente)"
ERR_SSL4="Cannot establish communication : 500 Can't connect to gate.linkbynet.com:443"
ERR_SSL="Cannot establish communication : 500 SSL_ca_file /var/lib/lbn-ocsinventory-agent/https:__gate.linkbynet.com_ec99b813fa064f7f7cfa1d35bc7cc3d743c61fd1_/cacert.pem does not exist"
ERR_SSL2="Cannot establish communication : 500 SSL_ca_file /var/lib/lbn-ocsinventory-agent/https:__gate.linkbynet.com_ec99b813fa064f7f7cfa1d35bc7cc3d743c61fd1_/cacert.pem can't be used: No such file or directory"
ERR_SSL3="Cannot establish communication : 500 SSL negotiation failed:"
# Check on $HOST (via ssh) whether the OCS agent cacert.pem exists.
# Returns 0 when the certificate is present, 1 when it must be copied.
function check_cacert {
read ext_file <<< $(ssh $HOST [[ -f "'/var/lib/lbn-ocsinventory-agent/https:__gate.linkbynet.com_ec99b813fa064f7f7cfa1d35bc7cc3d743c61fd1_/cacert.pem'" ]] ; echo $?)
echo "Does file exists"
echo $ext_file
if [ "$ext_file" = '1' ]
then
echo " "
echo "Cacert.pem not found."
echo " "
echo "Need to copy certificate."
return 1
#STATE='2'
else
echo "Certificate Cacert.pem already exist."
return 0
fi
}
while [ ! $EXIT = '1' ]
do
echo "****************************** Script starting *******************************"
read PROXY <<< $(sed -n 1p $CMD | ssh -t $HOST)
read ERRORR <<< $(sed -n 2p $CMD | ssh -t $HOST)
read ERRLOG <<< $(sed -n 3p $CMD | ssh -t $HOST)
echo "***************************** The log found **********************************"
echo " "
echo $ERRLOG
echo " "
echo "******************************************************************************"
echo " "
echo " "
echo "************************* This is the ERROR in log ***************************"
echo " "
if [ -z "$ERRORR" ]
then
echo "No errors found"
else
echo "******************* The error found **************************"
echo $ERRORR
fi
echo " "
###################################################################################### Checking for the time Inventaire is running
echo "***************************** Checking cron time *****************************"
read MIN <<< $(sed -n 4p $CMD | ssh -t $HOST)
sleep 2
read HOUR <<< $(sed -n 5p $CMD | ssh -t $HOST)
sleep 2
echo " "
echo " "
echo "*************************** Inventaire running at $HOUR:$MIN ******************"
#MIN="23"
#HOUR="4"
####################################################################################### Setting the cron 2 min before inventaire is run
if [ "$MIN" = "23" ]
then
echo " "
echo "Cron for adding proxy will run at 4 21"
MIN=21
fi
echo " "
#echo "$HOUR"
#echo "$MIN"
echo "*************************** Cron will be run at $HOUR:$MIN ******************"
#echo " "
#echo " "
echo "*************************** Analyzing the error *********************************"
echo " "
echo "$ERRORR"
echo " "
echo "*********************************************************************************"
echo " "
############################################################## Checkiing if proxy is available
if [ -z "$PROXY" ]
then
echo " "
echo "Proxy not available"
else
echo " "
echo "Proxy available"
echo $PROXY
PROX_STATE="1"
fi
#check_cacert
############################################################### Checking for communication errors
if [ "$ERRORR" = "$ERR_COM3" ]
then
echo " "
echo "Maybe need proxy"
if [ "$PROX_STATE" = "1" ]
then
STATE='1'
fi
elif [ "$ERRORR" = "$ERR_COM2" ]
then
# BUGFIX: the original 'if [ check_cacert $1 ]' only tested that the literal
# string "check_cacert" is non-empty (always true) and never ran the check.
# check_cacert returns 1 when the certificate is missing, so negate it.
if ! check_cacert
then
echo "Need ccacert.pem"
STATE='2'
else
echo "Maybe need proxy"
if [ "$PROX_STATE" = "1" ]
then
STATE='1'
fi
fi
################################################################# Checking for cacert.pem
elif [ "$ERRORR" = "$ERR_SSL" ] || [ "$ERRORR" = "$ERR_SSL2" ]
then
echo " "
echo "Checking if cacert.pem is present"
# BUGFIX: call the function (see note above); was '[ check_cacert $1 ]'.
if ! check_cacert
then
echo "Need certificate"
STATE='2'
else echo "Certificate already exist."
fi
elif [ "$ERRORR" = "$ERR_SSL3" ] || [ "$ERRORR" = "$ERR_SSL4" ]
then
echo " "
echo "Checking if cacert.pem is present"
# BUGFIX: call the function (see note above); was '[ check_cacert $1 ]'.
if ! check_cacert
then
echo "Need certificate"
STATE='2'
elif [ $TRY = '1' ]
then
STATE='3'
else echo "Certificate already exist."
fi
else
echo "No errors were found."
fi
############################################################################## cases for a particular task to be executed for correcting the errors
case $STATE in
1)
echo " "
echo "Going to add proxy"
echo "sudo -i" > $BASH
echo "cd /etc/cron.d/" >> $BASH
echo "touch inventaire_proxy" >> $BASH
TEMP_PROXY="$MIN $HOUR * * * root echo $PROXY"
TEMP2_PROXY="$TEMP_PROXY >> /usr/local/linkbynet/script/inventaire/etc/ocsinventory-agent.cfg"
echo "echo '$TEMP2_PROXY' >> inventaire_proxy " >> $BASH
echo "cd " >> $BASH
echo "echo '$PROXY' >> /usr/local/linkbynet/script/inventaire/etc/ocsinventory-agent.cfg" >> $BASH
echo "cd /usr/local/linkbynet/script/inventaire/" >> $BASH
echo "sh inventaire.sh --no-timer" >> $BASH
echo "exit" >> $BASH
echo "exit" >> $BASH
cat $BASH | ssh -t -t $HOST
echo "Proxy added successfull" >> $HOST.txt
echo "----" >> $HOST.txt
echo "This is the proxy added: $PROXY" >>$HOST.txt
;;
2)
echo "Adding certificate"
echo "Adding certificate" >> $HOST.txt
#cat $CRT | ssh -t -t $HOST
echo "Certificate successfully added."
echo "Certificate successfully added." >> $HOST.txt
;;
3)
echo "Need to bypass ssl"
echo "Bypassing ssl negoticiation " >> $HOST.txt
#cat $SSL_B | ssh -t -t $HOST
echo "Done"
;;
0)
echo "Nothing to do"
;;
esac
echo "************************* Checking for log *********************************************"
read RESULT <<< $(sed -n 3p $CMD | ssh -t $HOST)
echo "************************ Here is the log **********************************************"
echo " "
echo $RESULT
echo " "
echo "****************************************************************************************"
read ERRORR <<< $(sed -n 2p $CMD | ssh -t $HOST)
sleep 1
if [ -z "$ERRORR" ]
then
echo "************************************************************************************"
echo " "
echo "Completed successfully."
STATE='0'
EXIT='1'
echo "Completed successfully." >> $HOST.txt
else
echo " "
TRY='1'
echo "hmmmm... something went wrong."
echo "hmmmm... something went wrong." >> $HOST.txt
fi
echo " "
echo " "
echo "************************************ Script Ended! **************************************"
done
| true
|
af508e2aa44f84e4c39e447803da2dba199d37f2
|
Shell
|
HumanNeuroscienceLab/facemem
|
/10_preproc/02_timing_global.bash
|
UTF-8
| 2,005
| 3.875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Convert per-run ("local") stimulus timing files to concatenated ("global")
# timing — one output file per subject/runtype/condition — for use with the
# concatenated functional series.

### Paths ------------------------------------------------------------------
basedir="/mnt/nfs/psych/faceMemoryMRI"
timingdir="${basedir}/scripts/timing"
datadir="${basedir}/data/nifti"
subjects=$( cd ${datadir}; ls -d * )
runtypes=( Questions NoQuestions )
short_runtypes=( withQ noQ )

### Helpers ----------------------------------------------------------------

# Echo the number of volumes in each functional run, space-separated.
run_lengths() {
  local lengths=()
  local func
  for func in "$@"; do
    lengths+=( "$(fslnvols "$func")" )
  done
  echo "${lengths[@]}"
}

# Convert the local onset times in $1 to global onsets written to $3.
# $2 is the space-separated run-length list; it is expanded UNQUOTED on
# purpose so each length becomes its own argument to timing_tool.py.
local_to_global() {
  local infile="$1"
  local runlengths="$2"
  local outfile="$3"
  timing_tool.py -tr 1 -timing "$infile" -run_len ${runlengths} -local_to_global "$outfile"
}

# Process one subject/runtype: measure the run lengths, then convert the
# 'bio' and 'phys' condition timing files.
run_subject() {
  local subject="$1"
  local runtype="$2"
  local short_runtype="$3"
  local runlengths cond
  echo "...runlengths"
  runlengths=$( run_lengths ${datadir}/${subject}/${subject}_FaceMemory01_${short_runtype}_run*.nii.gz )
  echo "...local2global"
  for cond in bio phys; do
    local_to_global "${timingdir}/allruns_faceMemory01_${subject}_${runtype}_${cond}" "${runlengths}" "${timingdir}/global_allruns_faceMemory01_${subject}_${runtype}_${cond}"
  done
}

### Main -------------------------------------------------------------------
for subject in ${subjects}; do
  echo
  for idx in "${!runtypes[@]}"; do
    echo "${subject} - ${runtypes[idx]}"
    run_subject "${subject}" "${runtypes[idx]}" "${short_runtypes[idx]}"
  done
done
| true
|
3a97e0850bce685a61abefeee5eeaa66276766b7
|
Shell
|
rrfuentes/histo-recom
|
/computeRECOM.sh
|
UTF-8
| 12,155
| 3.28125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
#-----------------------------Output files-----------------------------
#SBATCH --output=output_%A.%a.txt
#SBATCH --error=error_%A.%a.txt
#-----------------------------Required resources-----------------------
#SBATCH --time=0-200:00:00
#SBATCH --array=1-12
#SBATCH --cpus-per-task=2 #10
#SBATCH --mem=5000 #50000
#----------------------------------------------------------------------
# SLURM array job (one task per chromosome): estimate recombination rates
# with LDhat over overlapping SNP windows, merge the per-window results,
# then detect hotspots with sequenceLDhot.
chromfile=S_lycopersicum_chromosomes.4.00.chrom
# Pick this task's chromosome name from line $SLURM_ARRAY_TASK_ID.
chrom=$(sed -n "$SLURM_ARRAY_TASK_ID"p "$chromfile")
inputdir=Pimp_Genomes/Merged/$chrom
outdir=Pimp_Genomes
sites=$inputdir/${chrom}_merged.ldhat.sites
locs=$inputdir/${chrom}_merged.ldhat.locs
# Window size and overlap are in SNP counts, not base pairs.
window=5000
overlap=500
# Total number of SNPs (first field of the first .locs line).
pos=$(awk 'NR==1{print $1; exit;}' $locs)
splitdir=$inputdir/Windows
LDout=$inputdir/LDhat
seqLDout=$inputdir/seqLDhot
rhoout=$outdir/LDhat
seqout=$outdir/seqLD
mkdir -p $splitdir
mkdir -p $LDout
mkdir -p $seqLDout
mkdir -p $rhoout
mkdir -p $seqout
numhap=$(awk 'NR==1{print $1; exit;}' $sites) #retrieve the number of haplotypes
#GENERATE lookup table separately
#if [ $SLURM_ARRAY_TASK_ID -eq 1 ]; then
#  lkgen -lk /home/WUR/fuent011/Tools/LDhat/lk_files/lk_n192_t0.001 -nseq $numhap -prefix $outdir/lk_n${numhap}_t0.001
#fi
lkfile=Pimp_Genomes/Merged/lk_n${numhap}_t0.001new_lk.txt
# Number of overlapping windows (0-based index of the last one) and the
# count of SNPs left over after the last full stride.
lim=$(((pos/(window-overlap))-1))   #0-based file/window naming
remainder=$((pos%(window-overlap)))
if [ $remainder -ne 0 ];then
   if [ $remainder -gt $overlap ];then (( lim++ )); fi
   #if remainder is less than overlap, adjust the size of the last window
fi
if [ $pos -lt $window ];then #Number of SNPs is less than window size
   window=$pos
   overlap=0
   lim=0
fi
echo -e "$chrom\t$((lim+1))"
# Stage toggle: skip=1 disables the window-splitting, stat and merge
# stages below (only the sbatch submissions and the final seqLD merge run).
skip=1
if [ $skip -eq 0 ]; then
for i in `seq 0 $lim`
do
   strpos=$((i*window - overlap*$i + 1)) #1-based
   endpos=$((strpos+window - 1))
   cur_window=$window
   if [ $remainder -ne 0 ] && [ $i -eq $lim ]; then
      #Last window contains the remaining SNPs but spans the same size as other window#
      if [ $remainder -gt $overlap ];then
          strpos=$((pos-window+1)) #take the last (window)-SNPs from the end
          endpos=$pos  #The very last SNP position
      else #otherwise, use same start but shorter window size
          cur_window=$((window-overlap+remainder))
          endpos=$((strpos+window-overlap+remainder-1))
      fi
   fi
   echo -e "Window:$i;\t$strpos-$endpos Size:$cur_window"
   #Prepare locs file per window
   awk -v winidx=$i -v win=$cur_window -v sp=$strpos -v ep=$endpos 'BEGIN{cnt=0;}
     {if(NR-1>=sp && NR-1<=ep){
         row[++cnt]=$0;
         if(NR-1==sp) x=$1; if(NR-1==ep) y=$1; #Get genomic position of the bounding SNPs
      }
     } END{
        #Add header
        if(winidx) printf("%d\t%.3f\tL\n",win,y-x+1); #Add 1 to enable LDhat to consider the last SNP per window
        else printf("%d\t%.3f\tL\n",win,y+1);
        #Extract blocks of SNP positions
        for(i=1;i<=win;i++){
           if(winidx) printf("%.4f\n",row[i]-x+0.001); #Second window to last window
           else printf("%.4f\n",row[i]); #First window
        }
     }' $locs > $splitdir/${chrom}_${i}.locs
   awk -v winidx=$i -v win=$cur_window -v sp=$strpos -v ep=$endpos 'NR==1{print $1"\t"win"\t1";}
     NR>1{
        if($0~/^>/){
           if(length($0)>30){
              print "ERROR: Sequence name is too long\n";
              exit 1;
           }
           print $0; #Print sequence name
        }else{
           lines=int(win/2000); #only 2k characters per line allowed in LDhat
           for(i=0;i<lines;i++){
              print substr($0,sp+i*2000,2000);
              if(i+1==lines && win%2000) #Print the remaining characters
                 print substr($0,sp+((i+1)*2000),win%2000);
           }
        }
     }' $sites > $splitdir/${chrom}_${i}.sites
done
fi
#RUN LDhat using a job array
sbatch --array=0-$lim runLDhat.sh -i $splitdir/${chrom} -l $lkfile -x 20000000 -y 2000 -b 5 -p $LDout/${chrom}
wait
#RUN LDhat stat on each window
if [ $skip -eq 0 ]; then
cd $LDout
for i in `seq 0 $lim`
do
   stat -input $LDout/${chrom}_${i}.rates.txt -loc $splitdir/${chrom}_${i}.locs -burn 2000
   mv res.txt $LDout/${chrom}_${i}.res.txt
done
fi
#for i in `less /lustre/nobackup/WUR/BIOINF/fuent011/References/Tomato/S_lycopersicum_chromosomes.4.00.chrom`; do inputdir="/lustre/nobackup/WUR/BIOINF/fuent011/Project2.0/Pimp_Genomes/Merged"; /home/WUR/fuent011/Tools/LDhat/convert -seq $inputdir/$i/${i}_merged.ldhat.sites -loc $inputdir/$i/${i}_merged.ldhat.locs | awk -v chr=$i '$1~/Segregating/{n=split($0,a," "); if(n==4){numsnp=a[4];}} $1~/^Watterson/{split($0,a," "); printf("%s\t%d\t%f\t%.6f\n",chr,numsnp,a[4],(a[4]/numsnp)/1e-8);}' -; done
# Merge windows: stitch per-window LDhat results back into one per-chromosome
# rho track, trimming overlap/2 SNPs from each internal window edge.
if [ $skip -eq 0 ]; then
printf "" > $rhoout/${chrom}_rho
offset=0
for i in `seq 0 $lim`
do
   strpos=$((i*window - overlap*$i + 1)) #1-based
   endpos=$((strpos+window - 1))
   cur_window=$window
   if [ $remainder -ne 0 ] && [ $i -eq $lim ]; then
      #Last window contains the remaining SNPs but spans the same size as other window#
      if [ $remainder -gt $overlap ];then
          strpos=$((pos-window+1)) #take the last (window)-SNPs from the end
          endpos=$pos  #The very last SNP position
      else #otherwise, use same start but shorter window size
          cur_window=$((window-overlap+remainder))
          endpos=$((strpos+window-overlap+remainder-1))
      fi
   fi
   if [ $i -ne 0 ]; then
      offset=$(awk -v sp=$strpos '{if(NR-1==sp) print $1;}' $locs )
   fi
   echo -e "Offset:$offset; Window=$i; Startpos:$strpos"
   awk -v os=$offset -v winidx=$i -v win=$cur_window -v overl=$overlap -v lm=$lim -v rem=$remainder 'NR>2{ #offset of 2 rows for res.txt
      if(winidx==0){
         #first window or single-window chromosome
         if(NR<=win+2-(overl/2)) printf("%.3f\t%.5f\n",$1,$2);
      }else if(winidx==lm){
         #if(NR==win-rem+(overl/2)+2) print "Last window";
         #only include the remainder positions; exclude additional overlap/2 because the first window has size window-(overlap/2)
         if(rem>overl){ #last block of size equal to other windows
            if(NR>win-rem+(overl/2)+2) printf("%.3f\t%.5f\n",$1+os-0.001,$2);
         }else{  #last block with shorter size
            if(NR>(overl/2)+2) printf("%.3f\t%.5f\n",$1+os-0.001,$2);
         }
      }else{ #if(winidx)
         #exclude overlap/2 positions from both ends of the window
         if(NR>(overl/2)+2 && NR<=win+2-(overl/2)) printf("%.3f\t%.5f\n",$1+os-0.001,$2);
      }
   }' $LDout/${chrom}_${i}.res.txt >> $rhoout/${chrom}_rho
done
fi
#printf "" > FINAL_results/Pimp/Pimp_ALLCHR_rho; for i in `ls Pimp_Genomes/LDhat/*_rho`; do awk '{tmp=FILENAME; gsub(/^.*\//,"",tmp); gsub("_rho","",tmp); printf("%s\t%d\t%.10f\n", tmp,$1*1000,$2);}' $i >> FINAL_results/Pimp/Pimp_ALLCHR_rho; done
#printf "" > FINAL_results/Pimp/Pimp_ALLCHR_rho.bed; for i in `ls Pimp_Genomes/LDhat/*_rho`; do awk 'NR==1{tmp=FILENAME; gsub(/^.*\//,"",tmp); gsub("_rho","",tmp); startp=$1*1000; rho=$2;} NR>1{endp=$1*1000; printf("%s\t%d\t%d\t%.10f\n",tmp,startp,endp,rho); startp=$1*1000; rho=$2;}' $i >> FINAL_results/Pimp/Pimp_ALLCHR_rho.bed; done
#printf "" > FINAL_results/Pimp_ALLCHR_rho_100kbwindow; for i in `ls Pimp_Genomes/LDhat/SL4.0ch*_rho`; do printf "" > tmp; awk 'NR==1{tmp=FILENAME; gsub(/^.*\//,"",tmp); gsub("_rho","",tmp); pos=$1; rho=$2;} NR>1{startp=pos*1000; endp=$1*1000; rate=($1-pos)*rho; intervallen=($1-pos)*1000; for(i=startp;i<endp;i++){printf("%s\t%d\t%d\t%.10f\n",tmp,i,i,rate/intervallen);} pos=$1; rho=$2;}' $i >> tmp; awk -v chr=$i 'BEGIN{gsub(/^.*\//,"",chr); gsub("_rho","",chr);} NR>1{if($1==chr) print $0;}' ../References/Tomato/S_lycopersicum_chromosomes.4.00.len | bedtools makewindows -g - -w 100000 -s 10000 > tmpwin; bedtools map -a tmpwin -b tmp -c 4 -o sum >> FINAL_results/Pimp_ALLCHR_rho_100kbwindow; done
#awk '{print $0"\t"$2;}' FINAL_results/Pimp/Pimp_SNPpositions | bedtools intersect -v -a - -b ../References/Tomato/ITAG4.0_RepeatModeler_repeats_light.gff | bedtools intersect -a FINAL_results/Window100kb_step50kb -b - -c > FINAL_results/Pimp/Pimp_SNPs_100kb
#RUN sequenceLDhot for using a job array
sbatch --array=0-$lim runseqLDhot.sh -c $chrom -i $splitdir/${chrom} -r $LDout/${chrom} -w 50000 -p $seqLDout/${chrom}
wait
# Merge the per-window sequenceLDhot summaries and LDhat background rates,
# converting window-local coordinates back to chromosome coordinates (bp).
if [ $skip -eq 1 ]; then
printf "" > $seqout/${chrom}_sum
printf "" > $seqout/${chrom}_bgres
for i in `seq 0 $lim`
do
   strpos=$((i*window - overlap*$i + 1)) #1-based
   endpos=$((strpos+window - 1))
   if [ $remainder -ne 0 ] && [ $i -eq $lim ]; then
      #Last window contains the remaining SNPs but spans the same size as other window#
      if [ $remainder -gt $overlap ];then
          strpos=$((pos-window+1)) #take the last (window)-SNPs from the end
          endpos=$pos  #The very last SNP position
      else #otherwise, use same start but shorter window size
          cur_window=$((window-overlap+remainder))
          endpos=$((strpos+window-overlap+remainder-1))
      fi
   fi
   if [ $i -ne 0 ]; then
      offset=$(awk -v sp=$strpos '{if(NR-1==sp) print $1*1000;}' $locs )
      if [ $i -ne $lim ]; then
         if [ $remainder -gt $overlap ];then
            pos1=$(awk -v sp=$((strpos+overlap/2)) '{if(NR-1==sp) print $1*1000;}' $locs )
            pos2=$(awk -v ep=$((endpos-overlap/2)) '{if(NR-1==ep) print $1*1000;}' $locs )
         else #otherwise, use same start but shorter window size
            pos1=$(awk -v sp=$((strpos+overlap/2)) '{if(NR-1==sp) print $1*1000;}' $locs )
            pos2=$(awk -v ep=$pos '{if(NR-1==ep) print $1*1000;}' $locs )
         fi
      else
         pos1=$(awk -v sp=$((pos-remainder+1+overlap/2)) '{if(NR-1==sp) print $1*1000;}' $locs )
         pos2=$(awk -v ep=$pos '{if(NR-1==ep) print $1*1000;}' $locs )
      fi
   else
      offset=0
      pos1=$(awk -v sp=$strpos '{if(NR-1==sp) print $1*1000;}' $locs )
      pos2=$(awk -v ep=$((endpos-overlap/2)) '{if(NR-1==ep) print $1*1000;}' $locs )
   fi
   echo -e "$strpos; $endpos; $pos1; $pos2; $offset"
   awk -F" " -v p1=$pos1 -v p2=$pos2 -v os=$offset '{if($1+os>=p1 && $2+os<=p2) print $1+os"\t"$2+os"\t"$3"\t"$4;}' $seqLDout/${chrom}_${i}_input.sum >> $seqout/${chrom}_sum
   awk -F" " -v p1=$pos1 -v p2=$pos2 -v os=$offset '{if($3+os>=p1 && $2+os<=p2) printf("%s\t%d\t%d\t%.10f\n",$1,$2+os,$3+os,$5);}' $LDout/${chrom}_${i}.bgres >> $seqout/${chrom}_bgres
done
fi
# NOTE: everything below the 'exit' never runs; the one-liners are kept as a
# reference for the downstream post-processing steps.
exit
#Combine all chrom background recombination rates
cat Pimp_Genomes/seqLD/SL4.0ch*_bgres | sort -k1,1 -k2,2n > FINAL_results/Pimp/Pimp_ALLCHR_bgres
printf "" > FINAL_results/Pimp/Pimp_seqLD; for i in `ls Pimp_Genomes/seqLD/SL4.0ch*_sum`; do awk 'NR==1{LR=$3; RHO=$4; tmp=FILENAME; gsub(/.*\//,"",tmp); gsub("_sum","",tmp);} NR>1{if($3==""){print tmp"\t"$1"\t"$2"\t"LR"\t"RHO;}else{print tmp"\t"$0; LR=$3; RHO=$4;}}' $i >> FINAL_results/Pimp/Pimp_seqLD; done
LR_lim=$(awk '$4>0{print $4;}' FINAL_results/Pimp/Pimp_seqLD | sort -k1,1n | awk '{row[NR]=$0;}END{perc=NR*0.95; for(i=1;i<=NR;i++) if(i>perc){print row[i]; exit;}}' - )
awk -v lim=$LR_lim '$4>lim{print $0;}' FINAL_results/Pimp/Pimp_seqLD | sort -k1,1 -k2,2n | bedtools merge -i - -d 500 -c 4,5 -o max | bedtools map -a - -b FINAL_results/Pimp/Pimp_ALLCHR_rho.bed -c 4 -o max | bedtools map -a - -b FINAL_results/Pimp/Pimp_ALLCHR_bgres -c 4 -o mean | awk '$7>0 && $6/$7>10{print $0;}' - > FINAL_results/Pimp/Pimp_seqLD_hotspots
#Convert p/kb to pb; Sum p per map interval, then divide by interval width (Mb); compute p/Mb
printf "" > Compare2Map/SLVintage_vs_EXPIM2012; for i in `ls SLVintage_Genomes/LDhat/SL4.0ch*_rho`; do printf "" > tempfile; awk 'NR==1{tmp=FILENAME; gsub(/^.*\//,"",tmp); gsub("_rho","",tmp); startp=$1*1000; rho=$2;} NR>1{endp=$1*1000; intervallen=endp-startp; rate=(rho/1000); for(i=startp;i<endp;i++){printf("%s\t%d\t%d\t%.10f\n",tmp,i,i,rate);} startp=$1*1000; rho=$2;}' $i >> tempfile; awk -v chr=$i 'BEGIN{gsub(/^.*\//,"",chr); gsub("_rho","",chr);} $1==chr{if(NR==1){pos=$2;cM=$3;}else{if(cM==$3) next; printf("%s\t%d\t%d\t%.10f\n", $1,pos,$2,(($3-cM)/($2-pos))*1000000); pos=$2; cM=$3;}}' GeneticMap/EXPIM2012_physicalpos.txt > tempfile2; bedtools map -a tempfile2 -b tempfile -c 4 -o sum | awk '{printf("%s\t%d\t%d\t%.10f\n",$1,$2,$3,($4/($3-$2))*1000000); }' >> Compare2Map/SLVintage_vs_EXPIM2012; done
| true
|
2256fcd71fc43d5c377a15a7829068ce444d3516
|
Shell
|
route4me/route4me-curl
|
/Linux/Optimizations/Hybrid Data Optimization/reoptimization.sh
|
UTF-8
| 373
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Example: trigger re-optimization of an existing Route4Me optimization
# problem via the REST API, saving the response to file1.txt.
base_url="https://api.route4me.com/api.v4/optimization_problem.php"
api_key="11111111111111111111111111111111"
problem_id="C86B7F391E6A08187FEA0D623B48DA32"
query="api_key=${api_key}&optimization_problem_id=${problem_id}&reoptimize=1"
# -g disables URL globbing; -k skips TLS certificate verification.
curl -o file1.txt -g -X PUT -k "${base_url}?${query}"
echo "Finished..."
sleep 15
| true
|
23268360344399928b0f95885b4e003a509e6db8
|
Shell
|
PrachitiP/Shell-Script-Problems
|
/IfElseProblems/monthday.sh
|
UTF-8
| 301
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Read a "Month day" date from stdin and report whether it falls strictly
# between Mar 20 and June 20 (True/False on stdout).
# (Dropped the leftover '-x' trace flag from the original shebang.)

# Normalize a date string to an integer MMDD sort key.
# The 10# prefix forces base-10: 'date +%m%d' emits zero-padded values such
# as "0820", which bash arithmetic would otherwise reject as invalid octal.
to_mmdd() {
    echo $(( 10#$(date -d "$1" +%m%d) ))
}

echo "Enter month and date:"
read -r x
x=$(to_mmdd "$x")
y=$(to_mmdd "Mar 20")
z=$(to_mmdd "June 20")
# Strictly inside the (Mar 20, Jun 20) interval?
if (( x > y && x < z ))
then
    echo "True"
else
    echo "False"
fi
| true
|
b213c26973d5ac4a2c871b30f9930944a43ae9bb
|
Shell
|
sebt3/diaspora-pi
|
/docker/build.sh
|
UTF-8
| 1,774
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build diaspora from source inside the image: install build dependencies,
# clone the tagged release, bundle the Ruby gems, then stage everything
# under /target for the final container layer.

# Print the arguments centered on an 80-column line of '=' padding.
sep() {
	local msg="$*"
	# Pad width for each side.  The original measured the length with
	# 'echo "%s" "$*"|wc -c' (a printf/echo mix-up), inflating it by 4.
	local c=$(( ( 80 - ${#msg} ) / 2 ))
	local s
	s=$(awk -v m="$c" 'BEGIN{for(c=0;c<m;c++) printf "=";}')
	echo "$s $msg $s"
}

set -e
useradd -m diaspora
sep Install build dependencies
apt-get update
apt-get install -y git build-essential cmake libssl-dev libcurl4-openssl-dev libxml2-dev libxslt-dev libmagickwand-dev libpq-dev imagemagick ghostscript curl nodejs
sep Cloning diaspora sources
# /tmp/.tags holds "<version>,<...>" — take the part before the comma.
su - diaspora -c "git clone -b v$(sed 's/,.*//' /tmp/.tags) https://github.com/diaspora/diaspora.git"
rm -rf /home/diaspora/diaspora/.git
chown -R diaspora:diaspora /home/diaspora/diaspora /usr/local/lib/ruby/gems /usr/local/bin
echo "tar czf source.tar.gz diaspora"|su - diaspora
cd /home/diaspora/diaspora
mv ../source.tar.gz ./public/
cp config/diaspora.yml.example config/diaspora.yml
cp config/database.yml.example config/database.yml
sep Install ruby bundler
gem install bundler pg
echo "cd ~/diaspora; script/configure_bundler"|su - diaspora
N=0
sep Build and install ruby dependencies
# bundle install is network-flaky: retry up to 10 times with -e suspended.
set +e
while [ $N -lt 10 ];do
	RAILS_ENV=production DB=postgres bundle install --without test development --with postgresql && break
	((N++))
done
set -e
sep Prepare /target
mv /home/diaspora/diaspora/public /home/diaspora/diaspora/pub
mkdir -p /target/usr/local/lib/ruby/gems /target/home/diaspora/ /target/usr/local/bin /target/usr/local/bundle /target/bin /target/tmp
cp -Rapf /usr/local/bin/* /target/usr/local/bin
cp -Rapf /usr/local/bundle/* /target/usr/local/bundle
cp -Rapf /home/diaspora/diaspora /target/home/diaspora/
cp -Rapf /usr/local/lib/ruby/gems/* /target/usr/local/lib/ruby/gems
cp -apf /bin/entrypoint.sh /target/bin
chown -R root:root /target/usr/
chmod 755 /target/bin/entrypoint.sh
sed 's/,.*//' /tmp/.tags >/target/diaspora_version
| true
|
0b89d7de447824e546c8672312d8fd19f2a3c41d
|
Shell
|
flahertylab/rvd
|
/HCC1187_data_rvd2.sh
|
UTF-8
| 699
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Run the RVD2 variant-calling pipeline on the HCC1187 subject PAXIP1 dataset.
#   $1 - directory of control depth-chart files
#   $2 - directory of case depth-chart files
line() {
    echo '----------------------'
}
line
echo 'Running RVD2 in the HCC1187 subject PAXIP1 gene dataset.'
echo 'Please provide the directory to the control and case hdf5 files.'
line
echo 'Generating control and case hdf5 file from depth chart files:'
# Gibbs sampling: 10 processes, 4000 samples, thinning 5; fixed seed on the
# control run for reproducibility.
python rvd27.py gibbs "$1" -o control_HCC1187 -p 10 -g 4000 -m 5 -s 199096
python rvd27.py gibbs "$2" -o case_HCC1187 -p 10 -g 4000 -m 5
echo 'Performing hypothesis test (germline test and somatic test)'
line
python rvd27.py germline_test control_HCC1187.hdf5
line
python rvd27.py somatic_test control_HCC1187.hdf5 case_HCC1187.hdf5
echo 'Done.'
| true
|
a4f8ba4a873d13d1a783a2e44afd66378a93aa27
|
Shell
|
mengxin-mx-cpu/standalone-linux-io-tracer
|
/source/kernel/configure.d/1_vm_ops.conf
|
UTF-8
| 1,052
| 3.03125
| 3
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-protobuf"
] |
permissive
|
#!/bin/bash
#
# Copyright(c) 2012-2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
# Kernel-compat probe: detect which signature the running kernel uses for
# struct vm_operations_struct.fault and record the variant in the config
# file ($1).  check()/apply() are invoked by the sourced conf_framework.
. $(dirname $3)/conf_framework
check() {
    cur_name=$(basename $2)
    config_file_path=$1
    # Variant 1: int fault(struct vm_area_struct *, struct vm_fault *)
    if compile_module $cur_name "struct vm_operations_struct vm; int (*fn1)(struct vm_area_struct*, struct vm_fault*) = NULL; vm.fault = fn1;" "linux/mm.h"
    then
        echo $cur_name "1" >> $config_file_path
    # Variant 2: vm_fault_t fault(struct vm_fault *)
    elif compile_module $cur_name "struct vm_operations_struct vm; vm_fault_t (*fn1)(struct vm_fault*) = NULL; vm.fault = fn1;" "linux/mm.h"
    then
        echo $cur_name "2" >> $config_file_path
    # Variant 2 (older spelling): int fault(struct vm_fault *)
    elif compile_module $cur_name "struct vm_operations_struct vm; int (*fn1)(struct vm_fault*) = NULL; vm.fault = fn1;" "linux/mm.h"
    then
        echo $cur_name "2" >> $config_file_path
    else
        # "X": no known variant compiled; apply() will abort the build.
        echo $cur_name "X" >> $config_file_path
    fi
}
apply() {
    case "$1" in
    "1")
        add_define "IOTRACE_VM_FUNC_TYPE 1";;
    "2")
        add_define "IOTRACE_VM_FUNC_TYPE 2";;
    *)
        exit 1
    esac
}
conf_run $@
| true
|
aa0e555552e44c92b6ef27773f0ecbdc45d7c943
|
Shell
|
changpk/AnimationTutorial
|
/MoveTurioal/usrConfig.sh
|
UTF-8
| 513
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/sh
# User-level build configuration: set a Chinese locale and resolve the
# workspace root used by the build scripts.
export LC_ALL=zh_CN.GB2312
export LANG=zh_CN.GB2312
# Parameters for locating the project root directory on this machine.
username="$USER"
desktopDir="/Users/${username}/Desktop/workspace"
echo "\n"
echo "$desktopDir"
| true
|
9bc529d1b06077c40976d3966bc65334ed9651f0
|
Shell
|
weilonge/configurations
|
/bin/file_send
|
UTF-8
| 100
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Send a file or directory to a remote "nc -l" listener as a tar stream.
# Usage: file_send <receiver-ip> <receiver-port> <path>
recv_host=$1
recv_port=$2
payload=$3
# 'tar cv' archives to stdout; nc ships the stream to the receiver.
tar cv "$payload" | nc "$recv_host" "$recv_port"
| true
|
2f1e8ffd7a562392a550422cc3edb7be0c6bf780
|
Shell
|
antocuni/home
|
/bin/antocuni_kbd
|
UTF-8
| 455
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
### This script needs to be placed in /lib/systemd/system-sleep/ ###
# systemd-sleep hook: invoked as <script> <pre|post> <suspend|hibernate|...>.
# "$1"/"$2" are quoted: the original unquoted '[ $1 = post ]' is a syntax
# error whenever the script runs with missing or empty arguments.
if [ "$1" = post ] && [ "$2" = suspend ]
then
    # Record the resume time and restore the user's keyboard layout.
    date '+%F %T' > /tmp/resumed
    chown antocuni:antocuni /tmp/resumed
    DISPLAY=:0.0 ; export DISPLAY
    HOME=/home/antocuni; export HOME
    # Wait for X to come back before re-applying the layout.
    (sleep 2; /home/antocuni/bin/kbd) &
fi
if [ "$1" = pre ]
then
    echo "Disabling Wake On Lan"
    sudo ethtool -s eth0 wol d
    # to query the current status: sudo ethtool eth0
fi
| true
|
814b19b4631cd6310d16888a1a3e4c743eafc0c8
|
Shell
|
xdreamseeker/zabbix
|
/redhat/checkarchivelog.sh
|
UTF-8
| 504
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Zabbix helper: print the percentage of Oracle fast recovery area space
# used by archive logs for the given instance (0 when not in ARCHIVELOG mode).
#   $1 - ORACLE_SID of the instance to query
sid=$1
#2016.6.22 solve ioctl for device problem
source /home/oracle/.bash_profile 2>/dev/null
export ORACLE_SID=$sid
# Silent sqlplus query.  The \\$ escapes keep the v$ view names literal
# inside the backtick command substitution — do not simplify them.
sqlresult=`sqlplus -S /nolog <<EOF
connect / as sysdba;
set echo off feedback off heading off underline off;
set timing off;
select ceil(sum(ru.percent_space_used * case when db.log_mode <> 'ARCHIVELOG' then 0 else 1 end )) percent_space_used \
from sys.v_\\$recovery_area_usage ru \
inner join sys.v_\\$database db on 1=1;
exit;
EOF`
# Coerce to an integer so stray whitespace or error text becomes 0.
typeset -i sqlresult
echo $sqlresult
| true
|
ef38725f23e1adce991f9fc343145cd00e6961df
|
Shell
|
plomovtsev/RAIC2016-scala-cgdk
|
/compile-scala.sh
|
UTF-8
| 1,296
| 2.9375
| 3
|
[] |
no_license
|
# Build the Russian AI Cup scala-cgdk strategy into a runnable jar.
# All compiler/packaging output is collected in compilation.log; the script
# exits 1 if any required input or expected output file is missing.
# Preconditions: SCALA_HOME and JAVA_HOME must be set.
if [ ! -f $SCALA_HOME/lib/scala-library.jar ]
then
echo Unable to find scala-library.jar in SCALA_HOME [$SCALA_HOME] > compilation.log
exit 1
fi
if [ ! -f src/main/java/Runner.java ]
then
echo Unable to find src/main/java/Runner.java > compilation.log
exit 1
fi
if [ ! -f src/main/scala/MyStrategy.scala ]
then
echo Unable to find src/main/scala/MyStrategy.scala > compilation.log
exit 1
fi
# Start from a clean output directory.
rm -rf classes
mkdir classes
# scalac compiles both Scala and Java sources (mixed compilation); javac
# then produces the .class files for the Java sources against them.
$SCALA_HOME/bin/scalac -sourcepath "src/main/scala" -d classes src/main/scala/*.scala src/main/scala/model/*.scala src/main/java/*.java src/main/java/model/*.java >compilation.log 2>&1
$JAVA_HOME/bin/javac -sourcepath "src/main/java" -cp "classes" -d classes src/main/java/*.java >>compilation.log 2>&1
if [ ! -f classes/MyStrategy.class ]
then
echo Unable to find classes/MyStrategy.class >> compilation.log
exit 1
fi
if [ ! -f classes/Runner.class ]
then
echo Unable to find classes/Runner.class >> compilation.log
exit 1
fi
# Package the jar with a manifest pointing at the Scala runtime jars, which
# are copied next to it (-n: do not overwrite existing copies).
echo Manifest-Version: 1.0 >MANIFEST.MF
echo Main-Class: Runner >>MANIFEST.MF
echo Class-Path: scala-library.jar scala-reflect.jar >>MANIFEST.MF
jar -cfm "./scala-cgdk.jar" MANIFEST.MF -C "./classes" . >>compilation.log 2>&1
cp -n $SCALA_HOME/lib/scala-library.jar $SCALA_HOME/lib/scala-reflect.jar .
| true
|
298f2ec35c300c1f093c7bb7ae944d2cb552e990
|
Shell
|
vijay299/playSMS
|
/daemon/linux/bin/sendsmsd
|
UTF-8
| 338
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# playSMS delivery daemon: every 2 seconds, ask sendsmsd.php for the list of
# pending queue IDs and spawn one background PHP worker per queue that is
# not already being processed.
. /etc/default/playsms
while true; do
for QUEUE in `$(which php) -q $PLAYSMS_BIN/sendsmsd.php $PLAYSMS_PATH _GETQUEUE_`; do
# Crude duplicate check: look for a running sendsmsd.php process whose
# command line mentions this queue ID.
# NOTE(review): ps|grep can false-positive if one queue ID is a substring
# of another — confirm queue IDs are fixed-width before relying on this.
EXISTS=`ps ax | grep $QUEUE | grep sendsmsd.php`
if [ -z "$EXISTS" ]; then
# Process the queue in the background, detached from our stdout.
$(which php) -q $PLAYSMS_BIN/sendsmsd.php $PLAYSMS_PATH _PROCESS_ $QUEUE >/dev/null 2>&1 &
fi
done
sleep 2;
done
| true
|
329013c3034f1e13984102e8f80468a5e57f4d39
|
Shell
|
IRTermite/osic-santaclara
|
/scripts/inv2cobbler_incsc.sh
|
UTF-8
| 3,324
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Translate an exported inventory spreadsheet (CSV) into `cobbler system`
# commands for the "incsc" environment. The commands are printed to stdout
# (not executed), so the output can be reviewed and piped to a shell.
# Network parameters are fixed for this environment:
INVENV=incsc
NETMASK=255.255.252.0
GATEWAY=172.22.0.1
DNS=8.8.8.8
#####################################################
# Need one argument which is the exported .csv file
#####################################################
if [ $# == 0 ]
then
echo "Usage: inv2cobbler_incsc.sh inventory_csv_file.csv"
exit 1
else
INVEXPORT=$1
if [ ! -e "${INVEXPORT}" ]
then
echo "${INVEXPORT} doesn't exist"
exit 1
fi
fi
###############################################################
# Create the cobbler commands from an exported spreadsheet list
###############################################################
# loop through export and pull out populate the ilo csv file
while read -r INV_LINE
do
# Only process rows belonging to this environment.
echo $INV_LINE | grep "${INVENV}" > /dev/null
if [ $? == 0 ]
then
# CSV columns (1-based): 4=MAC, 5=model, 6=iLO IP, 7=cobbler IP, 12=hostname.
ILOIP=$(echo $INV_LINE | awk -F ',' '/incsc/{print $6}')
COBBLERIP=$(echo $INV_LINE | awk -F ',' '/incsc/{print $7}')
MAC=$(echo $INV_LINE | awk -F ',' '/incsc/{print $4}')
MODEL=$(echo $INV_LINE | awk -F ',' '/incsc/{print $5}')
NAME=$(echo $INV_LINE | awk -F ',' '/incsc/{print $12}')
# Derive the node role from the hostname naming convention.
case $NAME in
*infra*)
ROLE='controller'
;;
*compute*)
ROLE='compute'
;;
*swift*)
ROLE='swift'
;;
*ceph*)
ROLE='ceph'
;;
*cinder*)
ROLE='cinder'
;;
*logger*)
ROLE='logging'
;;
*)
ROLE='network'
;;
esac
# Use a single seed file and provide ksmeta data for the disk config
PROFILE='ubuntu-14.04.5-server-unattended-osic'
case "$ROLE" in
cinder)
DISK_CONFIG='cinder'
;;
swift)
DISK_CONFIG='swift'
;;
ceph)
DISK_CONFIG='ceph'
;;
*)
DISK_CONFIG='generic'
;;
esac
# Select pxe interface via model
case "$MODEL" in
LENOVO)
INTERFACE='p1p1'
;;
Dell)
INTERFACE='em1'
;;
*)
INTERFACE='unknown'
;;
esac
# Create the input file
INPUTLINE="${NAME},${MAC},${COBBLERIP},${NETMASK},${GATEWAY},${DNS},${INTERFACE},${PROFILE}"
# An empty CSV field leaves ',,' in the assembled line — skip such rows.
echo $INPUTLINE | grep ',,' > /dev/null
if [ $? == 0 ]
then
echo "Skipping on missing field: $INPUTLINE" >&2
continue
fi
# Check for an existing cobbler profile
# NOTE(review): this queries the live cobbler server; the remove command is
# only printed (like the add), not executed.
cobbler system report --name ${NAME} > /dev/null
if [ $? == 0 ]
then
echo "cobbler system remove --name ${NAME}"
fi
echo "cobbler system add --name=${NAME} --mac=${MAC} --profile=${PROFILE} --hostname=${NAME} --interface=${INTERFACE} --ip-address=${COBBLERIP} --subnet=${NETMASK} --gateway=${GATEWAY} --name-servers=${DNS} --kopts=\"interface=${INTERFACE} console=tty0 console=ttyS0,115200n8\" --ksmeta=\"disk_config=${DISK_CONFIG} model=${MODEL}\""
fi
done < $INVEXPORT
| true
|
8f708198ed239a00c5935c2138ec9daa3f70604f
|
Shell
|
mlorenzo-stratio/scriptcellar
|
/monitoring/nagios-plugins/oracle/check_oracle_session_front.old
|
UTF-8
| 698
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Nagios plugin: count open Oracle front-end sessions and compare them
# against warning/critical thresholds.
#
# Usage:  check_oracle_session_front WARNING CRITICAL
# Output: Nagios status line with perfdata; exit 0 (OK) / 1 (WARN) / 2 (CRIT).
STATE_OK=0
STATE_WARNING=1
STATE_CRITICAL=2
WARNING=$1
CRITICAL=$2

vcountsession=0
# The helper prints a count surrounded by text; keep only the digits.
vcountsession=$(sudo -u oracle /usr/local/nagios/libexec/check_session_front | sed -e "s/[^0-9]*\([0-9]*\)[^0-9]*/\1/")

# Quoted operands keep `[` well-formed if any value is empty; elif replaces
# the original nested if/else with identical logic.
if [ "$vcountsession" -ge "$CRITICAL" ]; then
    echo "SESSIONS CRITICAL: $vcountsession sesiones abiertas | sessions=$vcountsession;$WARNING;$CRITICAL"
    exit $STATE_CRITICAL
elif [ "$vcountsession" -ge "$WARNING" ]; then
    echo "SESSIONS WARNING: $vcountsession sesiones abiertas | sessions=$vcountsession;$WARNING;$CRITICAL"
    exit $STATE_WARNING
else
    echo "SESSIONS OK: $vcountsession sesiones abiertas | sessions=$vcountsession;$WARNING;$CRITICAL"
    exit $STATE_OK
fi
| true
|
3aa960b8b6f565704edd6fbb8368a485061bfb91
|
Shell
|
monmonpig/dotfiles
|
/setup
|
UTF-8
| 661
| 4.15625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Symlink every _foo file in this repository into $HOME as .foo, backing up
# any existing file that differs (the old copy is kept as <file>.bak).

CWD=$(pwd)
echo "Change to $CWD";

# link NAME — symlink $CWD/NAME to $HOME/.NAME (the first '_' becomes '.'),
# backing up an existing regular file whose content differs from the source.
function link {
    srcfile="${CWD}/${1}"
    destfile="$HOME/${1/_/.}"
    echo "${destfile}"
    if [ -f "$destfile" ]; then
        if [ -n "$(diff "$srcfile" "$destfile")" ]; then
            backup "$destfile"
        fi
    fi
    ln -s "$srcfile" "$destfile"
}

# backup FILE — move FILE aside to FILE.bak, replacing any stale backup.
function backup {
    if [ -r "$1" ]; then
        backupfile="$1.bak"
        if [ -r "$backupfile" ]; then
            rm "$backupfile"
        fi
        mv "$1" "$backupfile"
        echo "Backup $1 to $backupfile"
    else
        echo "File $1 doesn't exist."
    fi
}

# Glob directly instead of parsing `ls -d _*`, so names containing spaces
# survive; skip cleanly when no dotfile sources exist.
for file in _*; do
    [ -e "$file" ] || continue
    link "$file"
done
| true
|
16b26f479dabc0ee1fcc096f60013ba90a07bd4e
|
Shell
|
metalter/shell
|
/2.sh
|
UTF-8
| 304
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
#author:metalter
# Interactively create a user. The password is read with terminal echo
# disabled and defaults to 123456 when left empty.
read -p "请输入用户名" user
if [ -z "$user" ];then
    echo "未输入用户名"
    # Previously a bare `exit` returned 0 here, hiding the failure.
    exit 1
fi
# stty -echo disables terminal echo / stty echo re-enables it.
# Restore echo on any exit so an interrupt mid-read does not leave the
# terminal with echo turned off.
trap 'stty echo' EXIT
stty -echo
read -p "请输入密码" pass
stty echo
pass=${pass:-123456}
useradd "$user"
# passwd --stdin is RHEL/CentOS-specific.
echo "$pass" | passwd --stdin "$user"
| true
|
8eebf7539bc121b997e4ef983013689a1ccdc5a6
|
Shell
|
jasonisgett/MacAdmin
|
/Software/Matlab/license_Matlab.sh
|
UTF-8
| 2,237
| 4.03125
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
###################################################################################################
# Script Name:	license_Matlab.sh
# By:  Zack Thompson / Created:  1/10/2018
# Version:  1.2.0 / Updated:  4/19/2021 / By:  ZT
#
# Description:  This script applies the license for Matlab.
#
###################################################################################################

echo "***** License Matlab process:  START *****"

##################################################
# Define Variables

# Find all instances of Matlab
app_paths=$( /usr/bin/find /Applications -iname "Matlab*.app" -maxdepth 1 -type d )

# Verify that a Matlab version was found.
if [[ -z "${app_paths}" ]]; then
	echo "A version of Matlab was not found in the expected location!"
	echo "***** License Matlab process:  FAILED *****"
	exit 1
else
	# If the machine has multiple Matlab Applications, loop through them...
	# (newline-delimited read keeps paths containing spaces intact)
	while IFS=$'\n' read -r app_path; do
		# Get the Matlab version
		# shellcheck disable=SC2002
		app_version=$( /bin/cat "${app_path}/VersionInfo.xml" | /usr/bin/grep release | /usr/bin/awk -F "<(/)?release>" '{print $2}' )
		echo "Applying License for Version:  ${app_version}"
		# Build the license file location
		license_folder="${app_path}/licenses"
		license_file="${license_folder}/network.lic"
		# Create the licenses folder if this Matlab install lacks one.
		if [[ ! -d "${license_folder}" ]]; then
			# shellcheck disable=SC2174
			/bin/mkdir -p -m 755 "${license_folder}"
			/usr/sbin/chown root:admin "${license_folder}"
		fi
		##################################################
		# Create the license file.
		# (heredoc body is the FlexLM network-license stanza)
		echo "Creating license file..."
		/bin/cat > "${license_file}" <<licenseContents
SERVER license.server.com 11000
USE_SERVER
licenseContents
		if [[ -e "${license_file}" ]]; then
			# Set permissions on the file for everyone to be able to read.
			echo "Applying permissions to license file..."
			/bin/chmod 644 "${license_file}"
			/usr/sbin/chown root:admin "${license_file}"
		else
			echo "ERROR:  Failed to create the license file!"
			echo "***** License Matlab process:  FAILED *****"
			exit 2
		fi
	done < <(echo "${app_paths}")
fi

echo "Matlab has been activated!"
echo "***** License Matlab process:  COMPLETE *****"
exit 0
| true
|
9a92f3dc86357115031e0e82b3e524c453592f8b
|
Shell
|
elishani/azure-templates
|
/scripts/install-rancher2.sh
|
UTF-8
| 649
| 3.359375
| 3
|
[] |
no_license
|
# Generate an RKE cluster.yml from the VMs in an Azure resource group:
# list the public IPs, skip the first (the Rancher server/load balancer),
# then ssh to each node to discover its private IP and hostname.
#
# $1 - Azure resource group name.
resource_group_name=${1:?usage: install-rancher2.sh RESOURCE_GROUP}
export file_name=cluster.yml

# One public IP per line from the resource group.
az network public-ip list --resource-group "$resource_group_name" | grep '"ipAddress":' | awk -F'"' '{print $4}' > iplist.txt
# Drop the first entry; the remaining IPs are the cluster nodes.
sed -e "1d" iplist.txt > vm.txt

echo "nodes:" > "$file_name"
i=1
# while-read replaces `for public in $(cat vm.txt)`, which word-splits.
while IFS= read -r public; do
    # Private address on the 10.0.0.0 subnet, as seen from the node itself.
    private=$(ssh -o "StrictHostKeyChecking no" "vm@$public" /sbin/ip addr | grep 'inet 10.0' | awk '{print $2}' | cut -d'/' -f1)
    host_name=$(ssh "vm@$public" hostname)
    cat >> "$file_name" <<EOF
- address: $public
  internal_address: $private
  user: vm
  role: [controlplane, worker, etcd]
  hostname_override: $host_name
EOF
    i=$((i + 1))
done < vm.txt
| true
|
a12d2f6ea145deab134f38b04e71e28743500d00
|
Shell
|
SoumyajitTech/zsh-Power
|
/zsh-autocomplete/zsh-autocomplete.plugin.zsh
|
UTF-8
| 729
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
# Entry point for the zsh-autocomplete plugin: set required shell options,
# put the plugin's function directories on fpath, and run the initializer.
setopt alwayslastprompt NO_singlelinezle
# Anonymous function: gives the setup its own (localized) option scope.
() {
emulate -L zsh
# Expose $functions so we can test for zinit's shade hooks below.
zmodload -F zsh/parameter p:functions
# Workaround for https://github.com/zdharma/zinit/issues/366
[[ -v functions[.zinit-shade-off] ]] &&
.zinit-shade-off "${___mode:-load}"
# Options the plugin relies on, kept in a global (hidden) array so other
# plugin functions can re-apply them.
typeset -gHa _autocomplete__options=(
localoptions extendedglob rcquotes
NO_aliases NO_banghist NO_caseglob NO_clobber NO_listbeep
)
setopt $_autocomplete__options
# Prepend every subdirectory of this script's own directory to fpath
# (%x = path of the file currently being sourced; (-/) = dirs only).
export -U FPATH fpath=( ${${(%):-%x}:A:h}/*(-/) $fpath )
builtin autoload -Uz .autocomplete.__init__
.autocomplete.__init__
# Workaround for https://github.com/zdharma/zinit/issues/366
[[ -v functions[.zinit-shade-on] ]] &&
.zinit-shade-on "${___mode:-load}"
return 0
}
| true
|
b93f5f99826d232f805ac2361d4c4f0418a900e8
|
Shell
|
d11wtq/gentoo-personal-deps
|
/deps/gruvbox_theme.sh
|
UTF-8
| 198
| 2.90625
| 3
|
[] |
no_license
|
# Dependency definition for the gruvbox terminal theme. Framework contract:
# the caller invokes gruvbox_theme, then uses is_met to test the state and
# meet to converge it. $p (the package root) is provided by the framework.
gruvbox_theme() {
    new_config=$p/gruvbox_theme/gruvbox.sh
    old_config=~/.bashrc.d/gruvbox.sh
    # is_met — succeeds when the installed config matches the packaged one.
    # Quoted paths keep diff working if $p or $HOME contains spaces.
    is_met() {
        diff "$old_config" "$new_config"
    }
    # meet — install (overwrite) the packaged config.
    meet() {
        cp -f "$new_config" "$old_config"
    }
}
| true
|
71ef72b1cb6ae1a340b197b191802e629ab0fd94
|
Shell
|
stucchimax/reverse-dns-stats
|
/run.sh
|
UTF-8
| 872
| 3.03125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Daily reverse-DNS statistics run: download the RIPE NCC delegated-extended
# file and the split DOMAIN database into a fresh per-day directory, parse
# them into CSVs, then compute per-prefix stats and correlations.
DATE=$(date "+%Y%m%d");
# -p makes a same-day rerun safe; the cd must be checked, otherwise the
# downloads and the rm below would run in the parent directory.
mkdir -p "$DATE"
cd "$DATE" || exit 1

# fetch delegated file
# fetch split domain file
wget -q "ftp://ftp.ripe.net/pub/stats/ripencc/delegated-ripencc-extended-latest"
wget -q "ftp://ftp.ripe.net/ripe/dbase/split/ripe.db.domain.gz"

# Get stats from delegated file
../parse_delegated.py > delegated_parsed.csv

# Get all the infos from the domain files
../parse_domain_splitfile.py > domains.csv

# Raw downloads are no longer needed once parsed.
rm delegated-ripencc-extended-latest ripe.db.domain.gz

../perPrefixStats.py
../computeCorrelations.py

# tail -n+2 revDel_perPrefix_stats.csv | awk 'BEGIN{FS="|"; twentyfive = 0; fifty = 0; seventyfive = 0; hundred = 0;} {if ($5 <= 25) {twentyfive++;} else if (($5 > 25) && ($5 <= 50)) {fifty++;} else if (($5 > 50) && ($5 <= 75)) {seventyfive++;} else if (($5 > 75)) {hundred++;}} END{print(twentyfive " - " fifty " - " seventyfive " - " hundred);}'
| true
|
2855fa6fc414f4ed0305b7aeac06e9a5e0dc3a4e
|
Shell
|
ajt73/LFS-10.0
|
/lfs/temp/lfs.stage2
|
UTF-8
| 1,765
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# © Anthony Taylor 2020
#
# Stage 2 of LFS installation
# Installation of remaining Temporary Tools
# Installation of the Basic System Software
#
# Derived from Linux From Scratch, revision 10.0
# http://www.linuxfromscratch.org/lfs/
# Chapter 7
#
# Version 0.1
# Runs inside the LFS chroot: build out the FHS directory tree.
mkdir -p /{boot,home,mnt,opt,srv}
mkdir -p /etc/{opt,sysconfig}
mkdir -p /lib/firmware
mkdir -p /media/{floppy,cdrom}
mkdir -p /usr/{,local/}{bin,include,lib,sbin,src}
mkdir -p /usr/{,local/}share/{color,dict,doc,info,locale,man}
mkdir -p /usr/{,local/}share/{misc,terminfo,zoneinfo}
mkdir -p /usr/{,local/}share/man/man{1..8}
mkdir -p /var/{cache,local,log,mail,opt,spool}
mkdir -p /var/lib/{color,misc,locate}
# Legacy locations point at the modern /run tmpfs.
ln -sf /run /var/run
ln -sf /run/lock /var/lock
# root's home is private; tmp dirs are world-writable with the sticky bit.
install -d -m 0750 /root
install -d -m 1777 /tmp /var/tmp
# /etc/mtab is a view of the kernel's mount table.
ln -sf /proc/self/mounts /etc/mtab
echo "127.0.0.1 localhost $(hostname)" > /etc/hosts
# Minimal passwd/group files so tools that resolve names and IDs work
# inside the chroot (per LFS chapter 7.6).
cat > /etc/passwd << "EOF"
root:x:0:0:root:/root:/bin/bash
bin:x:1:1:bin:/dev/null:/bin/false
daemon:x:6:6:Daemon User:/dev/null:/bin/false
messagebus:x:18:18:D-Bus Message Daemon User:/var/run/dbus:/bin/false
nobody:x:99:99:Unprivileged User:/dev/null:/bin/false
EOF
cat > /etc/group << "EOF"
root:x:0:
bin:x:1:daemon
sys:x:2:
kmem:x:3:
tape:x:4:
tty:x:5:
daemon:x:6:
floppy:x:7:
disk:x:8:
lp:x:9:
dialout:x:10:
audio:x:11:
video:x:12:
utmp:x:13:
usb:x:14:
cdrom:x:15:
adm:x:16:
messagebus:x:18:
input:x:24:
mail:x:34:
kvm:x:61:
wheel:x:97:
nogroup:x:99:
users:x:999:
EOF
# Temporary unprivileged account used by some toolchain test suites.
echo "tester:x:101:101::/home/tester:/bin/bash" >> /etc/passwd
echo "tester:x:101:" >> /etc/group
install -o tester -d /home/tester
# Log files must exist (with proper ownership) before daemons append to them.
touch /var/log/{btmp,lastlog,faillog,wtmp}
chgrp utmp /var/log/lastlog
chmod 664  /var/log/lastlog
chmod 600  /var/log/btmp
# Build temporary toolchain
lfsbuild stage2.lfsbuild
| true
|
3c636c0274bc5a63f1589b2ff0847616a0918cae
|
Shell
|
ECE465-Cloud-Computing/ECE-465-Cloud-Computing-Final
|
/Scripts/run.sh
|
UTF-8
| 2,585
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Deploy and launch the distributed app on the AWS lab cluster: each worker
# node downloads its own airline data from S3 and starts a worker server;
# the elastic-IP node then runs the coordinator against all worker IPs.
# fetch config from AWS for currently running infrastructure
# (defines PREAMBLE, APP_TAG_*, KEY_FILE, USER, S3_NAME, AIRLINES, ELASTIC_IP)
source ./load_lab_config.sh

NOW=$(date '+%Y%m%d%H%M%S')
LOGFILE="./run-${NOW}.log"

echo "Running Full AWS infrastructure for ${APP_TAG_NAME}:${APP_TAG_VALUE}" | tee ${LOGFILE}
echo "Running run.sh at ${NOW}" | tee -a ${LOGFILE}

PROG="backend-0.0.1-jar-with-dependencies.jar"

# get public IP addresses of the instances (in the public subnet)
INSTANCES_IPS=$(aws ec2 describe-instances ${PREAMBLE} --filters Name=instance-state-name,Values=running Name=tag:${APP_TAG_NAME},Values=${APP_TAG_VALUE} --query 'Reservations[*].Instances[*].[PublicIpAddress]' --output text | tr '\n' ' ')
PRIVATE_IPS=$(aws ec2 describe-instances ${PREAMBLE} --filters Name=instance-state-name,Values=running Name=tag:${APP_TAG_NAME},Values=${APP_TAG_VALUE} --query 'Reservations[*].Instances[*].[PrivateIpAddress]' --output text | tr '\n' ' ')
echo "Public IP addresses: ${INSTANCES_IPS}" | tee -a ${LOGFILE}
echo "Private IP addresses: ${PRIVATE_IPS}" | tee -a ${LOGFILE}

# Change ips into array to work with
IFS=' ' read -r -a INSTANCES_IPS_ARRAY <<< "$INSTANCES_IPS"
IFS=' ' read -r -a PRIVATE_IPS_ARRAY <<< "$PRIVATE_IPS"

# Get number of ips for indexing
NUM_IPS=0
for host in ${INSTANCES_IPS}
do
NUM_IPS=$((NUM_IPS+1))
done
echo "${NUM_IPS}"
echo "${PRIVATE_IPS_ARRAY[0]}"

# For all but last EC2 instance, download own corresponding graph and start up worker server.
for ((i = 0 ; i < NUM_IPS-1 ; i++)); do
# Each worker fetches its airline's money/time graphs from S3.
ssh -i ${KEY_FILE} ${USER}@${INSTANCES_IPS_ARRAY[${i}]} "aws s3 cp s3://${S3_NAME}/${AIRLINES[${i}]}_money.txt ./${AIRLINES[${i}]}_money.txt"
ssh -i ${KEY_FILE} ${USER}@${INSTANCES_IPS_ARRAY[${i}]} "aws s3 cp s3://${S3_NAME}/${AIRLINES[${i}]}_time.txt ./${AIRLINES[${i}]}_time.txt"
echo "Running ${PROG} at ${USER}@${INSTANCES_IPS_ARRAY[${i}]}:~/ ..." | tee -a ${LOGFILE}
# Kill any stale JVM from a previous run, then start the worker detached
# (disown keeps it alive after this script exits).
ssh -i ${KEY_FILE} ${USER}@${INSTANCES_IPS_ARRAY[${i}]} "killall -9 java"
(ssh -i ${KEY_FILE} ${USER}@${INSTANCES_IPS_ARRAY[${i}]} "java -cp ${PROG} edu.cooper.ece465.WorkerMain 6666 ${AIRLINES[${i}]}" | tee -a ${LOGFILE}) & disown %1
# ssh -n -f user@host "sh -c 'nohup java -cp ${PROG} edu.cooper.ece465.Main 6666 > /dev/null 2>&1 &'"
done
# Brief pause so the workers are listening before the coordinator connects.
sleep 1
# For last EC2 instance, start up coordinator server
ssh -i ${KEY_FILE} ${USER}@${ELASTIC_IP} "killall -9 java"
ssh -i ${KEY_FILE} ${USER}@${ELASTIC_IP} "java -cp ${PROG} edu.cooper.ece465.CoordinatorMainV2 6666 ${PRIVATE_IPS_ARRAY[0]} ${PRIVATE_IPS_ARRAY[1]} ${PRIVATE_IPS_ARRAY[2]} ${PRIVATE_IPS_ARRAY[3]}" | tee -a ${LOGFILE}
echo "Done." | tee -a ${LOGFILE}
exit 0
| true
|
73f02ee9294fcbc8868a60f073a3033c1c55a673
|
Shell
|
sgwilbur/database-scripts
|
/db2/add_buildforge_db.sh
|
UTF-8
| 2,180
| 2.609375
| 3
|
[] |
no_license
|
#REM Tested and fully working as of 9-Feb-10
#REM
#REM --
#REM -- Setup path for the local environment information
#REM --
# Create and configure a DB2 database for IBM Build Forge:
#   $1 - OS user to grant access to
#   $2 - database (and schema) name
export USER=$1
export DBNAME=$2
export SCHEMA=${DBNAME}
export DBPATH=/home/db2inst1/db2inst1/NODE0000/${DBNAME}
export DBALIAS=${DBNAME}
# Build Forge requires a 32K page size (and a matching bufferpool).
export PAGESIZE=32K
export BPNAME=BUFFP1
export TS_SYSTEMP=TEMPSPACE2
export TS_APPTEMP=BFUSER_TEMP
export TS_APPUSR=USERSPACE2
#-- Create our database
db2 CREATE DATABASE ${DBNAME} ALIAS ${DBALIAS} USING CODESET UTF-8 TERRITORY US AUTOCONFIGURE USING MEM_PERCENT 40 APPLY DB ONLY
#-- close any open CLI connections
db2 TERMINATE
# -- Restart our instance.
db2stop
db2start
# -- Connect to our new database and add the buffer pool.
db2 CONNECT TO ${DBALIAS}
db2 CREATE BUFFERPOOL ${BPNAME} IMMEDIATE SIZE 1024 PAGESIZE ${PAGESIZE}
# -- Close connection and reconnect.
# (reconnect so the new bufferpool is visible to the tablespace DDL below)
db2 CONNECT RESET
db2 CONNECT TO ${DBALIAS}
# -- Create system/temp/user tablespaces in current database
# (parentheses/quotes are escaped so the shell passes them through to db2)
db2 CREATE SYSTEM TEMPORARY TABLESPACE ${TS_SYSTEMP} PAGESIZE ${PAGESIZE} MANAGED BY SYSTEM USING \(\'${DBPATH}/${TS_SYSTEMP}.0\'\) EXTENTSIZE 64 PREFETCHSIZE 64 BUFFERPOOL ${BPNAME}
db2 CREATE USER TEMPORARY TABLESPACE ${TS_APPTEMP} PAGESIZE ${PAGESIZE} MANAGED BY SYSTEM USING \(\'${DBPATH}/${TS_APPTEMP}.0\'\) EXTENTSIZE 64 PREFETCHSIZE 64 BUFFERPOOL ${BPNAME}
db2 CREATE REGULAR TABLESPACE ${TS_APPUSR} PAGESIZE ${PAGESIZE} MANAGED BY SYSTEM USING \(\'${DBPATH}/${TS_APPUSR}.0\'\) EXTENTSIZE 64 PREFETCHSIZE 64 BUFFERPOOL ${BPNAME}
#-- User must be granted use of TSAPPTEMP tablespace
db2 GRANT USE OF TABLESPACE ${TS_APPTEMP} TO USER ${USER} WITH GRANT OPTION
db2 GRANT USE OF TABLESPACE ${TS_APPUSR} TO USER ${USER} WITH GRANT OPTION
db2 COMMIT WORK
db2 CONNECT RESET
db2 TERMINATE
# Final pass: create the application schema and grant the user rights in it.
db2 CONNECT TO ${DBNAME}
db2 GRANT CREATETAB,CONNECT,IMPLICIT_SCHEMA ON DATABASE TO USER ${USER}
db2 CREATE SCHEMA ${SCHEMA}
db2 SET CURRENT SCHEMA = ${SCHEMA}
#db2 GRANT AlTERIN, CREATEIN, DROPIN ON SCHEMA ${SCHEMA} TO USER ${USER}
db2 GRANT CREATETAB,CONNECT,IMPLICIT_SCHEMA ON DATABASE TO USER ${USER}
db2 GRANT CREATEIN,DROPIN,ALTERIN ON SCHEMA ${SCHEMA} TO USER ${USER} WITH GRANT OPTION
db2 CONNECT RESET
db2 TERMINATE
| true
|
d4ac1641faee0ff233fb7321dea2903be5cdbe5c
|
Shell
|
ODEX-TOS/packages
|
/libmythes/repos/extra-x86_64/PKGBUILD
|
UTF-8
| 1,113
| 2.609375
| 3
|
[
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
# Maintainer: AndyRTR <andyrtr@archlinux.org>
# Arch Linux PKGBUILD for MyThes (simple thesaurus library).
# Note: the upstream tarball is named "mythes-<ver>"; ${pkgname/lib/}
# strips the "lib" prefix to recover that name throughout.

pkgname=libmythes
pkgver=1.2.4
pkgrel=4
epoch=1
pkgdesc="a simple thesaurus"
arch=('x86_64')
url="https://hunspell.github.io/"
license=('custom')
depends=('glibc' 'perl')
makedepends=('hunspell')
provides=('mythes')
source=(https://downloads.sourceforge.net/hunspell/${pkgname/lib/}-${pkgver}.tar.gz)
sha512sums=('a04da39812bcfb1391a2cba7de73e955eafe141679ec03ed6657d03bebf360b432480d0037dff9ed72a1dfda5a70d77d44ac2bb14cdb109fd8e2a38376feee21')

build() {
  cd "${pkgname/lib/}"-$pkgver
  ./configure --prefix=/usr --disable-static
  make
}

check() {
  cd "${pkgname/lib/}"-$pkgver
  # run the example program:
  ./example th_en_US_new.idx th_en_US_new.dat checkme.lst
  # run the example program with stemming and morphological generation:
  # e.g. to check mouse, mice, rodents, eats, eaten, ate, eating etc. words
  ./example morph.idx morph.dat morph.lst morph.aff morph.dic
}

package() {
  cd "${pkgname/lib/}"-$pkgver
  make DESTDIR="$pkgdir" install
  # license
  install -Dm644 "${srcdir}"/${pkgname/lib/}-$pkgver/COPYING "$pkgdir"/usr/share/licenses/$pkgname/COPYING
}
| true
|
db988a2009390f6f3fb7a8c6554a466a6340f90e
|
Shell
|
knikolla/regapp
|
/makemigrations.sh
|
UTF-8
| 801
| 2.625
| 3
|
[] |
no_license
|
#! /bin/bash
# Run Django makemigrations for regapp inside the published container image,
# mounting the local checkout so generated migrations land in the work tree.
#
# set -e makes the script stop if docker login/pull fails instead of
# running the container against a stale or missing image.
set -euo pipefail

echo "##############################################"
echo "Make sure to unset minikube docker-env if set!"
echo "##############################################"

REGAPP_REPO=ghcr.io
PROJECT_LOCATION=/home/jculbert/development/nerc
REGAPP_IMAGE=$REGAPP_REPO/nerc-project/regapp:master

docker login "$REGAPP_REPO"
docker pull "$REGAPP_IMAGE"

# The dummy values satisfy the app's required settings; makemigrations does
# not actually contact email or OIDC services.
docker run --rm \
    -e "PYTHONPATH=/code" \
    -e "DJANGO_SECRET_KEY=dummy" \
    -e "REGAPP_EMAIL_SUPPORT=dummy" \
    -e "REGAPP_EMAIL_HOST=dummy" \
    -e "REGAPP_EMAIL_USE_TLS=dummy" \
    -e "REGAPP_EMAIL_PORT=0" \
    -e "REGAPP_EMAIL_HOST_USER=dummy" \
    -e "REGAPP_EMAIL_HOST_PASSWORD=dummy" \
    -e "REGAPP_REGAPP_CLIENT_ID=dummy" \
    -e "REGAPP_REGAPP_CLIENT_SECRET=dummy" \
    -v "$PROJECT_LOCATION":/code "$REGAPP_IMAGE" \
    sh -c "python regapp/manage.py makemigrations"
| true
|
a1cb9871c5d90a74d6e4ec1d2dfe85cc4dcefcfb
|
Shell
|
aigarskadikis/vlc-detect
|
/check.sh
|
UTF-8
| 4,555
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh
# Watch the VideoLAN download site for new VLC Windows builds. For each new
# build: download it, record name/version/md5/sha1 in a flat-file database,
# optionally upload to Google Drive, and email subscribers. Problems are
# mailed to the addresses in ../maintenance.
#this code is tested un fresh 2015-11-21-raspbian-jessie-lite Raspberry Pi image
#by default this script should be located in two subdirecotries under the home
#sudo apt-get update -y && sudo apt-get upgrade -y
#sudo apt-get install git -y
#mkdir -p /home/pi/detect && cd /home/pi/detect
#git clone https://github.com/catonrug/vlc-detect.git && cd vlc-detect && chmod +x check.sh && ./check.sh
#check if script is located in /home direcotry
pwd | grep "^/home/" > /dev/null
if [ $? -ne 0 ]; then
echo script must be located in /home direcotry
return
fi
#it is highly recommended to place this directory in another directory
# Depth check: count path components; the layout assumes ../tmp, ../db,
# ../gd and the helper scripts live one level up.
deep=$(pwd | sed "s/\//\n/g" | grep -v "^$" | wc -l)
if [ $deep -lt 4 ]; then
echo please place this script in deeper directory
return
fi
#set application name based on directory name
#this will be used for future temp directory, database name, google upload config, archiving
appname=$(pwd | sed "s/^.*\///g")
#set temp directory in variable based on application name
tmp=$(echo ../tmp/$appname)
#create temp directory
if [ ! -d "$tmp" ]; then
mkdir -p "$tmp"
fi
#check if database directory has prepared
if [ ! -d "../db" ]; then
mkdir -p "../db"
fi
#set database variable
db=$(echo ../db/$appname.db)
#if database file do not exist then create one
if [ ! -f "$db" ]; then
touch "$db"
fi
#check if google drive config directory has been made
#if the config file exists then use it to upload file in google drive
#if no config file is in the directory there no upload will happen
if [ ! -d "../gd" ]; then
mkdir -p "../gd"
fi
if [ -f ~/uploader_credentials.txt ]; then
sed "s/folder = test/folder = `echo $appname`/" ../uploader.cfg > ../gd/$appname.cfg
else
echo google upload will not be used cause ~/uploader_credentials.txt do not exist
fi

# Product name, download root, and the architectures to poll (the heredoc's
# "extra line" is deliberate junk that matches nothing on the site).
name=$(echo "VLC Media Player")
base=$(echo "https://get.videolan.org/vlc/last")
architectures=$(cat <<EOF
win32
win64
extra line
EOF
)

# --spider only checks reachability; the response headers go to output.log.
wget -S --spider -o $tmp/output.log "$base/"
grep -A99 "^Resolving" $tmp/output.log | grep "HTTP.*200 OK"
if [ $? -eq 0 ]; then
#if file request retrieve http code 200 this means OK
printf %s "$architectures" | while IFS= read -r architecture
do {
# Scrape the directory listing for the newest installer name
# (\d034 is sed's octal escape for a double quote in the HTML).
filename=$(wget -qO- $base/$architecture/ | grep -v "$architecture\.exe\." | sed "s/exe/exe\n/g" | sed "s/\d034\|>/\n/g" | grep "$architecture\.exe" | head -1)
# Already recorded in the database? Then nothing to do.
grep "$filename" $db > /dev/null
if [ $? -ne 0 ]; then
echo new version detected!
# Resolve the redirect target to get the real download URL.
wget -S --spider -o $tmp/output.log $base/$architecture/$filename -q
url=$(sed "s/http/\nhttp/g" $tmp/output.log | sed "s/exe/exe\n/g" | grep "^http.*exe$")
echo $url
echo Downloading $filename
wget $url -O $tmp/$filename -q
echo
echo creating md5 checksum of file..
md5=$(md5sum $tmp/$filename | sed "s/\s.*//g")
echo
echo creating sha1 checksum of file..
sha1=$(sha1sum $tmp/$filename | sed "s/\s.*//g")
echo
# The version is the hyphen-separated token with no letters, e.g. 2.2.1.
version=$(echo "$filename" | sed "s/-/\n/g" | grep -v "[a-z]")
echo $version | grep "^[0-9]\+[\., ]\+[0-9]\+[\., ]\+[0-9]\+"
if [ $? -eq 0 ]; then
echo
# Append one record (name, version, md5, sha1, blank separator).
echo "$filename">> $db
echo "$version">> $db
echo "$md5">> $db
echo "$sha1">> $db
echo >> $db
#if google drive config exists then upload and delete file:
if [ -f "../gd/$appname.cfg" ]
then
echo Uploading $filename to Google Drive..
echo Make sure you have created \"$appname\" directory inside it!
../uploader.py "../gd/$appname.cfg" "$tmp/$filename"
echo
fi
case "$architecture" in
win32)
bit=$(echo "(32-bit)")
;;
win64)
bit=$(echo "(64-bit)")
;;
esac
#lets send emails to all people in "posting" file
# (the appended "end of file" line makes read loops see a final line even
# if the list file lacks a trailing newline)
emails=$(cat ../posting | sed '$aend of file')
printf %s "$emails" | while IFS= read -r onemail
do {
python ../send-email.py "$onemail" "$name $version $bit" "$url
https://4e7299a03ac49455dce684f7851a9aa3b33044ee.googledrive.com/host/0B_3uBwg3RcdVMFVpME1MdThxZ1U/$filename
$md5
$sha1"
} done
echo
else
#version do not match version pattern
# NOTE(review): $site is never set in this script, so the mail body's
# last line is blank — probably meant to be $url or $base.
echo version do not match version pattern
emails=$(cat ../maintenance | sed '$aend of file')
printf %s "$emails" | while IFS= read -r onemail
do {
python ../send-email.py "$onemail" "To Do List" "Version do not match version pattern:
$site "
} done
fi
else
#filename is already in database
echo filename is already in database
echo
fi
} done
else
#if http statis code is not 200 ok
emails=$(cat ../maintenance | sed '$aend of file')
printf %s "$emails" | while IFS= read -r onemail
do {
python ../send-email.py "$onemail" "To Do List" "the following link do not retrieve good http status code:
$base"
} done
echo
echo
fi
#clean and remove whole temp direcotry
rm $tmp -rf > /dev/null
| true
|
da3da70698fbdc290ecaab502bbf891a606885df
|
Shell
|
onkarbpatil/IPDPS2020-benchmarks
|
/script.sh
|
UTF-8
| 13,389
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# MemSys/IPDPS Optane benchmark driver: runs AMG (and, when uncommented,
# VPIC / SNAP / HPCC / LULESH) under likwid-perfctr against both Optane
# and DRAM memory targets.
echo "optane: $(hostname)"
module load openmpi likwid
# One OpenMP thread per MPI rank; scaling is driven purely by rank count.
export OMP_NUM_THREADS=1
# All benchmark/likwid output lands here; the directory is assumed to exist
# (the rm/mkdir below are intentionally disabled to keep prior results).
RESULTS="${HOME}/memsys19-optane/results"
# rm -rf ${RESULTS}
# mkdir -p ${RESULTS}
# cd /home/jlee/memsys19-optane/vpic/optane
# # vpic weak scaling for optane modes
# export MACRO="USE_OPTANE"
# make -j
# bin/vpic lpi_weak.cxx
# for p in 1 2 4 8
# do
# mpirun -np $p likwid-perfctr -g ENERGY ./lpi_weak.Linux &> ${RESULTS}/vpic_weak_scaling.optane.$p.ENERGY
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./lpi_weak.Linux &> ${RESULTS}/vpic_weak_scaling.optane.$p.FLOPS_DP
# mpirun -np $p likwid-perfctr -g MEM ./lpi_weak.Linux &> ${RESULTS}/vpic_weak_scaling.optane.$p.MEM
# mpirun -np $p likwid-perfctr -g L3CACHE ./lpi_weak.Linux &> ${RESULTS}/vpic_weak_scaling.optane.$p.L3CACHE
# done
# export MACRO="USE_DRAM"
# bin/vpic lpi_weak.cxx
# for p in 1 2 4 8
# do
# mpirun -np $p likwid-perfctr -g ENERGY ./lpi_weak.Linux &> ${RESULTS}/vpic_weak_scaling.DRAM.$p.ENERGY
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./lpi_weak.Linux &> ${RESULTS}/vpic_weak_scaling.DRAM.$p.FLOPS_DP
# mpirun -np $p likwid-perfctr -g MEM ./lpi_weak.Linux &> ${RESULTS}/vpic_weak_scaling.DRAM.$p.MEM
# mpirun -np $p likwid-perfctr -g L3CACHE ./lpi_weak.Linux &> ${RESULTS}/vpic_weak_scaling.DRAM.$p.L3CACHE
# done
# # vpic strong scaling for optane modes
# export MACRO="USE_OPTANE"
# for npcc in 256 512 1024 2048
# do
# bin/vpic strongscaling-${npcc}.cxx
# done
# declare -A npccranks
# npccranks[256]=8
# npccranks[512]=4
# npccranks[1024]=2
# npccranks[2048]=1
# for i in "${!npccranks[@]}"
# do
# mpirun -np "${npccranks[$i]}" likwid-perfctr -g ENERGY ./strongscaling-"$i".Linux &> "${RESULTS}/vpic_strong_scaling.optane.$i.${npccranks[$i]}.ENERGY"
# mpirun -np "${npccranks[$i]}" likwid-perfctr -g FLOPS_DP ./strongscaling-"$i".Linux &> "${RESULTS}/vpic_strong_scaling.optane.$i.${npccranks[$i]}.FLOPS_DP"
# mpirun -np "${npccranks[$i]}" likwid-perfctr -g MEM ./strongscaling-"$i".Linux &> "${RESULTS}/vpic_strong_scaling.optane.$i.${npccranks[$i]}.MEM"
# mpirun -np "${npccranks[$i]}" likwid-perfctr -g L3CACHE ./strongscaling-"$i".Linux &> "${RESULTS}/vpic_strong_scaling.optane.$i.${npccranks[$i]}.L3CACHE"
# done
# export MACRO="USE_DRAM"
# for npcc in 256 512 1024 2048
# do
# bin/vpic strongscaling-${npcc}.cxx
# done
# for i in "${!npccranks[@]}"
# do
# mpirun -np "${npccranks[$i]}" likwid-perfctr -g ENERGY ./strongscaling-"$i".Linux &> "${RESULTS}/vpic_strong_scaling.DRAM.$i.${npccranks[$i]}.ENERGY"
# mpirun -np "${npccranks[$i]}" likwid-perfctr -g FLOPS_DP ./strongscaling-"$i".Linux &> "${RESULTS}/vpic_strong_scaling.DRAM.$i.${npccranks[$i]}.FLOPS_DP"
# mpirun -np "${npccranks[$i]}" likwid-perfctr -g MEM ./strongscaling-"$i".Linux &> "${RESULTS}/vpic_strong_scaling.DRAM.$i.${npccranks[$i]}.MEM"
# mpirun -np "${npccranks[$i]}" likwid-perfctr -g L3CACHE ./strongscaling-"$i".Linux &> "${RESULTS}/vpic_strong_scaling.DRAM.$i.${npccranks[$i]}.L3CACHE"
# done
# # SNAP strong scaling
# # process count increases 1,2,4,8, nx, ny, nz stay at 32
# cd /home/jlee/memsys19-optane/SNAP/ports/snap-c
# make
# for p in 1 2 4 8
# do
# mpirun -np $p likwid-perfctr -g ENERGY ./snap --fi ${p}_strong --fo ${RESULTS}/snap_strong_scaling.optane.$p.ENERGY OPTANE &> ${RESULTS}/snap_strong_scaling.optane.$p.ENERGY.likwid
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./snap --fi ${p}_strong --fo ${RESULTS}/snap_strong_scaling.optane.$p.FLOPS_DP OPTANE &> ${RESULTS}/snap_strong_scaling.optane.$p.FLOPS_DP.likwid
# mpirun -np $p likwid-perfctr -g MEM ./snap --fi ${p}_strong --fo ${RESULTS}/snap_strong_scaling.optane.$p.MEM OPTANE &> ${RESULTS}/snap_strong_scaling.optane.$p.MEM.likwid
# mpirun -np $p likwid-perfctr -g L3CACHE ./snap --fi ${p}_strong --fo ${RESULTS}/snap_strong_scaling.optane.$p.L3CACHE OPTANE &> ${RESULTS}/snap_strong_scaling.optane.$p.L3CACHE.likwid
# mpirun -np $p likwid-perfctr -g ENERGY ./snap --fi ${p}_strong --fo ${RESULTS}/snap_strong_scaling.DRAM.$p.ENERGY DRAM &> ${RESULTS}/snap_strong_scaling.DRAM.$p.ENERGYY.likwid
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./snap --fi ${p}_strong --fo ${RESULTS}/snap_strong_scaling.DRAM.$p.FLOPS_DP DRAM &> ${RESULTS}/snap_strong_scaling.DRAM.$p.FLOPS_DP.likwid
# mpirun -np $p likwid-perfctr -g MEM ./snap --fi ${p}_strong --fo ${RESULTS}/snap_strong_scaling.DRAM.$p.MEM DRAM &> ${RESULTS}/snap_strong_scaling.DRAM.$p.MEM.likwid
# mpirun -np $p likwid-perfctr -g L3CACHE ./snap --fi ${p}_strong --fo ${RESULTS}/snap_strong_scaling.DRAM.$p.L3CACHE DRAM &> ${RESULTS}/snap_strong_scaling.DRAM.$p.L3CACHE.likwid
# done
# # SNAP weak scaling
# # process count increases nx,ny,nz 8-64
# for p in 1 2 4 8
# do
# mpirun -np $p likwid-perfctr -g ENERGY ./snap --fi ${p}_weak --fo ${RESULTS}/snap_weak_scaling.optane.$p.ENERGY OPTANE &> ${RESULTS}/snap_weak_scaling.optane.$p.ENERGY.likwid
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./snap --fi ${p}_weak --fo ${RESULTS}/snap_weak_scaling.optane.$p.FLOPS_DP OPTANE &> ${RESULTS}/snap_weak_scaling.optane.$p.FLOPS_DP.likwid
# mpirun -np $p likwid-perfctr -g MEM ./snap --fi ${p}_weak --fo ${RESULTS}/snap_weak_scaling.optane.$p.MEM OPTANE &> ${RESULTS}/snap_weak_scaling.optane.$p.MEM.likwid
# mpirun -np $p likwid-perfctr -g L3CACHE ./snap --fi ${p}_weak --fo ${RESULTS}/snap_weak_scaling.optane.$p.L3CACHE OPTANE &> ${RESULTS}/snap_weak_scaling.optane.$p.L3CACHE.likwid
# mpirun -np $p likwid-perfctr -g ENERGY ./snap --fi ${p}_weak --fo ${RESULTS}/snap_weak_scaling.DRAM.$p.ENERGY DRAM &> ${RESULTS}/snap_weak_scaling.DRAM.$p.ENERGY.likwid
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./snap --fi ${p}_weak --fo ${RESULTS}/snap_weak_scaling.DRAM.$p.FLOPS_DP DRAM &> ${RESULTS}/snap_weak_scaling.DRAM.$p.FLOPS_DP.likwid
# mpirun -np $p likwid-perfctr -g MEM ./snap --fi ${p}_weak --fo ${RESULTS}/snap_weak_scaling.DRAM.$p.MEM DRAM &> ${RESULTS}/snap_weak_scaling.DRAM.$p.MEM.likwid
# mpirun -np $p likwid-perfctr -g L3CACHE ./snap --fi ${p}_weak --fo ${RESULTS}/snap_weak_scaling.DRAM.$p.L3CACHE DRAM &> ${RESULTS}/snap_weak_scaling.DRAM.$p.L3CACHE.likwid
# done
# # AMG weakscaling
# # process 1, 2, 4, 8; n = 64
cd /home/jlee/memsys19-optane/AMG/test
# MPI rank decomposition "Px Py Pz" per rank count.
declare -A P
P[1]="1 1 1"
P[2]="1 1 2"
P[4]="1 2 2"
P[8]="2 2 2"
# AMG weak scaling: fixed 256^3 per-rank grid, 1..8 ranks, Optane then DRAM,
# four likwid metric groups each — same command sequence as writing the
# 16 mpirun lines out by hand.
for p in 1 2 4 8
do
    for memarg in OPTANE DRAM
    do
        # Result files use a lowercase "optane" label but uppercase "DRAM".
        tag=$memarg
        [ "$memarg" = "OPTANE" ] && tag=optane
        for group in ENERGY FLOPS_DP MEM L3CACHE
        do
            mpirun -np $p likwid-perfctr -g $group ./amg -n 256 256 256 -P ${P[$p]} $memarg &> ${RESULTS}/amg_weak_scaling.$tag.$p.$group
        done
    done
done
# AMG strong scaling: total problem fixed at 256^3 overall, grid per rank
# shrinking as the rank count grows (1, 2, 4, 8 ranks).
declare -A N
N[1]="256 256 256"
N[2]="256 256 128"
N[4]="256 128 128"
N[8]="128 128 128"
for p in "${!N[@]}"
do
    for memarg in OPTANE DRAM
    do
        # Result files use a lowercase "optane" label but uppercase "DRAM".
        tag=$memarg
        [ "$memarg" = "OPTANE" ] && tag=optane
        for group in ENERGY FLOPS_DP MEM L3CACHE
        do
            mpirun -np $p likwid-perfctr -g $group ./amg -n ${N[$p]} -P ${P[$p]} $memarg &> ${RESULTS}/amg_strong_scaling.$tag.$p.$group
        done
    done
done
# # DGEMM strong scaling
# # process 1, 2, 4, 8; N = 64000
# NOTE(review): this cd still executes even though all HPCC runs below are
# commented out, so the script finishes in the hpcc-1.5.0 directory.
cd /home/jlee/memsys19-optane/hpcc-1.5.0
# make arch=skylake
# for p in 4 8 16 32
# do
# mpirun -np $p likwid-perfctr -g ENERGY ./hpcc hpccinf.txt OPTANE &> ${RESULTS}/hpcc_strong_scaling.optane.$p.ENERGY
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./hpcc hpccinf.txt OPTANE &> ${RESULTS}/hpcc_strong_scaling.optane.$p.FLOPS_DP
# mpirun -np $p likwid-perfctr -g MEM ./hpcc hpccinf.txt OPTANE &> ${RESULTS}/hpcc_strong_scaling.optane.$p.MEM
# mpirun -np $p likwid-perfctr -g L3CACHE ./hpcc hpccinf.txt OPTANE &> ${RESULTS}/hpcc_strong_scaling.optane.$p.L3CACHE
# mpirun -np $p likwid-perfctr -g ENERGY ./hpcc hpccinf.txt DRAM &> ${RESULTS}/hpcc_strong_scaling.DRAM.$p.ENERGY
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./hpcc hpccinf.txt DRAM &> ${RESULTS}/hpcc_strong_scaling.DRAM.$p.FLOPS_DP
# mpirun -np $p likwid-perfctr -g MEM ./hpcc hpccinf.txt DRAM &> ${RESULTS}/hpcc_strong_scaling.DRAM.$p.MEM
# mpirun -np $p likwid-perfctr -g L3CACHE ./hpcc hpccinf.txt DRAM &> ${RESULTS}/hpcc_strong_scaling.DRAM.$p.L3CACHE
# done
# DGEMM weak scaling
# process 1, 2, 3, 8; N = 8K, 16K, 32K, 64K
# unset n
# declare -A n
# n[4]=8000
# n[8]=16000
# n[16]=32000
# n[32]=64000
# for p in "${!n[@]}"
# do
# mpirun -np $p likwid-perfctr -g ENERGY ./hpcc $p OPTANE &> ${RESULTS}/hpcc_weak_scaling.optane.$p.ENERGY
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./hpcc $p OPTANE &> ${RESULTS}/hpcc_weak_scaling.optane.$p.FLOPS_DP
# mpirun -np $p likwid-perfctr -g MEM ./hpcc $p OPTANE &> ${RESULTS}/hpcc_weak_scaling.optane.$p.MEM
# mpirun -np $p likwid-perfctr -g L3CACHE ./hpcc $p OPTANE &> ${RESULTS}/hpcc_weak_scaling.optane.$p.L3CACHE
# mpirun -np $p likwid-perfctr -g ENERGY ./hpcc $p DRAM &> ${RESULTS}/hpcc_weak_scaling.DRAM.$p.ENERGYY
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./hpcc $p DRAM &> ${RESULTS}/hpcc_weak_scaling.DRAM.$p.FLOPS_DP
# mpirun -np $p likwid-perfctr -g MEM ./hpcc $p DRAM &> ${RESULTS}/hpcc_weak_scaling.DRAM.$p.MEM
# mpirun -np $p likwid-perfctr -g L3CACHE ./hpcc $p DRAM &> ${RESULTS}/hpcc_weak_scaling.DRAM.$p.L3CACHE
# done
# # LULESH weak scaling
# # processes 1, 8, 27, 64
# cd /home/jlee/memsys19-optane/LULESH
# make
# for p in 1 8 27
# do
# mpirun -np $p likwid-perfctr -g ENERGY ./lulesh2.0 -s 25 OPTANE &> ${RESULTS}/lulesh_weak_scaling.optane.$p.ENERGY
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./lulesh2.0 -s 25 OPTANE &> ${RESULTS}/lulesh_weak_scaling.optane.$p.FLOPS_DP
# mpirun -np $p likwid-perfctr -g MEM ./lulesh2.0 -s 25 OPTANE &> ${RESULTS}/lulesh_weak_scaling.optane.$p.MEM
# mpirun -np $p likwid-perfctr -g L3CACHE ./lulesh2.0 -s 25 OPTANE &> ${RESULTS}/lulesh_weak_scaling.optane.$p.L3CACHE
# mpirun -np $p likwid-perfctr -g ENERGY ./lulesh2.0 -s 25 DRAM &> ${RESULTS}/lulesh_weak_scaling.DRAM.$p.ENERGY
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./lulesh2.0 -s 25 DRAM &> ${RESULTS}/lulesh_weak_scaling.DRAM.$p.FLOPS_DP
# mpirun -np $p likwid-perfctr -g MEM ./lulesh2.0 -s 25 DRAM &> ${RESULTS}/lulesh_weak_scaling.DRAM.$p.MEM
# mpirun -np $p likwid-perfctr -g L3CACHE ./lulesh2.0 -s 25 DRAM &> ${RESULTS}/lulesh_weak_scaling.DRAM.$p.L3CACHE
# done
# # LULESH strong scaling
# # processes 1, 8, 27, 64; problem size 40, 20, 13, 10
# declare -A procsize
# procsize[1]=50
# procsize[8]=25
# procsize[27]=17
# #procsize[64]=10
# for p in "${!procsize[@]}"
# do
# mpirun -np $p likwid-perfctr -g ENERGY ./lulesh2.0 -s ${procsize[$p]} OPTANE &> ${RESULTS}/lulesh_strong_scaling.optane.$p.ENERGY
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./lulesh2.0 -s ${procsize[$p]} OPTANE &> ${RESULTS}/lulesh_strong_scaling.optane.$p.FLOPS_DP
# mpirun -np $p likwid-perfctr -g MEM ./lulesh2.0 -s ${procsize[$p]} OPTANE &> ${RESULTS}/lulesh_strong_scaling.optane.$p.MEM
# mpirun -np $p likwid-perfctr -g L3CACHE ./lulesh2.0 -s ${procsize[$p]} OPTANE &> ${RESULTS}/lulesh_strong_scaling.optane.$p.L3CACHE
# mpirun -np $p likwid-perfctr -g ENERGY ./lulesh2.0 -s ${procsize[$p]} DRAM &> ${RESULTS}/lulesh_strong_scaling.DRAM.$p.ENERGY
# mpirun -np $p likwid-perfctr -g FLOPS_DP ./lulesh2.0 -s ${procsize[$p]} DRAM &> ${RESULTS}/lulesh_strong_scaling.DRAM.$p.FLOPS_DP
# mpirun -np $p likwid-perfctr -g MEM ./lulesh2.0 -s ${procsize[$p]} DRAM &> ${RESULTS}/lulesh_strong_scaling.DRAM.$p.MEM
# mpirun -np $p likwid-perfctr -g L3CACHE ./lulesh2.0 -s ${procsize[$p]} DRAM &> ${RESULTS}/lulesh_strong_scaling.DRAM.$p.L3CACHE
# done
| true
|
318fa0a54fcf616077e2fbdcd66a00c4236974f3
|
Shell
|
nimble-platform/docker_setup
|
/dev/run-dev.sh
|
UTF-8
| 5,625
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
update_images () {
  # Pull the newest images for both compose stacks, infrastructure first.
  # Project names follow the "nimble<stack>" convention.
  local stack
  for stack in infra services; do
    docker-compose -f "${stack}/docker-compose.yml" --project-name "nimble${stack}" pull
  done
}
start_all () {
  # Bring up the infrastructure stack.
  docker-compose -f infra/docker-compose.yml --project-name nimbleinfra up -d --build --remove-orphans

  # Block until the gateway proxy (the last infrastructure component to come
  # up) answers on its /info endpoint.
  echo "*****************************************************************"
  echo "******************* Stalling for Gateway Proxy ******************"
  echo "*****************************************************************"
  docker run --rm --net=nimbleinfra_default -it mcandre/docker-wget --retry-connrefused --waitretry=5 --read-timeout=20 --timeout=15 --tries 60 gateway-proxy:80/info

  # Then bring up the core services and wait for each one in turn.
  docker-compose -f services/docker-compose.yml --project-name nimbleservices up -d --build --remove-orphans
  echo "*****************************************************************"
  echo "********************* Stalling for services *********************"
  echo "*****************************************************************"
  local endpoint
  for endpoint in frontend-service:8080 identity-service:9096/info catalog-service:8095/info business-process-service:8085/info; do
    docker run --rm --net=nimbleinfra_default -it mcandre/docker-wget --retry-connrefused --waitretry=5 --read-timeout=20 --timeout=15 --tries 30 "$endpoint"
  done
}
# Dispatch on the first command-line argument.
# BUG FIX: a second `elif [[ "$1" = "down" ]]` branch used to follow
# `restart-single`; it was unreachable (the first match wins) and its
# --remove-orphans flag has been folded into the confirmed `down` below.
if [[ "$1" = "infrastructure" ]]; then
    # Start only the infrastructure stack, pulling images unless suppressed.
    if [[ "$2" != "--no-updates" ]]; then
        update_images
    fi
    docker-compose -f infra/docker-compose.yml --project-name nimbleinfra up -d --build
elif [[ "$1" = "services" ]]; then
    if [[ "$2" != "--no-updates" ]]; then
        update_images
    fi
    # start services (force-recreate the core set)
    docker-compose -f services/docker-compose.yml \
        --project-name nimbleservices up \
        -d \
        --build \
        --force-recreate identity-service business-process-service frontend-service catalog-service frontend-service-sidecar trust-service indexing-service datachannel-service
#    docker-compose -f services/docker-compose.yml --project-name nimbleservices up --build --force-recreate identity-service
elif [[ "$1" = "start" ]]; then
    update_images
    start_all
elif [[ "$1" = "start-no-update" ]]; then
    start_all
elif [[ "$1" = "stop" ]]; then
    docker-compose -f services/docker-compose.yml --project-name nimbleservices stop
    docker-compose -f infra/docker-compose.yml --project-name nimbleinfra stop
elif [[ "$1" = "down" ]]; then
    # Destructive: removes containers, orphans and volumes — confirm first.
    read -p "Are you sure? " -n 1 -r
    echo # (optional) move to a new line
    if [[ $REPLY =~ ^[Yy]$ ]]
    then
        docker-compose -f services/docker-compose.yml --project-name nimbleservices down --remove-orphans -v
        docker-compose -f infra/docker-compose.yml --project-name nimbleinfra down --remove-orphans -v
    fi
elif [[ "$1" = "stop-services" ]]; then
    docker-compose -f services/docker-compose.yml --project-name nimbleservices stop
elif [[ "$1" = "restart-single" ]]; then
    # Rebuild a single named service and tail its logs.
    docker-compose -f services/docker-compose.yml --project-name nimbleservices up --build -d --force-recreate $2
    docker-compose -f services/docker-compose.yml \
        --project-name nimbleservices \
        logs -f $2
elif [[ "$1" = "services-logs" ]]; then
    docker-compose -f services/docker-compose.yml --project-name nimbleservices logs -f
elif [[ "$1" = "cloud-infra" ]]; then
    docker-compose -f infra/docker-compose.yml --project-name nimbleinfra up -d --build --force-recreate config-server service-discovery gateway-proxy
    docker-compose -f infra/docker-compose.yml --project-name nimbleinfra logs -f config-server service-discovery gateway-proxy
elif [[ "$1" = "keycloak" ]]; then
    docker-compose -f infra/docker-compose.yml --project-name nimbleinfra up -d --build --force-recreate keycloak
    docker-compose -f infra/docker-compose.yml --project-name nimbleinfra logs -f keycloak keycloak-db
elif [[ "$1" = "dev-infra" ]]; then
    docker-compose -f infra/docker-compose.yml --project-name nimbleinfra up -d --build --force-recreate kafka maildev solr dev-main-db
    docker-compose -f infra/docker-compose.yml --project-name nimbleinfra logs -f kafka maildev solr dev-main-db
elif [[ "$1" = "create-network" ]]; then
    docker network create nimbleinfra_default
else
    echo Usage: $0 COMMAND
    echo Commands:
    echo "  infrastructure            start only infrastructure components"
    echo "  services                  start nimble core services"
    echo "  start                     start infrastructure and core services"
    echo "  start-no-update           start infrastructure and core services, without updating the images"
    echo "  restart-single SERVICE    restart a single core service"
    echo "  stop                      stop infrastructure and core services"
    echo "  stop-services             stop core services, but leave infrastructure running"
    echo "  down                      stop and remove everything (incl. volumes)"
    echo "  services-logs             get the log output from the nimble core services"
    echo "  cloud-infra               restart config-server, service-discovery and gateway-proxy"
    echo "  keycloak                  restart keycloak and tail its logs"
    echo "  dev-infra                 restart kafka, maildev, solr and dev-main-db"
    echo "  create-network            create the nimbleinfra_default docker network"
    exit 2
fi
| true
|
dbffcff3c9013b7eca480b45fe93160faf65b729
|
Shell
|
aranb/amos-master-scripts
|
/run_omid_threads
|
UTF-8
| 2,502
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run OMID/YCSB throughput benchmarks for a series of client thread counts.
#
# Usage: run_omid_threads LABEL THREAD_COUNT...
#   LABEL        - tag used in all output file names under ~/out/
#   THREAD_COUNT - one full load+run cycle is performed per value
export TABLE="usertable"
export CF="fam"
export THREADS=150
export label="$1"
shift
export approx_time=240
export ycsb_cmd="${HOME}/inst/ycsb/bin/ycsb run omid"
export ycsb_load_cmd="${HOME}/inst/ycsb/bin/ycsb load omid"
export ycsb_common_params="-cp ${HOME}/inst/ -P ${HOME}/inst/Tworkloada -p table=${TABLE} -p columnfamily=${CF} -p histogram.buckets=10"
export ycsb_run_params="-s"
export ycsb_load_params="-threads 100 -s"
export ops_param=" -p operationcount="
export single_thread_speed=120
export max_capacity=3200
export splits=8
export load_batch_size=100
export timeout=200
export tso_cmd="${HOME}/inst/tso-server/bin/omid.sh tso -metricsConfigs console:_:10:SECONDS -networkIface eth0 -batchPersistTimeout ${timeout}"
export tso_batchsize="-maxBatchSize "
# BUG FIX: was `threads=$@` (a single joined string, later expanded unquoted
# as if it were an array); store the remaining arguments as a real array.
threads=("$@") #(1 2 5 10 20 50 100 120 150)
for thread in "${threads[@]}"; do
# Target rate scales with the thread count, capped at cluster capacity;
# ops = rate * approximate run time.
rate=$(echo "rate=${thread}*${single_thread_speed}; if (rate>${max_capacity}) ${max_capacity} else rate" | bc -l)
ops=$(echo "${rate}*${approx_time}" | bc -l)
echo ""
echo "Approx Rate = "${rate}" ops/sec, Threads="${thread}
echo ""
#kill TSO and clients (in case there are any left over)
~/bin/kill_clients
~/bin/kill_tso
#clean hbase and start anew
~/bin/clean_hbase.sh
~/bin/fresh_start.sh ${splits}
#start the TSO with the (small) load-phase batch size
echo ${tso_cmd} ${tso_batchsize} ${load_batch_size}
ssh -T tso << EOF &> ~/out/tsoload-${label}-threads${thread}.txt &
${tso_cmd} ${tso_batchsize} ${load_batch_size}
EOF
sleep 5
#load the database
echo ${ycsb_load_cmd} ${ycsb_common_params} ${ycsb_load_params}
ssh -T ycsb << EOF
${ycsb_load_cmd} ${ycsb_common_params} ${ycsb_load_params}
EOF
sleep 10
#kill the TSO and start a new one with the run-phase batch size
~/bin/kill_tso
echo ${tso_cmd} ${tso_batchsize} 1000
ssh -T tso << EOF &> ~/out/tso-${label}-threads${thread}.txt &
${tso_cmd} ${tso_batchsize} 1000
EOF
sleep 5
#flush everything to make sure there are files on the disk
echo 'Flushing everything'
hbase shell << EOF
flush 'OMID_COMMIT_TABLE'
flush 'OMID_TIMESTAMP'
flush 'usertable'
EOF
#start the benchmark run and wait for it to finish
echo ${ycsb_cmd} ${ycsb_common_params} ${ycsb_run_params} ${ops_param}${ops} -threads ${thread}
ssh -T ycsb << EOF &> ~/out/${label}-threads${thread}.txt &
${ycsb_cmd} ${ycsb_common_params} ${ycsb_run_params} ${ops_param}${ops} -threads ${thread}
EOF
wait $!
#stop the tso
sleep 10
echo "Killing TSO"
~/bin/kill_tso
done
| true
|
56e71972a60d241b13600a37184c94f9c395d2ce
|
Shell
|
flaupretre/mp3-tools
|
/encode_mp3.sh
|
UTF-8
| 1,515
| 3.84375
| 4
|
[] |
no_license
|
# Encode or re-encode a file to MP3.
#
# Accepted sources: wav and mp3.
#
# Reads on stdin a list of paths to re-encode (as produced by
# 'fix_libs -r -f'), one per line. Paths may be absolute or relative to
# the directory the script is started from.
#
# Caution: paths may contain spaces and may start with a '-'!
# This requires special handling. For instance, lame accepts neither paths
# starting with '-' nor the '--' syntax separating options from arguments.
#=============================================================================
# Encoder command and default options (VBR, "standard" preset, joint
# stereo); any extra command-line arguments are forwarded to lame via OPTS.
export LAME_CMD='lame'
export LAME_OPTS='-v --preset standard -m j'
export OPTS="$*"
#-------
# Print an error banner to stdout and abort the whole script with status 1.
fatal()
{
    printf '%s\n' "********* Fatal error : $1"
    exit 1
}
#-------
base_dir=$(pwd)
# Read one path per line on stdin and re-encode each file to MP3 in place.
# `read -r` keeps backslashes in paths literal (the original plain `read`
# mangled them).
while read -r f
do
    dir=$(dirname -- "$f")
    source=$(basename -- "$f")
    # Split "name.ext" into stem and extension.
    base=$(echo "$source" | sed 's/\.[^\.]*$//')
    source_ext=$(echo "$source" | sed 's/^.*\.\([^\.]*\)$/\1/')
    target="$base.mp3"
    # Temp names are plain (no leading '-', no spaces) so lame accepts them.
    _tmp1=tmp$$.$source_ext
    _tmp2=tmp2$$.mp3
    #---
    echo "Directory: $dir"
    cd -- "$dir" || fatal "Cannot change dir to $dir"
    echo '*-- Original :'
    ls -l -- "$source"
    rm -rf "$_tmp1" "$_tmp2"
    mv -- "$source" "$_tmp1" || fatal "Cannot move $source"
    # LAME_OPTS/OPTS are intentionally unquoted: each holds several options.
    $LAME_CMD $LAME_OPTS $OPTS "$_tmp1" "$_tmp2"
    rc=$?
    # Was `[ $rc = 0 -a -f $_tmp2 ]`; `-a` is deprecated/ambiguous, replaced
    # by two explicit tests.
    if [ "$rc" -eq 0 ] && [ -f "$_tmp2" ] ; then
        mv -- "$_tmp2" "$target"
        rm -rf "$_tmp1"
    else
        # Restore the original file before aborting.
        mv -- "$_tmp1" "$source"
        fatal "Erreur Lame"
    fi
    echo '*-- Apres codage :'
    ls -l -- "$target"
    cd "$base_dir" || fatal "Cannot return to $base_dir"
    sleep 1 # Leaves a window for ^C to stop the script cleanly
done
| true
|
4c302382978d1765e1d0902c4a1cfa80f7158240
|
Shell
|
melanie-oneill/special-topics-labs-linux-docker
|
/bin/httpd-ctl.sh
|
UTF-8
| 1,088
| 3.984375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Control script for an Apache httpd Docker container.
#
# Usage: httpd-ctl.sh [-h] [-d docroot] [-p port] {start|stop|destroy}

# Defaults: bind-mount the lab's test directory and publish host port 8080.
DIR="-v /home/oneilm8/Desktop/linux-docker/special-topics-labs-linux-docker/test/:/usr/local/apache2/htdocs/"
PORTOPT="-p 8080:80"

# Print usage information.
function HELP {
  echo -e \\n"Help documentation for this script"\\n
  echo -e "Basic usage: httpd-ctl.sh [command] [options]"\\n
  echo -e "Commands are: start, stop, & destroy"\\n
  echo "-h This help menu"
  echo "-d root directory on your system to find www root"
  echo "-p local port you want to be able to access"
}

while getopts 'hd:p:' OPTION; do
  case "$OPTION" in
    h) HELP
       ;;
    # BUG FIX: this used to assign DIRECTORY, which nothing read, so -d was
    # silently ignored; it must override DIR (consumed by `start` below).
    d) DIR="-v $OPTARG:/usr/local/apache2/htdocs/"
       ;;
    p) PORTOPT="-p $OPTARG:80 "
       ;;
  esac
done
shift $(($OPTIND - 1))

case $1 in
  start)
    echo "starting your docker images"
    # $PORTOPT and $DIR are intentionally unquoted: each expands to
    # multiple docker arguments.
    docker run -dit --name my-apache-app $PORTOPT $DIR httpd:2.4
    ;;
  stop)
    echo "stopping your docker instances"
    docker stop my-apache-app
    ;;
  destroy)
    echo "stopping and removing your docker instances"
    docker stop my-apache-app
    docker rm my-apache-app
    ;;
esac
| true
|
474a675a8b437d4c03f0f9349f178678a11fa2fb
|
Shell
|
sentinelleader/my-puppet-modules
|
/puppet-qmail/files/qmail-send/run
|
UTF-8
| 249
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh
# daemontools run script for qmail-send; merge stderr into stdout so the
# supervising logger captures everything.
exec 2>&1
QMAIL="/var/qmail"
# Default delivery instruction: control/defaultdelivery if present,
# otherwise fall back to ./Maildir/.
ALIASEMPTY=$(cat "$QMAIL/control/defaultdelivery" 2> /dev/null)
ALIASEMPTY=${ALIASEMPTY:-"./Maildir/"}
PATH="$QMAIL/bin:$PATH"
# limit to prevent memory hogs
ulimit -c 204800
exec qmail-start "$ALIASEMPTY"
| true
|
f41a1a39c9507969daf199eb796e85892fb10f31
|
Shell
|
zmarouf/shell-fun
|
/history_expansion.sh
|
UTF-8
| 1,684
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# NOTE(review): despite the /bin/sh shebang, `set -o history` and
# `set -o histexpand` below are bash features; under dash these lines fail.
# Presumably the script is meant to be run with bash — confirm, and note
# that `echo "\t..."` renders differently between dash (tab) and bash
# (literal backslash-t).
##
## test.sh
## Login : <zmarouf@Zeids-MacBook-Pro.local>
## Started on Thu May 8 17:30:01 2014 Zeid Marouf
## $Id$
##
## Author(s):
##  - Zeid Marouf <>
##
## Copyright (C) 2014 Zeid Marouf
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
##
# Example of history expansion inside a script (shouldn't be used but fun :D)
echo "So you're typing long commands and you don't feel like retyping arguments? Try these:"
echo "\t!x where x is the command number in the history file"
echo "\t!-x where x is the command number in the list of previously executed commands in LIFO"
echo "\t!^ where ^ is the first argument of the previous command"
echo "\t!$ where $ is the last argument of the previous command"
echo "\t!* where * globs all the arguments of the previous command"
# Show the two commands about to be demonstrated.
echo 'echo 1 2 3 4' # first command (echoed to screen)
echo 'echo !:2 && echo !:3 && echo !* && echo !$'
# Enable in-script history recording and !-style expansion; expansion is
# applied to subsequent lines as the shell reads them.
set -o history # activating history (won't work in a pure zsh script)
set -o histexpand # activating expansion
echo 1 2 3 4
echo !:2 && echo !:3 && echo !* && echo !$
| true
|
02dcbe93feeb30e6f14d49b512937b4b69e52880
|
Shell
|
seebees/aws-sdk-js-v3
|
/models/copy-model.sh
|
UTF-8
| 552
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copy Coral JSON model files for a service (optionally a single version)
# from the integration host to the local directory via scp.
#
# Usage: copy-model.sh SERVICENAME [VERSION]

declare -r INTEG="aws-dr-tools-integration-pdx-64002.pdx4.corp.amazon.com"
declare -r APOLLO_PATH="/apollo/env/CoralJSONModels-development"

declare SERVICE="$1"
declare VERSION="$2"

if [ -z "${SERVICE}" ]; then
    echo "Usage copy-model.sh SERVICENAME [VERSION]"
    exit 1
fi

if [ -z "${VERSION}" ]; then
    # Empty VERSION leaves a trailing slash in the remote path, so scp -r
    # pulls every version directory of the service.
    echo "Syncing all versions and models for ${SERVICE}"
    declare COPYTO="./"
else
    # Fixed user-facing typo: was "Syncing the ${VERSION} or ${SERVICE}".
    echo "Syncing the ${VERSION} of ${SERVICE}"
    declare COPYTO="./${SERVICE}"
fi

scp -r "${INTEG}:${APOLLO_PATH}/${SERVICE}/${VERSION}" "${COPYTO}"
| true
|
3629dc7deb54eec9f9f2b7d3888a21bae2a5ef7c
|
Shell
|
amlucent/docker-containers
|
/simplehelp/src/firstrun.sh
|
UTF-8
| 320
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: align the system timezone with $TZ, seed the
# SimpleHelp installation on first run, then hand control to supervisord.

current_tz=$(cat /etc/timezone)
# $TZ is intentionally left unquoted on the right-hand side so the original
# [[ != ]] pattern-match semantics are preserved.
if [[ $current_tz != $TZ ]] ; then
    echo "$TZ" > /etc/timezone
    dpkg-reconfigure -f noninteractive tzdata
fi

if [[ -d /opt/SimpleHelp/lib ]]; then
    echo "simplehelp files are in place"
else
    # First boot: copy the bundled SimpleHelp tree into place.
    mkdir -p /opt/SimpleHelp
    cp -r /root/SimpleHelp/* /opt/SimpleHelp/
fi

sleep 5s
exec /usr/bin/supervisord
| true
|
bf31ddba6c501cd52b401774a3c1442dce51d33a
|
Shell
|
open-power/skiboot
|
/libstb/tss2/ibmtpm20tss/utils/regtests/testpolicy138.sh
|
UTF-8
| 15,690
| 2.5625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
#################################################################################
# #
# TPM2 regression test #
# Written by Ken Goldman #
# IBM Thomas J. Watson Research Center #
# #
# (c) Copyright IBM Corporation 2016 - 2020 #
# #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are #
# met: #
# #
# Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# #
# Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# #
# Neither the names of the IBM Corporation nor the names of its #
# contributors may be used to endorse or promote products derived from #
# this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
# #
#################################################################################
# used for the name in policy ticket
if [ -z $TPM_DATA_DIR ]; then
TPM_DATA_DIR=.
fi
# PolicyCommandCode - sign
# cc69 18b2 2627 3b08 f5bd 406d 7f10 cf16
# 0f0a 7d13 dfd8 3b77 70cc bcd1 aa80 d811
# NV index name after written
# 000b
# 5e8e bdf0 4581 9419 070c 7d57 77bf eb61
# ffac 4996 ea4b 6fba de6d a42b 632d 4918
# PolicyAuthorizeNV with above Name
# 66 1f a1 02 db cd c2 f6 a0 61 7b 33 a0 ee 6d 95
# ab f6 2c 76 b4 98 b2 91 10 0d 30 91 19 f4 11 fa
# Policy in NV index 01000000
# signing key 80000001
echo ""
echo "Policy Authorize NV"
echo ""
echo "Start a policy session 03000000"
${PREFIX}startauthsession -se p > run.out
checkSuccess $?
echo "Create a signing key, policyauthnv"
${PREFIX}create -hp 80000000 -si -opr tmppriv.bin -opu tmppub.bin -pwdp sto -pwdk sig -pol policies/policyauthorizenv.bin > run.out
checkSuccess $?
echo "Load the signing key under the primary key"
${PREFIX}load -hp 80000000 -ipr tmppriv.bin -ipu tmppub.bin -pwdp sto > run.out
checkSuccess $?
echo "NV Define Space"
${PREFIX}nvdefinespace -hi o -ha 01000000 -sz 50 > run.out
checkSuccess $?
echo "NV not written, policyauthorizenv - should fail"
${PREFIX}policyauthorizenv -ha 01000000 -hs 03000000 > run.out
checkFailure $?
echo "Write algorithm ID into NV index 01000000"
${PREFIX}nvwrite -ha 01000000 -off 0 -if policies/sha256.bin > run.out
checkSuccess $?
echo "Write policy command code sign into NV index 01000000"
${PREFIX}nvwrite -ha 01000000 -off 2 -if policies/policyccsign.bin > run.out
checkSuccess $?
echo "Policy command code - sign"
${PREFIX}policycommandcode -ha 03000000 -cc 15d > run.out
checkSuccess $?
echo "Policy get digest - should be cc 69 ..."
${PREFIX}policygetdigest -ha 03000000 > run.out
checkSuccess $?
echo "Policy Authorize NV against 01000000"
${PREFIX}policyauthorizenv -ha 01000000 -hs 03000000 > run.out
checkSuccess $?
echo "Policy get digest - should be 66 1f ..."
${PREFIX}policygetdigest -ha 03000000 > run.out
checkSuccess $?
echo "Sign a digest - policy and wrong password"
${PREFIX}sign -hk 80000001 -if msg.bin -os sig.bin -se0 03000000 1 -pwdk xxx > run.out
checkSuccess $?
echo "Policy restart, set back to zero"
${PREFIX}policyrestart -ha 03000000 > run.out
checkSuccess $?
echo "Policy command code - sign"
${PREFIX}policycommandcode -ha 03000000 -cc 15d > run.out
checkSuccess $?
echo "Policy Authorize NV against 01000000"
${PREFIX}policyauthorizenv -ha 01000000 -hs 03000000 > run.out
checkSuccess $?
echo "Quote - policy, should fail"
${PREFIX}quote -hp 0 -hk 80000001 -os sig.bin -se0 03000000 1 > run.out
checkFailure $?
echo "Policy restart, set back to zero"
${PREFIX}policyrestart -ha 03000000 > run.out
checkSuccess $?
echo "Policy command code - quote"
${PREFIX}policycommandcode -ha 03000000 -cc 158 > run.out
checkSuccess $?
echo "Policy Authorize NV against 01000000 - should fail"
${PREFIX}policyauthorizenv -ha 01000000 -hs 03000000 > run.out
checkFailure $?
echo "NV Undefine Space"
${PREFIX}nvundefinespace -hi o -ha 01000000 > run.out
checkSuccess $?
echo "Flush the policy session 03000000"
${PREFIX}flushcontext -ha 03000000 > run.out
checkSuccess $?
echo "Flush the signing key 80000001 "
${PREFIX}flushcontext -ha 80000001 > run.out
checkSuccess $?
echo ""
echo "Policy Template"
echo ""
# create template hash
# run createprimary -si -v, extract template
# policies/policytemplate.txt
# 00 01 00 0b 00 04 04 72 00 00 00 10 00 10 08 00
# 00 00 00 00 00 00
# policymaker -if policies/policytemplate.txt -pr -of policies/policytemplate.bin -nz
# -nz says do not extend, just hash the hexascii line
# yields a template hash for policytemplate
# ef 64 da 91 18 fc ac 82 f4 36 1b 28 84 28 53 d8
# aa f8 7d fc e1 45 e9 25 cf fe 58 68 aa 2d 22 b6
# prepend the command code 00000190 to ef 64 ... and construct the actual object policy
# policymaker -if policies/policytemplatehash.txt -pr -of policies/policytemplatehash.bin
# fb 94 b1 43 e5 2b 07 95 b7 ec 44 37 79 99 d6 47
# 70 1c ae 4b 14 24 af 5a b8 7e 46 f2 58 af eb de
echo ""
echo "Policy Template with TPM2_Create"
echo ""
echo "Create a primary storage key policy template, 80000001"
${PREFIX}createprimary -hi p -pol policies/policytemplatehash.bin > run.out
checkSuccess $?
echo "Start a policy session 03000000"
${PREFIX}startauthsession -se p > run.out
checkSuccess $?
echo "Policy Template"
${PREFIX}policytemplate -ha 03000000 -te policies/policytemplate.bin > run.out
checkSuccess $?
echo "Policy get digest - should be fb 94 ... "
${PREFIX}policygetdigest -ha 03000000 > run.out
checkSuccess $?
echo "Create signing key under primary key"
${PREFIX}create -si -hp 80000001 -kt f -kt p -se0 03000000 1 > run.out
checkSuccess $?
echo ""
echo "Policy Template with TPM2_CreateLoaded"
echo ""
echo "Policy restart, set back to zero"
${PREFIX}policyrestart -ha 03000000 > run.out
checkSuccess $?
echo "Policy Template"
${PREFIX}policytemplate -ha 03000000 -te policies/policytemplate.bin > run.out
checkSuccess $?
echo "Policy get digest - should be fb 94 ... "
${PREFIX}policygetdigest -ha 03000000 > run.out
checkSuccess $?
echo "Create loaded signing key under primary key"
${PREFIX}createloaded -si -hp 80000001 -kt f -kt p -se0 03000000 1 > run.out
checkSuccess $?
echo "Flush the primary key 80000001"
${PREFIX}flushcontext -ha 80000001 > run.out
checkSuccess $?
echo "Flush the created key 80000002"
${PREFIX}flushcontext -ha 80000002 > run.out
checkSuccess $?
echo ""
echo "Policy Template with TPM2_CreatePrimary"
echo ""
echo "Set primary policy for platform hierarchy"
${PREFIX}setprimarypolicy -hi p -halg sha256 -pol policies/policytemplatehash.bin > run.out
checkSuccess $?
echo "Policy restart, set back to zero"
${PREFIX}policyrestart -ha 03000000 > run.out
checkSuccess $?
echo "Policy Template"
${PREFIX}policytemplate -ha 03000000 -te policies/policytemplate.bin > run.out
checkSuccess $?
echo "Policy get digest - should be fb 94 ... "
${PREFIX}policygetdigest -ha 03000000 > run.out
checkSuccess $?
echo "Create loaded primary signing key policy template, 80000001"
${PREFIX}createprimary -si -hi p -se0 03000000 0 > run.out
checkSuccess $?
echo "Flush the primary key 80000001"
${PREFIX}flushcontext -ha 80000001 > run.out
checkSuccess $?
#
# Use case of the PCR brittleness solution using PolicyAuthorize, but
# where the authorizing public key is not hard coded in the sealed
# blob policy. Rather, it's in an NV Index, so that the authorizing
# key can be changed. Here, the authorization to change is platform
# auth. The NV index is locked until reboot as a second level of
# protection.
#
# Policy design
# PolicyAuthorizeNV and Name of NV index AND Unseal
# where the NV index holds PolicyAuthorize with the Name of the authorizing signing key
# where PolicyAuthorize will authorize command Unseal AND PCR values
# construct Policies
# Provision the NV Index data first. The NV Index Name is needed for the policy
# PolicyAuthorize with the Name of the authorizing signing key.
# The authorizing signing key Name can be obtained using the TPM from
# loadexternal below. It can also be calculated off line using this
# utility
# > publicname -ipem policies/rsapubkey.pem -halg sha256 -nalg sha256 -v -ns
# policyauthorize and CA public key
# policies/policyauthorizesha256.txt
# 0000016a000b64ac921a035c72b3aa55ba7db8b599f1726f52ec2f682042fc0e0d29fae81799
# (need blank line for policyRef)
# > policymaker -halg sha256 -if policies/policyauthorizesha256.txt -pr -v -ns -of policies/policyauthorizesha256.bin
# intermediate policy digest length 32
# fc 17 cd 86 c0 4f be ca d7 17 5f ef c7 75 5b 63
# a8 90 49 12 c3 2e e6 9a 4c 99 1a 7b 5a 59 bd 82
# intermediate policy digest length 32
# eb a3 f9 8c 5e af 1e a8 f9 4f 51 9b 4d 2a 31 83
# ee 79 87 66 72 39 8e 23 15 d9 33 c2 88 a8 e5 03
# policy digest length 32
# eb a3 f9 8c 5e af 1e a8 f9 4f 51 9b 4d 2a 31 83
# ee 79 87 66 72 39 8e 23 15 d9 33 c2 88 a8 e5 03
# policy digest:
# eba3f98c5eaf1ea8f94f519b4d2a3183ee79876672398e2315d933c288a8e503
# Once the NV Index Name is known, calculated the sealed blob policy.
# PolicyAuthorizeNV and Name of NV Index AND Unseal
#
# get NV Index Name from nvreadpublic after provisioning
# 000b56e16f0b810a6418daab06822be142858beaf9a79d66f66ad7e8e541f142498e
#
# policies/policyauthorizenv-unseal.txt
#
# policyauthorizenv and Name of NV Index
# 00000192000b56e16f0b810a6418daab06822be142858beaf9a79d66f66ad7e8e541f142498e
# policy command code unseal
# 0000016c0000015e
#
# > policymaker -halg sha256 -if policies/policyauthorizenv-unseal.txt -of policies/policyauthorizenv-unseal.bin -pr -v -ns
# intermediate policy digest length 32
# 2f 7a d9 b7 53 26 35 e5 03 8c e7 7b 8f 63 5e 4c
# f9 96 c8 62 18 13 98 94 c2 71 45 e7 7d d5 e8 e8
# intermediate policy digest length 32
# cd 1b 24 26 fe 10 08 6c 52 35 85 94 22 a0 59 69
# 33 4b 88 47 82 0d 0b d9 8c 43 1f 7f f7 36 34 5d
# policy digest length 32
# cd 1b 24 26 fe 10 08 6c 52 35 85 94 22 a0 59 69
# 33 4b 88 47 82 0d 0b d9 8c 43 1f 7f f7 36 34 5d
# policy digest:
# cd1b2426fe10086c5235859422a05969334b8847820d0bd98c431f7ff736345d
# The authorizing signer signs the PCR white list, here just PCR 16 extended with aaa
# PCR 16 is the resettable debug PCR, convenient for development
echo ""
echo "PolicyAuthorizeNV -> PolicyAuthorize -> PolicyPCR"
echo ""
# Initial provisioning (NV Index)
echo "NV Define Space"
${PREFIX}nvdefinespace -ha 01000000 -hi p -hia p -sz 34 +at wst +at ar > run.out
checkSuccess $?
echo "Write algorithm ID into NV index 01000000"
${PREFIX}nvwrite -ha 01000000 -hia p -off 0 -if policies/sha256.bin > run.out
checkSuccess $?
echo "Write the NV index at offset 2 with policy authorize and the Name of the CA signing key"
${PREFIX}nvwrite -ha 01000000 -hia p -off 2 -if policies/policyauthorizesha256.bin > run.out
checkSuccess $?
echo "Lock the NV Index"
${PREFIX}nvwritelock -ha 01000000 -hia p
checkSuccess $?
echo "Read the NV Index Name to be used above in Policy"
${PREFIX}nvreadpublic -ha 01000000 -ns > run.out
checkSuccess $?
# Initial provisioning (Sealed Data)
echo "Create a sealed data object"
${PREFIX}create -hp 80000000 -nalg sha256 -bl -kt f -kt p -opr tmppriv.bin -opu tmppub.bin -pwdp sto -uwa -if msg.bin -pol policies/policyauthorizenv-unseal.bin > run.out
checkSuccess $?
# Once per new PCR approved values, signer authorizing PCRs in policysha256.bin
echo "Openssl generate and sign aHash (empty policyRef) ${HALG}"
openssl dgst -sha256 -sign policies/rsaprivkey.pem -passin pass:rrrr -out pssig.bin policies/policypcr16aaasha256.bin > run.out 2>&1
echo " INFO:"
# Once per boot, simulating setting PCRs to authorized values, lock
# the NV index, which is unloaded at reboot to permit platform auth to
# roll the authorized signing key
echo "Lock the NV Index"
${PREFIX}nvwritelock -ha 01000000 -hia p
checkSuccess $?
echo "PCR 16 Reset"
${PREFIX}pcrreset -ha 16 > run.out
checkSuccess $?
echo "Extend PCR 16 to correct value"
${PREFIX}pcrextend -halg sha256 -ha 16 -if policies/aaa > run.out
checkSuccess $?
# At each unseal, or reuse the ticket tkt.bin for its lifetime
echo "Load external just the public part of PEM authorizing key sha256 80000001"
${PREFIX}loadexternal -hi p -halg sha256 -nalg sha256 -ipem policies/rsapubkey.pem -ns > run.out
checkSuccess $?
echo "Verify the signature to generate ticket 80000001 sha256"
${PREFIX}verifysignature -hk 80000001 -halg sha256 -if policies/policypcr16aaasha256.bin -is pssig.bin -raw -tk tkt.bin > run.out
checkSuccess $?
# Run time unseal
echo "Start a policy session"
${PREFIX}startauthsession -se p -halg sha256 > run.out
checkSuccess $?
echo "Policy PCR, update with the correct PCR 16 value"
${PREFIX}policypcr -halg sha256 -ha 03000000 -bm 10000 > run.out
checkSuccess $?
echo "Policy get digest - should be policies/policypcr16aaasha256.bin"
${PREFIX}policygetdigest -ha 03000000 > run.out
checkSuccess $?
# policyauthorize process
echo "Policy authorize using the ticket"
${PREFIX}policyauthorize -ha 03000000 -appr policies/policypcr16aaasha256.bin -skn ${TPM_DATA_DIR}/h80000001.bin -tk tkt.bin > run.out
checkSuccess $?
echo "Get policy digest, should be policies/policyauthorizesha256.bin"
${PREFIX}policygetdigest -ha 03000000 > run.out
checkSuccess $?
echo "Flush the authorizing public key"
${PREFIX}flushcontext -ha 80000001 > run.out
checkSuccess $?
echo "Policy Authorize NV against NV Index 01000000"
${PREFIX}policyauthorizenv -ha 01000000 -hs 03000000 > run.out
checkSuccess $?
echo "Get policy digest, should be policies/policyauthorizenv-unseal.bin intermediate"
${PREFIX}policygetdigest -ha 03000000 > run.out
checkSuccess $?
echo "Policy command code - unseal"
${PREFIX}policycommandcode -ha 03000000 -cc 0000015e > run.out
checkSuccess $?
echo "Get policy digest, should be policies/policyauthorizenv-unseal.bin final"
${PREFIX}policygetdigest -ha 03000000 > run.out
checkSuccess $?
echo "Load the sealed data object"
${PREFIX}load -hp 80000000 -ipr tmppriv.bin -ipu tmppub.bin -pwdp sto > run.out
checkSuccess $?
echo "Unseal the data blob"
${PREFIX}unseal -ha 80000001 -of tmp.bin -se0 03000000 1 > run.out
checkSuccess $?
echo "Verify the unsealed result"
diff msg.bin tmp.bin > run.out
checkSuccess $?
echo "Flush the sealed object"
${PREFIX}flushcontext -ha 80000001 > run.out
checkSuccess $?
echo "Flush the policy session"
${PREFIX}flushcontext -ha 03000000 > run.out
checkSuccess $?
echo "NV Undefine Space"
${PREFIX}nvundefinespace -hi p -ha 01000000 > run.out
checkSuccess $?
# cleanup
rm -f tmppriv.bin
rm -f tmppub.bin
| true
|
cefd5ab95ac2c80e2ae98d28b4f9d790396ccf84
|
Shell
|
landtechnologies/docker-ci-images
|
/kubernetes/install_kube_iam_user.sh
|
UTF-8
| 3,216
| 3.796875
| 4
|
[] |
no_license
|
#! /bin/bash
# Installs and uses a kubectl context which authenticates with the kubernetes cluster using IAM
# Args:
# --cluster <cluster> - REQUIRED
#   Will set up the user and context for the given cluster
#   Otherwise will use whatever cluster is in the current context
# --aws-profile <aws_profile> - OPTIONAL
#   Will always use the profile with this name (in your aws creds) for authenticating with the cluster
#   Otherwise uses the current profile
# --role <role> - OPTIONAL
#   The role to assume on AWS. Defaults to 'k8-admin' if not provided
# --client-authentication-api-version <client_authentication_api_version> - OPTIONAL
#   The api version to set for client.authentication.k8s.io. Defaults to 'v1alpha1' if not provided
set -eo pipefail

role="k8-admin" # default but will be deprecated once cluster roles are set up
client_authentication_api_version="v1alpha1"
aws_profile_env=null # literal YAML 'null' when no profile is forced

# Paired-flag parser: each recognised --flag reads its value from the
# following positional parameter ($2); the trailing shift walks the list.
for var in "$@"; do
  case "$var" in
    --aws-profile)
      aws_profile="$2"
      ;;
    --cluster)
      cluster="$2"
      ;;
    --role)
      role="$2"
      ;;
    --client-authentication-api-version)
      client_authentication_api_version="$2"
      ;;
  esac
  shift
done

if [ -z ${cluster+x} ]; then
  echo "Cluster name (ie --cluster-name) not provided and is required"
  exit 1
fi

# Fetch the cluster CA bundle from credstash.
# Fix: the failure message previously interpolated an undefined $credstash_key.
credstash_key="k8/ca-data/$cluster"
echo "Getting cluster ca data from credstash..."
cluster_ca_data=$(credstash get "$credstash_key" || echo "")
if [ "$cluster_ca_data" == "" ]; then
  echo "Could not get cluster ca data from credstash. Please ensure the ca data has been added to credstash under the key $credstash_key"
  exit 1
fi

if [ ! -f ~/.kube/config ]; then
  echo "$HOME/.kube/config not found. Generating one..."
  mkdir -p ~/.kube
  kubectl config view >~/.kube/config
fi

if [ -n "$aws_profile" ]; then
  echo "Using defined aws profile '$aws_profile'..."
  export AWS_PROFILE="$aws_profile"
  # NOTE(review): YAML indentation reconstructed to nest under 'env:' below;
  # verify against a working kubeconfig.
  aws_profile_env="
        - name: AWS_PROFILE
          value: $aws_profile"
fi

aws_account_id="$(aws sts get-caller-identity --output text --query 'Account')"
if [ -z "$aws_account_id" ]; then
  echo "Could not find valid AWS account ID for the provided profile (profile: \"$AWS_PROFILE\"). Please try again using valid AWS profile."
  exit 1
fi

temp_user="$(mktemp)"
temp_config="$(mktemp)"
# Always remove scratch files, even if a later command fails under set -e.
cleanup() { rm -f -- "$temp_user" "$temp_config"; }
trap cleanup EXIT

# kubeconfig 'users' stanza using aws-iam-authenticator to mint tokens.
echo "
users:
- name: $cluster.iam
  user:
    exec:
      apiVersion: client.authentication.k8s.io/$client_authentication_api_version
      args:
        - token
        - -i
        - $cluster
        - -r
        - arn:aws:iam::${aws_account_id}:role/$role
      command: aws-iam-authenticator
      env: $aws_profile_env" >"$temp_user"

echo "Merging generated config with defined config using KUBECONFIG var" # https://stackoverflow.com/a/56894036
export KUBECONFIG=~/.kube/config:$temp_user
kubectl config view --raw >"$temp_config"
mv "$temp_config" ~/.kube/config
unset KUBECONFIG

echo "Setting additional context values..."
kubectl config set-cluster "$cluster" --server "https://api.$cluster"
kubectl config set "clusters.$cluster.certificate-authority-data" "$cluster_ca_data"
kubectl config set-context "$cluster" --user "$cluster.iam" --cluster "$cluster"

echo "Using the newly generated context..."
kubectl config use-context "$cluster"
| true
|
c7a0df0152eeffc86e56bac04fe4004c1d0f2f40
|
Shell
|
wenxueliu/code_clips
|
/shell/network/eth_affinity
|
UTF-8
| 310
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# count.sh
#
# For each of N CPU cores, print the single-core affinity bitmask in
# uppercase hex -- the format written to /proc/irq/*/smp_affinity.
#
# Usage: count.sh <cpu-core-count>
echo "count cpu hex output"
if [ $# -ne 1 ]; then
    # Fix: the original printed "$1 is cpu core number" with $1 empty.
    echo "usage: $0 <cpu core number>" >&2
    exit 1
fi
ccn=$1
echo "Print eth0 cpu affinity"
for ((i = 0; i < ccn; i++)); do
    echo "---------------------"
    echo "cpu core $i is affinity"
    affinity=$((1 << i))
    # printf is a shell builtin; no runtime dependency on bc for hex output.
    printf '%X\n' "$affinity"
done
| true
|
31e76725c022ac7c5864649b565f01f1ddad5d2d
|
Shell
|
jackboter13/Top-5-Bootloader-Themes
|
/run_linter.sh
|
UTF-8
| 1,371
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#/**
# * TangoMan shellcheck linter
# *
# * @license MIT
# * @author "Matthias Morin" <mat@tangoman.io>
# */
# ANSI-colored output helpers: each prints its arguments wrapped in a fixed
# SGR style sequence and resets with "\033[0m" before the trailing newline.
# NOTE(review): the arguments pass through 'echo -e', so backslash escapes
# inside them are also interpreted -- confirm callers never pass raw data.
function echo_title() { echo -ne "\033[1;44;37m${*}\033[0m\n"; }
function echo_caption() { echo -ne "\033[0;1;44m${*}\033[0m\n"; }
function echo_bold() { echo -ne "\033[0;1;34m${*}\033[0m\n"; }
function echo_danger() { echo -ne "\033[0;31m${*}\033[0m\n"; }
function echo_success() { echo -ne "\033[0;32m${*}\033[0m\n"; }
function echo_warning() { echo -ne "\033[0;33m${*}\033[0m\n"; }
function echo_secondary() { echo -ne "\033[0;34m${*}\033[0m\n"; }
function echo_info() { echo -ne "\033[0;35m${*}\033[0m\n"; }
function echo_primary() { echo -ne "\033[0;36m${*}\033[0m\n"; }
# echo_error prefixes a bold red "error:" label; echo_label/echo_prompt end
# with a tab / space instead of a newline so the caller can continue the line.
function echo_error() { echo -ne "\033[0;1;31merror:\033[0;31m\t${*}\033[0m\n"; }
function echo_label() { echo -ne "\033[0;1;32m${*}:\033[0m\t"; }
function echo_prompt() { echo -ne "\033[0;36m${*}\033[0m "; }
clear
echo_title ' ############################## '
echo_title ' # TangoMan Shellcheck Linter # '
echo_title ' ############################## '
echo

# Bail out early with install instructions when shellcheck is missing.
if [ ! -x "$(command -v shellcheck)" ]; then
    echo_error "\"$(basename "${0}")\" requires shellcheck, try: 'sudo apt-get install -y shellcheck'"
    exit 1
fi

# Fix: NUL-delimited pipeline so file names containing spaces or other
# special characters survive sort and xargs (the old 'sort -t '\0' -n'
# field options were ineffective on path names anyway).
echo_info "find . -name '*.sh' -print0 | sort -z | xargs -0 shellcheck"
find . -name '*.sh' -print0 | sort -z | xargs -0 shellcheck
| true
|
78827b0d5a086f99482e42c0dd0ffe5079fa9a86
|
Shell
|
KaOSx/apps
|
/konsole6/PKGBUILD
|
UTF-8
| 1,274
| 2.75
| 3
|
[] |
no_license
|
# include global config
# (KaOS build-system conventions: provides ${current_repo}, ${_arch}, ${_kdever})
source ../_buildscripts/${current_repo}-${_arch}-cfg.conf

pkgname=konsole6
_pkgname=konsole
# pkgver tracks the repo-wide KDE release version from the sourced config.
pkgver=${_kdever}
pkgrel=1
pkgdesc="KDE's terminal emulator"
arch=('x86_64')
url='https://konsole.kde.org/'
license=('LGPL')
depends=('kbookmarks6' 'kconfig6' 'kconfigwidgets6' 'kcoreaddons6' 'kcrash6'
'kglobalaccel6' 'kguiaddons6' 'kdbusaddons6' 'ki18n6' 'kiconthemes6' 'kio6'
'knewstuff6' 'knotifications6' 'knotifyconfig6' 'kparts6' 'kpty6' 'kservice6'
'ktextwidgets6' 'kwidgetsaddons6' 'kwindowsystem6' 'kxmlgui6' 'icu')
makedepends=('extra-cmake-modules6' 'kdoctools6')
# Building from the live git repository instead of the release tarball,
# hence the 'SKIP' checksum below.
#source=("${_mirror}/${_pkgname}-${pkgver}.tar.xz"
source=("git+https://invent.kde.org/utilities/${_pkgname}.git")
#md5sums=(`grep ${_pkgname}-${_kdever}.tar.xz ../kde-sc.md5 | cut -d" " -f1`
md5sums=('SKIP')
# Pre-build fixups run by makepkg inside $srcdir; currently no patches apply.
prepare() {
  cd "${_pkgname}"
  #patch -p1 -i ${srcdir}/0decf4e30acdee6324197b0499cb80e4bc7f835a.diff
  #sed -e 's|EQUAL "5"|EQUAL "6"|g' -i desktop/CMakeLists.txt
}
# Out-of-source CMake configure + build against Qt6/KF6.
build() {
  cmake -B build -S "${_pkgname}" \
    -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_INSTALL_PREFIX=/usr \
    -DKDE_INSTALL_LIBDIR=lib \
    -DBUILD_TESTING=OFF \
    -DQT_MAJOR_VERSION=6
  cmake --build build
}
# Install the built tree into the packaging root; makepkg supplies $pkgdir.
package() {
  DESTDIR=${pkgdir} cmake --install build
}
| true
|
d2f06db5196c146e67189e5bd30dcde34e2ccd51
|
Shell
|
giri101995/SSN_TNPOLICE_ASSIGNMENT_DAY1
|
/assignment3.sh
|
UTF-8
| 618
| 3.578125
| 4
|
[] |
no_license
|
# Interactive menu over the flat-file employee database 'database.txt'.
echo "Choose any one option"
select var in "List the records" "Search for an employee" "Delete an employee" "Quit"
do
  case $var in
  "List the records")
    cat database.txt
    ;;
  "Search for an employee")
    echo "Enter the name of the Employee"
    read -r emp
    # Fix: quote the pattern so multi-word names work; '--' guards names
    # beginning with '-'.
    grep -h -- "$emp" database.txt
    ;;
  "Delete an employee")
    echo "Enter the name of employee to be deleted from the database"
    read -r delemp
    grep -v -- "$delemp" database.txt > newdatabase.txt
    # mv instead of cp so the temporary file does not linger behind.
    mv newdatabase.txt database.txt
    echo "Employee named " "$delemp" "has been deleted successfully"
    cat database.txt
    ;;
  "Quit")
    break
    ;;
  esac
done
| true
|
234ff89457d7f6e5ddf14ef1c61c6353c3910d08
|
Shell
|
YueErro/ModernRobotics
|
/CoppeliaSim_Edu_V4_1_0_Ubuntu18_04/programming/bluezero/include/spotify-json/.travis/install_linux.sh
|
UTF-8
| 1,366
| 3.75
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"JSON",
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/bash
# All build-time dependencies are staged under <Travis build dir>/deps.
DEPS_DIR="${TRAVIS_BUILD_DIR}/deps"
mkdir -p ${DEPS_DIR} && cd ${DEPS_DIR}
# Download, unpack and build Boost into ${DEPS_DIR}/boost, then export
# BOOST_ROOT for the subsequent CMake configure step.
function install_boost {
# Only build the libraries the test suite needs.
BOOST_LIBRARIES="chrono,system,test"
BOOST_VERSION="1.62.0"
# The SourceForge tarball name uses underscores (boost_1_62_0).
BOOST_URL="https://sourceforge.net/projects/boost/files/boost/${BOOST_VERSION}/boost_${BOOST_VERSION//\./_}.tar.gz"
BOOST_DIR="${DEPS_DIR}/boost"
echo "Downloading Boost ${BOOST_VERSION} from ${BOOST_URL}"
mkdir -p ${BOOST_DIR} && cd ${BOOST_DIR}
# Stream-extract, dropping the top-level boost_X_Y_Z/ directory.
wget -O - ${BOOST_URL} | tar --strip-components=1 -xz -C ${BOOST_DIR} || exit 1
# NOTE(review): a bootstrap/b2 failure is not checked here -- confirm
# whether that best-effort behaviour is intentional.
./bootstrap.sh --with-libraries=${BOOST_LIBRARIES} && ./b2
export BOOST_ROOT=${BOOST_DIR}
}
# Download a prebuilt CMake binary release into ${DEPS_DIR}/cmake and
# prepend its bin/ directory to PATH.
function install_cmake {
CMAKE_VERSION="3.6.2"
CMAKE_URL="https://cmake.org/files/v3.6/cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz"
CMAKE_DIR="${DEPS_DIR}/cmake"
CMAKE_BIN="${CMAKE_DIR}/bin"
echo "Downloading CMake ${CMAKE_VERSION} from ${CMAKE_URL}"
mkdir -p ${CMAKE_DIR}
# --no-check-certificate: legacy CI image trust store; stream-extract
# while dropping the top-level release directory.
wget --no-check-certificate -O - ${CMAKE_URL} | tar --strip-components=1 -xz -C ${CMAKE_DIR} || exit 1
export PATH=${CMAKE_BIN}:${PATH}
}
# Install valgrind from the distribution package repository.
function install_valgrind {
sudo apt-get update -qq
sudo apt-get install -qq valgrind
}
# Run all installers, then report where each dependency landed.
install_boost # at least version 1.60
install_cmake # at least version 3.2
install_valgrind # at least version 3.7

# Fix: corrected "dependecies" typo in the status message.
echo "Installed build dependencies."
echo " - Boost: ${BOOST_ROOT}"
echo " - CMake: ${CMAKE_BIN}"
echo " - Valgrind"
| true
|
5ee750ba9b9ded18fddc3d8467cf5cfed2634108
|
Shell
|
norcams/himlar
|
/profile/templates/application/builder/windows_build_script.erb
|
UTF-8
| 1,056
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# ERB template: <%= @buildhost %> / <%= @version %> are substituted by the
# configuration management layer before this script runs.
# Opens an SSH session to the build host and runs the quoted heredoc there;
# the 'ENDSSH' delimiter is quoted, so nothing expands locally.
ssh -tt -o "StrictHostKeyChecking no" -i ~/.ssh/windowsbuilder_rsa windowsbuilder@<%= @buildhost %> << 'ENDSSH'
#!/bin/bash
cd /var/lib/libvirt/images/windows_builder/packer-windows
# Check for already running process
if ! ps axu | grep windows-<%= @version %> | grep -v grep; then
echo "We are not already building"
else
echo "We are already building"
exit 1
fi
# If exist, remove build files
if test -d ./builds/packer-windows-<%= @version %>-x86_64-qemu; then
echo "Build file exists - removing..."
rm -rf ./builds/packer-windows-<%= @version %>-x86_64-qemu
fi
# Build the Windows image
PACKER_CACHE_DIR="./packer_cache" PACKER_LOG=1 /usr/bin/packer init -upgrade
PACKER_CACHE_DIR="./packer_cache" PACKER_LOG=1 /usr/bin/packer build --only=qemu.vm -var-file=os_pkrvars/windows/windows-<%= @version %>-x86_64.pkrvars.hcl ./packer_templates/
result=$?
# Propagate the packer exit status back through the SSH session.
if test $result -eq 0
then
echo "Windows image built successfully."
else
echo "Windows build failed with status $result"
exit $result
fi
exit
ENDSSH
| true
|
588e80a9792be662b15f9e17f9a7ab0694277e1c
|
Shell
|
ishanjoshi02/Operating-Systems
|
/# 1/Sample Scripts/p6.sh
|
UTF-8
| 229
| 3.3125
| 3
|
[] |
no_license
|
# Report which of the read/write/execute permission bits the current user
# holds on an interactively supplied file.
echo "Enter file name to check permission"
# Fix: -r keeps backslashes literal; quoting handles names with spaces.
read -r file1
if test -r "$file1"
then
echo "Read permission is set"
fi
if test -w "$file1"
then
echo "Write permission is set"
fi
if test -x "$file1"
then
echo "Execute permission is set"
fi
| true
|
2d545f025f8da83fc8f70bc8e59460fcd31095da
|
Shell
|
aerisweather/sensu-web
|
/build-app
|
UTF-8
| 406
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Two-phase build: on the host, re-run this very script inside a node:16
# container (IN_NODE_DOCKER=1) to produce the JS bundle, then build the
# final docker image tagged with the short git revision.
script_dir=$(dirname "$0")
cd "$script_dir" || exit 1

if [[ "$IN_NODE_DOCKER" != '1' ]]; then
git_rev=$(git rev-parse --short HEAD)
# --user $(id -u): build artifacts stay owned by the invoking user.
docker run \
--user "$(id -u)" \
-e IN_NODE_DOCKER=1 \
-v "$PWD:/app" \
--rm -it \
--entrypoint /app/build-app node:16.0.0
docker build -t "aerisweather/sensu-web:$git_rev" .
else
# Inside the container: clean and run the production bundle build.
rm -rf build/
yarn run build --mode production
fi
| true
|
20c58260bba98e3c11971c0699b440bdb39707fe
|
Shell
|
mastersign/mastersign-datascience
|
/auto/doc-build.sh
|
UTF-8
| 892
| 4.03125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Bash script for building the HTML output of the Sphinx documentation

# Work from the doc/ directory next to this script; abort if it is missing.
# Fix: quote "$0" and guard the cd so a failure cannot run sphinx elsewhere.
cd "$(dirname "$0")/../doc" || exit 1

source_dir=source
build_dir=build
# Ensure a Python-provided CLI tool is on PATH; exit the script with pip
# install hints otherwise.
#   $1 - command to look for     $2 - pip package providing it
#   $3 - human-readable title    $4 - project URL
function assert_python_cli() {
  local command="$1"
  local package="$2"
  local title="$3"
  local url="$4"
  # Fix: use the assigned "$command" (the original mixed $1 and $command)
  # and the portable 'command -v' instead of the deprecated 'which'.
  if ! command -v "$command" >/dev/null 2>&1; then
    echo "The command '$command' was not found in PATH."
    echo ""
    echo "Install $title with:"
    echo ""
    echo "pip3 install --user $package"
    echo ""
    echo "Or grab it from $url"
    exit 1
  fi
}
# Fail fast with install instructions when Sphinx is missing.
assert_python_cli sphinx-build sphinx Sphinx http://sphinx-doc.org/

# Builder defaults to html; any sphinx-build -M builder name (latexpdf,
# epub, ...) may be passed as the first argument.
if [ "$1" == "" ]; then
format="html"
else
format="$1"
fi

# Sphinx warns when these directories are absent, so pre-create them.
if ! [ -d "$source_dir/_static" ]; then mkdir "$source_dir/_static"; fi
if ! [ -d "$source_dir/_templates" ]; then mkdir "$source_dir/_templates"; fi

# exec replaces this shell with sphinx-build; $SPHINXOPTS adds extra flags.
exec sphinx-build -M $format "$source_dir" "$build_dir" $SPHINXOPTS
| true
|
84e4708673a18bc8820bf784629779805cbf1396
|
Shell
|
silky/bramble
|
/doc/drawdata.sh
|
UTF-8
| 1,181
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Emit a Graphviz digraph of the type dependencies in the RAML Haskell
# module: one node per data/newtype/type declaration, one edge per field
# type it references. Output goes to stdout; pipe into 'dot'.

MODEL="src/Bramble/RAML.hs"

echo 'digraph {'

# Collect data and newtype declaration names (first word after keyword),
# skipping 'newtype Schema'.
data="$(grep '^data' $MODEL | sed 's/data //; s/ .*//')"
data="$data $(grep '^newtype' $MODEL | grep -v 'newtype Schema' | sed 's/newtype //; s/ .*//')"

for i in $data
do
echo "$i;"
# Scan up to 6 lines after the declaration (until the first blank line),
# keep '::' field lines, and strip container/primitive noise so only
# user-defined type names remain as edge targets.
targets="$(grep "data $i .*=" $MODEL -A 6 \
| sed '/^$/,$d' \
| grep '::' \
| sed 's/.*:: //; s/Maybe //; s/J.Value//; s/T.Text//; s/String//; s/ .*//; s/\[//g; s/\]//g')"
targets="$targets $(grep "newtype $i .*=" $MODEL -A 6 \
| sed '/^$/,$d' \
| grep '::' \
| sed 's/.*:: //; s/Maybe //; s/J.Value//; s/T.Text//; s/String//; s/ .*//; s/\[//g; s/\]//g')"
for j in $targets
do
echo "$i -> $j;"
done
done

# Same for type synonyms (excluding the 'Lookup' alias).
types="$(grep '^type' $MODEL | grep -v '^type Lookup' | sed 's/type //; s/ .*//')"
for i in $types
do
echo "$i;"
# NOTE(review): [[:<:]] / [[:>:]] word boundaries are BSD sed syntax;
# GNU sed needs \b instead -- confirm the intended platform.
targets="$(grep "type $i .*=" $MODEL \
| sed 's/^.*= //' \
| sed 's/[()]//g' \
| sed 's/Maybe//g' \
| sed 's/[[:<:]]M.Map[[:>:]]/ /g' \
| sed 's/[[:<:]]J.Value[[:>:]]/ /g' \
| sed 's/[[:<:]]T.Text[[:>:]]/ /g' \
| sed 's/[[:<:]]String[[:>:]]/ /g' \
| sed 's/[[:<:]]Int[[:>:]]/ /g' \
| sed 's/[[:<:]]Lookup[[:>:]]/ /g')"
for j in $targets
do
echo "$i -> $j;"
done
done

echo '}'
|
ffff77420b99fa992eb84032523276f3c12b22bb
|
Shell
|
lifepatch/website-backup-script
|
/website_backup.sh
|
UTF-8
| 1,132
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# user, password, gzip file out
# Dump all databases from the local MySQL server, gzip-compressed into $3.
# NOTE(review): all mysqldump output is redirected into the gzip file, so
# $param is always empty and the echo prints a blank line -- confirm
# whether a status capture was intended here.
function backup_mysql(){
param=`mysqldump --host=127.0.0.1 -A --user="${1}" --password="${2}" --all-databases | gzip > "${3}"`
echo ${param}
#echo "mysqldump --host=127.0.0.1 -A --user=${bak_mysql_user} --password=${bak_mysql_pass} --all-databases | gzip > $bak_dir/$bak_mysql_file"
}
# Print the tar command for archiving $2 into $1.
# NOTE(review): this only echoes the command, it does not run tar --
# looks like a dry-run/debug stub; confirm before relying on it for
# actual file backups.
function backup_files()
{
echo "tar -czvf '${1}' '${2}'"
#echo "tar -czvf $bak_dir/$bak_file $bak_home/$bak_target"
}
# Back up every site listed in the global 'sites' array.
# Assumes each entry is "folder|sql_user|sql_pass" and that the globals
# bak_dir, bak_home and bak_url are set by the caller -- TODO confirm.
function backup_sites()
{
for element in "${sites[@]}"
do
# Split the entry on '|' (IFS also includes space) into array[0..2].
IFS='| ' read -a array <<< "$element"
echo "backup: ${array[0]}"
source_folder=${array[0]}
source_sql_user=${array[1]}
source_sql_pass=${array[2]}
# Flatten the folder path into a file-name-safe token (slashes -> '_').
bak_target_clean=$(echo "${source_folder}" | sed 's#/#\_#g')
bak_mysql_file=${bak_target_clean}_sql_$(date +%Y%m%d).sql.gz
bak_file=${bak_target_clean}_$(date +%Y%m%d).tar.gz
backup_mysql $source_sql_user $source_sql_pass $bak_dir/$bak_mysql_file
backup_files $bak_dir/$bak_file $bak_home/$source_folder
echo "backup for $bak_target_clean"
echo "sql: $bak_url/$bak_mysql_file"
echo "files: $bak_url/$bak_file"
echo ""
done
}
|
ca0222a8b68771d7f118b7c856fdbd57020fb9f1
|
Shell
|
Maryam81609/FMKe
|
/runBenchLocal.sh
|
UTF-8
| 2,566
| 3.859375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run the FMKe benchmark against an Antidote instance. The first argument
# selects how Antidote is provided:
#   (none)   - expect Antidote already listening on PB port 8087
#   docker   - start (or create) the 'antidote' docker container
#   github   - clone/build Antidote from GitHub under _build/antidote
#   <path>   - build and run Antidote from an existing checkout at <path>
set -e

# Resolve the absolute directory of this script.
pushd `dirname $0` > /dev/null
SCRIPTPATH=`pwd -P`
popd > /dev/null

# Non-empty ANTIDOTE_FOLDER means this script is responsible for
# building, starting and later stopping Antidote itself.
ANTIDOTE_FOLDER=""

if [ "$#" -eq 0 ]; then
# Assume that Antidote is already running
if nc -z localhost 8087; then
echo "Antidote is running"
else
echo "Antidote is not running on PB port 8087"
echo "Start Antidote manually, or start the script with a different option."
exit 1
fi
elif [ $1 = "docker" ]; then
# load antidote from docker:
if docker inspect antidote; then
# start existing docker container:
docker start antidote
else
# setup new antidote docker container:
docker run -d --name antidote -p "4368:4368" -p "8085:8085" -p "8087:8087" -p "8099:8099" -p "9100:9100" -e NODE_NAME=antidote@127.0.0.1 peterzel/antidote_map_rr
fi
elif [ $1 = "github" ]; then
# clone antidote from github
ANTIDOTE_FOLDER=_build/antidote
if cd $ANTIDOTE_FOLDER; then
echo "Using antidote clone in $ANTIDOTE_FOLDER"
# already cloned
else
git clone https://github.com/SyncFree/antidote $ANTIDOTE_FOLDER
cd $ANTIDOTE_FOLDER
fi
# use fixed branch
git checkout crdt-lib-map_rr
cd $SCRIPTPATH
else
# use provided path to antidote
ANTIDOTE_FOLDER=$1
fi

# Build a fresh Antidote release and start it (github/path modes only).
if [ -n "$ANTIDOTE_FOLDER" ]; then
cd $ANTIDOTE_FOLDER
# clean last release
rm -rf _build/default/rel/
make rel
_build/default/rel/antidote/bin/env start
fi
cd $SCRIPTPATH

# compile FMK:
echo "Compiling FMK"
make all

# Start FMK:
echo "Starting FMK"
_build/default/rel/fmk/bin/env start

# wait for FMK to start (TODO better way?)
echo "Waiting for FMK to start"
sleep 2

# Fill database with testdata:
echo "Filling Antidote with testdata"
# '|| true': population failures (e.g. data already present) are tolerated.
make populate || true

# Start benchmark
echo "Starting Benchmark"
make bench
echo "Benchmark done"

# Stop FMK
echo "Stopping FMK"
_build/default/rel/fmk/bin/env stop

if [ -n "$ANTIDOTE_FOLDER" ]; then
# Stop Antidote
echo "Stopping Antidote"
cd $ANTIDOTE_FOLDER
_build/default/rel/antidote/bin/env stop
cd $SCRIPTPATH
fi

# Display the summary
cat tests/current/summary.csv
# Columns 4/5 of the second CSV line are the success and error counts for
# the first sampling window; the awk field separator strips quoting.
ERRORS=`awk -F "\"*, \"*" '{print $5}' tests/current/summary.csv | sed '2q;d'`
SUCCESS=`awk -F "\"*, \"*" '{print $4}' tests/current/summary.csv | sed '2q;d'`
echo "in the first 10 seconds $SUCCESS successful requests were done and $ERRORS failed."
# Sanity thresholds: fail the CI run on too many errors / too few successes.
if [ "$ERRORS" -gt 100 ]; then
echo "too many errors"
exit 1
fi
if [ "$SUCCESS" -lt 500 ]; then
echo "not enough successful requests"
exit 1
fi
|
0fb2034b8310ef9649f1ec608dbd0958bd3dc485
|
Shell
|
nathan8299/OSX_MediaCenter_MountainLion
|
/scripts/install_tmux.sh
|
UTF-8
| 1,046
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install tmux via Homebrew and seed ~/.tmux with the bundled config files.

echo "#------------------------------------------------------------------------------"
echo "# Installing Tmux"
echo "#------------------------------------------------------------------------------"

brew install tmux

# Create the expected ~/.tmux layout (no-op when already present).
for subdir in conf tmthemes segment; do
  [ -d ~/.tmux/"$subdir" ] || mkdir -p ~/.tmux/"$subdir"
done

# Copy each shipped config only when it has not been installed before.
for conf_file in tmux_bash.conf tmux.bindings.conf tmux.mouse.conf; do
  [ -f ~/.tmux/conf/"$conf_file" ] || cp -v conf/tmux/"$conf_file" ~/.tmux/conf/
done
#[ -f ~/.tmux/conf/tmux.powerline.conf ] || cp -v conf/tmux/tmux.powerline.conf ~/.tmux/conf/
#[ -f ~/.tmux/conf/tmux_powerline.conf ] || cp -v conf/tmux/tmux_powerline.conf ~/.tmux/conf/

echo "#------------------------------------------------------------------------------"
echo "# Install Tmux - Complete"
echo "#------------------------------------------------------------------------------"
|
2fb55f82459fa4b2f44facf23c0b084990409810
|
Shell
|
caqg/linux-home
|
/cmd/cprod
|
UTF-8
| 682
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/sh
# cartesian product of two relations presented as files
#
# Usage: cprod [-d <delimiter>] file1 file2
# Emits every pairing "<row-from-file1><delimiter><row-from-file2>".

usage="usage: $(basename "$0") [-d <delimiter>] file1 file2"

# Fix: the comment said tab is the default, but a space had crept in.
delimiter="$(printf '\t')" # tab is default

# Fix: POSIX getopts replaces legacy 'getopt', whose unquoted $* re-split
# broke arguments containing spaces and whose failure status was masked
# by 'set --'.
while getopts 'd:' opt; do
	case $opt in
	d) delimiter=$OPTARG ;;
	*)
		echo >&2 "$usage"
		exit 1
		;;
	esac
done
shift $((OPTIND - 1))

case $# in
2) ;;
*)
	echo >&2 "$usage"
	exit 1
	;;
esac

## Now do the product "by hand" with the obvious quadratic algorithm
## Potential for optimization: The first file is opened only once,
## while the second is opened repeatedly. Is there a point in
## choosing among the two possible orders?
while IFS= read -r r1; do
	while IFS= read -r r2; do
		printf '%s\n' "$r1$delimiter$r2"
	done < "$2"
done < "$1"
|
6b4b0d32b90c0d294bda0b762c740772f6883aea
|
Shell
|
ojaswee/dnaQ
|
/summary_report_creator/01_run_from_server_terminal.sh
|
UTF-8
| 1,370
| 2.59375
| 3
|
[] |
no_license
|
# first loginto the server
# NOTE(security): a plaintext password is hard-coded here and will leak via
# shell history and 'ps'; switch to SSH keys or sshpass -f <file>.
# NOTE(review): the commands below appear to be a runbook executed manually
# *after* this interactive ssh login, not piped through it -- confirm.
sshpass -p 'pvamudna1!Q' ssh odhungana@129.207.46.222
# summary report creator to run for server terminal
# make dir in server
mkdir dnaq
cd dnaq
mkdir 01_data
mkdir 03_summary_result
# create bigfile
# Concatenate the cosmic VCF 15 times to build a large test input.
# NOTE(review): these paths start with 'home/...' (relative) while later
# commands use '/home/...' -- presumably a missing leading slash; verify.
for i in {1..15}
do
cat home/odhungana/dnaq/01_data/cosmic.filter.vcf >> home/odhungana/dnaq/01_data/02_files_for_summary_report/bigfile.txt
echo $i
done
# all g1000 file
# Timing comparison: naive vs threaded summary-report implementations.
time python3 /home/odhungana/dnaq/02_summary_report/03_2_naive_method.py /home/odhungana/dnaq/01_data/g1000.filter.vcf /home/odhungana/dnaq/01_data/g1000.filter.vcf /home/odhungana/dnaq/01_data/g1000.filter.vcf
time python3 /home/odhungana/dnaq/02_summary_report/04_threading.py /home/odhungana/dnaq/01_data/g1000.filter.vcf /home/odhungana/dnaq/01_data/g1000.filter.vcf /home/odhungana/dnaq/01_data/g1000.filter.vcf
# multiprocessing from input file
time python3 /home/odhungana/dnaq/02_summary_report/07_multiporcessing_newfunc.py /home/odhungana/dnaq/01_data/cosmic.filter.vcf /home/odhungana/dnaq/summary_report_creator/input_files.txt
# to check if we can create a folder in server
bash /home/odhungana/dnaq/04_pipeline_scripts/01_user_dir_creator.sh -d20190703105347 -u15 -t1 -r2
#to check for parser and merger ---------------------------------------error mariadb cannot connect remotly
bash /home/odhungana/dnaq/04_pipeline_scripts/02_check_queue.sh
| true
|
373c5a3ec0570187110ee203d38f30ebdfbd9e68
|
Shell
|
zgulde/ssl-helper
|
/ssl-in-dev.sh
|
UTF-8
| 2,064
| 4.125
| 4
|
[] |
no_license
|
# this script will 'fake' ssl for a development machine using a self-signed
# certificate.
# This should be run on the development server you want to setup
# this script was built to work with the codeup vagrant box setup,
# but the general principles will apply to any setup
# usage
#     bash ssl-in-dev.sh codeup.dev
# make sure the site is passed on the command line
if [ -z "$1" ]; then
	echo 'Please pass the name of the site you would like to add a ssl '
	echo 'certificate to when invoking the script.'
	exit 1
fi
site=$1
# make sure a configuration file for that site exists
if [[ ! -e /etc/nginx/sites-available/$site ]]; then
	echo 'That site does not exist, please give a valid site'
	exit 1
fi
# check if the server is already listening on 443 and bail out if it already is
# (the marker searched for is the exact directive this script appends below,
# so re-running the script is effectively idempotent)
if grep 'listen 443 ssl' /etc/nginx/sites-available/$site > /dev/null; then
	echo "$site already has ssl setup."
	exit 1
fi
# create a folder for ssl certs if its not there already
sudo mkdir -p /etc/nginx/ssl/$site
# create a private key
# NOTE: openssl output is silenced; failures here will only surface later
sudo openssl genrsa -out /etc/nginx/ssl/$site/$site.key > /dev/null 2>&1
# generate csr without being prompted for input
# (-subj supplies all the fields openssl would otherwise ask interactively)
sudo openssl req -new \
	-key /etc/nginx/ssl/$site/$site.key \
	-out /etc/nginx/ssl/$site/$site.csr \
	-subj "/C=US/ST=Texas/L=SA/O=Codeup/OU=IT Department/CN=$site" > /dev/null 2>&1
# sign the certificate
# self-signed, valid for one year -- browsers will warn, which is fine for dev
sudo openssl x509 -req -days 365 \
	-in /etc/nginx/ssl/$site/$site.csr \
	-signkey /etc/nginx/ssl/$site/$site.key \
	-out /etc/nginx/ssl/$site/$site.crt > /dev/null 2>&1
# modify the nginx config
# append the relevent ssl directives after the server name
# (the shell interpolates $site into the perl substitution; the replacement
# keeps the matched 'server_name' line via $& and appends the ssl directives)
cat /etc/nginx/sites-available/$site |\
	perl -pe 's/server_name '$site';/$&
	listen 443 ssl;
	ssl_certificate \/etc\/nginx\/ssl\/'$site\\/$site'.crt;
	ssl_certificate_key \/etc\/nginx\/ssl\/'$site\\/$site'.key;/' |\
	sudo tee /etc/nginx/sites-available/$site > /dev/null
# for some reason a restart wasn't doing it, so we'll be explicit here
sudo service nginx stop
sudo service nginx start
| true
|
e859126bf857c0822f90edcc9b22263bd71ce880
|
Shell
|
rshipp/pkgbuilds
|
/pigz/PKGBUILD
|
UTF-8
| 855
| 2.59375
| 3
|
[] |
no_license
|
# Maintainer: george <rpubaddr0 {at} gmail [dot] com>
# Contributor: Frank Thieme <frank@fthieme.net>
# Contributor: Laszlo Papp <djszapi2@gmail.com>
# PKGBUILD for pigz, the parallel gzip implementation.
pkgname=pigz
pkgver=2.3
pkgrel=1
pkgdesc='Parallel implementation of the gzip file compressor.'
arch=('i686' 'x86_64')
url='http://www.zlib.net/pigz/'
license=('GPL')
depends=('zlib')
source=("http://www.zlib.net/${pkgname}/${pkgname}-${pkgver}.tar.gz")
md5sums=('042e3322534f2c3d761736350cac303f')

build() {
  cd "${srcdir}/${pkgname}-${pkgver}"
  # Inject the packager's CFLAGS into the upstream Makefile.
  sed -i s/"CFLAGS=".*/"CFLAGS=${CFLAGS}"/g Makefile
  # Append -lm to the link line (upstream omits the math library).
  sed -i 's/$(CC)\(.*\)$/$(CC)\1 -lm/g' Makefile
  make
}

package() {
  cd "${srcdir}/${pkgname}-${pkgver}"
  install -Dm755 pigz "${pkgdir}/usr/bin/pigz"
  # unpigz is just pigz invoked under a different name (like gunzip/gzip)
  ln -s /usr/bin/pigz "${pkgdir}/usr/bin/unpigz"
  install -Dm644 pigz.1 "${pkgdir}/usr/share/man/man1/pigz.1"
  install -Dm644 pigz.pdf "${pkgdir}/usr/share/doc/pigz/pigz.pdf"
}
| true
|
a4b61f15c4666eaa32db46cdb4f49e9085a09371
|
Shell
|
petronny/aur3-mirror
|
/aqualung-alsa/PKGBUILD
|
UTF-8
| 1,499
| 2.59375
| 3
|
[] |
no_license
|
# Maintainer: Robert McCathie <archaur at rmcc dot com dot au>
# Contributor: leepesjee <lpeschier at xs4all dot nl>
# Contributor: fancris3 <fancris3 at aol dot com>
# PKGBUILD for aqualung with only ALSA output support.
_name=aqualung
pkgname="$_name-alsa"
pkgver=0.9beta11
pkgrel=2
pkgdesc="Gapless music player compiled with only ALSA support. Jack, Pulse and OSS are disabled."
arch=('i686' 'x86_64')
url="http://aqualung.factorial.hu/"
license=('GPL')
provides=('aqualung')
conflicts=('aqualung' 'aqualung-svn')
depends=('gtk2' 'libxml2' 'liblrdf' 'libcdio' 'libcddb' 'libsamplerate' 'lua>=5.1.0' \
         'libsndfile' 'flac' 'libvorbis' 'liboggz' 'speex' 'libmad' 'lame' 'libmodplug' \
         'libmpcdec' 'mac' 'wavpack' 'ffmpeg' 'alsa-lib')
makedepends=('gettext')
source=("http://downloads.sourceforge.net/aqualung/$_name-$pkgver.tar.gz"
        'aqualung.desktop')
md5sums=('cfc470e0738e6edf9532af5f92aac959'
         '3c62be5733541593fa13e0e799426ddc')

build() {
  cd "$srcdir/$_name-$pkgver"

  # remove annoying "http://" preset at 'Add URL'
  sed -i 's|(url_entry), "http://"|(url_entry), ""|' "$srcdir/$_name-$pkgver/src/playlist.c"

  ./autogen.sh
  ./configure --prefix=/usr \
    --with-ifp=no \
    --with-pulse=no \
    --with-oss=no \
    --with-jack=no
  make
}

package() {
  cd "$srcdir/$_name-$pkgver"
  # Fixed: installation steps moved out of build() into package();
  # makepkg runs package() under fakeroot, build() is not for installing.
  make DESTDIR="$pkgdir" install
  install -D -m644 "$srcdir/$_name-$pkgver/src/img/icon_64.png" "$pkgdir/usr/share/pixmaps/aqualung.png"
  # Fixed: $startdir is deprecated; aqualung.desktop is listed in source=()
  # and is therefore available in $srcdir.
  install -D -m644 "$srcdir/aqualung.desktop" "$pkgdir/usr/share/applications/aqualung.desktop"
}
| true
|
cce142df8aa41215c0382c104b9af6f9971efcef
|
Shell
|
Lky/laura
|
/rtorrent-svn/PKGBUILD
|
UTF-8
| 970
| 2.796875
| 3
|
[] |
no_license
|
# Maintainer: Lucky <aur.archlinux.org [at] lucky.take0ver [dot] net>
# Contributor: Jonny Gerold <jonny@fsk141.com>
# SVN-snapshot PKGBUILD: the source is checked out inside build() rather
# than being listed in source=(), hence the empty source/md5sums arrays.
pkgname=rtorrent-svn
_pkgname=rtorrent
pkgver=1164
pkgrel=1
pkgdesc="Ncurses BitTorrent client based on libTorrent"
url="http://libtorrent.rakshasa.no"
arch=('i686' 'x86_64')
license=('GPL')
depends=('libtorrent-svn>=1153' 'curl' 'xmlrpc-c-svn>=1894')
makedepends=('subversion' 'cppunit')
conflicts=('rtorrent')
provides=('rtorrent')
install=${_pkgname}.install
source=()
md5sums=()
_svnmod="${_pkgname}"
_svntrunk="svn://rakshasa.no/libtorrent/trunk/${_svnmod}"

build() {
  cd "${srcdir}"

  msg "Connecting to ${_svnmod} SVN server..."
  # Check out the exact revision recorded in pkgver for reproducibility.
  svn co ${_svntrunk} ${_svnmod} -r ${pkgver}
  msg "SVN checkout done or server timeout"

  cd "${_svnmod}"
  msg "Starting make..."

  ./autogen.sh
  # NOTE: '|| return 1' is legacy makepkg error handling kept for
  # compatibility with the era this PKGBUILD targets.
  CXXFLAGS="${CXXFLAGS} -fno-strict-aliasing" \
  ./configure --prefix=/usr --disable-debug --with-xmlrpc-c || return 1
  make || return 1
  make DESTDIR="${pkgdir}" install
}
| true
|
ddb1b193edc58e3a252bf41e9c68d0033caf7ef9
|
Shell
|
lulalachen/boilerplates
|
/run-tests.sh
|
UTF-8
| 656
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the test suites of each boilerplate in sequence; exit non-zero if any
# suite failed. STATUS accumulates the exit codes of the coverage runs.
STATUS=0

echo "======================================================="
echo "       [Test] [es6-node-server] [Processing] 🚀"
echo "======================================================="
# Fixed: guard the cd -- otherwise npm would run in the wrong directory.
cd es6-node-server || exit 1
npm install -s -q
npm run test:coverage
STATUS=$(( STATUS + $? ))

echo "======================================================="
echo "       [Test] [npm-packages] [Processing] 🚀"
echo "======================================================="
cd ../npm-packages || exit 1
npm install -s -q
npm run test:coverage
STATUS=$(( STATUS + $? ))

# Any non-zero suite exit code makes the total non-zero.
if [[ $STATUS -eq 0 ]]; then
  echo "Test Success"
  exit 0
else
  echo "Test Failed"
  exit 1
fi
| true
|
4042734b22ec3e0b9e21bcb7526bd740198b6bb3
|
Shell
|
knative/serving
|
/vendor/knative.dev/hack/presubmit-tests.sh
|
UTF-8
| 12,816
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script for Knative presubmit test scripts.
# See README.md for instructions on how to use it.

# Shared helpers (header, subheader, function_exists, list_changed_files,
# IS_PROW, abort, ...) come from library.sh next to this script.
source $(dirname "${BASH_SOURCE[0]}")/library.sh

# Custom configuration of presubmit tests
readonly PRESUBMIT_TEST_FAIL_FAST=${PRESUBMIT_TEST_FAIL_FAST:-0}

# Extensions or file patterns that don't require presubmit tests.
readonly NO_PRESUBMIT_FILES=(\.png \.gitignore \.gitattributes ^OWNERS ^OWNERS_ALIASES ^AUTHORS \.github/.*)

# Flag if this is a presubmit run or not.
# NOTE(review): JOB_TYPE is presumably injected by Prow -- see library.sh.
(( IS_PROW )) && [[ ${JOB_TYPE} == "presubmit" ]] && IS_PRESUBMIT=1 || IS_PRESUBMIT=0
readonly IS_PRESUBMIT

# List of changed files on presubmit, LF separated.
CHANGED_FILES=""

# Flags that this PR is exempt of presubmit tests.
IS_PRESUBMIT_EXEMPT_PR=0

# Flags that this PR contains only changes to documentation.
IS_DOCUMENTATION_PR=0
# Returns true if PR only contains the given file regexes.
# Parameters: $1 - file regexes, space separated.
function pr_only_contains() {
  # Join the space-separated regexes into a single end-anchored alternation,
  # then succeed only when no changed file escapes it.
  local pattern="\(${1// /\\|}\)$"
  local leftover
  leftover="$(echo "${CHANGED_FILES}" | grep -v "${pattern}")"
  [[ -z "${leftover}" ]]
}
# Initialize flags and context for presubmit tests:
# CHANGED_FILES, IS_PRESUBMIT_EXEMPT_PR and IS_DOCUMENTATION_PR.
# No-op (leaves defaults) outside presubmit runs.
function initialize_environment() {
  CHANGED_FILES=""
  IS_PRESUBMIT_EXEMPT_PR=0
  IS_DOCUMENTATION_PR=0
  (( ! IS_PRESUBMIT )) && return
  # list_changed_files comes from library.sh; PULL_PULL_SHA is presumably
  # set by Prow for the PR head commit -- confirm against library.sh docs.
  CHANGED_FILES="$(list_changed_files)"
  if [[ -n "${CHANGED_FILES}" ]]; then
    echo -e "Changed files in commit ${PULL_PULL_SHA}:\n${CHANGED_FILES}"
    local no_presubmit_files="${NO_PRESUBMIT_FILES[*]}"
    # Exempt the PR when every changed file matches a no-presubmit pattern.
    pr_only_contains "${no_presubmit_files}" && IS_PRESUBMIT_EXEMPT_PR=1
    # A documentation PR must contain markdown files
    if pr_only_contains "\.md ${no_presubmit_files}"; then
      [[ -n "$(echo "${CHANGED_FILES}" | grep '\.md')" ]] && IS_DOCUMENTATION_PR=1
    fi
  else
    header "NO CHANGED FILES REPORTED, ASSUMING IT'S AN ERROR AND RUNNING TESTS ANYWAY"
  fi
  readonly CHANGED_FILES
  readonly IS_DOCUMENTATION_PR
  readonly IS_PRESUBMIT_EXEMPT_PR
}
# Display a pass/fail banner for a test group.
# Parameters: $1 - test group name (e.g., build)
#             $2 - result (0=passed, 1=failed)
function results_banner() {
  local outcome
  if (( $2 == 0 )); then
    outcome="PASSED"
  else
    outcome="FAILED"
  fi
  header "$1 tests ${outcome}"
}
# Run build tests. If there's no `build_tests` function, run the default
# build test runner.
# Optional repo-provided hooks: pre_build_tests, build_tests, post_build_tests.
# step_failed / function_exists / header / subheader come from library.sh.
function run_build_tests() {
  (( ! RUN_BUILD_TESTS )) && return 0
  header "Running build tests"
  local failed=0
  # Run pre-build tests, if any
  if function_exists pre_build_tests; then
    pre_build_tests || { failed=1; step_failed "pre_build_tests"; }
  fi
  # Don't run build tests if pre-build tests failed
  if (( ! failed )); then
    if function_exists build_tests; then
      build_tests || { failed=1; step_failed "build_tests"; }
    else
      default_build_test_runner || { failed=1; step_failed "default_build_test_runner"; }
    fi
  fi
  # Don't run post-build tests if pre/build tests failed
  if (( ! failed )) && function_exists post_build_tests; then
    post_build_tests || { failed=1; step_failed "post_build_tests"; }
  fi
  results_banner "Build" ${failed}
  return ${failed}
}
# Run a build test and report its output as the failure if it fails.
# Parameters: $1 - report name.
#             $2... - command (test) to run.
# Returns success iff the command produced no captured error output.
function report_build_test() {
  local report
  report="$(mktemp)"
  local report_name="$1"
  shift
  local errors=""
  # capture_output / create_junit_xml are library.sh helpers that record
  # the command output and emit a JUnit entry for the CI dashboard.
  capture_output "${report}" "$@" || errors="$(cat "${report}")"
  create_junit_xml _build_tests "${report_name}" "${errors}"
  [[ -z "${errors}" ]]
}
# Default build test runner that:
# * run `/hack/verify-codegen.sh` (if it exists)
# * `go build` on the entire repo
# * check licenses in all go packages
function default_build_test_runner() {
  foreach_go_module __build_test_runner_for_module
}
# Per-module worker for default_build_test_runner; runs in each Go module.
function __build_test_runner_for_module() {
  local failed=0
  subheader "Build tests for $(go_mod_module_name)"
  # Run verify-codegen check
  if [[ -f ./hack/verify-codegen.sh ]]; then
    subheader "Checking autogenerated code is up-to-date"
    report_build_test Verify_CodeGen ./hack/verify-codegen.sh || failed=1
  fi
  # For documentation PRs, just check the md files and run
  # verify-codegen (as md files can be auto-generated in some repos).
  (( IS_DOCUMENTATION_PR )) && return ${failed}
  # Don't merge these two lines, or return code will always be 0.
  # Get all build tags in go code (ignore /vendor, /hack and /third_party)
  local tags
  tags="$(go run knative.dev/toolbox/go-ls-tags@latest --joiner=,)"
  local go_pkg_dirs
  go_pkg_dirs="$(go list -tags "${tags}" ./...)" || return $?
  if [[ -z "${go_pkg_dirs}" ]]; then
    subheader "No golang code found, skipping build tests"
    return 0
  fi
  # Ensure all the code builds
  # ('go test -exec echo' compiles test binaries without executing them)
  subheader "Checking that go code builds"
  report_build_test Build_Go \
    go test -vet=off -tags "${tags}" -exec echo ./... || failed=2
  # Check that we don't have any forbidden licenses in our images.
  subheader "Checking for forbidden licenses"
  report_build_test Check_Licenses check_licenses || failed=3
  return ${failed}
}
# Run unit tests. If there's no `unit_tests` function, run the default
# unit test runner.
# Mirrors run_build_tests: optional pre/post hooks, skip on doc-only PRs.
function run_unit_tests() {
  (( ! RUN_UNIT_TESTS )) && return 0
  if (( IS_DOCUMENTATION_PR )); then
    header "Documentation only PR, skipping unit tests"
    return 0
  fi
  header "Running unit tests"
  local failed=0
  # Run pre-unit tests, if any
  if function_exists pre_unit_tests; then
    pre_unit_tests || { failed=1; step_failed "pre_unit_tests"; }
  fi
  # Don't run unit tests if pre-unit tests failed
  if (( ! failed )); then
    if function_exists unit_tests; then
      unit_tests || { failed=1; step_failed "unit_tests"; }
    else
      default_unit_test_runner || { failed=1; step_failed "default_unit_test_runner"; }
    fi
  fi
  # Don't run post-unit tests if pre/unit tests failed
  if (( ! failed )) && function_exists post_unit_tests; then
    post_unit_tests || { failed=1; step_failed "post_unit_tests"; }
  fi
  results_banner "Unit" ${failed}
  return ${failed}
}
# Default unit test runner that runs all go tests in the repo.
function default_unit_test_runner() {
  foreach_go_module __unit_test_runner_for_module
}
# Per-module worker: short, race-enabled, cache-busting (-count 1) go tests.
function __unit_test_runner_for_module() {
  subheader "Unit tests for $(go_mod_module_name)"
  report_go_test -short -race -count 1 ./...
}
# Run integration tests. If there's no `integration_tests` function, run the
# default integration test runner.
# Same hook structure as build/unit: pre_* -> main -> post_*.
function run_integration_tests() {
  # Don't run integration tests if not requested OR on documentation PRs
  (( ! RUN_INTEGRATION_TESTS )) && return 0
  if (( IS_DOCUMENTATION_PR )); then
    header "Documentation only PR, skipping integration tests"
    return 0
  fi
  header "Running integration tests"
  local failed=0
  # Run pre-integration tests, if any
  if function_exists pre_integration_tests; then
    pre_integration_tests || { failed=1; step_failed "pre_integration_tests"; }
  fi
  # Don't run integration tests if pre-integration tests failed
  if (( ! failed )); then
    if function_exists integration_tests; then
      integration_tests || { failed=1; step_failed "integration_tests"; }
    else
      default_integration_test_runner || { failed=1; step_failed "default_integration_test_runner"; }
    fi
  fi
  # Don't run integration tests if pre/integration tests failed
  if (( ! failed )) && function_exists post_integration_tests; then
    post_integration_tests || { failed=1; step_failed "post_integration_tests"; }
  fi
  results_banner "Integration" ${failed}
  return ${failed}
}
# Default integration test runner that runs all `test/e2e-*tests.sh`.
# Stops at the first failing script and propagates its exit code.
function default_integration_test_runner() {
  local failed=0
  while read -r e2e_test; do
    subheader "Running integration test ${e2e_test}"
    "${e2e_test}" || failed=$?
    if (( failed )); then
      echo "${e2e_test} failed: $failed" >&2
      return $failed
    fi
  done < <(find test/ -maxdepth 1 ! -name "$(printf "*\n*")" -name "e2e-*tests.sh")
  return ${failed}
}
# Options set by command-line flags.
RUN_BUILD_TESTS=0
RUN_UNIT_TESTS=0
RUN_INTEGRATION_TESTS=0
# Process flags and run tests accordingly.
# Exits with the last non-zero step status (0 when everything passed).
function main() {
  initialize_environment
  if (( IS_PRESUBMIT_EXEMPT_PR )) && (( ! IS_DOCUMENTATION_PR )); then
    header "Commit only contains changes that don't require tests, skipping"
    exit 0
  fi
  # Show the version of the tools we're using
  if (( IS_PROW )); then
    # Disable gcloud update notifications
    gcloud config set component_manager/disable_update_check true
    header "Current test setup"
    echo ">> gcloud SDK version"
    gcloud version
    echo ">> kubectl version"
    kubectl version --client
    echo ">> go version"
    go version
    echo ">> go env"
    go env
    echo ">> python3 version"
    python3 --version
    echo ">> git version"
    git version
    echo ">> ko version"
    [[ -f /ko_version ]] && cat /ko_version || echo "unknown"
    if [[ "${DOCKER_IN_DOCKER_ENABLED:-}" == "true" ]]; then
      echo ">> docker version"
      docker version
    fi
    if type java > /dev/null; then
      echo ">> java version"
      java -version
      echo "JAVA_HOME: ${JAVA_HOME:-}"
    fi
    if command -v mvn > /dev/null; then
      echo ">> maven version"
      mvn --version
    fi
    if command -v cosign > /dev/null; then
      echo ">> cosign version"
      cosign version
    fi
    echo ">> prow-tests image version"
    [[ -f /commit_hash ]] && echo "Prow test image was built from $(cat /commit_hash) commit which is viewable at https://github.com/knative/test-infra/tree/$(cat /commit_hash) " || echo "unknown"
  fi
  # With no arguments, behave as if --all-tests was passed.
  [[ -z ${1:-} ]] && set -- "--all-tests"
  local TESTS_TO_RUN=()
  while [[ $# -ne 0 ]]; do
    local parameter=$1
    case ${parameter} in
      --help|-h)
        echo "Usage: ./presubmit-tests.sh [options...]"
        echo " --build-tests: run build tests."
        echo " --unit-tests: run unit tests."
        echo " --integration-tests: run integration tests, basically all the e2e-*tests.sh."
        echo " --all-tests: run build tests, unit tests and integration tests in sequence."
        echo " --run-test: run custom tests. Can be used to run multiple tests that need different args."
        echo "     For example, ./presubmit-tests.sh --run-test \"e2e-tests1.sh arg1\" \"e2e-tests2.sh arg2\"."
        ;;
      --build-tests) RUN_BUILD_TESTS=1 ;;
      --unit-tests) RUN_UNIT_TESTS=1 ;;
      --integration-tests) RUN_INTEGRATION_TESTS=1 ;;
      --all-tests)
        RUN_BUILD_TESTS=1
        RUN_UNIT_TESTS=1
        RUN_INTEGRATION_TESTS=1
        ;;
      --run-test)
        shift
        [[ $# -ge 1 ]] || abort "missing executable after --run-test"
        TESTS_TO_RUN+=("$1")
        ;;
      *) abort "error: unknown option ${parameter}" ;;
    esac
    shift
  done
  readonly RUN_BUILD_TESTS
  readonly RUN_UNIT_TESTS
  readonly RUN_INTEGRATION_TESTS
  readonly TESTS_TO_RUN
  cd "${REPO_ROOT_DIR}" || exit
  # Tests to be performed, in the right order if --all-tests is passed.
  local failed=0
  if [[ ${#TESTS_TO_RUN[@]} -gt 0 ]]; then
    # --run-test is mutually exclusive with the named test groups.
    if (( RUN_BUILD_TESTS || RUN_UNIT_TESTS || RUN_INTEGRATION_TESTS )); then
      abort "--run-test must be used alone"
    fi
    # If this is a presubmit run, but a documentation-only PR, don't run the test
    if (( IS_PRESUBMIT && IS_DOCUMENTATION_PR )); then
      header "Documentation only PR, skipping running custom test"
      exit 0
    fi
    for test_to_run in "${TESTS_TO_RUN[@]}"; do
      ${test_to_run} || { failed=$?; step_failed "${test_to_run}"; }
    done
  fi
  run_build_tests || { failed=$?; step_failed "run_build_tests"; }
  # If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run unit tests if build tests failed
  if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! failed )); then
    run_unit_tests || { failed=$?; step_failed "run_unit_tests"; }
  fi
  # If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run integration tests if build/unit tests failed
  if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! failed )); then
    run_integration_tests || { failed=$?; step_failed "run_integration_tests"; }
  fi
  exit ${failed}
}
| true
|
a026216f6ae5f95561aab8a1270715d59dd3ee4e
|
Shell
|
Prescrypto/heroku-buildpack-tex
|
/bin/compile
|
UTF-8
| 2,973
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# bin/compile <build-dir> <cache-dir>
# Heroku buildpack compile step: installs (or restores from cache) a TeX Live
# distribution into the app slug and exposes it on PATH via .profile.d.
# fail fast
set -e

BUILD_DIR=$1
CACHE_DIR=$2
BIN_DIR=$(cd "$(dirname "$0")"; pwd) # absolute path

TEXLIVE_REPOSITORY="http://mirror.ctan.org/systems/texlive/tlnet"

# TODO: remove this in future versions.
# This is only kept for backwards support
if [ -f "$BUILD_DIR/.texlive-repository" ]; then
    TEXLIVE_REPOSITORY=$(cat "$BUILD_DIR/.texlive-repository")
fi

# Optional: use custom path to texlive installer
if [ -f "$BUILD_DIR/texlive.repository" ]; then
    TEXLIVE_REPOSITORY=$(cat "$BUILD_DIR/texlive.repository")
fi

TEXLIVE_INSTALLER_URL="$TEXLIVE_REPOSITORY/install-tl-unx.tar.gz"
TEXLIVE_HOME=$BUILD_DIR/.texlive
TEXLIVE_CACHE=$CACHE_DIR/.texlive
PATH=$TEXLIVE_HOME/bin/x86_64-linux:$PATH
PROFILE_D=$BUILD_DIR/.profile.d/texlive.sh

# Output helpers
# shellcheck source=bin/utils
source "$BIN_DIR/utils"

# Prepare the various paths
mkdir -p "$TEXLIVE_HOME"
mkdir -p "$TEXLIVE_CACHE"
mkdir -p "$(dirname "$PROFILE_D")"

# Restore a previous installation from the build cache, if present.
if [ "$(ls -A "$TEXLIVE_CACHE")" ]; then
    build-step "Setting up build cache..."
    cp -R "$TEXLIVE_CACHE/"* "$TEXLIVE_HOME"
fi

if [ ! -f "$TEXLIVE_HOME/install-tl" ]; then
    build-step "Downloading install-tl..."
    echo "Using $TEXLIVE_INSTALLER_URL"
    curl "$TEXLIVE_INSTALLER_URL" -L -s -o - | tar --strip-components=1 -xzf - -C "$TEXLIVE_HOME"
fi

# pdflatex missing means no usable TeX Live yet -> run the installer.
if [ ! "$(which pdflatex)" ]; then
    build-step "Installing TeX Live..."

    PROF=$BIN_DIR/../conf/texlive.profile
    {
      echo "TEXDIR $TEXLIVE_HOME";
      echo "TEXMFCONFIG $TEXLIVE_HOME/var/texmf-config";
      echo "TEXMFHOME $TEXLIVE_HOME/var/texmf";
      echo "TEXMFLOCAL $TEXLIVE_HOME/texmf-local";
      echo "TEXMFSYSCONFIG $TEXLIVE_HOME/texmf-config";
      echo "TEXMFSYSVAR $TEXLIVE_HOME/texmf-var";
      echo "TEXMFVAR $TEXLIVE_HOME/var/texmf-var";
    } >> "$PROF"

    cd "$TEXLIVE_HOME"

    ./install-tl --repository="$TEXLIVE_REPOSITORY" --profile="$PROF"
fi

build-step "Updating TeX Live..."

tlmgr option repository "$TEXLIVE_REPOSITORY"
tlmgr update --self

# install user-provided-packages
if [ -f "$BUILD_DIR/texlive.packages" ]; then
    build-step "Installing custom packages..."
    # shellcheck disable=SC2046
    tlmgr install $(cat "$BUILD_DIR/texlive.packages")
fi

build-step "upgrading installed packages"
tlmgr update --all --exclude hyphen-german

build-step "Cleaning up temporary files..."
# Make sure the cache is empty
rm -rf "${TEXLIVE_CACHE:?}/"*

build-step "Caching..."
# Store a copy of it in the cache so it doesn't have to be fetched again
cp -R "$TEXLIVE_HOME/"* "$TEXLIVE_CACHE/"

# Check for an essential binary to make sure it's installed
if [ ! "$(which pdflatex)" ]; then
    build-warn "TeX Live installation failed"
    exit 1
fi

# Set up the environment for runtimes now that compilation has finished
# shellcheck disable=SC2016
echo 'export PATH=$HOME/.texlive/bin/x86_64-linux:$PATH' > "$PROFILE_D"

build-step "TeX Live installation successful!"
| true
|
a1ed8bf3a9770e6db917c16c01ac75bb77a4854e
|
Shell
|
EExuke/shell
|
/linux_shell/8_3_test.sh
|
UTF-8
| 3,318
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
#############################################################################
# Copyright (C) 2010-2011 Cameo Communications, Inc.
#############################################################################
# AUTHOR              : EExuke
# FILE NAME           : 8_3_test.sh
# FILE DESCRIPTION    : Linux shell script file
# FIRST CREATION DATE : 2020/03/30
# Version     : 1.0
# Last Change : 2020/03/30
# (Fixed: the shebang has been moved to line 1, where the kernel expects it.)
#############################################################################

#-----------------------------------------------------------
# COLOUR VARIABLES
#-----------------------------------------------------------
UNDL="\033[4m"	 F6_E="\033[0m"	 B_WT="\033[47m"
F_BL="\033[30m"	 F_RD="\033[31m"	 F_GR="\033[32m"
F_YL="\033[33m"	 F_BU="\033[34m"	 F_PU="\033[35m"
F_DG="\033[36m"	 F_WT="\033[37m"	 B_BL="\033[40m"
B_RE="\033[41m"	 B_GR="\033[42m"	 B_YL="\033[43m"
B_BU="\033[44m"	 B_PR="\033[45m"	 B_DG="\033[46m"

#-----------------------------------------------------------
# Using graphical dialogs
#-----------------------------------------------------------
# The kdialog and zenity packages provide graphical widgets for the KDE and
# GNOME desktops respectively.
# GNOME supports two popular packages that create standard windows:
#   gdialog
#   zenity
# zenity widgets:
#   --calendar        show a full-month calendar
#   --entry           show a text-entry dialog
#   --error           show an error-message dialog
#   --file-selection  show a full path/file-name selection dialog
#   --info            show an information dialog
#   --list            show a multi- or single-selection list dialog
#   --notification    show a notification icon
#   --progress        show a progress-bar dialog
#   --question        show a yes/no dialog
#   --scale           show a resizable scale dialog
#   --text-info       show a text box containing text
#   --warning         show a warning dialog

# calendar demo
zenity --calendar

# Example: a small system-admin menu backed by zenity dialogs.
temp=$(mktemp -t temp.XXXXXX)
temp2=$(mktemp -t temp2.XXXXXX)
# Clean the temporary files up on any exit path.
trap 'rm -f "$temp" "$temp2"' EXIT

# Fixed in all three helpers below: the option lines lacked the trailing
# backslash, so "--width ... --height ..." used to run as a separate
# (failing) command instead of being passed to zenity.
function diskspace {
	df -k > "$temp"
	zenity --text-info --title "Disk space" --filename="$temp" \
		--width 750 --height 10
}

function whoseon {
	who > "$temp"
	zenity --text-info --title "Logged in users" --filename="$temp" \
		--width 500 --height 10
}

function memusage {
	cat /proc/meminfo > "$temp"
	zenity --text-info --title "Memory usage" --filename="$temp" \
		--width 300 --height 500
}

while true
do
	# Radiolist rows are (selected, label) pairs.
	# Fixed: the first label read "Display diskspace", which never matched
	# its case branch "Display disk space" below.
	zenity --list --radiolist --title "Sys Admin Menu" \
		--column "Select" \
		--column "Menu Item" \
		FALSE "Display disk space" \
		FALSE "Display users" \
		FALSE "Display memory usage" \
		FALSE "Exit" > "$temp2"
	# Exit status 1 means the user pressed Cancel / closed the dialog.
	if [ $? -eq 1 ]
	then
		break
	fi

	selection=$(cat "$temp2")
	case $selection in
	"Display disk space")
		diskspace ;;
	"Display users")
		whoseon ;;
	"Display memory usage")
		memusage ;;
	Exit)
		break ;;
	*)
		# Fixed: --info requires --text for the message body.
		zenity --info --text "Sorry, invalid selection"
	esac
done
#----------------------------------------------------------------
| true
|
febd5c1cb0973cf720bbb6e154d28d1e7329cc73
|
Shell
|
smo921/docker
|
/images/zk/run.sh
|
UTF-8
| 702
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Original source github:fabric8io/fabric8-zookeeper-docker
#
# When both SERVER_ID and MAX_SERVERS are set, render the ensemble server
# list into zoo.cfg and persist this node's id; otherwise start standalone.
# Finally exec ZooKeeper in the foreground so it becomes PID 1's child.

CONF="/zk/conf/zoo.cfg"
DATA="/zk/data"

echo "$SERVER_ID / $MAX_SERVERS"
if [[ -n "$SERVER_ID" && -n "$MAX_SERVERS" ]]; then
  echo "Starting up in clustered mode"
  echo "" >> "$CONF"
  echo "#Server List" >> "$CONF"
  # C-style loop replaces the fragile 'eval echo {1..$MAX_SERVERS}'
  # brace-expansion hack (eval on an env var is also an injection risk).
  for (( i = 1; i <= MAX_SERVERS; i++ )); do
    if [[ "$SERVER_ID" == "$i" ]]; then
      # This node listens on all interfaces for peer/election traffic.
      echo "server.$i=0.0.0.0:2888:3888" >> "$CONF"
    else
      echo "server.$i=zookeeper-$i:2888:3888" >> "$CONF"
    fi
  done
  cat "$CONF"

  # Persists the ID of the current instance of Zookeeper
  echo "${SERVER_ID}" > "$DATA/myid"
else
  echo "Starting up in standalone mode"
fi

exec /zk/bin/zkServer.sh start-foreground
| true
|
b59c4fc06294f8c532fa17903769594ed231eb2a
|
Shell
|
MaseraTiGo/scaffold
|
/deploy/script/conf/project_online_env.sh
|
UTF-8
| 515
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Environment configuration sourced by the deployment scripts; defines the
# directory layout, backup locations and docker-compose settings.

# base directory parameters
currentdir=${PWD}
confdir=${currentdir}/../
codedir=${confdir}/../

# deployment directory
deploydir=/deploy

# data directory
datadir=/data

# database to backup of path
backup_dir=/data/databases/mysql-master/backup

# docker
docker_compose_file=online_production.yml
docker_backup_dir=/var/lib/backup

# Files that must not be shipped to production (local settings overrides);
# deployment scripts presumably remove them -- confirm against the callers.
# invalide project files
invalide_files=(
    "${deploydir}/application/web/tuoen/settings_local.py"
    "${deploydir}/application/web/tuoen/settings_local.pyc"
)
| true
|
58613be1f0b2a052ad873f807d79a1b63bf8e368
|
Shell
|
aborle1/GEOS2
|
/Baselibs/src/h5edit/bin/setversion
|
UTF-8
| 4,482
| 4.4375
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/sh
# This sets the version information of h5edit in all related files.
# Usage: setversion [-r] <version>
# <version> is the new version number in the form of M.m.R-relstr where
# M is major, m is minor, R is release and -relstr is the
# optional subrelease.
# -r is the optional release mode.
# When given, all the files are set for release by adding today's
# date as the release date. The default is development mode which
# sets all version information files with the
# "currently under development" notice.
# Function definitions
# Print the command-line usage synopsis to stdout.
USAGE()
{
    printf 'Usage: %s [-r] <M.m.R-relstr>\n' "$0"
}
# Print Debug output. Disabled by default: the ':' builtin ignores its
# arguments, making this a no-op. Replace ':' with 'echo' to enable tracing.
DPRINT()
{
    : "$@"
}
# Show a banner for the message given as $*: a blank line, then the message
# framed by two '=' rules. $* is quoted so metacharacters survive intact.
BANNER()
{
    printf '\n%s\n%s\n%s\n' \
        "==================================================" \
        "$*" \
        "=================================================="
}
# Main body
# Check command arguments
# -r switches to release mode (stamps today's date); default is development.
if [ "$1" = "-r" ]; then
    # Release mode
    ReleaseDate="released on "`date +%F`
    shift
else
    ReleaseDate="currently under development"
fi
if [ $# -ne 1 ]; then
    USAGE
    exit 1
fi
arg=$1
DPRINT "====== arg=$arg ============"
# Verify given arg is the correct format and pull out the optional release
# string.
case "$arg" in
[0-9]*.[0-9]*.[0-9]*-* )
    # pull out the release string
    Vrelstr=`echo $arg | cut -d- -f2`
    # trim the release string.
    Versions=`echo $arg | cut -d- -f1`
    ;;
[0-9]*.[0-9]*.[0-9]* )
    Versions=$arg
    ;;
*)
    echo "Bad argument $arg"
    USAGE
    exit 1
    ;;
esac
# Parse the Version argument into different parts
Vmajor=`echo $Versions | cut -d. -f1`
Vminor=`echo $Versions | cut -d. -f2`
Vrelease=`echo $Versions | cut -d. -f3`
Vinfo="H5edit version $arg, $ReleaseDate"
DPRINT "Vrelstr=$Vrelstr; Versions=$Versions;"
DPRINT "Major=$Vmajor; minor=$Vminor; Release=$Vrelease;"
BANNER Update files with new version information $arg $ReleaseDate
# Update configure.ac by fixing the AC_INIT statement and run bin/reconfigure
# (the ed script rewrites the version field inside AC_INIT's brackets)
f=configure.ac
echo Update ${f} ...
if [ -w $f ]; then
    ed - $f <<EOF
/AC_INIT/s/, \[[^]]*/, [$arg/
w
q
EOF
    echo running reconfigure ...
    bin/reconfigure
else
    echo $f is not writable
    USAGE
    exit 1
fi
# Update src/h5edit.h by fixing the version information macros.
# The "$" is needed so that the "5" in H5EDIT is not replaced.
f=src/h5edit.h
echo Update ${f} ...
if [ -w $f ]; then
    ed - $f <<EOF
/define H5EDIT_VER_MAJOR/s/[0-9][0-9]*$/$Vmajor/
/define H5EDIT_VER_MINOR/s/[0-9][0-9]*$/$Vminor/
/define H5EDIT_VER_RELEASE/s/[0-9][0-9]*$/$Vrelease/
/define H5EDIT_VER_SUBRELEASE/s/".*"/"$Vrelstr"/
/define H5EDIT_VER_INFO/s/".*"/"$Vinfo"/
w
q
EOF
else
    echo $f is not writable
    USAGE
    exit 1
fi
# Update README and doc/RELEASE.txt by fixing the version information in the
# header line at the beginning.
for f in README doc/RELEASE.txt; do
    echo Update ${f} ...
    if [ -w $f ]; then
	ed - $f <<EOF
/^H5edit version/s/.*/H5edit version $arg $ReleaseDate./
w
q
EOF
    else
	echo $f is not writable
	USAGE
	exit 1
    fi
done
BANNER Files require manual update
# The following are not automized yet and should be done by hand.
echo you need to update the following files:
echo doc/h5edit.docx:
echo doc/h5edit-Command-Language-Defininition.docx:
echo doc/H5edit_User_Guide.docx:
echo "    The version in the footer and last edit date"
echo "    Then regenerate the corresponding .pdf and .htm files"
BANNER Reminder of post-release tasks
# Post-release cleanup and reset for next release. Docuement here temporary.
# Should be moved to a permenant spot later.
cat <<EOF
Reminder:
Generate the release tarball by
$ svn export http://svn.hdfgroup.uiuc.edu/h5edit/trunk h5edit-<version>
  E.g., svn export http://svn.hdfgroup.uiuc.edu/h5edit/trunk h5edit-1.1.0
$ tar zcf h5edit-<version>.tar.gz h5edit-<version>
  E.g., tar zcf h5edit-1.1.0.tar.gz h5edit-1.1.0
After release tarball is generated, you need to tag the released version by:
$ svn copy http://svn.hdfgroup.uiuc.edu/h5edit/trunk http://svn.hdfgroup.uiuc.edu/h5edit/tags/<version>
  E.g., svn copy http://svn.hdfgroup.uiuc.edu/h5edit/trunk http://svn.hdfgroup.uiuc.edu/h5edit/tags/h5edit_1_1_0
Then in doc,
1. archive RELEASE.txt to History
2. empty the "Changes since ...<version>" section.
3. reset version in the header
EOF
| true
|
209bd73b677088ca79a60077f1d3ded433c43dc8
|
Shell
|
fusion809/AppImages
|
/recipes/leafpad/Recipe
|
UTF-8
| 2,157
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# A "hello world" recipe
# This is just a proof-of-concept, in reality you should use a proper "recipe" script
# to create proper AppDirs in order to ensure binary compatibility of your AppImages.
# See the documentation and the other recipes for details and for examples on how to
# bundle real-world software such as LibreOffice, Google Chrome, and others as AppImages.

export APP=leafpad
mkdir -p "$APP/$APP.AppDir"
cd "$APP"

# Note we are using a really old binary so that it runs hopefully everywhere
# ALso note this is a very simplistic example as we assume this package has no dependencies
# that are not part of the base system - unlike most other packages
URL=http://ftp.rz.tu-bs.de/pub/mirror/ubuntu-packages/pool/universe/l/leafpad/leafpad_0.8.17-2_amd64.deb
wget -c "${URL}"

cd "$APP.AppDir"
# Fixed: '../$APP_*.deb' expanded the (unset) variable APP_ followed by '*',
# matching any .deb only by glob accident; brace the variable name instead.
dpkg -x ../"${APP}"_*.deb .

# Copy icon and desktop file in place
cp "./usr/share/icons/hicolor/scalable/apps/$APP.svg" .
cp "./usr/share/applications/$APP.desktop" .

# Add desktop integration
wget -O "./usr/bin/$APP.wrapper" https://raw.githubusercontent.com/probonopd/AppImageKit/master/desktopintegration
chmod a+x "./usr/bin/$APP.wrapper"
sed -i -e "s|Exec=$APP |Exec=$APP.wrapper |g" "$APP.desktop"

# Put in AppRun file
wget -c https://github.com/probonopd/AppImageKit/releases/download/5/AppRun # 64-bit
chmod a+x AppRun

###############################################################
# Experimental autorun support
###############################################################

cat > autorun.inf <<EOF
[AutoRun]
label=$APP
icon=.DirIcon

[Content]
MusicFiles=false
PictureFiles=false
VideoFiles=false
EOF

# Quoted delimiter: the heredoc body is written literally, no expansion.
cat > autorun.sh <<\EOF
HERE="$(dirname "$(readlink -f "${0}")")"
exec "${HERE}/AppRun" "$@"
EOF
chmod a+x autorun.sh

###############################################################

cd ..

# Derive the version from the upstream .deb file name in the URL.
VERSION=$(echo $URL | cut -d "_" -f 2 | cut -d "-" -f 1 | head -n 1 | xargs)

wget -c https://github.com/probonopd/AppImageKit/releases/download/5/AppImageAssistant # 64-bit
chmod a+x AppImageAssistant

mkdir -p ../out
rm "../out/Leafpad-$VERSION-x86_64.AppImage" || true
./AppImageAssistant "$APP.AppDir" "../out/Leafpad-$VERSION-x86_64.AppImage"
| true
|
8db7e94f29ab4ebe8cbdcbf0dec309a085af307d
|
Shell
|
brnz/shell
|
/scripts/aliases
|
UTF-8
| 623
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive-shell aliases. "ls -G" is the BSD/macOS color flag, so this
# file appears to target macOS — confirm before using on GNU coreutils.
alias l="ls -G"
alias la="l -A"
alias ll="l -hl"
alias lla="l -Ahl"
alias lm="ll | more"
# Prompt before clobbering files on copy/move.
alias cp="cp -i"
alias mv="mv -i"
#alias reset_dock="_reset_dock"
# git shortcuts
alias gc="git checkout --no-guess"
alias gcb="gc -b"
alias gco="git commit"
alias gd="git diff"
#alias gpu="_gpu"
alias gs="git status"
# Android: send keystroke text to the connected device over adb.
alias asit="adb shell input text"
# Launch the iOS Simulator bundled with Xcode.
alias simulator="open /Applications/Xcode.app/Contents/Developer/Applications/Simulator.app"
# Case-insensitively find regular files whose name contains $1.
# With a second argument, run it on all matches via find -exec; the argument
# is deliberately left unquoted so it may carry its own flags (e.g. "wc -l").
search() {
  local needle=$1
  local handler=$2
  if [[ -n "$handler" ]]; then
    find . -iname "*$needle*" -type f -exec $handler {} \+
  else
    find . -iname "*$needle*" -type f
  fi
}
# ROT13-rename files (y/// transliteration); needs the Perl "rename" utility.
alias caesar="rename y/A-Za-z/N-ZA-Mn-za-m/"
| true
|
4ffa551acd991cf00d9bec0883aba71e1d05286b
|
Shell
|
eklingen88/gcd
|
/includes/out.sh
|
UTF-8
| 484
| 3.65625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Function for notifying
# Print a red error banner to stdout. When a second argument is given and is
# non-zero, treat the error as fatal: remove the freshly created .git
# directory and exit 1.
# Globals:   source_dir (read) - directory whose .git is removed on fatal error
# Arguments: $1 - message; $2 - optional exit flag (non-zero => fatal)
function out_error {
  echo -e "\e[31m!!! ${1} !!!\e[39m"
  # FIX: the original unquoted "[ ! -z $2 ] && [ $2 -ne 0 ]" only behaved for
  # an empty $2 by accident of test's argument parsing (SC2086). Quote and
  # default the expansion; semantics are unchanged for all numeric inputs.
  if [[ -n "${2:-}" && "${2:-0}" -ne 0 ]]; then
    # Since we just created the .git directory at the beginning, we can safely remove it now
    sudo rm -rf "${source_dir}/.git"
    echo -e "\e[31m!!! EXITING NOW !!!\e[39m"
    exit 1
  fi
}
# Print a yellow notice banner to stdout. printf's %b interprets backslash
# escapes in the argument, mirroring the original echo -e byte-for-byte.
function out_notice {
  printf '\e[33m--- %b ---\e[39m\n' "${1}"
}
# Print a green success banner to stdout. printf's %b interprets backslash
# escapes in the argument, mirroring the original echo -e byte-for-byte.
function out_ok {
  printf '\e[32m--- %b ---\e[39m\n' "${1}"
}
| true
|
feb8abc9424d62417db975f36060b233910f8c37
|
Shell
|
ucberkeley/bce
|
/provisioning/guest-scripts/setup_ipython_notebook.sh
|
UTF-8
| 2,039
| 3.921875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/bash
# Make it easy to set up password / encryption for IPython notebook
# Interactive script: prompts for a password, appends SSL + password settings
# to the default IPython notebook profile, and generates a self-signed cert.
echo "
This script will update your default ipython notebook profile.
It will be configured to use the SSL certificate it creates in the
current directory.
Please type a password to use for the notebook server:"
# Hash the typed password with IPython's passwd() helper (reads from stdin).
sha_str=$(python3 -c \
'from IPython.lib import passwd; print(passwd(input()))' )
# Just for the vertical space
echo
# Create the ipython profile_default
ipython profile create
# There's probably a slightly better way to get this filename
cfile=~/.ipython/profile_default/ipython_notebook_config.py
# Append notebook settings. The unquoted EOF delimiter is intentional:
# $PWD and $sha_str must expand into the written config.
cat >> $cfile <<EOF
# Automatically added by setup_ipython_notebook.sh
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 9999
c.NotebookApp.certfile = u'$PWD/ipython.pem'
# Necessary for some versions of Tornado?
c.NotebookApp.keyfile = u'$PWD/ipython.pem'
c.NotebookApp.password = u'$sha_str'
EOF
echo "Added config to end of:
$cfile
Generating SSL certificate. Answer questions however you like.
(Hit enter to continue)"
# We don't care what they type, as long as they hit enter
read
# Generate a reasonable SSL cert
# (key and cert are written into the same PEM file)
openssl req -x509 -nodes -days 365 -newkey rsa:1024 \
-keyout ipython.pem -out ipython.pem
# Thanks internet!
# Ask OpenDNS for our public-facing IP for the final instructions below.
myip=$(dig +short myip.opendns.com @resolver1.opendns.com)
echo "
The following line should now run the notebook on port 9999
\$ ipython notebook
To modify this behavior (including if you move ipython.pem) edit:
$cfile
You'll also need to open TCP access from your web browser's IP. For example,
using Amazon's EC2 console, click on the security group for your instance and
add an incoming rule for port 9999.
Then, you should be able to point your (local) web browser to:
https://$myip:9999/
You'll probably need to click through security warnings in Chrome each time.
Firefox should let you save the certificate for future usage.
Your password will be the one you typed above. It's in cleartext, so you can
check if you've forgotten already!
"
| true
|
93f18a8f0988df3794d2b9e1737a90114954f30e
|
Shell
|
thoradia/LibreELEC.tv
|
/scripts/build
|
UTF-8
| 20,773
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2009-2016 Stephan Raue (stephan@openelec.tv)
# Copyright (C) 2018-present Team LibreELEC (https://libreelec.tv)
# Build one package for one toolchain flavor (target/host/init/bootstrap),
# recursively building its dependencies first. Helpers such as die,
# pkg_lock, listcontains and the PKG_* variables are provided by
# config/options (sourced below), not defined in this file.
. config/options "${1}"
if [ -z "${1}" ]; then
die "usage: ${0} package_name[:<host|target|init|bootstrap>] [parent_pkg]"
fi
# --all: re-run this script for package ${2} inside every build.* directory.
if [ "${1}" = "--all" ]; then
if [ -n "${2}" ]; then
for build_dir in $(ls -1d ${ROOT}/build.*); do
load_build_config ${build_dir} && ${SCRIPTS}/build "${2}"
done
fi
exit 0
fi
if [ -z "${PKG_NAME}" ]; then
die "$(print_color CLR_ERROR "${1}: no package.mk file found")"
fi
# Skip packages that exclude this arch ("!arch") or list neither it nor "any".
if [ -n "${PKG_ARCH}" ]; then
listcontains "${PKG_ARCH}" "!${TARGET_ARCH}" && exit 0
listcontains "${PKG_ARCH}" "${TARGET_ARCH}" || listcontains "${PKG_ARCH}" "any" || exit 0
fi
# Split an optional ":flavor" suffix off the package argument; default "target".
if [ "${1//:/}" != "${1}" ]; then
TARGET="${1#*:}"
else
TARGET=
fi
TARGET="${TARGET:-target}"
PARENT_PKG="${2:-${PKG_NAME}:${TARGET}}"
pkg_lock "${PKG_NAME}:${TARGET}" "build" "${PARENT_PKG}"
mkdir -p ${STAMPS}/${PKG_NAME}
# Stamp file records the state hash of the last successful build of this flavor.
STAMP=${STAMPS}/${PKG_NAME}/build_${TARGET}
# Short-circuit: if the recorded deep hash and debug flag still match the
# current tree state, the package is already built for this flavor.
if [ -f ${STAMP} ]; then
. ${STAMP}
PKG_DEEPHASH=$(calculate_stamp)
if [ "${PKG_DEEPHASH}" = "${STAMP_PKG_DEEPHASH}" -a "${BUILD_WITH_DEBUG}" = "${STAMP_BUILD_WITH_DEBUG}" ]; then
# stamp matched: already built, do nothing
pkg_lock_status "UNLOCK" "${PKG_NAME}:${TARGET}" "build" "already built"
exit 0
fi
rm -f ${STAMP}
fi
${SCRIPTS}/unpack "${PKG_NAME}" "${PARENT_PKG}"
# python2 packages need the cross-compiling distutils helpers prepended.
if [ "$PKG_TOOLCHAIN" = "python2" ]; then
PKG_DEPENDS_HOST="toolchain distutilscross:host $PKG_DEPENDS_HOST"
PKG_DEPENDS_TARGET="toolchain distutilscross:host Python2 $PKG_DEPENDS_TARGET"
fi
# build dependencies, only when PKG_DEPENDS_? is filled
unset _pkg_depends
case "${TARGET}" in
"target") _pkg_depends="${PKG_DEPENDS_TARGET}";;
"host") _pkg_depends="${PKG_DEPENDS_HOST}";;
"init") _pkg_depends="${PKG_DEPENDS_INIT}";;
"bootstrap") _pkg_depends="${PKG_DEPENDS_BOOTSTRAP}";;
esac
# Recurse into each dependency before building this package.
for p in ${_pkg_depends}; do
${SCRIPTS}/build "${p}" "${PARENT_PKG}"
done
# virtual packages are not built as they only contain dependencies, so dont go further here
if [ "${PKG_SECTION}" = "virtual" ]; then
PKG_DEEPHASH=$(calculate_stamp)
for i in PKG_NAME PKG_DEEPHASH BUILD_WITH_DEBUG; do
echo "STAMP_${i}=\"${!i}\"" >> ${STAMP}
done
pkg_lock_status "UNLOCK" "${PKG_NAME}:${TARGET}" "build" "built"
exit 0
fi
# build this package
if [ "${BUILD_WITH_DEBUG}" = "yes" ]; then
build_msg "CLR_BUILD" "BUILD" "${PKG_NAME} $(print_color "CLR_TARGET" "(${TARGET})") [DEBUG]" "indent"
else
build_msg "CLR_BUILD" "BUILD" "${PKG_NAME} $(print_color "CLR_TARGET" "(${TARGET})")" "indent"
fi
setup_toolchain ${TARGET}
# configure install directory
# Only target/init flavors stage into a package-local install dir; host and
# bootstrap flavors install straight into ${TOOLCHAIN}, so INSTALL is unset.
if [ "${TARGET}" = "target" ]; then
INSTALL="${PKG_BUILD}/.install_pkg"
elif [ "${TARGET}" = "init" ]; then
INSTALL="${PKG_BUILD}/.install_init"
else
unset INSTALL
fi
# remove previous install files
if [ -n "${INSTALL}" -a -d "${INSTALL}" ]; then
rm -rf "${INSTALL}"
fi
# configure debug build defaults
if [ "${BUILD_WITH_DEBUG}" = "yes" ]; then
CMAKE_BUILD_TYPE="Debug"
MESON_BUILD_TYPE="debug"
else
CMAKE_BUILD_TYPE="MinSizeRel"
MESON_BUILD_TYPE="plain"
fi
# Default option strings per build system and flavor. These are expanded
# unquoted at the call sites below, so the line continuations inside the
# quoted strings simply collapse to spaces via word-splitting.
CMAKE_GENERATOR_NINJA="-GNinja \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON"
# configure TARGET build defaults
TARGET_CONFIGURE_OPTS="--host=${TARGET_NAME} \
--build=${HOST_NAME} \
--prefix=/usr \
--bindir=/usr/bin \
--sbindir=/usr/sbin \
--sysconfdir=/etc \
--libdir=/usr/lib \
--libexecdir=/usr/lib \
--localstatedir=/var \
--disable-static \
--enable-shared"
TARGET_CMAKE_OPTS="-DCMAKE_TOOLCHAIN_FILE=${CMAKE_CONF} \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}"
TARGET_MESON_OPTS="--prefix=/usr \
--bindir=/usr/bin \
--sbindir=/usr/sbin \
--sysconfdir=/etc \
--libdir=/usr/lib \
--libexecdir=/usr/lib \
--localstatedir=/var \
--buildtype=${MESON_BUILD_TYPE}"
# configure HOST build defaults
# NOTE(review): unlike the target set, no --libdir is passed here —
# presumably the configure default is wanted; confirm before changing.
HOST_CONFIGURE_OPTS="--host=${HOST_NAME} \
--build=${HOST_NAME} \
--prefix=${TOOLCHAIN} \
--bindir=${TOOLCHAIN}/bin \
--sbindir=${TOOLCHAIN}/sbin \
--sysconfdir=${TOOLCHAIN}/etc \
--libexecdir=${TOOLCHAIN}/lib \
--localstatedir=${TOOLCHAIN}/var \
--disable-static \
--enable-shared"
HOST_CMAKE_OPTS="${CMAKE_GENERATOR} \
-DCMAKE_TOOLCHAIN_FILE=${CMAKE_CONF} \
-DCMAKE_INSTALL_PREFIX=${TOOLCHAIN}"
HOST_MESON_OPTS="--prefix=${TOOLCHAIN} \
--bindir=${TOOLCHAIN}/bin \
--sbindir=${TOOLCHAIN}/sbin \
--sysconfdir=${TOOLCHAIN}/etc \
--libdir=${TOOLCHAIN}/lib \
--libexecdir=${TOOLCHAIN}/lib \
--localstatedir=${TOOLCHAIN}/var \
--buildtype=plain"
# configure INIT build defaults
# init builds reuse the target defaults; bootstrap reuses the host defaults.
INIT_CONFIGURE_OPTS="${TARGET_CONFIGURE_OPTS}"
INIT_CMAKE_OPTS="${TARGET_CMAKE_OPTS}"
INIT_MESON_OPTS="${TARGET_MESON_OPTS}"
# configure BOOTSTRAP build defaults
BOOTSTRAP_CONFIGURE_OPTS="${HOST_CONFIGURE_OPTS}"
BOOTSTRAP_CMAKE_OPTS="${HOST_CMAKE_OPTS}"
BOOTSTRAP_MESON_OPTS="${HOST_MESON_OPTS}"
# setup configure scripts
# Package files may override these; otherwise fall back to the usual names.
PKG_CONFIGURE_SCRIPT="${PKG_CONFIGURE_SCRIPT:-${PKG_BUILD}/configure}"
PKG_CMAKE_SCRIPT="${PKG_CMAKE_SCRIPT:-${PKG_BUILD}/CMakeLists.txt}"
PKG_MESON_SCRIPT="${PKG_MESON_SCRIPT:-${PKG_BUILD}/meson.build}"
# auto detect toolchain
_auto_toolchain=""
if [ -z "${PKG_TOOLCHAIN}" -o "${PKG_TOOLCHAIN}" = "auto" ]; then
# Detection precedence: meson > cmake > configure > plain Makefile.
if [ -f "${PKG_MESON_SCRIPT}" ]; then
PKG_TOOLCHAIN="meson"
elif [ -f "${PKG_CMAKE_SCRIPT}" ]; then
PKG_TOOLCHAIN="cmake"
elif [ -f "${PKG_CONFIGURE_SCRIPT}" ]; then
PKG_TOOLCHAIN="configure"
elif [ -f "${PKG_BUILD}/Makefile" ]; then
PKG_TOOLCHAIN="make"
else
die "Not possible to detect toolchain automatically. Add PKG_TOOLCHAIN= to package.mk"
fi
_auto_toolchain=" (auto-detect)"
fi
# Reject anything outside the supported toolchain list (explicit or detected).
if ! listcontains "meson cmake cmake-make configure ninja make autotools manual python2" "${PKG_TOOLCHAIN}"; then
die "$(print_color "CLR_ERROR" "ERROR:") unknown toolchain ${PKG_TOOLCHAIN}"
fi
build_msg "CLR_TOOLCHAIN" "TOOLCHAIN" "${PKG_TOOLCHAIN}${_auto_toolchain}"
# make autoreconf
# autotools packages regenerate their configure script before configuring.
if [ "${PKG_TOOLCHAIN}" = "autotools" ]; then
${SCRIPTS}/autoreconf "${PKG_NAME}" "${PARENT_PKG}"
fi
pkg_lock_status "ACTIVE" "${PKG_NAME}:${TARGET}" "build"
# include build template and build
pkg_call_exists pre_build_${TARGET} && pkg_call pre_build_${TARGET}
# ensure ${PKG_BUILD} is there. (installer? PKG_URL="")
mkdir -p "${PKG_BUILD}"
cd "${PKG_BUILD}"
# Use a per-flavor out-of-tree build directory whenever a recognised build
# system (configure/cmake/meson) is present.
if [ -f "${PKG_CONFIGURE_SCRIPT}" -o -f "${PKG_CMAKE_SCRIPT}" -o -f "${PKG_MESON_SCRIPT}" ]; then
case "${TARGET}" in
"target") PKG_REAL_BUILD="${PKG_BUILD}/.${TARGET_NAME}" ;;
"host") PKG_REAL_BUILD="${PKG_BUILD}/.${HOST_NAME}" ;;
"init") PKG_REAL_BUILD="${PKG_BUILD}/.${TARGET_NAME}-${TARGET}" ;;
"bootstrap") PKG_REAL_BUILD="${PKG_BUILD}/.${HOST_NAME}-${TARGET}" ;;
esac
mkdir -p "${PKG_REAL_BUILD}"
cd "${PKG_REAL_BUILD}"
# meson cross file written by create_meson_conf in the configure phase below.
MESON_CONF="${PKG_REAL_BUILD}/meson.conf"
fi
# configure
# A package may supply its own configure_<flavor> hook; otherwise dispatch on
# the toolchain:flavor pair. Each arm echoes the exact command (tr -s squeezes
# the multi-line option strings' whitespace) before executing it.
pkg_call_exists pre_configure && pkg_call pre_configure
pkg_call_exists pre_configure_${TARGET} && pkg_call pre_configure_${TARGET}
if pkg_call_exists configure_${TARGET}; then
pkg_call configure_${TARGET}
else
case "${PKG_TOOLCHAIN}:${TARGET}" in
# meson builds
"meson:target")
create_meson_conf ${TARGET} ${MESON_CONF}
echo "Executing (target): meson ${TARGET_MESON_OPTS} --cross-file=${MESON_CONF} ${PKG_MESON_OPTS_TARGET} $(dirname ${PKG_MESON_SCRIPT})" | tr -s " "
CC="${HOST_CC}" CXX="${HOST_CXX}" meson ${TARGET_MESON_OPTS} --cross-file=${MESON_CONF} ${PKG_MESON_OPTS_TARGET} $(dirname ${PKG_MESON_SCRIPT})
;;
"meson:host")
create_meson_conf ${TARGET} ${MESON_CONF}
echo "Executing (host): meson ${HOST_MESON_OPTS} --cross-file=${MESON_CONF} ${PKG_MESON_OPTS_HOST} $(dirname ${PKG_MESON_SCRIPT})" | tr -s " "
meson ${HOST_MESON_OPTS} --cross-file=${MESON_CONF} ${PKG_MESON_OPTS_HOST} $(dirname ${PKG_MESON_SCRIPT})
;;
"meson:init")
create_meson_conf ${TARGET} ${MESON_CONF}
echo "Executing (init): meson ${INIT_MESON_OPTS} --cross-file=${MESON_CONF} ${PKG_MESON_OPTS_INIT} $(dirname ${PKG_MESON_SCRIPT})" | tr -s " "
meson ${INIT_MESON_OPTS} --cross-file=${MESON_CONF} ${PKG_MESON_OPTS_INIT} $(dirname ${PKG_MESON_SCRIPT})
;;
"meson:bootstrap")
create_meson_conf ${TARGET} ${MESON_CONF}
echo "Executing (bootstrap): meson ${BOOTSTRAP_MESON_OPTS} --cross-file=${MESON_CONF} ${PKG_MESON_OPTS_BOOTSTRAP} $(dirname ${PKG_MESON_SCRIPT})" | tr -s " "
meson ${BOOTSTRAP_MESON_OPTS} --cross-file=${MESON_CONF} ${PKG_MESON_OPTS_BOOTSTRAP} $(dirname ${PKG_MESON_SCRIPT})
;;
# cmake builds with ninja
"cmake:target")
echo "Executing (target): cmake ${CMAKE_GENERATOR_NINJA} ${TARGET_CMAKE_OPTS} ${PKG_CMAKE_OPTS_TARGET} $(dirname ${PKG_CMAKE_SCRIPT})" | tr -s " "
cmake ${CMAKE_GENERATOR_NINJA} ${TARGET_CMAKE_OPTS} ${PKG_CMAKE_OPTS_TARGET} $(dirname ${PKG_CMAKE_SCRIPT})
;;
"cmake:host")
echo "Executing (host): cmake ${CMAKE_GENERATOR_NINJA} ${HOST_CMAKE_OPTS} ${PKG_CMAKE_OPTS_HOST} $(dirname ${PKG_CMAKE_SCRIPT})" | tr -s " "
cmake ${CMAKE_GENERATOR_NINJA} ${HOST_CMAKE_OPTS} ${PKG_CMAKE_OPTS_HOST} $(dirname ${PKG_CMAKE_SCRIPT})
;;
"cmake:init")
echo "Executing (init): cmake ${CMAKE_GENERATOR_NINJA} ${INIT_CMAKE_OPTS} ${PKG_CMAKE_OPTS_INIT} $(dirname ${PKG_CMAKE_SCRIPT})" | tr -s " "
cmake ${CMAKE_GENERATOR_NINJA} ${INIT_CMAKE_OPTS} ${PKG_CMAKE_OPTS_INIT} $(dirname ${PKG_CMAKE_SCRIPT})
;;
"cmake:bootstrap")
echo "Executing (bootstrap): cmake ${CMAKE_GENERATOR_NINJA} ${BOOTSTRAP_CMAKE_OPTS} ${PKG_CMAKE_OPTS_BOOTSTRAP} $(dirname ${PKG_CMAKE_SCRIPT})" | tr -s " "
cmake ${CMAKE_GENERATOR_NINJA} ${BOOTSTRAP_CMAKE_OPTS} ${PKG_CMAKE_OPTS_BOOTSTRAP} $(dirname ${PKG_CMAKE_SCRIPT})
;;
# cmake builds with make
"cmake-make:target")
echo "Executing (target): cmake ${TARGET_CMAKE_OPTS} ${PKG_CMAKE_OPTS_TARGET} $(dirname ${PKG_CMAKE_SCRIPT})" | tr -s " "
cmake ${TARGET_CMAKE_OPTS} ${PKG_CMAKE_OPTS_TARGET} $(dirname ${PKG_CMAKE_SCRIPT})
;;
"cmake-make:host")
echo "Executing (host): cmake ${HOST_CMAKE_OPTS} ${PKG_CMAKE_OPTS_HOST} $(dirname ${PKG_CMAKE_SCRIPT})" | tr -s " "
cmake ${HOST_CMAKE_OPTS} ${PKG_CMAKE_OPTS_HOST} $(dirname ${PKG_CMAKE_SCRIPT})
;;
"cmake-make:init")
echo "Executing (init): cmake ${INIT_CMAKE_OPTS} ${PKG_CMAKE_OPTS_INIT} $(dirname ${PKG_CMAKE_SCRIPT})" | tr -s " "
cmake ${INIT_CMAKE_OPTS} ${PKG_CMAKE_OPTS_INIT} $(dirname ${PKG_CMAKE_SCRIPT})
;;
"cmake-make:bootstrap")
echo "Executing (bootstrap): cmake ${BOOTSTRAP_CMAKE_OPTS} ${PKG_CMAKE_OPTS_BOOTSTRAP} $(dirname ${PKG_CMAKE_SCRIPT})" | tr -s " "
cmake ${BOOTSTRAP_CMAKE_OPTS} ${PKG_CMAKE_OPTS_BOOTSTRAP} $(dirname ${PKG_CMAKE_SCRIPT})
;;
# configure builds
"configure:target"|"autotools:target")
echo "Executing (target): ${PKG_CONFIGURE_SCRIPT} ${TARGET_CONFIGURE_OPTS} ${PKG_CONFIGURE_OPTS_TARGET}" | tr -s " "
${PKG_CONFIGURE_SCRIPT} ${TARGET_CONFIGURE_OPTS} ${PKG_CONFIGURE_OPTS_TARGET}
;;
"configure:host"|"autotools:host")
echo "Executing (host): ${PKG_CONFIGURE_SCRIPT} ${HOST_CONFIGURE_OPTS} ${PKG_CONFIGURE_OPTS_HOST}" | tr -s " "
${PKG_CONFIGURE_SCRIPT} ${HOST_CONFIGURE_OPTS} ${PKG_CONFIGURE_OPTS_HOST}
;;
"configure:init"|"autotools:init")
echo "Executing (init): ${PKG_CONFIGURE_SCRIPT} ${INIT_CONFIGURE_OPTS} ${PKG_CONFIGURE_OPTS_INIT}" | tr -s " "
${PKG_CONFIGURE_SCRIPT} ${INIT_CONFIGURE_OPTS} ${PKG_CONFIGURE_OPTS_INIT}
;;
"configure:bootstrap"|"autotools:bootstrap")
echo "Executing (bootstrap): ${PKG_CONFIGURE_SCRIPT} ${BOOTSTRAP_CONFIGURE_OPTS} ${PKG_CONFIGURE_OPTS_BOOTSTRAP}" | tr -s " "
${PKG_CONFIGURE_SCRIPT} ${BOOTSTRAP_CONFIGURE_OPTS} ${PKG_CONFIGURE_OPTS_BOOTSTRAP}
;;
# python based build
"python2:host")
echo "Executing (host): python2 setup.py build" | tr -s " "
export LDSHARED="$CC -shared $LDSHARED"
$TOOLCHAIN/bin/python2 setup.py build --build-base .$HOST_NAME
;;
"python2:target")
# Copy the source tree aside, cross-compile with the target sysroot in
# CFLAGS, then easy_install into the staging prefix and strip .py sources
# (leaving only compiled artifacts) from the installed eggs.
echo "Executing (target): python2 setup.py easy_install" | tr -s " "
mkdir -p .$TARGET_NAME
cp -r * .$TARGET_NAME
cd .$TARGET_NAME
export CFLAGS="$CFLAGS -I$SYSROOT_PREFIX/usr/include -L$SYSROOT_PREFIX/usr/lib"
export LDSHARED="$CC -shared $LDSHARED"
export PYTHONXCPREFIX="$SYSROOT_PREFIX/usr"
_prefix="$INSTALL/usr"
_pythonpath="$_prefix/lib/$PKG_PYTHON_VERSION/site-packages"
mkdir -p "$_pythonpath"
PYTHONPATH="$PYTHONPATH:$_pythonpath" \
$TOOLCHAIN/bin/python2 setup.py easy_install \
--prefix "$_prefix" \
.
find "$_prefix" -name "*.py" -exec rm -rf "{}" ";"
find "$_prefix" -name "*.egg" -type f -exec zip -dq {} "*.exe" "*.py" ";"
;;
esac
fi
pkg_call_exists post_configure_${TARGET} && pkg_call post_configure_${TARGET}
# make
# Compile phase: package make_<flavor> hook wins; otherwise dispatch on
# toolchain:flavor (ninja-backed systems use ninja, the rest use make).
pkg_call_exists pre_make_${TARGET} && pkg_call pre_make_${TARGET}
if pkg_call_exists make_${TARGET}; then
pkg_call make_${TARGET}
else
case "${PKG_TOOLCHAIN}:${TARGET}" in
# ninja based builds
"meson:target"|"cmake:target"|"ninja:target")
echo "Executing (target): ninja ${PKG_MAKE_OPTS_TARGET}" | tr -s " "
ninja ${NINJA_OPTS} ${PKG_MAKE_OPTS_TARGET}
;;
"meson:host"|"cmake:host"|"ninja:host")
echo "Executing (host): ninja ${PKG_MAKE_OPTS_HOST}" | tr -s " "
ninja ${NINJA_OPTS} ${PKG_MAKE_OPTS_HOST}
;;
"meson:init"|"cmake:init"|"ninja:init")
echo "Executing (init): ninja ${PKG_MAKE_OPTS_INIT}" | tr -s " "
ninja ${NINJA_OPTS} ${PKG_MAKE_OPTS_INIT}
;;
"meson:bootstrap"|"cmake:bootstrap"|"ninja:bootstrap")
echo "Executing (bootstrap): ninja ${PKG_MAKE_OPTS_BOOTSTRAP}" | tr -s " "
ninja ${NINJA_OPTS} ${PKG_MAKE_OPTS_BOOTSTRAP}
;;
# make based builds
"configure:target"|"cmake-make:target"|"autotools:target"|"make:target")
echo "Executing (target): make ${PKG_MAKE_OPTS_TARGET}" | tr -s " "
make ${PKG_MAKE_OPTS_TARGET}
;;
"configure:host"|"cmake-make:host"|"autotools:host"|"make:host")
echo "Executing (host): make ${PKG_MAKE_OPTS_HOST}" | tr -s " "
make ${PKG_MAKE_OPTS_HOST}
;;
"configure:init"|"cmake-make:init"|"autotools:init"|"make:init")
echo "Executing (init): make ${PKG_MAKE_OPTS_INIT}" | tr -s " "
make ${PKG_MAKE_OPTS_INIT}
;;
"configure:bootstrap"|"cmake-make:bootstrap"|"autotools:bootstrap"|"make:bootstrap")
echo "Executing (bootstrap): make ${PKG_MAKE_OPTS_BOOTSTRAP}" | tr -s " "
make ${PKG_MAKE_OPTS_BOOTSTRAP}
;;
# python based build
"python2:host")
echo "Executing (host): python2 setup.py install" | tr -s " "
$TOOLCHAIN/bin/python2 setup.py build --build-base .$HOST_NAME install
;;
esac
fi
pkg_call_exists post_make_${TARGET} && pkg_call post_make_${TARGET}
# Hack around directly writing/modifying the content of a shared sysroot
# by temporarily installing new files to a package specific sysroot
# (the original prefix is restored and merged back after makeinstall).
PKG_ORIG_SYSROOT_PREFIX="${SYSROOT_PREFIX}"
export SYSROOT_PREFIX="${BUILD}/.sysroot/${PKG_NAME}.${TARGET}"
rm -rf "${SYSROOT_PREFIX}"
# Create common sysroot directories as some packages expect them to exist.
# TODO: Fix those packages so we don't need to pre-create directories.
for d in /usr/lib /usr/include /usr/bin /usr/lib/pkgconfig; do
mkdir -p "${SYSROOT_PREFIX}${d}"
done
# make install
# Install phase. Target builds install twice: once into the (temporary)
# sysroot for later compiles, once into the package staging dir ${INSTALL}.
# Host/bootstrap builds install directly into ${TOOLCHAIN} (no DESTDIR).
pkg_call_exists pre_makeinstall_${TARGET} && pkg_call pre_makeinstall_${TARGET}
if pkg_call_exists makeinstall_${TARGET}; then
pkg_call makeinstall_${TARGET}
else
case "${PKG_TOOLCHAIN}:${TARGET}" in
# ninja based builds
"meson:target"|"cmake:target")
DESTDIR=${SYSROOT_PREFIX} ninja install ${PKG_MAKEINSTALL_OPTS_TARGET}
DESTDIR=${INSTALL} ninja install ${PKG_MAKEINSTALL_OPTS_TARGET}
;;
"meson:host"|"cmake:host")
ninja install ${PKG_MAKEINSTALL_OPTS_HOST}
;;
"meson:init"|"cmake:init")
DESTDIR=${INSTALL} ninja install ${PKG_MAKEINSTALL_OPTS_INIT}
;;
"meson:bootstrap"|"cmake:bootstrap")
ninja install ${PKG_MAKEINSTALL_OPTS_BOOTSTRAP}
;;
# make based builds
"configure:target"|"cmake-make:target"|"autotools:target"|"make:target")
make install DESTDIR=${SYSROOT_PREFIX} -j1 ${PKG_MAKEINSTALL_OPTS_TARGET}
make install DESTDIR=${INSTALL} ${PKG_MAKEINSTALL_OPTS_TARGET}
;;
"configure:host"|"cmake-make:host"|"autotools:host"|"make:host")
make install ${PKG_MAKEINSTALL_OPTS_HOST}
;;
"configure:init"|"cmake-make:init"|"autotools:init"|"make:init")
make install DESTDIR=${INSTALL} ${PKG_MAKEINSTALL_OPTS_INIT}
;;
"configure:bootstrap"|"cmake-make:bootstrap"|"autotools:bootstrap"|"make:bootstrap")
make install ${PKG_MAKEINSTALL_OPTS_BOOTSTRAP}
;;
esac
fi
pkg_call_exists post_makeinstall_${TARGET} && pkg_call post_makeinstall_${TARGET}
# Fixup temporary sysroot references to the shared sysroot:
# rewrite .la files, *-config scripts, pkg-config .pc files and cmake files
# so they point at the original (shared) sysroot, and retarget symlinks.
# FIX: the original iterated with `for i in $(find ...)`, which word-splits
# find's output and breaks on paths containing whitespace (ShellCheck
# SC2044). Iterate NUL-delimited instead; behavior is otherwise identical.
while IFS= read -r -d '' i; do
  sed -e "s:\(['= ]\)/usr:\\1${PKG_ORIG_SYSROOT_PREFIX}/usr:g" -i "${i}"
done < <(find "${SYSROOT_PREFIX}/usr/lib" -type f -name "*.la" -print0 2>/dev/null)
while IFS= read -r -d '' i; do
  sed -e "s#${SYSROOT_PREFIX}/usr#${PKG_ORIG_SYSROOT_PREFIX}/usr#g" -i "${i}"
done < <(find "${SYSROOT_PREFIX}/usr/bin" -type f -name "*-config" -print0 2>/dev/null)
while IFS= read -r -d '' i; do
  sed -e "s#${SYSROOT_PREFIX}/usr#${PKG_ORIG_SYSROOT_PREFIX}/usr#g" -i "${i}"
done < <(find "${SYSROOT_PREFIX}/usr/lib" -type f -name "*.pc" -print0 2>/dev/null)
while IFS= read -r -d '' i; do
  sed -e "s#${SYSROOT_PREFIX}/usr#${PKG_ORIG_SYSROOT_PREFIX}/usr#g" -i "${i}"
done < <(find "${SYSROOT_PREFIX}/usr"/{lib,share} -type f -name "*.cmake" -print0 2>/dev/null)
# Symlinks: absolute /usr targets are first rebased into the temporary
# sysroot; anything resolving inside it is re-pointed at the shared sysroot.
while IFS= read -r -d '' i; do
  _tmp="$(readlink -m "${i}")"
  [[ ${_tmp} =~ ^/usr ]] && _tmp="${SYSROOT_PREFIX}${_tmp}"
  if [[ ${_tmp} =~ ^${SYSROOT_PREFIX}/ ]]; then
    ln -sfn "${_tmp/${SYSROOT_PREFIX}\//${PKG_ORIG_SYSROOT_PREFIX}\/}" "${i}"
  fi
done < <(find "${SYSROOT_PREFIX}" -type l -print0 2>/dev/null)
# Transfer the new sysroot content to the shared sysroot
mkdir -p "${PKG_ORIG_SYSROOT_PREFIX}"
cp -PRf "${SYSROOT_PREFIX}"/* "${PKG_ORIG_SYSROOT_PREFIX}"
rm -rf "${SYSROOT_PREFIX}"
export SYSROOT_PREFIX="${PKG_ORIG_SYSROOT_PREFIX}"
# Trim the staged install tree for image flavors: drop development files,
# docs, locales and build leftovers, then strip binaries unless debugging.
if [ "${TARGET}" = "target" -o "${TARGET}" = "init" ]; then
if [ -d ${INSTALL} ]; then
rm -rf ${INSTALL}/{usr/,}include
rm -rf ${INSTALL}/{usr/,}lib/cmake
rm -rf ${INSTALL}/{usr/,}lib/pkgconfig
rm -rf ${INSTALL}/{usr/,}man
rm -rf ${INSTALL}/{usr/,}share/aclocal
rm -rf ${INSTALL}/{usr/,}share/bash-completion
rm -rf ${INSTALL}/{usr/,}share/doc
rm -rf ${INSTALL}/{usr/,}share/gtk-doc
rm -rf ${INSTALL}/{usr/,}share/info
rm -rf ${INSTALL}/{usr/,}share/locale
rm -rf ${INSTALL}/{usr/,}share/man
rm -rf ${INSTALL}/{usr/,}share/pkgconfig
rm -rf ${INSTALL}/{usr/,}share/zsh
rm -rf ${INSTALL}/{usr/,}var
find ${INSTALL} \( -name "*.orig" \
-o -name "*.rej" \
-o -name "*.a" \
-o -name "*.la" \
-o -name "*.o" \
-o -name "*.in" \
-o -name ".git*" \) \
-exec rm -f {} \; 2>/dev/null || :
# Remove any directories that became empty after the cleanup above.
find ${INSTALL} -type d -exec rmdir -p {} \; 2>/dev/null || :
if [ ! "${BUILD_WITH_DEBUG}" = "yes" ]; then
# Strip shared libraries, but never the core libc loader/runtime pieces
# (the init flavor strips everything — see below).
${STRIP} $(find ${INSTALL} \
-type f -name "*.so*" \
! -name "ld-*.so" \
! -name "libc-*.so" \
! -name "libpthread-*.so" \
! -name "libthread_db-*so" \
2>/dev/null) 2>/dev/null || :
if [ "${TARGET}" = "init" ]; then
${STRIP} $(find ${INSTALL} -type f -name "*.so*" 2>/dev/null) 2>/dev/null || :
fi
${STRIP} $(find ${INSTALL} ! -name "*.so*" ! -name "*.ko" \
-type f -executable 2>/dev/null) 2>/dev/null || :
fi
fi
fi
cd ${ROOT}
# Record the successful build: deep hash + debug flag go into the stamp file.
PKG_DEEPHASH=$(calculate_stamp)
for i in PKG_NAME PKG_DEEPHASH BUILD_WITH_DEBUG; do
echo "STAMP_${i}=\"${!i}\"" >> ${STAMP}
done
pkg_lock_status "UNLOCK" "${PKG_NAME}:${TARGET}" "build" "built"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.