blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
42a5b6e9c8c39e32874112384fb390d841e843c2
|
Shell
|
cs0x7f/worldcubeassociation.org
|
/scripts/deploy.sh
|
UTF-8
| 5,566
| 3.6875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
pull_latest() {
# Update the working tree and bring submodules in sync with it.
# (Technique from http://stackoverflow.com/a/8084186)
git pull --recurse-submodules || return
git submodule update
}
# Gracefully restart the unicorn app server if one is running, otherwise
# start a fresh daemonized instance from the WcaOnRails directory.
restart_app() {
if ps -efw | grep "unicorn master" | grep -v grep; then
# Found a unicorn master process, restart it gracefully as per
# http://unicorn.bogomips.org/SIGNALS.html
pid=$(<"WcaOnRails/pids/unicorn.pid")
kill -SIGUSR2 $pid
# Give the re-executed master time to boot before quitting the old one.
sleep 5
kill -SIGQUIT $pid
else
# We could not find a unicorn master process running, lets start one up!
(cd WcaOnRails; bundle exec unicorn -D -c config/unicorn.rb)
fi
}
# Print the abbreviated (7-character) commit hash of a ref on a remote.
# $1 - remote repository URL (or local path)
# $2 - branch / ref name on that remote
commit_hash() {
local remote_url=$1
local remote_branchname=$2
# git ls-remote prints "<full-hash>\t<ref>"; keep the first 7 characters.
# (locals replace the old REMOTE_URL/REMOTE_BRANCHNAME globals; quoting
# protects URLs/refs containing shell metacharacters)
git ls-remote "$remote_url" "$remote_branchname" | sed 's/\(.\{7\}\).*/\1/'
}
# Rebuild the /regulations static pages (WCA Regulations, their
# translations, and the regulations-data assets) into
# WcaOnRails/app/views/regulations, rebuilding only the pieces whose
# upstream commit hash differs from the locally recorded version files.
rebuild_regs() {
# Build WCA regulations
# Uses wrc, see here: https://github.com/thewca/wca-regulations-compiler
# pdf generations relies on wkhtmltopdf (with patched qt), which should be in $PATH
build_folder=regulations/build
regs_folder_root=WcaOnRails/app/views
tmp_dir=/tmp/regs-todelete
regs_folder=$regs_folder_root/regulations
# Version marker files: each stores the commit hash of the last build.
regs_version=$regs_folder/version
regs_data_version=$regs_folder/data_version
translations_version=$regs_folder/translations/version
rm -rf $build_folder
mkdir -p $build_folder
# The /regulations directory build relies on three sources:
# - The WCA Regulations
# - The WCA Regulations translations
# - The 'regulations-data' branch of this repo, which contains data such as TNoodle binaries
git_reg_hash=`commit_hash https://github.com/thewca/wca-regulations.git official`
git_translations_hash=`commit_hash https://github.com/thewca/wca-regulations-translations.git HEAD`
git_reg_data_hash=`commit_hash https://github.com/thewca/worldcubeassociation.org.git regulations-data`
rebuild_regulations=1
rebuild_regulations_data=1
rebuild_translations=1
# Check if the cloned regulations match the current version
if [ -r $regs_version ] && [ "`cat $regs_version`" == "$git_reg_hash" ]; then
rebuild_regulations=0
fi
# Check if the latest regulations-data match the current version
if [ -r $regs_data_version ] && [ "`cat $regs_data_version`" == "$git_reg_data_hash" ]; then
rebuild_regulations_data=0
fi
# Check if the cloned translations match the current version
if [ -r $translations_version ] && [ "`cat $translations_version`" == "$git_translations_hash" ]; then
rebuild_translations=0
fi
if [ $rebuild_regulations -eq 0 ] && [ $rebuild_translations -eq 0 ] && [ $rebuild_regulations_data -eq 0 ]; then
echo "WCA Regulations and translations are up to date."
return
fi
# Else we have to rebuild something
# This saves tracked files that may have unstashed changes too
cp -r $regs_folder $build_folder
# Checkout data (scramble programs, history)
# Assuming we ran pull_latest, this automatically checks out the latest regulations-data
git fetch https://github.com/thewca/worldcubeassociation.org.git regulations-data
git checkout FETCH_HEAD $build_folder
# Unstage what the checkout staged so it doesn't end up in a commit.
git reset HEAD $build_folder
if [ $rebuild_translations -eq 1 ]; then
git clone --depth=1 https://github.com/thewca/wca-regulations-translations.git $build_folder/wca-regulations-translations
languages=`wrc-languages`
# Clean up translations directories
find $build_folder/regulations/translations ! -name 'translations' -type d -exec rm -rf {} +
# Rebuild all translations
for kind in html pdf; do
for l in $languages; do
inputdir=$build_folder/wca-regulations-translations/${l}
outputdir=$build_folder/regulations/translations/${l}
mkdir -p $outputdir
echo "Generating ${kind} for language ${l}"
wrc --target=$kind -l $l -o $outputdir -g $git_translations_hash $inputdir
done
done
# Update version built
echo $git_translations_hash > $build_folder/regulations/translations/version
else
echo "Translations are up to date."
fi
outputdir=$build_folder/regulations
if [ $rebuild_regulations -eq 1 ]; then
git clone --depth=1 --branch=official https://github.com/thewca/wca-regulations.git $build_folder/wca-regulations
# Clean up regulations directory files
find $build_folder/regulations -maxdepth 1 -type f -exec rm -f {} +
# Rebuild Regulations
wrc --target=json -o $outputdir -g $git_reg_hash $build_folder/wca-regulations
wrc --target=html -o $outputdir -g $git_reg_hash $build_folder/wca-regulations
wrc --target=pdf -o $outputdir -g $git_reg_hash $build_folder/wca-regulations
# Update version built
echo $git_reg_hash > $build_folder/regulations/version
else
echo "Regulations are up to date"
fi
# Update regulations-data version built
echo $git_reg_data_hash > $build_folder/regulations/data_version
# Swap the freshly built tree into place: park the live tree in $tmp_dir,
# move the build in, then delete the old tree.
rm -rf $tmp_dir
mv $regs_folder $tmp_dir
mv $outputdir $regs_folder
rm -rf $tmp_dir
}
# Reload supervisor's configuration and restart the workers group.
restart_dj() {
sudo supervisorctl update
sudo supervisorctl restart workers:*
}
# Install gems and precompile assets (in a subshell so the cwd is
# unchanged), then restart the background workers and the app server.
rebuild_rails() {
(
cd WcaOnRails
bundle install
bundle exec rake assets:clean assets:precompile
# Note that we are intentionally not automating database migrations.
)
restart_dj
restart_app
}
# Run from the repository root (this script lives in scripts/).
cd "$(dirname "$0")"/..
# Choose the Rack environment based on the machine's hostname.
if [ "$(hostname)" == "production" ] || [ "$(hostname)" == "staging" ]; then
export RACK_ENV=production
else
export RACK_ENV=development
fi
# Workaround for https://github.com/rails/webpacker/issues/773
export RAILS_ENV=${RACK_ENV}
# Presumably _parse_args.sh dispatches the CLI argument to one of these
# functions -- TODO confirm against scripts/_parse_args.sh.
allowed_commands="pull_latest restart_app restart_dj rebuild_rails rebuild_regs"
source scripts/_parse_args.sh
| true
|
870c146ceb489236ad30009446eb47b4cf563703
|
Shell
|
avcaliani/mr-owlf
|
/mr-owlf-api/init-env.sh
|
UTF-8
| 810
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
#
# @author Anthony Vilarim Caliani
# @contact github.com/avcaliani
#
python_venv() {
# Create and bootstrap the local Python virtual environment on first
# run only; if .venv already exists this is a no-op.
if [ -d ".venv" ]; then
return 0
fi
echo -e "\n\033[1;32m ¯\_(ツ)_/¯ \033[00m Creating Python VEnv...\n"
python3 -m venv .venv \
&& source .venv/bin/activate \
&& pip install --upgrade pip \
&& pip install -r requirements.txt \
&& deactivate
}
# Banner (single quoted string -- no comments can go inside it).
echo -e "
 _ ___ ___
 /_\ | _ \_ _|
 / _ \| _/| |
/_/ \_\_| |___| ENVIRONMENT
"
# tmp/ may already exist; '|| true' keeps the script going in that case.
mkdir tmp || true
# Package the sibling ML project, copy its tarball here, then install it
# into this project's virtualenv; any failure in the chain aborts.
cd ../mr-owlf-mls/ \
&& ./start-dev.sh --package \
&& mv dist/mr_olwf_mls-*.tar.gz ../mr-owlf-api/tmp/ \
&& cd - \
&& python_venv \
&& source .venv/bin/activate \
&& pip install tmp/mr_olwf_mls-*.tar.gz \
&& deactivate \
|| exit 1
echo -e "\n\033[1;32m (งツ)ว \033[00m You are free to go!\n"
exit 0
| true
|
4113ab9c52ab58053b08eb1ea9b500c4e67aae05
|
Shell
|
Pink2Dev/PinkPiUi
|
/scripts/network_assign.sh
|
UTF-8
| 524
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Switch the device from access-point (static) networking to a regular
# wifi client of the given network, then reboot to apply. Must run as root.
if [ $# -ne 2 ]; then
echo "Usage: $0 <Network SSID> <Network Password>"
exit 1
fi
# Clear static network
# (truncate the AP-mode config files in place rather than deleting them)
> /etc/dnsmasq.conf
> /etc/hostapd/hostapd.conf
> /etc/network/interfaces.d/wlan0
sed -i "/denyinterfaces wlan0/d" /etc/dhcpcd.conf
# Assign dynamic network
# NOTE(review): $1/$2 expand inside the heredoc, so an SSID or password
# containing '"' would corrupt wpa_supplicant.conf -- confirm acceptable.
cat > /etc/wpa_supplicant/wpa_supplicant.conf << EOL
ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
update_config=1
network={
scan_ssid=1
ssid="$1"
psk="$2"
}
EOL
# Give the PinkPiUi a moment to render
sleep 5s
# Reboot
reboot
| true
|
6325e05557529caa5a56e284fdbcecd6e0a3033d
|
Shell
|
Estecka/42-minishell
|
/.test_hh/.git-cmd.sh
|
UTF-8
| 717
| 2.828125
| 3
|
[] |
no_license
|
# --------------------------------------------------------------------------#
# 'git stash -u' temporarily stores (stashes) the local changes,
# including untracked files
git stash -u
# --------------------------------------------------------------------------#
# Fetch all commit data from the remote that does not yet exist in the
# local copy; --prune also drops refs deleted on the remote
git fetch --prune
# --------------------------------------------------------------------------#
# Merge the fetched origin/master commits into the current branch
git merge origin/master
# --------------------------------------------------------------------------#
# Re-apply the previously stashed changes (left disabled on purpose)
#git stash pop
| true
|
227e67f5e9accb1b998f2132f795ea0c6a277c0f
|
Shell
|
davidcl/jupyter_binder
|
/postBuild
|
UTF-8
| 587
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Binder postBuild hook: download a local Scilab install and configure the
# environment so it runs headless.
set -ex
# Download and local install Scilab
VERSION=6.1.0
if [ ! -d scilab-$VERSION ]; then
# --fail makes curl exit non-zero on HTTP errors instead of piping an
# HTML error page into tar; --location follows redirects to the tarball.
curl --fail --location https://www.scilab.org/download/$VERSION/scilab-$VERSION.bin.linux-x86_64.tar.gz | tar xzf -
fi
PATH=$(pwd)/scilab-$VERSION/bin:$PATH
export PATH
# fix Scilab bug #16368: remove the bundled libs (presumably so the
# system-provided versions are used instead -- TODO confirm)
rm "$(pwd)"/scilab-$VERSION/lib/thirdparty/libssl.so*
rm "$(pwd)"/scilab-$VERSION/lib/thirdparty/libcrypto.so*
rm "$(pwd)"/scilab-$VERSION/lib/thirdparty/libcurl.so*
# disable TK
SCI_DISABLE_TK=1
export SCI_DISABLE_TK
# only headless java
SCI_JAVA_ENABLE_HEADLESS=1
export SCI_JAVA_ENABLE_HEADLESS
| true
|
169f2d478f87aae2f51046bba8634c69311c95f8
|
Shell
|
kirill-markin/jupyter_datascience
|
/startup.sh
|
UTF-8
| 770
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision the host and (re)launch the Jupyter data-science container.
# Pass -r as the first argument to skip reinstalling Docker.
pwd
whoami
## Project setup
sudo apt-get update
sudo apt-get install unzip
#yes Y | sudo rm -r notebooks
mkdir notebooks
sudo chgrp 100 notebooks
sudo chmod g+w notebooks
echo "vm.overcommit_memory=1" | sudo tee -a /etc/sysctl.conf
## Docker
# Unless -r was given, remove any existing docker packages and reinstall
if [ "$1" != "-r" ]; then
yes Y | sudo apt-get remove docker docker-engine docker.io
yes Y | sudo apt install docker.io
fi
sudo systemctl start docker
sudo systemctl enable docker
docker --version
sudo docker build -t jupyter_datascience ./
# Tear down any previously running instance before starting a fresh one
sudo docker kill jupyter_datascience_instance
sudo docker rm jupyter_datascience_instance
sudo docker run -it -p 8888:8888 --privileged --name jupyter_datascience_instance -v $(pwd)/notebooks:/home/jovyan/work jupyter_datascience:latest
| true
|
1494b1cda8ff2646ec3ad75aad4a7b4946619fdc
|
Shell
|
Yuukiki/jenkins_bot
|
/scripts/sync_source.sh
|
UTF-8
| 885
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Sync an AOSP-style source tree with the 'repo' tool.
#
# Arguments: $1 repo manifest URL, $2 branch, $3 extra args for
# 'repo sync' (e.g. -j12), $4 target directory (default ~/android).
# Helpers (pr_err, pr_err_exit, assert_equal) come from utils.sh.
RUNTIMEDIR=$(cd "$(dirname "$0")"; pwd)
source "$RUNTIMEDIR/utils.sh"
if [ $# -eq 0 ]; then
    echo "Usage: sync_source.sh \$REPOURL \$REPOBRANCH \$REPOARGS \$REPODIR"
    echo "eg: sync_source.sh \"https://github.com/LineageOS/android\" \"lineage-15.1\" \"-j12\" ~/ROM/lineage"
    exit 0
fi
# 'command -v' is the portable way to locate the repo launcher
REPOBIN=$(command -v repo)
if [ ! -f "$REPOBIN" ]; then
    pr_err_exit "repo cmd doesn't exist"
fi
REPOURL=$1
REPOBRANCH=$2
REPOARGS=$3
REPODIR=$4
assert_equal "$REPOURL" "" "REPOURL empty!"
assert_equal "$REPOBRANCH" "" "REPOBRANCH empty!"
if [[ $REPODIR == "" ]]; then
    pr_err "REPODIR not specified,assume to using default(~/android)"
    REPODIR=~/android
fi
cd "$REPODIR" || pr_err_exit "Failed to enter $REPODIR"
repo init -u "$REPOURL" -b "$REPOBRANCH"
if [ $? -ne 0 ]; then
    pr_err_exit "Failed to init repo"
fi
# $REPOARGS stays unquoted on purpose so multiple flags split into words
repo sync $REPOARGS
# Bug fix: the original read 'if [$? -ne 0 ]' (missing space after '['),
# so bash looked for a command named '[0' and the failure check was
# silently skipped. Also fixed the "Faile" typo in the init error.
if [ $? -ne 0 ]; then
    pr_err_exit "Failed to repo sync"
fi
echo "Sources successfully synced at $REPODIR"
| true
|
6dbf9567cf5b81085ad4fa6eade07b04bb5b068c
|
Shell
|
jorgerdc/raven-course
|
/raven-course-service/h2-startup.sh
|
UTF-8
| 1,138
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#This script is used to start H2 for testing.
#To use it:
# 1. open a new terminal
# 2. change to the directory where this script is located.
# 3. Run this script : ./h2-startup.sh
# 4. A browser window will appear with the H2 console interface
# 5. In the JDBC Url value type the following: jdbc:h2:./raven-course
# 6. press Connect.
#
# ./raven-course is a relative path where the database files should be located. If
# the files are not found, a new database will be created.
# Physically the database is made up of two files using this name:
# raven-course.mv.db and raven-course.trace.db
#
# To fully understand, look at the value of the -baseDir flag below.
# This is also a relative path where databases should be located. If we join both
# relative paths we get: src/test/resources/databases/h2/schemas/raven-course
# which match the directory where the test database will be located.
#
# To stop the Instance, just close the terminal or type Ctrl - C
echo "Starting H2 Server"
# The h2*.jar glob picks up whichever H2 version ships in test resources.
java -cp src/test/resources/databases/h2/h2*.jar \
org.h2.tools.Server -baseDir src/test/resources/databases/h2/schemas/
| true
|
a3c147ab17c1255be407ff2a70be9f0ec47e6998
|
Shell
|
barbw1re/crouton-buddy
|
/crouton-buddy/cb-host.sh
|
UTF-8
| 13,251
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
################################################################
# Host (outside chroot) Menu
################################################################
# Set by caller:
#
# me => full path to caller (crouton-buddy.sh script)
# CB_ROOT => full path to Crouton Buddy application directory
# HOME_DIR => full path to home directory
# ROOT_DIR => full path to Downloads directory
# Globals
# LINUX_RELEASE: Ubuntu release installed into new chroots.
LINUX_RELEASE="xenial"
CHROOT_ROOT="/mnt/stateful_partition/crouton/chroots"
CROUTON_APP="$ROOT_DIR/crouton"
# Cached release bootstrap tarball used when creating environments.
CROUTON_BOOTSTRAP="$ROOT_DIR/$LINUX_RELEASE.tar.bz2"
CROUTON_TARGETS="core,audio,x11,chrome,cli-extra,extension,gtk-extra,gnome,kde,keyboard,xorg,xiwi"
# Load dependencies
. "$CB_ROOT/cb-ui.sh"
. "$CB_ROOT/cb-crouton.sh"
. "$CB_ROOT/cb-common.sh"
. "$CB_ROOT/menu/bash-menu.sh"
#
# Menu item handlers
#
# Menu action: prompt for a name and create a new chroot environment via
# Crouton, caching/reusing the release bootstrap. Always returns 1 so the
# menu loop keeps running. UI helpers come from the sourced cb-*.sh files.
cbCreate() {
# Ensure needed globals:
[[ "$CROUTON_APP" ]] || cbAbort "CROUTON_APP not configured"
[[ "$CROUTON_BOOTSTRAP" ]] || cbAbort "CROUTON_BOOTSTRAP not configured"
[[ "$CROUTON_TARGETS" ]] || cbAbort "CROUTON_TARGETS not configured"
[[ "$LINUX_RELEASE" ]] || cbAbort "LINUX_RELEASE not configured"
cbInitAction "Create a new environment" || return 1
chrootName=`cbAsk "Enter name of new environment to create (or 'L' to list current environments): "`
# Re-prompt while the reply is 'l'/'L' (list request) or a name clash.
while [[ "$chrootName" && ( "$chrootName" = "l" || "$chrootName" = "L" || "$(cbIsChroot "$chrootName")" -eq 1 ) ]]; do
echo ""
if [[ "$chrootName" != "l" && "$chrootName" != "L" ]]; then
cbError "There is already an environment named $chrootName"
fi
cbListChroots "Currently created environments:"
chrootName=`cbAsk "Enter name of new environment to create (or '' to abort): "`
done
if [[ ! "$chrootName" ]]; then
cbAcknowledgeAbort "Aborting environment creation."
return 1
fi
echo ""
if (( ! "$(cbConfirm "Are you sure you want to create new environment $chrootName")" )); then
cbAcknowledgeAbort "Aborting environment creation."
return 1
fi
echo ""
cbEnsureBootstrap || return 1
cbInfo "Creating installation of $LINUX_RELEASE as $chrootName" \
"Using targets: $CROUTON_TARGETS"
if (( ! "$(cbConfirm "Are you sure you want to create the new environment $chrootName")" )); then
cbAcknowledgeAbort "Aborting environment creation."
return 1
fi
# Finally - call Crouton to create new environment
sudo sh $CROUTON_APP -n $chrootName -f $CROUTON_BOOTSTRAP -t $CROUTON_TARGETS
cbAcknowledge "New environment $chrootName created."
return 1
}
# Menu action: pick an existing environment and re-run the caller script
# ($me) inside it via enter-chroot. Always returns 1 (keep menu running).
cbConfigure() {
# Ensure needed globals:
[[ "$me" ]] || cbAbort "'me' not configured"
[[ "$CHROOT_ROOT" ]] || cbAbort "CHROOT_ROOT not configured"
cbInitAction "Configure/manage environment" || return 1
if (( ! $(cbCountChroots) )); then
cbAcknowledgeAbort "No environment found to manage."
return 1
fi
cbListChroots
chrootName=`cbAsk "Enter name of environment to manage: "`
while [[ "$chrootName" && "$(cbIsChroot "$chrootName")" -eq 0 ]]; do
echo ""
cbError "There is no environment named $chrootName"
cbListChroots
chrootName=`cbAsk "Enter name of environment to manage (or '' to abort): "`
done
if [[ ! "$chrootName" ]]; then
cbAcknowledgeAbort "Aborting environment management."
return 1
fi
echo ""
# Call Crouton to enter environment and execute crouton-buddy.sh script
# NOTE(review): parsing 'ls' assumes exactly one home directory in the
# chroot and no whitespace in its name -- confirm.
local chrootUser=`ls $CHROOT_ROOT/$chrootName/home/ | awk '{print $1}'`
sudo enter-chroot -n $chrootName -l sh /home/$chrootUser/Downloads/$me
return 1
}
# Menu action: open an interactive shell inside a chosen environment.
# Always returns 1 so the menu loop keeps running.
cbEnter() {
cbInitAction "Open terminal to environment" || return 1
if (( ! $(cbCountChroots) )); then
cbAcknowledgeAbort "No environment found to enter."
return 1
fi
cbListChroots
chrootName=`cbAsk "Enter name of environment to enter: "`
while [[ "$chrootName" && "$(cbIsChroot "$chrootName")" -eq 0 ]]; do
echo ""
cbError "There is no environment named $chrootName"
cbListChroots
chrootName=`cbAsk "Enter name of environment to enter (or '' to abort): "`
done
if [[ ! "$chrootName" ]]; then
cbAcknowledgeAbort "Aborting entering environment."
return 1
fi
echo ""
# Finally - call Crouton to enter environment
sudo enter-chroot -n $chrootName
return 1
}
# Menu action: start the Gnome session of a chosen environment in the
# background. Always returns 1 so the menu loop keeps running.
cbStartGnome() {
cbInitAction "Start Gnome environment" || return 1
if (( ! $(cbCountChroots) )); then
cbAcknowledgeAbort "No environment found to start."
return 1
fi
cbListChroots
chrootName=`cbAsk "Enter name of environment to start: "`
while [[ "$chrootName" && "$(cbIsChroot "$chrootName")" -eq 0 ]]; do
echo ""
cbError "There is no environment named $chrootName"
cbListChroots
chrootName=`cbAsk "Enter name of environment to start (or '' to abort): "`
done
if [[ ! "$chrootName" ]]; then
cbAcknowledgeAbort "Aborting starting environment."
return 1
fi
echo ""
# Finally - call Crouton to start gnome in background
sudo startgnome -n $chrootName -b
cbAcknowledge "Environment started. Logout of $chrootName to exit."
return 1
}
# Menu action: start the KDE session of a chosen environment in the
# background. Mirrors cbStartGnome; always returns 1.
cbStartKde() {
cbInitAction "Start KDE environment" || return 1
if (( ! $(cbCountChroots) )); then
cbAcknowledgeAbort "No environment found to start."
return 1
fi
cbListChroots
chrootName=`cbAsk "Enter name of environment to start: "`
while [[ "$chrootName" && "$(cbIsChroot "$chrootName")" -eq 0 ]]; do
echo ""
cbError "There is no environment named $chrootName"
cbListChroots
chrootName=`cbAsk "Enter name of environment to start (or '' to abort): "`
done
if [[ ! "$chrootName" ]]; then
cbAcknowledgeAbort "Aborting starting environment."
return 1
fi
echo ""
# Finally - call Crouton to start KDE in background
sudo startkde -n $chrootName -b
cbAcknowledge "Environment started. Logout of $chrootName to exit."
return 1
}
# Menu action: run the Crouton updater (-u) on a chosen environment after
# confirmation. Always returns 1 so the menu loop keeps running.
cbUpdate() {
# Ensure needed globals:
[[ "$CROUTON_APP" ]] || cbAbort "CROUTON_APP not configured"
cbInitAction "Update an existing environment" || return 1
if (( ! $(cbCountChroots) )); then
cbAcknowledgeAbort "Nothing to update. No environments found."
return 1
fi
cbListChroots
chrootName=`cbAsk "Enter name of environment to update: "`
while [[ "$chrootName" && "$(cbIsChroot "$chrootName")" -eq 0 ]]; do
echo ""
cbError "There is no environment named $chrootName"
cbListChroots
chrootName=`cbAsk "Enter name of environment to update (or '' to abort): "`
done
if [[ ! "$chrootName" ]]; then
cbAcknowledgeAbort "Aborting environment update."
return 1
fi
echo ""
if (( ! "$(cbConfirm "Are you sure you want to update environment $chrootName")" )); then
cbAcknowledgeAbort "Aborting environment update."
return 1
fi
echo ""
# Finally - call Crouton to update environment
sudo sh $CROUTON_APP -n $chrootName -u
cbAcknowledge "Environment updated."
return 1
}
# Menu action: back up a chosen environment via edit-chroot -b after
# confirmation. Always returns 1 so the menu loop keeps running.
cbBackup() {
# Fixed user-facing typo in the action title ("environmewnt").
cbInitAction "Backup environment" || return 1
if (( ! $(cbCountChroots) )); then
cbAcknowledgeAbort "No environment found to backup."
return 1
fi
cbListChroots
chrootName=`cbAsk "Enter name of environment to backup: "`
while [[ "$chrootName" && "$(cbIsChroot "$chrootName")" -eq 0 ]]; do
echo ""
cbError "There is no environment named $chrootName"
cbListChroots
chrootName=`cbAsk "Enter name of environment to backup (or '' to abort): "`
done
if [[ ! "$chrootName" ]]; then
cbAcknowledgeAbort "Aborting environment backup."
return 1
fi
echo ""
if (( ! "$(cbConfirm "Are you sure you want to backup environment $chrootName")" )); then
cbAcknowledgeAbort "Aborting environment backup."
return 1
fi
echo ""
# Finally - call Crouton to backup environment
sudo edit-chroot -b $chrootName
cbAcknowledge "Environment backup completed."
return 1
}
# Menu action: restore an environment from a backup tarball in $ROOT_DIR,
# either overwriting an existing chroot (-rr) or creating a new one (-r).
# Always returns 1 so the menu loop keeps running.
cbRestore() {
# Ensure needed globals:
[[ "$ROOT_DIR" ]] || cbAbort "ROOT_DIR not configured"
cbInitAction "Restore environment" || return 1
if (( ! $(cbCountBackups) )); then
cbAcknowledgeAbort "No backup files found to restore."
return 1
fi
cbListBackups
local backupFile=`cbAsk "Enter backup filename to restore: "`
while [[ "$backupFile" && ! -f "$ROOT_DIR/$backupFile" ]]; do
echo ""
cbError "There is no backup file named $backupFile"
cbListBackups
backupFile=`cbAsk "Enter backup filename to restore (or '' to abort): "`
done
if [[ ! "$backupFile" ]]; then
cbAcknowledgeAbort "Aborting environment restore."
return 1
fi
echo ""
if (( $(cbCountChroots) )); then
cbListChroots
fi
chrootName=`cbAsk "Enter name of environment create via restore: "`
if [[ ! "$chrootName" ]]; then
cbAcknowledgeAbort "Aborting environment restore."
return 1
fi
echo ""
# Existing name -> overwrite (-rr); new name -> create via restore (-r).
if [[ "$(cbIsChroot "$chrootName")" -eq 1 ]]; then
if (( ! "$(cbConfirm "Are you sure you want to overwrite environment $chrootName with restore of $backupFile")" )); then
cbAcknowledgeAbort "Aborting environment restore."
return 1
fi
echo ""
# Call Crouton to restore into existing environment
sudo edit-chroot -rr $chrootName -f "$ROOT_DIR/$backupFile"
else
if (( ! "$(cbConfirm "Are you sure you want to create environment $chrootName via restore of $backupFile")" )); then
cbAcknowledgeAbort "Aborting environment restore."
return 1
fi
echo ""
# Call Crouton to restore new environment
sudo edit-chroot -r $chrootName -f "$ROOT_DIR/$backupFile"
fi
cbAcknowledge "Environment $chrootName restored."
return 1
}
# Menu action: delete a chosen environment via delete-chroot after
# confirmation. Always returns 1 so the menu loop keeps running.
cbDelete() {
cbInitAction "Delete environment" || return 1
if (( ! $(cbCountChroots) )); then
cbAcknowledgeAbort "No environment found to delete."
return 1
fi
cbListChroots
chrootName=`cbAsk "Enter name of environment to delete: "`
while [[ "$chrootName" && "$(cbIsChroot "$chrootName")" -eq 0 ]]; do
echo ""
cbError "There is no environment named $chrootName"
cbListChroots
chrootName=`cbAsk "Enter name of environment to delete (or '' to abort): "`
done
if [[ ! "$chrootName" ]]; then
cbAcknowledgeAbort "Aborting environment deletion."
return 1
fi
echo ""
if (( ! "$(cbConfirm "Are you sure you want to delete environment $chrootName")" )); then
cbAcknowledgeAbort "Aborting environment deletion."
return 1
fi
echo ""
# Finally - call Crouton to delete environment
sudo delete-chroot $chrootName
cbAcknowledge "Environment deleted."
return 1
}
# Menu action: delete the cached release bootstrap tarball, verifying the
# file is actually gone afterwards. Always returns 1.
cbPurge() {
# Ensure needed globals:
[[ "$CROUTON_BOOTSTRAP" ]] || cbAbort "CROUTON_BOOTSTRAP not configured"
[[ "$LINUX_RELEASE" ]] || cbAbort "LINUX_RELEASE not configured"
cbInitAction "Purge cached bootstrap" || return 1
if [[ ! -s "$CROUTON_BOOTSTRAP" ]]; then
cbAcknowledgeAbort "No cached bootstrap for $LINUX_RELEASE found (expected $CROUTON_BOOTSTRAP)"
return 1
fi
if (( ! "$(cbConfirm "Are you sure you want to purge the cached $LINUX_RELEASE bootstrap")" )); then
cbAcknowledgeAbort "Aborting bootstrap purge."
return 1
fi
sudo rm "$CROUTON_BOOTSTRAP"
local ret=$?
# Treat either a failed rm or a still-present non-empty file as failure.
if [[ $ret -ne 0 || -s "$CROUTON_BOOTSTRAP" ]]; then
cbError "ERROR: Unable to purge $LINUX_RELEASE bootstrap"
cbAcknowledge
return 1
fi
cbAcknowledge "Bootstrap cache purged."
return 1
}
################################################################
# Menu Setup
################################################################
# Menu item labels
menuItems=(
"Create a new environment "
"Configure/manage environment "
"Enter an environment (terminal)"
"Start an environment (Gnome) "
"Start an environment (KDE) "
"Update an existing environment "
"Backup environment "
"Restore environment "
"Delete environment "
"Purge cached bootstrap "
"Update Crouton Buddy scripts "
"Quit "
)
# Menu item action functions (index-aligned with menuItems).
# cbInstall is not defined in this file -- presumably provided by one of
# the sourced cb-*.sh scripts; TODO confirm.
menuActions=(
cbCreate
cbConfigure
cbEnter
cbStartGnome
cbStartKde
cbUpdate
cbBackup
cbRestore
cbDelete
cbPurge
cbInstall
"return 0"
)
# Menu configuration overrides
menuTitle=" Crouton Administration"
menuWidth=60
################################################################
# Execute Script
################################################################
# Entry point: draw the menu and process selections until an action
# returns 0 (the "Quit" item). menuInit/menuLoop come from bash-menu.sh.
cbRun() {
menuInit
menuLoop
}
| true
|
39d0ae924c653453c5f2ffb1ae6bb1e7078d8935
|
Shell
|
mauricerashad/worksofcode
|
/rotate.sh
|
UTF-8
| 2,582
| 4.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Rotate info and be stateful :)
# Useful when rotating people for notifications weekly, such as in a custom monitoring service / function / script
# Variables
script_name='rotate.sh'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
rotation=('user1' 'thisGuy' 'thatGirl' 'botThing')
count=$(echo ${#rotation[@]} - 1 | bc)
target=/etc/service/conf.d/target.conf
weekdays='monday tuesday wednesday thursday friday saturday sunday'
# The below var is updated each time this script is run so that it remains stateful
# Don't edit manually - just run the script until the correct rotation is achieved!!!
current_rotation=botThing
# Skip updating target when always
always_rotation='botThing'
# Report rotation and exit
if [ $# -ge 1 ] && [[ $1 != '-F' ]]; then
printf '%s ' "Pool is ${rotation[@]}" && echo ''
echo Current rotation is $current_rotation
echo Argurment \'-F\' will force the run
[[ $always_rotation != '' ]] && echo Always rotation is $always_rotation
exit 0
fi
# Run only on Sunday
day=$(date +%a)
if ! [[ $day == Sun ]] && [[ $1 != '-F' ]]; then
echo Sorry - please run this ONCE on Sunday only
exit 1
fi
# Only allow root to run
# NOTE(review): $RED/$NC are not defined in this script -- presumably
# inherited from the environment; confirm.
if [[ $EUID -ne 0 ]]; then
printf "This can only be run as ${RED}root${NC}.\n" 1>&2
exit 1
fi
# Find the current person's index and compute who rotates in next.
for next_rotated_idx in $(seq 0 $count); do
# If we are at the last element in the array, then the next rotated person is the first element
if [ $next_rotated_idx -eq $count ]; then
last_rotated_idx=$next_rotated_idx
next_rotated_idx=0
break
# If this element in the array is current, then the next element in the array is assigned
elif [[ $current_rotation == "$(echo ${rotation[$next_rotated_idx]})" ]]; then
last_rotated_idx=$next_rotated_idx
next_rotated_idx=$((next_rotated_idx + 1))
break
fi
done
# Do some work
# (skipped entirely when the incoming person is in $always_rotation)
for weekday in $weekdays; do
grep -q $(echo ${rotation[next_rotated_idx]}) <<<$always_rotation && break
echo Doing some work now - please wait
done
# Do some more work
for weekday in $weekdays; do
grep -q $(echo ${rotation[last_rotated_idx]}) <<<$always_rotation && break
echo Doing some work now - please wait
done
# Apply changes
service myservice reload
# Stateful update. Update this script to remain stateful
# (the script rewrites its own current_rotation= line in place)
sed -i "/^current_rotation/c current_rotation=${rotation[$next_rotated_idx]}" $DIR/$script_name
# Report on current operations and exit
echo ${rotation[$last_rotated_idx]} has been rotated out
echo ${rotation[$next_rotated_idx]} has been rotated in
| true
|
448105e240bca7d9fdbd9e7595a4c132c7e7ce03
|
Shell
|
ehacke/coreos-multi-node-k8s-gce
|
/bootstrap_k8s_cluster.sh
|
UTF-8
| 3,375
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Bootstrap a multi-node CoreOS Kubernetes cluster on Google Compute
# Engine: create a control node plus worker nodes from cloud-config
# templates, then deploy the k8s components as fleet units.
ssh-add ~/.ssh/google_compute_engine &>/dev/null
# SET ETCD VERSION TO BE USED !!!
ETCD_RELEASE=v2.0.5
# SET FLEET VERSION TO BE USED !!!
FLEET_RELEASE=v0.9.1
# SET KUBERNETES VERSION TO BE USED !!!
k8s_version=v0.13.2
## change Google Cloud settings as per your requirements
# GC settings
# SET YOUR PROJECT AND ZONE !!!
project=groupby-cloud-1701
zone=us-central1-a
# CoreOS RELEASE CHANNEL
channel=alpha
# CONTROL AND NODES MACHINE TYPES
control_machine_type=g1-small
node_machine_type=n1-standard-1
##
###
# control node name
control_name=k8s-control
# node name
node_name=k8s-node
###
echo "Removing previous config"
rm -rf cloud-config/ > /dev/null 2>&1
rm -rf fleet-units/ > /dev/null 2>&1
echo "Creating new config"
# prep new configs and units
cp -rf raw-cloud-config/ cloud-config/
cp -rf raw-fleet-units/ fleet-units/
# get the latest full image name
image=$(gcloud compute images list | grep -v grep | grep coreos-$channel | awk {'print $1'})
echo "Modifying configs"
# update cloud-configs with CoreOS release channel
# NOTE(review): 'sed -i ""' is the BSD/macOS in-place form; GNU sed would
# treat "" as the script -- confirm this only runs on macOS.
sed -i "" -e 's/GROUP/'$channel'/g' ./cloud-config/*.yaml
# update fleet units with k8s version
sed -i "" -e 's/k8s_version/'$k8s_version'/g' ./fleet-units/*.service
#
echo "Creating control machine"
# CONTROL
# create control node
gcloud compute instances create $control_name \
--project=$project --image=$image --image-project=coreos-cloud \
--boot-disk-type=pd-ssd --boot-disk-size=10 --zone=$zone \
--machine-type=$control_machine_type --metadata-from-file user-data=./cloud-config/control.yaml \
--can-ip-forward --scopes compute-rw --tags k8s-cluster --network kubernachos
# get control node internal IP
control_node_ip=$(gcloud compute instances list --project=$project | grep -v grep | grep $control_name | awk {'print $4'});
# NODES
# update node's cloud-config with control node's internal IP
sed -i "" -e 's/CONTROL-NODE-INTERNAL-IP/'$control_node_ip'/g' ./cloud-config/node.yaml
# create nodes
# by defaul it creates two nodes, e.g. to add a third one, add after '$node_name-02' $node_name-03 and so on
gcloud compute instances create $node_name-01 $node_name-02 \
--project=$project --image=$image --image-project=coreos-cloud \
--boot-disk-type=pd-ssd --boot-disk-size=20 --zone=$zone \
--machine-type=$node_machine_type --metadata-from-file user-data=./cloud-config/node.yaml \
--can-ip-forward --tags k8s-cluster
# FLEET
# update fleet units with control node's internal IP
sed -i "" -e 's/CONTROL-NODE-INTERNAL-IP/'$control_node_ip'/g' ./fleet-units/*.service
# download etcdctl, fleetctl and k8s binaries for OS X or Linux
if [[ "$OSTYPE" == "linux-gnu" ]]; then
./get_k8s_fleet_etcd_linux.sh
else
./get_k8s_fleet_etcd_osx.sh
fi
# set binaries folder, fleet tunnel to control's external IP
export PATH=${HOME}/k8s-bin:$PATH
control_external_ip=$(gcloud compute instances list --project=$project | grep -v grep | grep $control_name | awk {'print $5'});
export FLEETCTL_TUNNEL="$control_external_ip"
export FLEETCTL_STRICT_HOST_KEY_CHECKING=false
# deploy k8s fleet units
cd ./fleet-units
echo "Installing k8s fleet units !!!"
fleetctl start kube-kubelet.service
fleetctl start kube-proxy.service
fleetctl start kube-apiserver.service
fleetctl start kube-controller-manager.service
fleetctl start kube-scheduler.service
fleetctl start kube-register.service
echo " "
fleetctl list-units
| true
|
3c0d68ad2dc11402a39520b86e332bd85f4aae08
|
Shell
|
0x4447-cloud-admin/0x4447_scripts
|
/Curl/Follow Each Redirect/script.sh
|
UTF-8
| 1,850
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Follow every HTTP redirect of a site: print the redirect chain as an
# indented hierarchy, then dump the response headers of each hop.
#
# 1. Request the name of site
#
echo "Enter the site name: (Press enter for https://demo.0x4447.com) ";
#
# 2. Take input
#
read site;
#
# 3. If no site entered, use default ($site is quoted: an empty reply
#    would otherwise leave [ -z ] with no operand)
#
if [ -z "$site" ];
then
site="https://demo.0x4447.com";
fi;
#
# 4. Empty Space
#
echo ""
#
# 5. Array for the redirect URLs (starting with default)
#
newsite+=($site);
#
# 6. Start counter
#
counter=0
#
# 7. Loop through redirects
#
while true;
do
#
# 8. Set nextLoc = to the redirected URL based on the Location header
#
nextLoc=$(curl -I "${newsite[counter]}" 2>/dev/null | tr -d $'\r' | grep -i ^"location:" | awk '{print $2}');
#
# 9. If there is redirect, add URL to array
#
if [ -n "$nextLoc" ];
then
newsite+=($nextLoc);
nextLoc="";
counter=$(( counter + 1 ))
else
#
# 10. Stop While Loop if no more redirects
#
break;
fi;
done;
#
# 11. Separator
#
echo "=============================================================================";
#
# 12. Loop through array and print hierarchy with URLs
#
for i in $(seq 0 $(( ${#newsite[@]} - 1 )));
do
#
# 13. Rebuild the '--' prefix from scratch each iteration, one pair per
#     depth level. (Bug fix: 'tabs' was never reset between iterations,
#     so the indentation grew quadratically instead of by one '--' per
#     redirect hop.)
#
tabs=""
for j in $(seq 0 $i);
do
tabs="${tabs}--"
done;
#
# 14. Print "--" and URL
#
echo "${tabs}${newsite[i]}"
done;
#
# 15. Two empty lines
#
echo ""
echo ""
#
# 16. Print URL and header info of the URL for each redirect
#
for i in $(seq 0 $(( ${#newsite[@]} - 1 )));
do
echo "=============================================================================";
#
# 17. Print url of redirect
#
echo ${newsite[i]}
echo "=============================================================================";
#
# 18. Print head info of redirect from URL
#
curl -I "${newsite[i]}"
echo ""
done;
#
# 19. Separator
#
echo "=============================================================================";
| true
|
5b6089646b1387f56a00b58b72419acaf2669b3c
|
Shell
|
XinYao1994/Shell
|
/remote/yes.sh
|
UTF-8
| 300
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/expect
# Automate the first ssh hop from a student machine to the coc-server
# host: log in, accept the unknown host key ("yes"), authenticate, then
# exit both shells and hand the terminal back to the user.
# Usage: yes.sh <student-number>
# NOTE(review): both passwords are hard-coded in plain text below.
set no [lindex $argv 0]
spawn ssh student@student${no}
expect "*password:*"
send "student\n"
expect "~$*"
send "ssh coc-server@10.42.0.1 \n"
# First connection to 10.42.0.1: answer the host-key confirmation prompt
expect "*(yes/no)?*"
send "yes\n"
expect "*password:*"
send "coc-server\n"
expect "~$*"
send "exit\n"
expect "~$*"
send "exit\n"
interact
| true
|
cb6dbec6cdd2cf65d0694bf126f80f9d80042a30
|
Shell
|
ford153focus/apt-beget
|
/installers/joomlatools.sh
|
UTF-8
| 497
| 2.96875
| 3
|
[] |
no_license
|
:<<=
==analog of wp-cli for Joomla
==https://github.com/joomlatools/joomlatools-console
=
# Install the joomlatools console (an analog of wp-cli for Joomla) via
# composer, and make it available on the user's PATH.
# Relies on helpers defined elsewhere in this file: echo_y / echo_g
# (colored output), prepare_folders, install_composer.
function install_joomlatools {
echo_y "Installing joomlatools..."
#prepare folders
prepare_folders
#dependencies
install_composer
#download
composer global require joomlatools/console
# install: append a PATH line to .bashrc.
# Fix: write the literal string PATH="$PATH:$HOME/..." (single-quoted format)
# instead of expanding $PATH at install time — the old version froze the
# user's current PATH into .bashrc permanently.
printf '\n\nPATH="$PATH:$HOME/.composer/vendor/bin"\n' >> "$HOME/.bashrc"
source "$HOME/.bashrc"
echo_y 'Now execute `source $HOME/.bashrc`'
echo_g "joomlatools installed"
}
| true
|
85abc9fbc45670178818bfaed7c627a9d6e787bb
|
Shell
|
PiresMA/bash_with_python_R_c
|
/bash_and_R_example_01/runner-r.bh
|
UTF-8
| 400
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
# Run three parallel instances of first-code.R, wait for all of them to
# finish, then launch three instances of second-code.R (fire-and-forget).
# FIRST PART
printf '\n running first-code.R \n'
pids=""
# Fix: {1..3} is a bashism that /bin/sh (dash/ash) does NOT expand — the loop
# would run once with the literal string "{1..3}". Use an explicit POSIX list.
for index in 1 2 3
do
Rscript first-code.R "$index" &
sleep 1
pids="$pids $!"   # collect background PIDs
done
echo $pids # just to see the list of pids
wait $pids # intentionally unquoted: word-splitting yields one PID per argument
# SECOND PART
printf '\n running second-code.R \n'
for index in 1 2 3
do
Rscript second-code.R "$index" &
sleep 1
done
| true
|
17f90f54e011b1a1989dc6676571acd66990078d
|
Shell
|
lasigeBioTM/MESINESP2
|
/preprocessing/prepare_input.sh
|
UTF-8
| 2,102
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
#Executes the code to process the data given by MESINESP and stores it in the format required for X-Transformer.
#This script requires the name of the dataset to preprocess (scientific_literature, clinical_trials or patents)
#a threshold determining the fraction of entities to include in the processed dataset (1.0, 0.75, 0.5, 0.25).
############## Args ##############
DATASET=$1
THRESHOLD=$2 # 100%, 75%, 50%, and 25% top entities to consider
# Robustness: fail early with a usage message instead of silently creating
# directories named "_/" when the arguments are missing.
if [ -z "$DATASET" ] || [ -z "$THRESHOLD" ]; then
    echo "Usage: $0 <dataset> <threshold>" >&2
    exit 1
fi
############## pre-processing ##############
out_dir="./processed_data/${DATASET}_${THRESHOLD}/"
mkdir -p "$out_dir"
python pre_process_dataset.py "$DATASET" "$THRESHOLD" "$out_dir"
############## Moving the created files ##############
#Creates the directory for the BioASQ in the X-Transformer datasets folder
dest="../X-Transformer/datasets/${DATASET}_${THRESHOLD}"
mkdir -p "$dest"
mkdir -p "$dest/mapping"
#Then, copies the files required by X-Transformer to the directory
cp "${out_dir}label_map.txt" "$dest/mapping/label_map.txt"
cp "${out_dir}label_vocab.txt" "$dest/label_vocab.txt"
# The train/dev/test splits follow one naming scheme: <split>.txt,
# <split>_raw_labels.txt, <split>_raw_texts.txt — copy each triple.
for split in train dev test; do
    cp "${out_dir}${split}.txt" "$dest/${split}.txt"
    cp "${out_dir}${split}_raw_labels.txt" "$dest/${split}_raw_labels.txt"
    cp "${out_dir}${split}_raw_texts.txt" "$dest/${split}_raw_texts.txt"
done
echo "Input files moved to dir ../X-Transformer/datasets/${DATASET}_${THRESHOLD}"
| true
|
ec962f574677c32c9946526e7cc318146eb0469b
|
Shell
|
dilawar/Scripts
|
/bfg
|
UTF-8
| 298
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Wrapper around BFG Repo-Cleaner: download the jar on first use, then run it
# forwarding all arguments.
set -e
VERSION=1.14.0
JARFILE="$HOME/Scripts/bfg-$VERSION.jar"
URL="https://repo1.maven.org/maven2/com/madgag/bfg/$VERSION/bfg-$VERSION.jar"
if [ ! -f "$JARFILE" ]; then
    echo "Downloading jar file"
    wget "$URL" -O "$JARFILE"
fi
set -x
# Consistency fix: reuse $JARFILE instead of re-spelling the same path inline.
java -jar "$JARFILE" "$@"
| true
|
0eed19644a11ebaf6b0267abdf15b7d0c4eb645c
|
Shell
|
katarina-gres/fit-projects
|
/PRL/3/test.sh
|
UTF-8
| 643
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Read the matrix dimension from the first line of each input file, spawn one
# MPI process per element of the result matrix, run the mesh-multiplication
# binary, and clean up the compiled binary afterwards.
# (Input-file validation is kept disabled, as in the original harness.)
rows=$(head -n1 mat1)
cols=$(head -n1 mat2)
# One CPU per cell of the rows x cols result matrix.
cpus=$((rows * cols))
mpic++ --prefix /usr/local/share/OpenMPI -o mm mm.cpp -std=c++0x
mpirun --prefix /usr/local/share/OpenMPI -np "$cpus" mm
rm -f mm
| true
|
8bce03d87fa8ec6b9c933239877bf6685c02f0e5
|
Shell
|
manang/android-sdk
|
/scripts/install_ffmpeg.sh
|
UTF-8
| 2,121
| 3.21875
| 3
|
[] |
no_license
|
#############################################################################
# Copyright (c) 2017 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#!/bin/bash
# NOTE(review): this shebang is not on line 1 of the file (the license header
# precedes it) and is therefore ineffective; invoke the script with bash.
# Usage: install_ffmpeg.sh <abi> <installation-dir>
# Assumes $OS, $VERSIONS_FILE and $SED are exported by the calling build
# scripts — TODO confirm against callers.
set -ex
ABI=$1
INSTALLATION_DIR=$2
OS=$(echo "$OS" | tr '[:upper:]' '[:lower:]')
export BASE_DIR=$(pwd)
mkdir -p "${INSTALLATION_DIR}"
mkdir -p "${INSTALLATION_DIR}/include"
# Fix: the lib/ target directory was never created, so the `cp ... lib/` of
# the shared libraries below failed (and aborted via `set -e`) on a fresh
# installation prefix.
mkdir -p "${INSTALLATION_DIR}/lib"
# Only (re)install if any of the ffmpeg header directories is missing.
if [ ! -d "${INSTALLATION_DIR}/include/libavcodec" ] \
|| [ ! -d "${INSTALLATION_DIR}/include/libavfilter" ] \
|| [ ! -d "${INSTALLATION_DIR}/include/libavresample" ] \
|| [ ! -d "${INSTALLATION_DIR}/include/libswresample" ] \
|| [ ! -d "${INSTALLATION_DIR}/include/libavdevice" ] \
|| [ ! -d "${INSTALLATION_DIR}/include/libavformat" ] \
|| [ ! -d "${INSTALLATION_DIR}/include/libavutil" ] \
|| [ ! -d "${INSTALLATION_DIR}/include/libswscale" ]; then
cd src
# Download and unpack the prebuilt ffmpeg-for-Android archive once.
if [ ! -d ffmpeg ]; then
if [ ! -f ffmpeg-4.2-android-clang.tar.xz ]; then
wget https://iweb.dl.sourceforge.net/project/avbuild/android/ffmpeg-4.2-android-clang.tar.xz
fi
tar xf ffmpeg-4.2-android-clang.tar.xz
mv ffmpeg-4.2-android-clang ffmpeg
rm -rf ffmpeg-4.2-android-clang.tar.xz
fi
cp -r ffmpeg/include/* "${INSTALLATION_DIR}/include/"
# Copy the ABI-specific shared libraries (only arm64 and x86 are supported).
if [ "${ABI}" = "arm64" ]; then
cp ffmpeg/lib/arm64-v8a/lib* "${INSTALLATION_DIR}/lib/"
elif [ "${ABI}" = "x86" ]; then
cp ffmpeg/lib/x86/lib* "${INSTALLATION_DIR}/lib/"
fi
# Record the installed version, replacing any previous entry for this ABI.
touch "${VERSIONS_FILE}"
${SED} -i "/${ABI}_ffmpeg/d" "${VERSIONS_FILE}"
echo ${ABI}_ffmpeg=4.2 >> "${VERSIONS_FILE}"
cd ..
fi
| true
|
14095100dc3a5de819dac7359ba575ef7087cc7f
|
Shell
|
PawarKishori/Alignment1
|
/working_debug/statistic-shell.sh
|
UTF-8
| 1,117
| 2.875
| 3
|
[] |
no_license
|
str1="sent_no\th_al_%\te_al_%\te_leftover"
str2="sent_no\th_leftover_ids\te_leftover_ids"
str3="sent_no\th_aligned\te_aligned"
echo -e $str1 > $HOME_anu_tmp/tmp/$1_tmp/alignment_percent_info.txt
echo -e $str2 > $HOME_anu_tmp/tmp/$1_tmp/alignment_leftover_info.txt
echo -e $str3 > $HOME_anu_tmp/tmp/$1_tmp/alignment_aligned_info.txt
#n=`wc -l $1 | awk '{print $1}'`
i=1
#n=125 #`ls -d */ | wc -l`
#n=86
END=`wc -l $1 | awk '{print $1}'`
#END=67
#echo $END
END=`expr $END + 1`
current=`pwd`
#echo "$current"
while [ $i -le $END ]
do
sentence_dir='2.'$i
echo $sentence_dir
tmp_path=$HOME_anu_tmp/tmp/$1_tmp
#python working_debug/eng_lwg.py ai2E 2.1
python $HOME_alignment/working_debug/statistic.py $tmp_path "2."$i
i=`expr $i + 1`
done
(head -1 $HOME_anu_tmp/tmp/$1_tmp/alignment_percent_info.txt && tail -n+2 $HOME_anu_tmp/tmp/$1_tmp/alignment_percent_info.txt | sort -k 2gr) > $HOME_anu_tmp/tmp/$1_tmp/alignment_percent_info_sorted.txt
rm $HOME_anu_tmp/tmp/$1_tmp/alignment_percent_info.txt
#sed "1i\ $str" $HOME_anu_tmp/tmp/$1/alignment_percent_info_sorted.txt
| true
|
59cad249f38f9880e995ca6e7c0ca43062c282bc
|
Shell
|
MODU-FTNC/google-language-resources
|
/third_party/festvox/setup.sh
|
UTF-8
| 10,023
| 2.9375
| 3
|
[
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT",
"BSD-4-Clause",
"CC-BY-SA-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
#!/bin/sh
###########################################################################
## ##
## Carnegie Mellon University ##
## Copyright (c) 2014 ##
## All Rights Reserved. ##
## ##
## Permission is hereby granted, free of charge, to use and distribute ##
## this software and its documentation without restriction, including ##
## without limitation the rights to use, copy, modify, merge, publish, ##
## distribute, sublicense, and/or sell copies of this work, and to ##
## permit persons to whom this work is furnished to do so, subject to ##
## the following conditions: ##
## 1. The code must retain the above copyright notice, this list of ##
## conditions and the following disclaimer. ##
## 2. Any modifications must be clearly marked as such. ##
## 3. Original authors' names are not deleted. ##
## 4. The authors' names are not used to endorse or promote products ##
## derived from this software without specific prior written ##
## permission. ##
## ##
## CARNEGIE MELLON UNIVERSITY AND THE CONTRIBUTORS TO THIS WORK ##
## DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ##
## ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT ##
## SHALL CARNEGIE MELLON UNIVERSITY NOR THE CONTRIBUTORS BE LIABLE ##
## FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ##
## WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN ##
## AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ##
## ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF ##
## THIS SOFTWARE. ##
## ##
###########################################################################
## ##
## Example script used to test the Festival 2.4/Flite 2.0 release ##
## ##
## Downloads code, compiles it, runs the voices, and builds a voice ##
## ##
###########################################################################
# Modification - To setup tools at a given directory
# Fix: verify FESTIVAL_SUIT_PATH is set BEFORE changing directory — the old
# code ran `cd $FESTIVAL_SUIT_PATH` first, silently cd-ing to $HOME when the
# variable was unset, and only then hit the :? check.
cd "${FESTIVAL_SUIT_PATH:?Set env variable FESTIVAL_SUIT_PATH}"
echo "Setting up at festival at ${FESTIVAL_SUIT_PATH}"
# Download the code and voices, if not already downloaded
if [ ! -d packed ]
then
mkdir packed
cd packed
# Core festival/speech_tools tarballs and lexicons.
base=http://festvox.org/packed/festival/2.4
for f in festival-2.4-release.tar.gz speech_tools-2.4-release.tar.gz festlex_CMU.tar.gz festlex_OALD.tar.gz festlex_POSLEX.tar.gz
do
wget $base/$f
done
# Clustergen (CG) voices for festival — same 13 speakers as the flite
# voices fetched below.
for v in ahw aup awb axb bdl clb fem gka jmk ksp rms rxr slt
do
wget $base/voices/festvox_cmu_us_${v}_cg.tar.gz
done
wget $base/voices/festvox_kallpc16k.tar.gz
wget $base/voices/festvox_rablpc16k.tar.gz
wget http://festvox.org/festvox-2.7/festvox-2.7.0-release.tar.gz
# Following sptk link is not available.
# wget http://tts.speech.cs.cmu.edu/awb/sptk/SPTK-3.6.tar.gz
wget https://download.sourceforge.net/project/sp-tk/SPTK/SPTK-3.6/SPTK-3.6.tar.gz
wget http://festvox.org/flite/packed/flite-2.0/flite-2.0.0-release.tar.bz2
# Prebuilt flite voices for the same speakers.
for v in ahw aup awb axb bdl clb fem gka jmk ksp rms rxr slt
do
wget http://festvox.org/flite/packed/flite-2.0/voices/cmu_us_${v}.flitevox
done
cd ..
fi
# Unpack the code and voices
mkdir build
cd build
# Extract every downloaded gzip tarball (festival, speech_tools, lexicons,
# voices, festvox, SPTK) into build/.
for i in ../packed/*.gz
do
tar zxvf $i
done
# Extract the bzip2 archives (flite).
for i in ../packed/*.bz2
do
tar jxvf $i
done
mv flite-2.0.0-release flite
# Set up the environment variables for voice building
# (these paths are consumed by the festvox/clustergen build scripts).
export ESTDIR=`pwd`/speech_tools
export FESTVOXDIR=`pwd`/festvox
export FLITEDIR=`pwd`/flite
export SPTKDIR=`pwd`/SPTK
mkdir SPTK
export EXDIR=`pwd`/examples
mkdir examples
# Smoke-test sentence synthesized by every voice later in the script.
echo "A whole joy was reaping, but they've gone south, you should fetch azure mike." >allphones.txt
# Apply festvox's SPTK build fix, then build and install SPTK into $SPTKDIR.
patch -p0 <festvox/src/clustergen/SPTK-3.6.patch
cd SPTK-3.6
./configure --prefix=$SPTKDIR
make
make install
cd ..
# Build and self-test the Edinburgh Speech Tools.
cd speech_tools
./configure
make
make test
cd ..
# Build and self-test festival, then synthesize the smoke-test sentence with
# each of the 13 downloaded clustergen voices into $EXDIR.
cd festival
./configure
make
make test
for v in ahw aup awb axb bdl clb fem gka jmk ksp rms rxr slt
do
./bin/text2wave -eval "(voice_cmu_us_${v}_cg)" -o $EXDIR/festival_${v}_hw.wav ../allphones.txt
done
cd ..
# Build flite, hard-link the downloaded .flitevox voices into place, and
# synthesize the smoke-test sentence with each voice (plus the built-in kal).
cd flite
./configure
make
mkdir voices
cd voices
ln ../../../packed/*.flitevox .
cd ..
# Generate some waveform files with the different voices
for v in ahw aup awb axb bdl clb fem gka jmk ksp rms rxr slt
do
./bin/flite -voice voices/cmu_us_${v}.flitevox -f ../allphones.txt $EXDIR/flite_${v}_hw.wav
done
./bin/flite -voice kal -f ../allphones.txt $EXDIR/flite_kal_hw.wav
cd ..
# Voice building example (100 rms utts)
# Build festvox voice building tools
cd festvox
./configure
make
cd ..
# Scaffold a clustergen voice-building directory and fetch the 100-utterance
# rms corpus to populate it.
mkdir cmu_us_rms100
cd cmu_us_rms100
$FESTVOXDIR/src/clustergen/setup_cg cmu us rms100
wget http://tts.speech.cs.cmu.edu/awb/short_arctic/rms100.tar.bz2
tar jxvf rms100.tar.bz2
# Build the voice (may take some time)
echo "Run ./bin/build_cg_rfs_voice to build the voice"
cd ..
# Modification - To notice a user to export environment variables
# (printed rather than applied: exports from this script do not persist in
# the caller's shell).
echo "Please add the following lines to your environment setting (.bashrc, .zshrc, etc.)"
echo "export ESTDIR=`pwd`/speech_tools"
echo "export FESTVOXDIR=`pwd`/festvox"
echo "export FESTIVALDIR=`pwd`/festival"
echo "export FLITEDIR=`pwd`/flite"
echo "export SPTKDIR=`pwd`/SPTK"
echo "export EXDIR=`pwd`/examples"
| true
|
b1a10bb0157e368c4666e01e8944c1afdb75ccaa
|
Shell
|
bkolli/nonrtric
|
/test/auto-test/FTC1800.sh
|
UTF-8
| 15,889
| 2.578125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# ============LICENSE_START===============================================
# Copyright (C) 2020 Nordix Foundation. All rights reserved.
# ========================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=================================================
#
TC_ONELINE_DESCR="ECS Create 10000 jobs and restart, test job persistency"
#App names to include in the test when running docker, space separated list
DOCKER_INCLUDED_IMAGES="ECS PRODSTUB CR CP"
#App names to include in the test when running kubernetes, space separated list
KUBE_INCLUDED_IMAGES="ECS PRODSTUB CP CR"
#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
KUBE_PRESTARTED_IMAGES=""
#Supported test environment profiles
SUPPORTED_PROFILES="ONAP-HONOLULU ORAN-CHERRY ORAN-DAWN"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
# Pull in the shared test framework and the per-component helper functions
# (ecs_*, prodstub_*, start_*, ecs_equal, ... are all defined there).
. ../common/testcase_common.sh $@
. ../common/ecs_api_functions.sh
. ../common/prodstub_api_functions.sh
. ../common/control_panel_api_functions.sh
. ../common/controller_api_functions.sh
. ../common/cr_api_functions.sh
#### TEST BEGIN ####
# Non-empty => use the "flat" A1-EI API (job status queried by job id only).
FLAT_A1_EI="1"
clean_environment
use_ecs_rest_http
use_prod_stub_http
start_ecs NOPROXY $SIM_GROUP/$ECS_COMPOSE_DIR/application.yaml
start_prod_stub
set_ecs_trace
start_control_panel $SIM_GROUP/$CONTROL_PANEL_COMPOSE_DIR/application.properties
start_cr
# Callback URLs the producer stub exposes for job creation and supervision.
CB_JOB="$PROD_STUB_SERVICE_PATH$PROD_STUB_JOB_CALLBACK"
CB_SV="$PROD_STUB_SERVICE_PATH$PROD_STUB_SUPERVISION_CALLBACK"
TARGET="http://localhost:80/target" # Dummy target
NUM_JOBS=10000
# Setup prodstub sim to accept calls for producers, types and jobs
# (type1 -> a/b/c, type2 -> b/c, type3 -> c, type4 and type5 -> d).
prodstub_arm_producer 200 prod-a
prodstub_arm_producer 200 prod-b
prodstub_arm_producer 200 prod-c
prodstub_arm_producer 200 prod-d
prodstub_arm_type 200 prod-a type1
prodstub_arm_type 200 prod-b type1
prodstub_arm_type 200 prod-b type2
prodstub_arm_type 200 prod-c type1
prodstub_arm_type 200 prod-c type2
prodstub_arm_type 200 prod-c type3
prodstub_arm_type 200 prod-d type4
prodstub_arm_type 200 prod-d type5
# Arm the producer stub for every job id. Jobs are spread over the five EI
# types by (i mod 5); each type is served by a fixed set of producers.
for ((i=1; i<=NUM_JOBS; i++))
do
    case $((i%5)) in
        0)  # type1 -> prod-a, prod-b, prod-c
            prodstub_arm_job_create 200 prod-a job$i
            prodstub_arm_job_create 200 prod-b job$i
            prodstub_arm_job_create 200 prod-c job$i
            ;;
        1)  # type2 -> prod-b, prod-c
            prodstub_arm_job_create 200 prod-b job$i
            prodstub_arm_job_create 200 prod-c job$i
            ;;
        2)  # type3 -> prod-c
            prodstub_arm_job_create 200 prod-c job$i
            ;;
        3)  # type4 -> prod-d
            prodstub_arm_job_create 200 prod-d job$i
            ;;
        4)  # type5 -> prod-d
            prodstub_arm_job_create 200 prod-d job$i
            ;;
    esac
done
# Register the four producers (and, on newer ECS, the five EI types first).
# ECS V1-1 takes the type schemas inline in the producer registration; later
# versions register types separately via ecs_api_edp_put_type_2.
if [ $ECS_VERSION == "V1-1" ]; then
ecs_api_edp_put_producer 201 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1 testdata/ecs/ei-type-1.json
ecs_api_edp_put_producer 201 prod-b $CB_JOB/prod-b $CB_SV/prod-b type1 testdata/ecs/ei-type-1.json type2 testdata/ecs/ei-type-2.json
ecs_api_edp_put_producer 201 prod-c $CB_JOB/prod-c $CB_SV/prod-c type1 testdata/ecs/ei-type-1.json type2 testdata/ecs/ei-type-2.json type3 testdata/ecs/ei-type-3.json
ecs_api_edp_put_producer 201 prod-d $CB_JOB/prod-d $CB_SV/prod-d type4 testdata/ecs/ei-type-4.json type5 testdata/ecs/ei-type-5.json
else
ecs_api_edp_put_type_2 201 type1 testdata/ecs/ei-type-1.json
ecs_api_edp_put_type_2 201 type2 testdata/ecs/ei-type-2.json
ecs_api_edp_put_type_2 201 type3 testdata/ecs/ei-type-3.json
ecs_api_edp_put_type_2 201 type4 testdata/ecs/ei-type-4.json
ecs_api_edp_put_type_2 201 type5 testdata/ecs/ei-type-5.json
ecs_api_edp_put_producer_2 201 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1
ecs_api_edp_put_producer_2 201 prod-b $CB_JOB/prod-b $CB_SV/prod-b type1 type2
ecs_api_edp_put_producer_2 201 prod-c $CB_JOB/prod-c $CB_SV/prod-c type1 type2 type3
ecs_api_edp_put_producer_2 201 prod-d $CB_JOB/prod-d $CB_SV/prod-d type4 type5
fi
# All four producers must be registered and report ENABLED.
ecs_equal json:ei-producer/v1/eiproducers 4
ecs_api_edp_get_producer_status 200 prod-a ENABLED
ecs_api_edp_get_producer_status 200 prod-b ENABLED
ecs_api_edp_get_producer_status 200 prod-c ENABLED
ecs_api_edp_get_producer_status 200 prod-d ENABLED
# Create NUM_JOBS jobs and verify each is ENABLED. Job i belongs to EI type
# (i mod 5) mapped onto type1..type5; in flat mode the status is queried by
# job id with a 120s timeout, otherwise by type + job id.
for ((i=1; i<=NUM_JOBS; i++))
do
    case $((i%5)) in
        0) jobtype=type1 ;;
        1) jobtype=type2 ;;
        2) jobtype=type3 ;;
        3) jobtype=type4 ;;
        4) jobtype=type5 ;;
    esac
    ecs_api_a1_put_job 201 job$i $jobtype $TARGET ric1 $CR_SERVICE_PATH/job_status_ric1 testdata/ecs/job-template.json
    if [ -z "$FLAT_A1_EI" ]; then
        ecs_api_a1_get_job_status 200 $jobtype job$i ENABLED
    else
        ecs_api_a1_get_job_status 200 job$i ENABLED 120
    fi
done
# Verify the job distribution: exactly NUM_JOBS/5 jobs of each of the five types.
if [ -z "$FLAT_A1_EI" ]; then
    ecs_equal json:A1-EI/v1/eitypes/type1/eijobs $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eitypes/type2/eijobs $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eitypes/type3/eijobs $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eitypes/type4/eijobs $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eitypes/type5/eijobs $(($NUM_JOBS/5))
else
    # Fix: the flat-mode branch queried eiTypeId=type1 five times (copy-paste
    # error), leaving types 2-5 unverified; query each type once, mirroring
    # the non-flat branch above.
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type1 $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type2 $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type3 $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type4 $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type5 $(($NUM_JOBS/5))
fi
# Restart ECS, then wipe the producer stub's job data so that every producer
# appears gone (404) until it re-registers — this exercises job persistence
# across an ECS restart.
restart_ecs
set_ecs_trace
for ((i=1; i<=NUM_JOBS; i++))
do
    case $((i%5)) in
        0)  # type1 jobs were created on prod-a, prod-b and prod-c
            prodstub_delete_jobdata 204 prod-a job$i
            prodstub_delete_jobdata 204 prod-b job$i
            prodstub_delete_jobdata 204 prod-c job$i
            ;;
        1)  # type2 jobs: prod-b and prod-c
            prodstub_delete_jobdata 204 prod-b job$i
            prodstub_delete_jobdata 204 prod-c job$i
            ;;
        2)  # type3 jobs: prod-c only
            prodstub_delete_jobdata 204 prod-c job$i
            ;;
        3)  # type4 jobs: prod-d only
            prodstub_delete_jobdata 204 prod-d job$i
            ;;
        4)  # type5 jobs: prod-d only
            prodstub_delete_jobdata 204 prod-d job$i
            ;;
    esac
done
ecs_api_edp_get_producer_status 404 prod-a
ecs_api_edp_get_producer_status 404 prod-b
ecs_api_edp_get_producer_status 404 prod-c
ecs_api_edp_get_producer_status 404 prod-d
# After the restart all jobs must have persisted but be DISABLED, since their
# producers are not registered yet.
for ((i=1; i<=NUM_JOBS; i++))
do
    case $((i%5)) in
        0) jobtype=type1 ;;
        1) jobtype=type2 ;;
        2) jobtype=type3 ;;
        3) jobtype=type4 ;;
        4) jobtype=type5 ;;
    esac
    if [ -z "$FLAT_A1_EI" ]; then
        ecs_api_a1_get_job_status 200 $jobtype job$i DISABLED
    else
        ecs_api_a1_get_job_status 200 job$i DISABLED 120
    fi
done
# Re-register the same four producers after the restart (types survive the
# restart on newer ECS, so only the producers need to be re-put here).
if [ $ECS_VERSION == "V1-1" ]; then
ecs_api_edp_put_producer 201 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1 testdata/ecs/ei-type-1.json
ecs_api_edp_put_producer 201 prod-b $CB_JOB/prod-b $CB_SV/prod-b type1 testdata/ecs/ei-type-1.json type2 testdata/ecs/ei-type-2.json
ecs_api_edp_put_producer 201 prod-c $CB_JOB/prod-c $CB_SV/prod-c type1 testdata/ecs/ei-type-1.json type2 testdata/ecs/ei-type-2.json type3 testdata/ecs/ei-type-3.json
ecs_api_edp_put_producer 201 prod-d $CB_JOB/prod-d $CB_SV/prod-d type4 testdata/ecs/ei-type-4.json type5 testdata/ecs/ei-type-5.json
else
ecs_api_edp_put_producer_2 201 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1
ecs_api_edp_put_producer_2 201 prod-b $CB_JOB/prod-b $CB_SV/prod-b type1 type2
ecs_api_edp_put_producer_2 201 prod-c $CB_JOB/prod-c $CB_SV/prod-c type1 type2 type3
ecs_api_edp_put_producer_2 201 prod-d $CB_JOB/prod-d $CB_SV/prod-d type4 type5
fi
# All four producers are back and ENABLED.
ecs_equal json:ei-producer/v1/eiproducers 4
ecs_api_edp_get_producer_status 200 prod-a ENABLED
ecs_api_edp_get_producer_status 200 prod-b ENABLED
ecs_api_edp_get_producer_status 200 prod-c ENABLED
ecs_api_edp_get_producer_status 200 prod-d ENABLED
# With the producers re-registered, every persisted job must return to ENABLED.
for ((i=1; i<=NUM_JOBS; i++))
do
    case $((i%5)) in
        0) jobtype=type1 ;;
        1) jobtype=type2 ;;
        2) jobtype=type3 ;;
        3) jobtype=type4 ;;
        4) jobtype=type5 ;;
    esac
    if [ -z "$FLAT_A1_EI" ]; then
        ecs_api_a1_get_job_status 200 $jobtype job$i ENABLED
    else
        ecs_api_a1_get_job_status 200 job$i ENABLED 120
    fi
done
# Job counts per type must be unchanged after the restart cycle.
if [ -z "$FLAT_A1_EI" ]; then
    ecs_equal json:A1-EI/v1/eitypes/type1/eijobs $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eitypes/type2/eijobs $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eitypes/type3/eijobs $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eitypes/type4/eijobs $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eitypes/type5/eijobs $(($NUM_JOBS/5))
else
    # Fix: copy-paste left eiTypeId=type1 on all five flat-mode checks; each
    # type is now checked once, mirroring the non-flat branch.
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type1 $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type2 $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type3 $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type4 $(($NUM_JOBS/5))
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type5 $(($NUM_JOBS/5))
fi
# Verify each producer received the job data for the jobs of its types.
# ECS V1-1 uses prodstub_check_jobdata; later versions use the _2 variant —
# select the helper once instead of branching inside every loop iteration.
if [ $ECS_VERSION == "V1-1" ]; then
    checkcmd=prodstub_check_jobdata
else
    checkcmd=prodstub_check_jobdata_2
fi
for ((i=1; i<=NUM_JOBS; i++))
do
    case $((i%5)) in
        0)  # type1 -> prod-a, prod-b, prod-c
            $checkcmd 200 prod-a job$i type1 $TARGET ric1 testdata/ecs/job-template.json
            $checkcmd 200 prod-b job$i type1 $TARGET ric1 testdata/ecs/job-template.json
            $checkcmd 200 prod-c job$i type1 $TARGET ric1 testdata/ecs/job-template.json
            ;;
        1)  # type2 -> prod-b, prod-c
            $checkcmd 200 prod-b job$i type2 $TARGET ric1 testdata/ecs/job-template.json
            $checkcmd 200 prod-c job$i type2 $TARGET ric1 testdata/ecs/job-template.json
            ;;
        2)  # type3 -> prod-c
            $checkcmd 200 prod-c job$i type3 $TARGET ric1 testdata/ecs/job-template.json
            ;;
        3)  # type4 -> prod-d
            $checkcmd 200 prod-d job$i type4 $TARGET ric1 testdata/ecs/job-template.json
            ;;
        4)  # type5 -> prod-d
            $checkcmd 200 prod-d job$i type5 $TARGET ric1 testdata/ecs/job-template.json
            ;;
    esac
done
# Delete every job. (i%5 always falls in 0..4, so the original five identical
# branches collapse into a single unconditional delete per job.)
for ((i=1; i<=NUM_JOBS; i++))
do
    ecs_api_a1_delete_job 204 job$i
done
# Final state: producers remain registered and ENABLED, and no jobs of any
# type remain after the deletions above.
ecs_equal json:ei-producer/v1/eiproducers 4
ecs_api_edp_get_producer_status 200 prod-a ENABLED
ecs_api_edp_get_producer_status 200 prod-b ENABLED
ecs_api_edp_get_producer_status 200 prod-c ENABLED
ecs_api_edp_get_producer_status 200 prod-d ENABLED
if [ -z "$FLAT_A1_EI" ]; then
    ecs_equal json:A1-EI/v1/eitypes/type1/eijobs 0
    ecs_equal json:A1-EI/v1/eitypes/type2/eijobs 0
    ecs_equal json:A1-EI/v1/eitypes/type3/eijobs 0
    ecs_equal json:A1-EI/v1/eitypes/type4/eijobs 0
    ecs_equal json:A1-EI/v1/eitypes/type5/eijobs 0
else
    # Fix: copy-paste left eiTypeId=type1 on all five flat-mode checks; check
    # each type once, mirroring the non-flat branch.
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type1 0
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type2 0
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type3 0
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type4 0
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type5 0
fi
# Verify the ECS log contains no unexpected warnings/errors, archive all
# component logs, print the verdict and clean up the test environment.
check_ecs_logs
store_logs END
#### TEST COMPLETE ####
print_result
auto_clean_environment
| true
|
c42bded65eec6940945bfdc7541105f86296c845
|
Shell
|
silicon-axon/dockerfiles
|
/dockerize.sh
|
UTF-8
| 2,519
| 4.53125
| 5
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# dockerize script with custom tags and packages.
# Each package is assumed to be a subfolder to the path where this
# dockerize script is executed. Then inside each package/ subfolder
# contains the Dockerfile and a single file "TAG" that has a string
# indicating the current tag to be used for the docker image.
#
# Albert Tedja <albert@siliconaxon.com>
# Color stuff
yellow='\e[1;33m' # bright-yellow ANSI escape
nc='\e[0m' # reset / no color
color=$yellow # color used by info() below
# SIGINT handler: print a notice and exit with the conventional
# 128+SIGINT(2) = 130 status.
ctrl_c () {
    echo
    info "CTRL-C pressed. TERMINATED BY USER"
    # Fix: 'exit $?' exited with the status of the echo/info above (always 0),
    # so an interrupted run looked successful to callers. Exit 130 instead.
    exit 130
}
trap ctrl_c INT
info () {
echo -e "${color}[dockerize] ${1}${nc}"
}
# Grab flags: the first positional argument is the package directory
# (a trailing '/' from tab completion is stripped).
APPLICATION_NAME=${1%/}
if [[ ! -d "$APPLICATION_NAME" ]] ; then
  info "Package $APPLICATION_NAME does not exist"
  exit 1
fi
shift
# Option flags follow the package name.
while getopts bpr opts; do
  case ${opts} in
    b) BUILD=true ;;
    p) PUSH=true ;;
    r) REBUILD=true ;;
  esac
done
# Show usage if no options are not specified
# NOTE(review): an empty package name already exits at the directory test
# above, so this branch is effectively reachable only via the flag checks.
if [[ -z $APPLICATION_NAME ]] && [[ -z $BUILD ]] && [[ -z $REBUILD ]] && [[ -z $PUSH ]] ; then
  echo -e "Simplify building, pushing, and running docker images."
  echo -e "Usage:"
  echo -e "\tdockerize <package> [OPTIONS]\n"
  echo -e "Options:"
  echo -e "\tpackage\tName of the package."
  echo -e "\t-b\tBuild image."
  echo -e "\t-p\tPush image to the registry."
  echo -e "\t-r\tRebuild by removing existing image with the same name and tag."
  echo
  echo -e "Example:"
  echo -e "\tdockerize -bp"
  exit 1
fi
# Docker user. Replace
DOCKER_USER="siliconaxon"
# The package's TAG file holds the tag string for the image.
IMAGE_TAG=$(<"$APPLICATION_NAME/TAG")
IMAGE_ID="${DOCKER_USER}/${APPLICATION_NAME}"
IMAGE_ID_TAG="${IMAGE_ID}:${IMAGE_TAG}"
info "Application name: ${APPLICATION_NAME}"
info "Commit: ${IMAGE_TAG}"
info "Image: ${IMAGE_ID_TAG}"
# Check for an existing image. Fixed-string (-F), quoted matching: tags such
# as "1.4" contain '.', which the original unquoted regex grep treated as
# "any character" and could therefore match the wrong image.
exist=$(docker images | grep -F -- "$IMAGE_ID" | grep -F -- "$IMAGE_TAG")
# Rebuild. Always delete existing image, then build with no cache.
if [[ -n $REBUILD ]]; then
  if [[ -n $exist ]]; then
    info "Removing old image."
    docker rmi "$IMAGE_ID_TAG"
  fi
  set -e
  info "Building image"
  cd "$APPLICATION_NAME"
  docker build --no-cache -t "$IMAGE_ID_TAG" .
  docker tag "$IMAGE_ID_TAG" "$IMAGE_ID:latest"
  set +e
# Build only if no image with this id:tag exists yet.
elif [[ -n $BUILD ]]; then
  if [[ -z $exist ]]; then
    set -e
    info "Building image"
    cd "$APPLICATION_NAME"
    docker build -t "$IMAGE_ID_TAG" .
    docker tag "$IMAGE_ID_TAG" "$IMAGE_ID:latest"
    set +e
  else
    info "Image already exist."
  fi
else
  info "No build command specified."
fi
# Push both the versioned tag and :latest.
if [[ -n $PUSH ]] ; then
  info "Pushing to registry"
  docker push "$IMAGE_ID_TAG"
  docker push "$IMAGE_ID:latest"
fi
echo "$IMAGE_ID_TAG"
| true
|
5c6963716cecaef18b64f8d412cdaaac07be93bd
|
Shell
|
xiaoyu4321/Kyrix
|
/rebuild-citus
|
UTF-8
| 324
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build and push the citus image, then point the k8s deployment manifests
# at the new tag.
#   $1 - optional image tag (defaults to the pinned tag below)
TAG=${1:-asah/pg11_postgis25_citus811:1.4}
# Stop on first failure: the original kept pushing and rewriting the
# manifests even when the docker build had failed.
set -e
sudo docker build . -f Dockerfile-citus-debian -t "$TAG"
sudo docker push "$TAG"
sed -i "s@image: .*@image: $TAG@" worker-deployment.yaml
sed -i "s@image: .*@image: $TAG@" master-deployment.yaml
echo "docker build successful and {master,worker}-deployment.yaml updated"
| true
|
7dedac8c8df2ca1d1181715a2dec17d393196020
|
Shell
|
smr97/sort_benchmarking
|
/rust/run.sh
|
UTF-8
| 276
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the Rust sort benchmark pinned to the first NUM_THREADS CPUs.
#   $1 - problem size
#   $2 - number of threads
PROBLEM_SIZE=$1
NUM_THREADS=$2
# Highest CPU index for taskset's inclusive 0-N --cpu-list range.
FLAG="$(($NUM_THREADS-1))"
# Run from the script's own directory (quoted: the original word-split
# the readlink result on paths containing spaces).
cd "$(readlink -f "$(dirname "$0")")"
set -ex
source "$HOME/.profile"
echo "${NUM_THREADS} threads"
RUSTFLAGS="-C target-cpu=native" taskset --cpu-list 0-$FLAG cargo run --release "$PROBLEM_SIZE" "$NUM_THREADS"
| true
|
e6a88b4d4e00971ab2a246e7c710dbbcad29e63f
|
Shell
|
dirkaholic/kyoo
|
/examples/sqsworker/setup/setup-localstack.sh
|
UTF-8
| 1,075
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Provision the localstack AWS emulator for the kyoo image pipeline:
# S3 bucket -> SNS topic -> SQS queue, wired so that new S3 objects notify
# SNS, which fans out to the SQS queue the worker consumes.
# create S3 bucket, topic and queue for images
awslocal s3 mb s3://local-kyoo-bucket
awslocal sns create-topic --name kyoo-images-topic
awslocal sqs create-queue --queue-name kyoo-images-queue
# allow SNS to send message to SQS queue
# Wrap the raw policy JSON into the { "Policy": "<json-as-string>" } shape
# that set-queue-attributes expects.
jq -c '. | { Policy: @text }' /config/sqs-permission.json > /tmp/sqs.json
# NOTE(review): file://tmp/sqs.json is a *relative* path; it resolves to the
# /tmp/sqs.json written above only when the working directory is / — confirm.
awslocal sqs set-queue-attributes --queue-url "http://localhost:4576/queue/kyoo-images-queue" --attributes file://tmp/sqs.json
# subscribe SQS to SNS topic
awslocal sns subscribe --topic-arn "arn:aws:sns:us-east-1:000000000000:kyoo-images-topic" --protocol "sqs" --notification-endpoint "arn:aws:sqs:us-east-1:000000000000:kyoo-images-queue"
# allow S3 to publish to SNS
awslocal sns set-topic-attributes --topic-arn "arn:aws:sns:us-east-1:000000000000:kyoo-images-topic" --attribute-name Policy --attribute-value file://config/sns-permission.json
# add a notification to SNS for S3 on creating new object
awslocal s3api put-bucket-notification-configuration --bucket local-kyoo-bucket --notification-configuration file://config/s3-notification.json
| true
|
3ad38895c79c38d3bf98a279891498a500f7b32b
|
Shell
|
peplin/dotfiles
|
/git/squashall.sh
|
UTF-8
| 463
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Squash all commits since the current HEAD forked from upstream. Optionally
# provide a custom message, otherwise uses the message from the first commit.
set -e

# Soft-reset to the oldest commit ahead of @{upstream} and amend everything
# into it.
#   $1 - optional commit message for the squashed commit
squash_all() {
  message=$1
  # git log lists newest first, so tail -1 is the oldest commit ahead.
  first_commit=$(git log @{upstream}..HEAD --pretty=format:"%h" | tail -1)
  git reset --soft "$first_commit"
  # Quote the message: unquoted, a multi-word message passed only its first
  # word to -m and handed the remaining words to git as pathspecs.
  if [ -n "$message" ]; then
    git commit --amend -m "$message"
  else
    git commit --amend --no-edit
  fi
}

squash_all "$1"
| true
|
c7341ec6b99d92f8ff8659cef2a706b3a5d38739
|
Shell
|
jayrfink/infracode
|
/syssrc/sh/stopinstances.sh
|
UTF-8
| 347
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Stop every EC2 instance whose tag *value* matches $1
# (i.e. where Tag Name is Type and Value is $1).
aws_cmd_desc="aws ec2 describe-instances"
aws_cmd_stop="aws ec2 stop-instances --instance-ids"
pattern=$1
# Quote the filter so patterns containing spaces or glob characters survive,
# and read line by line instead of word-splitting a backtick expansion.
$aws_cmd_desc --filters "Name=tag-value,Values=$pattern" \
    | grep InstanceId | awk '{print $2}' | sed 's/"//g;s/,//' \
    | while read -r n
do
    echo "Stopping $n"
    $aws_cmd_stop "$n"
done
| true
|
d3a0eeb867358809a66c8b3651c4ad7238c7cdd0
|
Shell
|
npo-poms/api
|
/bash/schedule/summarize.sh
|
UTF-8
| 398
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Summarize jsongrep output for schedule items.
#   $1 - optional response type (default: "search")
type=search
# Quoted test: the original `[ ! -z $1 ]` relied on test's one-argument form
# when $1 was unset and broke outright on multi-word arguments.
if [ -n "$1" ]; then
  type=$1
fi
# Search API responses are wrapped in a top-level .result object; other
# responses have the fields at the top level.
if [ "$type" = "search" ]; then
  pref=".result"
else
  pref=""
fi
jsongrep -output VALUE \
-sep ' ' \
-recordsep $'\n'\
-record 'items.*' \
"$pref.channel,$pref.start,$pref.midRef,$pref.media.objectType,$pref.media.titles[0].value,$pref.media.broadcasters[0].id" \
| sed -e 's/^/ /'
| true
|
cae271f4aa1eed5b0c7b4b1e3b5145de2332faf2
|
Shell
|
dnmczsw/brcr-update
|
/install.sh
|
UTF-8
| 496
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Installer for brcr-update: download the scripts from GitHub into a scratch
# directory under /usr/local and install them into /usr/bin.
[[ $(id -u) -eq 0 ]] || { echo "This script must be run as root"; exit 1; }
# Remove "/usr/local/bin:" from PATH.
# NOTE(review): presumably so stale copies in /usr/local/bin are not picked
# up during installation — confirm intent.
export PATH="${PATH/\/usr\/local\/bin:/}"
cd /usr/local
# Recreate the scratch directory from a clean slate.
rm -rf "__brcr"
mkdir __brcr
cd __brcr
curl -L -o brcr-update https://raw.githubusercontent.com/BiteDasher/brcr-update/master/brcr-update
curl -L -o unzip https://raw.githubusercontent.com/BiteDasher/brcr-update/master/unzip
# install(1) copies with mode 755 in one step.
install -m 755 brcr-update /usr/bin/brcr-update
install -m 755 unzip /usr/bin/unzip
cd /
rm -rf /usr/local/__brcr
echo "Done."
| true
|
8576fbedff64b0284f68d4c71fa3829596a6916b
|
Shell
|
dosxuz/Workspace_creator
|
/create_wkps.sh
|
UTF-8
| 325
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactively create N workspaces. Each workspace gets notes/ (seeded with
# an Obsidian config and a <name>.md note), notes/Pictures and nmap/.
read -p "Enter the number of workspaces to create : " n
for i in $(seq 1 $n)
do
  echo "Creating workspace $i"
  # The original prompt always said "first workspace"; name the iteration.
  read -p "Enter the name of workspace $i : " name
  # Quote the name and bail out on failure so a bad/duplicate name cannot
  # desync the `cd ../../` bookkeeping at the end of the loop body.
  mkdir "$name" || exit 1
  cd "$name" || exit 1
  mkdir notes/ notes/Pictures nmap
  filename="${name}.md"
  cd notes/ || exit 1
  touch "$filename"
  cp -r ~/.obsidian .
  cd ../../
done
| true
|
e19ccdb68d19dfee56f7ab65ccb8da6b8809adfc
|
Shell
|
rgerganov/tpm2-tools
|
/test/integration/tests/activecredential.sh
|
UTF-8
| 1,495
| 3.09375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# SPDX-License-Identifier: BSD-3-Clause
# Integration test for tpm2_makecredential / tpm2_activatecredential:
# creates an EK + AK, makes a credential against the EK, then activates it
# with a policy-secret session authorized by the endorsement hierarchy.
source helpers.sh
cleanup() {
rm -f secret.data ek.pub ak.pub ak.name mkcred.out actcred.out ak.out \
ak.ctx session.ctx
# Evict persistent handles, we want them to always succeed and never trip
# the onerror trap.
tpm2_evictcontrol -Q -C o -c 0x81010009 2>/dev/null || true
if [ "$1" != "no-shut-down" ]; then
shut_down
fi
}
trap cleanup EXIT
start_up
cleanup "no-shut-down"
# The secret that will be wrapped into the credential blob.
echo 12345678 > secret.data
# Endorsement key persisted at 0x81010009; attestation key bound to it.
tpm2_createek -Q -c 0x81010009 -G rsa -u ek.pub
tpm2_createak -C 0x81010009 -c ak.ctx -G rsa -g sha256 -s rsassa -u ak.pub \
-n ak.name -p akpass> ak.out
# Capture the yaml output and verify that its the same as the name output
loaded_key_name_yaml=`python << pyscript
from __future__ import print_function
import yaml
with open('ak.out', 'r') as f:
doc = yaml.safe_load(f)
print(doc['loaded-key']['name'])
pyscript`
# Use -c in xxd so there is no line wrapping
file_size=`ls -l ak.name | awk {'print $5'}`
loaded_key_name=`cat ak.name | xxd -p -c $file_size`
test "$loaded_key_name_yaml" == "$loaded_key_name"
tpm2_makecredential -Q -e ek.pub -s secret.data -n $loaded_key_name \
-o mkcred.out
TPM2_RH_ENDORSEMENT=0x4000000B
# Activation requires endorsement-hierarchy authorization via policysecret.
tpm2_startauthsession --policy-session -S session.ctx
tpm2_policysecret -S session.ctx -c $TPM2_RH_ENDORSEMENT
tpm2_activatecredential -Q -c ak.ctx -C 0x81010009 -i mkcred.out \
-o actcred.out -p akpass -P"session:session.ctx"
tpm2_flushcontext session.ctx
exit 0
| true
|
bc206f04ae427fe2d3206c39ad0abd14e57e55df
|
Shell
|
Rb-tech-byte/open5gs-k8s
|
/generate-configs
|
UTF-8
| 2,094
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Rewrite the distributed open5gs / freeDiameter config files for a k8s
# deployment: strip build paths, template the MongoDB URI, disable IPv6,
# listen on all addresses, use service DNS names, mount TLS from /etc/tls,
# and prefer TCP over SCTP.
set -e
HERE=$(dirname "$(readlink --canonicalize "$BASH_SOURCE")")
. "$HERE/_env"
ETC=$HERE/work/etc
rsync -v --recursive --exclude='*.pem' $HERE/work/dist/etc/* "$ETC/"
#MONGODB_URI=$(kubectl get mongodbcommunity open5gs-mongodb --output=jsonpath={.status.mongoUri})
#MONGODB_URI="mongodb://user:password@${MONGODB_URI:10}/open5gs?replicaSet=open5gs-mongodb&authSource=admin"
# webui just needs the MongoDB URI
#echo -n "$MONGODB_URI" > "$ETC/open5gs/db_uri"
# Fix relative directories
sed --in-place \
"s|$HERE/work/dist||g" \
"$ETC/open5gs/"*.yaml
sed --in-place \
"s|$HERE/work/dist||g" \
"$ETC/freeDiameter/"*.conf
# MongoDB URI (for udr, pcf, pcrf, hss)
#sed --in-place \
# "s|^db_uri: .*|db_uri: '$(escape_sed "$MONGODB_URI")'|" \
# "$ETC/open5gs/"*.yaml
# The \$ keeps the literal string $MONGODB_URI in the file so it can be
# substituted at container start time.
sed --in-place \
"s|^db_uri: .*|db_uri: '\$MONGODB_URI'|" \
"$ETC/open5gs/"*.yaml
# Disable IPv6
# The /^#/! guard skips lines that are already commented out.
sed --in-place \
'/^#/!s|- ::1|#- ::1|' \
"$ETC/open5gs/"*.yaml
# Listen on all addresses
# NOTE(review): in these patterns "\0" is just an escaped literal '0'
# (backreferences are \1-\9), so the pattern matches "127.0.0..*" — confirm
# that is the intent.
sed --in-place \
'/^#/!s|127\.\0\.\0\..*|0.0.0.0|' \
"$ETC/open5gs/"*.yaml
sed --in-place \
's|^ListenOn = |#ListenOn =|' \
"$ETC/freeDiameter/"*.conf
# Addresses to domain names
sed --in-place \
's|ConnectTo = "127\.\0\.\0\.2"|ConnectTo = "mme"|' \
"$ETC/freeDiameter/"*.conf
sed --in-place \
's|ConnectTo = "127\.\0\.\0\.4"|ConnectTo = "smf"|' \
"$ETC/freeDiameter/"*.conf
sed --in-place \
's|ConnectTo = "127\.\0\.\0\.8"|ConnectTo = "hss"|' \
"$ETC/freeDiameter/"*.conf
sed --in-place \
's|ConnectTo = "127\.\0\.\0\.9"|ConnectTo = "pcrf"|' \
"$ETC/freeDiameter/"*.conf
# Certificates
sed --in-place \
's|^TLS_Cred =.*|TLS_Cred = "/etc/tls/tls.crt", "/etc/tls/tls.key";|' \
"$ETC/freeDiameter/"*.conf
sed --in-place \
's|^TLS_CA =.*|TLS_CA = "/etc/tls/ca.crt";|' \
"$ETC/freeDiameter/"*.conf
# Disable SCTP
# (in many Linuxes SCTP is disabled by default)
sed --in-place \
's|#No_SCTP;|No_SCTP;|' \
"$ETC/freeDiameter/"*.conf
sed --in-place \
's|#Prefer_TCP;|Prefer_TCP;|' \
"$ETC/freeDiameter/"*.conf
| true
|
c983ef69d0a25bc3f17ba13cfbf70ac291e5fc0d
|
Shell
|
jammsen/docker-the-forest-dedicated-server
|
/usr/bin/servermanager.sh
|
UTF-8
| 2,632
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# SteamCMD APPID for the-forest-dedicated-server
# NOTE(review): the comment above mentions an APPID but none is defined
# here; the id is presumably baked into /steamcmdinstall.txt — confirm.
# Host paths for the game server's save games and configuration.
SAVEGAME_PATH="/theforest/saves/"
CONFIG_PATH="/theforest/config/"
CONFIGFILE_PATH="/theforest/config/config.cfg"
# True (exit 0) when a TheForestDedicatedServer.exe process shows up in ps.
function isServerRunning() {
    # The grep -v filters the grep process itself out of the listing; the
    # function's exit status is simply the final grep's status.
    ps axg | grep -F "TheForestDedicatedServer.exe" | grep -v -F 'grep' > /dev/null
}
# True (exit 0) when the Xvfb virtual display :1 used by Wine is running.
function isVirtualScreenRunning() {
    # grep -v drops the grep process itself; the pipeline's status is the
    # function's return value.
    ps axg | grep -F "Xvfb :1 -screen 0 1024x768x24" | grep -v -F 'grep' > /dev/null
}
# One-time Wine setup: create the 64-bit wine prefix (running winecfg once
# if the prefix is empty) and append the Wine env exports to bash.bashrc.
function setupWineInBashRc() {
echo "Setting up Wine in bashrc"
mkdir -p /winedata/WINE64
if [ ! -d /winedata/WINE64/drive_c/windows ]; then
cd /winedata
echo "Setting up WineConfig and waiting 15 seconds"
winecfg > /dev/null 2>&1
sleep 15
fi
cat >> /etc/bash.bashrc <<EOF
export WINEPREFIX=/winedata/WINE64
export WINEARCH=win64
export DISPLAY=:1.0
EOF
}
# Run setupWineInBashRc only when bash.bashrc does not yet mention wine.
function isWineinBashRcExistent() {
grep "wine" /etc/bash.bashrc > /dev/null
if [[ $? -ne 0 ]]; then
echo "Checking if Wine is set in bashrc"
setupWineInBashRc
fi
}
function startVirtualScreenAndRebootWine() {
# Start X Window Virtual Framebuffer
export WINEPREFIX=/winedata/WINE64
export WINEARCH=win64
export DISPLAY=:1.0
Xvfb :1 -screen 0 1024x768x24 &
# Restart the wine session against the fresh display.
wineboot -r
}
# Fresh install: set up wine, fetch steamcmd, seed config.cfg from the
# example and template the account token / server id / host IP into it.
function installServer() {
# force a fresh install of all
isWineinBashRcExistent
steamcmdinstaller.sh
mkdir -p $SAVEGAME_PATH $CONFIG_PATH
cp /server.cfg.example $CONFIGFILE_PATH
sed -i -e "s/###serverSteamAccount###/$SERVER_STEAM_ACCOUNT_TOKEN/g" $CONFIGFILE_PATH
sed -i -e "s/###RANDOM###/$RANDOM/g" $CONFIGFILE_PATH
# Replace any IPv4 literal in the template with this container's address.
sed -i -e "s/[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/$(hostname -I)/g" $CONFIGFILE_PATH
bash /steamcmd/steamcmd.sh +runscript /steamcmdinstall.txt
}
function updateServer() {
# force an update and validation
echo ">>> Doing an update of the gameserver"
bash /steamcmd/steamcmd.sh +runscript /steamcmdinstall.txt
}
# Ensure the virtual display is up, clear a stale X lock, then run the
# Windows server binary under wine64 in the foreground.
function startServer() {
if ! isVirtualScreenRunning; then
startVirtualScreenAndRebootWine
fi
rm /tmp/.X1-lock 2> /dev/null
cd /theforest
wine64 /theforest/TheForestDedicatedServer.exe -batchmode -dedicated -savefolderpath /theforest/saves -configfilepath /theforest/config/config.cfg
}
# Entry point: install on first run, optionally update, then start.
function startMain() {
    # Check if server is installed, if not try again
    if [ ! -f "/theforest/TheForestDedicatedServer.exe" ]; then
        installServer
    fi
    # Guard against ALWAYS_UPDATE_ON_START being unset/empty: the original
    # unquoted `[ $ALWAYS_UPDATE_ON_START == 1 ]` aborted with
    # "unary operator expected" in that case.
    if [ "${ALWAYS_UPDATE_ON_START:-0}" = "1" ]; then
        updateServer
    fi
    startServer
}

startMain
| true
|
61119fb014b69df2509e33b6c13ab3878fa2a5f5
|
Shell
|
Sumanshu-Nankana/Linux
|
/Bash/Cut #4.sh
|
UTF-8
| 117
| 2.65625
| 3
|
[] |
no_license
|
# https://www.hackerrank.com/challenges/text-processing-cut-4/problem
# Print the first four characters of every line read from stdin.
# -r keeps backslashes literal and IFS= preserves leading blanks; quoting
# the expansion fixes the original's word-splitting (which collapsed runs
# of whitespace) and accidental glob expansion.
while IFS= read -r line
do
    printf '%s\n' "$line" | cut -c -4
done
| true
|
849075033a92e1170af76c54449a90035c45b60e
|
Shell
|
creativefloworg/creativeflow
|
/creativeflow/datagen/mp4_to_frames.sh
|
UTF-8
| 677
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
# Extract all frames of a video to images.
#   $1 - input video file
#   $2 - output frame pattern, e.g. /path/out/frame%06d.png
set -o nounset
USAGE="$0 inputfile.mp4 /path/to/output/frameprefix%06d.png"
if [ $# -lt 2 ]; then
    echo "$USAGE"
    exit
fi
INFILE="$1"
OFRAMEPATTERN="$2"
ODIR=$(dirname "$OFRAMEPATTERN")
mkdir -p "$ODIR"
# First we extract to temporary directory
TDIR=$ODIR/tmp$RANDOM
mkdir "$TDIR"
TMPPATTERN=$TDIR/$(basename "$OFRAMEPATTERN")
ffmpeg -hide_banner -loglevel panic \
    -start_at_zero -i "$INFILE" "$TMPPATTERN"
# Then do a simple convert; this ensures more consistent PNG formats
EXT=$(echo "$OFRAMEPATTERN" | awk -F'.' '{printf "%s", $NF}')
# Iterate NUL-delimited so frame paths containing spaces survive (the
# original word-split `$(find ...)` and used unquoted expansions).
while IFS= read -r -d '' F; do
    convert "$F" "$ODIR/$(basename "$F")"
done < <(find "$TDIR" -name "*.${EXT}" -print0)
rm -rf "$TDIR"
| true
|
7d131533a640480936fbaba770e5a49c88bb214d
|
Shell
|
viraj99/serendipity-api
|
/projects/spring-boot/server/services/jasperreports-server/scripts/wait-for-container-to-exit.sh
|
UTF-8
| 1,231
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Use this script to test if a container has exited
# Polls the Docker Engine API over the unix socket every 30s until the
# named container's state becomes "exited", then prints it and returns.
#   $1 - container name or id
CONTAINER=$1
# When you're curling the unix socket, you only need a single /
# API_SERVER=http:/v1.24
API_SERVER=http:/docker
while :
do
INSPECT_CONTAINER=`curl --silent --unix-socket /var/run/docker.sock ${API_SERVER}/containers/${CONTAINER}/json`
INSPECT_CONTAINER_RESULT=$(jq --raw-output '.State.Status' <<< "${INSPECT_CONTAINER}")
# echo $INSPECT_CONTAINER_RESULT
if [[ $INSPECT_CONTAINER_RESULT == "exited" ]]; then
echo $INSPECT_CONTAINER_RESULT
exit
else
echo "sleep 30"
sleep 30
fi
done
# chmod 755 wait-for-container-to-exit.sh
# ./wait-for-container-to-exit.sh jasperreports-server-cmdline
# https://docs.docker.com/engine/api/v1.24/#31-containers
# https://docs.docker.com/engine/api/v1.24/
# https://docs.docker.com/engine/api/sdk/examples/
# https://jpetazzo.github.io/2015/09/03/do-not-use-docker-in-docker-for-ci/
# https://jpetazzo.github.io/2016/04/03/one-container-to-rule-them-all/
# https://gist.github.com/paulosalgado/91bd74c284e262a4806524b0dde126ba
# JSON lines format
# echo $INSPECT_CONTAINER | jq -s --raw-output'.[0].message'
# echo $INSPECT_CONTAINER | jq -n --raw-output 'input.message'
| true
|
4ab3325cf0d56fae33dc1e4de7afb88883c9fa9e
|
Shell
|
scheehan/log_replicator
|
/log_sender.sh
|
UTF-8
| 4,159
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/bash
# Use it with your own risk.
# This bash script will dynamically generate data value base upon date and
# time retrieved from system, dummy IP addresses, ports, and serial number.
# The bash script needs the shell's /dev/udp pseudo-device for UDP output.
#   $ bash log_shooter.sh 10.47.11.248 5 2
#
# Fix: the original wrapped these notes in Python-style ''' blocks, which
# are NOT comments in bash — they parse as (multi-line) quoted words that
# get executed, and the second block even contained an apostrophe that
# broke the quoting of the code after it. Converted to real # comments.
source ./cal_ip.sh
source ./input_check.sh
# retrieve destination target IP address
k_value=$1
# retrieve first argument and validate value with input_check function
l_value=$(checkinput $2)
# retrieve second argument and validate value with input_check function
j_value=$(checkinput $3)
# final check return code from input_check function
if [ "$l_value" == "d" ]; then
echo "invalid input valid"
echo "example: create_addr_a.sh x.x.x.x <int> [1 | 2]"
echo "acceptable IPv4 address "
echo "acceptable integer 1-999999 "
echo "selective option for log type"
echo "1 - FortiGate IPS"
echo "2 - Cisco ASA"
exit 1;
fi;
if [ "$j_value" == "d" ]; then
echo "invalid input valid"
echo "example: create_addr_a.sh <int> [1 | 2]"
echo "acceptable integer 1-9999999 "
echo "selective option for log type"
echo "1 - FortiGate IPS"
echo "2 - Cisco ASA"
exit 1;
fi;
SRCP_NUM=50100 # SOURCE PORT NUMBER TO STARTS OFF
ISERIAL_NUM=758514639 # LOG ENTRY SERIAL NUMBER TO STARTS OFF
FIRST_IP=223.109.0.0 # IP ADDRESS TO STARTS OFF
IP=$FIRST_IP # ASSIGN TO IP VARIABLE
# Open fd 7 as a UDP connection to the target syslog port.
# NOTE: /dev/{udp,tcp} redirections require bash built with net
# redirections enabled; otherwise this fails with
# "No such file or directory".
exec 7>"/dev/udp/$k_value/514"
# case statement. prompt user to select log format
case "$3" in
# iterate with for loop base on user input number
1)
for ((i = 0 ; i < $l_value ; i++)); do
# pause 0.5s for each iteration; adjust interval here
sleep 0.5
# increment source port number and serial number by 1
let "SRCP_NUM+=1"
let "ISERIAL_NUM+=1"
# increment IP address by call nextip function
IP=$(nextip $IP)
# get system time with month and year format
MY_MTIME=`date +%Y-%m-%d`
# get system time with T format
MY_CTIME=`date +%T`
# print syslog sample output
result=$(printf "%s\n" "<185>date=$MY_MTIME "time=$MY_CTIME " devname=GN-IPS-4 devid=FGT37D9P00000005 logid=0419016384 type=utm subtype=ips eventtype=signature level=alert vd=TP severity=critical srcip=234.209.91.49 srccountry="Netherlands" dstip="$IP " srcintf="port10" dstintf="port9" policyid=2000 sessionid=1816135564 action=dropped proto=17 service="udp/42448" attack="Netcore.Netis.Devices.Hardcoded.Password.Security.Bypass" srcport="$SRCP_NUM " dstport=53413 direction=outgoing attackid=42781 profile="GSN" ref="http://www.fortinet.com/ids/VID42781" incidentserialno="$ISERIAL_NUM " msg="backdoor\: Netcore.Netis.Devices.Hardcoded.Password.Security.Bypass," crscore=50 crlevel=critical")
# echo with std output
echo $result
echo $result >&7
done;
exec 7>&- # close udp connection
exit 0;
;;
2)
for ((i = 0 ; i < $l_value ; i++)); do
# pause 0.5s for each iteration; adjust interval here
sleep 0.5
# increment source port number and serial number
let "SRCP_NUM+=1"
let "ISERIAL_NUM+=1"
# increment IP address and date
IP=$(nextip $IP)
# get system time with month and year format
MY_MTIME=`date +%Y-%m-%d`
# get system time with T format
MY_CTIME=`date +%T`
# print syslog sample output
# NOTE(review): $MY_DTIME is never assigned anywhere in this script and
# expands to an empty string here — confirm the intended message format.
result=$(printf "%s\n" "<134>$MY_MTIME $MY_DTIME $MY_CTIME: %ASA-6-302013: Build outbound TCP connection 76118 for outside:$IP/80 ($IP/80) to inside:192.168.20.31/3530 (192.168.0.1/5957)")
# echo with std output
echo $result
echo $result >&7
done;
exec 7>&- # close udp connection
exit 0;
;;
esac
| true
|
c8d8aaaf0dac259a6eed5f9fcdea85e0e18ad9c5
|
Shell
|
cherylling/OS-kerneltest
|
/测试套/debug_perf_t/debug_perf_t_src/testcase/at/perf_v1r2c00_record_tc002.sh
|
GB18030
| 1,687
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
#######################################################################
##- @Copyright (C), 1988-2013, Huawei Tech. Co., Ltd.
##- @Suitename: opt_perf_t
##- @Name: perf_v1r2c00_record_tc002
##- @Author: y00197803
##- @Date: 2013-4-17
##- @Precon: 1.֧perf
##- @Brief: perf record -A2
##- @Detail: 1.perf record ./test -A
# 2.perf record ./test --append
##- @Expect: perf.data
##- @Level: Level 1
##- @Auto:
##- @Modify:
#######################################################################*/
. ${TCBIN}./common_perf.sh
######################################################################
##- @Description: prepare,set the init env.
## prepare_tmp comes from common_perf.sh; datafile is the per-run
## perf.data path (PID-suffixed to avoid collisions).
######################################################################
prepareenv()
{
prepare_tmp
datafile=${TCTMP}/record_tc002-$$.data
}
######################################################################
##- @Description: run `perf record -A/--append` twice against the same
## data file and verify via check_ret_code/has_file (from common_perf.sh)
## that both runs succeed and the file exists.
######################################################################
dotest()
{
perf record -o $datafile -A ${USE_HUGE}common_prg_1 2>/dev/null
ret=$?
if [ $ret -ne 0 ]; then
echo "If there arm some error such as : failed to mmap with 22,
Then you need rebuild the perf command with necessary lib"
fi
check_ret_code $ret
has_file $datafile
perf record -o $datafile --append ${USE_HUGE}common_prg_1 2>/dev/null
check_ret_code $?
has_file $datafile
}
cleanenv()
{
clean_end
}
######################################################################
##-@ Description: main function
## use_huge (from common_perf.sh) presumably toggles the ${USE_HUGE}
## prefix from the test arguments — confirm in common_perf.sh.
######################################################################
use_huge $*
prepareenv
dotest
cleanenv
| true
|
cbb2910d84c1fad96d4e72d449d39d252066c08a
|
Shell
|
zeheater/dotfiles
|
/.local/share/jwm-scripts/runmenu.sh
|
UTF-8
| 1,656
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
# Shows a run menu with calculator support.
# Deps:
#  - rofi (can easily be converted to dmenu)
#  - bc or python (either version 2 or 3) for calculations
#  - notification system setup (`notify-send` should work properly)
# Example input: geany, 2+2, 2-1, 10/3, 2*2=, (3^2)*10

# ${PATH//:/ } is deliberately left unquoted: the substitution turns PATH
# into a space-separated list of directories for find to search.
input=$( find ${PATH//:/ } -maxdepth 1 -executable -printf '%f\n' | rofi -show combi -i)
#input=$( find ${PATH//:/ } -maxdepth 1 -executable -printf '%f\n' | rofi -dmenu -i )

# Treat the input as math when it contains +, -<digit>, *, / or ^.
if grep -q '+\|-[[:digit:]]\|\*\|\/\|\^' <<<"$input"; then
  if [ -x "$(command -v bc)" ]; then # has bc package
    # If there is an equal sign (=) at the end, get rid of it.
    input=$(echo "$input" | sed 's/\=//g')
    # Show the result in a notification (quoted so it is a single argument).
    notify-send "The result is: $(echo "$input" | bc)"
  elif [ -x "$(command -v python)" ]; then # has python package
    # Strip '='; replace "^" with "**" so Python can calculate powers.
    input=$(echo "$input" | sed 's/\=//g;s/\^/**/g')
    # The `float(1)*` part is so that Python 2 returns decimal places.
    notify-send "The result is: $(python -c "print( float(1)*$input )")"
  else
    notify-send "Neither bc nor python was found to do the calculations. Please install one of them."
  fi
else
  # Not a calculation, so run the command.
  # Bug fix: the original `$($input)` ran the command inside a command
  # substitution and then tried to execute its *output* as a second
  # command; run the selected command itself instead.
  $input
fi
| true
|
26f82a08d4c4fe20e0704932dba9b4b93f244f0f
|
Shell
|
jppferguson/dotfiles
|
/homebrew/path.zsh
|
UTF-8
| 118
| 2.75
| 3
|
[] |
no_license
|
# Prepend Homebrew's install prefix to PATH on Apple-silicon (arm64) Macs;
# Intel Macs and other platforms are left untouched.
case "$(uname -m)" in
  arm64) export PATH="/opt/homebrew/bin:${PATH}" ;;
esac
| true
|
3836b39315a6231cf71ee5b7df8acf4d71291534
|
Shell
|
crowdbotics/blueprint-react-native-articles-screen
|
/blueprint-lib/docker.sh
|
UTF-8
| 416
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
##
# Docker related functions
##

# Define file paths
DOCKER_DOCKERFILE_PATH="$BASE_PATH/Dockerfile"

##
# Add a system dependency ($1) to the Dockerfile's `RUN apk add` list,
# if the Dockerfile exists and does not already mention it (idempotent).
##
add_dockerfile_dependency() {
  # Quote $1: the original's unquoted `grep -q $1` word-split/globbed the
  # dependency name and treated a leading '-' as a grep option.
  if [ -f "$DOCKER_DOCKERFILE_PATH" ] && ! grep -q -- "$1" "$DOCKER_DOCKERFILE_PATH"
  then
    echo "Adding dependency $1 to Dockerfile"
    # Insert " <dep> \" on its own line right after the "RUN apk add" line.
    # The original used BSD-only `sed -i ''` (fails under GNU sed); writing
    # to a temp file and moving it back works with both.
    local tmp
    tmp=$(mktemp) || return 1
    sed -e 's/\(RUN apk add.*\)/\1\
 '"$1"' \\/' "$DOCKER_DOCKERFILE_PATH" > "$tmp" && mv "$tmp" "$DOCKER_DOCKERFILE_PATH"
  fi
}
| true
|
90e383a94eba4862210ec560d7f4dd4b133bd568
|
Shell
|
gggeek/pake
|
/bin/pake.sh
|
UTF-8
| 766
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Shell wrapper for pake (based on Phing shell wrapper)
# $Id: pake.sh 67 2005-10-08 11:50:16Z fabien $
#
# This script will do the following:
# - check for PHP_COMMAND env, if found, use it.
#   - if not found assume php is on the path
# - check for PAKE_HOME env, if found use it
#   - if not, fall back to the PEAR install location
# - check for PHP_CLASSPATH, if found use it
#   - if not found set it using PAKE_HOME/lib

# Default PAKE_HOME to the PEAR directory substituted at package time.
if [ -z "$PAKE_HOME" ]; then
  PAKE_HOME="@PEAR-DIR@"
fi

# Default to whatever `php` resolves to on PATH.
if [ -z "$PHP_COMMAND" ]; then
  # echo "WARNING: PHP_COMMAND environment not set. (Assuming php on PATH)"
  export PHP_COMMAND=php
fi

# pake's PHP include path defaults to its own lib directory.
if [ -z "$PHP_CLASSPATH" ]; then
  PHP_CLASSPATH=$PAKE_HOME/lib
  export PHP_CLASSPATH
fi

# $PHP_COMMAND stays unquoted on purpose: it may carry extra arguments.
$PHP_COMMAND -d html_errors=off -qC $PAKE_HOME/pake.php "$@"
| true
|
9854138d8ad55d243aab371aefcf103f2382ea6c
|
Shell
|
flyzjhz/DWR-956_decrypted_fw
|
/decrypted_and_extracted/dwr956_v1.0.0.8/root/etc/rc.d/url_filter
|
UTF-8
| 1,185
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/sh
#$1=url
# rc.d hook managing iptables URL-filter rules (webstr match):
#   start  - add a DROP rule for every configured URL entry
#   add N  - add the rule for entry N
#   delete N - remove the rule for entry N
#   stop   - remove all rules (only when the firewall is disabled)
# URL entries come from variables like url_<N>_URLName loaded via rc.conf.
OUTPUT="/tmp/cmd_output"
flush_output()
{
echo "" > "$OUTPUT"
}
SED="/bin/sed"
# Load the system configuration once per shell (guarded by ENVLOADED /
# CONFIGLOADED so recursive re-sourcing of this script skips it).
if [ ! "$ENVLOADED" ]; then
if [ -r /etc/rc.conf ]; then
. /etc/rc.conf 2> /dev/null
ENVLOADED="1"
fi
fi
if [ ! "$CONFIGLOADED" ]; then
if [ -r /etc/rc.d/config.sh ]; then
. /etc/rc.d/config.sh 2>/dev/null
CONFIGLOADED="1"
fi
fi
#Add iptables entry during bootup in order to block url entries already present in rc.conf
# Indirect expansion of the rc.conf variable firewall_main_enable.
eval fw_en='$'firewall_main_enable
case $1 in
"start")
if [ $fw_en -eq 1 ]; then
i=0
while [ $i -lt $url_filter_cfg_Count ]; do
# Re-source this script with "add <i>" for each configured entry.
. /etc/rc.d/url_filter add ${i}
i=`expr $i + 1`
done
fi
;;
"add")
#add url entry
if [ $fw_en -eq 1 ]; then
# Look up url_<N>_URLName for entry number $2.
eval url='$'url_${2}_URLName
/usr/sbin/iptables -I FORWARD -p tcp -m webstr --url "${url}" -j DROP
fi
;;
"delete")
#delete url entry
eval url='$'url_${2}_URLName
/usr/sbin/iptables -D FORWARD -p tcp -m webstr --url "${url}" -j DROP
;;
"stop")
#delete all url entry
# NOTE(review): stop only removes rules when the firewall is *disabled*
# (fw_en -eq 0) — confirm this inversion is intended.
if [ $fw_en -eq 0 ]; then
i=0
while [ $i -lt $url_filter_cfg_Count ]; do
. /etc/rc.d/url_filter delete ${i}
i=`expr $i + 1`
done
fi
;;
"*")
# NOTE(review): a quoted "*" case pattern matches only the literal
# argument '*', not "anything else" — confirm a default arm was meant.
;;
esac
| true
|
54cf9a5c1b4109e1397576bff85d583337f8bac0
|
Shell
|
GDSourceMakers/TileMap
|
/Scripts/build.1.sh
|
UTF-8
| 638
| 2.96875
| 3
|
[] |
no_license
|
#! /bin/sh
# Example build script for Unity3D project. See the entire example: https://github.com/JonathanPorta/ci-build

# Change this the name of your project. This will be the name of the final executables as well.
project="Tile-Map"

travisUnity="/Applications/Unity/Unity.app/Contents/MacOS/Unity"
unityPath="$travisUnity"

# csc /target:library /out:TileMap.DLL /Assets/TileMap/clipper.cs /Assets/TileMap/TileMap.cs

echo "Exporting Package"
# Use a forward-slash path: "Assets\TileMap" is a Windows-style path that
# Unity on macOS does not resolve.
"$unityPath" -batchmode -nographics -projectPath "$(pwd)" -exportPackage "Assets/TileMap" -logFile "$(pwd)/unity.log"

echo 'Logs from build'
logFile="$(pwd)"/unity.log
# Bug fix: the original ran `echo cat "$logFile"`, which printed the word
# "cat" and the file name instead of the log contents.
cat "$logFile"
|
fa478d7a7c54088e265546e8c6c4fe4fd647a43d
|
Shell
|
wisezt/git_practise_2019_Nov
|
/section4/t04.sh
|
UTF-8
| 190
| 2.9375
| 3
|
[] |
no_license
|
#! /bin/bash
# Exercise: read a name, print it (or "Empty"), and demonstrate that
# nothing after the first top-level `exit` ever runs.
read -p "Input Name:" name
# Quote the expansion: the original `[ -z $name ]` raised
# "too many arguments" whenever the input contained spaces.
if [ -z "$name" ]; then
echo "Empty"
else
echo "$name"
fi
exit
# Unreachable — kept intentionally for the exercise.
echo "line 1"
echo "line 2"
echo "line 3"
exit
echo "line 4"
echo "line 5"
| true
|
b6ef9a8f513805e43f3b36915545bf1579ba0db8
|
Shell
|
veeramarni/Oracle-EBusiness-Suite-Clone-Automation
|
/function_lib/io_calibrate.sh
|
UTF-8
| 470
| 2.90625
| 3
|
[] |
no_license
|
io_calibrate()
{
    # Set ORACLE_SID / ORACLE_HOME for the given database and launch its
    # IO-calibration SQL script as sysdba.
    #   $1 - database name (e.g. DBM01)
    dbname=$1
    # Lowercased copy; quote the tr classes so the shell cannot expand
    # [A-Z]/[a-z] as globs against files in the current directory.
    ldbname=$(echo "$dbname" | tr '[A-Z]' '[a-z]')
    orasid="$dbname"1
    lorasid="$ldbname"1
    export ORACLE_SID="$orasid"
    # Quoted `=` test: the unquoted `==` form broke on an empty argument
    # and `==` is not portable in [ ].
    if [ "$dbname" = "DBM01" ]
    then
        # DBM01 uses the lowercase SID and the first 11.2.0.4 home.
        lorasid="$ldbname"1
        export ORACLE_SID="$lorasid"
        export ORACLE_HOME=/u01/app/oracle/product/11.2.0.4/dbhome_1
    else
        export ORACLE_HOME=/u01/app/oracle/product/11.2.0.4/dbhome_2
    fi
    "$ORACLE_HOME"/bin/sqlplus /" as sysdba" @/u01/app/oracle/scripts/refresh/targets/"$dbname"/"$dbname"_io_calibrate.sql
}
| true
|
44d5657e3726fc93d82fca8b18a1a68b26ef8218
|
Shell
|
jtmieg/AceView
|
/waceview/cgiclean
|
UTF-8
| 421
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# This program cleans up after context files used by pfam.cgi
# Run it from cron with this entry:
# 58 * * * * /bin/sh /a/mark/query_temp/cgi_clean
#
cd /net/vesta/a/mark/query_temp
# Pin the timezone so %j%H matches the prefix the CGI wrote.
TZ=gmt0
export TZ
# Prefix for the current day-of-year + hour, e.g. "ctx-12113-".
now=`date +ctx-%j%H-`
# Every ctx file NOT from the current hour.
# NOTE(review): this parses `ls` output and word-splits $list, so names
# with whitespace would break — presumably ctx names never contain any.
list=` ( ls ctx* | grep -v '^'$now ) 2>/dev/null `
for x in $list
do
rm -f ./$x
done
# Safety net: also remove any ctx file older than one day.
find . -type f -name 'ctx*' ! -mtime -1 -exec rm -f {} ';'
| true
|
1e7a3b2ef5894f750d646c5b46cee04ff9ac819a
|
Shell
|
gerritjvv/pseidon
|
/pseidon-hdfs/pkg/rpm/init.sh
|
UTF-8
| 3,025
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# chkconfig: 2345 20 80
# description: pseidon-hdfs
#
# SysV init script for the pseidon-hdfs watchdog/service.
PATH=/usr/bin:/sbin:/bin:/usr/sbin
export PATH
# Status/exit codes in LSB style (UNKOWN_STAT is defined but unused here).
OK_STAT=0
DEAD_STAT=1
UNKOWN_STAT=4
mkdir -p /var/lock/subsys
# Optional environment overrides (LOCKFILE, GCRONSD, PSEIDON_USER).
[ -f /etc/sysconfig/pseidon-hdfs ] && . /etc/sysconfig/pseidon-hdfs
lockfile=${LOCKFILE-/var/lock/subsys/pseidon-hdfs}
# Start (watchdog) and stop commands.
# NOTE(review): both defaults hang off the same ${GCRONSD-...} override but
# fall back to different scripts (watchdog.sh vs process.sh), so setting
# GCRONSD changes both commands — confirm that is intended.
gcronsd="${GCRONSD-/opt/pseidon-hdfs/bin/watchdog.sh} /opt/pseidon-hdfs/conf/pseidon.edn"
gcronsd_stop="${GCRONSD-/opt/pseidon-hdfs/bin/process.sh} stop /opt/pseidon-hdfs/conf/pseidon.edn"
# Process-name patterns used by status() and kill_proc().
REGEX1="pseidon_hdfs.watchdog"
REGEX2="pseidon_hdfs.pseidon_hdfs"
RETVAL=0
ISDAEMON=0
# Source function library.
FUNCTIONS="/etc/rc.d/init.d/functions"
[ -f $FUNCTIONS ] && . $FUNCTIONS && ISDAEMON=$(grep "daemon()" /etc/rc.d/init.d/functions | wc -l)
JAVASH="/etc/profile.d/java.sh"
[ -f $JAVASH ] && . $JAVASH
if [ -z "$JAVA_HOME" ]; then
echo "JAVA_HOME not set, using /usr/java/latest" >&2
export JAVA_HOME="/usr/java/latest"
fi
# Start the watchdog as PSEIDON_USER (unless it is already running) and
# poll status() for up to 30s before reporting OK/FAILED.
start() {
touch $lockfile
status
RETVAL=$?
if [ $RETVAL = $OK_STAT ]; then
echo "The pseidon is already running"
RETVAL=$OK_STAT
else
echo -n $"Starting pseidon: "
touch /opt/pseidon-hdfs/log/serverlog.log
chmod -R 777 /opt/pseidon-hdfs/log/serverlog.log
# Launch in the background, detached from this shell's stdin.
su - ${PSEIDON_USER:-pseidon} -l -m -c "exec $gcronsd < /dev/null >> /opt/pseidon-hdfs/log/serverlog.log 2>&1 &"
counter=0
while [ $counter -lt 30 ]
do
status
RETVAL=$?
[ "$RETVAL" = $OK_STAT ] && break
sleep 1s
counter=$(( counter + 1 ))
done
fi
[ $RETVAL = $OK_STAT ] && echo " OK"
[ $RETVAL = $DEAD_STAT ] && echo " FAILED"
return $RETVAL
}
# Ask the process to stop, escalate with kill_proc, and poll for up to 30s;
# removes the lock file once the process is confirmed dead.
stop() {
status
RETVAL=$?
if [ $RETVAL = $OK_STAT ]; then
echo -n "Stopping pseidon-hdfs: "
su - ${PSEIDON_USER:-pseidon} -l -m -c "exec $gcronsd_stop < /dev/null >> /opt/pseidon-hdfs/log/serverlog.log 2>&1 &"
kill_proc
counter=0
while [ $counter -lt 30 ]
do
status
RETVAL=$?
[ "$RETVAL" = $DEAD_STAT ] && break
sleep 1s
counter=$(( counter + 1 ))
# Re-send the kill if the process is still alive after 3 seconds.
[ $counter -gt 3 ] && [ $RETVAL = $OK_STAT ] && kill_proc
done
[ $RETVAL = $DEAD_STAT ] && rm -f ${lockfile} && echo "OK"
else
echo "No pseidon-hdfs instance is running"
RETVAL=$DEAD_STAT
fi
return $RETVAL
}
# stop followed by start; start runs regardless of stop's result.
restart() {
stop
start
}
# Terminate any watchdog process owned by PSEIDON_USER.
kill_proc() {
pkill -u ${PSEIDON_USER:-pseidon} -f "$REGEX1"
}
# Probe for a running watchdog (REGEX1) or main process (REGEX2); sets
# RETVAL to OK_STAT when either is found, DEAD_STAT otherwise, and
# returns that value.
status() {
    if pgrep -f "$REGEX1" >/dev/null || pgrep -f "$REGEX2" >/dev/null; then
        RETVAL=$OK_STAT
    else
        RETVAL=$DEAD_STAT
    fi
    return $RETVAL
}
# Dispatch on the init-script action argument.
case "$1" in
start)
start
exit $?
;;
stop)
# Map "process is dead" (DEAD_STAT) to a successful stop.
stop
RETVAL=$?
if [ $RETVAL = $DEAD_STAT ]; then
exit 0
else
exit 1
fi
;;
restart)
# Unlike restart(), abort if stop fails before starting again.
stop || exit $?
start
exit $?
;;
status)
status
RETVAL=$?
[ $RETVAL = $OK_STAT ] && echo "Running"
[ $RETVAL = $DEAD_STAT ] && echo "Stopped"
exit $RETVAL
;;
*)
echo $"Usage: $0 {start|stop|status|restart}"
exit $DEAD_STAT
;;
esac
exit $OK_STAT
| true
|
43db7b4cc8b9f25a5f90f232bf9474037f0cbb60
|
Shell
|
yagamy4680/vagrant-boxes
|
/externals/init
|
UTF-8
| 279
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Clone the dotfiles repository next to this script (first run only) and
# initialize its submodules, then return to the caller's directory.
CURRENT=$(pwd)
# Quoted cd targets with failure handling: the originals word-split on
# paths with spaces and silently carried on in the wrong directory.
cd "$(dirname "$0")" || exit 1
EXTERNAL_DIR=$(pwd)
cd "$CURRENT" || exit 1
if [ ! -d "${EXTERNAL_DIR}/dotfiles" ]; then
    git clone https://github.com/yagamy4680/dotfiles.git "${EXTERNAL_DIR}/dotfiles"
    cd "${EXTERNAL_DIR}/dotfiles" || exit 1
    git submodule init
    git submodule update
fi
| true
|
ca7b36e885f01106625d9e1cf69efebecb75142b
|
Shell
|
ilovedanzi521/deploy-center
|
/repo/init.sh
|
UTF-8
| 2,849
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh
# "init.sh <remote_addr> [remote_port]
# Bootstrap a deploy target: generate a local env.sh, create the directory
# layout on the remote host over ssh, and copy the repo scripts/modules
# there via scp.
echo "Start $0 $*"
# initialize env variable
# manual configure FTP_SERVER and FTP_PORT
CC_FTP_SERVER=192.168.0.72
CC_FTP_PORT=21
CC_FTP_ROOT=/pub/repo
CC_FTP_USER=ftp
CC_FTP_PSWD=ftp
CC_HOME_DIR=~/repo
CC_PACKAGES_DIR=$CC_HOME_DIR/packages
CC_PACKAGES_TOOL_DIR=$CC_HOME_DIR/packages/tools
CC_MODULES_DIR=$CC_HOME_DIR/modules
CC_SCRIPTS_DIR=$CC_HOME_DIR/scripts
CC_DEPLOY_DIR=~/deploy
CC_BACKUP_DIR=$CC_DEPLOY_DIR/backup
CC_LOG_DIR=$CC_DEPLOY_DIR/logs
# Get remote host from first parameter
REMOTE_HOST=
REMOTE_PORT=22
if [ $# -eq 1 ];then
REMOTE_HOST=$1
elif [ $# -eq 2 ];then
REMOTE_HOST=$1
REMOTE_PORT=$2
else
echo "ERROR: remote host unknown."
exit 10
fi
echo "paramNum:$#, REMOTE_HOST=$REMOTE_HOST:$REMOTE_PORT ftp_server=$CC_FTP_SERVER ftp_port=$CC_FTP_PORT"
# init generate env.sh
# The heredoc is unquoted on purpose: the CC_* values above are expanded
# into the generated file.
cat << EOF > $CC_HOME_DIR/env.sh
#!/bin/sh
CC_FTP_SERVER=$CC_FTP_SERVER
CC_FTP_PORT=$CC_FTP_PORT
CC_FTP_ROOT=$CC_FTP_ROOT
CC_FTP_USER=$CC_FTP_USER
CC_FTP_PSWD=$CC_FTP_PSWD
export CC_FTP_SERVER CC_FTP_PORT CC_FTP_ROOT CC_FTP_USER CC_FTP_PSWD
CC_HOME_DIR=$CC_HOME_DIR
CC_PACKAGES_DIR=$CC_PACKAGES_DIR
CC_PACKAGES_TOOL_DIR=$CC_PACKAGES_TOOL_DIR
CC_MODULES_DIR=$CC_MODULES_DIR
CC_SCRIPTS_DIR=$CC_SCRIPTS_DIR
export CC_HOME_DIR CC_MODULES_DIR CC_SCRIPTS_DIR
CC_DEPLOY_DIR=$CC_DEPLOY_DIR
CC_BACKUP_DIR=$CC_BACKUP_DIR
CC_LOG_DIR=$CC_LOG_DIR
export CC_DEPLOY_DIR CC_LOG_DIR CC_BACKUP_DIR
source $CC_HOME_DIR/common.sh
EOF
chmod u+x $CC_HOME_DIR/env.sh && . $CC_HOME_DIR/env.sh
chmod u+x $CC_HOME_DIR/ftp_client.sh
# create remote dir
ssh -p $REMOTE_PORT $REMOTE_HOST \
mkdir -p $CC_HOME_DIR \
$CC_PACKAGES_DIR $CC_MODULES_DIR $CC_SCRIPTS_DIR $CC_PACKAGES_TOOL_DIR \
$CC_DEPLOY_DIR $CC_BACKUP_DIR $CC_LOG_DIR
echo "create remote dir ret=$?"
# scp env.sh&ftp_client.sh to remote host
scp -P $REMOTE_PORT $CC_HOME_DIR/env.sh \
$CC_HOME_DIR/shell.sh \
$CC_HOME_DIR/ftp_client.sh \
$CC_HOME_DIR/common.sh \
$REMOTE_HOST:$CC_HOME_DIR/
if [ $? -ne 0 ];then
echo "remote host $REMOTE_HOST:$REMOTE_PORT connect failed."
exit 11
fi
scp -P $REMOTE_PORT $CC_MODULES_DIR/* $REMOTE_HOST:$CC_MODULES_DIR/
scp -P $REMOTE_PORT $CC_SCRIPTS_DIR/* $REMOTE_HOST:$CC_SCRIPTS_DIR/
scp -P $REMOTE_PORT $CC_PACKAGES_DIR/ftp $REMOTE_HOST:$CC_PACKAGES_DIR/
echo "scp -P $REMOTE_PORT $CC_HOME_DIR/env.sh $CC_HOME_DIR/ftp_client.sh $CC_HOME_DIR/common.sh $REMOTE_HOST:$CC_HOME_DIR/"
echo "scp -P $REMOTE_PORT $CC_MODULES_DIR/ $REMOTE_HOST:$CC_MODULES_DIR"
# Execute remote chmod +x to env.sh&ftp_client.sh
ssh $REMOTE_HOST -p $REMOTE_PORT chmod u+x $CC_HOME_DIR/env.sh \
$CC_HOME_DIR/shell.sh \
$CC_HOME_DIR/ftp_client.sh \
$CC_PACKAGES_DIR/ftp
cat << EOF
ssh $REMOTE_HOST -p $REMOTE_PORT chmod u+x $CC_HOME_DIR/env.sh
$CC_HOME_DIR/shell.sh
$CC_HOME_DIR/ftp_client.sh
$CC_PACKAGES_DIR/ftp
OK
EOF
| true
|
03aa6f9a538dd96f97a3a80219d0b4ab43d81bb9
|
Shell
|
matt2005/prebuilts
|
/buildsystem/99_copy_compiled_files.sh
|
UTF-8
| 1,283
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Collect built artifacts (aasdk, openauto, gpio2kbd, cam_overlay) into
# BINARY_FILES/ and package /usr/local/qt5 as a split .tar.xz archive.

# Set current folder as home.
# NOTE(review): `cd $0` targets the script *file* itself, so the cd fails
# and HOME ends up as the caller's working directory — confirm intent
# (probably `cd $(dirname $0)` was meant).
HOME="`cd $0 >/dev/null 2>&1; pwd`" >/dev/null 2>&1
# Create output folder
if [ ! -d BINARY_FILES ]; then
    mkdir BINARY_FILES
fi
# Copy aasdk shared objects, if built.
if [ -f ./aasdk/lib/libaasdk.so ]; then
    cp ./aasdk/lib/libaasdk.so ./BINARY_FILES/
fi
if [ -f ./aasdk/lib/libaasdk_proto.so ]; then
    cp ./aasdk/lib/libaasdk_proto.so ./BINARY_FILES/
fi
# Copy openauto binaries, if built.
if [ -f ./openauto/bin/autoapp ]; then
    cp ./openauto/bin/autoapp ./BINARY_FILES/
fi
if [ -f ./openauto/bin/btservice ]; then
    cp ./openauto/bin/btservice ./BINARY_FILES/
fi
# Copy gpio2kbd
if [ -f ./gpio2kbd/gpio2kbd ]; then
    cp ./gpio2kbd/gpio2kbd ./BINARY_FILES/
fi
# Copy cam_overlay.bin
if [ -f ./cam_overlay/cam_overlay.bin ]; then
    cp ./cam_overlay/cam_overlay.bin ./BINARY_FILES/
fi
# Derive the archive name from the installed Qt version and the CPU arch.
QTVERSION=`cat /usr/local/qt5/lib/pkgconfig/Qt5Core.pc | grep Version: | cut -d: -f2 | sed 's/ //g' | sed 's/\.//g'`
ARMARCH=`uname -m`
# Bug fix: the archive is named .tar.xz, so actually xz-compress it
# (plain -cvf produced an uncompressed tar with a misleading name;
# extraction still works either way since tar auto-detects compression).
tar -cvJf "$HOME/BINARY_FILES/Qt_${QTVERSION}_${ARMARCH}_OpenGLES2.tar.xz" /usr/local/qt5
# Split into 50 MB chunks (e.g. for GitHub file-size limits).
split -b 50m -d "$HOME/BINARY_FILES/Qt_${QTVERSION}_${ARMARCH}_OpenGLES2.tar.xz" "$HOME/BINARY_FILES/Qt_${QTVERSION}_${ARMARCH}_OpenGLES2.tar.xz.part"
rm "$HOME/BINARY_FILES/Qt_${QTVERSION}_${ARMARCH}_OpenGLES2.tar.xz"
cd "$HOME"
| true
|
a18c20c968d3faa3a1b470cd7a06ea0584021fd9
|
Shell
|
mdinos/bash-session-scripts
|
/.bash_aliases
|
UTF-8
| 457
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Shell aliases and helpers, sourced into interactive sessions.

# Colourised / shorthand ls variants.
alias ls="ls --color=always"
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'

# Location of the sensors-data checkout plus a shortcut to its entry point.
export SENDAT_HOME=/home/marcus/git/me/sensors-data
alias sendat='${SENDAT_HOME}/sendat.py'

# Redundant self-alias kept from the original file (harmless no-op).
alias await=await

# await CMD [ARGS...] - re-run CMD until it exits 0, sleeping 1s between
# attempts and reporting progress. Fixes vs. the original:
#   - "$@" preserves quoted arguments (plain $@ re-split them),
#   - arithmetic expansion replaces the external `expr` call,
#   - no 1-second sleep after the final *successful* attempt.
await () {
    local attempts=0
    local rc=1
    while [ "$rc" -ne 0 ]; do
        echo "Trying to execute \"${*}\", ${attempts} attempts so far."
        "$@"
        rc=$?
        attempts=$((attempts + 1))
        if [ "$rc" -ne 0 ]; then
            sleep 1
        fi
    done
    echo "Return code ${rc} from ${*}, exiting."
}
| true
|
17c417f65b857fd9a39a103cc07f6bd187474b54
|
Shell
|
ElmWizard/unix-linux
|
/shellscript/test2.sh
|
UTF-8
| 408
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# test-integer2: evaluate the value of an integer.
# Classifies INT as zero / negative / positive and even / odd.
INT=-5
# Bug fix: the original regex was ^-[0-9]+$ (mandatory leading '-'),
# which misclassified every non-negative integer as "not an integer".
# The sign is now optional: ^-?[0-9]+$
if [[ "$INT" =~ ^-?[0-9]+$ ]]; then
    if [ $INT -eq 0 ]; then
        echo "INT is zero"
    else
        if [ $INT -lt 0 ]; then
            echo "INT is less than zero"
        else
            # Typo fix: was "greated".
            echo "INT is greater than zero"
        fi
        if [ $((INT % 2)) -eq 0 ];then
            echo "INT is even"
        else
            echo "INT is odd"
        fi
    fi
else
    echo "INT is not an integer." >&2
    exit 1
fi
| true
|
8dea815bd98b949bbe41902a98f2c26535f4c200
|
Shell
|
andrew-d/bbuild
|
/recipes/cross-libtool/build.sh
|
UTF-8
| 845
| 3.328125
| 3
|
[] |
no_license
|
pkgname="cross-libtool"
pkgdesc="Installs a cross-compiler libtool"
pkgver="2.4.6"
sources=(
"http://ftpmirror.gnu.org/libtool/libtool-${pkgver}.tar.gz"
)
sums=(
"bad"
)
library=true
binary=false
dependencies=()
# Common variables.
_builddir="$BBUILD_SOURCE_DIR/$pkgname-$pkgver"
function build() {
cd "$_builddir"
CFLAGS="${CFLAGS:-} ${BBUILD_STATIC_FLAGS}" \
CXXFLAGS="${CXXFLAGS:-} ${BBUILD_STATIC_FLAGS}" \
LDFLAGS="${LDFLAGS:-} ${BBUILD_STATIC_FLAGS}" \
./configure \
--prefix="${BBUILD_SOURCE_DIR}/dest" \
--host=${BBUILD_CROSS_PREFIX} \
--program-prefix="${BBUILD_CROSS_PREFIX}-" \
|| return 1
make || return 1
make install || return 1
}
function setup_env() {
echo "${BBUILD_SOURCE_DIR}/dest/bin/${BBUILD_CROSS_PREFIX}-libtool" > "$depdir"/.libtool-bin
}
| true
|
7aa8eb19c38b509afaaf32f9a6c611eb4b6c5ec0
|
Shell
|
mikhailnov/spec-helper
|
/relink_symlinks
|
UTF-8
| 1,581
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Shebang fix: this script uses the bash-only "&>" redirection; under a
# POSIX /bin/sh (dash), "cmd &> file" parses as "cmd &" + "> file",
# silently backgrounding the command instead of redirecting it.
#
# Rewrites absolute symlinks under $RPM_BUILD_ROOT as relative ones so
# the packaged links stay valid when installed to a different root.

# We cannot use "readlink -f" command because it doesn't know about $RPM_BUILD_ROOT
# and follows symlinks on real filesystem instead of $RPM_BUILD_ROOT.
# Recursively resolves a link *inside the buildroot*, printing the final
# target path with the buildroot prefix stripped.
readlink_f() {
__symlink="`readlink \"$1\"`"
if [ -z "$__symlink" ]
then
echo $1 | sed s,"$RPM_BUILD_ROOT",,
else
readlink_f "$RPM_BUILD_ROOT$__symlink"
fi
}
if [ -z "$RPM_BUILD_ROOT" ]; then
echo "No build root defined" >&2
exit 1
fi
if [ ! -d "$RPM_BUILD_ROOT" ]; then
echo "Invalid build root" >&2
exit 1
fi
find "$RPM_BUILD_ROOT" \
-type l -print0 | xargs --no-run-if-empty -0 ls 2>/dev/null |
while read symlink; do
path="$(readlink_f "$symlink")"
# Never touch links into kernel/virtual filesystems.
echo $path | grep -q -E '^(/dev|/sys|/proc)' && continue
# skip non-absolute path
if echo $path | grep -q -E '^/'; then
# absolute path needs to be made into an absolute path relative to buildroot
path="$RPM_BUILD_ROOT$path"
if stat "$path" &> /dev/null; then
rm "$symlink"
# ln will try follow symlink if source exists as symlink, so let's move
# it out of the way first, then back afterwards
stat "$path" &> /dev/null && mv "$path" "$path.origlink"
output="`ln -svr \"$path\" \"$symlink\" 2>&1`"
stat "$path.origlink" &> /dev/null && mv "$path.origlink" "$path"
if ! stat "$symlink" &> /dev/null; then
echo "symlink relativization failed:" >&2
echo "$output" >&2
ls --color -l "$symlink" >&2
fi
fi
fi
done
| true
|
81349a0d355d605ffda8e0a69dddfe953a970013
|
Shell
|
scirelli/Terraria_Docker
|
/.docker/build.sh
|
UTF-8
| 374
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the scirelli/terraria Docker image (dedicated server 1.4.0.4).
# $1: optional world name (defaults to "Doc Ock").
# NOTE(review): worldName is captured but never used below — confirm
# whether it was meant to be passed as a --build-arg.
worldName=${1:-'Doc Ock'}
# Remove dangling images before building (best-effort cleanup).
docker rmi $(docker images -f "dangling=true" -q)
docker build \
--build-arg TERRARIA_DOWNLOAD='https://terraria.org/system/dedicated_servers/archives/000/000/038/original/terraria-server-1404.zip' \
--build-arg EXTRACTED_FOLDER_NAME='1404' \
-t "scirelli/terraria:latest" \
-t "scirelli/terraria:1.4.0.4" .
| true
|
9801e42b9ed7d4dde9c4749a5dfb8dedf309c617
|
Shell
|
mihic/clanek-matrike
|
/programi/auto_optimize/auto_optimize.sh
|
UTF-8
| 4,295
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Auto-tunes the compile-time constants (RECT_DIM, SUBC_DIM) in matrix.h
# for the matrix-multiplication benchmark: for each candidate value it
# patches matrix.h via sed, recompiles ./main, times a range of matrix
# sizes, and keeps the value extractBest.py reports as fastest.
# Results are written under Output/.
#
# The two original functions were ~55-line duplicates differing only in
# constants/strings; they are deduplicated into one parameterized sweep.

# Run one tuning sweep.
#   $1 - results file name (written to Output/)
#   $2 - name of the "const int" in matrix.h being tuned
#   $3 - multiplication method passed to ./main via -m
#   $4 $5 $6 - constant sweep: start / step / end (exclusive)
#   $7 $8 $9 - matrix-size sweep: start / step / end (exclusive)
# Side effects: rewrites matrix.h, rebuilds ./main, writes Output/$1.
# Returns 1 if extractBest.py does not yield a numeric best value.
run_sweep(){
    file_name="$1"
    const_name="$2"
    method="$3"
    a="$4"
    b="$5"
    c="$6"
    d="$7"
    e="$8"
    f="$9"
    resoult_string=""
    echo $a
    echo $b
    echo $c
    mkdir Output
    resoult_string+="Tests \n"
    resoult_string+="Velikosti dim: \n"
    for ((i=a;i<c;i+=b)); do
        resoult_string+="${i};"
    done
    resoult_string+="\n"
    resoult_string+="Velikosti matrik: \n"
    for ((j=d;j<f;j+=e)); do
        resoult_string+="${j};"
    done
    resoult_string+="\n"
    # NOTE: intentionally left unquoted, exactly as in the original.
    printf ${resoult_string}
    for ((i=a;i<c;i+=b)); do
        # Patch the constant, rebuild, then time every matrix size.
        sed -i "s/.*const int ${const_name}.*;/const int ${const_name} = ${i};/" matrix.h
        g++ *.cpp -O3 -march=native -lboost_program_options -o main
        printf "Parameter: ${i}\n"
        resoult_string+="Parameter: ${i}\n"
        for ((j=d;j<f;j+=e)); do
            x=$(./main -m ${method} -a ${j} -b ${j} -c ${j} -r 5)
            printf "${j};${x};\n"
            resoult_string+="${j};${x};\n"
        done
    done
    now=$(date +"%T")
    printf "Testing parameters \n" > "Output/${file_name}"
    printf "Current time: $now \n" >> "Output/${file_name}"
    sys_info="$(lscpu)"
    printf "${sys_info}\n" >> "Output/${file_name}"
    # extractBest.py picks the fastest constant from the raw timings.
    best_resoult=$(python3 extractBest.py "${resoult_string}" 2>&1)
    echo "Best const:"
    echo "${best_resoult}"
    case "${best_resoult}" in
        ''|*[!0-9]*) echo "error: Best resoult is not a number ${best_resoult}" ; return 1;;
        *) echo "succes: Best resoult is number ${best_resoult}" ;;
    esac
    # Bake the winning constant into matrix.h and rebuild once more.
    sed -i "s/.*const int ${const_name}.*;/const int ${const_name} = ${best_resoult};/" matrix.h
    g++ *.cpp -O3 -march=native -lboost_program_options -o main
    printf "Best: \n" >> "Output/${file_name}"
    printf "${best_resoult}\n" >> "Output/${file_name}"
    printf "${resoult_string}" >> "Output/${file_name}"
}

# Tune RECT_DIM for the recursive_transposed method.
optimize_recursive(){
    run_sweep "resoults_recursive.txt" "RECT_DIM" "recursive_transposed" 16 2 32 500 100 1500
}

# Tune SUBC_DIM for the subcubic method.
optimize_subcub(){
    run_sweep "resoults_subcub.txt" "SUBC_DIM" "subcubic" 64 32 256 2000 100 3000
}

echo "Comand sed can take line number for fast performance for example 7s.."
# Reset both constants, then run each sweep with the matching TODO mode.
sed -i "s/.*const int RECT_DIM.*;/const int RECT_DIM = 256;/" matrix.h
sed -i "s/.*const int SUBC_DIM.*;/const int SUBC_DIM = 512;/" matrix.h
sed -i "2s/.*TODO.*;/const int TODO = 2;/" matrix.h
optimize_recursive
sed -i "2s/.*TODO.*;/const int TODO = 1;/" matrix.h
optimize_subcub
sed -i "2s/.*TODO.*;/const int TODO = 0;/" matrix.h
| true
|
d229fb5df1f02cbe0d3f839e0193f7fc8a0947f9
|
Shell
|
o-ran-sc/it-test
|
/test_scripts/O2IMS_Compliance_Test/mock.sh
|
UTF-8
| 2,377
| 3.21875
| 3
|
[
"Apache-2.0",
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Registers mock SMO HTTP expectations on a MockServer instance for the
# O2IMS compliance tests.
# $1 mocker server host
# $2 mocker server port
if [ "$#" -ne 2 ]; then
    echo "need 2 arguments"
    exit 1
fi
export mockserver_host=$1
export mockserver_port=$2
export SMO_ENDPOINT="https://${mockserver_host}:${mockserver_port}/mockserver"

# Valid O-Cloud registration endpoint (returns 204).
VALID_CLOUD_OBSERVER="{
    \"id\": \"mock_smo_registration\",
    \"httpRequest\" : {
        \"path\" : \"/mock_smo/v1/ocloud_observer\",
        \"method\":\"POST\"
    },
    \"httpResponse\" : {
        \"statusCode\": 204,
        \"body\" : {
            \"status\": 204,
            \"result\": \"Welcome to mocked smo server!\"
        }
    },
    \"priority\" : 10
}"

# Failing O-Cloud registration endpoint (returns 500).
INVALID_CLOUD_OBSERVER="{
    \"id\": \"invalid_mock_smo_registration\",
    \"httpRequest\" : {
        \"path\" : \"/mock_smo/v1/invalid_ocloud_observer\",
        \"method\":\"POST\"
    },
    \"httpResponse\" : {
        \"statusCode\": 500,
        \"body\" : {
            \"status\": 500,
            \"result\": \"mocked smo server invalid\"
        }
    },
    \"priority\" : 10
}"

# Inventory-change notification callback endpoint (returns 204).
O2IMS_INVENTORY_OBSERVER="{
    \"id\": \"mock_smo_inventory_change_notification_endpoint_registration\",
    \"httpRequest\" : {
        \"path\" : \"/mock_smo/v1/o2ims_inventory_observer\",
        \"method\":\"POST\"
    },
    \"httpResponse\" : {
        \"statusCode\": 204,
        \"body\" : {
            \"status\": 204,
            \"result\": \"this is mocked inventory change notification callback\"
        }
    },
    \"priority\" : 10
}"

# Alarm notification callback endpoint (returns 204).
O2IMS_ALARM_OBSERVER="{
    \"id\": \"mock_smo_alarm_notification_endpoint_registration\",
    \"httpRequest\" : {
        \"path\" : \"/mock_smo/v1/o2ims_alarm_observer\",
        \"method\":\"POST\"
    },
    \"httpResponse\" : {
        \"statusCode\": 204,
        \"body\" : {
            \"status\": 204,
            \"result\": \"Welcome to mocked smo server alarm notification endpoint\"
        }
    },
    \"priority\" : 10
}"

# Upload one expectation document ($1: JSON body) to the mock server.
# (Deduplicates the four previously copy-pasted curl invocations.)
put_expectation() {
    curl -s -k -X PUT $SMO_ENDPOINT/expectation --header 'Content-Type: application/json' \
    --header 'Accept: application/json' \
    -d "$1"
}

put_expectation "${VALID_CLOUD_OBSERVER}"
put_expectation "${INVALID_CLOUD_OBSERVER}"
put_expectation "${O2IMS_INVENTORY_OBSERVER}"
put_expectation "${O2IMS_ALARM_OBSERVER}"
exit
| true
|
2254cc3a91aebe7ed2bcb8174e905cffee6a90d2
|
Shell
|
tc-ca/webapp-quickstart
|
/setup.sh
|
UTF-8
| 4,869
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# One-shot (re)generation of all secrets, certificates and metadata for
# the webapp quickstart stack: .env, docker registry TLS, httpd TLS,
# the Shibboleth IdP (via the unicon image) and the SP. Destructive:
# wipes ./out and ./secrets before rebuilding everything.
echo "This will delete existing .env, credentials and metadata!"
echo "Windows users: make sure this is running in winpty"
echo "Linux users: this might need to be run as root since the folders the docker process creates aren't managed by the normal user"
read -p "Press enter to continue"
# Breaks on windows unless this is set
export MSYS_NO_PATHCONV=1
# Autofill value for openssl cert subjects
subj="/CN=webapp"
rm -rf ./out/ || true
rm -rf ./secrets/ || true
mkdir ./secrets/
mkdir ./secrets/idp
mkdir ./secrets/sp
mkdir ./out/
#
# Setup .env (database SA password)
#
echo "MSSQL SA passwords must be at least 8 characters long and contain three of the following:"
echo "- Uppercase letters"
echo "- Lowercase letters"
echo "- Base 10 digits"
echo "- Symbols"
read -s -p "Enter the database SA password:" SAP
echo "SA_PASSWORD=$SAP" > .env
#
# Setup registry
#
printf "\n\nSetting up registry\n"
cd ./00-registry/
rm -r certs || true
mkdir certs
cd certs
# Self-signed TLS cert for the registry.
openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout domain.key -out domain.crt -subj "$subj"
chown $USER:docker ./*
chmod o+r ./*
cd ..
cp .env.example .env
cd ..
#
# Setup httpd
#
printf "\n\nSetting up httpd\n"
cd 01-httpd/etc-httpd/
rm -r ./ssl/ || true
mkdir ssl
cd ssl
# Self-signed TLS cert for httpd.
openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout domain.key -out domain.crt -subj "$subj"
chown $USER:docker ./*
chmod ugo+r ./*
cd ../../..
#
# Setup idp
#
printf "\n\nSetting up IdP\n"
rm -r ./03-idp/shibboleth-idp/credentials/ || true
mkdir ./03-idp/shibboleth-idp/credentials/
# Run the interactive unicon IdP initialisation inside docker; it
# writes a customized-shibboleth-idp tree into ./out.
echo "Running unicon setup."
echo "Expected domain: webapp"
docker run -it -v $(pwd)/out:/ext-mount --rm unicon/shibboleth-idp sh -c "init-idp.sh && chmod o+rw -R /ext-mount"
# Copy results to proper locations: public material into the IdP tree,
# everything else into ./secrets/idp.
mkdir ./03-idp/shibboleth-idp/credentials/
mkdir ./03-idp/shibboleth-idp/metadata/
mv ./out/customized-shibboleth-idp/credentials/{idp-backchannel.crt,idp-encryption.crt,idp-signing.crt,sealer.kver} ./03-idp/shibboleth-idp/credentials/
mv ./out/customized-shibboleth-idp/credentials/* ./secrets/idp/
mv ./out/customized-shibboleth-idp/metadata/idp-metadata.xml ./03-idp/shibboleth-idp/metadata/idp-metadata.xml
# Remove validUntil from idp metadata so it never expires.
sed -i.bak -e 's/validUntil="[^"]*" //' 03-idp/shibboleth-idp/metadata/idp-metadata.xml
rm 03-idp/shibboleth-idp/metadata/idp-metadata.xml.bak
read -s -p "Re-enter backchannel password for compose:" backchannel
printf "\n"
# Create idp browser keystore (PEM cert converted to PKCS#12).
cd ./secrets/idp/
openssl req -x509 -sha256 -nodes -days 256 -newkey rsa:2048 -keyout idp-browser.pem -out idp-browser.crt -subj "$subj"
read -s -p "Enter browser keystore password: " browser
printf "\n"
openssl pkcs12 -inkey idp-browser.pem -in idp-browser.crt -export -out idp-browser.p12 -passout pass:${browser}
chown $USER:docker ./*
chmod o+r ./*
cd ../..
# Add store passwords to compose.
# NOTE(review): this writes the passwords in clear text into
# docker-compose.yml — keep that file out of version control.
sed -i "s/JETTY_BROWSER_SSL_KEYSTORE_PASSWORD: .*/JETTY_BROWSER_SSL_KEYSTORE_PASSWORD: ${browser}/" docker-compose.yml
sed -i "s/JETTY_BACKCHANNEL_SSL_KEYSTORE_PASSWORD: .*/JETTY_BACKCHANNEL_SSL_KEYSTORE_PASSWORD: ${backchannel}/" docker-compose.yml
# Transfer idp.properties values (scope, sealer passwords) from the
# generated tree into the checked-in conf/idp.properties.
sed -i "s/idp.scope=.*/idp.scope= $( \
grep -Eow "idp.scope=(.*)" \
./out/customized-shibboleth-idp/conf/idp.properties \
| sed 's/idp.scope=//' \
)/" ./03-idp/shibboleth-idp/conf/idp.properties
sed -i "s/idp.sealer.storePassword=.*/idp.sealer.storePassword= $( \
grep -Eow "idp.sealer.storePassword=(.*)" \
./out/customized-shibboleth-idp/conf/idp.properties \
| sed 's/idp.sealer.storePassword=//' \
)/" ./03-idp/shibboleth-idp/conf/idp.properties
sed -i "s/idp.sealer.keyPassword=.*/idp.sealer.keyPassword= $( \
grep -Eow "idp.sealer.keyPassword=(.*)" \
./out/customized-shibboleth-idp/conf/idp.properties \
| sed 's/idp.sealer.keyPassword=//' \
)/" ./03-idp/shibboleth-idp/conf/idp.properties
#
# Setup sp
#
printf "\n\nSetting up SP\n"
rm ./04-sp/etc-shibboleth/sp-cert.pem
# SP key/cert pair: private key to ./secrets, public cert to the SP tree.
openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout ./secrets/sp/sp-key.pem -out ./04-sp/etc-shibboleth/sp-cert.pem -subj "$subj"
chown $USER:docker ./secrets/sp/sp-key.pem
chmod o+r,g+rw ./secrets/sp/sp-key.pem
# Fetch new SP metadata: start the sp container, poll until its
# Metadata endpoint answers 200, download it, then stop the container.
echo "Retrieving SP metadata, expecting availability on localhost"
( rm ./03-idp/shibboleth-idp/metadata/sp-metadata.xml || true ) \
&& docker-compose up -d --build sp \
&& bash -c 'while [[ "$(curl --insecure -s -o /dev/null -w ''%{http_code}'' https://localhost/Shibboleth.sso/Metadata)" != "200" ]]; do sleep 5; done' \
&& curl -o ./03-idp/shibboleth-idp/metadata/sp-metadata.xml https://localhost/Shibboleth.sso/Metadata --insecure \
&& docker-compose stop sp
#
# Cleanup
#
echo "Cleanup"
# rm -r ./out/
#
# Finish
#
echo "Secrets have been regenerated!"
echo "Remember to rebuild next compose"
| true
|
b9e49a87a9849f318b9fc4480028947773240eca
|
Shell
|
shvenkat/dotfiles
|
/installers/install-tlaplus-proofsystem
|
UTF-8
| 847
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Installer stub for the TLA+ Proof System (TLAPS).
# NOTE(review): the actual download/verify/extract steps are all
# commented out below — as written this script only defines variables
# and exits; uncomment (and fill in the sha256) to make it functional.
set -e -o pipefail -u
# License: BSD License
# (https://tla.msr-inria.inria.fr/tlaps/content/Download/License.html).
# Source: https://tla.msr-inria.inria.fr/tlaps/dist/current/tlaps-1.4.3.tar.gz
# Instructions: https://tla.msr-inria.inria.fr/tlaps/content/Download/Source.html
url='https://tla.msr-inria.inria.fr/tlaps/dist/current/tlaps-1.4.3.tar.gz'
archive='tlaps-1.4.3.tar.gz'
# sha256=''
install_dir="${HOME}/install/tlaps"
# Download, verify checksum, and unpack into $install_dir:
# curl -f -o "$archive" "$url"
# if [[ "$(sha256sum -b < "$archive" | cut -d' ' -f1)" != "$sha256" ]]; then
#     echo "Downloaded file has incorrect checksum." 1>&2
#     exit 1
# fi
# unzip -t "$archive" >/dev/null 2>&1
# mkdir -p "$install_dir"
# unzip -d "$(dirname "$install_dir")" "$archive" >/dev/null
# Documentation.
# https://tla.msr-inria.inria.fr/tlaps/content/Documentation/Tutorial.html
| true
|
5895cb9f20c845c46bc67f8bf369c04e6fa43522
|
Shell
|
uc-cdis/cloud-automation
|
/gen3/bin/monitor-pod.sh
|
UTF-8
| 1,616
| 3.90625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# kubescope is a tool that would let you monitor a pod more easily and in real time
#
# Load the gen3 basics (provides g3kubectl, g3k_kv_filter, `gen3 pod`).
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/lib/kube-setup-init"
if [[ -n "$JENKINS_HOME" ]]; then
echo "Jenkins skipping fluentd setup: $JENKINS_HOME"
exit 0
fi
# $1 is the pod (or service) to monitor; required.
if [ $# -eq 0 ];
then
echo "please provide a pod to monitor"
exit 1
fi
# Deploy a kubescope-cli pod pinned to the target pod's node and attach
# to it interactively.
#   $1 - monitor pod base name   $2 - pod to monitor   $3 - node name
# Polls up to ~11 times (2s apart) for the monitor pod to become ready
# before giving up silently.
function deploy-KS() {
local node
local threshold=10
local now=$(date +%s)
local name="${1}-${now}"
local what="${2}"
local node="${3}"
g3k_kv_filter "${GEN3_HOME}/kube/services/kubescope/kubescope-cli.yaml" MONITOR_NAME ${name} TO_MONITOR ${what} NODE ${node} | g3kubectl apply -f -
sleep 2
while [ ${threshold} -ge 0 ];
do
g3kubectl get pod ${name}
if [ $? -eq 0 ];
then
local ready=$(g3kubectl get pod ${name} -o jsonpath='{.status.containerStatuses[].ready}')
if [ "${ready}" == "true" ];
then
g3kubectl attach -it ${name}
break;
fi
fi
threshold=$(( threshold - 1 ))
sleep 2
done
}
# Resolve $1: either an exact pod name, or a service name translated
# through `gen3 pod`; derive the monitor name and the pod's node.
echo "Reviewing provided arguments"
if ( g3kubectl get pod ${1} > /dev/null 2>&1);
then
pod=${1}
name="$(echo ${pod} | egrep -o "^[a-z0-9]*\-[a-z0-9]*")-monitor"
node=$(g3kubectl get pod ${pod} -o jsonpath="{.spec.nodeName}")
else
pod=$(gen3 pod ${1})
if [ $? -eq 0 ];
then
name="$(echo ${pod} | egrep -o "^[a-z0-9]*\-[a-z0-9]*")-monitor"
node=$(g3kubectl get pod ${pod} -o jsonpath="{.spec.nodeName}")
#podi=$(echo $pod | egrep -o "^[a-z0-9]*\-[a-z0-9]*")
# deploy-KS $name $pod $node
fi
fi
deploy-KS ${name} ${pod} ${node}
| true
|
03d5453c7514755211cb4702140d4f62661c4e3e
|
Shell
|
samoshkin/tls-nginx-strong-example
|
/cron/renew_certs.sh
|
UTF-8
| 877
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Cron job: renew Let's Encrypt certificates via the letsencrypt-certgen
# image (webroot challenge), then HUP the running nginx container so it
# picks up the new certs. Requires PROJECT_NAME in the environment.
set -eux
# Since we start our services using docker compose,
# volumes, images and containers are prefixed with COMPOSE_PROJECT_NAME.
DOCKER_PREFIX="${PROJECT_NAME}_";
# Run 'asamoshkin/letsencrypt-certgen' image to renew certificates;
# the named compose volumes are mounted so renewed material lands where
# nginx reads it. Tuning env vars (DOMAINS, STAGING, ...) pass through.
docker run \
-v ${DOCKER_PREFIX}ssl:/var/ssl \
-v ${DOCKER_PREFIX}acme_challenge_webroot:/var/acme_challenge_webroot \
-v ${DOCKER_PREFIX}letsencrypt:/etc/letsencrypt \
-v ${DOCKER_PREFIX}acme:/etc/acme \
-e CHALLENGE_MODE=webroot \
-e SSL_GROUP_ID \
-e RSA_KEY_LENGTH \
-e ECDSA_KEY_LENGTH \
-e DOMAINS \
-e STAGING \
-e VERBOSE \
--rm \
asamoshkin/letsencrypt-certgen:v0.1.1 renew
# Reload nginx once certificates are renewed; SIGHUP triggers a
# graceful config/cert reload without dropping connections.
nginx_container_id=$(docker ps -q -f name="${DOCKER_PREFIX}nginx_*" | head -n 1)
if [ -n "$nginx_container_id" ]; then
docker kill -s HUP "$nginx_container_id"
fi
| true
|
5ebd7ab5b9fb9521c2cad210b4d4c1615a6009e6
|
Shell
|
jaczkal/lightning-systemic-attack
|
/sh/dot2jpg
|
UTF-8
| 218
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# dot2jpg - render a Graphviz .dot file to JPEG.
# usage: dot2jpg INPUT.dot OUTPUT.jpg [dpi=300]
IN=$1
OUT=$2
if [[ -z $IN ]] || [[ -z $OUT ]]; then
    echo "usage: dot2jpg INPUT.dot OUTPUT.jpg [dpi=300]"
    exit 1
fi
# Optional third argument overrides the default 300 dpi.
DPI=${3:-300}
# Bug fix: the dpi value was previously hard-coded as -Gdpi=300,
# silently ignoring the third argument.
dot -Tjpg -Gdpi=$DPI $IN -o $OUT
| true
|
950745fceb0bf252716322555663f38b96b3917d
|
Shell
|
mkwon0/qemu-init
|
/test-web/install.sh
|
UTF-8
| 819
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap and run the nanoninja docker-nginx-php-mysql demo stack.
WORK_DIR=/home/mkwon
# Install git and fetch the upstream compose project.
install_pre() {
yum install -y git
cd $WORK_DIR && git clone https://github.com/nanoninja/docker-nginx-php-mysql.git
}
# Generate self-signed TLS material and enable the SSL server block in
# the nginx template (uncomments lines 24-48).
configure_ssl() {
cd $WORK_DIR/docker-nginx-php-mysql && \
source .env && \
docker run --rm -v $(pwd)/etc/ssl:/certificates -e "SERVER=$NGINX_HOST" jacoelho/generate-certificate && \
sed -i '24,48 s/^#//' etc/nginx/default.template.conf
}
# Fetch and initialise the docker-compose-swap helper project.
install_docker_compose() {
cd $WORK_DIR && \
git clone git@github.com:mkwon0/docker-compose-swap.git && \
cd docker-compose-swap && \
./init.sh && \
cp web/app/composer.json.dist web/app/composer.json
}
# Bring the stack up detached and follow its logs (blocks).
run_application() {
cd $WORK_DIR/docker-nginx-php-mysql && \
docker-compose up -d && \
docker-compose logs -f
}
main() {
# One-time setup steps are intentionally disabled; only the
# application run remains active. Re-enable as needed.
# install_pre
# configure_ssl
# install_docker_compose
run_application
}
main
| true
|
6179fd31fa19909c1373aa268e644f0851a23ae4
|
Shell
|
csfera/openstack-ccp
|
/sync.sh
|
UTF-8
| 590
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Push the local openstack-helm checkouts under /opt to every k8s node.
REPOS=( 'openstack-helm' 'openstack-helm-infra' )
TGTS=( 'k8s-m' 'k8s-w0' 'k8s-w1' 'k8s-w2' 'k8s-w3' )
# Non-interactive ssh: never prompt for or record host keys.
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
for tgt in "${TGTS[@]}" ; do
  # Make /opt writable by the ubuntu user before syncing into it.
  ssh ${SSH_OPTIONS[@]} ubuntu@${tgt} "sudo chown -R ubuntu: /opt"
  for repo in "${REPOS[@]}" ; do
    # Fix: reuse SSH_OPTIONS for the rsync transport instead of the
    # previously duplicated hand-written option string (same options,
    # single source of truth).
    rsync -azv -e "ssh ${SSH_OPTIONS[*]}" --progress --delete /opt/${repo}/ ubuntu@${tgt}:/opt/${repo}
  done
done
| true
|
f74f0b369d44cfd2ac32dfe986c8092754e705f8
|
Shell
|
joaofel-u/Data_Structures
|
/exec_gtest_cpp.sh
|
UTF-8
| 208
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Compile a single GoogleTest C++ test file against the static gtest
# library, run it once, then remove the binary.
# usage: exec_gtest_cpp.sh <directory> <test-file-basename>
if [ "$#" -lt 2 ];
then
    echo "Params[Diretorio][Nome arquivo de teste]"
else
    # Quote the source path so directories containing spaces work.
    g++ "$1/$2.cpp" /usr/lib/libgtest.a -o test.bin -fPIC -g -Wall -lpthread -std=c++11
    ./test.bin
    rm test.bin
fi
| true
|
a47b28a7eacc9387fe5bee7102134b3d2c8c33df
|
Shell
|
nkininge/provisioning_new_latest
|
/setup-slaves/roles/jenkins-slaves/files/S99massageHostname
|
UTF-8
| 436
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
### BEGIN INIT INFO
# Provides: massageHostname
# Required-Start:
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Massage hostname to FQDN
### END INIT INFO
# Boot-time init script for EC2 instances: ensures the hostname is
# fully qualified. Queries the EC2 instance metadata service (the
# link-local 169.254.169.254 address) for the real hostname; if it
# already contains a dot it is used as-is, otherwise ".ec2.internal"
# is appended.
set -e
realHost=` curl -s http://169.254.169.254/latest/meta-data/hostname `
case "$realHost" in
*.*) hostname $realHost
;;
*) echo Setting hostname to ${realHost}.ec2.internal
hostname ${realHost}.ec2.internal
;;
esac
| true
|
0d435794b56cfc9adcb043103433e5c03fe1c776
|
Shell
|
jafingerhut/p4-guide
|
/p4runtime-translation/compile.sh
|
UTF-8
| 252
| 3.046875
| 3
|
[] |
no_license
|
#! /bin/bash
# Compile every weird-case*.p4 program for the bmv2/v1model target,
# emitting a P4Runtime p4info file alongside each. The compile command
# itself is echoed via `set -x` for visibility.
P4C="p4c"
for j in weird-case*.p4
do
    # Output basename: strip the .p4 suffix ($() replaces backticks,
    # and $j is quoted for safety).
    k=$(basename "$j" .p4)
    echo "------------------------------------------------------------"
    set -x
    ${P4C} --target bmv2 --arch v1model --p4runtime-files ${k}.p4info.txt "$j"
    set +x
done
| true
|
efa93c2e683b7fb7ea8bc29181424955af7ce0f4
|
Shell
|
byb121/ExomePipe_NCL_IGM
|
/GATK_Variant_Call_pipe/Annotation/Launcher_VCF_filters.sh
|
UTF-8
| 2,366
| 3.21875
| 3
|
[] |
no_license
|
#! /bin/bash
# Launcher: for each sample (or underscore-joined family of samples),
# submits a VCF_filters.sh job via qsub that selects the sample(s) from
# the combined GATK VCF and annotates/filters them into xlsx reports.
################## Paras need to be adjusted for diff samples ##############
# Sample IDs; entries containing '_' are split into multiple -se flags.
INALL=(D05685 D08064 D10020 D100563 D11016 D114041 D115022 D122641 D122836 D13415 D146989 D163417 D164942 D169918 D170484 D171748 D171987 D174381 D36421 D62925 D83732 D86968 D88892 D9646 D99945 D16618_D050227_D62018 D122836_D99945_D62925)
SAMPLEDIR=$(pwd) # the folder containing the VCF file which contains all of the samples
INPUT_VCF="$SAMPLEDIR/New_HaplotypeCaller_recali_SNP_INDEL.vcf"
REF_FILE="/users/data/GATK/bundle2.8/hg19/ucsc.hg19.YAOBO.fasta"
AnnovarDIR="/users/a5907529/data/Files_HG/vcf_annotation_november2013"
ANOVAR="N" #use to indicate if Annovar output has been produced; Y means yes, then the After_Annovar script will be used
PERL_SCRIPT_DIR=$SAMPLEDIR # change if the annotation perl script is stored somewhere else
PERL_SCRIPT="${PERL_SCRIPT_DIR}/VCF_2_annotated_xlsx_20140501.pl"
PERL_SCRIPT_AF_ANOVAR="${PERL_SCRIPT_DIR}/VCF_2_annotated_xlsx_20140501_After_Annovared.pl" #note this name has to contain 'After_Annovared', otherwise GATK will start to select samples
GENO_CALL_FLAG="Yes"
CNV="/users/a5907529/lustre/Sophie_unsovled_cases/CNVs/batch_20130528/batch_20130528_annotated_CNVs.txt"
InterestedGENES="/users/a5907529/lustre/Sophie_unsovled_cases/Genes_PID_01MAY2014_YAOBO.txt"
# Output file naming conventions.
VCF_TAIL="_selected.vcf"
FILTERED_TAIL="_filtered.xlsx" # output excel file name tail of filtered variants, has to end with xlsx
EVERYTHING_TAIL="_Everything.xlsx" # output excel file name tail of all variants, has to end with xlsx
###############################
for SAMPLE in "${INALL[@]}"
do
# Split multi-sample entries on '_' and build "-se ID" pairs for each.
IFS='_' read -a EXPs <<< "$SAMPLE" #### split string
STRING=""
for EXP in "${EXPs[@]}"
do
STRING="${STRING}-se "
STRING="${STRING}$EXP "
done
echo $STRING
SAMPLE_SELECTED_VCF="${SAMPLEDIR}/${SAMPLE}${VCF_TAIL}"
OUTPUT="${SAMPLEDIR}/${SAMPLE}${FILTERED_TAIL}"
OUTPUT_ALL="${SAMPLEDIR}/${SAMPLE}${EVERYTHING_TAIL}"
# Choose the pre- or post-Annovar annotation script.
if [ $ANOVAR == "Y" ]
then
SCRIPT="${PERL_SCRIPT_AF_ANOVAR}"
else
SCRIPT="${PERL_SCRIPT}"
fi
#echo "${SAMPLEDIR}/VCF_filters.sh $SCRIPT ${SAMPLE_SELECTED_VCF} $OUTPUT $OUTPUT_ALL $INPUT_VCF $STRING $REF_FILE $CNV $InterestedGENES"
# Submit one cluster job per sample with the assembled argument list.
arr=("$SCRIPT" "${SAMPLE_SELECTED_VCF}" "$OUTPUT" "$OUTPUT_ALL" "$INPUT_VCF" "$STRING" "$REF_FILE" "$CNV" "$InterestedGENES" "$AnnovarDIR")
qsub ${SAMPLEDIR}/VCF_filters.sh "${arr[@]}"
done
| true
|
2a1dd64927a2fa99145b60a5bcea6bf124bc7be4
|
Shell
|
CRamsan/Customization
|
/deploy_termux.sh
|
UTF-8
| 499
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/data/data/com.termux/files/usr/bin/bash
# Termux-specific deploy: fix shebangs for the Termux prefix, run the
# shared deploy script, then normalise the shebang of each file given
# on the command line.
find Customization/ -type f -name "*.sh" -exec termux-fix-shebang {} \;
. deploy.sh
# Bug fix: both loops declared `file` but operated on "$@" (ALL
# arguments) on every iteration; they now process the current $file.
for file in "$@"; do
    # Do realpath to avoid breaking symlinks (modify original file):
    sed -i -E "1 s@^#\!/data/data/com.termux/files/usr/bin/\!(.*)/bin/(.*)@#\2@" "$(realpath "$file")"
done
for file in "$@"; do
    # Do realpath to avoid breaking symlinks (modify original file):
    sed -i -E "1 s@^#\!(.*)/bin/(.*)@#\!/data/data/com.termux/files/usr/bin/\2@" "$(realpath "$file")"
done
| true
|
5f60ffa9b1682df41f52b10a5856b869b726a8a6
|
Shell
|
ubnt-intrepid/msgpack-rpc-rs
|
/scripts/deploy_doc.sh
|
UTF-8
| 431
| 2.953125
| 3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
#!/bin/bash
# Publish rustdoc output to gh-pages from Travis CI builds.
# Shebang fix: the script uses bash-only features ([[ ]] and
# `set -o pipefail`), which fail under a POSIX /bin/sh such as dash.
set -euo pipefail
# Positional arguments supplied by the CI configuration.
TRAVIS_BRANCH=$1
TRAVIS_RUST_VERSION=$2
GH_TOKEN=$3
TRAVIS_REPO_SLUG=$4
# Only publish docs for the stable-toolchain build of the master branch.
if [[ "${TRAVIS_BRANCH:-}" = "master" ]] && [[ "${TRAVIS_RUST_VERSION}" = "stable" ]]; then
    cargo doc --all-features --no-deps
    # cargo install --force cobalt-bin
    # cobalt build -s site -d target/doc
    ghp-import -n target/doc
    # -q keeps the token-bearing URL out of the build log.
    git push -qf "https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git" gh-pages
fi
| true
|
38bd29f8da1bd7dcd9299a6976c2ed294690f117
|
Shell
|
csdms/ccaffeine
|
/cxx/util/genDLIndexXML
|
UTF-8
| 1,250
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
# This script generates an xml index file for the library
# given, under loose cca assumptions. The componentDeployment XML
# document is written to stdout.
#
# Arguments:
#   $1 libname            $2 constructorName   $3 className
#   $4 install-lib-dir    $5 palette-alias     $6 bindingType
#
# (The unused pstring/palias variables from the original were removed;
# the heredoc reads $5 directly.)
dstring=$(date)

# Print the usage line and abort; invoked whenever a required argument
# is missing (deduplicates the three previously repeated messages).
usage() {
    echo "$0:usage: <libname> <constructorName> <className> <install-lib-dir> <palette-alias> <bindingType>"
    exit 1
}

if test "x$4" = "x" ; then
    usage
fi
if test "x$5" = "x"; then
    usage
fi
if test "x$6" = "x"; then
    usage
fi
if test "x$6" = "xbabel"; then
    echo "$0:deprecated: bindingType 'babel' is no longer supported with this script."
    echo " Use genSCLCCA.sh from cca-spec-babel installation instead."
    exit 1
fi
cat << __EOF1
<xml>
<!-- # generated CCAFFEINE dynamic library index. -->
<!-- date=$dstring -->
<!-- builder=$USER@$HOST -->
<!-- $0 $* -->
<componentDeployment
name="$3"
paletteClassAlias="$5"
>
<environment>
<ccaSpec binding="$6" />
<library loading="dynamic"
name="$1"
constructor="$2"
location="$4"
/>
</environment>
</componentDeployment>
</xml>
__EOF1
exit 0
| true
|
d3e979ecc629b6e777f555725c653496b9e8454e
|
Shell
|
lonesafe/openwrt-packages
|
/luci-app-cloudflarespeedtest/root/usr/bin/cloudflarespeedtest/cloudflarespeedtest.sh
|
UTF-8
| 9,056
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Helper definitions for the luci-app-cloudflarespeedtest service
# script (OpenWrt): paths, uci config loaders, logging, and per-run
# state initialisation.
# NOTE(review): despite the /bin/sh shebang, `function` and `[[ ]]`
# below are bashisms — this relies on the platform shell accepting
# them; confirm against the target BusyBox/ash build.
LOG_FILE='/var/log/cloudflarespeedtest.log'
IP_FILE='/usr/share/cloudflarespeedtestresult.txt'
IPV4_TXT='/usr/share/CloudflareSpeedTest/ip.txt'
IPV6_TXT='/usr/share/CloudflareSpeedTest/ipv6.txt'
# Load each named option from the uci section
# cloudflarespeedtest.global into a same-named shell variable.
# Uses eval; option names come from the caller, not user input.
function get_global_config(){
while [[ "$*" != "" ]]; do
eval ${1}='`uci get cloudflarespeedtest.global.$1`' 2>/dev/null
shift
done
}
# Same as get_global_config, but reads cloudflarespeedtest.servers.
function get_servers_config(){
while [[ "$*" != "" ]]; do
eval ${1}='`uci get cloudflarespeedtest.servers.$1`' 2>/dev/null
shift
done
}
# Log a timestamped message to stdout and append it to LOG_FILE.
echolog() {
local d="$(date "+%Y-%m-%d %H:%M:%S")"
echo -e "$d: $*"
echo -e "$d: $*" >>$LOG_FILE
}
# Populate every tunable this script reads from uci configuration.
function read_config(){
get_global_config "enabled" "speed" "custome_url" "threads" "custome_cors_enabled" "custome_cron" "t" "tp" "dt" "dn" "dd" "tl" "tll" "ipv6_enabled" "advanced" "proxy_mode"
get_servers_config "ssr_services" "ssr_enabled" "passwall_enabled" "passwall_services" "passwall2_enabled" "passwall2_services" "bypass_enabled" "bypass_services" "vssr_enabled" "vssr_services" "DNS_enabled" "HOST_enabled"
}
# Reset the "proxy app was stopped for the test" markers for this run.
function appinit(){
ssr_started='';
passwall_started='';
passwall2_started='';
bypass_started='';
vssr_started='';
}
# Run cdnspeedtest against the Cloudflare IP pool and write the ranked
# result CSV to $IP_FILE.  While the test runs, every proxy app that is
# currently active is either shut down ($proxy_mode == "close") or
# switched to gfw-list-only mode ($proxy_mode == "gfw") so the
# measurement is not routed through a tunnel; the previous settings are
# saved in *_original_* variables and restored later by restart_app().
function speed_test(){
rm -rf $LOG_FILE
# Build the cdnspeedtest command line; -sl converts the configured
# Mbit/s floor into MB/s.  NOTE: $command is later expanded unquoted on
# purpose so it word-splits back into arguments.
command="/usr/bin/cdnspeedtest -sl $((speed*125/1000)) -url ${custome_url} -o ${IP_FILE}"
if [ $ipv6_enabled -eq "1" ] ;then
command="${command} -f ${IPV6_TXT} -ipv6"
else
command="${command} -f ${IPV4_TXT}"
fi
# Advanced mode takes all tuning knobs from the config; otherwise use
# fixed defaults.
if [ $advanced -eq "1" ] ; then
command="${command} -tl ${tl} -tll ${tll} -n ${threads} -t ${t} -dt ${dt} -dn ${dn}"
if [ $dd -eq "1" ] ; then
command="${command} -dd"
fi
if [ $tp -ne "443" ] ; then
command="${command} -tp ${tp}"
fi
else
command="${command} -tl 200 -tll 40 -n 200 -t 4 -dt 10 -dn 1"
fi
appinit
# shadowsocksr: save current server/run-mode, then disable or gfw-limit.
ssr_original_server=$(uci get shadowsocksr.@global[0].global_server 2>/dev/null)
ssr_original_run_mode=$(uci get shadowsocksr.@global[0].run_mode 2>/dev/null)
if [ "x${ssr_original_server}" != "xnil" ] && [ "x${ssr_original_server}" != "x" ] ;then
if [ $proxy_mode == "close" ] ;then
uci set shadowsocksr.@global[0].global_server="nil"
elif [ $proxy_mode == "gfw" ] ;then
uci set shadowsocksr.@global[0].run_mode="gfw"
fi
ssr_started='1';
uci commit shadowsocksr
/etc/init.d/shadowsocksr restart
fi
# passwall: same dance via its enabled/tcp_proxy_mode options.
passwall_server_enabled=$(uci get passwall.@global[0].enabled 2>/dev/null)
passwall_original_run_mode=$(uci get passwall.@global[0].tcp_proxy_mode 2>/dev/null)
if [ "x${passwall_server_enabled}" == "x1" ] ;then
if [ $proxy_mode == "close" ] ;then
uci set passwall.@global[0].enabled="0"
elif [ $proxy_mode == "gfw" ] ;then
uci set passwall.@global[0].tcp_proxy_mode="gfwlist"
fi
passwall_started='1';
uci commit passwall
/etc/init.d/passwall restart 2>/dev/null
fi
# passwall2: identical handling.
passwall2_server_enabled=$(uci get passwall2.@global[0].enabled 2>/dev/null)
passwall2_original_run_mode=$(uci get passwall2.@global[0].tcp_proxy_mode 2>/dev/null)
if [ "x${passwall2_server_enabled}" == "x1" ] ;then
if [ $proxy_mode == "close" ] ;then
uci set passwall2.@global[0].enabled="0"
elif [ $proxy_mode == "gfw" ] ;then
uci set passwall2.@global[0].tcp_proxy_mode="gfwlist"
fi
passwall2_started='1';
uci commit passwall2
/etc/init.d/passwall2 restart 2>/dev/null
fi
# vssr: same pattern as shadowsocksr.
vssr_original_server=$(uci get vssr.@global[0].global_server 2>/dev/null)
vssr_original_run_mode=$(uci get vssr.@global[0].run_mode 2>/dev/null)
if [ "x${vssr_original_server}" != "xnil" ] && [ "x${vssr_original_server}" != "x" ] ;then
if [ $proxy_mode == "close" ] ;then
uci set vssr.@global[0].global_server="nil"
elif [ $proxy_mode == "gfw" ] ;then
uci set vssr.@global[0].run_mode="gfw"
fi
vssr_started='1';
uci commit vssr
/etc/init.d/vssr restart
fi
# bypass: "close" empties the server instead of setting "nil".
bypass_original_server=$(uci get bypass.@global[0].global_server 2>/dev/null)
bypass_original_run_mode=$(uci get bypass.@global[0].run_mode 2>/dev/null)
if [ "x${bypass_original_server}" != "x" ] ;then
if [ $proxy_mode == "close" ] ;then
uci set bypass.@global[0].global_server=""
elif [ $proxy_mode == "gfw" ] ;then
uci set bypass.@global[0].run_mode="gfw"
fi
bypass_started='1';
uci commit bypass
/etc/init.d/bypass restart
fi
# Record the exact command, then run it; all output goes to the log.
echo $command >> $LOG_FILE 2>&1
echolog "-----------start----------"
$command >> $LOG_FILE 2>&1
echolog "-----------end------------"
}
# Take the best (first-ranked) IP from the speed-test result file and
# push it into every configured consumer, then restore/restart the
# proxy apps and finally update /etc/hosts.
function ip_replace(){
# Grab the fastest IP: first data row of the result CSV (row 1 is the
# header, so this prints line 2 only), first comma-separated field.
bestip=$(sed -n "2,1p" $IP_FILE | awk -F, '{print $1}')
[[ -z "${bestip}" ]] && echo "CloudflareST 测速结果 IP 数量为 0,跳过下面步骤..." && exit 0
alidns_ip
ssr_best_ip
vssr_best_ip
bypass_best_ip
passwall_best_ip
passwall2_best_ip
# restart_app() also restores the settings speed_test() changed.
restart_app
host_ip
}
# Point every configured passwall node at the fastest Cloudflare IP.
passwall_best_ip(){
    local node
    if [ "x${passwall_enabled}" == "x1" ] ;then
        echolog "设置passwall IP"
        for node in $passwall_services; do
            echo $node
            uci set passwall.$node.address="${bestip}"
        done
        uci commit passwall
    fi
}
# Point every configured passwall2 node at the fastest Cloudflare IP.
passwall2_best_ip(){
    local node
    if [ "x${passwall2_enabled}" == "x1" ] ;then
        echolog "设置passwall2 IP"
        for node in $passwall2_services; do
            echo $node
            uci set passwall2.$node.address="${bestip}"
        done
        uci commit passwall2
    fi
}
# Rewrite every configured shadowsocksr node (both its server and ip
# fields) to use the fastest Cloudflare IP.
ssr_best_ip(){
    local node
    if [ "x${ssr_enabled}" == "x1" ] ;then
        echolog "设置ssr IP"
        for node in $ssr_services; do
            echo $node
            uci set shadowsocksr.$node.server="${bestip}"
            uci set shadowsocksr.$node.ip="${bestip}"
        done
        uci commit shadowsocksr
    fi
}
# Point every configured vssr node at the fastest Cloudflare IP.
vssr_best_ip(){
    local node
    if [ "x${vssr_enabled}" == "x1" ] ;then
        echolog "设置Vssr IP"
        for node in $vssr_services; do
            echo $node
            uci set vssr.$node.server="${bestip}"
        done
        uci commit vssr
    fi
}
# Point every configured bypass node at the fastest Cloudflare IP.
bypass_best_ip(){
    local node
    if [ "x${bypass_enabled}" == "x1" ] ;then
        echolog "设置Bypass IP"
        for node in $bypass_services; do
            echo $node
            uci set bypass.$node.server="${bestip}"
        done
        uci commit bypass
    fi
}
# Undo whatever speed_test() changed: restore each proxy app's saved
# server / run-mode / enabled settings and restart its service.  Only
# apps whose *_started flag was set by speed_test() are touched.
function restart_app(){
    if [ "x${ssr_started}" == "x1" ] ;then
        if [ $proxy_mode == "close" ] ;then
            uci set shadowsocksr.@global[0].global_server="${ssr_original_server}"
        elif [ $proxy_mode == "gfw" ] ;then
            uci set shadowsocksr.@global[0].run_mode="${ssr_original_run_mode}"
        fi
        uci commit shadowsocksr
        /etc/init.d/shadowsocksr restart &>/dev/null
        echolog "ssr重启完成"
    fi
    if [ "x${passwall_started}" == "x1" ] ;then
        if [ $proxy_mode == "close" ] ;then
            uci set passwall.@global[0].enabled="${passwall_server_enabled}"
        elif [ $proxy_mode == "gfw" ] ;then
            uci set passwall.@global[0].tcp_proxy_mode="${passwall_original_run_mode}"
        fi
        uci commit passwall
        /etc/init.d/passwall restart 2>/dev/null
        echolog "passwall重启完成"
    fi
    if [ "x${passwall2_started}" == "x1" ] ;then
        if [ $proxy_mode == "close" ] ;then
            uci set passwall2.@global[0].enabled="${passwall2_server_enabled}"
        elif [ $proxy_mode == "gfw" ] ;then
            uci set passwall2.@global[0].tcp_proxy_mode="${passwall2_original_run_mode}"
        fi
        uci commit passwall2
        /etc/init.d/passwall2 restart 2>/dev/null
        echolog "passwall2重启完成"
    fi
    # BUG FIX: the original guarded the vssr restore with $bypass_started
    # and the bypass restore with $vssr_started (the two blocks were
    # swapped), so a run that had altered only one of these two apps
    # restored the wrong one -- or nothing at all.
    if [ "x${vssr_started}" == "x1" ] ;then
        if [ $proxy_mode == "close" ] ;then
            uci set vssr.@global[0].global_server="${vssr_original_server}"
        elif [ $proxy_mode == "gfw" ] ;then
            uci set vssr.@global[0].run_mode="${vssr_original_run_mode}"
        fi
        uci commit vssr
        /etc/init.d/vssr restart &>/dev/null
        echolog "Vssr重启完成"
    fi
    if [ "x${bypass_started}" == "x1" ] ;then
        if [ $proxy_mode == "close" ] ;then
            uci set bypass.@global[0].global_server="${bypass_original_server}"
        elif [ $proxy_mode == "gfw" ] ;then
            uci set bypass.@global[0].run_mode="${bypass_original_run_mode}"
        fi
        uci commit bypass
        /etc/init.d/bypass restart &>/dev/null
        echolog "Bypass重启完成"
    fi
}
# Update the configured Aliyun DNS sub-domains to point at the fastest
# IP by delegating to aliddns.sh for each sub-domain.
function alidns_ip(){
if [ "x${DNS_enabled}" == "x1" ] ;then
get_servers_config "DNS_type" "app_key" "app_secret" "main_domain" "sub_domain" "line"
# NOTE(review): "aliyu" looks like a typo for "aliyun", but it must
# match the value the LuCI config actually stores -- confirm before
# "fixing" it here.
if [ $DNS_type == "aliyu" ] ;then
for sub in $sub_domain
do
/usr/bin/cloudflarespeedtest/aliddns.sh $app_key $app_secret $main_domain $sub $line $ipv6_enabled $bestip
echolog "更新域名${sub}阿里云DNS完成"
# Throttle successive API calls.
sleep 1s
done
fi
echo "aliyun done"
fi
}
# Pin $host_domain to the fastest IP in /etc/hosts (replacing any stale
# entry; a .bak backup is kept, as before) and reload dnsmasq.
function host_ip() {
    if [ "x${HOST_enabled}" == "x1" ] ;then
        get_servers_config "host_domain"
        HOSTS_LINE="$bestip $host_domain"
        # FIX: quote $host_domain (the original unquoted grep would
        # word-split and treat dots as regex), use grep -q instead of
        # capturing output, and drop the duplicated append branch --
        # both arms of the original if/else appended the same line.
        if grep -q -- "$host_domain" /etc/hosts
        then
            sed -i".bak" "/$host_domain/d" /etc/hosts
        fi
        echo "$HOSTS_LINE" >> /etc/hosts
        /etc/init.d/dnsmasq reload &>/dev/null
        echolog "HOST 完成"
    fi
}
read_config
# Command-line dispatch:
#   start   - run the speed test, then apply the best IP everywhere
#   test    - run the speed test only
#   replace - apply the previously measured best IP only
# FIX: the original unquoted [ $1 == ... ] tests break (syntax error)
# when $1 contains spaces or glob characters; a quoted case statement
# is safe and evaluates the argument once.
if [ -n "${1:-}" ] ;then
    case "$1" in
        start)   speed_test && ip_replace ;;
        test)    speed_test ;;
        replace) ip_replace ;;
    esac
    exit
fi
| true
|
4c3353c90b66c7efed4686b92a0bbba8e760026d
|
Shell
|
PaNOSC-ViNYL/oasys-installation-scripts
|
/Developers/install_xraylib_from_github.sh
|
UTF-8
| 1,319
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#===============================================================================
#
# script to update/install xraylib from github in linux
#
# Note that one must install libtool with apt-get
#
#===============================================================================
#
#
# export all_proxy=http://proxy.esrf.fr:3128/
# clean old stuff
#define python
# Assumes the OASYS virtualenv "oasys1env" exists in the current
# directory; everything below installs into its site-packages.
source oasys1env/bin/activate
export PYTHON_SITE_PACKAGES=`python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"`
echo "Cleaning old installation files..."
rm -rf xraylib
# NOTE(review): these rm calls print errors (non-fatal) when the files
# are not present from a previous install -- intentional best-effort
# cleanup, presumably; confirm.
rm $PYTHON_SITE_PACKAGES/xrayhelp.py
rm $PYTHON_SITE_PACKAGES/xraylib.py
rm $PYTHON_SITE_PACKAGES/xraylib_np.so
rm $PYTHON_SITE_PACKAGES/_xraylib.so
rm $PYTHON_SITE_PACKAGES/xraymessages.py
# xraylib
echo "Installing Oasys dependency xraylib"
# curl -O http://lvserver.ugent.be/xraylib/xraylib-3.1.0.tar.gz
# tar xvfz xraylib-3.1.0.tar.gz
# cd xraylib-3.1.0
git clone https://github.com/tschoonj/xraylib
cd xraylib
# Regenerate the autotools build system, then build the python bindings
# against the venv's interpreter.
autoreconf -i
./configure --enable-python --enable-python-integration PYTHON=`which python`
make
# Install by hand-copying the built python modules into site-packages
# (no `make install`, so nothing lands outside the venv).
cp python/xrayhelp.py $PYTHON_SITE_PACKAGES
cp python/xraylib.py $PYTHON_SITE_PACKAGES
cp python/.libs/_xraylib*.so $PYTHON_SITE_PACKAGES
cp python/xraymessages.py $PYTHON_SITE_PACKAGES
cd ..
echo "All done for xraylib. "
| true
|
d777d68920214bb66ddc1b4c1d485e218b70fe8a
|
Shell
|
CognitiveScale/generator-cortex
|
/generators/datasets/templates/scripts/linux/publish-dataset.sh
|
UTF-8
| 300
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Build the dataset bundle, publish it to the Cortex Marketplace, and
# remove the local zip.  Aborts on the first failing step (set -e).
set -e
# Absolute directory containing this script, so it works from any cwd.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Run the build
${SCRIPT_DIR}/build-dataset.sh
# Publish dataset to Cortex Marketplace
cortex marketplace datasets save "${SCRIPT_DIR}/build/dataset.zip"
# Delete zip
rm -rf "${SCRIPT_DIR}/build/dataset.zip"
| true
|
1bed81da91ccdb262da0e628be3c4ef2c3c74014
|
Shell
|
AlexGroves/dotfiles
|
/.exports
|
UTF-8
| 1,160
| 2.84375
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Shell environment exports; sourced by the interactive shell startup
# files (dotfiles-style).
# Make nano the default editor.
export EDITOR='nano';
# Make Python use UTF-8 encoding for output to stdin, stdout, and stderr.
export PYTHONIOENCODING='UTF-8';
# Increase Bash history size. Allow 32³ entries; the default is 500.
export HISTSIZE='32768';
export HISTFILESIZE="${HISTSIZE}";
# Omit duplicates and commands that begin with a space from history.
export HISTCONTROL='ignoreboth';
# Prefer US English and use UTF-8.
export LANG='en_US.UTF-8';
export LC_ALL='en_US.UTF-8';
# Highlight section titles in manual pages.
# NOTE(review): ${yellow} is expected to be defined by another sourced
# dotfile (e.g. a colors/prompt file) -- it is unset here; confirm.
export LESS_TERMCAP_md="${yellow}";
# Don’t clear the screen after quitting a manual page.
export MANPAGER='less -X';
# # set environment variables
# export MY_REPO_ROOT=$HOME/github/warmlogic;
# export BRANCH_REPO_ROOT=$HOME/github/branch;
# # Set AWS defaults
# # https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
# # https://docs.aws.amazon.com/cli/latest/userguide/cli-environment.html
# export AWS_DEFAULT_PROFILE=personal; # or run `export AWS_PROFILE=personal`
# export AWS_DEFAULT_REGION=us-west-2;
# export AWS_DEFAULT_OUTPUT=text;
# export GITHUB_AUTH_TOKEN='';
| true
|
b33dede46b1e2ed1456236d5ebd8f0dda620b24a
|
Shell
|
yajiedesign/mxnet_cd_demo
|
/demo/mxnet-distro/scripts/set_environment.sh
|
UTF-8
| 2,574
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Export the build environment (CUDA versions, tool paths, compiler
# flags, parallel job count) for an MXNet variant build.  Intended to
# be sourced: `source set_environment.sh <VARIANT>`.
if [ $# -lt 1 ]; then
>&2 echo "Usage: source set_environment.sh <VARIANT>[CPU|MKL|CU75|CU80|CU90|CU75MKL|CU80MKL|CU90MKL]"
fi
# NOTE(review): there is no `return`/`exit` after the usage message, so
# execution continues with an unset $1 -- presumably tolerated because
# the file is sourced; confirm.
echo $PWD
export DEPS_PATH=$PWD/deps
export VARIANT=$(echo $1 | tr '[:upper:]' '[:lower:]')
export PLATFORM=$(uname | tr '[:upper:]' '[:lower:]')
# Variant-specific dependencies:
if [[ $PLATFORM == 'linux' ]]; then
>&2 echo "Setting CUDA versions for $VARIANT"
# TODO uncomment when starting to use mkldnn
# if [[ $VARIANT == 'mkl' ]]; then
# export MKLML_VERSION='2017.0.2.20170209'
# export MKLDNN_VERSION='0.5'
# Pin the CUDA / driver / cuDNN package versions per variant.
if [[ $VARIANT == cu90* ]]; then
export CUDA_VERSION='9.0.176-1'
export LIBCUDA_VERSION='384.66-0ubuntu1'
# export LIBCUDNN_VERSION='6.0.21-1+cuda7.5'
export LIBCUDNN_VERSION='7.0.4.31-1+cuda9.0'
elif [[ $VARIANT == cu80* ]]; then
export CUDA_VERSION='8.0.61-1'
export LIBCUDA_VERSION='375.88-0ubuntu1'
export LIBCUDNN_VERSION='7.0.4.31-1+cuda8.0'
elif [[ $VARIANT == cu75* ]]; then
export CUDA_VERSION='7.5-18'
export LIBCUDA_VERSION='375.88-0ubuntu1'
export LIBCUDNN_VERSION='6.0.21-1+cuda7.5'
fi
if [[ $VARIANT == cu* ]]; then
# download and install cuda and cudnn, and set paths
# Derive "major.minor" (e.g. 9.0) and the driver major version used in
# the nvidia-NNN library directory name.
CUDA_MAJOR_VERSION=$(echo $CUDA_VERSION | tr '-' '.' | cut -d. -f1,2)
NVIDIA_MAJOR_VERSION=$(echo $LIBCUDA_VERSION | cut -d. -f1)
export PATH=${PATH}:$DEPS_PATH/usr/local/cuda-$CUDA_MAJOR_VERSION/bin
export CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}:$DEPS_PATH/usr/local/cuda-$CUDA_MAJOR_VERSION/include
export C_INCLUDE_PATH=${C_INCLUDE_PATH}:$DEPS_PATH/usr/local/cuda-$CUDA_MAJOR_VERSION/include
export LIBRARY_PATH=${LIBRARY_PATH}:$DEPS_PATH/usr/local/cuda-$CUDA_MAJOR_VERSION/lib64:$DEPS_PATH/usr/lib/x86_64-linux-gnu:$DEPS_PATH/usr/lib/nvidia-$NVIDIA_MAJOR_VERSION
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$DEPS_PATH/usr/local/cuda-$CUDA_MAJOR_VERSION/lib64:$DEPS_PATH/usr/lib/x86_64-linux-gnu:$DEPS_PATH/usr/lib/nvidia-$NVIDIA_MAJOR_VERSION
fi
fi
export PKG_CONFIG_PATH=$DEPS_PATH/lib/pkgconfig:$DEPS_PATH/lib64/pkgconfig:$PKG_CONFIG_PATH
export CPATH=$DEPS_PATH/include:$CPATH
# Position-independent code is required for the shared-library build.
export CC="gcc -fPIC"
export CXX="g++ -fPIC"
export FC="gfortran"
# Parallel job count: nproc on Linux, sysctl on macOS/BSD, else 1.
NUM_PROC=1
if [[ ! -z $(command -v nproc) ]]; then
NUM_PROC=$(nproc)
elif [[ ! -z $(command -v sysctl) ]]; then
NUM_PROC=$(sysctl -n hw.ncpu)
else
>&2 echo "Can't discover number of cores."
fi
export NUM_PROC
>&2 echo "Using $NUM_PROC parallel jobs in building."
# Dump the final environment for build-log debugging.
env
| true
|
5f8dd56310b308cfcb1f8a04fde9655e8840ddf5
|
Shell
|
mattfoster/ff-docker
|
/entrypoint.sh
|
UTF-8
| 296
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: ensure a VNC password exists (random when $PASS
# is unset or empty), store it for x11vnc, print connection info, then
# run x11vnc in the foreground as the container's main process.
if [ -z "${PASS:-}" ]; then
    PASS=$(dd if=/dev/urandom bs=1 count=15 2> /dev/null | base64)
fi
x11vnc -storepasswd $PASS ~/.vnc/passwd 2> /dev/null
echo "VNC password is: ${PASS}"
echo "Connect to vnc://127.0.0.1:5900 (assuming you forward that port!)"
x11vnc -forever -usepw -create > /dev/null 2>&1
| true
|
ba6a1654459d471b88886f56bdf992f7f4a8512a
|
Shell
|
openov2/integration
|
/test/csit/plans/sdno-lcm/tosca-template/setup.sh
|
UTF-8
| 5,766
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Copyright 2016-2017 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# These scripts are sourced by run-csit.sh.
# JQ install function
# Setup commented as the test has been disabled.
#set +x
##Local Var:
#CSAR_DIR=${SCRIPTS}/sdno-lcm/uploadCSAR
#source ${SCRIPTS}/common_functions.sh
#
## Pull down MSB, Tosca, and Tosca-catalog docker images
#echo "Pull MSB docker image ..."
#docker pull openoint/common-services-msb
#
#echo "Pull MSS docker image ..."
#docker pull openoint/sdno-service-mss
#
#echo "Pull BRS docker image ..."
#docker pull openoint/sdno-service-brs
#
#echo "Pull LCM docker image ..."
#docker pull openoint/sdno-service-lcm
#
#echo "Pull TOSCA-CATALOG docker image ..."
#docker pull openoint/common-tosca-catalog
#
#echo "Pull TOSCA-ARIA docker image ..."
#docker pull openoint/common-tosca-aria
#
#echo "Pull ESR (common-services-extsys) docker image ..."
#docker pull openoint/common-services-extsys
#
## Start Images
#echo "Start MSB docker image ..."
#MSB_PORT="80"
#docker run -d -i -t --name i-msb -p 80:$MSB_PORT openoint/common-services-msb
#MSB_IP=`docker inspect --format '{{ .NetworkSettings.IPAddress }}' i-msb`
#MSB_ADDR=$MSB_IP:$MSB_PORT
#sleep_msg="Waiting_connection_for:i-msb"
#curl_path='http://'${MSB_ADDR}'/api/microservices/v1/swagger.yaml'
#wait_curl_driver CURL_COMMAND=$curl_path WAIT_MESSAGE="$sleep_msg" REPEAT_NUMBER="15" STATUS_CODE="200"
#
#echo "Start MSS docker image ..."
#docker run -d -i -t --name i-mss -e MSB_ADDR=$MSB_ADDR openoint/sdno-service-mss
#curl_path='http://'$MSB_ADDR'/openoapi/microservices/v1/services/sdnomss/version/v1'
#sleep_msg="Waiting_connection_for:i-mss"
#wait_curl_driver CURL_COMMAND=$curl_path WAIT_MESSAGE="$sleep_msg" REPEAT_NUMBER="20" STATUS_CODE="200"
#
#echo "Start BRS docker image ..."
#docker run -d -i -t --name i-brs -e MSB_ADDR=$MSB_ADDR openoint/sdno-service-brs
#curl_path='http://'$MSB_ADDR'/openoapi/sdnobrs/v1/swagger.json'
#sleep_msg="Waiting_connection_for:i-brs"
#wait_curl_driver CURL_COMMAND=$curl_path WAIT_MESSAGE="$sleep_msg" REPEAT_NUMBER="20" STATUS_CODE="200"
#
#echo "Start LCM docker image ..."
#docker run -d -i -t --name s-sdno-service-lcm -e MSB_ADDR=$MSB_ADDR openoint/sdno-service-lcm
#curl_path='http://'$MSB_ADDR'/openoapi/sdnonslcm/v1/swagger.json'
#sleep_msg="Waiting_connection_for:s-lcm"
#wait_curl_driver CURL_COMMAND=$curl_path WAIT_MESSAGE="$sleep_msg" REPEAT_NUMBER="20" STATUS_CODE="200"
#
#echo "Start TOSCA-CATALOG docker image ..."
#docker run -d -i -t --name i-catalog -e MSB_ADDR=$MSB_ADDR openoint/common-tosca-catalog
#curl_path='http://'$MSB_ADDR'/openoapi/catalog/v1/swagger.json'
#sleep_msg="Waiting_connection_for:i-catalog"
#wait_curl_driver CURL_COMMAND=$curl_path WAIT_MESSAGE="$sleep_msg" REPEAT_NUMBER="20" STATUS_CODE="200"
#
#echo "Start TOSCA-ARIA docker image ..."
#docker run -d -i -t --name i-tosca -e MSB_ADDR=$MSB_ADDR openoint/common-tosca-aria
#curl_path='http://'$MSB_ADDR'/openoapi/tosca/v1/swagger.json'
#sleep_msg="Waiting_connection_for:i-tosca"
#wait_curl_driver CURL_COMMAND=$curl_path WAIT_MESSAGE="$sleep_msg" REPEAT_NUMBER="20" STATUS_CODE="200"
#
#echo "Start ESR (common-services-extsys) docker image ..."
#docker run -d -i -t --name i-common-services-extsys -e MSB_ADDR=$MSB_ADDR openoint/common-services-extsys
#sleep_msg="Waiting_for_i-common-services-extsys"
#curl_path='http://'${MSB_ADDR}'/openoapi/microservices/v1/services/extsys/version/v1'
#wait_curl_driver CURL_COMMAND=$curl_path WAIT_MESSAGE="$sleep_msg" REPEAT_NUMBER="50" STATUS_CODE="200"
#
#echo "chmod +x CSAR script..."
#chmod +x $CSAR_DIR/uploadCSAR.sh
#
#echo "get current time in seconds ..."
#timestamp=`date +%s`
#TMP_DIR=/tmp/CSAR_$timestamp
#echo "Create temporary directory to store fresh csar files under $TMP_DIR...."
#mkdir $TMP_DIR
#chmod 755 $TMP_DIR
#
#CSARFILE=enterprise2DC.csar
#echo "pull $CSARFILE ..."
#for i in $(seq 1 5);
#do
# echo "Attempt to download csar file number $i/5"
# curl -m90 "https://gerrit.open-o.org/r/gitweb?p=modelling-csar.git;a=blob;f=csars/sdn-ns/${CSARFILE};h=4b81cd020ec7e94059c68454b81c451613e715bf;hb=refs/heads/master" > $TMP_DIR/${CSARFILE}
# unzip -t $TMP_DIR/${CSARFILE}
# if [ $? -eq 0 ];
# then
# break
# elif [ $i -eq 5 ]
# then
# echo "/!\ WARNNING::Test expected to fail because the ${CSARFILE} is corrupted"
# fi
#done
#
#CSARFILE=underlayl3vpn.csar
#echo "pull $CSARFILE ..."
#for j in $(seq 1 5);
#do
# echo "Attempt to download csar file number $j/5"
# curl -m90 "https://gerrit.open-o.org/r/gitweb?p=modelling-csar.git;a=blob;f=csars/sdn-ns/${CSARFILE};h=ebb13900ec281b40875442332d5b22afb6ff14ea;hb=refs/heads/master" > $TMP_DIR/${CSARFILE}
# unzip -t $TMP_DIR/${CSARFILE}
# if [ $? -eq 0 ];
# then
# break
# elif [ $j -eq 5 ]
# then
# echo "/!\ WARNNING::Test expected to fail because the ${CSARFILE} is corrupted"
# fi
#done
#
#LOG=`ls -l $TMP_DIR`
#echo "LOG INFO::$LOG"
#echo "Log memory details # @BEFORE TESTS"
#memory_details
#
#ROBOT_VARIABLES="-L TRACE -v MSB_IP:${MSB_IP} -v SCRIPTS:${SCRIPTS} -v TMP_DIR:${TMP_DIR} -v enterprise2DC:enterprise2DC.csar -v underlayl3vpn:underlayl3vpn.csar"
# Only this assignment is still active; the full MSB/MSS/BRS/LCM setup
# above is commented out because the test is disabled.  MSB_IP is
# presumably expected from the sourcing run-csit.sh environment -- it
# is unset here otherwise; TODO confirm.
ROBOT_VARIABLES="-L TRACE -v MSB_IP:${MSB_IP}"
| true
|
c8c55525c6dc8db4548ffd3ff7efe7f843e1a195
|
Shell
|
chriswininger/home-monitor-service
|
/live-cam/coop-image-downloades/dl-all-folders.sh
|
UTF-8
| 213
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Mirror every top-level "folder" (S3 common prefix) of the
# coop-cam-uploads bucket by delegating each one to ./dl-folder.sh.
bucket=s3://coop-cam-uploads
# `aws s3 ls` lists prefixes as "PRE <name>/"; strip that wrapping to
# recover the bare folder name.
aws s3 ls "$bucket" | grep 'PRE ' | while read prefix_line
do
    folder=${prefix_line#"PRE "}
    folder=${folder%"/"}
    ./dl-folder.sh "$folder"
done
| true
|
0405edb5ed0af778b646503f4ae7cbc00fbe091f
|
Shell
|
dougwyu/HJA_analyses_Kelpie
|
/HJA_scripts/04_kelpie/scripts_for_unfiltered_kelpie/1_launch_kelpie.sh
|
UTF-8
| 3,152
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# NOTE(review): despite the shebang this file reads as an operator's
# command log meant to be run step-by-step by hand (it opens an ssh
# session and an `interactive` HPC shell, then issues commands that
# would run on the remote side) -- confirm it is not executed directly.
set -e
set -u
set -o pipefail
#######################################################################################
#######################################################################################
# a shell script to launch bsub files.
#######################################################################################
#######################################################################################
# mkdir BWA{01,02,03,04,05,06,07,08,09,10} # make 10 folders
# by hand, I moved 24 folders into each of the 10 BWA folders
############# edit kelpie script #############
ssh b042@ada.uea.ac.uk
interactive
# path to GNU parallel
PATH=$PATH:~/scripts/parallel-20170722/bin/
############# copy the kelpie2 shell and bsub scripts into each BWA folder and edit the jobIDs
# cd ~/_Oregon/2019Sep_shotgun/testkelpie/; ls
cd ~/_Oregon/2019Sep_shotgun/2.trimmeddata/; ls
KELPIE2_SUB="_parallel_kelpie_20200717.sub"; head -n20 ${KELPIE2_SUB}
KELPIE2_SH="_parallel_kelpie_20200717.sh"; head -n60 ${KELPIE2_SH}
# Fan the submit + shell scripts out into the ten BWA folders.
parallel cp ${KELPIE2_SUB} BWA{} ::: 01 02 03 04 05 06 07 08 09 10
parallel cp ${KELPIE2_SH} BWA{} ::: 01 02 03 04 05 06 07 08 09 10
parallel ls BWA{} ::: 01 02 03 04 05 06 07 08 09 10
# ls BWA{01,02,03,04,05,06,07,08,09,10}
# edit the bsub files so that the correct jobID will show up (i suppose i could have instead run a job array...)
cd ~/_Oregon/2019Sep_shotgun/2.trimmeddata/; ls
# Give each copy a unique job name (klplp01, klplp02, ...).
parallel "sed 's/klplp/klplp{}/g' BWA{}/${KELPIE2_SUB} > BWA{}/${KELPIE2_SUB}_tmp" ::: 01 02 03 04 05 06 07 08 09 10
parallel "mv BWA{}/${KELPIE2_SUB}_tmp BWA{}/${KELPIE2_SUB}" ::: 01 02 03 04 05 06 07 08 09 10
head -n14 BWA{01,02,03,04,05,06,07,08,09,10}/${KELPIE2_SUB} # check. should be klplp{01,02,03,...}
# check that i'm using #SBATCH -p compute
tail -n2 BWA{01,02,03,04,05,06,07,08,09,10}/${KELPIE2_SUB} # check. should be the correct shell file
ls # BWA* folders should now sort to bottom
####### launch the kelpie2 scripts #######
# cd into each BWA folder and submit bsub job
cd ~/_Oregon/2019Sep_shotgun/2.trimmeddata/BWA01; ls
echo ${KELPIE2_SUB}
sbatch ${KELPIE2_SUB}
squeue -u b042
ls
cd ~/_Oregon/2019Sep_shotgun/2.trimmeddata/BWA02; ls
sbatch ${KELPIE2_SUB}
squeue -u b042
cd ~/_Oregon/2019Sep_shotgun/2.trimmeddata/BWA03; ls
sbatch ${KELPIE2_SUB}
squeue -u b042
cd ~/_Oregon/2019Sep_shotgun/2.trimmeddata/BWA04; ls
sbatch ${KELPIE2_SUB}
squeue -u b042
cd ~/_Oregon/2019Sep_shotgun/2.trimmeddata/BWA05; ls
sbatch ${KELPIE2_SUB}
squeue -u b042
cd ~/_Oregon/2019Sep_shotgun/2.trimmeddata/BWA06; ls
sbatch ${KELPIE2_SUB}
squeue -u b042
cd ~/_Oregon/2019Sep_shotgun/2.trimmeddata/BWA07; ls
sbatch ${KELPIE2_SUB}
squeue -u b042
cd ~/_Oregon/2019Sep_shotgun/2.trimmeddata/BWA08; ls
sbatch ${KELPIE2_SUB}
squeue -u b042
cd ~/_Oregon/2019Sep_shotgun/2.trimmeddata/BWA09; ls
sbatch ${KELPIE2_SUB}
squeue -u b042
cd ~/_Oregon/2019Sep_shotgun/2.trimmeddata/BWA10; ls
sbatch ${KELPIE2_SUB}
squeue -u b042
# Spot-check that the jobs are producing output.
ls ~/_Oregon/2019Sep_shotgun/2.trimmeddata/BWA01/HOBO-357-M1-S2_BDSW190603169-1a/ # check
ls ~/_Oregon/2019Sep_shotgun/2.trimmeddata/BWA01/HOBO-349-M1-S1_BDSW190603162-1a
squeue -u b042
head -n14 ${KELPIE2_SUB}
| true
|
0c5ce0f2de6726667c5eeec0d873df7c3badd88a
|
Shell
|
KritiS/shell-scripting-course
|
/section-six/exe2.sh
|
UTF-8
| 383
| 3.859375
| 4
|
[
"MIT"
] |
permissive
|
# Create six numbered files with a user-supplied extension, then rename
# them all with a user-supplied prefix (default: today's date).
read -p "Enter the exetension for the file to rename: " EXT
read -p "Enter the prefix to prepend to filename: " PREFIX
# Create the sample files 1.EXT .. 6.EXT.
for n in 1 2 3 4 5 6
do
    touch "${n}.${EXT}"
done
# BUG FIX: the original tested the literal string "PREFIX"
# ([ PREFIX = "" ], always false) and used an invalid assignment with
# spaces (PREFIX = $(date ...)), so the date default never applied.
if [ -z "$PREFIX" ]
then
    PREFIX=$(date +%Y-%m-%d)
fi
ls *.${EXT}
if [ $? -eq 0 ]
then
    for FILE in *.${EXT}
    do
        echo "Renaming $FILE to ${PREFIX}-${FILE}"
        mv -- "$FILE" "${PREFIX}-${FILE}"
    done
fi
| true
|
32bd5ee8d8388aedf29419d2f976a70100b2307f
|
Shell
|
NetBSD/src
|
/etc/powerd/scripts/hotkey_button
|
UTF-8
| 576
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh -
#
# $NetBSD: hotkey_button,v 1.2 2008/01/17 00:37:46 christos Exp $
#
# Generic script for hotkey events.
#
# Arguments passed by powerd(8):
#
#	hotkey event
PATH=/usr/pkg/bin:$PATH; export PATH
# XXXJDM need a better way to determine this
# Take the console owner as the X session user, and target display :0.
XUSER="$(ls -l /dev/console | awk '{ print $3; }')"
DISPLAY=:0.0; export DISPLAY
# $1 = hotkey device, $2 = event; run the matching per-key action (if
# one exists) as the console user.
case "${2}" in
pressed)
if [ -f "/etc/powerd/actions/${1}" ]; then
/usr/bin/su -- "$XUSER" -c "/etc/powerd/actions/${1}"
fi
exit 0
;;
released)
;;
*)
# NOTE(review): ">&1" is a no-op redirect -- presumably ">&2" (stderr)
# was intended; logger writes to syslog regardless. TODO confirm.
logger -p warning "${0}: unsupported event ${2} on device ${1}" >&1
exit 1
esac
| true
|
3f5be155584c258a3ae894a7d632370b7b0bd525
|
Shell
|
DarthVader1977/eosio.cdt
|
/scripts/eosiocdt_build_centos.sh
|
UTF-8
| 5,985
| 3.734375
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# Gather host facts (OS version, memory, CPU, disk) and derive the
# parallel build job count, then enforce minimum requirements.
# NOTE(review): OS_NAME and DISK_MIN are read but never set in this
# file -- presumably exported by the wrapper script that sources this
# one; confirm.
OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' \
| cut -d'.' -f1 )
MEM_MEG=$( free -m | sed -n 2p | tr -s ' ' | cut -d\ -f2 )
CPU_SPEED=$( lscpu | grep "MHz" | tr -s ' ' | cut -d\ -f3 | cut -d'.' -f1 )
CPU_CORE=$( nproc )
# Jobs = min(cores, roughly half the RAM in GB) to avoid OOM in large
# template-heavy compiles.
MEM_GIG=$(( ((MEM_MEG / 1000) / 2) ))
export JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG ))
DISK_INSTALL=$( df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 )
DISK_TOTAL_KB=$( df . | tail -1 | awk '{print $2}' )
DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' )
DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 ))
DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 ))
printf "\\nOS name: ${OS_NAME}\\n"
printf "OS Version: ${OS_VER}\\n"
printf "CPU speed: ${CPU_SPEED}Mhz\\n"
printf "CPU cores: ${CPU_CORE}\\n"
printf "Physical Memory: ${MEM_MEG}Mgb\\n"
printf "Disk install: ${DISK_INSTALL}\\n"
printf "Disk space total: ${DISK_TOTAL%.*}G\\n"
printf "Disk space available: ${DISK_AVAIL%.*}G\\n"
printf "Concurrent Jobs (make -j): ${JOBS}\\n"
if [ "${MEM_MEG}" -lt 7000 ]; then
	printf "\\nYour system must have 7 or more Gigabytes of physical memory installed.\\n"
	printf "Exiting now.\\n\\n"
	exit 1;
fi
if [ "${OS_VER}" -lt 7 ]; then
	printf "\\nYou must be running Centos 7 or higher to install EOSIO.\\n"
	printf "Exiting now.\\n\\n"
	exit 1;
fi
if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then
	printf "\\nYou must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}"
	printf "Exiting now.\\n\\n"
	exit 1;
fi
printf "\\n"
# Verify yum exists, then interactively offer a full repository update.
printf "Checking Yum installation...\\n"
if ! YUM=$( command -v yum 2>/dev/null ); then
	printf "!! Yum must be installed to compile EOS.IO !!\\n"
	printf "Exiting now.\\n"
	exit 1;
fi
printf " - Yum installation found at %s.\\n" "${YUM}"
printf "\\nDo you wish to update YUM repositories?\\n\\n"
# Interactive yes/no menu (select reads from the terminal).
select yn in "Yes" "No"; do
	case $yn in
		[Yy]* )
			printf "\\n\\nUpdating...\\n\\n"
			if ! sudo $YUM -y update; then
				printf "\\nYUM update failed.\\n"
				printf "\\nExiting now.\\n\\n"
				exit 1;
			else
				printf "\\nYUM update complete.\\n"
			fi
		break;;
		[Nn]* )
			echo "Proceeding without update!"
		break;;
		* ) echo "Please type 1 for yes or 2 for no.";;
	esac
done
# Install/enable the Software Collections repo and devtoolset-7 so a
# modern GCC toolchain is available on CentOS 7.
printf "Checking installation of Centos Software Collections Repository...\\n"
SCL=$( rpm -qa | grep -E 'centos-release-scl-[0-9].*' )
if [ -z "${SCL}" ]; then
	printf " - Do you wish to install and enable this repository?\\n"
	select yn in "Yes" "No"; do
		case $yn in
			[Yy]* )
				printf "Installing SCL...\\n"
				if ! sudo $YUM -y --enablerepo=extras install centos-release-scl; then
					printf "!! Centos Software Collections Repository installation failed !!\\n"
					printf "Exiting now.\\n\\n"
					exit 1;
				else
					printf "Centos Software Collections Repository installed successfully.\\n"
				fi
			break;;
			[Nn]* ) echo "User aborting installation of required Centos Software Collections Repository, Exiting now."; exit;;
			* ) echo "Please type 1 for yes or 2 for no.";;
		esac
	done
else
	printf " - ${SCL} found.\\n"
fi
printf "Checking installation of devtoolset-7...\\n"
DEVTOOLSET=$( rpm -qa | grep -E 'devtoolset-7-[0-9].*' )
if [ -z "${DEVTOOLSET}" ]; then
	printf "Do you wish to install devtoolset-7?\\n"
	select yn in "Yes" "No"; do
		case $yn in
			[Yy]* )
				printf "Installing devtoolset-7...\\n"
				if ! sudo $YUM install -y devtoolset-7 2>/dev/null; then
					printf "!! Centos devtoolset-7 installation failed !!\\n"
					printf "Exiting now.\\n"
					exit 1;
				else
					printf "Centos devtoolset installed successfully.\\n"
				fi
			break;;
			[Nn]* ) echo "User aborting installation of devtoolset-7. Exiting now."; exit;;
			* ) echo "Please type 1 for yes or 2 for no.";;
		esac
	done
else
	printf " - ${DEVTOOLSET} found.\\n"
fi
# Put the devtoolset-7 compilers at the front of PATH for this shell.
printf "Enabling Centos devtoolset-7...\\n"
if ! source "/opt/rh/devtoolset-7/enable" 2>/dev/null; then
	printf "!! Unable to enable Centos devtoolset-7 at this time !!\\n"
	printf "Exiting now.\\n\\n"
	exit 1;
fi
printf "Centos devtoolset-7 successfully enabled.\\n"
printf "\\n"
# Required build dependencies; each is checked against the RPM database
# and only the missing ones are offered for installation.
DEP_ARRAY=(
	git autoconf automake libtool make bzip2 \
	bzip2-devel openssl-devel gmp-devel \
	ocaml libicu-devel python python-devel python33 \
	gettext-devel file sudo
)
COUNT=1
# NOTE(review): DISPLAY here is the numbered message list shown to the
# user -- it shadows the X11 DISPLAY variable for the rest of this
# script; presumably harmless in a build shell, confirm.
DISPLAY=""
DEP=""
printf "Checking RPM for installed dependencies...\\n"
for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); do
	pkg=$( rpm -qi "${DEP_ARRAY[$i]}" 2>/dev/null | grep Name )
	if [[ -z $pkg ]]; then
		DEP=$DEP" ${DEP_ARRAY[$i]} "
		DISPLAY="${DISPLAY}${COUNT}. ${DEP_ARRAY[$i]}\\n"
		# NOTE(review): ${bldred}/${txtrst} color codes are expected
		# from the sourcing wrapper -- unset here.
		printf "!! Package %s ${bldred} NOT ${txtrst} found !!\\n" "${DEP_ARRAY[$i]}"
		(( COUNT++ ))
	else
		printf " - Package %s found.\\n" "${DEP_ARRAY[$i]}"
		continue
	fi
done
printf "\\n"
if [ "${COUNT}" -gt 1 ]; then
	printf "The following dependencies are required to install EOSIO.\\n"
	printf "${DISPLAY}\\n\\n"
	printf "Do you wish to install these dependencies?\\n"
	select yn in "Yes" "No"; do
		case $yn in
			[Yy]* )
				printf "Installing dependencies\\n\\n"
				if ! sudo $YUM -y install ${DEP}; then
					printf "!! YUM dependency installation failed !!\\n"
					printf "Exiting now.\\n"
					exit 1;
				else
					printf "YUM dependencies installed successfully.\\n"
				fi
			break;;
			[Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;;
			* ) echo "Please type 1 for yes or 2 for no.";;
		esac
	done
else
	printf " - No required YUM dependencies to install.\\n"
fi
printf "\\n"
# Build CMake from source into $HOME when no suitable binary is known.
# NOTE(review): $CMAKE and the CMAKE_VERSION* variables are never set
# in this file -- presumably exported by the wrapper that sources it
# (so with an unset $CMAKE this always builds); confirm.  The success
# message also prints ${CMAKE}, which stays empty on this path.
printf "Checking CMAKE installation...\\n"
if [ -z $CMAKE ]; then
	printf "Installing CMAKE...\\n"
	curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \
	&& tar -xzf cmake-$CMAKE_VERSION.tar.gz \
	&& cd cmake-$CMAKE_VERSION \
	&& ./bootstrap --prefix=$HOME \
	&& make -j"${JOBS}" \
	&& make install \
	&& cd .. \
	&& rm -f cmake-$CMAKE_VERSION.tar.gz \
	|| exit 1
	printf " - CMAKE successfully installed @ ${CMAKE}.\\n"
else
	printf " - CMAKE found @ ${CMAKE}.\\n"
fi
| true
|
b80f957c574729e014be0bbaeaf49c3698b03ec5
|
Shell
|
naveenb29/solr-config-loader
|
/run.sh
|
UTF-8
| 883
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Use this script to load config to solr:5.5.5
#
# Waits for the Solr cluster to answer its health check (up to 10
# attempts, 60s apart), uploads the config set $CONFNAME to ZooKeeper,
# then creates collection $COLLECTIONNAME.
healthy=0
for c in 1 2 3 4 5 6 7 8 9 10
do
    date -u
    echo " validating solr health check (attempt $c/10)"
    if curl "http://solr-svc:8983/solr/admin/collections?action=clusterstatus"
    then
        echo "health check successful"
        healthy=1
        break
    else
        echo "health check failed , will retry in 60 seconds"
        sleep 60
    fi
done
# BUG FIX: the original tested the literal string "c" ([ c = 10 ],
# always false), so the script never actually aborted on a dead
# cluster; it also mangled the loop counter with c=$c+1 (string
# append on top of the for-loop's own c++) and left an unused
# counter=1 variable.
if [ "$healthy" -ne 1 ]
then
    exit 1
fi
date -u
echo "attempting to write config to solr-zookeeper"
if /opt/solr/server/scripts/cloud-scripts/zkcli.sh -cmd upconfig -zkhost solr-zookeeper -confname "$CONFNAME" -confdir /tmp/config/
then
    echo "config load successful"
fi
# FIX: the original echoed "$date" (an unset variable, printing
# nothing); emit an actual timestamp like the other log lines.
echo "$(date -u) attempting to create collection"
# NOTE(review): collection.configName uses $CONFIG while the upload
# above uses $CONFNAME -- confirm these are intentionally different
# environment variables.
curl "http://solr-svc:8983/solr/admin/collections?action=CREATE&name=$COLLECTIONNAME&numShards=1&replicationFactor=3&maxShardsPerNode=1&collection.configName=$CONFIG"
| true
|
3d284235558e780cdb4ca3215f4e6cd17cb76391
|
Shell
|
pierrelarsson/bin
|
/mnt
|
UTF-8
| 756
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Interactive mount helper: collects candidate devices (disk images in
# the current directory plus whole-disk /dev/disk/by-id entries matching
# an optional filter in $1), presents a numbered menu, and mounts the
# selection under /mnt/<name>.
set -e
# Refuse to run unless /mnt itself is on the root filesystem (i.e.
# nothing is already mounted over /mnt).
[ `stat --format %m /mnt` = "/" ]
PS3="> "
COLUMNS=1
DEVICES=()
MENU=()
# List candidates: local image files whose name matches "$1" (or common
# image extensions), plus by-id symlinks to partition devices
# (names ending in a digit).
function getdevices {
find . -maxdepth 1 -type f \
-iname "*$1*" \
-a \( -name "$1" \
-o -iname "*.iso" \
-o -iname "*.img" \
-o -iname "*.part" \
-o -iname "*.partition" \
\)
find /dev/disk/by-id -iname "*$1*" -lname "*[0-9]" -type l
}
# Parallel arrays: full device path and its menu label (basename).
while read device ; do
DEVICES+=("$device")
MENU+=("`basename "$device"`")
done < <( ( getdevices $1 | sort ) )
select name in "${MENU[@]}" ; do
# Empty $name means an invalid menu choice; set -e aborts here.
[ -n "$name" ]
device=${DEVICES[$REPLY-1]}
sudo mkdir --parents "/mnt/$name"
# Clean up the empty mountpoint again if the mount fails.
sudo mount --verbose "$device" "/mnt/$name" || sudo rmdir "/mnt/$name"
exit $?
done
exit 1
| true
|
80d3d2cc246c8aa3b2b797aeaa6641275bb15a78
|
Shell
|
CryptosWolfOfAllStreets/zpwr
|
/scripts/fzfEnv.sh
|
UTF-8
| 3,097
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#{{{ MARK:Header
#**************************************************************
##### Author: MenkeTechnologies
##### GitHub: https://github.com/MenkeTechnologies
##### Date: Thu Sep 5 22:34:56 EDT 2019
##### Purpose: bash script to
##### Notes: no single quotes allowed
#}}}***********************************************************
# Return 0 when the running interpreter is zsh, 1 otherwise.  Detection
# inspects the process-table entry for $$; `command` bypasses any
# ps/grep aliases or shell functions.
isZsh(){
if command ps -p $$ | command grep -q zsh; then
return 0
else
return 1
fi
}
# Define exists() per shell: succeeds when $1 resolves to a runnable
# command.  The zsh variant additionally rejects "suffix alias" entries,
# which zsh's `type` reports but which are not directly invocable.
if isZsh; then
exists(){
#alternative is command -v
type "$1" &>/dev/null || return 1 && \
type "$1" 2>/dev/null | \
command grep -qv "suffix alias" 2>/dev/null
}
else
exists(){
#alternative is command -v
type "$1" >/dev/null 2>&1
}
fi
# Emit (to stdout) a bash snippet intended for use as an fzf preview
# command.  In this unquoted here-doc, unescaped expansions
# ($ZPWR_ENV, $ZPWR_SCRIPTS, $ZPWR_LOGFILE, $COLORIZER_FZF_*) are baked
# in NOW, while backslash-escaped \$ / \\ sequences survive verbatim and
# are evaluated later by the consuming shell.  No comment lines can be
# added inside the here-doc itself without changing the emitted snippet.
# The generated code looks up the selected line in ${ZPWR_ENV}Key.txt to
# classify it (alias/param/builtin/resword/command/func) and prints a
# colorized preview of the matching definition or file.
cat<<EOF
line={};
line=\$(echo \$line| sed "s@[]\\\[^\$.*/]@\\\\\\&@g")
cmdType=\$(grep -m1 -a " \$line\$" ${ZPWR_ENV}Key.txt | awk "{print \\\$1}")
file=\$(grep -m1 -a " \$line\$" ${ZPWR_ENV}Key.txt | awk "{print \\\$2}")
if [[ \$ZPWR_DEBUG == true ]]; then
echo "line:_\${line}_, cmdType:_\${cmdType}_ file:_\${file}_" >> $ZPWR_LOGFILE
fi
case \$cmdType in
(command)
if test -f \$file;then
if LC_MESSAGES=C command grep -Hm1 "^" "\$file" | command grep -q "^Binary";then
"$ZPWR_SCRIPTS/clearList.sh" -- \$file| fold -80 | head -500
test -x \$file && objdump -d \$file | $COLORIZER_FZF_YAML
xxd \$file | $COLORIZER_FZF_YAML
else
$COLORIZER_FZF_FILE 2>/dev/null
fi
else
"$ZPWR_SCRIPTS/clearList.sh" -- \$file | fold -80
fi
return 0
;;
esac
{
case \$cmdType in
(alias)
command grep -m1 -Fa "alias \$file" "${ZPWR_ENV}Value.txt"
;;
(param)
command grep -m1 -Fa "export \$file" "${ZPWR_ENV}Value.txt"
;;
(builtin)
command grep -m1 -Fa "\$file" | grep -F "shell builtin" "${ZPWR_ENV}Value.txt"
;;
(resword)
command grep -m1 -Fa "\$file" | grep -F "reserved word" "${ZPWR_ENV}Value.txt"
;;
(command)
if test -f \$file;then
if LC_MESSAGES=C command grep -Hm1 "^" "\$file" | command grep -q "^Binary";then
"$ZPWR_SCRIPTS/clearList.sh" -- \$file| fold -80 | head -500
test -x \$file && objdump -d \$file | $COLORIZER_FZF_YAML
xxd \$file | $COLORIZER_FZF_YAML
else
$COLORIZER_FZF_FILE 2>/dev/null
fi
else
"$ZPWR_SCRIPTS/clearList.sh" -- \$file | fold -80
fi
;;
(func)
file=\$(echo \$file | sed "s@[]\\\[^\$.*/]@\\\\\\&@g")
if [[ \$ZPWR_DEBUG == true ]]; then
echo "line:_\${line}_, cmdType:_\${cmdType}_ file:_\${file}_" >> $ZPWR_LOGFILE
fi
command grep -m1 -a "^\$file is a shell function" "${ZPWR_ENV}Value.txt"
command sed -n "/^\${file} () {/,/^}\$/p" "${ZPWR_ENV}Value.txt" | fold -80
;;
esac
} | cowsay | ponysay | "$ZPWR_SCRIPTS/splitReg.sh" -- ---------- lolcat
EOF
| true
|
ea927950d680f589be54fac4b09c45264adba6b5
|
Shell
|
baobaoyeye/env
|
/scripts/env.sh
|
UTF-8
| 1,626
| 2.5625
| 3
|
[] |
no_license
|
# env for baobaoyeye's linux
# Personal interactive-shell environment: locale, prompt, ls colors,
# navigation aliases, search helpers and toolchain PATH entries.
# for ssh timeout(s) [default = 0 mean not timeout]
export TMOUT=0
# set export value
# export HOME=/home/work
umask 0022
export TERM=xterm-256color
export LC_CTYPE=en_US.UTF-8
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export PS1='\[\e[34;40m\][\A \[\e[31;40m\]MY \[\e[37m\]\w]\$ \[\e[m\]'
LS_COLORS='di=34:fi=0:ln=31:pi=5:so=5:bd=5:cd=5:or=31:mi=0:ex=35:*.rpm=90:*.cc=37:*.c=37:*.cpp=37:*.h=92:*.o=94'
# Fix: export the variable NAME.  The original `export $LS_COLORS`
# expanded the value and exported a variable called `di` instead, so ls
# never received LS_COLORS from child shells.
export LS_COLORS
wp="${HOME}/workspace"
# useful alias to jump into dir or extends commands
alias ls='ls --color'
alias to_google='cd ${wp}/google'
alias to_baidu='cd ${wp}/baidu'
alias to_facebook='cd ${wp}/facebook'
alias to_my='cd ${wp}/baobaoyeye'
alias to_tera='cd ${wp}/baobaoyeye/tera'
alias to_code_practices='cd ${wp}/baobaoyeye/code_practices'
# useful commands
# maybe you should set your ftp root dir
alias findx='find . ! -name "*.so" -and ! -name "*.a" -and ! -name "*svn*" -and ! -name "*_test*" -and ! -name "*cscope*" | xargs grep --color -nE'
alias wgetx='wget --no-check-certificate'
alias cscopex='find . ! -name "*_test*" -and -name "*.cc" -or -name "*.h" -or -name "*.proto" > cscope.files && cscope -bkq -i cscope.files'
alias pof='printf "wget ftp://%s%s/%s\n" `hostname` `pwd`'
alias poscp='printf "work@%s:%s\n" `hostname` `pwd`'
EDITOR=vi; export EDITOR
export SVN_EDITOR=vim
# - set your gcc path
PATH=/opt/compiler/gcc-4.8.2/bin:$PATH
# NOTE(review): the next line prepends the g++ *binary* (a file, not a
# directory) to PATH, which has no effect — presumably a directory was
# intended.  Left as-is to avoid changing behavior.
PATH=/opt/compiler/gcc-4.8.2/bin/g++:$PATH
export PATH
# - set your http proxy, if you need it
# http_proxy=10.101.114.73:3128
# https_proxy=$http_proxy
# export http_proxy https_proxy
# - end of http proxy
| true
|
3c64b0d65d8fec9e24929df9f4484b0788e131ef
|
Shell
|
arpane4c5/InstallerScripts
|
/step10_sparkConfigMultinode
|
UTF-8
| 2,878
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Do not run this as super-user
# Script to install the following softwares on a fresh ubuntu 14.04 64-bit PC
# 1. Steps to define the master and slave PCs and make changes in spark-env.sh
# Prerequisites:
# --> Install ubuntu-14.04 (64-bit) on a PC (preferably 4GB RAM) and sufficient free space
# --> sudo apt-get update
# --> Run software updater and install the latest software updates
# --> Copy the Backup folder on the Desktop (It contains all main tar files for jdk, ant, maven, hipi etc)
# If the path is changes then make necessary changes in the script as well
# --> setup JDK1.8, GIT, and SCALA
# --> Apache Spark (Not necessary) - step8_SparkFromSource
# --> Anaconda - step9_Anaconda
# Check for single node by foll command:
# IPYTHON_OPTS="notebook" ~/spark/spark-1.5.1/bin/pyspark
# To setup for multi-node, goto ~/spark/spark-1.5.1/conf
# --> make a copy of the spark-env.sh.template as spark-env.sh
# -- Make following changes in the spark-env.sh - export the following variables (for basic config changes)
# export SPARK_MASTER_IP=hadoop-masternode
# export SPARK_WORKER_CORES=4
# export SPARK_WORKER_MEMORY=4g
# export SPARK_WORKER_INSTANCES=1
# --> make a copy of the slaves.template as slaves
# -- make the following changes to the file (List the hostnames of the slaves) - At present only 1 is added
# hadoop-slaveDN5
# Replicate these 2 files in all the nodes of the cluster
# On the masternode execute the following commands 1 by 1
# Start the Distributed file system
# start-all.sh
# Start the master (spark - for masternode)- Eg.- From inside the project folder
# ~/spark/spark-1.5.1/sbin/start-master.sh
# Check if the master node has been started for spark. Goto browser
# localhost:8080
# Note the URL given on the webpage : Eg:-
# spark://hadoop-masternode:7077
# To start the worker nodes - type the following command:
# ~/spark/spark-1.5.1/sbin/start-slaves.sh spark://hadoop-masternode:7077
###########################################
# Move the data onto the HDFS -
# --> Create the directory structure used for saving the data
# hadoop fs -mkdir -p /MLProjectData/data
# --> Check that the directory is created (from browser or foll command)
# hadoop fs -ls /
# Move the data to HDFS - Go inside the directory where data is saved (or use absolute path)
# hadoop fs -copyFromLocal ~/MLProjects/data/YearPredictionMSD.txt /MLProjectData/data/
# check that it has been moved to HDFS
# hadoop fs -ls /MLProjectData/data/
###########################################
# Refresh the page to check whether the worker node has been started
# To launch the application
# MASTER=spark://hadoop-masternode:7077 IPYTHON_OPTS="notebook" ~/spark/spark-1.5.1/bin/pyspark
# In the notebook, change the local system path to hdfs path
# hdfs://hadoop-masternode:9000/MLProjectData/data/YearPredictionMSD.txt
| true
|
65be1c8f019997eea8fe0fcc356654d338395aa6
|
Shell
|
savf/dotfiles
|
/symlink.sh
|
UTF-8
| 688
| 3.515625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Symlink each entry of $files from ~/dotfiles into $HOME as ~/.<name>,
# creating an empty ~/dotfiles/aliases (plus ~/.aliases link) if absent.
# Inspired by https://github.com/michaeljsmalley/dotfiles/blob/master/makesymlinks.sh
dir=~/dotfiles
files="profile bashrc vimrc vim bin tmux.conf lessfilter gitconfig kubectl_aliases zshrc config/starship.toml config/yabai/yabairc"
echo -n "Changing to the $dir directory for correct symlinks ... "
# Fix: abort when the dotfiles directory is missing.  The original
# ignored a failed cd and went on to `rm -rf` the home-directory
# dotfiles and create dangling symlinks from the wrong cwd.
cd "$dir" || { echo "failed: $dir does not exist"; exit 1; }
echo "done"
for file in $files; do
echo "Removing existing dotfiles from ~"
rm -rf ~/."$file"
done
if [ ! -f ~/.aliases ]
then
echo "Creating empty .aliases file and symlink"
touch ~/dotfiles/aliases
ln -s ~/dotfiles/aliases ~/.aliases
fi
for file in $files; do
echo "Creating symlink to $file in home directory"
ln -s "$dir/$file" ~/."$file"
done
| true
|
1f797b9554f48a037516b476fb8834ac8190c493
|
Shell
|
alex78160/scripts
|
/compare.sh
|
UTF-8
| 405
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Report every IP listed in file $1 that is absent from file $2.
# Missing entries are printed, appended to ip_not_found.txt, and counted.
# Fix: quote file arguments (the original bare `cat $1` read stdin when
# $1 was empty), replace backticks with $(...), and use builtin
# arithmetic instead of spawning `expr` per miss.
liste1=( $(cat "$1") )
liste2=( $(cat "$2") )
count=0
for ip1 in "${liste1[@]}"
do
found=false
# Linear scan of the second list; stop at the first match.
for ip2 in "${liste2[@]}"
do
if [[ $ip1 == $ip2 ]]
then
found=true
break
fi
done
if ! $found
then
echo "$ip1 pas trouve"
echo "$ip1" >> ip_not_found.txt
count=$((count + 1))
fi
done
echo "$count IP non trouvees"
| true
|
a4a4715d91b370a0700594b279ee69128d68c3bf
|
Shell
|
EasonBryant/LinuxShellHelper
|
/brackets/parentheses
|
UTF-8
| 5,259
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/sh
# Print help (in Chinese) for the "command group" feature of single
# parentheses: (cmd1;cmd2) runs in a subshell, so variable assignments
# inside do not leak to the rest of the script.
command_group_usage()
{
echo -e "\n命令组用途:"
echo -e "括号中的命令将会新开一个子shell顺序执行,所以括号中的变量不能够被脚本余下的部分使用。"
echo -e "括号中多个命令之间用分号隔开,最后一个命令可以没有分号,各命令和括号之间不必有空格。\n"
echo "示例:"
echo -e "> vartest=\"hello\""
echo -e "> echo \$vartest"
echo -e "hello"
echo -e "> (vartest=\"world\";echo \$vartest)"
echo -e "world"
echo -e "> echo \$vartest"
echo -e "hello\n"
echo -e "下面输入框可直接输入命令,'\\\\r'返回上一级,'\q'退出"
}
# Interactive REPL for trying command groups.  Reads each line into the
# array `input`; note that bare $input is only the FIRST word, so the
# \r / \q / \h checks match only when the control token is typed alone.
# Everything else is eval'd verbatim in the current shell.
command_group()
{
command_group_usage
while read -e -r -a input -p "single parentheses commandgroup> "
do
if [[ $input == '\r' ]]
then
break
elif [[ $input == '\q' ]]
then
exit 1
elif [[ $input == '\h' ]]
then
command_group_usage
else
eval ${input[@]}
if [ $? -ne 0 ]
then
echo -e "输入'\h'来获取帮助信息。"
fi
continue
fi
done
}
# Print help for command substitution: $(cmd) is equivalent to `cmd`;
# the command's stdout is substituted into the surrounding command line.
command_replace_usage()
{
echo -e "\n命令替换用途:"
echo -e "等同于\`cmd\`,shell扫描一遍命令行,发现了\$(cmd)结构,便将\$(cmd)中的cmd执行一次,得到其标准输出,再将此输出放到原来命令。\n"
echo "示例:"
echo -e "> year=\`date +%Y\`"
echo -e "> echo \$year"
echo -e "2014"
echo -e "> month=\$(date +%m)"
echo -e "> echo \$month"
echo -e "05\n"
echo -e "下面输入框可直接输入命令,'\\\\r'返回上一级,'\q'退出"
}
# Interactive REPL for trying command substitution; same control tokens
# and eval behavior as command_group().
command_replace()
{
command_replace_usage
while read -e -r -a input -p "single parentheses commandreplace> "
do
if [[ $input == '\r' ]]
then
break
elif [[ $input == '\q' ]]
then
exit 1
elif [[ $input == '\h' ]]
then
command_replace_usage
else
eval ${input[@]}
if [ $? -ne 0 ]
then
echo -e "输入'\h'来获取帮助信息。"
fi
continue
fi
done
}
# Print help for array initialization with parentheses: array=(1 2 3 4),
# plus the ${#array[@]} / ${array[@]} / ${array[1]} access forms.
init_array_usage()
{
echo -e "\n初始化数组用途:"
echo -e "用于初始化数组。如:array=(1 2 3 4)"
echo "示例:"
echo -e "> array=(1 2 3 4)"
echo -e "> echo \"数组元素个数:\${#array[@]}\""
echo -e "数组元素个数:4"
echo -e "> echo \"数组所有元素:\${array[@]}\""
echo -e "数组所有元素:1 2 3 4"
echo -e "> echo \"数组第二个元素:\${array[1]}\""
echo -e "数组第二个元素:2\n"
echo -e "下面输入框可直接输入命令,'\\\\r'返回上一级,'\q'退出"
}
# Interactive REPL for trying array initialization; same control tokens
# and eval behavior as command_group().
init_array()
{
init_array_usage
while read -e -r -a input -p "single parentheses initarray> "
do
if [[ $input == '\r' ]]
then
break
elif [[ $input == '\q' ]]
then
exit 1
elif [[ $input == '\h' ]]
then
init_array_usage
else
eval ${input[@]}
if [ $? -ne 0 ]
then
echo -e "输入'\h'来获取帮助信息。"
fi
continue
fi
done
}
# Print the menu for the single-parentheses submenu: 1=command group,
# 2=command substitution, 3=array init, plus the control tokens.
parentheses_usage()
{
echo -e "\n单小括号用途:
1 命令组
2 命令替换
3 用于初始化数组
\h 帮助信息(help)
\\\\c 清屏(clear)
\\\\r 返回上一级(return)
\q 退出(quit)\n"
}
# Single-parentheses submenu loop: dispatch the user's choice to the
# matching demo REPL, or handle the \r/\q/\h/\c control tokens.
parentheses()
{
parentheses_usage
while read -e -p "single parentheses> " -r input
do
case $input in
'\r')
break
;;
'\q')
exit 1
;;
'\h')
parentheses_usage
;;
'\c')
eval clear
;;
'1')
command_group
;;
'2')
command_replace
;;
'3')
init_array
;;
"")
continue
;;
*)
echo "未知命令'$input'"
echo -e "输入'\h'来获取帮助信息。"
continue
esac
done
}
# Print the top-level menu: \s single parentheses, \d double
# parentheses, plus help/clear/return/quit tokens.
usage()
{
echo -e "\n小括号用法:
\h 帮助信息(help)
\s 单小括号(single parentheses)
\d 双小括号(double parentheses)
\\\\c 清屏(clear)
\\\\r 返回上一级(return)
\q 退出(quit)\n"
}
# Top-level menu loop.
# NOTE(review): '\d' calls `doubleparentheses`, which is not defined
# anywhere in this file — presumably provided by a sibling script that
# sources this one; verify before relying on that menu entry.
main()
{
usage
while read -e -p "parentheses> " -r input
do
case $input in
'\q')
exit 1
;;
'\r')
exit 0
;;
'\h')
usage
;;
'\c')
eval clear
;;
'\s')
parentheses
;;
'\d')
doubleparentheses
if [ $? -eq 1 ]
then
exit 1
fi
;;
'')
continue
;;
*)
echo "未知命令'$input'"
echo -e "输入'\h'来获取帮助信息。"
continue
esac
done
}
main
| true
|
2861bea8e59b1225ad761df5b9b48cef81274ff0
|
Shell
|
derenv/bash-kernel-signer
|
/bash-kernel-signer.sh
|
UTF-8
| 19,358
| 3.984375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
##
#
# Copyright 2021 Deren Vural
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# Root privileges check
[[ $EUID -ne 0 ]] && echo "This script must be run as root." && exit 1
# Global error message var
ERROR_MSG=""
## Functions
#######################################
# Interactive signing loop: scan $kernel_location for vmlinuz-* images,
# classify each by signature state via sbverify (unsigned / validly
# signed / invalidly signed), display the lists, and sbsign the
# unsigned kernel the user selects (output file gets a timestamp
# suffix).  Loops until the user enters 0.
# Globals (read): kernel_location, cert_location, key_location
# Globals (set): ERROR_MSG on cancel
# Returns: 1 when the user cancels, otherwise loops forever
#######################################
function sign_kernel()
{
## Sign loop
stop="False"
prev_out=""
until [[ "$stop" == "True" ]]; do
tput reset
echo "=========BASH KERNEL SIGNING UTILITY=========="
# Search for kernels
mapfile -t kernels < <( find "$kernel_location" -name "vmlinuz-*" | sort -n )
unsigned_kernels=()
valid_signed_kernels=()
invalid_signed_kernels=()
valid_validity_checks=()
invalid_validity_checks=()
# For each detected kernel
for unvalidated_kernel in "${kernels[@]}"; do
# Validate kernel signatures (stderr merged so error text is captured)
mapfile -t validity_check < <(sbverify --cert "$cert_location" "${unvalidated_kernel}" 2>&1)
# Classify by sbverify's exact output lines
if [[ "${#validity_check[@]}" = 1 && "${validity_check[0]}" = "Signature verification OK" ]]; then
# Add to valid signed kernels
valid_signed_kernels+=("$unvalidated_kernel")
valid_validity_checks+=("${validity_check[0]}")
elif [[ "${#validity_check[@]}" = 1 && "${validity_check[0]}" = "Signature verification failed" ]]; then
# Add to invalid signed kernels
invalid_signed_kernels+=("$unvalidated_kernel")
invalid_validity_checks+=("${validity_check[0]}")
elif [[ "${#validity_check[@]}" = 2 && "${validity_check[0]}" = "No signature table present" ]]; then
# Add to unsigned kernels
unsigned_kernels+=("$unvalidated_kernel")
else
# SOME UNKNOWN ERROR?
echo "??error??"
fi
done
# Print all kernels; only the unsigned list is numbered because only
# those can be chosen for signing below.
declare -i counter
echo " Number of kernels available for signing: ${#unsigned_kernels[@]}"
if [[ "${#unsigned_kernels[@]}" == 0 ]]; then
echo " -none-"
else
counter=0
for kernel in "${unsigned_kernels[@]}"; do
id=$(( "$counter" + 1 ))
echo " $id - $kernel"
(( counter++ ))
done
fi
echo " Number of signed kernels: ${#valid_signed_kernels[@]}"
if [[ "${#valid_signed_kernels[@]}" == 0 ]]; then
echo " -none-"
else
counter=0
for kernel in "${valid_signed_kernels[@]}"; do
echo " $kernel"
echo " -> ${valid_validity_checks[$counter]}"
(( counter++ ))
done
fi
echo " Number of invalid signed kernels: ${#invalid_signed_kernels[@]}"
if [[ "${#invalid_signed_kernels[@]}" == 0 ]]; then
echo " -none-"
else
counter=0
for kernel in "${invalid_signed_kernels[@]}"; do
echo " $kernel"
echo " -> ${invalid_validity_checks[$counter]}"
(( counter++ ))
done
fi
echo "=============================================="
echo "$prev_out"
echo "=============================================="
echo "0 - Exit"
read -p "Which kernel would you like to sign?:" -r user_input
if [[ "$user_input" == "0" ]]; then
ERROR_MSG="cancelled.."
return 1
elif [[ "$user_input" =~ ^[0-9]+$ ]] && test "$user_input" -le "${#unsigned_kernels[@]}"; then
# Sign kernel; output name carries a timestamp so reruns don't clobber
selection=$(( user_input - 1 ))
datetime=$(date +"%Y-%m-%d+%T")
sbsign --key "$key_location" --cert "$cert_location" --output "${unsigned_kernels[$selection]}.signed$datetime" "${unsigned_kernels[$selection]}"
prev_out="$?"
else
prev_out="invalid input.."
fi
done
return 0
}
#######################################
# Interactive purge loop: list kernels under $kernel_location grouped by
# signature state (when keys are available), then delete the validly
# signed kernel the user selects.  Without keys, kernels are listed but
# cannot be classified, so nothing can be purged.
# Globals (read): kernel_location, cert_location, valid_keys
# Globals (set): ERROR_MSG on cancel
# Returns: 1 when the user cancels, otherwise loops forever
#######################################
function purge_kernel()
{
## Purge loop
stop="False"
prev_out=""
until [[ "$stop" == "True" ]]; do
tput reset
echo "=========BASH KERNEL SIGNING UTILITY=========="
# Search for kernels
mapfile -t kernels < <( find "$kernel_location" -name "vmlinuz-*" | sort -n )
# Only verify keys if keys exist
if [[ "$valid_keys" == "True" ]]; then
unsigned_kernels=()
valid_signed_kernels=()
invalid_signed_kernels=()
valid_validity_checks=()
invalid_validity_checks=()
# For each detected kernel
for unvalidated_kernel in "${kernels[@]}"; do
# Validate kernel signatures
mapfile -t validity_check < <(sbverify --cert "$cert_location" "${unvalidated_kernel}" 2>&1)
# Classify by sbverify's exact output lines
if [[ "${#validity_check[@]}" = 1 && "${validity_check[0]}" = "Signature verification OK" ]]; then
# Add to valid signed kernels
valid_signed_kernels+=("$unvalidated_kernel")
valid_validity_checks+=("${validity_check[0]}")
elif [[ "${#validity_check[@]}" = 1 && "${validity_check[0]}" = "Signature verification failed" ]]; then
# Add to invalid signed kernels
invalid_signed_kernels+=("$unvalidated_kernel")
invalid_validity_checks+=("${validity_check[0]}")
elif [[ "${#validity_check[@]}" = 2 && "${validity_check[0]}" = "No signature table present" ]]; then
# Add to unsigned kernels
unsigned_kernels+=("$unvalidated_kernel")
else
# SOME UNKNOWN ERROR?
echo "??error??"
fi
done
# Print all kernels; here the SIGNED list is numbered because only
# signed kernels can be purged below.
declare -i counter
echo " Number of kernels available for signing: ${#unsigned_kernels[@]}"
if [[ "${#unsigned_kernels[@]}" == 0 ]]; then
echo " -none-"
else
for kernel in "${unsigned_kernels[@]}"; do
echo " $kernel"
done
fi
echo " Number of signed kernels: ${#valid_signed_kernels[@]}"
if [[ "${#valid_signed_kernels[@]}" == 0 ]]; then
echo " -none-"
else
counter=0
for kernel in "${valid_signed_kernels[@]}"; do
id=$(( "$counter" + 1 ))
echo " $id - $kernel"
echo " -> ${valid_validity_checks[$counter]}"
(( counter++ ))
done
fi
echo " Number of invalid signed kernels: ${#invalid_signed_kernels[@]}"
if [[ "${#invalid_signed_kernels[@]}" == 0 ]]; then
echo " -none-"
else
counter=0
for kernel in "${invalid_signed_kernels[@]}"; do
echo " $kernel"
echo " -> ${invalid_validity_checks[$counter]}"
(( counter++ ))
done
fi
else
echo " Kernels Present: ${#kernels[@]}"
for kernel in "${kernels[@]}"; do
echo " $kernel"
done
echo "Signature Database key and/or certificate not detected.."
fi
echo "=============================================="
echo "$prev_out"
echo "=============================================="
echo "0 - Exit"
read -p "Which signed kernel would you like to purge?:" -r user_input
if [[ "$user_input" == "0" ]]; then
ERROR_MSG="cancelled.."
return 1
elif [[ ! "$valid_keys" == "True" ]]; then
prev_out="missing/invalid keys, cannot check kernels.."
elif [[ "$user_input" =~ ^[0-9]+$ ]] && test "$user_input" -le "${#valid_signed_kernels[@]}"; then
# Purge signed kernel
selection=$(( user_input - 1 ))
sudo rm -f "${valid_signed_kernels[$selection]}"
prev_out="$?"
else
prev_out="invalid input.."
fi
done
return 0
}
#######################################
# Generate a full Secure Boot key hierarchy (PK / KEK / db) in a
# user-chosen directory: back up the current firmware variables, create
# new 10-year RSA-2048 keys + certs, build signed .esl/.auth update
# files (including compound old+new lists and DER copies for BIOSes),
# then record the new db key/cert paths in keylocations.cfg via sed.
# The directory must already exist with mode 700 and be read/writable.
# Globals (read): SCRIPT_DIR  Globals (set): ERROR_MSG on failure
# Returns: 0 on success, 1 on cancel/invalid directory
#######################################
function create_keys()
{
# Get user input
tput reset
read -p "Please specify (existing) directory for new keys & certificates (0 to cancel):" -r user_input
# Validate folder exists (and is private: mode 700, rw for us)
if [[ "$user_input" == "0" ]]; then
ERROR_MSG="cancelled.."
return 1
elif [[ $(stat -c "%a" "$user_input") == "700" && -w "$user_input" && -r "$user_input" ]]; then
# Read old keys (backup of the current firmware variable contents)
echo "reading old keys..."
efi-readvar -v PK -o "${user_input}/old_PK.esl"
efi-readvar -v KEK -o "${user_input}/old_KEK.esl"
efi-readvar -v db -o "${user_input}/old_db.esl"
efi-readvar -v dbx -o "${user_input}/old_dbx.esl"
# (continue)
read -n 1 -s -r -p "Old keys successfully read into files, press any key to continue.."
# Generate keys and certificates
echo -e "\ngenerating keys & certificates..."
openssl req -new -x509 -newkey rsa:2048 -subj "/CN=new platform key/" -keyout "${user_input}/new_PK.key" -out "${user_input}/new_PK.crt" -days 3650 -nodes -sha256
openssl req -new -x509 -newkey rsa:2048 -subj "/CN=new key exchange key/" -keyout "${user_input}/new_KEK.key" -out "${user_input}/new_KEK.crt" -days 3650 -nodes -sha256
openssl req -new -x509 -newkey rsa:2048 -subj "/CN=new kernel signing key/" -keyout "${user_input}/new_db.key" -out "${user_input}/new_db.crt" -days 3650 -nodes -sha256
# Change permissions to read-only for root (precaution)
sudo chmod -v 400 "${user_input}/new_PK.key"
sudo chmod -v 400 "${user_input}/new_KEK.key"
sudo chmod -v 400 "${user_input}/new_db.key"
# (continue)
read -n 1 -s -r -p "Keys successfully generated, press any key to continue.."
# Create update files; each list is signed by the level above it
# (PK signs PK & KEK updates, KEK signs db & dbx updates)
echo -e "\ncreating update files for keystore.."
# PK
cert-to-efi-sig-list -g "$(uuidgen)" "${user_input}/new_PK.crt" "${user_input}/new_PK.esl"
sign-efi-sig-list -k "${user_input}/new_PK.key" -c "${user_input}/new_PK.crt" PK "${user_input}/new_PK.esl" "${user_input}/new_PK.auth"
# KEK
cert-to-efi-sig-list -g "$(uuidgen)" "${user_input}/new_KEK.crt" "${user_input}/new_KEK.esl"
sign-efi-sig-list -a -k "${user_input}/new_PK.key" -c "${user_input}/new_PK.crt" KEK "${user_input}/new_KEK.esl" "${user_input}/new_KEK.auth"
# db
cert-to-efi-sig-list -g "$(uuidgen)" "${user_input}/new_db.crt" "${user_input}/new_db.esl"
sign-efi-sig-list -a -k "${user_input}/new_KEK.key" -c "${user_input}/new_KEK.crt" db "${user_input}/new_db.esl" "${user_input}/new_db.auth"
# dbx (re-sign the existing revocation list with the new KEK)
sign-efi-sig-list -k "${user_input}/new_KEK.key" -c "${user_input}/new_KEK.crt" dbx "${user_input}/old_dbx.esl" "${user_input}/old_dbx.auth"
# (continue)
read -n 1 -s -r -p "Update files successfully generated, press any key to continue.."
# Create DER (Distinguished Encoding Rules) files, needed for some BIOSes
openssl x509 -outform DER -in "${user_input}/new_PK.crt" -out "${user_input}/new_PK.cer"
openssl x509 -outform DER -in "${user_input}/new_KEK.crt" -out "${user_input}/new_KEK.cer"
openssl x509 -outform DER -in "${user_input}/new_db.crt" -out "${user_input}/new_db.cer"
# (continue)
echo -e "\n"
read -n 1 -s -r -p "DER versions successfully generated, press any key to continue"
echo -e "\n"
# Create compound esl files (old + new entries) & auth counterparts
cat "${user_input}/old_KEK.esl" "${user_input}/new_KEK.esl" > "${user_input}/compound_KEK.esl"
cat "${user_input}/old_db.esl" "${user_input}/new_db.esl" > "${user_input}/compound_db.esl"
sign-efi-sig-list -k "${user_input}/new_PK.key" -c "${user_input}/new_PK.crt" KEK "${user_input}/compound_KEK.esl" "${user_input}/compound_KEK.auth"
sign-efi-sig-list -k "${user_input}/new_KEK.key" -c "${user_input}/new_KEK.crt" db "${user_input}/compound_db.esl" "${user_input}/compound_db.auth"
# (continue)
echo -e "\nNew esl & auth files successfully generated!"
# Set the new locations in config file: build sed programs that
# overwrite lines 1 and 2 of keylocations.cfg with the new paths
# (slashes in the path are escaped first so sed's s/// stays valid)
echo "Adding ${user_input}/new_db.key and ${user_input}/new_db.crt to config file!"
sed_location="${user_input//\//\\/}" #replace all '/' with '\/'
sed_command_key='1s/.*/key_location="'
sed_command_key=$sed_command_key"${sed_location}"
sed_command_key=$sed_command_key'\/new_db.key"/'
sed_command_cert='2s/.*/cert_location="'
sed_command_cert=$sed_command_cert"${sed_location}"
sed_command_cert=$sed_command_cert'\/new_db.crt"/'
sed -i "${sed_command_key}" "$SCRIPT_DIR/keylocations.cfg"
sed -i "${sed_command_cert}" "$SCRIPT_DIR/keylocations.cfg"
echo "Added ${user_input}/new_db.key and ${user_input}/new_db.crt to config file!"
# Give user links for adding to keystore
echo "See Sakaki's guide (https://wiki.gentoo.org/wiki/User:Sakaki/Sakaki's_EFI_Install_Guide/Configuring_Secure_Boot#Installing_New_Keys_into_the_Keystore) on how to update your keystore!"
read -n 1 -s -r -p "(press any key to continue)"
else
ERROR_MSG="invalid directory, please exit and create new directory (check permissions!).."
return 1
fi
return 0
}
## Check for signing keys
# Bootstrap: ensure keylocations.cfg exists next to this script (creating
# a skeleton owned by the invoking user when missing), source it, then
# validate key_location/cert_location (optional -> valid_keys flag) and
# kernel_location (mandatory directory).
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
if [[ ! -f "$SCRIPT_DIR/keylocations.cfg" ]];then
# Otherwise print error
echo "missing config file.."
# Create new config file
echo "creating new config file.."
touch "$SCRIPT_DIR/keylocations.cfg"
if [[ ! -f "$SCRIPT_DIR/keylocations.cfg" ]];then
echo "creation failed! unknown error.."
exit 1
fi
# Update ownership: script runs as root, so hand the file back to the
# logged-in user reported by who(1)
USER="$(who | awk '{print $1;}')"
chown "$USER:$USER" "$SCRIPT_DIR/keylocations.cfg"
# Add variables (empty skeleton the user must fill in)
echo 'key_location=""' >> "$SCRIPT_DIR/keylocations.cfg"
echo 'cert_location=""' >> "$SCRIPT_DIR/keylocations.cfg"
echo 'kernel_location=""' >> "$SCRIPT_DIR/keylocations.cfg"
# Warn user
echo "!!PLEASE UPDATE CONFIG FILE WITH KERNEL LOCATION!!"
fi
# Import config file (defines key_location, cert_location, kernel_location)
source "$SCRIPT_DIR/keylocations.cfg"
# Check keys exist
if [[ -z "$key_location" || -z "$cert_location" ]]; then
# empty key & cert file locations in config
valid_keys="False"
else
# Key & cert files are specified
valid_keys="True"
# Check key & cert valid locations
if [[ ! -f $key_location || ! -f $cert_location ]]; then
# Otherwise print error
echo "invalid Signature Database key/certificate locations, check config file.."
exit 1
fi
fi
# Check valid locations
if [[ -z $kernel_location ]]; then
# Otherwise print error
echo "empty kernel location, check config file.."
exit 1
else
# Check files exist
if [[ ! -d $kernel_location ]]; then
# Otherwise print error
echo "missing kernel location, check config file.."
exit 1
fi
fi
## Main Loop
# Menu loop: redisplay the kernel inventory each iteration (classified
# via sbverify when keys are available), then dispatch the user's
# choice: 1 sign, 2 purge, 3 create keys, 4 mainline-gtk, 5
# grub-customizer, 6 reboot, 0 exit.  $prev_out carries the previous
# action's status message into the next screen.
stop="False"
prev_out=""
while [[ "$stop" == "False" ]]; do
tput reset
echo "=========BASH KERNEL SIGNING UTILITY=========="
# Search for kernels
mapfile -t kernels < <( find "$kernel_location" -name "vmlinuz-*" | sort -n )
# Only verify keys if keys exist
if [[ "$valid_keys" == "True" ]]; then
unsigned_kernels=()
valid_signed_kernels=()
invalid_signed_kernels=()
valid_validity_checks=()
invalid_validity_checks=()
# For each detected kernel
for unvalidated_kernel in "${kernels[@]}"; do
# Validate kernel signatures
mapfile -t validity_check < <(sbverify --cert "$cert_location" "${unvalidated_kernel}" 2>&1)
# Classify by sbverify's exact output lines
if [[ "${#validity_check[@]}" = 1 && "${validity_check[0]}" = "Signature verification OK" ]]; then
# Add to valid signed kernels
valid_signed_kernels+=("$unvalidated_kernel")
valid_validity_checks+=("${validity_check[0]}")
elif [[ "${#validity_check[@]}" = 1 && "${validity_check[0]}" = "Signature verification failed" ]]; then
# Add to invalid signed kernels
invalid_signed_kernels+=("$unvalidated_kernel")
invalid_validity_checks+=("${validity_check[0]}")
elif [[ "${#validity_check[@]}" = 2 && "${validity_check[0]}" = "No signature table present" ]]; then
# Add to unsigned kernels
unsigned_kernels+=("$unvalidated_kernel")
else
# SOME UNKNOWN ERROR?
echo "??error??"
fi
done
# Print all kernels
declare -i counter
echo " Number of kernels available for signing: ${#unsigned_kernels[@]}"
if [[ "${#unsigned_kernels[@]}" == 0 ]]; then
echo " -none-"
else
for kernel in "${unsigned_kernels[@]}"; do
echo " $kernel"
done
fi
echo " Number of signed kernels: ${#valid_signed_kernels[@]}"
if [[ "${#valid_signed_kernels[@]}" == 0 ]]; then
echo " -none-"
else
counter=0
for kernel in "${valid_signed_kernels[@]}"; do
echo " $kernel"
echo " -> ${valid_validity_checks[$counter]}"
(( counter++ ))
done
fi
echo " Number of invalid signed kernels: ${#invalid_signed_kernels[@]}"
if [[ "${#invalid_signed_kernels[@]}" == 0 ]]; then
echo " -none-"
else
counter=0
for kernel in "${invalid_signed_kernels[@]}"; do
echo " $kernel"
echo " -> ${invalid_validity_checks[$counter]}"
(( counter++ ))
done
fi
echo "Signature Database key & certificate detected.."
else
echo " Kernels Present: ${#kernels[@]}"
for kernel in "${kernels[@]}"; do
echo " $kernel"
done
echo "Signature Database key and/or certificate not detected.."
fi
echo "=============================================="
echo "$prev_out"
echo "=============================================="
echo "1 - Sign a kernel"
echo "2 - Purge signed kernel"
echo "3 - Create new keys"
echo "4 - Install/Remove unsigned kernel"
echo "5 - Modify Grub"
echo "6 - Reboot"
echo "0 - Exit"
read -p "enter input:" -r user_input
if [[ "$user_input" == "1" ]]; then
if [[ "$valid_keys" == "True" ]]; then
# sign kernels
sign_kernel
if [[ $? == 0 ]]; then
prev_out="success!"
else
prev_out="failure: $ERROR_MSG"
fi
else
prev_out="create new keys and append to existing/default keys first!"
fi
elif [[ "$user_input" == "2" ]]; then
# purge kernels
purge_kernel
if [[ $? == 0 ]]; then
prev_out="success!"
else
prev_out="failure: $ERROR_MSG"
fi
elif [[ "$user_input" == "3" ]]; then
# create keys
create_keys
if [[ $? == 0 ]]; then
prev_out="success!"
# Import config file (create_keys rewrote the key/cert paths)
source "$SCRIPT_DIR/keylocations.cfg"
# Key & cert files now exist
valid_keys="True"
else
prev_out="failure: $ERROR_MSG"
fi
elif [[ "$user_input" == "4" ]]; then
# check mainline present before delegating kernel install/remove to it
command_exists="$(command -v mainline-gtk)"
if [[ -n "$command_exists" ]]; then
# redirect to mainline-gtk app
mainline-gtk
if [[ $? == 0 ]]; then
prev_out="success!"
else
prev_out="failure: $?"
fi
else
prev_out="mainline-gtk not present!"
fi
elif [[ "$user_input" == "5" ]]; then
# check grub-customizer present
command_exists="$(command -v grub-customizer)"
if [[ -n "$command_exists" ]]; then
# redirect to grub-customizer app
grub-customizer
if [[ $? == 0 ]]; then
prev_out="success!"
else
prev_out="failure: $?"
fi
else
prev_out="grub-customizer not present!"
fi
elif [[ "$user_input" == "6" ]]; then
reboot
elif [[ "$user_input" == "0" ]]; then
# exit
tput reset
stop="True"
echo "Goodbye!.."
else
prev_out="invalid input.."
fi
done
| true
|
209e40a4f30804316c6040eeeeed0bd9837419b8
|
Shell
|
banqusli/first
|
/praxis/IT2008/primzahlen
|
UTF-8
| 800
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Compute all primes up to a limit by trial division.
# The limit is taken from $1, or read interactively when absent.
if [ -z "$1" ]
then
echo -n "Bitte maximale zu testende Zahl eingeben: "
read MAX
else
MAX=$1
fi
echo "Berechnung der Primzahlen bis "$MAX:
echo
echo "2" # first prime; even candidates are skipped below
for ((k=3;k<=MAX;k+=2)) # even numbers are not prime
do
PRIM="yes"
# Trial-divide by odd i while i*i <= k — equivalent to testing up to
# sqrt(k), replacing the original per-candidate `bc -l` subprocess
# with builtin integer arithmetic.
for ((i=3;i*i<=k;i+=2))
do
if ((k % i == 0))
then
PRIM="no"
break # divisor found, stop testing
fi
done
if [ $PRIM = "yes" ]
then
echo $k
fi
done
exit 0
| true
|
e3739bf5a781d4c419451b8b2d9aa3d1f6e3c105
|
Shell
|
mrtos/ambarella
|
/kernel/external/atheros/ar6003_844/host/os/rexos/src/usdio/load.sh
|
UTF-8
| 510
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/sh -x
# Load (default) or unload ("unloadall") the Atheros SDIO driver stack
# built into .output/.  Module order matters: lib -> busdriver -> host
# controller -> usdio (and the reverse for unloading).
DIR=.output
case $1 in
unloadall )
echo "..unloading all"
/sbin/rmmod $DIR/usdio.ko
/sbin/rmmod $DIR/sdio_pcistd_hcd.ko
/sbin/rmmod $DIR/sdio_busdriver.ko
/sbin/rmmod $DIR/sdio_lib.ko
;;
*)
/sbin/insmod $DIR/sdio_lib.ko debuglevel=0
/sbin/insmod $DIR/sdio_busdriver.ko RequestListSize=300 debuglevel=0
/sbin/insmod $DIR/sdio_pcistd_hcd.ko debuglevel=0
/sbin/insmod $DIR/usdio.ko
# Recreate the char device node (major 65, minor 0) for userspace access
rm -f /dev/usdio0
mknod -m 777 /dev/usdio0 c 65 0
;;
esac
| true
|
796669e2bac98e92cf0e9244a563fcd0a64f4126
|
Shell
|
DanielDlc/shellscript
|
/02_comandos_dentro_de_arquivos/21_uname-a_detalhado.sh
|
UTF-8
| 1,397
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
clear
# system information (full `uname -a` output)
uname -a
echo # blank line after the uname -a output
# use cut with delimiter " " (space) to pick only the computer name
uname -a | cut -d " " -f2
# kernel version (it is the third field, f3)
uname -a | cut -d " " -f2,3
# show the architecture at position 14
# NOTE(review): field 14 being the architecture depends on this exact
# kernel's `uname -a` layout; field counts vary across distros — verify.
uname -a | cut -d " " -f2,3,14
# tr can be used to substitute (space -> line break)
uname -a | cut -d " " -f2,3,14 | tr " " "\n"
echo # trailing blank line
###########################################################
# #
# Author: Daniel Louro Costa (dlc.engcomputacao@gmail.com)#
# Created: 22/05/2021 #
# #
# Purpose: fetch host name, kernel and architecture #
# #
# Usage example: ./21_uname-a_detalhado.sh #
# #
# Changes #
# #
# Day X - ... #
# Day Y - ... #
# #
###########################################################
| true
|
e16c69603f653154f94f6b1abe4856fa3e6ad3df
|
Shell
|
Ununnilium/docker-services
|
/openldap/run.sh
|
UTF-8
| 1,627
| 3.796875
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/sh
# Interactive bootstrap for an OpenLDAP container: prompts for image and
# container names, data directory, base domain and root password; builds
# slapd.conf from a template, starts the container, and seeds the tree.
script=`readlink -f "$0"`
cd "`dirname \"$script\"`"
printf "Image name? "
read image_name
printf "Container name? "
read container_name
printf "Directory for permanent files? (default /srv/docker/openldap) "
read directory
if [ ! $directory ];then
directory=/srv/docker/openldap
fi
if [ ! -d "$directory/data" ]; then
sudo mkdir -p "$directory/data"
fi
printf 'Domain name? (e.g. example.com) '
read domain
# Convert "a.b.c" into "dc=a,dc=b,dc=c": split on the first remaining
# dot each pass until no dots are left.
echo $domain | fgrep '.' > /dev/null
while [ $? -eq 0 ]; do
domain=`echo $domain | sed 's/^\([^\.]*\)\.\(.*\)$/dc=\1,dc=\2/'`
echo $domain | fgrep '.' > /dev/null
done
# First dc= component doubles as the organization name.
organization=`echo $domain | sed 's/^dc=\([^,]*\).*$/\1/'`
printf 'LDAP root password? '
read -s root_pw
echo
# Hash the root password inside a throwaway container ({CRYPT}, SHA-512).
pw_hash=`docker run --rm $image_name slappasswd -h '{CRYPT}' -c '$6$%.12s' -s "$root_pw"`
# Render slapd.conf from the template only on first run.
if [ ! -f "$directory/slapd.conf" ]; then
sed -e "s/dc=example,dc=com/$domain/g" -e "s~ROOTPWHASH~$pw_hash~" conf/slapd.conf | \
sudo tee "$directory/slapd.conf" > /dev/null
sudo chmod 640 "$directory/slapd.conf"
fi
docker run -d -p 127.0.0.1:389:389 --name $container_name -v "$directory/data":/var/lib/openldap/openldap-data \
-v "$directory/slapd.conf":/etc/openldap/slapd.conf $image_name
# Seed the initial tree, then strip the client tools from the image.
sed -e "s/dc=example,dc=com/$domain/g" -e "s~example~$organization~" conf/start.ldif | \
docker exec -i $container_name ldapadd -x -D "cn=root,$domain" -w "$root_pw"
docker exec $container_name apk del openldap-clients
printf "\nThe OpenLDAP container \"$container_name\" was configured successfully.
You should be able to access the LDAP service with the user \"cn=root,$domain\"
and the chosen password.\n"
| true
|
a65d79f2a7ce3f6679556d280dfc46cedf593541
|
Shell
|
TrevorBender/dotfiles
|
/bashrc
|
UTF-8
| 2,444
| 3.203125
| 3
|
[] |
no_license
|
# /etc/skel/.bashrc
#
# This file is sourced by all *interactive* bash shells on startup,
# including some apparently interactive shells such as scp and rcp
# that can't tolerate any output. So make sure this doesn't display
# anything or bad things will happen !
# Test for an interactive shell. There is no need to set anything
# past this point for scp and rcp, and it's important to refrain from
# outputting anything in those cases.
if [[ $- != *i* ]] ; then
# Shell is non-interactive. Be done now!
return
fi
# Put your fun stuff here.
# Colors (ANSI escape fragments used when composing PS1)
txtrst="\033[0m" # Text reset (no color)
txtblu="\033[1;37m" # Blue
txtcyn="\033[0;36m" # Cyan
# vi-style command-line editing
set -o vi
export PATH="$PATH:$HOME/bin"
export PATH="$PATH:$HOME/.cabal/bin"
#if [ "\$(type -t __git_ps1)" ]; then
# PS1="${txtcyn}\$(__git_ps1 '(%s) ')$PS1"
#fi
export PS1="\[\033[01;32m\]\u@\h\[\033[01;34m\] (\!) <\w> \n\$\[\033[00m\] "
# Navigation / editor / multiplexer shortcuts
alias pd=pushd
alias td=popd
alias v=vim
alias vi=vim
alias gv=gvim
alias ff=ffind
alias tmux="tmux -2"
alias t='tmux'
alias tl="tmux list-s"
alias ta="tmux a -t"
alias ts="tmux new-s -s"
alias sl='screen -ls'
alias sa='screen -r'
alias ss='screen -S'
alias g=git
alias ff=ffind
alias r=ranger
# most recently modified entry in the current directory
alias lst='ls -tr | tail -1'
# Set the GNU screen window title to $1 (the "ESC k <title> ESC \" sequence).
# BUG FIX: previously used `echo -ne "\ek$1\e\\"`; echo -e also expanded any
# backslash escapes *inside the title itself* and -n/-e are non-portable.
# printf with %s emits the title verbatim.
function title ()
{
    printf '\ek%s\e\\' "$1"
}
# Shorthand for title (NB: this shadows the earlier alias t='tmux').
alias t=title
# Start a named screen session, first setting the window title to the
# same name. Falls through (no session) if setting the title fails.
tss()
{
    local session="$*"
    title "$session" && ss "$session"
}
# git
# Guard the sources: not every machine has these helper scripts installed,
# and an interactive shell shouldn't spew "No such file" errors.
[ -f ~/.git-completion.sh ] && source ~/.git-completion.sh
[ -f /usr/share/git/git-prompt.sh ] && source /usr/share/git/git-prompt.sh
# Tab completion for the "g" alias (fallback form for older bash-completion
# that lacks -o bashdefault). Duplicate complete line consolidated.
complete -o bashdefault -o default -o nospace -F _git g 2>/dev/null \
    || complete -o default -o nospace -F _git g
# BUG FIX: this previously tested "\$(type -t __git_ps1)" -- the backslash
# made it the *literal string* '$(type -t __git_ps1)', which is always
# non-empty, so the branch ran even when __git_ps1 was missing. Drop the
# backslash so the check actually runs. The backslash inside the PS1
# assignment below is intentional: it defers __git_ps1 to prompt time.
if [ "$(type -t __git_ps1)" ]; then
    PS1="${txtcyn}\$(__git_ps1 '(%s) ')$PS1"
fi
alias g='git'
function gitroot() { cd $(git root) ; }
# Build a two-line prompt: a full-width colored top bar (user@host, git
# branch and cwd on the left, date/time flushed to the right margin)
# followed by "> " on the next line, prefixed with a red ":(" when the
# previous command failed.
function set_bash_prompt ()
{
    # Plain-text rendering of the bar's left side, used only to *measure*
    # its width (the visible bar below re-derives \u@\h, git info and \w
    # via prompt escapes -- presumably so color codes don't skew the
    # width math; TODO confirm).
    local lefthalf="$(whoami)@$(hostname -s) $(pwd | sed "s|$HOME|~|")$(__git_ps1 " (%s)")"
    # Right side: current date/time, e.g. "Mon Jan 01 12:00:00".
    local righthalf=$(date '+%a %b %d %T')
    local columns=$(tput cols)
    # Spaces needed between the halves so righthalf ends at the margin.
    let fillsize=${columns}-${#lefthalf}-${#righthalf}+1
    if [[ $fillsize -lt 0 ]] ; then
        # Terminal too narrow: fall back to a single separating space.
        # NOTE(review): 'fill' is not declared local, so it leaks into the
        # shell environment.
        fill=" "
    else
        fill=$(printf ' %0.s' {1..300}) # 300 spaces
        fill=${fill:0:$fillsize}        # trim to the exact gap width
    fi
    # make sure than new lines are not indented (or whitespace will show up in prompt)
    # Escapes: \[ \] mark zero-width sequences; \e]0;\w\a sets the terminal
    # title; the escaped \$([[ \$? ... ]]) is evaluated at *display* time so
    # it sees the last command's exit status.
    PS1="\[\e]0;\w\a\]\n\[\e[47m\]\[\e[32m\]\u@\h\[\e[31m\]$(__git_ps1 " (%s)")\[\e[36m\] \w${fill}\
\[\e[36m\]\d \t\[\e[0m\]\n\
\$([[ \$? != 0 ]] && echo \"\[\033[01;31m\]:( \")\
\[\e[0m\]> "
}
# need tput
# Rebuild PS1 before every prompt: bash runs PROMPT_COMMAND each time it
# is about to display the primary prompt.
PROMPT_COMMAND=set_bash_prompt
| true
|
9a99ec14e978ce7fa7a46ecb12bfac30b4686513
|
Shell
|
TheEarnest/EnKF_Script
|
/change_cpu_account.sh
|
UTF-8
| 354
| 2.953125
| 3
|
[] |
no_license
|
# Switch the CPU accounting project (nn9207k -> nn9039k) in every ensemble
# member's generated run script. Relies on HOMEDIR, CASEDIR, ENSSIZE and
# machine coming from personal_setting.sh.
source "${HOME}/NorESM/Script/personal_setting.sh"
start_mem=1
for (( proc = start_mem; proc <= ENSSIZE; ++proc ))
do
	# Zero-pad the member number to at least two digits (7 -> "07").
	# BUG FIX: replaces `echo 0$proc | tail -3c`, which silently truncated
	# members >= 100 (e.g. 100 -> "00") and depended on echo's trailing
	# newline being counted by tail -c.
	printf -v mem '%02d' "$proc"
	cd "${HOMEDIR}/cases/${CASEDIR}${mem}/" || exit 1
	runscript="${CASEDIR}${mem}.${machine}.run"
	# Rewrite via a unique temp file (portable across GNU/BSD sed, and no
	# clash with a real file named "toto"); only replace on sed success.
	tmpfile=$(mktemp) || exit 1
	sed -e "s/nn9207k/nn9039k/g" "$runscript" > "$tmpfile" \
		&& mv "$tmpfile" "$runscript"
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.