blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
6dfa504ee95f836feba5b1e7c5e658c90ff50dda
|
Shell
|
cornBuddy/dotfiles
|
/roles/window-manager/templates/xinitrc.j2
|
UTF-8
| 863
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
{{ ansible_managed | comment }}
userresources=$HOME/.Xresources
usermodmap=$HOME/.Xmodmap
sysresources=/etc/X11/xinit/.Xresources
sysmodmap=/etc/X11/xinit/.Xmodmap
# merge in defaults and keymaps
# Quote the system paths for consistency with the user-level checks below
# (the user paths were already quoted; these were not).
if [ -f "$sysresources" ]; then
    xrdb -merge "$sysresources"
fi
if [ -f "$sysmodmap" ]; then
    xmodmap "$sysmodmap"
fi
if [ -f "$userresources" ]; then
    xrdb -merge "$userresources"
fi
if [ -f "$usermodmap" ]; then
    xmodmap "$usermodmap"
fi
# Load the templated keyboard layout if present; the path is now quoted in
# the test as well, matching the quoting already used on the xkbcomp call.
if [ -f "{{ wm_xkbcomp_config_path }}" ]; then
    xkbcomp "{{ wm_xkbcomp_config_path }}" "$DISPLAY"
fi
# start some nice programs
if [ -d /etc/X11/xinit/xinitrc.d ] ; then
    for f in /etc/X11/xinit/xinitrc.d/?*.sh ; do
        [ -x "$f" ] && . "$f"
    done
    unset f
fi
xbacklight -set 30
xset -dpms
xss-lock -- slock &
chromium &
dwmstatus &
telegram-desktop &
skypeforlinux &
udiskie &
# exec dwm last so the X session ends when the window manager exits.
exec dwm
| true
|
5f8f8925d6603a55f9551f170c98d5e743eb7908
|
Shell
|
CristescuLab/Scripts
|
/postblastaln.sh
|
UTF-8
| 612
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Align each BLAST hit pair, trim it, and build a tree over the combined set.
#
# Arguments:
#   1. tsv blast file
#   2. query fasta
#   3. subject fasta
#   4. fasta out filename
set -e
count=0
# -r keeps backslashes in the BLAST table literal.
while read -r line; do
    #if [[ ${line} != *"qaccver"* ]]; then
    # Fix: ((count++)) yields the pre-increment value 0 on the first pass,
    # which is a non-zero exit status and aborts the script under set -e.
    count=$((count + 1))
    # Split the whitespace-separated columns into an array.
    read -r -a vars <<< "$line"
    echo "processing ${vars[0]} and ${vars[1]}"
    # get query sequence
    seqkit grep -p "${vars[0]}" "$2" > tmp
    seqkit grep -p "${vars[1]}" "$3" >> tmp
    mafft --quiet tmp | sed "s/>M0.*$/>Read${count}/"| sed 's/ /_/g' > tmp.aln
    trimal -nogaps -in tmp.aln >> tmp.fasta
    #fi
done < "$1"
mafft --auto --thread -1 tmp.fasta | seqkit rmdup > "$4"
iqtree -pre "${4%%.fasta}" -bb 1000 -alrt 0 -m TESTNEW -con -s "$4"
| true
|
a69e7b60322a889f6e84c04ac665085f0b882418
|
Shell
|
maksverver/MSc
|
/benchmark/tests4.sh
|
UTF-8
| 1,044
| 2.953125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# FIXME: change this to use start-job.sh?
# These are intended to show the effectiveness of various lifting strategies on (clustered) random games.
# Queue one benchmark job per (seed, lifting strategy, alternation) combo.
#   $1 - game size (--size)
#   $2 - optional cluster size; omitted from the name/flags when empty
# NOTE(review): depends on the `run` helper and job environment provided by
# torque.sh (sourced below) -- confirm its contract there.
run_on() {
for seed in {1..30}
do
# Every lifting strategy / heuristic variant being compared.
for lift in -llinear:0 -llinear:1 -Lpredecessor:0 -lpredecessor:0 -lpredecessor:1 -Lpredecessor:1 -Lminmeasure:0 -Lminmeasure:1 -Lminmeasure:2 -Lmaxmeasure:0 -Lmaxmeasure:1 -Lmaxmeasure:2 -Lmaxstep:0 -Lmaxstep:1 -Lmaxstep:2
do
# Run each strategy with and without alternation (-a).
for alternate in '' -a
do
name=random-seed=$seed-size=$1-clustersize=${2:-0}$lift$alternate
# Skip jobs that already produced a job output file (<name>.o<jobid>).
if test -f "$name".o*
then
true # echo "$name exists!"
else
echo "starting $name"
name=$name run ../run-test.sh -i random --priorities=10 --seed=$seed $lift $alternate --size=$1 ${2:+--clustersize=$2} --maxlifts=1e9 --stats --verify
sleep 1 # temp hack to work around buggy job server
fi
done
done
done
}
source $HOME/utils/lib/torque.sh || exit 1
cd output-tests4b || exit 1
# Plain random games of increasing size, then two clustered variants.
run_on 4000
run_on 8000
run_on 16000
run_on 4000 16
run_on 4000 64
| true
|
19953e21b5ab383f299be4c34dc86b05fbbcb312
|
Shell
|
sallyom/online-hibernation
|
/hack/make.sh
|
UTF-8
| 830
| 4.03125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run one make target across several project directories.
#
# Usage: make.sh TARGET PROJECT...
# Exits non-zero when the target failed in any project; projects without the
# target are skipped.
set -euo pipefail

TARGET="$1"
shift

STARTTIME=$(date +%s)
EXITCODE=0
# Bare `for PROJECT` iterates over the remaining positional args ("$@").
for PROJECT
do
  # `make --question` exits 2 when the target is unknown in that directory.
  target_exists=1
  if [[ "$(make -C "$PROJECT" --question "$TARGET" >& /dev/null; echo $?)" != 2 ]]; then
    testexec="make $TARGET -k"
  else
    target_exists=0
  fi

  echo
  echo "*****************************************************************************************"
  echo "Executing $PROJECT target $TARGET..."
  echo
  # Quote the directory (fix: an unquoted $PROJECT broke on paths with spaces).
  pushd "$PROJECT" >/dev/null
  if [ $target_exists -eq 1 ]; then
    if $testexec; then
      echo "$PROJECT target $TARGET PASS"
    else
      echo "$PROJECT target $TARGET FAIL"
      EXITCODE=1
    fi
  else
    echo "Directory $PROJECT does not have target $TARGET, skipping."
  fi
  popd >/dev/null
done

ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"
exit $EXITCODE
| true
|
0b41bedb408926ef4509dd8ef6f6c58d1b073e0b
|
Shell
|
csantanapr/knative-docker-desktop
|
/demo.sh
|
UTF-8
| 914
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Demo installer: sets up Knative Serving/Eventing plus sample apps on the
# Docker Desktop Kubernetes cluster and reports how long it took.
set -eo pipefail
set -u
echo -e "Switching to docker-desktop kubectl context"
kubectl config use-context docker-desktop
echo -e "🍿 Installing Knative Serving and Eventing ... \033[0m"
STARTTIME=$(date +%s)
# NOTE(review): curl | bash runs remote scripts unreviewed -- fine for a demo,
# but pin to a tag/commit if this should be reproducible.
curl -sL https://raw.githubusercontent.com/csantanapr/knative-minikube/master/install.sh | bash
echo -e "🕹 Installing Knative Samples Apps... \033[0m"
curl -sL https://raw.githubusercontent.com/csantanapr/knative-kind/master/03-serving-samples.sh | bash
curl -sL https://raw.githubusercontent.com/csantanapr/knative-kind/master/05-eventing-samples.sh | bash
# Wall-clock duration of the whole install, in seconds.
DURATION=$(($(date +%s) - $STARTTIME))
echo "kubectl get ksvc,broker,trigger"
kubectl -n default get ksvc,broker,trigger
echo -e "\033[0;92m 🚀 Knative install with samples took: $(($DURATION / 60))m$(($DURATION % 60))s \033[0m"
echo -e "\033[0;92m 🎉 Now have some fun with Serverless and Event Driven Apps \033[0m"
| true
|
b51918a2ca87e960ca66ce26f23c572a4236850f
|
Shell
|
PuzzleIOT/Index
|
/force-remote-test-all.sh
|
UTF-8
| 868
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Kick off remote tests for every sketch; refuses to run off the dev branch.
# Current branch name (the `git branch` line prefixed with "* ").
BRANCH=$(git branch | sed -n -e 's/^\* \(.*\)/\1/p')
if [ "$BRANCH" = "dev" ]
then
DIR=$PWD
sh force-remote-test.sh || exit 1
echo ""
echo "Monitor"
cd sketches/monitor/SoilMoistureSensorCalibratedSerial/
sh force-remote-test.sh || exit 1
cd $DIR
echo ""
echo "Monitor ESP"
cd sketches/monitor/SoilMoistureSensorCalibratedSerialESP/
sh force-remote-test.sh || exit 1
cd $DIR
echo ""
echo "Irrigator"
cd sketches/irrigator/SoilMoistureSensorCalibratedPump/
sh force-remote-test.sh || exit 1
cd $DIR
echo ""
echo "Irrigator ESP"
# NOTE(review): this cd target is identical to the "Irrigator" section above
# despite the ESP heading; it likely should end in ...CalibratedPumpESP --
# confirm against the repository layout before changing.
cd sketches/irrigator/SoilMoistureSensorCalibratedPump/
sh force-remote-test.sh || exit 1
cd $DIR
echo ""
echo "Tests for all projects should now have started on the test server."
else
echo "Cannot force retest from master branch. Switch to dev branch first."
fi
| true
|
1d796422834e88913cb2417a21f16cde161c0737
|
Shell
|
liuzl/ulogme
|
/ulogme.sh
|
UTF-8
| 178
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launch ulogme: macOS gets the OSX runner, everything else is treated as
# Linux (key-frequency logger in the background, window logger in front).
case "$(uname)" in
  Darwin)
    # This is a Mac
    ./osx/run_ulogme_osx.sh
    ;;
  *)
    # Assume Linux. Prime the sudo credential cache first so the
    # backgrounded keyfreq job doesn't stall on a password prompt.
    sudo echo -n ""
    sudo ./keyfreq.sh &
    ./logactivewin.sh
    ;;
esac
| true
|
4b4256b417fc4d39bb13e278028ea658104a2223
|
Shell
|
openSUSE/package-lists
|
/doitall.sh
|
UTF-8
| 1,421
| 3.25
| 3
|
[] |
no_license
|
#! /bin/sh
# Rebuild and publish package lists for one openSUSE project/repository.
# Usage: doitall.sh [PROJECT] [REPO]  (defaults: Factory / standard)
git pull --rebase
proj=$1
repo=$2
test -n "$proj" || proj=Factory
test -n "$repo" || repo=standard
# Pick the OBS product package that carries the generated lists.
case $proj in
Factory)
product=000product
;;
Leap:15.*)
product=000product
;;
Leap:15.*:Ports)
product=000product
;;
Factory:PowerPC)
product=000product
;;
Factory:ARM)
product=000product
;;
Factory:zSystems)
product=000product
;;
esac
# Refresh the local checkout of the product package.
(cd osc/openSUSE\:$proj/$product/ && (osc up -u; osc up -e))
# The build state of 'bash' is used as a proxy for the repo having settled;
# bail out (unless FORCE is set) while it is still dirty or building.
osc api "/build/openSUSE:$proj/_result?package=bash&repository=$repo" > "$proj.state"
if grep -q 'dirty="true"' "$proj.state" || grep -q 'state="building"' "$proj.state"; then
echo "$repo still dirty"
if test -z "$FORCE"; then
exit 0
fi
fi
./doit.sh $proj
./commit.sh $proj
if [ "$proj" = "Factory" -o "$proj" = "Leap:42.3" ]; then
# Do only create the drop list for the main arch - to avoid constant conflcits in obsoletepackages.inc
./create-drop-list.sh $proj $product
fi
# Run the update tests and upload the report only when it changed server-side.
cd update-tests
./testall.sh $proj $product > update-tests-report.$proj.txt 2>&1
file="update-tests-report.$proj.txt"
remote="/source/openSUSE:$proj:Staging/dashboard/update-tests.txt"
if [ "$(< "$file")" != "$(osc api "$remote")" ] ; then
osc -d api -X PUT -f "$file" "$remote"
fi
cd ..
# From here on any failure (e.g. nothing to commit) aborts the script.
set -e
git commit -m "auto commit for $proj/$repo" -a
echo "all done"
# git push < /dev/null || true
exit 0
| true
|
70a209cf419b498b66fffba44cd3784a454bb5a6
|
Shell
|
godmode2k/pawn_build_custom_apis
|
/pawn_cc.sh
|
UTF-8
| 811
| 3.28125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/sh
# --------------------------------------------------------------
# Project: PAWN
# Purpose: PAWN CC Script
# Author: Ho-Jung Kim (godmode2k@hotmail.com)
# Date: November 5, 2006
# Filename: pawn_cc.sh
#
# Last modified:
# License:
#
# Note:
# --------------------------------------------------------------
# --------------------------------------------------------------
# Require the source file argument. Fix: exit non-zero on misuse so callers
# can detect the error (this previously exited 0), and quote $0.
if [ -z "$1" ]; then
	echo 'PAWN CC Script'
	echo "Usage: $0 [Filename.p]"
	exit 1
fi

PAWNCC_BIN="/work/src/linux_app/vm/pawn__nix_win32/pawn/linux/source/compiler/pawncc"
PAWN_INC="-i/work/src/linux_app/vm/pawn__nix_win32/pawn/linux/include"
SRC_FILE=$1

echo "-----------------------------------------------------"
echo "[$0] Compiling..."
echo "-----------------------------------------------------"
# Quote the binary and source path so paths with spaces survive; $PAWN_INC is
# a single option token and is left unquoted on purpose.
"$PAWNCC_BIN" $PAWN_INC "$SRC_FILE"
# EOF
| true
|
fb87055611f97e2e10c7ec26ef7296a9f9177a23
|
Shell
|
onokatio/bulk-kosen-syllbus
|
/main.sh
|
UTF-8
| 1,282
| 3.203125
| 3
|
[] |
no_license
|
set -eu
# Bulk-download kosen syllabus PDFs per (year, grade) and join them into one
# file. Requires curl, pup, jq, wget and pdfunite.
SCHOOL=14
DEPARTMENT=14
# Print "subject_id year" pairs for one catalogue year/grade by scraping the
# public subjects table (pup extracts rows as JSON, jq picks the columns).
function getSubjectIds(){
YEAR="$1"
GRADE="$2"
# Column offsets of this grade's two term columns inside each table row.
COL1="$(( 6+(4*(GRADE-1)) ))"
COL2="$(( COL1 + 2 ))"
curl -Ss "https://syllabus.kosen-k.go.jp/Pages/PublicSubjects?school_id=${SCHOOL}&department_id=${DEPARTMENT}&year=${YEAR}&lang=ja" | pup 'table#sytablenc tbody tr json{}' | jq -r ".[4:][] | {\"sub\": .children[2], \"a\": .children[${COL1}], \"b\": .children[${COL2}]} | select(.a.text or .b.text) | .sub.children[0].children[0].href" | sed -e 's/\&/\&/g' | sed -r 's/.+subject_id=(.+)\&year=(.+)\&.+/\1 \2/'
}
# Turn each subject id/year pair into a direct PDF download URL.
function getPDFUrls(){
getSubjectIds "$1" "$2" | while read line
do
id=`echo $line | cut -d ' ' -f 1`
year=`echo $line | cut -d ' ' -f 2`
echo "https://syllabus.kosen-k.go.jp/Pages/SyllabusPDF?school_id=${SCHOOL}&department_id=${DEPARTMENT}&subject_id=${id}&year=${year}&lang=ja&subject_code=&preview=False&type=start&attachment=true"
done
}
# Download all PDFs for one year/grade into ./pdfs/<year>-<grade>/ and merge
# them into a single ./pdfs/<year>-<grade>.pdf.
function getPDF(){
mkdir -p "./pdfs/$1-$2"
getPDFUrls "$1" "$2" | while read line;do wget --show-progress -nv -P "./pdfs/$1-$2/" --content-disposition "$line";done
pdfunite ./pdfs/$1-$2/*.pdf ./pdfs/$1-$2.pdf
}
# Concatenate every per-grade PDF into one file.
function joinAllPDF(){
pdfunite ./pdfs/*.pdf ./all.pdf
}
rm -rf ./pdfs
getPDF 2016 1
getPDF 2017 2
getPDF 2019 3
getPDF 2020 4
getPDF 2021 5
joinAllPDF
| true
|
61a6d7ea5973f359af07f7e9cb8dc82d5f69c85f
|
Shell
|
rperea14/drigoscripts
|
/drigo_dcm_organizer_from_xnat.sh~
|
UTF-8
| 1,219
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
#Rodrigo Perea
#rperea@kumc.edu (alternative email grandrigo@gmail.com)
#PLEASE ADD DESCRIPTION HERE:!!!!!!
#
# Organizes DICOMs downloaded from XNAT: every scan directory under
# <HSC#>*/SCANS whose size exceeds 168 MB is treated as a DTI acquisition
# and its DICOM/*.dcm files are moved into ./DTI_dcm_<subject-dir>/.
#
#THIS SCRIPT WAS CREATED UNDER bash version 3.2 unders OSX 10.7. To check your bash version,
#enter bash --version
#Any questions please feel free to contact me at the email provided above.
SOURCE=$( pwd )
#Checking what you are doing.....
if [ $1 = "--help" ] ; then
echo " To organize the downoaded folders from XNAT (using ADEPT and SKyra for implementation) and
organize it according to the size of each acquisition sequence within a +-10 range in MB
The script will read each folde and assumes that the *dcm files are under $DIREC/*
"
exit
fi
if [ -z $1 ] ; then
echo "Run --help for help or
Please include the HSC# as the 1st argument.
Or --help for help "
exit
fi
for DIR in $(ls -1d $1* )
do
echo "In $DIR ..."
for SCAN in $( ls -1d $DIR/SCANS/* ) ;
do
# echo "SCAN and DIR is $DIR/$SCAN "
# echo $SIZE
# Scan size in megabytes: first field of `du -sm`.
SIZE=$(du -sm $SOURCE/$SCAN/ | { read FIRST REST ; echo $FIRST; } )
# NOTE(review): 168 MB threshold is hard-coded for this study's DTI size.
if [ $SIZE -gt 168 ] ; then
mkdir -p DTI_dcm_${DIR}
mv $SCAN/DICOM/*dcm ./DTI_dcm_${DIR}/
echo "In scan sequence $SCAN with size $SIZE... "
fi
done
done
| true
|
dd37d87868ad141bc5937cc93cf4a53973c5e2b1
|
Shell
|
crossbario/crossbar-examples
|
/authentication/test_tls.sh
|
UTF-8
| 1,113
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
########################################################################################
##
## WAMP-TLS(-client-authentication) static
##
# Start the router, give it time to come up, then run a client that should
# authenticate (client0) and one that should be rejected (client1).
crossbar start --cbdir=./tls/static/.crossbar &
sleep 10
python ./tls/static/client_tx.py --url wss://localhost:8080/ws --key client0.key --cert client0.crt --channel_binding="tls-unique"
wamp_tls_tx_cnlbind_unique_good=$?
python ./tls/static/client_tx.py --url wss://localhost:8080/ws --key client1.key --cert client1.crt --channel_binding="tls-unique"
wamp_tls_tx_cnlbind_unique_bad=$?
crossbar stop --cbdir=./tls/static/.crossbar || true
########################################################################################
##
## Test Summary
##
echo ""
echo "Test results:"
echo "============="
echo ""
# From here on stdout is appended to test.log, so the OK/FAIL lines below end
# up in the log file, not on the console.
exec >> test.log
[ $wamp_tls_tx_cnlbind_unique_good -eq 0 ] && echo "wamp-tls-static-cnlbind-unique-good: OK" || echo "wamp-tls-static-cnlbind-unique-good: FAIL"
[ $wamp_tls_tx_cnlbind_unique_bad -eq 1 ] && echo "wamp-tls-static-cnlbind-unique-bad: OK" || echo "wamp-tls-static-cnlbind-unique-bad: FAIL"
| true
|
cff306d5946bbb63f93a59d2ae7ba1abcfc4fb02
|
Shell
|
amiel/ec2_runner
|
/iptables_tunnel.sh
|
UTF-8
| 1,029
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Path of this script relative to the application root; used to locate the
# helper libraries that live beside it.
SELF="./script/ec2_runner/iptables_tunnel.sh"
# Provide the ebegin/eend/eerror logging helpers and shared settings such as
# $IPTABLES and $DESTINATION_PORT.
source "$(dirname $SELF)/functions.sh"
source "$(dirname $SELF)/shared.sh"
# Compute the local tunnel port for a base port and role.
#   $1 - base port
#   $2 - role: "backup" maps into the +5000 range, anything else into +5500
# Uses standard $(( )) arithmetic instead of the deprecated $[ ] form.
get_port() {
	case $2 in
		backup) echo $(($1 + 5000)) ;;
		*) echo $(($1 + 5500)) ;;
	esac
}
# Add a DNAT rule so connections to local port $2 are forwarded to
# $1:$DESTINATION_PORT. ebegin/eend and $IPTABLES/$DESTINATION_PORT come
# from the helper files sourced above.
add_server() {
local ip=${1:?please supply a server} port=${2:?please supply a port}
ebegin "setting up local $port to forward to $ip:$DESTINATION_PORT"
$IPTABLES -t nat -A OUTPUT -p tcp --dport $port -j DNAT --to-destination $ip
eend $?
}
# Delete the DNAT rule created by add_server (arguments must match exactly).
remove_server() {
local ip=${1:?please supply a server} port=${2:?please supply a port}
ebegin "removing local $port forward to $ip:$DESTINATION_PORT"
$IPTABLES -t nat -D OUTPUT -p tcp --dport $port -j DNAT --to-destination $ip
eend $?
}
# Entry point: $1 = action (add|remove), $2 = base port, $3 = server IP.
case $1 in
add)
# Forward both the primary and the backup local port to the new server.
add_server $3 $(get_port $2)
add_server $3 $(get_port $2 backup)
;;
remove)
remove_server $3 $(get_port $2)
# Drain: let in-flight requests finish before dropping the backup route.
ebegin "waiting 30 seconds for server to finish handling requests"
sleep 30
eend 0
remove_server $3 $(get_port $2 backup)
;;
*) eerror invalid action; exit 3 ;;
esac
| true
|
75acc7dfe7155f67f93569c8fd09b5eb2f4e7e9b
|
Shell
|
rnd-forests/poi-crawler
|
/backend/.docker/develop.sh
|
UTF-8
| 1,254
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run common development commands inside the project's docker workspace
# container. With fewer than two arguments (or an unknown subcommand) an
# interactive bash shell is opened in the container instead.
#
# (An earlier revision validated a user-supplied workspace name against a
# WORKSPACES list; that logic is retired.)

# Name of the docker workspace container to exec into.
workspace="akinia-crawler-workspace"

if [ $# -gt 1 ]; then
  case "$2" in
    artisan|art)
      shift 2
      docker exec -it "$workspace" php artisan "$@"
      ;;
    composer|comp)
      shift 2
      docker exec -it "$workspace" composer "$@"
      ;;
    npm)
      shift 2
      docker exec -it "$workspace" npm "$@"
      ;;
    yarn)
      shift 2
      docker exec -it "$workspace" yarn "$@"
      ;;
    test)
      shift 2
      docker exec -it "$workspace" ./vendor/bin/phpunit "$@"
      ;;
    *)
      docker exec -it "$workspace" bash
      ;;
  esac
else
  docker exec -it "$workspace" bash
fi
| true
|
f1099334a9678c1fad6a835de4f02024ba5bc3de
|
Shell
|
STAR-RG/shaker-artifacts-icsme
|
/setup.sh
|
UTF-8
| 1,281
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# One-shot environment setup: system packages, Python deps, the Android SDK
# command-line tools, and two emulator images (API 23 and 28).
# Absolute directory this script lives in (so setvars.sh is found at the end).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# SDK tool locations relative to $HOME (the script cd's to ~ below).
PATH_SDKMAN="./Android/cmdline-tools/tools/bin"
PATH_ADB="./Android/platform-tools"
PATH_EMULATOR="./Android/emulator"
cd ~
# Download packages
sudo apt-get update
sudo apt-get install -y openjdk-8-jdk stress-ng python3-pip unzip r-base
# Download Python packages
pip3 install python-sat
pip3 install numpy==1.16.1
#Download and unpack command line tools
wget https://dl.google.com/android/repository/commandlinetools-linux-6609375_latest.zip
unzip commandlinetools-linux-6609375_latest.zip
rm -f commandlinetools-linux-6609375_latest.zip
mkdir cmdline-tools
mv tools/ cmdline-tools
mkdir Android
mv cmdline-tools/ Android
#Configure Android
yes | $PATH_SDKMAN/sdkmanager --licenses
$PATH_SDKMAN/sdkmanager "platforms;android-28"
$PATH_SDKMAN/sdkmanager "platforms;android-23"
$PATH_SDKMAN/sdkmanager "system-images;android-28;default;x86"
$PATH_SDKMAN/sdkmanager "system-images;android-23;default;x86"
$PATH_SDKMAN/sdkmanager "build-tools;28.0.3"
echo no | $PATH_SDKMAN/avdmanager create avd --name EmuAPI28 --package "system-images;android-28;default;x86"
echo no | $PATH_SDKMAN/avdmanager create avd --name EmuAPI23 --package "system-images;android-23;default;x86"
# Export the environment variables needed by the later experiment steps.
source $DIR/setvars.sh
| true
|
936852bcc458fe5cf6ed94960dc005114f4b631f
|
Shell
|
aws/aws-codedeploy-agent
|
/init.d/codedeploy-agent
|
UTF-8
| 3,022
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Init file for codedeploy-agent
#
# chkconfig: 2345 98 02
# description: codedeploy-agent processes the deployments created by AWS CodeDeploy and installs \
# the deployment artifacts on to this instance.
### BEGIN INIT INFO
# Provides: codedeploy-agent
# Required-Start: $all
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: AWS CodeDeploy Host Agent
# Description: codedeploy-agent processes the deployments created by AWS CodeDeploy and installs
# the deployment artifacts on to this instance.
### END INIT INFO
COMMAND=$1
RETVAL=0
# Source /etc/profile only when it is root-owned, to avoid executing a
# user-writable file with root privileges.
[ -f /etc/profile ] && [ "`stat --format '%U %G' /etc/profile`" == "root root" ] && source /etc/profile
prog="codedeploy-agent"
# Modify the following CODEDEPLOY_USER variable to run the codedeploy process as a non-root user
# Note: You also need to chown /opt/codedeploy /var/log/aws
CODEDEPLOY_USER=""
AGENT_ROOT="/opt/codedeploy-agent/"
INSTALLER="/opt/codedeploy-agent/bin/install"
BIN="/opt/codedeploy-agent/bin/codedeploy-agent"
# Start the agent, optionally as $CODEDEPLOY_USER. Note: each action function
# exits the whole script with the agent binary's status.
start() {
echo -n $"Starting $prog:"
cd $AGENT_ROOT
if [ $CODEDEPLOY_USER ]; then
nohup sudo -i -u $CODEDEPLOY_USER $BIN start >/dev/null </dev/null 2>&1 # Try to start the server
else
nohup $BIN start >/dev/null </dev/null 2>&1 # Try to start the server
fi
exit $?
}
# Stop the agent (same user-switching logic as start).
stop() {
echo -n $"Stopping $prog:"
cd $AGENT_ROOT
if [ $CODEDEPLOY_USER ]; then
nohup sudo -i -u $CODEDEPLOY_USER $BIN stop >/dev/null </dev/null 2>&1 # Try to stop the server
else
nohup $BIN stop >/dev/null </dev/null 2>&1 # Try to stop the server
fi
exit $?
}
# Restart the agent in place.
restart() {
echo -n $"Restarting $prog:"
cd $AGENT_ROOT
if [ $CODEDEPLOY_USER ]; then
nohup sudo -i -u $CODEDEPLOY_USER $BIN restart >/dev/null </dev/null 2>&1 # Try to restart the server
else
nohup $BIN restart >/dev/null </dev/null 2>&1 # Try to restart the server
fi
exit $?
}
# Report the agent's status (output is not suppressed here).
status() {
cd $AGENT_ROOT
if [ $CODEDEPLOY_USER ]; then
sudo -i -u $CODEDEPLOY_USER $BIN status # Status of the server
else
$BIN status # Status of the server
fi
exit $?
}
# Self-update the agent via its installer; does not exit, so it can be
# chained with start (see start-with-update below).
update() {
echo -n $"Updating $prog:"
cd $AGENT_ROOT
$INSTALLER auto #Update the agent
}
# Dispatch on the requested init action.
case "$COMMAND" in
start)
start
;;
start-no-update)
start
;;
start-with-update)
update
start
;;
stop)
stop
;;
restart)
restart
;;
force-reload)
stop
start
;;
status)
status
;;
*)
echo $"Usage: $0 {start|stop|status|restart}"
esac
| true
|
ad4eb294b1e03071a50479fa5a20627d0c66a771
|
Shell
|
aalthof1/cs252
|
/shell-practice/if_else.sh
|
UTF-8
| 129
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Report whether the hard-coded directory exists.
directory="./Scripting"
# Quote the expansion so paths containing spaces are tested correctly.
if [ -d "$directory" ]; then
	echo "Directory exists"
else
	echo "Directory does not exist"
fi
| true
|
3e3c13477c371e98954d770100469e5d97097950
|
Shell
|
Insight-Heroes/ih
|
/staging.sh
|
UTF-8
| 1,325
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Staging deploy. Heroku deployment is retired: print a pointer to the EC2
# script and exit. The old Heroku flow is kept below (unreachable) as
# reference.
# Fix: bash `echo` without -e printed the \n escapes literally; printf '%b'
# interprets them so the banner renders as intended.
printf '%b\n' "------------------\nWe are not using heroku, \nTry 'sh ec2-staging.sh' to deploy on EC2 staging \n------------------\n"
exit 1
# ==========================================================
# NOTE: everything below is dead code (see the exit above).
set -e
if [ `git branch --list deploy` ]
then
# Delete current deploy branch
git branch -D deploy
fi
# Switch to staging branch
git checkout staging
# Create new deploy branch based on Staging
git checkout -b deploy
echo "---------- Generating dist files ----------\n"
# Grunt comands to build our site
grunt build:production
# the dist/ directory is in my .gitignore, forcibly add it
git add -f public/dist
# the modules/core/client/css/core.css file is in my .gitignore, forcibly add it
git add -f modules/core/client/css/core.css
git commit -m "Deploying to Heroku"
# Push it up to heroku, the -f ensures that heroku won't complain
# Start heroku deployment
echo "Build complete\n---------- Heroku Staging deployment ----------\n"
git push heroku -f deploy:master
# This file gets modified during deployment
git checkout config/assets/default.js
# Switch it back to master
echo "Deployment complete\n---------- Switching back to staging branch ----------\n"
git checkout staging
# Remove Dist directory
rm -fr dist/
# Run database migration
# echo "\n---------- Executing DB migration ----------"
|
ebfdbbc463b52669806679a0dc34061002f37527
|
Shell
|
alonSadan/kubemacpool
|
/hack/install-tools.sh
|
UTF-8
| 154
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -xe
# Install the Go tools pinned in the vendor tree.
#   $1 - file listing tools as blank-import lines, e.g.  _ "host/org/tool"
# Requires $GO to point at the go binary.
tools_file=$1
# Pull the quoted import paths out of the underscore-import lines.
# Fix: quote the file name so paths with spaces work.
tools=$(grep "_" "$tools_file" | sed 's/.*_ *"//' | sed 's/"//g')
# $tools is intentionally unquoted: word-splitting yields one path per tool.
for tool in $tools; do
	$GO install "./vendor/$tool"
done
| true
|
3fb539eddf89b19410f3fc98a2f019b5e1f1cfe6
|
Shell
|
jkwong888/terraform-openshift-installicp
|
/templates/setup_icpinstall.sh.tpl
|
UTF-8
| 1,926
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Terraform template: fetch the ICP installer (http, nfs or docker source),
# load/unpack it, and extract the cluster config directory.
# ${...} are Terraform interpolations; $${...} escapes to shell ${...}.
set -x
test -d ${icp_install_path} || sudo mkdir -p ${icp_install_path}
cd ${icp_install_path}
if [[ ${icp_binary} == http* ]]; then
echo "HTTP binary found"
hostname=$(echo ${icp_binary}|awk -F/ '{print $3}')
filename=$(echo ${icp_binary}|awk -F$${hostname} '{print $2}')
# NOTE(review): `$$(unknown)` here (and below) looks like a mangled
# expression -- probably $${filename}; confirm against the original template.
basename=$(basename $$(unknown))
if [ ! -e "${icp_install_path}/$${basename}" ]; then
sudo curl -k -o ${icp_install_path}/$${basename} ${icp_binary}
fi
elif [[ ${icp_binary} == nfs* ]]; then
echo "NFS binary found"
hostname=$(echo ${icp_binary}|awk -F/ '{print $3}')
filename=$(echo ${icp_binary}|awk -F$${hostname} '{print $2}')
filepath=$(dirname $$(unknown))
basename=$(basename $$(unknown))
if [ ! -e "${icp_install_path}/$${basename}" ]; then
sudo mkdir /tmp/nfsmount
sudo mount -t nfs $${hostname}:$${filepath} /tmp/nfsmount
sudo cp /tmp/nfsmount/$${basename} ${icp_install_path}
sudo umount /tmp/nfsmount
sudo rm -rf /tmp/nfsmount
fi
elif [[ ${icp_binary} == docker* ]]; then
echo "Docker image registry"
inception_image=`echo ${icp_binary} | sed -e 's/docker:\/\///g'`
image_reg_url=`echo ${icp_binary} | sed -e 's/docker:\/\///g' | awk -F/ '{print $1;}'`
# NOTE(review): the login passes the *username* variable for both -u and -p;
# -p was presumably meant to use icp_image_registry_password -- confirm.
if [ ! -z "${icp_image_registry_username}" -a ! -z "${icp_image_registry_password}" ]; then
sudo docker login $${image_reg_url} -u ${icp_image_registry_username} -p ${icp_image_registry_username}
fi
sudo docker pull $${inception_image}
else
echo "Bad binary: ${icp_binary}"
exit 1
fi
# When a tarball was fetched (http/nfs), load it into docker and locate the
# icp-inception image name.
if [ ! -z "$${basename}" ]; then
sudo tar xf ${icp_install_path}/$${basename} -O | sudo docker load
inception_image=$(sudo docker images|grep icp-inception|awk '{print $1":"$2}')
fi
sudo docker run --rm -v $(pwd):/data:z -e LICENSE=accept --security-opt label:disable $${inception_image} cp -r cluster /data
sudo chown -R ${ssh_user} ${icp_install_path}
| true
|
f1096300730c77a582faa9a7dfcac8617f32f089
|
Shell
|
l4cr0ss/provisioner
|
/auto-install.sh
|
UTF-8
| 2,569
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Script framework for virtual machine provisioning.
# Author: Jefferson Hudson
#---------------------------#
# Function defs and imports #
#---------------------------#
# Base URL of the provisioning server the config scripts are fetched from.
# NOTE(review): empty here -- must be filled in before the script can work.
SERVER=""
# Every helper below follows the same pattern: download a config script from
# $SERVER, source it (defining its config_* function), delete the download,
# then invoke the function, forwarding any version arguments.
authorized_keys(){
wget "$SERVER"/scripts/config-authorized_keys.sh \
-O /tmp/config-authorized_keys.sh
source /tmp/config-authorized_keys.sh
rm /tmp/config-authorized_keys.sh
config_authorized_keys;
}
kvm()
{
wget "$SERVER"/scripts/config-kvm.sh \
-O /tmp/config-kvm.sh
source /tmp/config-kvm.sh
rm /tmp/config-kvm.sh
config_kvm;
}
nginx(){
wget "$SERVER"/scripts/config-nginx.sh \
-O /tmp/config-nginx.sh
source /tmp/config-nginx.sh
rm /tmp/config-nginx.sh
config_nginx;
}
# $1 - postgresql version (e.g. "9.5")
pgsql(){
wget "$SERVER"/scripts/config-pgsql.sh \
-O /tmp/config-pgsql.sh
source /tmp/config-pgsql.sh
rm /tmp/config-pgsql.sh
config_pgsql $1;
}
# Loads the queue data structure (push/pop, result in $D) used by the
# argument parser at the bottom of the script.
queue() {
wget "$SERVER"/dstructs/queue.sh \
-O /tmp/queue.sh
source /tmp/queue.sh
rm /tmp/queue.sh
}
# $1 - rbenv/ruby version selector forwarded to config_rbenv
rbenv(){
wget "$SERVER"/scripts/config-rbenv.sh \
-O /tmp/config-rbenv.sh
source /tmp/config-rbenv.sh
rm /tmp/config-rbenv.sh
config_rbenv $1;
}
# $1, $2 - version arguments forwarded to config_ruby
ruby(){
wget "$SERVER"/scripts/config-ruby.sh \
-O /tmp/config-ruby.sh
source /tmp/config-ruby.sh
rm /tmp/config-ruby.sh
config_ruby $1 $2;
}
sshd(){
wget "$SERVER"/scripts/config-sshd.sh \
-O /tmp/config-sshd.sh
source /tmp/config-sshd.sh
rm /tmp/config-sshd.sh
config_sshd;
}
# $1 - user name forwarded to config_user
user(){
wget "$SERVER"/scripts/config-user.sh \
-O /tmp/config-user.sh
source /tmp/config-user.sh
rm /tmp/config-user.sh
config_user $1;
}
vim(){
wget "$SERVER"/scripts/config-vim.sh \
-O /tmp/config-vim.sh
source /tmp/config-vim.sh
rm /tmp/config-vim.sh
config_vim;
}
# ----------------
# Package configs
# ----------------
# Role bundles: each provisions one machine type from the helpers above.
pgsql_server()
{
vim;
sshd;
pgsql "9.5";
}
hypervisor()
{
vim;
sshd;
kvm;
}
server()
{
vim;
sshd;
}
# ------------ #
# Script Entry #
# ------------ #
# Test if user executing the script has root privileges
if [[ "$(id -u)" != "0" ]];
then
exit 1
fi
# Load the queue helpers (push/pop) used to collect requested configs.
queue;
# Parse commandline options
while :; do
case $1 in
-d|--do-config)
if [[ -n "$2" ]]; then
# Push the item onto the queue
push $2
else
echo 'ERROR: "--do-config" requires a non-empty option argument.'
exit 1
fi
;;
--) # End of all options.
shift
break
;;
-?*)
printf 'WARN: Unknown option (ignored): %s\n' "$1" >&2
;;
*) # Default case: If no more options then break the loop.
break
esac
shift
done
# Drain the queue, running each requested config function in turn. pop
# leaves the dequeued item in $D.
# NOTE(review): eval executes whatever was passed via --do-config; only safe
# because the invoking administrator controls the arguments.
while :; do
pop
if [[ -z "$D" ]]; then # Nothing left on queue
break
else
eval ${D}
fi
done
| true
|
33d02ff771e817a8cdfdaf4e46d3b3a576535523
|
Shell
|
jsjimenez51/holberton-system_engineering-devops
|
/0x0F-load_balancer/1-install_load_balancer
|
UTF-8
| 587
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Installs HAProxy 1.8 (stable LTS) on server and configures it
# Install 'add-apt-repository' functionality
sudo apt-get install software-properties-common
# Download, update, upgrade, and install HAProxy 1.8
sudo add-apt-repository -y ppa:vbernat/haproxy-1.8
sudo apt-get update
sudo apt-get -y upgrade
sudo apt-get -y install haproxy
# Download and place configuration file
# Fix: the URL used to sit on a line by itself (not a command), so nothing
# was ever downloaded and the mv below had nothing to move. Fetch it into
# the file name the next line expects.
wget -O haproxy.cfg https://raw.githubusercontent.com/jsjimenez51/holberton-system_engineering-devops/master/0x0F-load_balancer/HAP_Config1.cfg
sudo mv haproxy.cfg /etc/haproxy
sudo service haproxy restart
| true
|
ee0feaa8a001b5e4c4533df1be85de1e92f477a1
|
Shell
|
adzhou/oragle
|
/WebKit/Tools/iExploder/iexploder-1.7.2/tools/release_src.sh
|
UTF-8
| 1,051
| 3.234375
| 3
|
[
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Create a tarball from the subversion repository.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(review): tmp is never read afterwards ($$ is used directly below);
# the assignment looks vestigial.
tmp=$$
cd /tmp
# $$ (this shell's PID) makes the checkout directory name unique.
svn checkout http://iexploder.googlecode.com/svn/trunk/ iexploder-$$
# Read the release version out of version.rb ($VERSION = "x.y").
version=`grep '^\$VERSION' iexploder-$$/src/version.rb | cut -d\" -f2`
echo "Version: $version"
mv iexploder-$$ iexploder-$version
cd iexploder-$version
svn log > ChangeLog.txt
# Strip build artifacts and svn bookkeeping before packaging.
find . -name "*.pyc" -delete
find . -name ".svn" -exec rm -Rf {} \; 2>/dev/null
cd ..
GZIP="-9" tar -zcvf iexploder-${version}.tgz iexploder-${version}/
rm -Rf iexploder-${version}
| true
|
630d692922a60dc7d8b9ed63859b5967ab396549
|
Shell
|
iamkafai/metal-setup
|
/mutt/mutt2patch.sh
|
UTF-8
| 344
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# mutt helper: reads an email (a patch) on stdin and files it under
# ~/kernel/incoming/ named after a sanitized version of its Subject header.
PATCH_FILE=$(mktemp $HOME/tmp/mutt/mutt-patch.XXXXXX)
# Capture the whole message from stdin.
cat > $PATCH_FILE
# Subject -> filename: drop brackets, replace awkward characters with '_' or
# '-', collapse repeats, and cap the stem at 70 characters.
MAILBOX=`cat $PATCH_FILE | formail -c -xSubject: | tr "'" "." | sed -e '{ s@^ @@g; s@\[@@g; s@\]@@g; s@[*()" ]@_@g; s@[/:]@-@g; s@^ \+@@; s@\.\.@.@g; s@-_@_@g; s@__@_@g; s@\.$@@; }' | cut -c 1-70`.patch
mv $PATCH_FILE $HOME/kernel/incoming/$MAILBOX
| true
|
115547dc05919be520f0b867bd26f4cf53bd299e
|
Shell
|
openshift/thanos
|
/scripts/rh-manifest.sh
|
UTF-8
| 356
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# Generate rh-manifest.txt file.
# Run from repository root.
set -e
set -u
WORKING_FILE="$(mktemp /tmp/rh-manifest.XXXXXXXX)"
# Fix: remove the scratch file on any exit; it was previously left in /tmp.
trap 'rm -f "${WORKING_FILE}"' EXIT
# Collect every package name yarn reports for each package.json in the tree.
find . -name package.json -execdir yarn list --silent --json \; | \
  jq -r '.. | objects | .name? | select(. != null)' \
  2>/dev/null >>"${WORKING_FILE}"
# Sorted, de-duplicated manifest.
sort "${WORKING_FILE}" | uniq > rh-manifest.txt
| true
|
37ae2ddf05952070c27b1ef26f9561b58a2811cc
|
Shell
|
murailab/Ponroy2020_HMG
|
/ATAC-seq/align.sh
|
UTF-8
| 464
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# usage: bash align.sh | parallel -j 12
# Emit one `bwa mem | samtools sort` command line per sample that does not
# already have a BAM file, for consumption by GNU parallel.
samples=$(ls fastq/*.fastq.gz | cut -d / -f 2 | cut -d . -f 1-5 | cut -d _ -f 1,2 | uniq)
genome="reference_data/Ensembl/Homo_sapiens/GRCh37/Sequence/BWAIndex/genome.fa"
ncores="8"
for s in $samples; do
  # Already aligned -> nothing to emit for this sample.
  [ -f "bam/${s}.bam" ] && continue
  echo "bwa mem -t 2 ${genome} trimGalore/${s}_R1_val_1.fq.gz trimGalore/${s}_R2_val_2.fq.gz | samtools sort -@ 2 -O bam -T bam/${s}.tmp -o bam/${s}.bam -"
done
| true
|
5d0dc13a86ac96a2a032bfc85501ef1604988043
|
Shell
|
JavierGalvez/Practicas-Grado
|
/MH/practica3/run_all.sh
|
UTF-8
| 502
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run ./test for every combination of algorithm, seed, dataset and r.
# ecoli uses k=8 clusters; every other dataset uses k=3.
declare -i k
for alg in es bmb ils-ls ils-es; do
  for seed in 11264 16438 75645 79856 96867; do
    for dataset in iris rand newthyroid ecoli; do
      for r in 10 20; do
        case "$dataset" in
          ecoli) k=8 ;;
          *)     k=3 ;;
        esac
        echo -e '\n'$dataset $r $k $seed $alg'\n'
        ./test $dataset $r $k $seed $alg
      done
    done
  done
done
| true
|
1f539897265d7b9dd1455d75e5d48d8456608efa
|
Shell
|
minterger/menu-vps
|
/.users/.menuusers.sh
|
UTF-8
| 15,529
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
########################################################
# Set the global flag `stateexp` to "[on]" when the .fechaexp.sh watcher
# appears in the process list, or "[off]" otherwise. Also leaves the raw
# ps match in the global `statefecha`.
onoff () {
statefecha="$(ps x | grep .fechaexp.sh | grep -v grep)"
if [ -n "$statefecha" ]; then
stateexp="[on]"
else
stateexp="[off]"
fi
}
# Run up to two commands in a background subshell while animating a progress
# bar on the terminal. Completion is signalled through the $HOME/fim marker
# file created by the subshell.
#   $1, $2 - commands to execute (output discarded)
fun_bar () {
comando[0]="$1"
comando[1]="$2"
(
[[ -e $HOME/fim ]] && rm $HOME/fim
# Self-protection: if the menu directory is gone, remove the launcher too.
[[ ! -d ~/.Menu ]] && rm -rf /bin/menu
${comando[0]} > /dev/null 2>&1
${comando[1]} > /dev/null 2>&1
touch $HOME/fim
) > /dev/null 2>&1 &
tput civis
echo -ne "\033[1;33mESPERE \033[1;37m- \033[1;33m["
while true; do
for((i=0; i<18; i++)); do
echo -ne "\033[1;31m#"
sleep 0.1s
done
# The marker file exists once the background job finished.
[[ -e $HOME/fim ]] && rm $HOME/fim && break
echo -e "\033[1;33m]"
sleep 1s
tput cuu1
tput dl1
echo -ne "\033[1;33mESPERE \033[1;37m- \033[1;33m["
done
echo -e "\033[1;33m]\033[1;37m -\033[1;32m OK !\033[1;37m"
tput cnorm
}
# Kill every screen session running the expired-users watcher.
fun_offfechaexp () {
for pidfechaexp in `screen -ls | grep ".fechaexp" | awk {'print $1'}`; do
screen -r -S "$pidfechaexp" -X quit
done
sleep 1
screen -wipe > /dev/null
}
# Start the expired-users watcher in a detached screen session.
fun_inifechaexp () {
screen -dmS fechaexp bash ~/.Menu/.users/.fechaexp.sh
}
# Toggle the expired-users watcher: stop it when running, start it otherwise.
fechaexp () {
if ps x | grep .fechaexp.sh | grep -v grep 1>/dev/null 2>/dev/null; then
clear
echo -e "\033[1;32mDESACTIVANDO REMOVEDOR DE EXPIRADOS\033[1;33m"
echo ""
fun_bar 'fun_offfechaexp'
echo ""
echo -e "\033[1;32mREMOVEDOR DE EXPIRADOS DESACTIVADO CON EXITO!\033[1;33m"
sleep 3
clear
else
clear
echo ""
echo -e "\033[1;32mINICIANDO REMOVEDOR DE EXPIRADOS\033[1;33m"
echo ""
fun_bar 'fun_inifechaexp'
echo ""
echo -e "\033[1;32mREMOVEDOR DE EXPIRADOS ACTIVADO CON EXITO\033[1;33m"
sleep 3
clear
fi
}
##########################################################
# Show the active-users screen (delegates to the sibling .users.sh script),
# then wait for Enter.
users () {
clear
bash .users.sh
echo
echo -e "\e[1;32mPresiona enter para continuar..."
read foo
}
# Disconnect users via the sibling .killusers.sh script.
userkill () {
clear
bash .killusers.sh
echo
echo -e "\e[1;32mPresiona enter para continuar..."
read foo
}
# Alternative kill menu (delegates to .menunuevo.sh).
userkill2 () {
clear
bash .menunuevo.sh
echo -e "\e[1;32mPresiona enter para continuar..."
read foo
}
# Interactively create a new SSH user: name, password, lifetime in days,
# simultaneous-login limit and an optional HTTP-custom port. Persists the
# password in ~/.Menu/.users/passwd/, the limit in /root/usuarios.db and the
# expiry in /root/fechaexp.db. Must run as root.
crearuser () {
clear
if [ $(id -u) -eq 0 ]
then
ip=$(cat /home/ip)
echo -e -n "\033[1;32mNombre del nuevo usuario:\033[0;37m"; read -p " " name
echo -e -n "\033[1;32mContraseña para el usuario $name:\033[0;37m"; read -p " " pass
echo -e -n "\033[1;32mCuantos dias el usuario $name debe durar:\033[0;37m"; read -p " " daysrnf
echo -e -n "\033[1;32mLimite de logins simultaneos:\033[0;37m"; read -p " " limiteuser
echo -e -n "\033[1;32mEscribe un puerto de conexion:\033[0;37m"; read -p " " portd
echo -e "\033[0m"
# Reject the name when an exact account already exists.
if cat /etc/passwd |grep $name: |grep -vi [a-z]$name |grep -v [0-9]$name > /dev/null
then
echo -e "\033[1;31mUsuario $name ya existe\033[0m"
else
# Expiry for useradd -e (YYYY-MM-DD) plus a display form (DD/MM/YYYY).
valid=$(date '+%C%y-%m-%d' -d " +$daysrnf days")
datexp=$(date "+%d/%m/%Y" -d "+ $daysrnf days")
useradd -M $name -e $valid
( echo "$pass";echo "$pass" ) | passwd $name 2> /dev/null
echo -e "\033[1;36mIP: \033[0m$ip"
# `limite` (defined elsewhere in the menu scripts) applies the login cap.
limite $name $limiteuser
echo -e "\033[1;36mUsuario: \033[0m$name"
echo -e "\033[1;36mContraseña: \033[0m$pass"
echo -e "\033[1;36mExpira:\033[0m $datexp"
if [ "$portd" = "" ]
then
echo -e "\033[1;36mPara generar datos para HTTP custom escriba un puerto\033[0m"
else
echo -e "\033[1;36mDatos HTTP custom:\033[0m $ip:$portd@$name:$pass "
fi
echo "$pass" > ~/.Menu/.users/passwd/$name
echo "$name $limiteuser" >> /root/usuarios.db
echo "$name $valid" >> /root/fechaexp.db
echo
echo -e "\e[1;32mPresiona enter para continuar...\e[1;0m"
read foo
fi
else
if echo $(id) |grep sudo > /dev/null
then
echo "Su usuario no esta en el grupo sudo"
echo -e "Para ejecutar root escriba: \033[1;31msudo su\033[0m"
echo -e "O ejecute menu como sudo. \033[1;31msudo menu\033[0m"
else
echo -e "Vc no esta como usuario root, ni con sus derechos (sudo)\nPara ejecutar root escribe \033[1;31msu\033[0m y escribe su contraseña root"
fi
fi
}
# Modify an existing user: connection limit (1), expiry date (2) or
# password (3). Reads the user name and option interactively; persists
# limits in /root/usuarios.db and expiry dates in /root/fechaexp.db.
redefiniruser () {
echo -e -n "\e[1;32mNombre del usuario:\e[1;0m "
read name
if cat /etc/passwd |grep $name: > /dev/null
then
echo " "
clear
echo -e "\e[100m \033[1;33mOpciones a modificar ?\033[1;30m \e[0m
\e[36m1) \e[96mNumero de Conexiones
\e[36m2) \e[96mFecha de expiracion
\e[36m3) \e[96mCambiar contraseña del usuario
\e[36m0) \e[96mVolver\e[32m"
read -p " opcion: " option
if [ $option -eq 1 ]; then
read -p "Cual es el nuevo limite de logins: " liml
echo
limite $name $liml
echo
# Update the existing usuarios.db record, or append a new one.
LIMITE=$(cat /root/usuarios.db | grep "$name " | awk '{print $2}')
USER=$(cat /root/usuarios.db | grep "$name " | awk '{print $1}')
if [ "$USER" == "$name" ];then
sed -i "s/$USER $LIMITE/$name $liml/" /root/usuarios.db
else
echo "$name $liml" >> /root/usuarios.db
fi
echo -e "\e[1;32mPresiona enter para continuar...\e[1;0m"
read foo
fi
if [ $option -eq 2 ]; then
echo "Cual es la nueva fecha : formato AAAA/MM/DD"
read -p ": " date
chage -E $date $name 2> /dev/null
FECHA=$(cat /root/fechaexp.db | grep "$name " | awk '{print $2}')
USER2=$(cat /root/fechaexp.db | grep "$name " | awk '{print $1}')
fecha=$(date -d $date +"%Y-%m-%d")
if [ "$USER2" == "$name" ];then
# Fix: match the record with $USER2 (this branch's lookup). The old code
# used $USER, which is unset here (or worse, the environment's login
# name), so the expiry line in fechaexp.db was never actually updated.
sed -i "s/$USER2 $FECHA/$name $fecha/" /root/fechaexp.db
else
# Fix: store the normalized YYYY-MM-DD form, matching both the update
# path above and the format crearuser writes.
echo "$name $fecha" >> /root/fechaexp.db
fi
echo -e "\033[1;31mEl usuario $name se desconectara el dia: $date\033[0m"
echo
echo -e "\e[1;32mPresiona enter para continuar...\e[1;0m"
read foo
fi
if [ $option -eq 3 ]
then
read -p "Cual es la nueva contraseña para el usuario $name: " pass
(echo "$pass" ; echo "$pass" ) |passwd $name > /dev/null 2>/dev/null
echo "$pass" > ~/.Menu/.users/passwd/$name
echo "Nueva contraseña aplicada: $pass"
echo
echo -e "\e[1;32mPresiona enter para continuar...\e[1;0m"
read foo
fi
else
clear
echo -e "\e[1;32mEl usuario \e[1;31m$name \e[1;32mno existe\e[1;0m"
echo
echo -e "\e[1;32mPresiona enter para continuar...\e[1;0m"
read foo
fi
if [ $option -eq 0 ]
then
clear
fi
}
killusers () {
# Disconnect every logged-in Dropbear and OpenSSH session on the box.
clear
# Candidate PIDs: anything dropbear-related in the process list (the grep
# itself can match too; the auth.log check below filters to real logins).
data=( `ps aux | grep -i dropbear | awk '{print $2}'`);
echo -e "Desconectando Usuarios"
for PID in "${data[@]}"
do
#echo "check $PID";
# A PID is a live Dropbear login when auth.log holds exactly one
# "Password auth succeeded" entry tagged with that PID.
NUM1=`cat /var/log/auth.log | grep -i dropbear | grep -i "Password auth succeeded" | grep "dropbear\[$PID\]" | wc -l`;
if [ $NUM1 -eq 1 ]; then
kill $PID;
fi
done
# Same idea for OpenSSH: "[priv]" marks the privileged sshd child of a session.
data=( `ps aux | grep "\[priv\]" | sort -k 72 | awk '{print $2}'`);
for PID in "${data[@]}"
do
#echo "check $PID";
NUM2=`cat /var/log/auth.log | grep -i sshd | grep -i "Accepted password for" | grep "sshd\[$PID\]" | wc -l`;
if [ $NUM2 -eq 1 ]; then
kill $PID;
fi
done
}
userdelete () {
# Interactively delete a system user together with its bookkeeping entries
# (cached password file, connection-limit db, expiry-date db).
clear
read -p "Cual es el nombre del usuario: " name
# No /etc/passwd entry -> report and bail out.
if [ $(cat /etc/passwd |grep "^$name:" |wc -l) -eq 0 ]; then
echo
echo -e "\e[1;32mUsuario \e[1;31m$name \e[1;32mno existe\e[1;0m"
echo
echo -e "\e[1;32mPresiona enter para continuar...\e[1;0m"
read foo
else
userdel --force $name > /dev/null 2>/dev/null
echo
echo -e "\e[1;32mEl usuario \e[1;31m$name \e[1;32mfue eliminado\e[1;0m"
echo
echo -e "\e[1;32mPresiona enter para continuar...\e[1;0m"
# NOTE(review): the trailing glob also removes files of any user whose name
# merely starts with $name -- confirm this prefix match is intended.
rm ~/.Menu/.users/passwd/$name*
# Drop the user's line from the limit and expiry databases.
sed -i "/$name /d " /root/usuarios.db
sed -i "/$name /d " /root/fechaexp.db
read foo
fi
}
# Show the user listing produced by the helper script, then wait for Enter
# so the operator can read it before the menu redraws.
userlist () {
  clear
  bash .listusers.sh
  echo
  echo -e "\e[1;32mPresiona enter para continuar...\e[1;0m"
  read foo
}
monitorssh () {
# One-shot table of SSH users: cached password plus current connection
# count versus the per-user limit stored in usuarios.db.
clear
database="/root/usuarios.db"
echo $$ > /tmp/kids
while true
do
tput setaf 7 ; tput setab 1 ; tput bold ; printf '%29s%s%-20s\n' "SSH Monitor"
tput setaf 7 ; tput setab 1 ; printf ' %-15s%-16s%s\n' "Usuário" "contraseña" "Conexión / Límite " ; echo "" ; tput sgr0
while read usline
do
# Each database line is "<user> <max-connections>".
user="$(echo $usline | cut -d' ' -f1)"
s2ssh="$(echo $usline | cut -d' ' -f2)"
if [ -z "$user" ] ; then
echo "" > /dev/null
else
# Cleartext password cached at user-creation time, if present.
if [ -f ~/.Menu/.users/passwd/$user ]; then
passwd=$(cat ~/.Menu/.users/passwd/$user)
else
passwd="null"
fi
# Count the user's session processes (tty/pts sessions excluded).
ps x | grep [[:space:]]$user[[:space:]] | grep -v grep | grep -v pts > /tmp/tmp7
s1ssh="$(cat /tmp/tmp7 | wc -l)"
tput setaf 3 ; tput bold ; printf ' %-14s%-22s%s\n' $user $passwd $s1ssh/$s2ssh; tput sgr0
fi
done < "$database"
echo ""
echo -e "\e[1;32mPresiona enter para continuar...\e[1;0m"
read foo
# Single pass only: break makes the outer "while true" run once.
break
done
}
killmulti () {
# Continuously enforce per-user SSH connection limits from usuarios.db,
# killing every session of a user who exceeds the allowed count.
clear
database="/root/usuarios.db"
echo $$ > /tmp/pids
if [ ! -f "$database" ]
then
echo "Archivo /root/usuarios.db no encontrado"
exit 1
fi
while true
do
tput setaf 7 ; tput setab 4 ; tput bold ; printf '%29s%s%-20s\n' "SSH Limiter"
tput setaf 7 ; tput setab 4 ; printf ' %-30s%s\n' "Usuário" "Conexión / Límite " ; echo "" ; tput sgr0
while read usline
do
# Each database line is "<user> <max-connections>".
user="$(echo $usline | cut -d' ' -f1)"
s2ssh="$(echo $usline | cut -d' ' -f2)"
if [ -z "$user" ] ; then
echo "" > /dev/null
else
# PID list of the user's session processes (tty/pts sessions excluded).
ps x | grep [[:space:]]$user[[:space:]] | grep -v grep | grep -v pts > /tmp/tmp2
s1ssh="$(cat /tmp/tmp2 | wc -l)"
tput setaf 3 ; tput bold ; printf ' %-35s%s\n' $user $s1ssh/$s2ssh; tput sgr0
# Over the limit: kill every PID recorded above (first column of ps x).
if [ "$s1ssh" -gt "$s2ssh" ]; then
tput setaf 7 ; tput setab 1 ; tput bold ; echo " Usuário desconectado por ultrapassar el limite!" ; tput sgr0
while read line
do
tmp="$(echo $line | cut -d' ' -f1)"
kill $tmp
done < /tmp/tmp2
rm /tmp/tmp2
fi
fi
done < "$database"
echo ""
echo -e "\e[1;32m Para salir precione Ctrl + C\e[1;0m"
# Re-check every 5 seconds until interrupted.
sleep 5
clear
done
}
killmultidbr () {
# Continuously enforce per-user Dropbear connection limits from usuarios.db,
# counting successful logins in auth.log and killing sessions over the limit.
touch /tmp/users;
database="/root/usuarios.db"
echo $$ > /tmp/pids
if [ ! -f "$database" ]
then
echo "Archivo /root/usuarios.db no encontrado"
exit 1
fi
while true
do
# rm /tmp/users
# data=( `ps aux | grep -i dropbear | awk '{print $2}'`);
# for PID in "${data[@]}"
# do
# #echo "check $PID";
# NUM1=`cat /var/log/auth.log | grep -i dropbear | grep -i "Password auth succeeded" | grep "dropbear\[$PID\]" | wc -l`;
# USER=`cat /var/log/auth.log | grep -i dropbear | grep -i "Password auth succeeded" | grep "dropbear\[$PID\]" | awk '{print $10}'`;
# IP=`cat /var/log/auth.log | grep -i dropbear | grep -i "Password auth succeeded" | grep "dropbear\[$PID\]" | awk '{print $12}'`;
# if [ $NUM1 -eq 1 ]; then
# echo "$PID $USER" >> /tmp/users;
# else
# touch /tmp/users;
# fi
# done
clear
tput setaf 7 ; tput setab 4 ; tput bold ; printf '%29s%s%-20s\n' "Dropbear Limiter"
tput setaf 7 ; tput setab 4 ; printf ' %-30s%s\n' "Usuário" "Conexión / Límite " ; echo "" ; tput sgr0
while read usline
do
# Each database line is "<user> <max-connections>".
user="$(echo $usline | cut -d' ' -f1)"
s2ssh="$(echo $usline | cut -d' ' -f2)"
if [ -z "$user" ] ; then
echo "" > /dev/null
else
# Extract the PIDs of this user's successful Dropbear logins from auth.log
# (the text between "[" and "]" of the matching log lines).
data="$(ps aux | grep -i dropbear | awk '{print $2}')" ;
cat /var/log/auth.log | grep -i dropbear | grep -i "Password auth succeeded" | grep "dropbear\[$data\]" | grep "'$user'" | awk -F "[" '{print $2}' | awk -F "]" '{print $1}' > /tmp/tmp2
# cat /tmp/users | grep "'$user'" | grep -v grep | grep -v pts > /tmp/tmp2
s1ssh="$(cat /tmp/tmp2 | wc -l)"
tput setaf 3 ; tput bold ; printf ' %-35s%s\n' $user $s1ssh/$s2ssh; tput sgr0
# Over the limit: kill every PID collected above.
if [ "$s1ssh" -gt "$s2ssh" ]; then
tput setaf 7 ; tput setab 1 ; tput bold ; echo " Usuário desconectado por ultrapassar el limite!" ; tput sgr0
while read line
do
tmp="$(echo $line | cut -d' ' -f1)"
kill $tmp
done < /tmp/tmp2
rm /tmp/tmp2
fi
fi
done < "$database"
echo ""
echo -e "\e[1;32m Para salir precione Ctrl + C\e[1;0m"
# Re-check every 5 seconds until interrupted.
sleep 5
done
}
monitordropbear () {
# One-shot table of Dropbear users: cached password plus current login
# count (derived from auth.log) versus the limit in usuarios.db.
database="/root/usuarios.db"
echo $$ > /tmp/kids
while true
do
# data=( `ps aux | grep -i dropbear | awk '{print $2}'`);
# for PID in "${data[@]}"
# do
# #echo "check $PID";
# NUM1=`cat /var/log/auth.log | grep -i dropbear | grep -i "Password auth succeeded" | grep "dropbear\[$PID\]" | wc -l`;
# USER=`cat /var/log/auth.log | grep -i dropbear | grep -i "Password auth succeeded" | grep "dropbear\[$PID\]" | awk '{print $10}'`;
# IP=`cat /var/log/auth.log | grep -i dropbear | grep -i "Password auth succeeded" | grep "dropbear\[$PID\]" | awk '{print $12}'`;
# if [ $NUM1 -eq 1 ]; then
# echo "$PID $USER" >> /tmp/users;
# else
# touch /tmp/users;
# fi
# done
clear
tput setaf 7 ; tput setab 1 ; tput bold ; printf '%29s%s%-20s\n' "Dropbear Monitor"
tput setaf 7 ; tput setab 1 ; printf ' %-15s%-16s%s\n' "Usuário" "contraseña" "Conexión / Límite " ; echo "" ; tput sgr0
while read usline
do
# Each database line is "<user> <max-connections>".
user="$(echo $usline | cut -d' ' -f1)"
s2ssh="$(echo $usline | cut -d' ' -f2)"
if [ -z "$user" ] ; then
echo "" > /dev/null
else
# Cleartext password cached at user-creation time, if present.
if [ -f ~/.Menu/.users/passwd/$user ]; then
passwd=$(cat ~/.Menu/.users/passwd/$user)
else
passwd="null"
fi
# Count the user's successful Dropbear logins recorded in auth.log.
data="$(ps aux | grep -i dropbear | awk '{print $2}')" ;
cat /var/log/auth.log | grep -i dropbear | grep -i "Password auth succeeded" | grep "dropbear\[$data\]" | grep "'$user'" | awk -F "[" '{print $2}' | awk -F "]" '{print $1}' > /tmp/tmp8
# cat /tmp/users | grep "'$user'" | grep -v grep | grep -v pts > /tmp/tmp8
s1ssh="$(cat /tmp/tmp8 | wc -l)"
tput setaf 3 ; tput bold ; printf ' %-14s%-22s%s\n' $user $passwd $s1ssh/$s2ssh; tput sgr0
fi
done < "$database"
echo ""
# rm /tmp/users
echo -e "\e[1;32mPresiona enter para continuar...\e[1;0m"
read foo
# Single pass only: break makes the outer "while true" run once.
break
done
}
# Hand control to the auto-disconnect helper script after a banner line.
autokill () {
  clear
  echo -e "Autokill: \e[1;31m"
  bash .autokill.sh
}
# Main menu: redraw the header, list the actions and dispatch to the
# matching function until the operator chooses 0 (exit).  Entries 69/70
# are not printed but dispatch to userkill/userkill2 (defined elsewhere).
while :
do
onoff
cd ..
bash .head.sh
cd .users
echo -e "\e[1;32mEscoja una opcion "
echo
#echo -e "\e[1;31m[1]\e[1;32m Usuarios conectados"
echo -e "\e[1;31m[1]\e[1;32m Crear usuario"
echo -e "\e[1;31m[2]\e[1;32m Redefinir usuario"
echo -e "\e[1;31m[3]\e[1;32m Eliminar usuario"
echo -e "\e[1;31m[4]\e[1;32m Monitor SSH"
echo -e "\e[1;31m[5]\e[1;32m Monitor Dropbear"
#echo -e "\e[1;31m[6]\e[1;32m Lista de usuarios"
echo -e "\e[1;31m[6]\e[1;32m Desconectar usarios de mas en SSH"
echo -e "\e[1;31m[7]\e[1;32m Desconectar usarios de mas en Dropbear"
echo -e "\e[1;31m[8]\e[1;32m Menu autodesconectar users"
echo -e "\e[1;31m[9]\e[1;32m Autoeliminar users expirados $stateexp"
echo -e "\e[1;31m[10]\e[1;32m Desconectar todos los usuarios"
echo -e "\e[1;31m[0]\e[1;32m Salir"
echo
echo -n "Seleccione una opcion [1 - 10]: "
read opcion
case $opcion in
#1)
#users;;
1)
crearuser;;
2)
redefiniruser;;
3)
userdelete;;
4)
monitorssh;;
5)
monitordropbear;;
#6)
#userlist;;
6)
killmulti;;
7)
killmultidbr;;
8)
autokill;;
9)
fechaexp;;
10)
killusers;;
69)
userkill;;
70)
userkill2;;
0) clear;
exit 1;;
*) clear;
echo -e "\e[1;31mEs una opcion invalida:";
echo -e "\e[1;32mPresiona enter para continuar...";
read foo;;
esac
done
| true
|
0190715f5fc45faf5186caec7a3f2a7e3fd6b1a1
|
Shell
|
LABBIZ/aich02
|
/setup.sh
|
UTF-8
| 1,389
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a Vagrant box for NNabla development: install toolchain and
# Python ML packages, build protobuf 3.1 and NNabla from source, then run
# the MNIST examples as a smoke test.
SYNC_DIR="/vagrant"
apt-get update
apt-get -y upgrade
apt-get -y install wget
apt-get -y install --no-install-recommends ccache cmake curl g++ make unzip git
apt-get -y install python3 python3-dev python3-pip python3-setuptools python3-virtualenv python3-numpy python3-scipy python3-yaml python3-h5py python-six
apt-get -y install libhdf5-dev libarchive-dev
pip3 install --upgrade pip
pip3 install tensorflow
pip3 install keras
pip3 install http://download.pytorch.org/whl/cu75/torch-0.2.0.post1-cp35-cp35m-manylinux1_x86_64.whl
pip3 install torchvision
pip3 install nnabla
# Build protobuf 3.1.0 from source (PIC, no tests).
curl -L https://github.com/google/protobuf/archive/v3.1.0.tar.gz -o protobuf-v3.1.0.tar.gz
tar xvf protobuf-v3.1.0.tar.gz
cd protobuf-3.1.0
mv BUILD _BUILD
mkdir build && cd build
cmake -DCMAKE_POSITION_INDEPENDENT_CODE=ON -Dprotobuf_BUILD_TESTS=OFF ../cmake
make
sudo make install
cd ${SYNC_DIR}
git clone https://github.com/gabime/spdlog.git
# BUG FIX: the checkout must run inside the freshly cloned repository;
# previously it executed in ${SYNC_DIR} and left spdlog at its default branch.
(cd spdlog && git checkout v0.13.0)
cp -a spdlog/include/spdlog /usr/include/spdlog
git clone https://github.com/sony/nnabla.git
cd nnabla
git checkout v0.9.4
mkdir build && cd build
cmake .. -DBUILD_CPP_UTILS=ON -DBUILD_PYTHON_API=OFF
make
make install
# Smoke tests: train/classify MNIST with the Python and C++ examples.
cd ${SYNC_DIR}/nnabla/examples/vision/mnist
python3 classification.py
cd ${SYNC_DIR}/nnabla/examples/cpp/mnist_runtime
python3 save_nnp_classification.py
make
| true
|
b6646c730d245e3e2c1858613617e118bfad75fe
|
Shell
|
juanino/pyuverse
|
/bin/start_daemon.sh
|
UTF-8
| 470
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch inid_stats.py under the 'daemon' wrapper, logging to /var/log,
# and beep to confirm startup.
# modify with your install location
INSTALLROOT=/root/appliance/pyuverse/bin/
if [ -x ${INSTALLROOT}/inid_stats.py ] ; then
    echo starting up ${INSTALLROOT}/inid_stats.py
else
    echo Could not find ${INSTALLROOT}/inid_stats.py
    echo please check the value of INSTALLROOT in start_daemon.sh
    # BUG FIX: previously the script printed this error and then launched
    # the daemon anyway; abort instead so the message is actionable.
    exit 1
fi
daemon ${INSTALLROOT}/inid_stats.py --stdout /var/log/inid_stats.log --stderr /var/log/inid_stats.log --output /var/log/inid_stats.log
# Audible confirmation that the daemon was started.
beep -f 2000 -r 4
| true
|
caa5ccf00821628678e02d39e7caadece8919263
|
Shell
|
cpuu/afl-fuzzing-training
|
/workshop/_shared_/run_parallel_fuzz.sh
|
UTF-8
| 3,776
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch a parallel AFL campaign in detached screen sessions: one master
# instance plus up to eight optional groups of slave instances.  Each group
# is described by a (fuzz_label, afl_fuzz_exe, fuzzed_exec, nr_fuzzers)
# 4-tuple appended to the argument list; groups are selected purely by the
# total argument count ($#).
### PARAMS
if [ $# -lt 7 ];
then echo "illegal number of parameters : $0 project_name test_dir (or - for continue) findings_dir fuzz_label afl_fuzz_exe fuzzed_exec nr_fuzzers [fuzz_label afl_fuzz_exe fuzzed_exec nr_fuzzers]*"
exit;
fi
PROJECT=$1
# NOTE(review): EXEC/EXEC_ASAN are derived here but never used below.
EXEC=test_$PROJECT.exe
EXEC_ASAN=test_"$PROJECT"_asan.exe
TESTCASE_DIR=$2
FINDINGS_DIR=$3
###
# Group 1 (args 4-7): the master fuzzer (-M).
FUZZ_LABEL=$4
AFL_FUZZ=$5
FUZZ_EXEC=$6
NR_FUZZERS=$7
echo "[*] Starting master"
# echo "screen -dm -S afl_"$PROJECT"_$FUZZ_LABEL $AFL_FUZZ -i $TESTCASE_DIR -o $FINDINGS_DIR -M fuzzer_"$PROJECT"_$FUZZ_LABEL ./$FUZZ_EXEC @@"
set -x
# screen -dm -S afl_"$PROJECT"_$FUZZ_LABEL $AFL_FUZZ -i $TESTCASE_DIR -Q -o $FINDINGS_DIR -M fuzzer_"$PROJECT"_$FUZZ_LABEL ./$FUZZ_EXEC
screen -dm -S afl_"$PROJECT"_$FUZZ_LABEL $AFL_FUZZ -i $TESTCASE_DIR -o $FINDINGS_DIR -M fuzzer_"$PROJECT"_$FUZZ_LABEL ./$FUZZ_EXEC @@
set +x
# Group 2 (args 8-11): plain slave fuzzers (-S).
if [ $# -ge 11 ];
then
FUZZ_LABEL=$8
AFL_FUZZ=$9
FUZZ_EXEC=${10}
NR_FUZZERS=${11}
for ((i=1;i<=NR_FUZZERS;i++));
do
echo "[*] Starting $i slave $FUZZ_LABEL"
set -x
screen -dm -S afl_"$PROJECT"_"$FUZZ_LABEL"-$i $AFL_FUZZ -i $TESTCASE_DIR -o $FINDINGS_DIR -S fuzzer_"$PROJECT"_"$FUZZ_LABEL"-$i ./$FUZZ_EXEC @@
set +x
done
fi
# Group 3 (args 12-15): slaves with the memory limit disabled (-m none).
if [ $# -ge 15 ];
then
FUZZ_LABEL=${12}
AFL_FUZZ=${13}
FUZZ_EXEC=${14}
NR_FUZZERS=${15}
for ((i=1;i<=NR_FUZZERS;i++));
do
echo "[*] Starting $i slave $FUZZ_LABEL"
set -x
screen -dm -S afl_"$PROJECT"_"$FUZZ_LABEL"-$i $AFL_FUZZ -m none -i $TESTCASE_DIR -o $FINDINGS_DIR -S fuzzer_"$PROJECT"_"$FUZZ_LABEL"-$i ./$FUZZ_EXEC @@
set +x
done
fi
# Group 4 (args 16-19): more slaves with the memory limit disabled.
if [ $# -ge 19 ];
then
FUZZ_LABEL=${16}
AFL_FUZZ=${17}
FUZZ_EXEC=${18}
NR_FUZZERS=${19}
for ((i=1;i<=NR_FUZZERS;i++));
do
echo "[*] Starting $i slave $FUZZ_LABEL"
set -x
screen -dm -S afl_"$PROJECT"_"$FUZZ_LABEL"-$i $AFL_FUZZ -m none -i $TESTCASE_DIR -o $FINDINGS_DIR -S fuzzer_"$PROJECT"_"$FUZZ_LABEL"-$i ./$FUZZ_EXEC @@
set +x
done
fi
# Group 5 (args 20-23): plain slave fuzzers.
if [ $# -ge 23 ];
then
FUZZ_LABEL=${20}
AFL_FUZZ=${21}
FUZZ_EXEC=${22}
NR_FUZZERS=${23}
for ((i=1;i<=NR_FUZZERS;i++));
do
echo "[*] Starting $i slave $FUZZ_LABEL"
set -x
screen -dm -S afl_"$PROJECT"_"$FUZZ_LABEL"-$i $AFL_FUZZ -i $TESTCASE_DIR -o $FINDINGS_DIR -S fuzzer_"$PROJECT"_"$FUZZ_LABEL"-$i ./$FUZZ_EXEC @@
set +x
done
fi
# Group 6 (args 24-27): plain slave fuzzers.
if [ $# -ge 27 ];
then
FUZZ_LABEL=${24}
AFL_FUZZ=${25}
FUZZ_EXEC=${26}
NR_FUZZERS=${27}
for ((i=1;i<=NR_FUZZERS;i++));
do
echo "[*] Starting $i slave $FUZZ_LABEL"
set -x
screen -dm -S afl_"$PROJECT"_"$FUZZ_LABEL"-$i $AFL_FUZZ -i $TESTCASE_DIR -o $FINDINGS_DIR -S fuzzer_"$PROJECT"_"$FUZZ_LABEL"-$i ./$FUZZ_EXEC @@
set +x
done
fi
# Group 7 (args 28-31): plain slave fuzzers.
if [ $# -ge 31 ];
then
FUZZ_LABEL=${28}
AFL_FUZZ=${29}
FUZZ_EXEC=${30}
NR_FUZZERS=${31}
for ((i=1;i<=NR_FUZZERS;i++));
do
echo "[*] Starting $i slave $FUZZ_LABEL"
set -x
screen -dm -S afl_"$PROJECT"_"$FUZZ_LABEL"-$i $AFL_FUZZ -i $TESTCASE_DIR -o $FINDINGS_DIR -S fuzzer_"$PROJECT"_"$FUZZ_LABEL"-$i ./$FUZZ_EXEC @@
set +x
done
fi
# Group 8 (args 32-35): QEMU-mode slaves (-Q, target reads stdin -> no @@).
if [ $# -ge 35 ];
then
FUZZ_LABEL=${32}
AFL_FUZZ=${33} # QEmu
FUZZ_EXEC=${34}
NR_FUZZERS=${35}
for ((i=1;i<=NR_FUZZERS;i++));
do
echo "[*] Starting $i slave $FUZZ_LABEL"
set -x
screen -dm -S afl_"$PROJECT"_"$FUZZ_LABEL"-$i $AFL_FUZZ -Q -i $TESTCASE_DIR -o $FINDINGS_DIR -S fuzzer_"$PROJECT"_"$FUZZ_LABEL"-$i ./$FUZZ_EXEC
set +x
done
fi
# Group 9 (args 36-39): driller drivers, attached to the group-8 (QEMU)
# fuzzer directories -- note the deliberate ${32} in the path below.
if [ $# -ge 39 ];
then
FUZZ_LABEL=${36}
AFL_FUZZ=${37} # driller
FUZZ_EXEC=${38}
NR_FUZZERS=${39}
for ((i=1;i<=NR_FUZZERS;i++));
do
echo "[*] Starting $i slave $FUZZ_LABEL"
set -x
screen -dm -S afl_"$PROJECT"_"$FUZZ_LABEL"-$i python $AFL_FUZZ $FINDINGS_DIR/fuzzer_"$PROJECT"_${32}-$i ./$FUZZ_EXEC
set +x
done
fi
| true
|
0444b540f4abb4f95cda14b752f99e58285dd116
|
Shell
|
garridoo/rhizobiales
|
/bai_2015_leaf.sh
|
UTF-8
| 6,498
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# scripts for 16S data analysis
#
# originally by Ruben Garrido-Oter
# garridoo@mpipz.mpg.de
#
# Pipeline for the bai_2015_leaf 454 dataset: demultiplex and quality-trim
# each library, cluster OTUs with UPARSE, remove chimeras, assign taxonomy,
# build a phylogenetic tree and emit a CSS-normalized OTU table.  Requires
# bash (uses "&>>") plus QIIME, usearch, clustalo and the sourced helpers.
# exits whenever a function returns 1
set -e
# get path to scripts
scripts_dir=$(dirname $0)
# load config file
source $scripts_dir/config.sh
# load functions
source $scripts_dir/16s.functions.sh
# activate QIIME, etc.
source $scripts_dir/activate.sh
# prepare results directory
working_dir=$working_dir/bai_2015_leaf
data_dir=$data_dir/bai_2015_leaf
mkdir -p $working_dir
rm -f -r $working_dir/*
output=$working_dir/"output.txt"
logfile=$working_dir/"log.txt"
### 454 IRLs (libraries 701, 656 and 838) ###
# parameters
l_list="992_1 992_2 992_3 992_4" # list of library IDs
bc_length=6 # barcode length
# NOTE(review): phred is set but not referenced below -- confirm intent.
phred=20 # min. qual score
qual=25 # min. qual score
bc_err=0 # allowed barcode errors
t_length=315 # trimming length
# demultiplexing and quality trimming for each library
for l in $l_list
do
# initialize lib. results directory
rm -f -r $working_dir/"$l"
# truncating reads to equal length
log "["$l"] truncating reads..."
truncate_fasta_qual_files.py -f $data_dir/"$l".fasta \
-q $data_dir/"$l".qual \
-b $t_length \
-o $working_dir/"$l"/trunc \
&>> $output
mv $working_dir/"$l"/trunc/"$l"_filtered.fasta $working_dir/"$l"/filtered.fasta
mv $working_dir/"$l"/trunc/"$l"_filtered.qual $working_dir/"$l"/filtered.qual
rm -f -r $working_dir/"$l"/trunc
# quality filtering and demultiplexing
log "["$l"] demultiplexing..."
split_libraries.py -f $working_dir/"$l"/filtered.fasta \
-q $working_dir/"$l"/filtered.qual \
-m $data_dir/"$l"_mapping.txt \
-s $qual \
-e $bc_err \
-b $bc_length \
-l $t_length \
-d \
-o $working_dir/"$l"/demux \
&>> $output
mv $working_dir/"$l"/demux/* $working_dir/"$l"
rm -f -r $working_dir/"$l"/demux
# edit barcode label identifier for usearch compatibility
cat $working_dir/"$l"/seqs.fna | \
sed 's/ .*/;/g;s/>.*/&&/g;s/;>/;barcodelabel=/g;s/_[0-9]*;$/;/g' \
>> $working_dir/seqs.fasta
# check sample sizes
sampleSizes $working_dir/"$l"/seqs.fasta \
$data_dir/"$l"_mapping.txt \
$working_dir/"$l"/sample_sizes.txt \
&>> $output
done
# dereplication
log "dereplicating..."
usearch -derep_fulllength $working_dir/seqs.fasta \
-fastaout $working_dir/seqs_unique.fasta \
-sizeout \
&>> $output
# abundance sort and discard singletons
log "sorting by abundance and discarding singletons..."
usearch -sortbysize $working_dir/seqs_unique.fasta \
-fastaout $working_dir/seqs_unique_sorted.fasta \
-minsize $min_size \
&>> $output
# OTU clustering
log "OTU clustering using UPARSE..."
usearch -cluster_otus $working_dir/seqs_unique_sorted.fasta \
-otus $working_dir/otus.fasta \
&>> $output
# chimera detection
log "removing chimeras..."
usearch -uchime_ref $working_dir/otus.fasta \
-db $gold_db \
-strand plus \
-nonchimeras $working_dir/otus_nc.fasta \
-threads $n_cores \
&>> $output
# align sequences to database using PyNAST and remove remaining
log "aligning OTU representative sequences to database..."
align_seqs.py -i $working_dir/otus_nc.fasta \
-t $gg_core_aligned_db \
-p $min_id_aln \
-o $working_dir
# rename OTUs and remove alignment gaps
log "renaming OTUs..."
sed -i 's/-//g' $working_dir/otus_nc_aligned.fasta &>> $output
cat $working_dir/otus_nc_aligned.fasta | \
awk 'BEGIN {n=1}; />/ {print ">OTU_" n; n++} !/>/ {print}' \
>> $working_dir/rep_seqs.fasta
# generate OTU table
log "generating OTU table..."
usearch -usearch_global $working_dir/seqs.fasta \
-db $working_dir/rep_seqs.fasta \
-strand plus \
-id $id_threshold \
-uc $working_dir/read_mapping.uc \
&>> $output
# convert uc file to txt
log "converting uc OTU table file into text format..."
python $usearch_dir/uc2otutab.py $working_dir/read_mapping.uc \
1> $working_dir/otu_table.txt \
2>> $output
# taxonomy assignment
log "taxonomy assignment..."
assign_taxonomy.py -i $working_dir/rep_seqs.fasta \
-r $gg_core_db \
-t $gg_taxonomy \
-m $tax_method \
-o $working_dir/tax \
&>> $output
# cleanup
mv $working_dir/tax/rep_seqs_tax_assignments.txt $working_dir/taxonomy.txt
rm -f -r $working_dir/tax
# split the semicolon-separated lineage into tab-separated columns
sed -i 's/; /\t/g' $working_dir/taxonomy.txt
# convert OTU table to biom
log "converting OTU table to QIIME compatible biom format..."
biom convert -i $working_dir/otu_table.txt \
-o $working_dir/otu_table.biom \
--table-type="OTU table" \
--to-json \
&>> $output
# align the representative sequences
log "aligning representative sequences..."
clustalo --seqtype=DNA \
--threads=$n_cores \
-i $working_dir/rep_seqs.fasta \
-o $working_dir/rep_seqs_aligned.fasta \
--full \
&>> $output
# filter the alignment
filter_alignment.py -i $working_dir/rep_seqs_aligned.fasta \
-o $working_dir \
&>> $output
# generate tree from alignment using FastTree
log "generating tree..."
make_phylogeny.py -i $working_dir/rep_seqs_aligned_pfiltered.fasta \
-o $working_dir/rep_seqs.tree \
&>> $output
# normalize OTU table
log "normalizing OTU table using the CSS method..."
$scripts_dir/normalize_otu_table.R $working_dir/otu_table.biom \
$working_dir/otu_table_norm.biom \
&>> $output
# extract normalized OTU table to text format
biom convert -i $working_dir/otu_table_norm.biom \
--table-type="OTU table" \
--to-tsv \
-o $working_dir/otu_table_norm.txt \
&>> $output
# strip the biom header comment and rename the id column
sed -i '/# Const/d;s/#OTU ID/OTUId/g' $working_dir/otu_table_norm.txt
log "DONE!"
| true
|
c4f8eb51d7675911e047d6b738ad2ea5ecdd93b8
|
Shell
|
Msrgit/MyVim
|
/My Vim Configuration/prereqs.sh
|
UTF-8
| 516
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Back up a file (if it exists) by renaming it with a timestamp suffix,
# e.g. "~/.vimrc" -> "~/.vimrc.240101123045".  No-op when the path is absent.
dbum() {
  local target=$1
  if [[ -e "$target" ]]; then
    mv "$target" "$target.$(date +%y%m%d%H%M%S)"
  fi
}
# Install prerequisites (ctags, ag), then set up fzf, Vundle and this
# repository's personal Vim configuration.
sudo apt-get -y install ctags silversearcher-ag
git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
~/.fzf/install
# Back up any existing ~/.vimrc (timestamped rename) before overwriting it.
dbum ~/.vimrc
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
cp .vimrc ~/.vimrc
cp .ctags ~/.ctags
mkdir -p ~/.vim && cp plugins.vim ~/.vim/plugins.vim
mkdir -p ~/.vim/colors
mkdir -p ~/.vim/bundle
cp ./colors/* ~/.vim/colors
# Let Vundle fetch all plugins declared in plugins.vim.
vim -c PluginInstall
| true
|
e04f603323a3e7e8e8a347bb1ec7362f7331eeb3
|
Shell
|
yujianjun1025/scala_exercise
|
/mem_fun.scala
|
UTF-8
| 820
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
exec scala "$0" "$@"
!#
import scala.io.Source
// Reports lines of a file that exceed a given width, prefixed with the
// file name.  Invoked as a Scala script: args(0)=file, args(1)=width.
object LongLines {
// Streams the file and checks every line against `width`.
def processFile(filename :String, width:Int){
val source = Source.fromFile(filename)
for(line <- source.getLines()){
processLine(filename, width, line)
}
}
// Prints "<file>\t:<trimmed line>" when the line is longer than `width`.
def processLine(filename: String, width : Int, line:String){
if(line.length > width){
println(filename + "\t:" + line.trim)
}
}
// Same behaviour as processFile, but implemented with a local (nested)
// helper that closes over `filename` and `width`.
def processFile2(filename:String, width:Int){
def processLine2(line:String){
if(line.length > width){
println(filename + ":" + line.trim)
}
}
val source = Source.fromFile(filename)
for( line <- source.getLines()){
processLine2(line)
}
}
}
// Run both variants over the same input to compare their output.
LongLines.processFile(args(0), args(1).toInt)
println("局部函数测试")
LongLines.processFile2(args(0), args(1).toInt)
| true
|
ffcabb0f094af7fd36784b1bbe294d0e6ed9c9c0
|
Shell
|
UCLeuvenLimburg/advanced-programming-topics
|
/exercises/git/03-branches/04-merge/solution.sh
|
UTF-8
| 499
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# BUG FIX: the original shebang read "#/usr/bin/env bash" (missing "!"),
# so the kernel could not select bash when the script was executed directly.
#
# Merge three feature branches into the current branch, resolving the
# merge conflicts on file.txt by hand, then delete the merged branches.
pushd sandbox
git merge feature-1
git merge feature-2
# Resolving conflict; should be done using editor
echo This should be line 1 > file.txt
echo This should be line 3 >> file.txt
git add file.txt
git commit --no-edit
git merge feature-3
echo This should be line 1 > file.txt
echo This should be line 2 >> file.txt
echo This should be line 3 >> file.txt
git add file.txt
git commit --no-edit
# All three branches are merged; remove the branch pointers.
git branch -d feature-1
git branch -d feature-2
git branch -d feature-3
popd
| true
|
707e27dc5d67e4c69a14cf12863475e64b0dc008
|
Shell
|
Team-OctOS/platform_vendor_oct
|
/config/oct_overlay
|
UTF-8
| 683
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/sh
# Copyright (C) 2014 Team OctOS
#
# OctOS overlay script created by Treken for copying files to specified directories
#
# For recursive directories a permisions flag "-p" must be used
# For overwriting files the force flag "-f" must be used
#
# Disclaimer:
# We are not responsible if you edit this script and use it for otherthings
# OmniSwitch overlays
# Overwrite the manifest with the OctOS version.
cp -f vendor/oct/overlay/common/packages/apps/OmniSwitch/AndroidManifest.xml packages/apps/OmniSwitch/AndroidManifest.xml
mkdir -p packages/apps/OmniSwitch/res/values-holodark
# NOTE(review): unlike the manifest copy above, this cp lacks -f, so an
# existing read-only styles.xml would not be overwritten -- confirm intent.
cp vendor/oct/overlay/common/packages/apps/OmniSwitch/res/values-holodark/styles.xml packages/apps/OmniSwitch/res/values-holodark/styles.xml
| true
|
c4a6c86cc88ba849025ab5592c7d7b449c1771e5
|
Shell
|
Globidev/linux-dotfiles
|
/docker/docker.install.custom
|
UTF-8
| 377
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Here I install docker from the binaries as it gives better control over the daemon

DOCKER_PATH="/usr/local/bin/docker"
DOCKER_VERSION="1.11.0"
DOCKER_URL="https://get.docker.com/builds/$(uname -s)/$(uname -m)/docker-$DOCKER_VERSION.tgz"

# Install only when the binary is not already present.
if [[ ! -e "$DOCKER_PATH" ]]; then
    # Quote all expansions (SC2086/SC2046) and use the canonical long option
    # name --strip-components (the original's --strip-component relied on
    # GNU tar's option abbreviation).
    curl -L "$DOCKER_URL" | tar xz -C "$(dirname "$DOCKER_PATH")" --strip-components 1
fi
| true
|
1a81021188ad345d7947cdd2be9aedb11b6f14c0
|
Shell
|
amitlaldass/assignment
|
/usermenu
|
UTF-8
| 1,278
| 3.546875
| 4
|
[] |
no_license
|
#!/usr/bin/ksh
#######################################################################
#Author:Amit Kumar
#email ID:amitforjob@gmail.com
#######################################################################
# Simple interactive admin menu: loops forever showing numbered actions
# and runs the chosen one until the operator selects 8 (exit).
USER=`/usr/bin/logname`
main()
{
while true
do
{
clear
echo "1. Change your passwd"
echo "2. See the Disk space"
echo "3. login to other box using ssh"
echo "4. Show all services running"
echo "5. Show all Ports opened"
echo "6. Show All Java Apps running"
echo "7. Facility to kill an application"
echo "8. exit"
# tr squeezes the trailing newline into a space so the cursor stays on the line.
echo "Enter your choice:"|tr -s "\n" " "
read a
case $a in
1)
/usr/bin/passwd
echo "Press enter for the menu"
read
;;
2)
/bin/df
echo "Press enter for the menu"
read
;;
3)
echo "Enter the hostname where you want to enter :"|tr -s "\n" " "
read hostn
# Reuse the invoking login name for the remote session.
/usr/bin/ssh -l $USER $hostn
;;
4)
echo "Following Services are running in the server :"|tr -s "\n" " "
/sbin/service --status-all|grep running
;;
5)
netstat -ntpl|more
;;
6)
ps -ef|grep java
;;
7)
echo "enter the name of the Application you want to kill:"|tr -s "\n" " "
read app
# Probe silently first so we only kill when the app is actually running.
ps -ef|grep $app|grep -v grep|grep -q $app
if [ $? -eq 0 ]
then
kill `ps -ef|grep $app|grep -v grep|awk '{print $2}'`
else
echo "$app is not running on the server"
fi
;;
8)
exit
;;
*)
echo "Enter a valid Choice [1-8]"
;;
esac
}
done
}
main
| true
|
34e49ddd77346f389f108710f75fcee3f70b9dc8
|
Shell
|
sbrk-org/slackbuilds
|
/network/opensmtpd/rc.opensmtpd
|
UTF-8
| 454
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/sh
# Start/stop/restart OpenSMTPD.

# Launch the daemon, but only when the smtpd binary is present and executable.
opensmtpd_start() {
  if [ -x /usr/sbin/smtpd ]; then
    echo "Starting OpenSMTPD"
    /usr/sbin/smtpd
  fi
}

# Ask the running daemon to shut down via its control utility.
opensmtpd_stop() {
  smtpctl stop
}

# Stop, give the daemon a second to exit, then start again.
opensmtpd_restart() {
  opensmtpd_stop
  sleep 1
  opensmtpd_start
}

case "$1" in
  start)   opensmtpd_start ;;
  stop)    opensmtpd_stop ;;
  restart) opensmtpd_restart ;;
  *)       echo "usage $0 start|stop|restart" ;;
esac
| true
|
4518d31bf28563ebb13b4a0af6efaf02b2f163dc
|
Shell
|
Sebastien-HATTON/docker-firefox
|
/run.sh
|
UTF-8
| 868
| 3.40625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run Firefox in a throwaway Docker container, sharing the host's X11
# socket plus the sound/video devices so the GUI and audio work.
IMAGE=${1:-kurron/docker-firefox:latest}

# Numeric gids of the host's audio/video groups, granted to the container
# so it can open /dev/snd and the video devices.
AUDIO_GROUP_ID=$(cut -d: -f3 < <(getent group audio))
VIDEO_GROUP_ID=$(cut -d: -f3 < <(getent group video))

# NOTE(review): captured but never used below -- kept for compatibility.
USER_ID=$(id -u $(whoami))
GROUP_ID=$(id -g $(whoami))

# Need to give the container access to your windowing system
xhost +

# Build the command as an array so each argument stays a single word;
# the original flat string relied on unquoted word-splitting of $CMD.
CMD=(docker run --env HOME=/home/powerless
     --env "DISPLAY=unix${DISPLAY}"
     --interactive
     --name Firefox
     --net host
     --rm
     --tty
     --volume /tmp/.X11-unix:/tmp/.X11-unix
     --volume /var/run/docker.sock:/var/run/docker.sock
     --workdir /home/powerless
     --device /dev/snd
     --group-add "${AUDIO_GROUP_ID}"
     --group-add "${VIDEO_GROUP_ID}"
     "${IMAGE}")

echo "${CMD[@]}"
"${CMD[@]}"
| true
|
46a09938c921d81c5ecdc81b39cf22808093fa79
|
Shell
|
BeckResearchLab/ODIn_dataServer
|
/run2
|
UTF-8
| 233
| 2.796875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Keep the OD data server running forever; each (re)start logs to a fresh
# timestamped output file in the data directory.
cd /Volumes/Drobo1/fred/OD_data
while true; do
  # Timestamp-derived file name: spaces and colons become underscores.
  odfile=$(date | sed -e "s/ /_/g" -e "s/:/_/g").OD.txt
  echo "(re)starting service w/ output saved to $odfile"
  /Volumes/Drobo1/fred/ODv2/dataServer 8578 $odfile 7252
done
| true
|
4339c3bf9f6b33f10d7af428a0de29c9aef48149
|
Shell
|
MusaHami/logo-classification-capstone-project
|
/move_JPEG_files.sh
|
UTF-8
| 861
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Move openlogo JPEG images into the matching per-brand subfolder of the
# LogosInTheWild-v2 dataset, renaming each to its bare "imgN.jpg" name.
oldpath="/data/Machine_Learning/Machine_Learning_Engineer/capstone_project/openlogo/JPEGImages"
newpath="/data/Machine_Learning/Machine_Learning_Engineer/capstone_project/LogosInTheWild-v2/data"
# Split the for-loop word lists on newlines only, so paths containing
# spaces survive; restored at the end.
OIFS="$IFS"
IFS=$'\n'
for file in $(find "$oldpath" -regextype posix-extended -regex '.*img[0-9]+.jpg');
do
#echo "$file"
# "imgN.jpg" suffix of the source file name.
newfile=$(echo "$file" | sed -r 's/^.*(img.*)/\1/g')
#echo "$newfile"
oldfilename=${file##*/}
#echo "$oldfilename"
# Brand prefix, i.e. everything before "img" in the file name.
brand=$(echo "$oldfilename" | sed -r 's/^(.*)img.*/\1/g')
#echo "$brand"
# Find the destination folder whose (basename) matches the brand;
# comparison is upper-cased and stripped to alphanumerics on both sides.
for D in `find "$newpath" -type d`
do
subfolder=$(echo $D| rev | cut -d'/' -f 1 | rev)
if [ "$(echo "${subfolder^^}" | tr -dc '[:alnum:]')" = "$(echo "${brand^^}" | tr -dc '[:alnum:]')" ]; then
echo mv "$file" "$D/${newfile}"
mv "$file" "$D/${newfile}"
fi
done
done
IFS="$OIFS"
| true
|
aa2a3925f34fb58346caee1f4b4347f22d148e9a
|
Shell
|
bthj/volcano-webcam-to-time-lapse-video
|
/eldgos/make_DV_video/crop_webcam_images_in_range.sh
|
UTF-8
| 384
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Crop the left/right webcam images for every day of the eruption period
# 2010-03-24 .. 2010-05-21 by sourcing the two crop helpers once per day
# with YEAR/MONTH/DAY set (exactly as the original per-month loops did).
YEAR="2010"

# crop_days MONTH FIRST LAST
# Sets MONTH, then iterates DAY over "YEAR-MONTH-<dd>" (day zero-padded
# to two digits, matching the original {01..30} brace expansions) and
# sources both crop scripts for each date.
crop_days() {
MONTH="$1"
local d
for (( d = $2; d <= $3; d++ ))
do
DAY=$(printf '%s-%s-%02d' "$YEAR" "$MONTH" "$d")
. crop_webcam_image_left.sh
. crop_webcam_image_right.sh
done
}

# Deduplicated from three near-identical copy-pasted month loops.
crop_days "03" 24 31
crop_days "04" 1 30
crop_days "05" 1 21
| true
|
aed28d4cf9b779664dd36746e15556e03204dfc7
|
Shell
|
alfunx/.dotfiles
|
/.config/ranger/scope.sh
|
UTF-8
| 5,115
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# ranger supports enhanced previews.  If the option "use_preview_script"
# is set to True and this file exists, this script will be called and its
# output is displayed in ranger.  ANSI color codes are supported.
# NOTES: This script is considered a configuration file.  If you upgrade
# ranger, it will be left untouched. (You must update it yourself.)
# Also, ranger disables STDIN here, so interactive scripts won't work properly
# Meanings of exit codes:
# code | meaning    | action of ranger
# -----+------------+-------------------------------------------
# 0    | success    | success. display stdout as preview
# 1    | no preview | failure. display no preview at all
# 2    | plain text | display the plain content of the file
# 3    | fix width  | success. Don't reload when width changes
# 4    | fix height | success. Don't reload when height changes
# 5    | fix both   | success. Don't ever reload
# 6    | image      | success. display the image $cached points to as an image preview
# 7    | image      | success. display the file directly as an image
# Meaningful aliases for arguments:
path="$1"            # Full path of the selected file
width="$2"           # Width of the preview pane (number of fitting characters)
height="$3"          # Height of the preview pane (number of fitting characters)
cached="$4"          # Path that should be used to cache image previews
preview_images="$5"  # "True" if image previews are enabled, "False" otherwise.
maxln=200    # Stop after $maxln lines.  Can be used like ls | head -n $maxln
# Find out something about the file:
mimetype=$(file --mime-type -Lb "$path")
extension=$(/bin/echo "${path##*.}" | awk '{print tolower($0)}')
# Functions:
# runs a command and saves its output into $output.  Useful if you need
# the return value AND want to use the output in a pipe
try() { output=$(eval '"$@"'); }
# writes the output of the previously used "try" command
dump() { /bin/echo "$output"; }
# a common post-processing function used after most commands
trim() { head -n "$maxln"; }
# wraps highlight to treat exit code 141 (killed by SIGPIPE) as success
safepipe() { "$@"; test $? = 0 -o $? = 141; }
# Image previews, if enabled in ranger.
if [ "$preview_images" = "True" ]; then
case "$mimetype" in
# Image previews for SVG files, disabled by default.
###image/svg+xml)
###   convert "$path" "$cached" && exit 6 || exit 1;;
# Image previews for image files. w3mimgdisplay will be called for all
# image files (unless overriden as above), but might fail for
# unsupported types.
image/*)
exit 7;;
# Image preview for video, disabled by default.:
###video/*)
###    ffmpegthumbnailer -i "$path" -o "$cached" -s 0 && exit 6 || exit 1;;
esac
fi
# Extension-based handlers; each prints a preview and exits with one of
# the ranger codes from the table above. Falls through to the MIME-type
# case below when nothing matches.
case "$extension" in
# Archive extensions:
a|ace|alz|arc|arj|bz|bz2|cab|cpio|deb|gz|jar|lha|lz|lzh|lzma|lzo|\
rpm|rz|t7z|tar|tbz|tbz2|tgz|tlz|txz|tZ|tzo|war|xpi|xz|Z|zip)
try als "$path" && { dump | trim; exit 0; }
try acat "$path" && { dump | trim; exit 3; }
try bsdtar -lf "$path" && { dump | trim; exit 0; }
exit 1;;
rar)
# avoid password prompt by providing empty password
try unrar -p- lt "$path" && { dump | trim; exit 0; } || exit 1;;
7z)
# avoid password prompt by providing empty password
try 7z -p l "$path" && { dump | trim; exit 0; } || exit 1;;
# PDF documents:
pdf)
try pdftotext -l 10 -nopgbrk -q "$path" - && \
{ dump | trim | fmt -s -w $width; exit 0; } || exit 1;;
# BitTorrent Files
torrent)
try transmission-show "$path" && { dump | trim; exit 5; } || exit 1;;
# ODT Files
odt|ods|odp|sxw)
try odt2txt "$path" && { dump | trim; exit 5; } || exit 1;;
# HTML Pages:
htm|html|xhtml)
try w3m    -dump "$path" && { dump | trim | fmt -s -w $width; exit 4; }
try lynx   -dump "$path" && { dump | trim | fmt -s -w $width; exit 4; }
try elinks -dump "$path" && { dump | trim | fmt -s -w $width; exit 4; }
;; # fall back to highlight/cat if the text browsers fail
esac
# MIME-type fallbacks: syntax highlighting for text, ascii art for
# images, metadata dumps for audio/video.
case "$mimetype" in
# Syntax highlight for text files:
text/* | */xml)
if [ "$(tput colors)" -ge 256 ]; then
pygmentize_format=terminal256
highlight_format=xterm256
else
pygmentize_format=terminal
highlight_format=ansi
fi
try safepipe highlight --out-format=${highlight_format} "$path" && { dump | trim; exit 5; }
try safepipe pygmentize -f ${pygmentize_format} "$path" && { dump | trim; exit 5; }
exit 2;;
# Ascii-previews of images:
image/*)
img2txt --gamma=0.6 --width="$width" "$path" && exit 4 || exit 1;;
# Display information about media files:
video/* | audio/*)
exiftool "$path" && exit 5
# Use sed to remove spaces so the output fits into the narrow window
try mediainfo "$path" && { dump | trim | sed 's/  \+:/: /;'; exit 5; } || exit 1;;
esac
# No handler produced a preview.
exit 1
| true
|
d8c8e77809c132c67fea1b5608a79e914c256831
|
Shell
|
mortaromarcello/Livedevelop
|
/wheezy/openbox_i386_iso-hybrid/config/hooks/sakis3g.chroot
|
UTF-8
| 343
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build sakis3g from source in a throw-away directory and install the
# binary, icon and desktop entry system-wide.
TMP="/tmp/sakis3g"
mkdir -p "${TMP}" || exit 1
# BUG FIX: the cd was unguarded; if it failed, the following
# 'rm -R -f *' would wipe whatever directory we happened to be in.
cd "${TMP}" || exit 1
rm -R -f -v *
git clone http://github.com/mortaromarcello/sakis3g-source || exit 1
cd sakis3g-source || exit 1
mkdir build
./compile
cp -v build/sakis3gz /usr/bin/sakis3g
cp -v files/sakis3g.png /usr/share/icons
cp -v files/sakis3g.desktop /usr/share/applications
rm -R -f -v "${TMP}"
| true
|
4607c5a996eaa182e2bda96588a592e378b0ffe3
|
Shell
|
smiley-yoyo/v2ray-ws-tls-caddy
|
/entry.sh
|
UTF-8
| 1,760
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a v2ray (vmess-over-websocket) server fronted by Caddy TLS.
# Args: $1 = domain, $2 = websocket path, $3 = optional client UUID
# (otherwise reuse the cached one from /root/.v2ray/uuid, or generate).
domain="$1"
path="$2"
uuid="$3"
psname="v2ray-ws"
# Resolve the client UUID: explicit arg > cached file > freshly generated.
if [ ! "$uuid" ] ;then
if [ -f "/root/.v2ray/uuid" ]; then
uuid=`cat /root/.v2ray/uuid`
else
uuid=$(uuidgen)
echo "使用随机生成的UUID: ${uuid}"
fi
fi
mkdir -p /root/.v2ray
echo ${uuid} > /root/.v2ray/uuid
mkdir -p /etc/caddy
mkdir -p /etc/v2ray
# config for caddy: TLS termination, websocket proxy to v2ray on :4567
cat << EOF > /etc/caddy/Caddyfile
${domain}
{
log ./caddy.log
proxy /${path} :4567 {
websocket
header_upstream -Origin
}
errors {
404 err.html
}
}
EOF
# config for v2ray: vmess inbound on the websocket path, freedom outbound
cat << EOF > /etc/v2ray/config.json
{
"inbounds": [
{
"port": 4567,
"protocol": "vmess",
"settings": {
"clients": [
{
"id": "${uuid}",
"alterId": 64
}
]
},
"streamSettings": {
"network": "ws",
"wsSettings": {
"path": "/${path}"
}
}
}
],
"outbounds": [
{
"protocol": "freedom",
"settings": {}
}
]
}
EOF
# Vmess share-link JSON describing this server for client apps.
cat << EOF > /srv/sebs.js
{
"add":"${domain}",
"aid":"0",
"host":"",
"id":"${uuid}",
"net":"ws",
"path":"/${path}",
"port":"443",
"ps":"${psname}",
"tls":"tls",
"type":"none",
"v":"2"
}
EOF
# Fill the client config template with this server's parameters.
cat /srv/clientconfig.json \
| sed "s/@@domain/${domain}/g" \
| sed "s/@@path/${path}/g" \
| sed "s/@@uuid/${uuid}/g" \
> /root/.v2ray/client.json
# Start caddy in the background, print the resulting client config, then
# run v2ray in the foreground as the container's main process.
nohup /bin/parent caddy -conf="/etc/caddy/Caddyfile" --log stdout --agree=false &
echo "-----------------客户端配置JSON--------------------" > /root/.v2ray/output.txt
cat /root/.v2ray/client.json >> /root/.v2ray/output.txt
node showconfig.js >> /root/.v2ray/output.txt
cat /root/.v2ray/output.txt
/usr/bin/v2ray -config /etc/v2ray/config.json
| true
|
5bd3c4be5cb417218356ddfc80f400954b397a8e
|
Shell
|
mcLaTrends/Forecast-POC
|
/deploy_utils/checkVersion.sh
|
UTF-8
| 1,623
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Check whether a desired service version may be deployed.
# Usage: checkVersion.sh <serviceName> <serviceVersion> <teamAccountNumber>
# Exits 0 only when <serviceVersion> is strictly greater (version sort)
# than the version recorded in the team's S3 version bucket; exits 1 on
# missing arguments, S3 access failure, or an equal/lower version.
if [ -z "$1" ]
then
echo "serviceName (service name) not supplied"
exit 1
fi
if [ -z "$2" ]
then
echo "serviceVersion (desired version number) not supplied"
exit 1
fi
if [ -z "$3" ]
then
echo "teamAccountNumber (desired team account number) not supplied"
exit 1
fi
serviceName=$1
serviceVersion=$2
teamAccountNumber=$3
# Download the currently deployed version marker into ./temp.
s3VersionPath="s3://service.${teamAccountNumber}.sd.com/version/"
s3VersionQuery="aws s3 cp ${s3VersionPath}${serviceName} temp --only-show-errors"
${s3VersionQuery}
if [ ! -f temp ]
then
echo "Cannot access S3 version bucket"
exit 1
else
serviceDeployedVersion=$(cat temp)
rm temp
fi
# serviceGreatestVersion holds the greater of serviceDeployedVersion and
# serviceVersion under GNU version ordering (sort -V).
serviceGreatestVersion=$(printf "${serviceVersion}\n${serviceDeployedVersion}" | sort -Vr | head -1)
# BUG FIX: the messages below referenced ${s3ServiceVersionPath}, a
# variable that is never defined anywhere; the correct name is
# ${s3VersionPath}. Comparison operands are also quoted now.
if [ "${serviceVersion}" = "${serviceDeployedVersion}" ];
then
echo "Error: The desired version number, ${serviceVersion} is identical to the current version number ${serviceDeployedVersion} present at ${s3VersionPath}${serviceName}."
exit 1
elif [ "${serviceVersion}" = "${serviceGreatestVersion}" ];
then
echo "Success: The desired version number, ${serviceVersion} is greater than the current version number ${serviceDeployedVersion} present at ${s3VersionPath}${serviceName}."
exit 0
else
echo "Error: The desired version number, ${serviceVersion} is less than the current version number ${serviceDeployedVersion} present at ${s3VersionPath}${serviceName}."
exit 1
fi
| true
|
a25e8635b263ca30c2f97e4a38e6a56eb6439c0c
|
Shell
|
eeayiaia/scmt
|
/resources/plugins.d/hadoop/master.init.d/10--add-master-hosts.sh
|
UTF-8
| 323
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# One-shot setup: append the Hadoop master's address to /etc/hosts.

# Directory containing this script; fall back to $PWD when the dirname
# of BASH_SOURCE is not a directory (e.g. invoked via PATH lookup).
DIR="${BASH_SOURCE%/*}"
[[ -d "$DIR" ]] || DIR="$PWD"

# Pull in shared helpers and the MASTER_NODE setting; abort if missing.
. "$DIR/../../../scripts.d/utils.sh" || exit 1
. "$DIR/../resources/config" || exit 1

echo "$MASTER_NODE hadoop_master" >> /etc/hosts
| true
|
ff07edfa1f60eabab10ad03f1bdec9bbe5d67874
|
Shell
|
hpcc-docker-kubernetes/HPCC-Docker-Ansible
|
/hpcc-tools/config_hpcc.sh
|
UTF-8
| 9,338
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Configure an HPCC cluster inside containers: collect node IPs, build
# environment.xml via envgen, distribute config and (re)start services.
SCRIPT_DIR=$(dirname $0)
function usage()
{
# Print the help text and terminate the whole script with status 1;
# invoked for -h and for any unknown option.
cat <<EOF
Usage: $(basename $0) <options>
<options>:
-u: update mode. It will only re-create dali/thor master environment.xml
and environment.xml with real ip. Re-generate ansible host file,
run updtdalienv and restart thor master.
EOF
exit 1
}
# Build the global IPS variable: a "\;"-separated concatenation of every
# non-blank line of the file named by $1, with whitespace stripped from
# each line. IPS is left empty when the file does not exist.
create_ips_string()
{
IPS=
[ ! -e "$1" ] && return
local entry
while read entry
do
entry=$(echo $entry | tr -d '[:space:]')
[ -n "$entry" ] && IPS="${IPS}${entry}\\;"
done < $1
}
function create_envxml()
{
[ -e ${roxie_ips} ] && roxie_nodes=$(cat ${roxie_ips} | wc -l)
[ -e ${esp_ips} ] && esp_nodes=$(cat ${esp_ips} | wc -l)
[ -z "$roxie_nodes" ] && roxie_nodes=0
[ -z "$esp_nodes" ] && esp_nodes=0
cmd="$SUDOCMD ${HPCC_HOME}/sbin/envgen -env ${CONFIG_DIR}/${ENV_XML_FILE} \
-override roxie,@copyResources,true \
-override roxie,@roxieMulticastEnabled,false \
-override thor,@replicateOutputs,true \
-override esp,@method,htpasswd \
-override thor,@replicateAsync,true \
-thornodes ${thor_nodes} -slavesPerNode ${slaves_per_node} \
-espnodes ${esp_nodes} -roxienodes ${roxie_nodes} \
-supportnodes ${support_nodes} -roxieondemand 1"
if [ -n "$1" ]
then
#dafilesrv
cmd="$cmd -assign_ips $1 ."
fi
cmd="$cmd -ip $dali_ip"
if [ $thor_nodes -gt 0 ]
then
create_ips_string ${thor_ips}
cmd="$cmd -assign_ips thor ${dali_ip}\;${IPS}"
fi
if [ $roxie_nodes -gt 0 ]
then
create_ips_string ${roxie_ips}
cmd="$cmd -assign_ips roxie $IPS"
fi
if [ $esp_nodes -gt 0 ]
then
create_ips_string ${esp_ips}
cmd="$cmd -assign_ips esp $IPS"
fi
echo "$cmd"
eval "$cmd"
}
# Gather container IPs into /tmp/ips via the helper scripts, retrying
# up to three times; passing "-x" as $1 skips collection entirely.
function collect_ips()
{
if [ -z "$1" ] || [ "$1" != "-x" ]
then
trials=3
while [ $trials -gt 0 ]
do
${SCRIPT_DIR}/get_ips.sh
${SCRIPT_DIR}/get_ips.py
[ $? -eq 0 ] && break
trials=$(expr $trials \- 1)
sleep 5
done
fi
}
# Generate the /etc/ansible inventory from the collected IPs and disable
# SSH host-key checking for non-interactive runs.
function setup_ansible_hosts()
{
${SCRIPT_DIR}/ansible/setup.sh -d /tmp/ips -c /tmp/hpcc.conf
export ANSIBLE_HOST_KEY_CHECKING=False
}
# Re-seed /etc/HPCCSystems (and the per-component roxie/esp/thor config
# dirs) from the pristine /etc/HPCCSystems.bk backup wherever
# environment.conf is missing, restoring hpcc:hpcc ownership.
function restore_hpcc_config()
{
if [ ! -e /etc/HPCCSystems/environment.conf ]; then
cp -r /etc/HPCCSystems.bk/* /etc/HPCCSystems/
chown -R hpcc:hpcc /etc/HPCCSystems/
fi
if [ $NUM_ROXIE_LB -gt 0 ]; then
for i in $(seq 1 ${NUM_ROXIE_LB})
do
if [ ! -e /etc/HPCCSystems/roxie/${i}/environment.conf ]; then
cp -r /etc/HPCCSystems.bk/* /etc/HPCCSystems/roxie/${i}/
chown -R hpcc:hpcc /etc/HPCCSystems/roxie/${i}
fi
done
fi
if [ ! -e /etc/HPCCSystems/esp/environment.conf ]; then
cp -r /etc/HPCCSystems.bk/* /etc/HPCCSystems/esp/
chown -R hpcc:hpcc /etc/HPCCSystems/esp
fi
if [ -d /etc/HPCCSystems/thor ] && [ ! -e /etc/HPCCSystems/thor/environment.conf ]; then
cp -r /etc/HPCCSystems.bk/* /etc/HPCCSystems/thor/
chown -R hpcc:hpcc /etc/HPCCSystems/thor
fi
}
# Fix ownership of HPCC data directories on all roxie nodes.
function set_hpcc_data_owner()
{
ansible-playbook /opt/hpcc-tools/ansible/set_hpcc_owner.yaml --extra-vars "hosts=roxie"
}
# Stop HPCC everywhere; non-dali nodes first so dali goes down last.
function stop_hpcc()
{
ansible-playbook /opt/hpcc-tools/ansible/stop_hpcc.yaml --extra-vars "hosts=non-dali"
ansible-playbook /opt/hpcc-tools/ansible/stop_hpcc.yaml --extra-vars "hosts=dali"
}
# Start HPCC; dali first so other components can reach it on startup.
function start_hpcc()
{
ansible-playbook /opt/hpcc-tools/ansible/start_hpcc.yaml --extra-vars "hosts=dali"
ansible-playbook /opt/hpcc-tools/ansible/start_hpcc.yaml --extra-vars "hosts=non-dali"
}
# Populate /etc/ansible/lb-ips with load-balancer service IPs (copied
# from /tmp/lb-ips) for roxie/thor/esp, falling back to the real
# container IPs copied from /etc/ansible/ips. The commented-out loops
# are the older approach of reading *_SERVICE_HOST env vars.
function get_lb_ips()
{
lb_ips=/etc/ansible/lb-ips
[ ! -d ${lb_ips} ] && cp -r /etc/ansible/ips $lb_ips
if [ -e ${lb_ips}/roxie ]
then
if [ $NUM_ROXIE_LB -gt 0 ]
then
#rm -rf ${lb_ips}/roxie
#touch ${lb_ips}/roxie
#for i in $(seq 1 $NUM_ROXIE_LB)
#do
#   lb_ip=ROXIE${i}_SERVICE_HOST
#   eval lb_ip=\$$lb_ip
#   [ -n "$lb_ip" ] && echo ${lb_ip} >> ${lb_ips}/roxie
#done
cp /tmp/lb-ips/roxie ${lb_ips}/
fi
fi
if [ -e ${lb_ips}/thor ]
then
if [ -n "$NUM_THOR_SV" ] && [ $NUM_THOR_SV -gt 0 ]
then
#rm -rf ${lb_ips}/thor
#touch ${lb_ips}/thor
#for i in $(seq 1 $NUM_THOR_SV)
#do
#   padded_index=$(printf "%04d" $i)
#   lb_ip=THOR${padded_index}_SERVICE_HOST
#   eval lb_ip=\$$lb_ip
#   [ -n "$lb_ip" ] && echo ${lb_ip} >> ${lb_ips}/thor
#done
cp /tmp/lb-ips/thor ${lb_ips}/
fi
fi
#[ -e ${lb_ips}/esp ] && [ -n "$ESP_SERVICE_HOST" ] && echo ${ESP_SERVICE_HOST} > ${lb_ips}/esp
[ -e ${lb_ips}/esp ] && [ -s "/tmp/lb-ips/esp" ] && cp /tmp/lb-ips/esp ${lb_ips}/
}
# Set the variables create_envxml() consumes: HPCC_HOME, ENV_XML_FILE,
# thor_ips (LB or real, depending on NUM_THOR_SV), node counts and
# slaves-per-node (overridable via $SLAVES_PER_NODE, default 1).
function set_vars_for_envgen()
{
HPCC_HOME=/opt/HPCCSystems
ENV_XML_FILE=environment.xml
if [ -n "$NUM_THOR_SV" ] && [ $NUM_THOR_SV -gt 0 ]
then
thor_ips=/etc/ansible/lb-ips/thor
else
thor_ips=/etc/ansible/ips/thor
fi
[ -e ${thor_ips} ] && thor_nodes=$(cat ${thor_ips} | wc -l)
support_nodes=1
slaves_per_node=1
[ -n "$SLAVES_PER_NODE" ] && slaves_per_node=${SLAVES_PER_NODE}
[ -z "$thor_nodes" ] && thor_nodes=0
}
# Generate the main /etc/HPCCSystems/environment.xml, using LB service
# IPs for roxie/esp unless USE_SVR_IPS=0 forces real container IPs.
function create_envxml_with_lb()
{
CONFIG_DIR=/etc/HPCCSystems
if [ -n "$USE_SVR_IPS" ] && [ $USE_SVR_IPS -eq 0 ]
then
roxie_ips=/etc/ansible/ips/roxie
esp_ips=/etc/ansible/ips/esp
else
roxie_ips=${lb_ips}/roxie
esp_ips=${lb_ips}/esp
fi
create_envxml
chown -R hpcc:hpcc $CONFIG_DIR
}
# Generate a second environment.xml under /etc/HPCCSystems/real that
# uses the real container IPs for every component.
function create_envxml_with_real_ips()
{
esp_ips=/etc/ansible/ips/esp
roxie_ips=/etc/ansible/ips/roxie
thor_ips=/etc/ansible/ips/thor
CONFIG_DIR=/etc/HPCCSystems/real
[ ! -d $CONFIG_DIR ] && cp -r /etc/HPCCSystems.bk $CONFIG_DIR
create_envxml
chown -R hpcc:hpcc $CONFIG_DIR
CONFIG_DIR=/etc/HPCCSystems
}
# This is only needed if thor using proxy service
function create_envxml_for_thor()
{
if [ -n "$NUM_THOR_SV" ] && [ $NUM_THOR_SV -gt 0 ]
then
CONFIG_DIR=/etc/HPCCSystems/thor
cp /etc/HPCCSystems/environment.xml ${CONFIG_DIR}/
create_envxml dafilesrv
chown -R hpcc:hpcc $CONFIG_DIR
fi
}
# Derive the esp-local environment.xml by replacing the esp service IP
# with "." (localhost) in the main environment.xml.
function create_envxml_for_esp()
{
CONFIG_DIR=/etc/HPCCSystems/esp
cp /etc/HPCCSystems/environment.xml ${CONFIG_DIR}/
esp_svc_ip=$(cat ${lb_ips}/esp)
if [ -n "$esp_svc_ip" ]; then
sed "s/${esp_svc_ip}/\./g" /etc/HPCCSystems/environment.xml > ${CONFIG_DIR}/environment.xml
fi
chown -R hpcc:hpcc $CONFIG_DIR
}
# Same localhost substitution per roxie LB instance; without LBs, just
# copy the main environment.xml into the roxie config dir.
function create_envxml_for_roxie()
{
if [ -z "$NUM_ROXIE_LB" ] || [ $NUM_ROXIE_LB -le 0 ]
then
cp -r /etc/HPCCSystems/environment.xml /etc/HPCCSystems/roxie/
chown hpcc:hpcc /etc/HPCCSystems/roxie/environment.xml
return
fi
for i in $(seq 1 ${NUM_ROXIE_LB})
do
CONFIG_DIR=/etc/HPCCSystems/roxie/${i}
roxie_svc_ip=$(cat ${lb_ips}/roxie | head -n ${i} | tail -n 1 )
sed "s/${roxie_svc_ip}/\./g" /etc/HPCCSystems/environment.xml > ${CONFIG_DIR}/environment.xml
chown -R hpcc:hpcc $CONFIG_DIR
done
}
#------------------------------------------
# Need root or sudo
#
SUDOCMD=
[ $(id -u) -ne 0 ] && SUDOCMD=sudo
#------------------------------------------
# LOG: trace every command of this run into a timestamped log file.
#
LOG_DIR=/log/hpcc-tools
mkdir -p $LOG_DIR
LONG_DATE=$(date "+%Y-%m-%d_%H-%M-%S")
LOG_FILE=${LOG_DIR}/config_hpcc_${LONG_DATE}.log
touch ${LOG_FILE}
exec 2>$LOG_FILE
set -x
update=0
while getopts "*hu" arg
do
case $arg in
h) usage
;;
u) update=1
;;
?)
echo "Unknown option $OPTARG"
usage
;;
esac
done
echo "update mode: $update"
#----------------------------------------o
# Start sshd (needed by ansible) only if it is not already running.
# BUG FIX: the old check was "ps -efa | grep -v sshd | grep -q sshd",
# which removes every sshd line before searching for one and so can
# never succeed; and the daemon start was not actually guarded by the
# check. Exclude the grep process itself instead, and guard both steps.
ps -efa | grep -v grep | grep -q sshd
if [ $? -ne 0 ]; then
$SUDOCMD mkdir -p /var/run/sshd
$SUDOCMD /usr/sbin/sshd -D &
fi
#------------------------------------------
# Collect conainters' ips
#
collect_ips
#------------------------------------------
# Create HPCC components file
#
hpcc_config=$(ls /tmp/ips | tr '\n' ',')
echo "cluster_node_types=${hpcc_config%,}" > /tmp/hpcc.conf
# backup
# BUG FIX: both directory tests below were missing '&&' before rm -rf
# ("[ -d dir ] rm -rf dir"), which makes '[' fail with "too many
# arguments" and skips the cleanup entirely.
[ -d /etc/HPCCSystems/ips ] && rm -rf /etc/HPCCSystems/ips
cp -r /tmp/ips /etc/HPCCSystems/
[ -d /etc/HPCCSystems/lb-ips ] && rm -rf /etc/HPCCSystems/lb-ips
cp -r /tmp/lb-ips /etc/HPCCSystems/
cp /tmp/hpcc.conf /etc/HPCCSystems/
#------------------------------------------
# Setup Ansible hosts
#
setup_ansible_hosts
dali_ip=$(cat /etc/ansible/ips/dali)
# Full (non-update) runs restore config, fix ownership and stop HPCC
# before regenerating the environment.
if [ $update -eq 0 ]
then
restore_hpcc_config
set_hpcc_data_owner
stop_hpcc
get_lb_ips
fi
set_vars_for_envgen
create_envxml_with_lb
if [ -z "$USE_SVR_IPS" ] || [ $USE_SVR_IPS -ne 0 ]
then
if [ $update -eq 0 ]
then
create_envxml_for_roxie
create_envxml_for_esp
create_envxml_for_thor
fi
create_envxml_with_real_ips
else
ansible-playbook /opt/hpcc-tools/ansible/push_env.yaml --extra-vars "hosts=hpcc"
fi
# Refresh dali and restart either the whole platform or just thor.
if [ $update -eq 0 ]
then
ansible-playbook /opt/hpcc-tools/ansible/refresh_dali.yaml
start_hpcc
else
ansible-playbook /opt/hpcc-tools/ansible/refresh_dali.yaml
ansible-playbook /opt/hpcc-tools/ansible/start_thor.yaml --extra-vars "hosts=dali"
fi
set +x
echo "$SUDOCMD /opt/HPCCSystems/sbin/configgen -env /etc/HPCCSystems/environment.xml -listall2"
$SUDOCMD /opt/HPCCSystems/sbin/configgen -env /etc/HPCCSystems/environment.xml -listall2
echo "HPCC cluster configuration is done."
| true
|
5ee84544df3e6f98ea772a4b576d092f144556f4
|
Shell
|
tungtran3012/Linux2018
|
/GK-De1/bai2.sh
|
UTF-8
| 403
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# For each integer argument, print whether it is prime ("la so nguyen
# to") or composite ("khong la so nguyen to"), using trial division by
# every j in [2, n/2].
f2()
{
for i in $*
do
bool=1
if [ $i -eq 0 ]
then
echo "0 khong la so nguyen to"
elif [ $i -eq 1 ]
then
echo "1 la so nguyen to"
else
for((j=2;j<=(i/2);j++))
do
if [ $(($i%$j)) -eq 0 ]
then
echo "$i khong la so nguyen to"
# BUG FIX: mark the number as composite before breaking; previously
# bool stayed 1, so the "prime" message was also printed for
# composites right after the "not prime" one.
bool=0
break
fi
done
if [ $bool -eq 1 ]
then
echo "$i la so nguyen to"
fi
fi
done
}
f2 1 2 3 4 5 6 7
| true
|
85929ec674a3783e199055f4536656ab6855991e
|
Shell
|
ch1huizong/study
|
/lang/shl/abs/10/match_string_10-27.sh
|
UTF-8
| 369
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Simple string matching: return 0 when the two arguments are equal,
# 90 when they differ, and 91 when not called with exactly two args.
match_string(){
MATCH=0
NOMATCH=90
PARAMS=2
BAD_PARAMS=91
[ $# -eq $PARAMS ] || return $BAD_PARAMS
if [ "$1" = "$2" ]; then
return $MATCH
else
return $NOMATCH
fi
}
# Demo: one missing argument (91), a mismatch (90), then a match (0).
a=one
b=two
c=three
d=two
match_string "$a"; echo $?
match_string "$a" "$b"; echo $?
match_string "$b" "$d"; echo $?
exit 0
| true
|
3c7bfbc32abce20e706ade9270863fab9a2bd546
|
Shell
|
saavuio/s_nuxt_2nd
|
/init.sh
|
UTF-8
| 1,554
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Fetch and build the s_nuxt_2nd base framework (and its node_modules
# cache repo) into the project's s_base directory.
# Optional $1 pins the base repo to a specific commit SHA.
set -e

S_BASE_ROOT="$(cd "$(dirname "$0")" && cd .. && pwd)"
cd $S_BASE_ROOT
# Safety check: refuse to run unless we resolved into the s_base dir.
if [ "$(basename $(echo $S_BASE_ROOT))" != "s_base" ]; then
  echo "Can't run from this directory."
  exit 1
fi

TARGET_SHA=$1

# Clone base repo NAME at branch VERSION plus its matching *_cache repo
# (branch chosen by SHA pin when given), unpack the cached node_modules
# tarball into the base checkout, and run the post-fetch hook.
function base_fetch {
  NAME=$1
  VERSION=$2

  rm -rf ./${NAME}
  rm -rf ./${NAME}_cache

  # UNCOMMENT FOR REMOTE SETUP (default)
  git clone --single-branch -b $VERSION https://github.com/saavuio/$NAME

  # UNCOMMENT AND CORRECT FOR LOCAL SETUP (development)
  # LOCAL_PATH=/path/to/s_nuxt_2nd
  # cp -a ${LOCAL_PATH} ./$NAME
  # rm -rf ./$NAME/base/node_modules

  # A SHA pin selects a matching "main-repo-sha-<sha>" cache branch.
  if [ ! -z "$TARGET_SHA" ]; then
    cd ${NAME}
    git -c advice.detachedHead=false checkout $TARGET_SHA
    cd ..
    CACHE_BRANCH=main-repo-sha-$TARGET_SHA
  else
    CACHE_BRANCH=$VERSION
  fi

  # UNCOMMENT FOR REMOTE SETUP (default)
  git clone --single-branch -b $CACHE_BRANCH https://github.com/saavuio/${NAME}_cache

  # UNCOMMENT FOR LOCAL SETUP (development)
  # mv ./$NAME/base/node_modules_cache ${NAME}_cache

  cp ${NAME}_cache/node_modules.tar.bz2 ${NAME}/base
  S_BASE_NAME=$NAME S_BASE_VERSION=$VERSION PROJECT_ROOT_PATH=.. \
    ./$NAME/scripts/after_base_fetch.sh
}

# Run the base repo's build and post-build hooks with the standard env.
function base_build {
  NAME=$1
  VERSION=$2
  S_BASE_NAME=$NAME S_BASE_VERSION=$VERSION PROJECT_ROOT_PATH=.. \
    ./$NAME/scripts/base_build.sh
  S_BASE_NAME=$NAME S_BASE_VERSION=$VERSION PROJECT_ROOT_PATH=.. \
    ./$NAME/scripts/after_base_build.sh
}

# -- s_nuxt_2nd
# Fetch/build only when the checkout is missing, or always when $OBF is
# unset/empty ("[ -z $OBF ]" is true for an unset or empty OBF).
if [ ! -d s_nuxt_2nd ] || [ -z $OBF ]; then
  base_fetch "s_nuxt_2nd" "v3"
  base_build "s_nuxt_2nd" "v3"
fi
| true
|
d155173a0d28919a63b524097381ea240f16247c
|
Shell
|
priomsrb/trigger-await
|
/await
|
UTF-8
| 577
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Block until the named trigger file under $triggerDir is touched (by
# the companion 'trigger' command), or until the user presses ENTER.
triggerDir=~/.trigger-await
triggerName=$1
startTime=$(date +%s)
echo Waiting for trigger $1. Press ENTER to skip...
while true
do
currentTime=$(date +%s)
# Seconds elapsed since we started waiting. Uses shell arithmetic
# instead of forking 'expr', and drops the useless echo that was
# previously embedded in the -newermt argument.
timeSinceStart=$(( currentTime - startTime ))
# Check if the trigger file was modified after we started awaiting
triggerFileUpdated=$(find "$triggerDir/$triggerName" -newermt "-${timeSinceStart} seconds" 2>/dev/null)
if [[ "$triggerFileUpdated" ]]; then
break
fi
# Wait 1 second for enter to be pressed
read -t 1
# Exit if enter was pressed
if [[ $? == 0 ]]; then exit; fi
done
| true
|
e902b3bdb0418be725926c71f5df6438506a94ba
|
Shell
|
Hongyang449/scVDJ_seq
|
/analysis/muscle.sh
|
UTF-8
| 596
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run MUSCLE multiple sequence alignment on every ROW*.fa input, then
# unwrap the aligned FASTA output to single-line sequences with seqkit.
START=$(date)

mkdir -p ./muscle_fastaout
mkdir -p ./muscle_htmlout
mkdir -p ./muscle_fastaout_nowrap

# Align each input; emit FASTA and HTML renderings of the alignment.
for FILE in ./fa_ROW.COL.DV/ROW*.fa
do
    stem=$(basename $FILE .fa)
    muscle -in $FILE -fastaout ./muscle_fastaout/${stem}.afa -htmlout ./muscle_htmlout/${stem}.html
done

echo "nowrap.afa"
# seqkit 'seq -w 0' disables line wrapping in the FASTA output.
for FILE in ./muscle_fastaout/ROW*.afa
do
    seqkit seq -w 0 < $FILE > ./muscle_fastaout_nowrap/$(basename $FILE .afa).fa
done

END=$(date)
echo "***DONE"
echo "$START - $END"
| true
|
848385af0e0fb19654f03c6cdb36950e4311c069
|
Shell
|
Mahoney/circleci-docker-openjdk-node
|
/bash_libs/docker.sh
|
UTF-8
| 255
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Print (without a trailing newline) the fully-qualified docker image
# name: [REGISTRY/]REPO/ARTIFACT. The registry prefix is included only
# when DOCKER_REGISTRY is set and non-empty.
calc_docker_image() {
  local prefix="${DOCKER_REGISTRY:-}"
  if [ -n "$prefix" ]; then
    printf '%s' "$prefix/$DOCKER_REPO/$DOCKER_ARTIFACT"
  else
    printf '%s' "$DOCKER_REPO/$DOCKER_ARTIFACT"
  fi
}
| true
|
ef1779ef86ca7c5511b96087431c54c87f8c661a
|
Shell
|
ShalokShalom/plan.sh
|
/bluez-firmware/plan.sh
|
UTF-8
| 427
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Habitat-style build plan for the Broadcom BCM203x / STLC2300 Bluetooth
# firmware blobs.
pkg_origin=cosmos
pkg_name=bluez-firmware
pkg_version=1.2
pkg_description="Firmware for Broadcom BCM203x and STLC2300 Bluetooth chips"
pkg_upstream_url="http://www.bluez.org/"
pkg_license=('GPL2')
pkg_source=("http://bluez.sf.net/download/${pkg_name}-${pkg_version}.tar.gz")
# NOTE(review): this value is 32 hex digits, i.e. MD5-length, not a SHA
# digest — confirm which checksum the build tooling actually expects.
pkg_shasum=('1cc3cefad872e937e05de5a0a2b390dd')
do_build() {
# Firmware only needs configure for the /usr prefix; nothing to compile.
./configure --prefix=/usr
}
do_package() {
make DESTDIR=${pkg_prefix} install
}
| true
|
310605b46a38e5eef7b116f0f9821b6664f90163
|
Shell
|
he8us/DAS
|
/devtools/devbox/commands/show_help.sh
|
UTF-8
| 920
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Print the devbox CLI usage text (options and subcommands) to stdout.
# ${0##*/} in the heredoc expands to the invoking script's basename.
show_help(){
cat <<EOF
Usage: ${0##*/} [-hv] [-e ENVIRONMENT] COMMAND
Options
-h,--help                      display this help and exit
-e,--environment ENVIRONMENT   work with ENVIRONMENT configuration
-v                             verbose mode
Commands
launch|start|up                Start the environment
stop                           Stop the environment
down                           Stop & clear the environment
exec|run                       Run a docker command
ps                             List docker process
build                          Build images
composer                       PHP Composer command
assets                         Build frontend assets
npm                            Npm command
docker-compose                 Docker compose command
init                           Init environement
EOF
}
| true
|
23c67f1dc43502951757f141324a8a385a69f199
|
Shell
|
ShalokShalom/apps
|
/kuserfeedback/PKGBUILD
|
UTF-8
| 934
| 2.8125
| 3
|
[] |
no_license
|
# Arch Linux PKGBUILD for the KUserFeedback telemetry/survey framework.
pkgname=kuserfeedback
pkgver=1.0.0
pkgrel=1
pkgdesc="Framework for collecting user feedback for applications via telemetry and surveys"
arch=('x86_64')
url="https://github.com/KDE/kuserfeedback"
license=('MIT')
depends=('qt5-declarative' 'qt5-svg') # 'php' optional, check what needs with plasma 5.15
makedepends=('extra-cmake-modules' 'qt5-tools' 'clang')
source=("https://download.kde.org/stable/kuserfeedback/${pkgname}-${pkgver}.tar.xz")
sha256sums=('5a2f53ebb4b99a280757ca32bd9b686a7764a726e7e4d8bafee33acbb44b9db7')
prepare() {
cd ${pkgname}-${pkgver}
# Point the qmllint check at the qt5-suffixed binary name used on Arch.
sed -i -e 's|QMLLINT_EXECUTABLE qmllint|QMLLINT_EXECUTABLE qmllint-qt5|' cmake/FindQmlLint.cmake
}
# Out-of-tree CMake release build with tests disabled.
build() {
mkdir -p build
cd build
cmake ../${pkgname}-${pkgver} \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_LIBDIR=lib \
-DBUILD_TESTING=OFF
make
}
package() {
cd build
make DESTDIR=${pkgdir} install
}
| true
|
f2f10bb7eb324765e9878cb2146046e5ffae0f2b
|
Shell
|
hapebe/c-itw
|
/extra/random-1000.sh
|
UTF-8
| 80
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print 1000 pseudo-random integers, one per line, in [-1000, 999]
# ($RANDOM % 2000 shifted down by 1000).
# Uses a C-style loop instead of forking an unnecessary 'seq' process.
for (( i = 1; i <= 1000; i++ )); do
echo $(( (RANDOM % 2000) - 1000 ))
done
| true
|
c7b3e3775c8bb0060a7542ae8034b6894a610b34
|
Shell
|
mindspore-ai/akg
|
/third_party/incubator-tvm/3rdparty/dlpack/tests/travis/run_test.sh
|
UTF-8
| 317
| 2.859375
| 3
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Travis CI dispatcher: run the lint and/or build steps selected by
# $TASK ("lint", "build", or "all_test"). Lint is skipped on macOS.
if [ ${TASK} == "lint" ] || [ ${TASK} == "all_test" ]; then
if [ ! ${TRAVIS_OS_NAME} == "osx" ]; then
# NOTE(review): 'exit -1' is non-portable and yields status 255;
# callers appear to test only for non-zero, so it works regardless.
./tests/scripts/task_lint.sh || exit -1
fi
fi
if [ ${TASK} == "build" ] || [ ${TASK} == "all_test" ]; then
./tests/scripts/task_build.sh || exit -1
fi
echo "All travis test passed.."
| true
|
5a01b8c9fb2d70b3b9f478b5b4ec15b13e49e145
|
Shell
|
jneen/gesundheit
|
/fulltest.sh
|
UTF-8
| 271
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Compile the CoffeeScript sources, instrument them with jscoverage,
# run the vows test suite with HTML coverage output, then restore lib/.
# NOTE(review): [[ ]] is a bashism under a #!/bin/sh shebang — this only
# works where /bin/sh is bash.
ARGS="$@"
# Default to all test files when no arguments were given.
[[ -z "$@" ]] && ARGS='test/*test.coffee'
[[ -d lib-tmp ]] || mkdir lib-tmp
coffee -o lib-tmp/ -c src/ &&
cp src/fluid.js lib-tmp/
# Replace lib/ with an instrumented copy for the coverage run.
rm -fr lib &&
node-jscoverage lib-tmp/ lib/ &&
vows --debug-brk --cover-html $ARGS &&
rm -fr lib && mv lib-tmp/ lib/
| true
|
0a4d30eaad9c71080e03b27ca1e8324817f42c52
|
Shell
|
slashbeast/better-initramfs
|
/bootstrap/lebuilds/mdadm.lebuild
|
UTF-8
| 409
| 2.6875
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# lebuild recipe: fetch and statically build mdadm for inclusion in
# better-initramfs; only the mdadm binary itself is deployed.
name='mdadm'
version='4.1'
sources=( "https://bitbucket.org/piotrkarbowski/better-initramfs/downloads/${name}-${version}.tar.xz" )
homepage='http://neil.brown.name/blog/mdadm'
license='GPLv2'
# Build in /source and stage the install under /binit/prefix; the mdadm
# map file is placed under /dev/.mdadm at runtime.
build_cmd="
cd /source/${name}-${version} && \
make MAP_DIR='/dev/.mdadm'
make DESTDIR=/binit/prefix install
"
deploy="/binit/prefix/sbin/mdadm"
# Request a statically linked binary (no shared libs in the initramfs).
STATIC=1
fetch_source
install_source
build_source
| true
|
f84074ab1d68f358321a14b0c2f8c81591b6bdce
|
Shell
|
flzxsqc200/ubuntu-xfce-vnc
|
/src/common/install/no_vnc.sh
|
UTF-8
| 1,325
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Install noVNC v1.0.0 plus websockify into $NO_VNC_HOME for the
# headless-VNC container image.
### every exit != 0 fails the script
set -e
set -u
# Refresh the CA store so the downloads below verify.
update-ca-certificates -f
echo "Install noVNC - HTML5 based VNC viewer"
mkdir -p $NO_VNC_HOME/utils/websockify
# Unpack the noVNC release tarball directly into $NO_VNC_HOME
# (--strip 1 drops the versioned top-level directory).
wget -qO- https://github.com/novnc/noVNC/archive/refs/tags/v1.0.0.tar.gz | tar xz --strip 1 -C $NO_VNC_HOME
#wget -qO- https://github.com/novnc/noVNC/archive/v1.0.0.tar.gz | tar xz --strip 1 -C $NO_VNC_HOME
#wget --no-check-certificate -qO- https://github.com/novnc/noVNC/archive/v1.2.0.tar.gz | tar xz --strip 1 -C $NO_VNC_HOME
# use older version of websockify to prevent hanging connections on offline containers, see https://github.com/ConSol/docker-headless-vnc-container/issues/50
apt install git -y
git clone https://github.com/novnc/websockify.git $NO_VNC_HOME/utils/websockify
#wget -qO- https://github.com/novnc/websockify/archive/refs/tags/v0.6.1.tar.gz | tar xz --strip 1 -C $NO_VNC_HOME/utils/websockify
#wget -qO- https://github.com/novnc/websockify/archive/v0.6.1.tar.gz | tar xz --strip 1 -C $NO_VNC_HOME/utils/websockify
#wget --no-check-certificate -qO- https://github.com/novnc/websockify/archive/v0.10.0.tar.gz | tar xz --strip 1 -C $NO_VNC_HOME/utils/websockify
chmod +x -v $NO_VNC_HOME/utils/*.sh
## create index.html to forward automatically to `vnc_lite.html`
ln -s $NO_VNC_HOME/vnc_lite.html $NO_VNC_HOME/index.html
| true
|
2999cc49674edd4dbb6488ba829669666ebdc76d
|
Shell
|
NSBum/qsh
|
/clients/psql/scripts/columns
|
UTF-8
| 583
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Write (into the file named by $1) a SQL query listing every user-table
# column with its type metadata from information_schema; $2 is expanded
# verbatim at the end of the query (e.g. a WHERE or LIMIT clause).
QUERY_CUSTOMIZATION=$2
cat <<EOF > $1
select * from (
select t.table_type, c.table_schema, c.table_name, c.column_name, c.ordinal_position, c.column_default, c.is_nullable,
c.data_type, c.character_maximum_length, numeric_precision, numeric_scale
from information_schema.columns c
inner join information_schema.tables t on c.table_catalog = t.table_catalog and c.table_schema = t.table_schema
and c.table_name = t.table_name
where c.table_schema != 'information_schema'
and c.table_schema not like 'pg\_%'
order by 2, 3, 5
) t
$QUERY_CUSTOMIZATION;
EOF
| true
|
7d8452932968f9e31e95ac75e85ab7d08471b7bd
|
Shell
|
oudaykhaled/iot-pipe-root
|
/extra/temp-help-system/systems/hbase/fully-distributed-with-managed-zk/scripts/hadoop-services.sh
|
UTF-8
| 1,271
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Bring up HDFS and YARN, report the filesystem status, then start
# HBase and query its status; benchmark invocations are kept below as
# commented-out examples.
echo "HADOOP SERVICES"
$HADOOP_HOME/sbin/start-dfs.sh
$HADOOP_HOME/sbin/start-yarn.sh
echo "RUN jps - Java Virtual Machine Process Status Tool"
jps -lm
echo "Get basic filesystem information and statistics."
hdfs dfsadmin -report
echo "HBASE SERVICES"
$HBASE_HOME/bin/start-hbase.sh
echo "RUN jps - Java Virtual Machine Process Status Tool"
jps -lm
echo "Give HMaster some time to initialize"
sleep 7s
# LINK: https://stackoverflow.com/a/39664156
echo "Get basic HBASE status from $(hostname)"
echo -e 'status' | $HBASE_HOME/bin/hbase shell
# LINK: https://www.cloudera.com/documentation/enterprise/5-9-x/topics/cdh_ig_hbase_tools.html
# LINK: https://intellipaat.com/tutorial/hbase-tutorial/performance-tunning/
# LINK: https://superuser.blog/hbase-benchmarking/
# LINK: http://gbif.blogspot.com/2012/02/performance-evaluation-of-hbase.html
# echo "HBase Write Benchmark: using 1 thread and no MapReduce"
# time hbase org.apache.hadoop.hbase.PerformanceEvaluation --nomapred randomWrite 1
# echo "HBase Read Benchmark: using 1 thread and no MapReduce"
# time hbase org.apache.hadoop.hbase.PerformanceEvaluation --nomapred randomRead 1
# echo "HBase Scan Benchmark: using 1 thread"
# time hbase org.apache.hadoop.hbase.PerformanceEvaluation scan 1
| true
|
939376a69efbaa650fe07320d9126f594ee3b99d
|
Shell
|
Anomalocaridid/tinycoredotfiles
|
/.bashrc
|
UTF-8
| 2,946
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/false
# Guard shebang: this file is meant to be sourced, never executed.
# Declares $PATH
#PATH=$PATH
# Sources defaults
source /usr/local/etc/bashrc
# Color Variables (ANSI escape sequences)
CYAN="\e[1;36m"
PINK="\e[1;35m"
WHITE="\e[0;1m"
RESET="\e[0m"
# Palette roles consumed by the prompt builder below.
BASE_COLOR=$WHITE
COLOR1=$CYAN
COLOR2=$PINK
# Variables for prompt customization
PROMPT_COMMAND=__prompt_command
# Dynamic prompt
# Rebuilds PS1 before every prompt (installed via PROMPT_COMMAND).
# If the previous command failed, its exit status is prefixed in red.
# Reads globals BASE_COLOR, COLOR1, COLOR2.
__prompt_command() {
  local last_status="$?"
  # Wrap escape sequences in \[ \] so readline excludes them when
  # computing the prompt's visible width.
  local base="\[$BASE_COLOR\]"
  local c1="\[$COLOR1\]"
  local c2="\[$COLOR2\]"
  local reset="\[\e[0m\]"
  local err="\[\e[1;31m\]"
  local prompt=""
  # Show the exit status only when the last command failed.
  [[ $last_status != 0 ]] && prompt="$err$last_status$reset "
  prompt+="$c1\u$base@$c2\h$base:\w\n$base\$> $reset"
  PS1=$prompt
}
# Continuation prompt, matching the primary prompt's colors.
PS2="\[$BASE_COLOR\] > \[$RESET\]"
# ls always uses color
alias ls="ls --color=always"
# One key config sourcing
# NOTE(review): this shadows the builtin "." (source) — presumably
# intentional on this single-user box, but confirm.
alias .="clear && . ~/.bashrc"
# Shows available space in main drive
alias space="df --output='source,size,used,avail,pcent' /dev/sda1"
# Changes lynx.cfg's path to ~/lynx.cfg
alias lynx="lynx -cfg ~/lynx.cfg"
# Helps with managing dotfiles with a bare git repo.
alias config="git --git-dir=$HOME/.dotfiles/ --work-tree=$HOME"
# easy vnc setup
alias vnc-start="x11vnc -usepw -display :0"
# Location of bootloader config.
BOOTCONF="/mnt/sda1/tce/boot/extlinux/extlinux.conf"
# Location of onboot.lst
ONBOOTLST="/mnt/sda1/tce/onboot.lst"
# Sorts onboot.lst alphabetically (case-insensitive), in place.
# Globals: ONBOOTLST (read) - path of the boot extension list.
# Returns: 0 on success, 1 if the list file does not exist.
sort-onboot() {
  if [[ -f $ONBOOTLST ]]; then
    # Quote the path (and use --) so filenames containing spaces or a
    # leading dash are not mangled by word splitting / option parsing.
    sort --ignore-case --output="$ONBOOTLST" -- "$ONBOOTLST"
    echo "$ONBOOTLST sorted"
  else
    echo "$ONBOOTLST does not exist!" >&2
    echo "No changes made."
    return 1
  fi
}
# Backup and restore onboot.lst
ONBOOTBACKUP="$HOME/onboot.lst.backup"
# Copies the boot extension list to the backup location.
# Globals: ONBOOTLST (read), ONBOOTBACKUP (read).
# Returns: 0 on success, 1 if the list file does not exist.
backup-onboot() {
  if [[ -f $ONBOOTLST ]]; then
    # Quote both paths and end option parsing with -- so unusual
    # filenames cannot break the copy.
    cp -- "$ONBOOTLST" "$ONBOOTBACKUP"
    echo "$ONBOOTLST backed up to $ONBOOTBACKUP"
  else
    echo "$ONBOOTLST does not exist!" >&2
    echo "No changes made."
    return 1
  fi
}
# Restores the boot extension list from the backup location.
# Globals: ONBOOTLST (written), ONBOOTBACKUP (read).
# Returns: 0 on success, 1 if the backup file does not exist.
restore-onboot() {
  if [[ -f $ONBOOTBACKUP ]]; then
    # Quote both paths and end option parsing with -- so unusual
    # filenames cannot break the copy.
    cp -- "$ONBOOTBACKUP" "$ONBOOTLST"
    # Quoted message (same visible output; previously unquoted echo
    # args were subject to word splitting and globbing).
    echo "$ONBOOTLST restored from $ONBOOTBACKUP"
  else
    echo "$ONBOOTBACKUP does not exist!" >&2
    echo "No changes made."
    return 1
  fi
}
# Changes the default shell to bash in Tiny Core Linux
# NOTE(review): the username "derpsquid" is hardcoded, and each run
# appends another bash line to /etc/shells (no duplicate check) —
# presumably acceptable on this single-user box; confirm.
change_shell() {
# Register bash as a valid login shell, then rewrite the user's
# shell field in /etc/passwd.
which bash | sudo tee -a /etc/shells > /dev/null
sudo sed -i '/^derpsquid:/s#:[^:]\+$#:/usr/local/bin/bash#' /etc/passwd
}
# Sets up ssh-agent and adds ssh key at default location
# Starts a fresh agent, exports its environment into the current
# shell via eval, then loads the default key (~/.ssh/id_*).
ssh-setup(){
eval "$(ssh-agent -s)" && ssh-add
}
# Wraps a punctuation character in the two accent colors so the
# greeting lines below do not repeat the escape sequences.
# Arguments: $1 - the punctuation character to colorize.
# Outputs:   the colorized string on stdout.
color_punct() {
  printf '%s\n' "${COLOR1}${1}${COLOR2}"
}
# Pre-colorized punctuation used by the greeting below.
COLON=$(color_punct ':')
COMMA=$(color_punct ',')
PERIOD=$(color_punct '.')
# Prints the date and time in bold white
# NOTE(review): `version` is presumably the Tiny Core version command;
# confirm it exists on PATH when this file is sourced.
echo -e "${BASE_COLOR}Tiny Core Linux Version $COLOR2$(version)"
echo -e "${BASE_COLOR}Welcome back, $COLOR1$USER$COLOR2!"
# The $(date ...) substitutions are deliberately unquoted: echo rejoins
# the split words with single spaces, which matches the format strings.
echo -e $(date +"${BASE_COLOR}Today is $COLOR2%A$COMMA %B %d$COMMA %Y$BASE_COLOR$PERIOD")
echo -e $(date +"${BASE_COLOR}The current time is $COLOR2%I$COLON%M$COLON%S$COLON%N %p$PERIOD")
echo -e "$RESET"
| true
|
b9c8d2828ecee317ce435a30f16a0a11e8e88fe8
|
Shell
|
Adaptavist/puppet-sh_profile
|
/files/system-java.sh
|
UTF-8
| 582
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Probes well-known install locations for a Sun/Oracle JDK and, on the
# first match, exports JAVA_HOME (plus JRE_HOME/CLASSPATH/PATH below).
if [ -d /usr/java/default ]; then
export JAVA_HOME=/usr/java/default
elif [ -d /usr/lib/jvm/java-6-sun ]; then
export JAVA_HOME=/usr/lib/jvm/java-6-sun
elif [ -d /usr/lib/jvm/java-7-oracle ]; then
export JAVA_HOME=/usr/lib/jvm/java-7-oracle
else
# Warn only when a human is attached, so non-interactive shells stay quiet.
# Test if an interactive terminal (from: http://tldp.org/LDP/abs/html/intandnonint.html)
[[ -t 0 || -p /dev/stdin ]] && echo 'Unable to find Sun JAVA!' >&2
fi
# Derive the companion variables only when a JDK was found (or
# JAVA_HOME was already set by the environment).
if [ "x$JAVA_HOME" != "x" ]; then
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=$CLASSPATH:$JAVA_HOME/lib
export PATH=$JAVA_HOME/bin:$PATH
fi
| true
|
a275c825790e6bd4273af7e3e0e84e14afce3fd6
|
Shell
|
anelson/dotfiles
|
/home/.local/bin/run-rust-rls.sh
|
UTF-8
| 1,282
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Wrapper script to run the rust language server, ensuring that the correct version is run.
#
# Usually I want the stable toolchain and the stable RLS.
#
# But sometimes I have projects using nightly and in that case I want to use nightly RLS.
#
# But sometimes nightly is broken, so I use some slightly-older archived version of nightly, and its RLS
#
# In that case I use `rustup override set (toolchain)` in the directory of a project to force a specific roolchain.
#
# This script runs the RLS corresponding to the overridden toolchain of the current directory, if applicable, otherwise
# it runs the stable RLS.
# Ask rustup which toolchain is active here (this honours directory
# overrides set via "rustup override set <toolchain>").
toolchain=$(rustup show active-toolchain | cut -f1 -d " ")
# Quote the expansion so an empty or space-containing value cannot
# word-split into extra arguments for rustup.
rustup run "$toolchain" rls
# TODO: This code below will use the Rust Analyzer instead of RLS. Analyzer is
# the way forward for IDE-like support in Rust, but it's not ready for prime
# time yet.
#rust_analyzer_path="$HOME/.local/bin/rls-lsp-server"
#
## TODO: update this as there are frequent new releases
#rust_analyzer_url="https://github.com/rust-analyzer/rust-analyzer/releases/download/2020-02-11/ra_lsp_server-linux"
#
#if [ ! -e $rust_analyzer_path ]; then
# curl --silent -L -o $rust_analyzer_path $rust_analyzer_url
# chmod +x $rust_analyzer_path
#fi
#
#$rust_analyzer_path
#
| true
|
64a2117482081c04bd10fd131fb193397a69dabf
|
Shell
|
rodolfovalentim/2016-2-comp
|
/t3/gen_tests.sh
|
UTF-8
| 411
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Regenerates expected-output files for the course project: runs $EXE
# on every *.cm input under $IN and stores the result under $OUT.
DATA=/home/rodolfo/Workspace/2016-2-comp
IN=$DATA/in
OUT=$DATA/t3/my-out
EXE=./trab3
DOT="dot -Tpdf"
# Recreate the output directory from scratch each run.
rm -rf -- "$OUT"
mkdir -p -- "$OUT"
# Iterate with a glob instead of parsing `ls` output, which breaks on
# whitespace and special characters.
for infile in "$IN"/*.cm; do
    # With no matches the literal pattern remains; skip it.
    [ -e "$infile" ] || continue
    base=$(basename -- "$infile")
    outfile=$OUT/${base/.cm/.out}
    "$EXE" < "$infile" > "$outfile"
    # Optional AST-rendering variant, left disabled:
    # outfile=$OUT/${base/.cm/.dot}
    # $EXE < $infile > $outfile
    # outfile2=$OUT/${base/.cm/.pdf}
    # $DOT $outfile -o $outfile2
done
| true
|
536816ae661410b3d9a43b69e013893ec66e1084
|
Shell
|
felipesotero/dotfiles
|
/terminal/.bash_profile
|
UTF-8
| 1,416
| 2.953125
| 3
|
[] |
no_license
|
# Personal macOS login-shell setup: PATH, editor, completions, nvm,
# virtualenvwrapper and SDKMAN. Paths are specific to this machine
# (/Users/luizsotero).
# Make sublime work on the command line
export PATH=/bin:/sbin:/usr/local/bin:/usr/bin:/usr/local/sbin:/Users/luizsotero/bin:$PATH
export EDITOR='subl -w'
### Added by the Heroku Toolbelt
export PATH="/usr/local/heroku/bin:$PATH"
### Added for homebrew
export PATH="/usr/local/bin:$PATH"
### For git completion
if [ -f `brew --prefix`/etc/bash_completion ]; then
. `brew --prefix`/etc/bash_completion
fi
### for handling more files at once
ulimit -n 2560
export NVM_DIR="/Users/luizsotero/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
# set where virutal environments will live
export WORKON_HOME=$HOME/.virtualenvs
# ensure all new environments are isolated from the site-packages directory
export VIRTUALENVWRAPPER_VIRTUALENV_ARGS='--no-site-packages'
# use the same directory for virtualenvs as virtualenvwrapper
export PIP_VIRTUALENV_BASE=$WORKON_HOME
# makes pip detect an active virtualenv and install to it
export PIP_RESPECT_VIRTUALENV=true
if [[ -r /Users/luizsotero/Library/Python/2.7/bin/virtualenvwrapper.sh ]]; then
source /Users/luizsotero/Library/Python/2.7/bin/virtualenvwrapper.sh
else
echo "WARNING: Can't find virtualenvwrapper.sh"
fi
#THIS MUST BE AT THE END OF THE FILE FOR SDKMAN TO WORK!!!
export SDKMAN_DIR="/Users/luizsotero/.sdkman"
[[ -s "/Users/luizsotero/.sdkman/bin/sdkman-init.sh" ]] && source "/Users/luizsotero/.sdkman/bin/sdkman-init.sh"
| true
|
9072b081b846081e3356360d3c2955af6a90a1e6
|
Shell
|
drandyhaas/Haasoscope
|
/dockerfiles/quartus.sh
|
UTF-8
| 1,099
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This is an example script for running the "Quartus" graphical
# interface in a docker container. This is an example - you may need
# to copy this file and modify it so that it works in your
# environment.
#
# Prior to running this script, the docker image should have been
# created with something like:
#    docker build -t quartus -f quartus.docker
# Map x-windows socket into container
DPARAM="-v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=${DISPLAY}"
# If passing the x-windows socket doesn't work, then the following may
# work instead:
#xhost +localhost
#DPARAM="${DPARAM} --net=host -e DISPLAY=host.docker.internal:0"
# Obtain directory of the haasoscope software (based on the location
# of this script) and map the local firmware directory to /project .
SRCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
DPARAM="${DPARAM} -v ${SRCDIR}/max10_adc_firmware:/project -w /project"
# Run Quartus in container
# Extra command-line args are forwarded to quartus via "$@".
docker run -it --rm ${DPARAM} localhost/quartus quartus serial1.qpf "$@"
# If "xhost +localhost" was used above then reenable security here.
#xhost -localhost
| true
|
612ae965dec6eb1835a20be07b25092f3e9d8a5b
|
Shell
|
mgijax/pgmgddbschema
|
/procedure/VOC_processAnnotHeaderAll_create.object
|
UTF-8
| 7,520
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh
#
# History
#
# lec 01/24/2008
# - TR8216, fix selection of "toAdd" table
#
cd `dirname $0` && . ./Configuration
${PG_MGD_DBSCHEMADIR}/procedure/VOC_processAnnotHeaderAll_drop.object
cat - <<EOSQL | ${PG_DBUTILS}/bin/doisql.csh $0
CREATE OR REPLACE FUNCTION VOC_processAnnotHeaderAll (
v_annotTypeKey int
)
RETURNS VOID AS
\$\$
DECLARE
v_headerLabel int;
v_headerKey int;
v_pkey int;
v_objectKey int;
v_oldSeq int; -- current sequence number
v_newSeq int; -- new sequence number
v_prevObjectKey int;
rec record;
BEGIN
--
-- NAME: VOC_processAnnotHeaderAll
--
-- DESCRIPTION:
--
-- incrementally update VOC_AnnotHeader by annotation type
--
-- INPUT:
--
-- v_annotTypeKey : VOC_Annot._AnnotType_key
--
-- RETURNS:
-- VOID
--
v_headerLabel := _Label_key FROM DAG_Label WHERE label = 'Header';
v_headerKey := max(_AnnotHeader_key) FROM VOC_AnnotHeader;
IF v_headerKey = null
THEN
v_headerKey := 1000;
END IF;
-- set of 'new' headers based on most recent annotation update
-- need to check if any ancestors are header terms
-- AND if the annotated term is itself a header term
CREATE TEMP TABLE set0 ON COMMIT DROP
AS SELECT DISTINCT a._Object_key, h._Term_key, h.sequenceNum, a._Qualifier_key, isNormal = 0
FROM VOC_Annot a, VOC_Term t, VOC_VocabDAG vd, DAG_Node d, DAG_Closure dc, DAG_Node dh, VOC_Term h
WHERE a._AnnotType_key = v_annotTypeKey
AND a._Term_key = t._Term_key
AND t._Vocab_key = vd._Vocab_key
AND vd._DAG_key = d._DAG_key
AND t._Term_key = d._Object_key
AND d._Node_key = dc._Descendent_key
AND dc._Ancestor_key = dh._Node_key
AND dh._Label_key = v_headerLabel
AND dh._Object_key = h._Term_key
UNION
SELECT DISTINCT a._Object_key, h._Term_key, h.sequenceNum, a._Qualifier_key, isNormal = 0
FROM VOC_Annot a, VOC_Term t, VOC_VocabDAG vd, DAG_Node d, DAG_Closure dc, DAG_Node dh, VOC_Term h
WHERE a._AnnotType_key = v_annotTypeKey
AND a._Term_key = t._Term_key
AND t._Vocab_key = vd._Vocab_key
AND vd._DAG_key = d._DAG_key
AND t._Term_key = d._Object_key
AND d._Node_key = dc._Descendent_key
AND dc._Descendent_key = dh._Node_key
AND dh._Label_key = v_headerLabel
AND dh._Object_key = h._Term_key
ORDER BY a._Object_key, h.sequenceNum
;
CREATE INDEX set0_idx1 ON set0(_Term_key);
CREATE INDEX set0_idx2 ON set0(_Object_key);
CREATE INDEX set0_idx3 ON set0(_Qualifier_key);
-- set isNormal
-- isNormal = 1 if all of the qualifiers for a given header term = 2181424
-- else isNormal = 0
CREATE TEMP TABLE normal ON COMMIT DROP
AS SELECT DISTINCT _Object_key, _Term_key
FROM set0 s1
WHERE s1._Qualifier_key = 2181424
AND NOT EXISTS (SELECT 1 FROM set0 s2
WHERE s1._Object_key = s2._Object_key
AND s1._Term_key = s2._Term_key
AND s2._Qualifier_key != 2181424)
;
UPDATE set0
SET isNormal = 1
FROM normal n
WHERE n._Object_key = set0._Object_key
AND n._Term_key = set0._Term_key
;
-- now SELECT the DISTINCT headers
CREATE TEMP TABLE set1 ON COMMIT DROP
AS SELECT DISTINCT _Object_key, _Term_key, sequenceNum, isNormal
FROM set0
;
CREATE INDEX set1_idx1 ON set1(_Term_key);
CREATE INDEX set1_idx2 ON set1(_Object_key);
-- set of headers that are currently cached
CREATE TEMP TABLE set2 ON COMMIT DROP
AS SELECT _AnnotHeader_key, _Object_key, _Term_key, sequenceNum, isNormal
FROM VOC_AnnotHeader
WHERE _AnnotType_key = v_annotTypeKey
ORDER BY _Object_key, sequenceNum
;
CREATE INDEX set2_idx1 ON set2(_Term_key);
CREATE INDEX set2_idx2 ON set2(_Object_key);
-- any headers in set2 that is not in set1 are deleted
CREATE TEMP TABLE toDelete ON COMMIT DROP
AS SELECT s2._AnnotHeader_key
FROM set2 s2
WHERE not EXISTS
(SELECT 1 FROM set1 s1 WHERE s2._Term_key = s1._Term_key AND s2._Object_key = s1._Object_key)
;
CREATE INDEX toDelete_idx1 ON toDelete(_AnnotHeader_key);
delete FROM VOC_AnnotHeader
using toDelete d
WHERE d._AnnotHeader_key = VOC_AnnotHeader._AnnotHeader_key
;
-- set of headers that are currently cached after deletion
CREATE TEMP TABLE set3 ON COMMIT DROP
AS SELECT _Object_key, _Term_key, sequenceNum
FROM VOC_AnnotHeader
WHERE _AnnotType_key = v_annotTypeKey
ORDER BY _Object_key, sequenceNum
;
CREATE INDEX set3_idx1 ON set3(_Term_key);
CREATE INDEX set3_idx2 ON set3(_Object_key);
-- any headers in set1 that are not in set3 are added
CREATE TEMP TABLE toAdd ON COMMIT DROP
AS SELECT 1 as SERIAL, _Object_key, _Term_key, sequenceNum, isNorma
FROM set1 s1
WHERE NOT EXISTS (SELECT 1 FROM set3 s3 WHERE s1._Term_key = s3._Term_key AND s1._Object_key = s3._Object_key)
ORDER BY s1._Object_key, s1.sequenceNum
;
-- update the isNormal bit for any headers in #set1 that are in #set3 (existing headers)
update VOC_AnnotHeader
set isNormal = s1.isNormal
FROM set1 s1, set3 s3
WHERE VOC_AnnotHeader._AnnotType_key = v_annotTypeKey
AND VOC_AnnotHeader._Object_key = s1._Object_key
AND VOC_AnnotHeader._Term_key = s1._Term_key
AND s1._Object_key = s3._Object_key
AND s1._Term_key = s3._Term_key
;
-- get the maximum sequence number for existing headers
CREATE TEMP TABLE maxSequence ON COMMIT DROP
AS SELECT max(sequenceNum) as maxSeq, _Object_key
FROM set3
group by _Object_key
;
-- get the maximum sequence number for any new headers
INSERT INTO maxSequence SELECT DISTINCT 0, _Object_key FROM toAdd t
WHERE not EXISTS (SELECT 1 FROM set3 s WHERE t._Object_key = s._Object_key)
;
CREATE INDEX idx1 on maxSequence(_Object_key)
;
INSERT INTO VOC_AnnotHeader
SELECT v_headerKey + i, v_annotTypeKey, t._Object_key, t._Term_key, m.maxSeq + i, isNormal,
1001, 1001, NULL, NULL, current_date, current_date
FROM toAdd t, maxSequence m
WHERE t._Object_key = m._Object_key
;
-- automatically approve all annotations with one header
CREATE TEMP TABLE toApprove ON COMMIT DROP
AS SELECT _AnnotHeader_key
FROM VOC_AnnotHeader
WHERE _AnnotType_key = v_annotTypeKey
AND _ApprovedBy_key is null
group by _Object_key having count(*) = 1
;
CREATE INDEX toApprove_idx1 ON toApprove(_AnnotHeader_key);
UPDATE VOC_AnnotHeader
SET _ApprovedBy_key = 1001, approval_date = current_date
FROM toApprove t
WHERE t._AnnotHeader_key = VOC_AnnotHeader._AnnotHeader_key
;
-- automatically set all headers to non-approved if there is at least one header (by object) that is non-approved */
CREATE TEMP TABLE toNotApprove ON COMMIT DROP
AS SELECT _AnnotHeader_key
FROM VOC_AnnotHeader v1
WHERE v1._AnnotType_key = v_annotTypeKey
AND v1._ApprovedBy_key is null
AND EXISTS (SELECT 1 FROM VOC_AnnotHeader v2 WHERE v2._AnnotType_key = v_annotTypeKey
AND v1._AnnotHeader_key != v2._AnnotHeader_key
AND v1._Object_key = v2._Object_key
AND v2._ApprovedBy_key is not null)
;
CREATE INDEX toNotApprove_idx1 ON toNotApprove(_AnnotHeader_key);
UPDATE VOC_AnnotHeader
SET _ApprovedBy_key = null, approval_date = null
FROM toNotApprove t
WHERE t._AnnotHeader_key = VOC_AnnotHeader._AnnotHeader_key
;
-- re-order
v_prevObjectKey := -1;
FOR rec IN
SELECT _AnnotHeader_key, _Object_key, sequenceNum
FROM VOC_AnnotHeader
WHERE _AnnotType_key = v_annotTypeKey
ORDER by _Object_key, sequenceNum
LOOP
SELECT into v_pkey, v_objectKey, v_oldSeq
rec._annotheader_key, rec._object_key, rec.sequencenum;
IF v_objectKey != v_prevObjectKey
THEN
v_newSeq := 1;
END IF;
UPDATE VOC_AnnotHeader SET sequenceNum = v_newSeq WHERE _AnnotHeader_key = v_pkey;
v_newSeq := v_newSeq + 1;
v_prevObjectKey := v_objectKey;
END LOOP;
END;
\$\$
LANGUAGE plpgsql;
GRANT EXECUTE ON FUNCTION VOC_processAnnotHeaderAll(int) TO public;
COMMENT ON FUNCTION VOC_processAnnotHeaderAll(int) IS 'incrementally update VOC_AnnotHeader by annotation type';
EOSQL
| true
|
45ed129c251db7a5c2d4d0973583b15e981ae907
|
Shell
|
AndreyJComm/zadanietestowe
|
/bootstrap.sh
|
UTF-8
| 899
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant-style provisioner: installs PHP 5, Apache2, MySQL and
# Composer on an Ubuntu box, then restarts Apache.
# NOTE(review): the MySQL root password ('1111') is hardcoded in
# plaintext below — acceptable only for throwaway dev VMs.
# Install base part
apt-get -y update
apt-get -y install git cron nano curl htop apt-transport-https
# Install php 5
sudo add-apt-repository ppa:ondrej/php -y
apt-get -y update
apt-get -y upgrade
apt-get -y install php5 php5-gd php5-fpm php5-mysql libapache2-mod-php5 php5-mcrypt
# Install Apache2
apt-get -y install apache2
#Install mysql
# Pre-seed debconf so the mysql-server install is non-interactive.
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password password 1111'
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password 1111'
sudo apt-get -y install mysql-server
# Downloading and Installing Composer
curl -sS https://getcomposer.org/installer | sudo php -- --install-dir=/usr/local/bin --filename=composer
# Restart Apache2
sudo echo "default_charset = \"UTF-8\"" >> /etc/php5/apache2/php.ini
sudo rm -f /var/www/html/index.html
sudo /etc/init.d/apache2 restart
| true
|
c86e6c2fde6eb7d7ce69e1a31990d3bcebf2af2f
|
Shell
|
CodingGearsCourses/Mastering-Bash-Shell-Scripting
|
/Module-13-Debugging/02-more-debugging.sh
|
UTF-8
| 249
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Teaching script for the debugging module: it contains deliberate
# mistakes for students to find ('grp' and 'ehco' typos, and the
# unset MESSAGE tripping "set -u") — presumably intentional, so do
# not "fix" them; confirm with the course material.
set -e
set -u
set -o pipefail
echo "Welcome to GlobaleTraining.com"
for x in $(seq 10);do
echo $x
if [[ $x -eq 5 ]]; then
# 'grp' is a typo for 'grep'; with pipefail set the pipeline fails here.
cat /etc/passwd | grp nologin | wc -l
echo " A B C"
# MESSAGE is never defined; aborts under "set -u".
echo $MESSAGE
# 'ehco' is a typo for 'echo'.
ehco "Found 5!!"
fi
done
| true
|
3a512266d8f66df3635cf88a0e278b99d1b89322
|
Shell
|
pragya1990/globule
|
/setup2_code_new/mod-globule-1.3.2/tools/mkinstaller.sh
|
UTF-8
| 5,469
| 3.84375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# This script generates the shell-script-installer "installer.sh".
# The "installer.sh" script can be downloaded by semi-end-users
# which want a fully automated method of compiling Globule and
# its prerequisites and utility software packages, but want to
# have a home-build/compiled Globule and not a binary only installation
#
# The "installer.sh" is basically a script and a shar-archive in one.
# The script has basically a number of simple functions.
# - unpack itself (the shar-archive which is appended at the end
# - possibly retrieve new updates from the web site (currently this is
# omitted if the shar-archive is present, the shar archive is disabled
# from the installer.sh script afterwards when placed in the target
# directory).
# - create the destination directory and place the sources in it. Normally
# the destination is /usr/local/globule and the sources are placed in
# /usr/local/globule/src.
# - build the sources and install the software
# - install a demo httpd.conf and install demo web pages.
#
# The installer.sh script is both used as a complete all-in-one installer
# script which contains all sources as well as a script which retrieves
# upgrades of the sources from the web and re-installs the software.
# To this end, the script detects whether it contains an enabled shar
# archive at the end (it patches itself when it installs itself to disable
# the shar archive) and whether the software has previously installed.
#
# The model is that the initial "installer.sh" script downloaded from the
# web (normally ftp://ftp.globule.org/pub/globule/installer.sh) contains
# the full sources as a shar archive. When everything is installed, the
# core install.sh script with disabled shar archive is placed in
# /usr/local/globule/src. Then called again, or when the software is
# reinstalled, the installer.sh script will try to update this installer.sh
# from the web (normally ftp://ftp.globule.org/pub/globule/auto/installer.sh)
# Note that everything in the auto/ directory is suppost to be fetched by
# the install.sh script to update existing packages. Also updated
# individual software distributions are fetched from this location, however
# from the auto/src/ subdirectory to facilate building and extracting the
# shar archive.
#
# This script either pushes a true release, a prerelease or a testing
# release. Without command line arguments, a testing release is
# produced. A prerelease is pushed when the parameter "prerelease" is
# given as first command line argument, and a full release is pushed
# when the command line argument is "release".
# For a full release, the install.sh script in BOTH the primary location
# ftp://ftp.globule.org/pub/globule/install.sh (for the full shar archive)
# as the updater script (same, but without shar archive) in
# ftp://ftp.globule.org/pub/globule/auto/install.sh
# Note that the files are updated based on the distribution file, not
# copied from the location of this mkinstall.sh script.
#
# Placing updated packages of mod-globule*.tar.gz and the other software
# packages in ftp://ftp.globule.org/pub/globule/auto/src is the
# responsibility of the maintainer and falls outside the scope of this
# script.
#
# The paths for the prerelease and testing release are
# /home/ftp/pub/globule/prerelease/ and /home/ftp/pub/globule/testing/
# respectively.
# Note that the full shar archive is also made under a different name,
# namely "prerelease.sh" or "testing.sh" instead of "installer.sh".
#
# Root of the public FTP tree that receives the generated installers.
destination=/home/ftp/pub/globule
cd `dirname $0`/..
rm -f mod-globule-*.tar.gz
make dist
# Release flavour is chosen by the first argument (see header comment).
case "$1" in
release)
releaseas=release
;;
prerelease)
releaseas=prerelease
;;
*)
releaseas=testing
;;
esac
# Copy the fresh dist tarball into the flavour's src/ tree and pick
# the output script name for that flavour.
case $releaseas in
prerelease)
cp mod-globule-*.tar.gz /home/ftp/pub/globule/prerelease/src/
cd $destination/prerelease
target=prerelease.sh
;;
testing)
cp mod-globule-*.tar.gz /home/ftp/pub/globule/testing/src/
cd $destination/testing
target=testing.sh
;;
release)
cp mod-globule-*.tar.gz /home/ftp/pub/globule/auto/src/
cd $destination/auto
target=installer.sh
;;
*)
exit 1
esac
# Build the fully-qualified host name used to rewrite download URLs.
if [ "`domainname`" = "(none)" ]; then
fqhn="`hostname`"
else
fqhn="`hostname | cut -f1 -d .`.`domainname`"
fi
# Extract the raw installer.sh script from inside the dist tarball.
tar xOzf src/mod-globule* \
`tar tzf src/mod-globule* | egrep /tools/installer.sh$` > installer.sh
chmod 755 installer.sh
# Point the script's downloadurl at this host and, for prerelease /
# testing, redirect its auto/ directory to the flavour directory.
case $releaseas in
release)
sed < installer.sh > installer.sh~ \
-e "s/^\(downloadurl=.[a-z]*\):\/\/[^\/]*\/\(.*\)$/\1:\/\/$fqhn\/\2/"
mv installer.sh~ $target
chmod 755 $target
;;
prerelease)
sed < installer.sh > installer.sh~ \
-e 's/\$downloadurl\/auto/\$downloadurl\/prerelease/g' \
-e "s/^\(downloadurl=.[a-z]*\):\/\/[^\/]*\/\(.*\)$/\1:\/\/$fqhn\/\2/"
mv installer.sh~ $target
chmod 755 $target
;;
testing)
sed < installer.sh > installer.sh~ \
-e 's/\$downloadurl\/auto/\$downloadurl\/testing/g' \
-e "s/^\(downloadurl=.[a-z]*\):\/\/[^\/]*\/\(.*\)$/\1:\/\/$fqhn\/\2/"
mv installer.sh~ $target
chmod 755 $target
;;
esac
# Stage the sample configs that go into the shar archive.
tar xOzf src/mod-globule* \
`tar tzf src/mod-globule* | egrep /sample/httpd.conf$` > src/httpd.conf
tar xOzf src/mod-globule* \
`tar tzf src/mod-globule* | egrep /sample/webalizer.conf$` > src/webalizer.conf
# The parent-directory copy starts at stage 1 (fresh install) instead
# of stage 3, then gets the shar archive of src/ appended.
sed < $target > ../$target \
-e 's/^stage=3$/stage=1/'
shar >> ../$target -s berry@cs.vu.nl -n globule -p -x \
-T src/httpd.conf \
-B `find src -type f -maxdepth 1 \! -name httpd.conf`
chmod 755 ../$target
exit 0
| true
|
66ab407eabb593e2073377d6ad371b38cdb9a96b
|
Shell
|
mbach04/odie
|
/odie-app.sh
|
UTF-8
| 12,225
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# ODIE application import/export/provision front-end.
# Pulls shared helpers (run_cmd, spin, run_ansible_play, ...) from
# scripts/lib.sh relative to this script's directory.
SCRIPT_NAME=$(basename "$0")
DIR_NAME=$(dirname "$0")
. ${DIR_NAME}/scripts/lib.sh
#set -x
#set -e
#set -u
export OUTPUT_NAME=${OUTPUT_NAME:-""}
# Command Line Arguments (defaults; overridden by getopt parsing below)
DEBUG=0
FORCE_IMPORT=0
INCLUDE_BUILDS=0
ONLY_IMAGES=0
declare TARGET_PROJECT
declare TAR
declare TAGS
# Prints the top-level usage summary for the odie app subcommands.
usage() {
cat <<EOF
usage: odie app export PROJECT [APP]  [--output-name=FILE]
usage: odie app import [TAR] [--to=PROJECT]
usage: odie app create PROJECT
   Create a new basic project under ${PROJECTS_DIR}/PROJECT
================
More Info:
usage: odie app import help
usage: odie app export help
EOF
}
# Prints the detailed options for the export subcommand.
usage_export() {
cat <<EOF
usage: odie app export PROJECT [APP]  [--output-name=FILE]
================
Application Export Options:
  ${bold}--output-name NAME${normal} - specify the output name
  ${bold}--builds${normal}  - include the builds in output
EOF
}
# Exports container images referenced by a project/app config.yml into
# OUTPUT_PATH via an Ansible playbook.
# Arguments: $1 config dir, $2 output dir, $3 project name, $4 output name.
export_images() {
local CONFIG_PATH=${1}/config.yml
local OUTPUT_PATH=${2}
local PROJECT_NAME=${3}
local OUTPUT_NAME=${4}
cd ${GIT_CLONE}
mkdir -p ${OUTPUT_PATH}
if [ -f "${CONFIG_PATH}" ]; then
IMAGE_STREAM="$(contrib/bin/yaml_linux_amd64 r ${CONFIG_PATH} image_streams | perl -pe 's/[-:]//g;')"
set +x
# check that they have defined an image stream (this allows the export to work w\o a running OCP cluster
# NOTE(review): string compared with -ne (numeric) against "null" —
# presumably works because yaml tool prints "null"; confirm.
if [[ "$IMAGE_STREAM" -ne "null" ]] ; then
run_cmd ./playbooks/container_images/export-images.yml -e "config_path=${CONFIG_PATH}" -e "odie_images_dir=${OUTPUT_PATH}" -e "project_name=${PROJECT_NAME}" -e "output_name=${OUTPUT_NAME}" & spin $! "Exporting images from OCP"
fi
fi
}
# Archives a directory into ${EXPORT_DIR}/<name>.tar and deletes the
# source directory afterwards.
tar_dir() {
#set -x
TARGET="$1"
NAME="$(basename ${TARGET})"
local OUTPUT_DIR=$(realpath ${EXPORT_DIR})
local ARCHIVE=${OUTPUT_DIR}/${NAME}.tar
run_cmd pushd ${TARGET}
run_cmd tar cvf ${ARCHIVE} . & spin $! "Creating TAR archive"
popd
rm -rf "${TARGET}"
}
# Shared rsync invocation for the export paths (-x: stay on one filesystem).
RSYNC_CMD="`which rsync` -avzL -x "
# IDEA: can you make the export more generic??
# Exports a whole project (minus apps) plus its images into a tarball.
# Arguments: $1 project name. Honors INCLUDE_BUILDS and OUTPUT_NAME.
export_project() {
#set +e
PROJECT_NAME=$1
OUTPUT_NAME=${OUTPUT_NAME:-odie-export-${PROJECT_NAME}-`date +%Y-%m-%d`}
# TODO: make sure this is being reset after args set
OUTPUT_PATH="${EXPORT_DIR}/${OUTPUT_NAME}/"
PROJECT_PATH="${PROJECTS_DIR}"
#INCLUDES="--include='/${PROJECT_NAME}'"
mkdir -p ${OUTPUT_PATH}
EXCLUDES=" --exclude .odie-project-provision --exclude apps "
if [[ "${INCLUDE_BUILDS}" != 1 ]]; then
EXCLUDES="${EXCLUDES} --exclude build"
fi
run_cmd ${RSYNC_CMD} ${EXCLUDES} ${PROJECT_PATH}/${PROJECT_NAME} ${OUTPUT_PATH}/projects/ & spin $! "Rsyncing project output"
mkdir -p ${OUTPUT_PATH}/projects/${PROJECT_NAME}/apps
export_images "${PROJECT_PATH}/${PROJECT_NAME}" "${OUTPUT_PATH}/images/" "${PROJECT_NAME}" "${OUTPUT_NAME}"
tar_dir "${OUTPUT_PATH}"
}
# Exports a single application of a project plus its images.
# Arguments: $1 project name, $2 app name.
export_app() {
local PROJECT_NAME=$1
local APP_NAME=$2
OUTPUT_NAME=${OUTPUT_NAME:-odie-export-${PROJECT_NAME}-${APP_NAME}-`date +%Y-%m-%d`}
#OUTPUT_NAME=odie-export-${PROJECT_NAME}-${APP_NAME}-`date +%Y-%m-%d`
local APP_PATH="${PROJECTS_DIR}/${PROJECT_NAME}/apps/${APP_NAME}"
#INCLUDES=' --exclude="*" --include "apps/${APP_NAME}" '
local OUTPUT_PATH="${EXPORT_DIR}/${OUTPUT_NAME}"
local OUT_APP_PATH="${OUTPUT_PATH}/apps"
mkdir -p ${OUT_APP_PATH}
EXCLUDES="--exclude .odie-app-provision"
if [[ "${INCLUDE_BUILDS}" != 1 ]]; then
EXCLUDES="${EXCLUDES} --exclude build"
fi
run_cmd ${RSYNC_CMD} ${EXCLUDES} ${APP_PATH} ${OUT_APP_PATH} & spin $! "Rsyncing Application output"
export_images "${APP_PATH}/" "${OUT_APP_PATH}/${APP_NAME}/images/" "${PROJECT_NAME}" "${OUTPUT_NAME}"
tar_dir "${OUTPUT_PATH}"
}
# Runs the provision playbook for an optional project/app selection.
# Arguments: $1 project (optional), $2 application (optional).
# Honors TAGS to restrict the Ansible run.
provision() {
cd ${GIT_CLONE}
if [[ ! -z "${1}" ]]; then
export ODIE_SELECTED_PROJECT=${1}
fi
if [[ ! -z "${2}" ]]; then
export ODIE_SELECTED_APPLICATION=${2}
fi
if [[ ! -z "${TAGS}" ]]; then
TAG_CMD="--tags ${TAGS}"
else
TAG_CMD=""
fi
is_oc_logged_in
run_ansible_play "Provision Application/Project Components" ./playbooks/app_deployment/provision.yml -v ${TAG_CMD}
}
# Mirror of provision() but runs the unprovision playbook.
unprovision() {
cd ${GIT_CLONE}
if [[ ! -z "${1}" ]]; then
export ODIE_SELECTED_PROJECT=${1}
fi
if [[ ! -z "${2}" ]]; then
export ODIE_SELECTED_APPLICATION=${2}
fi
if [[ ! -z "${TAGS}" ]]; then
TAG_CMD="--tags ${TAGS}"
else
TAG_CMD=""
fi
is_oc_logged_in
run_ansible_play "Unprovision Application/Project Components" ./playbooks/app_deployment/unprovision.yml -v ${TAG_CMD}
}
# Prints the banner unless NO_HEADER is set in the environment.
function header() {
HEADER="Red Hat ODIE Application Provisioner- ${INSTALLER_VERSION}"
if [[ ! -v NO_HEADER ]]; then
echo
echo ${HEADER}
echo
echo "- View log file in another terminal : ${bold}tail -f ${LOG_FILE}${normal} "
echo
fi
}
# Lists the top-level entries of the given kind (projects|apps|images)
# inside the archive $TAR, as a space-separated string on stdout.
parse_tar() {
#local TAR=${1}
local TYPE=${1}
echo -n $(tar -tf ${TAR} ./${TYPE}/\* --exclude="*/*/*/*" --strip-components=1 --no-wildcards-match-slash 2>/dev/null  | perl -ne 'next if /^\s*$|.tar.gz/; s#^./(projects|apps|images)##; s#/|.yml##g;print;' )
}
# Prints an error for an unsupported import scenario and aborts.
unsupported_import_function() {
echo ${bold}[${red}ERROR${normal}${bold}]${normal} Import functionality does not yet support: $1
exit 1
}
# Validates $TAR and dispatches to project/app import. Supports at
# most one project and one app per archive; multiples abort.
import() {
if [[ ! -f "${TAR}" ]]; then
echo ${bold}[${red}ERROR${normal}${bold}]${normal} Import TAR not found: ${TAR}
exit 1
fi
local PROJECTS=$(parse_tar projects)
local APPS=$(parse_tar apps)
local PROJECT_COUNT=$(echo ${PROJECTS} | wc -w )
local APPS_COUNT=$(echo ${APPS} | wc -w )
if [[ "${DEBUG}" = 1 ]]; then
echo "PROJECTS are ${PROJECTS} (${PROJECT_COUNT})"
echo "APPS are ${APPS} (${APPS_COUNT})"
fi
echo "Completed" >/dev/null & spin $! "Preparing Import settings"
[[ ! -z "${PROJECTS}" &&  "${PROJECT_COUNT}" != 1 ]] && unsupported_import_function "Multiple project import"
[[ ! -z "${APPS}" && "${APPS_COUNT}" != 1 ]] && unsupported_import_function "Multiple apps import"
mkdir -p "${PROJECTS_DIR}"
if [[ "${ONLY_IMAGES}" != 1 &&  "${PROJECT_COUNT}" = 1 ]]; then
import_project ${PROJECTS}
fi
if [[ "${ONLY_IMAGES}" != 1 && "${APPS_COUNT}" = 1 ]]; then
import_apps ${APPS}
fi
}
# Extracts a single project from $TAR into ${PROJECTS_DIR}, renaming
# it to TARGET_PROJECT when given. Refuses to overwrite.
import_project() {
local PROJECTS=$1
local TARGET_PROJECT=${TARGET_PROJECT:-$PROJECTS}
local OUT_PATH="${PROJECTS_DIR}/${TARGET_PROJECT}"
[[ -d "${OUT_PATH}" ]] && unsupported_import_function "Directory [${OUT_PATH}] already exists, manually delete directory and OpenShift project to proceed"
tar -C ${PROJECTS_DIR} --xform="s|projects/${PROJECTS}|${TARGET_PROJECT}|" -xf ${TAR} ./projects/${PROJECTS} & spin $! "Extracting TAR archive of project"
mkdir -p ${OUT_PATH}/apps
}
# Extracts a single app from $TAR into an existing target project.
# Requires --to/--to-project; refuses to overwrite.
import_apps() {
local APPS=$1
#[ ${DEBUG} ] && echo "DEBUG: ${TARGET_PROJECT:=""}"
[[ ! -v TARGET_PROJECT  ]] && unsupported_import_function "Must defined ${bold}--to-project${normal} target project"
local TARGET_PROJECT=${TARGET_PROJECT}
local APP_PATH="${PROJECTS_DIR}/${TARGET_PROJECT}/apps/"
local OUT_PATH="${APP_PATH}/${APPS}"
[[ -d "${OUT_PATH}" ]] && unsupported_import_function "Directory already exists, please manually delete ${OUT_PATH} directory and OpenShift project"
[[ ! -d "${APP_PATH}" ]] && unsupported_import_function "Project ${TARGET_PROJECT} doesn't exist"
tar -C ${APP_PATH} --xform="s|apps/${APPS}|${APPS}|" -xf ${TAR} ./apps/${APPS} & spin $! "Extracting TAR archive for ${APPS}"
}
# Extracts exported images from $TAR and pushes them into the target
# OpenShift project via the import-images playbook.
import_images() {
local IMAGES=$1
local TARGET_PROJECT=${TARGET_PROJECT:-$PROJECTS}
local OUT_PATH="${IMAGES_DIR}/"
mkdir -p ${OUT_PATH}
tar -C ${OUT_PATH} --xform="s|images/${IMAGES}|${IMAGES}|" -xf ${TAR} ./images/${IMAGES}\* & spin $! "Extracting archive"
run_cmd ./playbooks/container_images/import-images.yml -e "manifest_path=${OUT_PATH}/${IMAGES}.yml" -e "images_target=${OUT_PATH}" -e "ocp_project=${TARGET_PROJECT}" -e "output_name=${OUTPUT_NAME}" & spin $! "Importing container images into ${TARGET_PROJECT}"
}
# Prints the detailed options for the import subcommand.
usage_import() {
cat <<EOF
usage: odie app import [TAR] [--to=PROJECT]
================
Import Options:
  ${bold}--only-images${normal} - skip application import and only import images
  ${bold}--to${normal} - include everything
EOF
#  ${bold}--reuse${normal} - use the existing files
# Experimental:
#  ${bold}--force${normal} - delete existing directories/files and force import
#  ${bold}--all${normal} - include everything
}
# Runs the update_crl playbook (shutdown, refresh secrets, startup)
# for an optional project/app selection.
update_crl() {
cd ${GIT_CLONE}
if [[ ! -z "${1}" ]]; then
export ODIE_SELECTED_PROJECT=${1}
fi
if [[ ! -z "${2}" ]]; then
export ODIE_SELECTED_APPLICATION=${2}
fi
is_oc_logged_in
run_ansible_play "Update CRL" ./playbooks/app_deployment/update_crl.yml -v --tags shutdown,secrets,startup
}
# Prints the (option-less) usage for update-crl.
usage_update_crl() {
cat <<EOF
usage: odie app update-crl
================
Import Options:
  ${bold}none${normal}
EOF
}
# Normalise the CLI options with getopt(1) so the loop below can consume them.
# BUGFIX: the original wrote `export params="$(getopt …)"` and then tested $?,
# but that $? is the exit status of `export` (always 0) — getopt parse errors
# were silently ignored.  Assign first, test getopt's real status, then export.
params="$(getopt -o a,h,d,t: -l debug,only-images,force,to:,tags:,to-project:,help,output-name:,builds,all,reuse,password --name "${SCRIPT_NAME}" -- "$@")"
if [[ $? -ne 0 ]]
then
usage
exit 1
fi
export params
# ---------------------------------------------------------------------------
# First pass: consume the getopt-normalised option list in $params and record
# each flag in a global read later by the subcommand handlers.  getopt emits
# `--` after the last option; the words remaining afterwards are the
# subcommand and its positional arguments.
# ---------------------------------------------------------------------------
eval set -- "$params"
while true
do
case $1 in
-h|--help)
usage
shift
exit 0
;;
-d|--debug)
DEBUG=1
shift
;;
--output-name)
# --output-name requires a non-empty value.
if [ -n "$2" ]; then
OUTPUT_NAME="$2"
else
echo "ERROR: Must specify output name with ${bold}--output-name${normal}"
exit 1
fi
shift 2
;;
--force)
FORCE_IMPORT=1
shift
;;
--only-images)
ONLY_IMAGES=1
shift
;;
--password)
# Helper defined elsewhere in this file (not visible in this chunk);
# presumably prompts for / loads the ansible vault password.
vault_password
shift
;;
--builds)
INCLUDE_BUILDS=1
shift
;;
--to|--to-project)
TARGET_PROJECT=$2
shift 2
;;
-t|--tags)
TAGS=$2
shift 2
;;
--)
shift; break ;;
*)
# Should be unreachable: getopt has already validated the option list above.
echo "Unknown arg: $1"
exit 1
;;
esac
done
while true
do
case $1 in
export)
if [[ "$2" = "help" ]]; then
usage_export
exit 0
fi
PROJECT_NAME=$2
APP_NAME=$3
if [[ ! -z "${PROJECT_NAME}" && ! -z "${APP_NAME}" ]]; then
header
export_app $PROJECT_NAME $APP_NAME
complete_message "Application Export"
elif [[ ! -z "${PROJECT_NAME}" ]]; then
header
export_project $PROJECT_NAME
complete_message "Project Export"
else
echo "ERROR: Invalid execution"
usage_export
exit 1
fi
exit 0
;;
create)
TAR=/opt/odie/bundles/current
TARGET_PROJECT=${2}
echo "its going ${TARGET_PROJECT}"
header
import
complete_message "Project Created"
exit 0
shift;;
import)
if [[ "$2" = "help" ]]; then
usage_import
exit 0
fi
TAR=$(realpath ${2})
header
import
complete_message "Application Import"
exit 0
shift;;
provision)
if [[ "x${2}" = "xhelp" ]]; then
echo "Function not documented. Consult Red Hat."
exit 1
fi
header
provision $2 $3
complete_message "Application Provisioning"
exit 0
;;
unprovision)
if [[ "x${2}" = "xhelp" ]]; then
echo "Function not documented. Consult Red Hat."
exit 1
fi
header
unprovision $2 $3
complete_message "Application Unprovisioning"
exit 0
;;
mount)
if [[ "x${2}" = "xhelp" ]]; then
echo "Function not documented. Consult Red Hat."
exit 1
fi
header
TAGS="pv"
provision $2 $3
complete_message "Mounting PV Dirs"
exit 0
;;
update-crl)
if [[ "x${2}" = "xhelp" ]]; then
usage_update_crl
exit 0
fi
header
update_crl $2 $3
complete_message "Application Update-CRL"
exit 0
;;
*)
# BUGFIX: the unmatched subcommand is $1 (the case selector), not $2;
# the original printed the subcommand's *argument* instead of its name.
echo "Invalid Subcommand: $1"
usage
exit 1
;;
esac
done
usage
| true
|
ef707a4d2aa3a5e00c840348d00bfe4bc754655d
|
Shell
|
nathandesmet/dotfiles
|
/.zshenv
|
UTF-8
| 365
| 2.796875
| 3
|
[] |
no_license
|
# Source optional per-user helper files (~/.aliases, ~/.functions) when they
# exist and are readable.
for file in ~/.{aliases,functions}; do
[ -r "$file" ] && source "$file"
done
unset file
set -o vi # use vi as the terminal readline editing tool
# also useful when pressing 'v' to open-up a vim window
# where you can modify the command that you are currently typing
# Make the locally cloned "pure" prompt discoverable by zsh, and disable its
# automatic `git pull` check.
fpath+=$HOME/code/github.com/sindresorhus/pure
export PURE_GIT_PULL=0
| true
|
de1484a2ac1df626b508529e0608bd5a59716d4e
|
Shell
|
kkola/bsve-integration
|
/grits.sh
|
UTF-8
| 2,727
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Default NIC used for IP discovery; override with: ./grits.sh --ethernet <iface>
ethernet="eth0"
if [[ -n $1 && -n $2 ]]; then
  # BUGFIX: the original wrote [ "$1"=="--ethernet" ] with no spaces around
  # ==, which test(1) sees as a single non-empty string — it was always true,
  # so ANY first argument caused $2 to be taken as the interface name.
  if [[ "$1" == "--ethernet" ]]; then
    ethernet="$2"
  fi
fi
./initial-checks.sh --ethernet "$ethernet" || exit 1
#Ensure data dump file is in our directory
if [ ! -f geonames.tar ]; then
aws s3 cp s3://bsve-integration/geonames.tar ./geonames.tar
fi
#Build and spin up our mongodb
./mongodb.sh --ethernet $ethernet
#Import the geonames dataset
ln -s $(pwd)/geonames.tar /var/log/geonames.tar
cd /var/log/ && tar -xf geonames.tar &&\
docker exec -t mongodb mongorestore --db geonames /var/log/geonames
#Ensure we have a copy of the grits image
if [[ ! -f grits-provisioned.tar.gz && ! -f grits-provisioned.tar ]]; then
aws s3 cp s3://bsve-integration/grits-provisioned.tar.gz ./grits-provisioned.tar.gz
gzip -d grits-provisioned.tar.gz
fi
#Load the image
docker load < grits-provisioned.tar
export LOCAL_IP=$(ifconfig $ethernet|grep "inet addr"|awk -F":" '{print $2}'|awk '{print $1}')
#Get and setup config files
wget https://raw.githubusercontent.com/ecohealthalliance/grits-deploy-ansible/master/compose.yml --output-document=grits.yml
sed -i -r "s/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b/$LOCAL_IP/" grits.yml
sed -i "s/image: grits$/image: grits-provisioned/" grits.yml
sed -i "/ \- \/mnt\/grits:\/home\/grits/d" grits.yml
#Instantiate a new grits container
docker-compose -f grits.yml up -d
#Change all mongo references to use new local ip address
docker exec -t grits find /var/lib/mongo/grits/ -type f -exec sed -i -r "s/mongodb\:\/\/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b\:27017/mongodb\:\/\/$LOCAL_IP:27017/" {} \;
docker exec -t grits find /home/grits/ -type f -exec sed -i -r "s/mongodb\:\/\/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b\:27017/mongodb\:\/\/$LOCAL_IP:27017/" {} \;
docker exec -t grits find /etc/supervisor/conf.d/ -type f -exec sed -i -r "s/mongodb\:\/\/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b\:27017/mongodb\:\/\/$LOCAL_IP:27017/" {} \;
docker exec -t grits sed -i -r "s/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b:27017/$LOCAL_IP:27017/" /home/grits/diagnostic-dashboard/.scripts/autocompleteCollections.js
docker exec -t grits sed -i -r "s/ROOT_URL=\"http\:\/\/grits.eha.io\"/ROOT_URL=\"http\:\/\/$LOCAL_IP\"/" /etc/supervisor/conf.d/dashboardd.conf
#Restart all the services
docker kill grits
docker start grits
echo "*****************************************************************************************"
echo "Please update with your own bsve credentials at /home/grits/grits-api/config.py"
echo "Afterwards please restart the container."
echo "Grits app will be available at http://$LOCAL_IP/new?compact=true&bsveAccessKey=loremipsumhello714902&hideBackButton=true"
echo "*****************************************************************************************"
| true
|
e7b5cdc1b967ef31fad7bff5fe76e22b31b12997
|
Shell
|
chapeaute/playbooks
|
/freeipa-client/templates/usr/local/sbin/ipa-user-migrate.j2
|
UTF-8
| 1,725
| 3.953125
| 4
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
REALM="{{ freeipa_realm }}"
if [ $# -ne 1 ]; then
echo "Script must take 1 argument. $0 username" >&2
exit 1
fi
# User name
user=$1
# Get old user uid and gid
old_uid=$(getent -s files passwd $1 | awk -F: '{print $3}')
old_gid=$(getent -s files passwd $1 | awk -F: '{print $4}')
# Get user main group
group=$(id -gn $user 2>/dev/null)
# Get new user uid and gid
new_uid=$(getent -s sss passwd $user@$REALM | awk -F: '{print $3}')
new_gid=$(getent -s sss passwd $user@$REALM | awk -F: '{print $4}')
if [ -z "$old_uid" -a -z "$new_uid" ]; then
echo "Couldn't find local user '$user'. Check if user exist in local. Abording account migration..." >&2
exit 1
elif [ -n "$old_uid" -a -z "$new_uid" ] ; then
echo "Couldn't find '$user' for realm '$REALM'. Check if '$user' is declared in FreeIPA. Abording account migration..." >&2
exit 1
elif [ -z "$old_uid" -a -n "$new_uid" ]; then
echo "Couldn't find local user '$user'. User seems to be already migrate!"
exit 0
fi
w -h | awk '{print $1}' | sort -n | uniq | grep opsu > /dev/null
if [ $? -eq 0 ]; then
echo "User '$user' already login, can't migrate account! Abording..." >&2
exit 1
fi
ps -u $user -o user= > /dev/null
if [ $? -eq 0 ]; then
echo "Some process are running for User '$user'. Stop it before migrating account! Abording..." >&2
exit 1
fi
echo "Change UID from $old_uid to $new_uid ($user)"
find / -uid $old_uid -exec chown -h $new_uid {} \+ 2>/dev/null
echo "Change GID from $old_gid to $new_gid ($user)"
find / -gid $old_gid -exec chgrp -h $new_gid {} \+ 2>/dev/null
echo "Remove local user $user"
userdel $user
if [ $? -ne 0 ]; then
echo "Failed to remove user $user" 2>&1
exit 1
fi
| true
|
4b079501c3cbc54eccc79ced838715f1d07bd9b0
|
Shell
|
streamich/fasttrack
|
/scripts/jest.sh
|
UTF-8
| 3,115
| 4.125
| 4
|
[
"Unlicense",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# Install Jest.
DIR_SRC="src" # Source folder.
DIR_DIST="lib" # Distribution folder.
JEST_CONFIG_FILE="jest.config.js"
JEST_TEST_DIR="$DIR_SRC/__tests__"
JEST_SETUP_FILENAME="setup.js"
JEST_TS=false # Whether to add TypeScript support.
HELP=false
# Fetch CLI named params.
POSITIONAL=()
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
--jest-config-file)
JEST_CONFIG_FILE="$2"
shift # past argument
shift # past value
;;
--jest-test-dir)
JEST_TEST_DIR="$2"
shift # past argument
shift # past value
;;
--jest-setup-filename)
JEST_SETUP_FILENAME="$2"
shift # past argument
shift # past value
;;
--jest-ts)
JEST_TS=true
shift # past argument
;;
--dir-src)
DIR_SRC="$2"
shift # past argument
shift # past value
;;
--dir-dist)
DIR_DIST="$2"
shift # past argument
shift # past value
;;
-h|--help)
HELP=true
shift # past argument
;;
*)
shift # in case of unknown argument
;;
esac
done
set -- "${POSITIONAL[@]}"
if [ "$HELP" = true ] ; then
echo "fasttrack jest [options]"
echo ""
echo " Install Jest"
echo ""
echo "Options"
echo " --jest-config-file config file name, defaults to jest.config.js"
echo " --jest-test-dir main testing folder, defaults to 'src/__tests__'"
echo " --jest-setup-filename runtime setup file, defaults to 'setup.js'"
echo " --jest-ts boolean flag, if set, will install TypeScript transform"
echo " --dir-src source folder, defaults to 'src'"
echo " --dir-dist dist folder to be added to Git ignore, defaults to 'lib'"
echo " -h, --help show this output"
exit 0
fi
echo "Adding Jest."
echo "Installing Jest dependencies."
yarn add --dev @types/jest jest
if [ "$JEST_TS" = true ] ; then
echo "Installing Jest TypeScript dependencies."
yarn add --dev ts-jest
fi
TS_TRANSFORM=""
if [ "$JEST_TS" = true ] ; then
TS_TRANSFORM=$(cat <<-END
transform: {
'^.+\\.tsx?$': 'ts-jest',
},
END
)
fi
echo "Creating Jest test directory."
mkdir -p $JEST_TEST_DIR
echo "Creating Jest runtime setup file."
cat >$JEST_TEST_DIR/$JEST_SETUP_FILENAME <<EOL
// Jest setup.
process.env.JEST = true;
EOL
echo "Add Jest sample test."
cat >$JEST_TEST_DIR/index.spec.ts <<EOL
xit('Jest working', () => {});
EOL
echo "Writing Jest config to $JEST_CONFIG_FILE."
cat >$JEST_CONFIG_FILE <<EOL
module.exports = {
verbose: true,
testURL: 'http://localhost/',
setupFiles: ['<rootDir>/${JEST_TEST_DIR}/setup.js'],
moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx'],
${TS_TRANSFORM}
transformIgnorePatterns: [],
testRegex: '.*/__tests__/.*\\.(test|spec)\\.(jsx?|tsx?)$',
};
EOL
# Check climod-add-script is installed.
if ! [ -x "$(command -v climod-add-script)" ]; then
yarn global add climod-add-script
fi
# Add scripts to `package.json`.
climod-add-script --name=test --cmd="jest --no-cache --config='$JEST_CONFIG_FILE'"
| true
|
40265eb3e7de7b6a25522f1109328c3012a5477a
|
Shell
|
apple-open-source/macos
|
/NFS/nfs4mapid/nfs4mapid-tester.sh
|
UTF-8
| 4,296
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
ODNODE=nod.apple.com
DF_USERS=bricker
DF_GROUPS=apple_sw
WKIDS="OWNER@ GROUP@ EVERYONE@ INTERACTIVE@ NETWORK@ DIALUP@ BATCH@ ANONYMOUS@ AUTHENTICATED@ SERVICE@"
NFS4_CURRENT_DOMAIN=$(sysctl -n vfs.generic.nfs.client.default_nfs4domain)
function set_nfs4_domain {
typeset domain=$(sysctl -n vfs.generic.nfs.client.default_nfs4domain)
if [[ $domain == $1 ]]; then
return 0
fi
sudo sysctl -w vfs.generic.nfs.client.default_nfs4domain=$1 || {
echo "[INFO] test aborted"
echo "[FAIL] Could not set nfs4 default domain"
return 1
}
}
function getid_and_guid {
unset ID
unset GUID
eval $(sudo nfs4mapid $* 2>/dev/null | awk 'NR == 1 { printf "ID=%s; ", $NF} NR == 2 { print "GUID=" $NF}')
}
function getmapid {
sudo nfs4mapid $* | awk '{print $NF}'
}
function testname {
typeset SID SGUID SNAME STATUS=0 SNAME2 IDOPT="-u" OPT STATUS=0
if [[ $1 == "-g" ]]; then
IDOPT=$1
OPT="-G"
shift
fi
SNAME=$1
getid_and_guid $OPT $SNAME
SID=$ID; SGUID=$GUID
if [[ $ID == -2 && $SNAME != "nobody" ]]; then
echo "[INFO] test aborted"
echo "[INFO] $SNAME does not map"
return 1
fi
# Now check the reverse mapping
getid_and_guid $IDOPT $ID
if [[ $SNAME != $ID || $GUID != $SGUID ]]; then
echo "[INFO] $SID maps to $ID not $SNAME and/or $GUID does not map to $SGUID"
STATUS=1
fi
# Check that the we get the user/group from the guid mapping
SNAME2=$(getmapid $OPT $GUID)
if [[ $SNAME != $SNAME2 ]]; then
echo "[INFO] $GUID maps to $SNAME2 not $SNAME"
STATUS=1
fi
return $STATUS
}
function testid {
typeset SNAME SGUID STATUS=0 SNAME2 IDOPT="-u" OPT STATUS=0
if [[ $1 == "-g" ]]; then
IDOPT=$1
OPT="-G"
shift
fi
NID=$1
getid_and_guid $IDOPT $NID
SNAME=$ID; SGUID=$GUID
if [[ $SNAME == $NID ]]; then
echo "[INFO] $NID does not map to a name"
fi
# Now check the reverse mapping
getid_and_guid $OPT $SNAME
if [[ $NID != $ID || $GUID != $SGUID ]]; then
echo "[INFO] $SNAME maps to $ID not $NID and/or $GUID does not map to $SGUID"
STATUS=1
fi
# Check that the we get the user/group from the guid mapping
SNAME2=$(getmapid $OPT $GUID)
if [[ $SNAME != $SNAME2 ]]; then
echo "[INFO] $GUID maps to $SNAME2 not $SNAME"
STATUS=1
fi
return $STATUS
}
function status
{
local stat=$?
if [[ $stat == 0 ]]; then
echo "[PASS] $1"
else
echo "[FAIL] $1"
fi
return $stat
}
function testwellknowns {
typeset i STATUS=0 SNAME GUID TNAME
for i in $WKIDS
do
TNAME="Testing wellknown id $i"
echo "[BEGIN] $TNAME"
GUID=$(getmapid -G $i)
SNAME=$(getmapid -G $GUID)
if [[ $i != $SNAME ]]; then
echo "[INFO] $i maps to $GUID, but that maps to $SNAME"
false
fi
status "$TNAME" || STATUS=1
done
return $STATUS
}
function testusers {
typeset i STATUS=0 NODE TNAME
NODE=${1:-$ODNODE}
shift
for i in $@
do
if [[ ${i##*@} == $i ]]; then
i=$i@$NODE
fi
TNAME="Testing user $i"
echo "[BEGIN] $TNAME"
testname $i
status "$TNAME" || STATUS=1
done
return $STATUS
}
function testgroups {
typeset i STATUS=0 TNAME
NODE=${1:-$ODNODE}
shift
for i in $@
do
if [[ ${i##*@} == $i ]]; then
i=$i@$NODE
fi
TNAME="Testing group $i"
echo "[BEGIN] $TNAME"
testname -g $i
status "$TNAME" || STATUS=1
done
return $STATUS
}
function testdomain {
typeset STATUS=0
set_nfs4_domain $1 || return 1
testusers "$1" $XUSERS || STATUS=1
testgroups "$1" $XGROUPS || STATUS=1
testwellknowns || STATUS=1
return $STATUS
}
function Usage {
echo ${0##*/} [-h] [[-U user] ...] [[-G group] ...] [-D NFS4DOMAIN] [-d ODNODE]
exit 1
}
while getopts "hU:u:G:g:D:d:" opt
do
case $opt in
U) XUSERS="$USERS $OPTARG";;
G) XGROUPS="$GROUPS $OPTARG";;
D) NFS4DOMAIN=$OPTARG;;
d) ODNODE=$OPTARG;;
*) Usage;;
esac
done
XUSERS=${XUSERS:-$DF_USERS}
XGROUPS=${XGROUPS:-$DF_GROUPS}
shift $(($OPTIND-1))
if (( $# > 0)); then
Usage
fi
STATUS=0
testdomain || STATUS=1
if [[ -n $NFS4DOMAIN ]]; then
testdomain $NFS4DOMAIN || STATUS=1
fi
if [[ -n $NFS4_CURRENT_DOMAIN && $NFS4_CURRENT_DOMAIN != $NFS4DOMAIN ]]; then
testdomain $NFS4_CURRENT_DOMAIN || STATUS=1
fi
exit $STATUS
| true
|
676ca033c14adff7de07bcc46e9ca58886e90694
|
Shell
|
jvsr/TermStart
|
/includes/string.bash
|
UTF-8
| 182
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Exit code used by the string library for usage errors.
STRINGERROR=11

# strlen STRING
# Prints the number of characters in STRING; exits with $STRINGERROR when
# called with anything other than exactly one argument.
strlen () {
    if [ $# -ne 1 ]; then
        echo -e "\nError: strlen(string s): Incorrect number of parameters"; exit $STRINGERROR
    fi
    # BUGFIX: the original piped ${#1} through `wc -m`, which counted the
    # DIGITS of the length (strlen "hello" printed 1, not 5).  ${#1} already
    # is the character count, so print it directly.
    echo "${#1}"
}
| true
|
fb68a89c96a6e467b471da71c50502183824b496
|
Shell
|
qinghuise/test
|
/on_line.sh
|
UTF-8
| 351
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy a new mimidai-cms build:
#   1. unzip the release archive ($1) into a staging directory,
#   2. refresh WEB-INF/classes from the currently deployed app,
#   3. swap the staging dir into place, keeping a timestamped backup.
cd /data/ || exit 1
unzip "$1" -d /data/test_mimidai
# Guard the cd: the original ran `rm -f ./*` unchecked after it, which would
# wipe the CURRENT directory if the cd had failed.
cd /data/test_mimidai/WEB-INF/classes || exit 1
rm -f ./*
dir_num=$(ls | wc -l)
if [ "$dir_num" -eq 2 ]; then
    cp /var/www/mimidai-cms/WEB-INF/classes/* ./
else
    # BUGFIX: the original read "else:" (a Python-ism).  bash parsed that as
    # a command named "else:" inside the then-branch, so `exit 1` ran
    # unconditionally after the cp and the deployment never completed.
    exit 1
fi
mv /var/www/mimidai-cms /var/www/mimidai-cms-$(date +%Y%m%d-%H%M%S)
mv /data/test_mimidai /var/www/mimidai-cms
# BUGFIX: removed the stray trailing garbage line ("sdffsdfsdfsfs"), which
# always failed with "command not found" and made the script exit non-zero.
| true
|
280e494207e34527ab398f9ad9cad33377feccda
|
Shell
|
istioinaction/book-source-code
|
/bin/query-catalog.sh
|
UTF-8
| 2,198
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
help () {
cat <<EOF
This script is a collection of request that are used in the book.
Below is a list of arguments and the requests that those make:
- 'get-items' Continuous requests that print the response status code
- 'random-agent' Adds either chrome or firefox in the request header.
Usage: ./bin/query-catalog.sh status-code
EOF
exit 1
}
TYPE=$1
case ${TYPE} in
get-items-cont)
echo "#" curl -s -H \"Host: catalog.istioinaction.io\" -w \"\\nStatus Code %{http_code}\" localhost/items
echo
sleep 2
while :
do
curl -s -H "Host: catalog.istioinaction.io" -w "\nStatus Code %{http_code}\n\n" localhost/items
sleep .5
done
;;
get-items)
echo "#" curl -s -H \"Host: catalog.istioinaction.io\" -w \"\\nStatus Code %{http_code}\" localhost/items
echo
curl -s -H "Host: catalog.istioinaction.io" -w "\nStatus Code %{http_code}" localhost/items
;;
random-agent)
echo "== REQUEST EXECUTED =="
echo curl -s -H "Host: catalog.istioinaction.io" -H "User-Agent: RANDOM_AGENT" -w "\nStatus Code %{http_code}\n\n" localhost/items
echo
while :
do
useragents=(chrome firefox)
agent=${useragents[ ($RANDOM % 2) ]}
curl -s -H "Host: catalog.istioinaction.io" -H "User-Agent: $agent" -w "\nStatus Code %{http_code}\n\n" localhost/items
sleep .5
done
;;
delayed-responses)
CATALOG_POD=$(kubectl get pods -l version=v2 -n istioinaction -o jsonpath={.items..metadata.name} | cut -d ' ' -f1)
if [ -z "$CATALOG_POD" ]; then
echo "No pods found with the following query:"
echo "-> kubectl get pods -l version=v2 -n istioinaction"
exit 1
fi
kubectl -n istioinaction exec -c catalog $CATALOG_POD \
-- curl -s -X POST -H "Content-Type: application/json" \
-d '{"active": true, "type": "latency", "latencyMs": 1000, "volatile": true}' \
localhost:3000/blowup
;;
*)
help
;;
esac
| true
|
1f34285e34e00cfa5781f8366a48fe700ead06eb
|
Shell
|
CommissarCletus/dotfiles
|
/shell/aliases
|
UTF-8
| 313
| 2.671875
| 3
|
[] |
no_license
|
# vim: set syntax=sh
# Enable colourised ls output by default.
alias ls="ls --color=auto"
# Wrap man(1) so less renders headings/emphasis in colour: md/me start/stop
# bold, so/se start/stop standout (status bar, search hits), us/ue start/stop
# underline.  `command man` bypasses this function to call the real binary.
man() {
LESS_TERMCAP_md=$'\e[01;36m' \
LESS_TERMCAP_me=$'\e[0m' \
LESS_TERMCAP_se=$'\e[0m' \
LESS_TERMCAP_so=$'\e[01;32m' \
LESS_TERMCAP_ue=$'\e[0m' \
LESS_TERMCAP_us=$'\e[01;26m' \
command man "$@"
}
| true
|
b6db430d86fcadfe6b2922459c9aa5651f16abb1
|
Shell
|
thunder/docker-thunder-performance
|
/build.sh
|
UTF-8
| 1,918
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
#
# Build thunder performance docker image
TAG_NAME=""
PROJECT_PATH=""
PROFILE="thunder"
THUNDER_TEST_GROUP="Thunder_Base_Set"
# Process script options
while [ -n "$1" ]; do
case "$1" in
--tag)
TAG_NAME="$2"
shift
;;
--project-path)
# Check if correct directory path is provided
if [ ! -d "$2" ]; then
echo "Provided project path is not a directory."
exit 1
fi
PROJECT_PATH="$2"
shift
;;
--profile)
PROFILE="$2"
shift
;;
--test-group)
THUNDER_TEST_GROUP="$2"
shift
;;
*) echo "Option $1 not recognized." ;;
esac
shift
done
SCRIPT_DIRECTORY="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Copy Thunder project to Dockerfile context if project path is provided
if [ "${PROJECT_PATH}" != "" ]; then
rm -rf "${SCRIPT_DIRECTORY}/www"
cp -R "${PROJECT_PATH}" "${SCRIPT_DIRECTORY}/www"
fi
if [ "${PROFILE}" != "thunder" ]; then
ADDITIONAL_DEPS=("drush/drush" "thunder/thunder_performance_measurement" "thunder/testsite_builder")
# Compose project to ensure dependencies are correct.
cd "${SCRIPT_DIRECTORY}/www"
COMPOSER_MEMORY_LIMIT=-1 composer require "${ADDITIONAL_DEPS[@]}"
cd "${SCRIPT_DIRECTORY}"
fi
# Call composer install to fix the bin symlinks
# Note: do not use -d on composer as it can end up reverting changes.
cd "${SCRIPT_DIRECTORY}/www"
composer install
cd "${SCRIPT_DIRECTORY}"
# Remove all git info for smaller docker images.
find "${SCRIPT_DIRECTORY}/www" -type d -name ".git" -print0 | xargs -0 rm -rf
# Build docker image
docker build --build-arg PROFILE="${PROFILE}" --build-arg THUNDER_TEST_GROUP="${THUNDER_TEST_GROUP}" "${SCRIPT_DIRECTORY}" --tag "${TAG_NAME}"
| true
|
a843aba45f208c40f9e3072a03f537add21cbc2f
|
Shell
|
WMFO/wmfo-covid-broadcasting-script
|
/play_at_scheduled_time.sh
|
UTF-8
| 1,372
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
if [ -z ${1+x} ] || [ -z ${2+x} ] ; then
echo "Usage: $0 [track-to-play] [target-date]"
exit 1
fi
target_date=$2
echo "testing date validity:"
date -d "$target_date"
if [ $? -ne 0 ] ; then
echo error parsing date
exit 1
else
echo date successfully parsed!
fi
target_file=$1
if [ -f "$target_file" ] ; then
echo target_file exists! now to calculate sleep time...
else
echo "target_file doesn't exist :( exiting..."
exit 1
fi
filename=$(basename -- "$target_file")
extension="${filename##*.}"
if [ "$extension" == "mp3" ] || [ "$extension" == "wav" ] ; then
echo "I can play file with extension $extension"
else
echo "Cannot play $extension"
exit 1
fi
current_epoch=$(date +%s)
target_epoch=$(date -d "$target_date" +%s)
# Number of seconds until the scheduled start (negative if it is in the past).
sleep_seconds=$(( target_epoch - current_epoch ))
# BUGFIX: the original tested [ $sleep_seconds > 0 ]; inside single brackets
# ">" is output REDIRECTION, so this created a file named "0" and the test
# was always true — `sleep` then errored on negative values for past dates.
if [ "$sleep_seconds" -gt 0 ] ; then
  echo "Sleeping $sleep_seconds seconds until the scheduled show start..."
  sleep "$sleep_seconds" # Wait that number of seconds
fi
/opt/wmfo/macro_sh/axia_manipulate.sh 192.168.0.110 7 13500 > /dev/null # Switch to Studio C
if [ "$extension" == "mp3" ] ; then
/usr/bin/mpg123 $target_file
elif [ "$extension" == "wav" ] ; then
/usr/bin/aplay $target_file # Play a song
fi
/opt/wmfo/macro_sh/axia_manipulate.sh 192.168.0.110 7 10500 > /dev/null # Switch back to Studio A when done
exit 0
| true
|
2004b4701c246d8030b4666fc28cb49c3291ec31
|
Shell
|
staalmannen/hawaii-pkgbuilds
|
/hawaii-terminal-git/PKGBUILD
|
UTF-8
| 747
| 2.703125
| 3
|
[] |
no_license
|
# Maintainer: Pier Luigi Fiorini <pierluigi.fiorini@gmail.com>
pkgname=hawaii-terminal-git
pkgver=0.0.0
pkgrel=1
pkgdesc="The Hawaii Terminal Emulator"
arch=('i686' 'x86_64')
url="http://www.maui-project.org"
license=('GPL2')
depends=('qt5-declarative' 'qt5-quickcontrols' 'yat-git')
makedepends=('git' 'cmake')
options=('debug')
_gitroot="git://github.com/mauios/hawaii-terminal.git"
_gitbranch=master
_gitname=terminal
source=(${_gitname}::${_gitroot}#branch=${_gitbranch})
md5sums=('SKIP')
pkgver() {
cd ${srcdir}/${_gitname}
git describe --always | sed 's|-|.|g'
}
prepare() {
mkdir -p build
}
build() {
cd build
cmake ../${_gitname} \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_BUILD_TYPE=RelWithDebInfo
}
package() {
cd build
make DESTDIR="${pkgdir}" install
}
| true
|
a8dee2c4a8ebdfed5f7d5a1e58508cea61d84042
|
Shell
|
kaitai-io/kaitai_struct_tests
|
/ci-lua
|
UTF-8
| 517
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# CI entry point for the Lua target of the test suite.
# Reads shared settings (TEST_OUT_DIR, LUA_RUNTIME_DIR, ...) from ./config.
. ./config
LUA_OUT_DIR="${TEST_OUT_DIR}/lua"
# Start each run from a clean per-target output directory.
rm -rf "${LUA_OUT_DIR}"
mkdir -p "${LUA_OUT_DIR}"
# Extend Lua's module search path with the runtime, the specs and their
# helpers, and the compiled test formats.
export LUA_PATH="$LUA_PATH;$LUA_RUNTIME_DIR/?.lua;spec/lua/?.lua;spec/lua/extra/?.lua;compiled/lua/?.lua;;"
# Add `lua_install` dir to PATH, as this is where hererocks installs Lua at CI
export PATH=$PATH:$PWD/../lua_install/bin
# Run the suite with a JUnit XML report, then post-process the results.
lua spec/lua/run_test_suite.lua --output junit --name "${LUA_OUT_DIR}/report"
./kst-adoption-report lua
aggregate/convert_to_json lua "${LUA_OUT_DIR}" "${LUA_OUT_DIR}/ci.json"
| true
|
ec9bf69b4f5c2fad6f5bb30218af6c6bce042835
|
Shell
|
g0v/tw-rental-house-data
|
/crawler/gobg.sh
|
UTF-8
| 159
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launch go.sh detached from the terminal, logging to a timestamped file.
# Resolve the directory this script lives in so it works from any cwd.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Quote and check the cd (the original ran `cd $DIR` unquoted and unchecked,
# which breaks on paths containing spaces and then runs go.sh from the wrong
# directory).
cd "$DIR" || exit 1
mkdir -p ../logs
# setsid detaches the job from the controlling terminal; $(...) replaces the
# legacy backtick command substitution.
setsid ./go.sh >> "../logs/$(date +'%Y.%m.%d.%H%M').go.log" 2>&1 &
| true
|
dcefd11df5b2c514bd29615dfd414219cc789760
|
Shell
|
njharman/dotfiles
|
/bin/g
|
UTF-8
| 230
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Open the Nth "sack" search result in vim.
#   $1 - result number, i.e. the line to pick from ~/.sack_shortcuts.
# Each shortcut line has the form "<line-number> <file-path>".
sack__vim_shortcut=$(sed -n "$1p" < /home/njharman/.sack_shortcuts)
# First space-separated field: the line number inside the target file.
sack__line=`echo $sack__vim_shortcut | cut -d" " -f1`
# Remainder of the line: the file path (strip the leading "<line> " prefix;
# this also preserves any spaces inside the path).
sack__file=`echo $sack__vim_shortcut | sed 's/'$sack__line' //'`
vim +$sack__line "$sack__file"
| true
|
d399f209950e4b5101f4b76fd959c72be0ea82d6
|
Shell
|
gina-alaska/emodis_ndvi_python
|
/scripts/ver-for-docker/1yr_emodis_250_download_withauthoration_py.bash
|
UTF-8
| 517
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Download one year of eMODIS 250m (QKM) NDVI zip archives via wget.
# Usage: 1yr_emodis_250_download.bash <dir_data> <year>
# Requires $raw_data_url to be set in the environment by the caller.
if [ $# != 2 ]; then
echo "Usage:1yr_emodis_250_download.bash dir_data year"
exit 1
fi
#url=http://dds.cr.usgs.gov/emodis/Alaska/historical/TERRA
url=$raw_data_url
dir_data=$1
year=$2
# Skip the download if raw data for this year was already fetched.
# NOTE(review): the *.zip glob expands inside [ ]; with more than one zip
# present this becomes a multi-argument test and [ errors out (still skipping
# the download, but by accident, not by design) — confirm intent.
if [ ! -f ${dir_data}/${year}/*.zip ]; then
mkdir -p ${dir_data}/${year}
cd ${dir_data}/$year
# NOTE(review): credentials are hard-coded below; consider moving them to
# ~/.netrc or environment variables instead of embedding a password here.
wget --user jiang@gina.alaska.edu --password Gina7Zhu -r -nd -np -nH --reject="index.html*" -A "*NDVI*QKM*.zip" $url/$year .
fi
exit 0
| true
|
7746da4a8faf4f9facd5a9964d09b23c954235d9
|
Shell
|
akalinkin/dotfiles
|
/.scripts/zenmode.sh
|
UTF-8
| 2,541
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
# This script toggle Zen mode
STATUSFILE="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.zenmodestatus"
usage="$(basename "$0") [-h] [-m n] -- toggle or enable/disable terminal ZenMode
where:
-h show this help text
-m set mode (0 - disabled, 1 - enabled)"
# Process args
MODE=-1
while getopts :hm: option; do
case "${option}" in
h) echo "$usage"; exit;;
m) MODE=${OPTARG};;
:) printf "missing argument for -%s\n" "$OPTARG" >&2
echo "$usage" >&2
exit 1
;;
\?) printf "illegal option: -%s\n" "$OPTARG" >&2
echo "$usage" >&2
exit 1
;;
esac
done
# toggle FullScreen (require wmctrl)
toggle_full_screen() {
wmctrl -r ':ACTIVE:' -b toggle,fullscreen
}
# disable arrow keys
disable_arrow_kews() {
xmodmap -e "keycode 111 = "
xmodmap -e "keycode 113 = "
xmodmap -e "keycode 114 = "
xmodmap -e "keycode 116 = "
}
# enable arrow keys
enable_arrow_keys() {
xmodmap -e "keycode 111 = Up NoSymbol Up NoSymbol Up"
xmodmap -e "keycode 113 = Left NoSymbol Left NoSymbol Left"
xmodmap -e "keycode 114 = Right NoSymbol Right NoSymbol Right"
xmodmap -e "keycode 116 = Down NoSymbol Down NoSymbol Down"
}
disable_mouse() {
# xinput set-prop 10 "Device Enabled" 0 # Ubuntu 18.04
xinput set-prop 14 "Device Enabled" 0 # Debian Stretch
}
enable_mouse() {
xinput set-prop 14 "Device Enabled" 1
}
enable_mode() {
disable_arrow_kews
disable_mouse
toggle_full_screen
notify-send -u low -t 1500 -a DOTFILES_SCRIPT "PowerUser" "ZenMode <b>Enabled</b>
\r
\r<i>ArrowKeys:</i> disabled
\r<i>Mouse:</i> disabled
\r<i>Fullscreen:</i> enabled"
}
disable_mode() {
enable_arrow_keys
enable_mouse
toggle_full_screen
notify-send -u low -t 1500 -a DOTFILES_SCRIPT "PowerUser" "ZenMode <b>Disabled</b>
\r
\r<i>ArrowKeys:</i> enabled
\r<i>Mouse:</i> enabled
\r<i>Fullscreen:</i> disabled"
}
if [ "$MODE" = -1 ]; then
# TODO: Echo line below only in Verbose mode
# echo "ZenMode param was not passed. Try to get from saved state"
if [ ! -w "$STATUSFILE" ]; then
MODE=1
else
MODE=`cat $STATUSFILE`
# toggle mode
if [ "$MODE" = 1 ]; then MODE=0; else MODE=1;fi
fi
elif [ "$MODE" = 0 ] || [ "$MODE" = 1 ]; then
# it's alright the value in acceptable range
# TODO: Echo line below only in Verbose mode
echo "Correct -m option value"
else
echo "Incorrect -m option value"
echo "$usage" >&2
exit -1
fi
echo "$MODE" > $STATUSFILE
if [ "$MODE" = 0 ]; then disable_mode; fi
if [ "$MODE" = 1 ]; then enable_mode; fi
| true
|
01c79d3ab964e5f538d9dbff5b05983822c6c4a7
|
Shell
|
iangfernandes96/web-app
|
/scripts/restore
|
UTF-8
| 1,013
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
DB_NAME="outreach"
DB_PASS="root"
VERSION="v0.2.1"
BACKUP_FILE=$1
# Absolute path to the scripts directory
SCRIPTS_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
DOC_FOLDER=$(cd ../src/static && pwd)
usage() {
echo "Usage:"
echo "$0 <path/to/the/backup/file.tar>"
}
if [ -z $BACKUP_FILE ]; then
echo "Error: No arguments were passed!!"
echo "Error: The path to the backup file must be passed."
usage
exit 1;
fi
# untar the $BACKUP_FILE file
tar -xvf $BACKUP_FILE
if [ $? -ne 0 ]; then
echo "Error: Something went wrong while untaring."
echo "Aborting restore."
exit 1;
fi
# stop the services before restoring..
#$SCRIPTS_DIR/stop
# restore from the dump folder
mysql -u root -p$DB_PASS $DB_NAME < $DB_NAME.sql
if [ $? -ne 0 ]; then
echo "Error: Something went wrong while restoring db dump."
echo "Aborting restore."
exit 1;
fi
# start back the services
$SCRIPTS_DIR/start
mv var/www/src/static/uploads /var/www/src/static
echo "Restore successful."
exit 0;
| true
|
eb0922be5c77f9140fdb9ae4f8a338cc6d7a0257
|
Shell
|
supahrods/urs-sftp
|
/new_edit/1-job/urs_d_LongDurationCalls2.sh
|
UTF-8
| 31,079
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
#----------------------------------------------------------
# Author : John Rodel Villa
# Date : March 7, 2019
# Version : 1.4
#
# Description : Processing of file entries according to cases
# Case1 : File entries with missing last aCR 'true,1,false'
# Case2 : File entries with missing initial aCR 'false,2,true'
# Case3 : File entries with missing middle aCR 'true,1,true'
# Case4 : File entries with a one line initial aCR only
# Case5 : File entries with a one line middle aCR only
# Case6 : File entries with a one line last aCR only
# Case7/possible_success: The call identifier of the entry is already present in one of the above/below cases (not counting error case)
# Case8/error_case : File entries where aCR is not part of the defined business cases
# Case9 : File entries with missing initial (false,2,true) and last (true,1,false) aCR
#
#----------------------------------------------------------
# Revision History:
#
# Version: 1.5
# Author: John Rodel Villa
# Date: April 26, 2019
# Description: Change wording of logs during merging process
#
# Version: 1.4
# Author: John Rodel Villa
# Date: March 7, 2019
# Description: Updated script to reflect URS phase 2 changes
#
# Version: 1.3
# Author: Joussyd M. Calupig
# Date: February 4, 2019
# Description: Updated path/directories and Headers
#----------------------------------------------------------
# Source configuration file for variables
source /appl/urpadm/conf/urs_d_LDC.conf;
# Logging start of LDC processing
echo "$(date "+%F %H:%M"): Processing of LDC files will start..." >> $LOG_DIR/$NAMING_CONVENTION2.log;
# Process Case1 (File entries with missing last aCR 'true,1,false')
echo "$(date "+%F %H:%M"): Case1 processing has started..." >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case1 files
for i in $(ls $CASE1_DIR/ | grep .*.ftr$); do # Check each file in CASE1_DIR that has a file extension of .ftr
touch -m $CASE1_DIR/$i; # Update file timestamp
CALL_IDENTIFIER=$(head -n1 $CASE1_DIR/$i 2> /dev/null | cut -f31 -d,); # Save the call identifier in a variable
if [ $(stat -c %Y $CASE1_DIR/$i) -lt $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then # Check if file has reached aging time
# If aging is not yet reached, the block below will check if call has complete file entries and move every file to the SUCCESS_DIR
if grep -qr $CALL_IDENTIFIER $POSSIBLE_SUCCESS; then
if grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f28-30 -d, | grep -q "true,1,false"; then
for a in $(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq); do
mv $a $SUCCESS_DIR;
done;
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE1_DIR/$i $SUCCESS_DIR;
fi;
fi;
elif [ $(stat -c %Y $CASE1_DIR/$i) -ge $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then # Check if file has reached aging time
# If aging has been reached, the block below will try to consolidated entries for the same identifier, sort them,
# and do the editing for Case1. Move the edited file to SUCCESS_DIR
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS $CASE1_DIR | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs cat 2> /dev/null | awk NF >> $CASE1_DIR/placeholder_$CALL_IDENTIFIER;
sort -t, -k25 -V $CASE1_DIR/placeholder_$CALL_IDENTIFIER > $CASE1_DIR/$i;
rm $CASE1_DIR/placeholder_$CALL_IDENTIFIER 2> /dev/null;
sed -i "$ s/true,1,true,$(tail -n1 $CASE1_DIR/$i | cut -f31 -d,)/true,1,false,$(tail -n1 $CASE1_DIR/$i | cut -f31 -d,)/" $CASE1_DIR/$i;
for a in $(nl $CASE1_DIR/$i | cut -c6); do
sed -i "$a s/$(sed -n ${a}p $CASE1_DIR/$i | cut -f25-31 -d,)/$a,$(sed -n ${a}p $CASE1_DIR/$i | cut -f26-31 -d,)/" $CASE1_DIR/$i;
done;
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE1_DIR/$i $SUCCESS_DIR;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "The following files' contents have been merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs echo >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "Total number of records: $(cat $SUCCESS_DIR/$i 2> /dev/null | wc -l)" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "$(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | wc -l) file/s merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs -I {} mv {} $MERGE_DIR 2> /dev/null;
fi;
done;
echo "$(date "+%F %H:%M"): Case1 processing has finished" >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case1 files
# Process Case2 (File entries with missing initial aCR 'false,2,true')
echo "$(date "+%F %H:%M"): Case2 processing has started..." >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case2 files
for i in $(ls $CASE2_DIR/ | grep .*.ftr$); do # Check each file in CASE2_DIR that has a file extension of .ftr
touch -m $CASE2_DIR/$i; # Update file timestamp
CALL_IDENTIFIER=$(head -n1 $CASE2_DIR/$i 2> /dev/null | cut -f31 -d,); # Save the call identifier in a variable
if [ $(stat -c %Y $CASE2_DIR/$i) -lt $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then # Check if file has reached aging time
# If aging is not yet reached, the block below will check if call has complete file entries and move every file to the SUCCESS_DIR
if grep -qr $CALL_IDENTIFIER $POSSIBLE_SUCCESS; then
if grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f28-30 -d, | grep -q "false,2,true"; then
for a in $(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq); do
mv $a $SUCCESS_DIR;
done;
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE2_DIR/$i $SUCCESS_DIR;
fi;
fi;
elif [ $(stat -c %Y $CASE2_DIR/$i) -ge $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then # Check if file has reached aging time
# If aging has been reached, the block below will try to consolidated entries for the same identifier, sort them,
# and do the editing for Case2. Move the edited file to SUCCESS_DIR
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS $CASE2_DIR | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs cat 2> /dev/null | awk NF >> $CASE2_DIR/placeholder_$CALL_IDENTIFIER;
sort -t, -k25 -V $CASE2_DIR/placeholder_$CALL_IDENTIFIER > $CASE2_DIR/$i;
rm $CASE2_DIR/placeholder_$CALL_IDENTIFIER 2> /dev/null;
sed -i "1 s/true,1,true,$(head -n1 $CASE2_DIR/$i | cut -f31 -d,)/false,2,true,$(head -n1 $CASE2_DIR/$i | cut -f31 -d,)/" $CASE2_DIR/$i;
# FIX: was 'for a in $(nl $CASE2_DIR/$i | cut -c6)'. 'cut -c6' extracts a single
# character, so line numbers >= 10 came back wrong (e.g. '0' for line 10),
# corrupting both the sed address and the rewritten sequence number.
for a in $(seq 1 $(wc -l < $CASE2_DIR/$i)); do
sed -i "$a s/$(sed -n ${a}p $CASE2_DIR/$i | cut -f25-31 -d,)/$a,$(sed -n ${a}p $CASE2_DIR/$i | cut -f26-31 -d,)/" $CASE2_DIR/$i;
done
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE2_DIR/$i $SUCCESS_DIR;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "The following files' contents have been merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs echo >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "Total number of records: $(cat $SUCCESS_DIR/$i 2> /dev/null | wc -l)" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "$(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | wc -l) file/s merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs -I {} mv {} $MERGE_DIR 2> /dev/null;
fi;
done;
echo "$(date "+%F %H:%M"): Case2 processing has finished" >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case2 files
# Process Case3 (File entries with missing middle aCR 'true,1,true')
echo "$(date "+%F %H:%M"): Case3 processing has started..." >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case3 files
for i in $(ls $CASE3_DIR/ | grep .*.ftr$); do # Check each file in CASE3_DIR that has a file extension of .ftr
touch -m $CASE3_DIR/$i; # Update file timestamp
CALL_IDENTIFIER=$(head -n1 $CASE3_DIR/$i 2> /dev/null | cut -f31 -d,); # Save the call identifier in a variable
M_NUM=$(head -n1 $CASE3_DIR/$i | cut -f25 -d,); # Save the first sequence number of the call
L_NUM=$(tail -n1 $CASE3_DIR/$i | cut -f25 -d,); # Save the last sequence number of the call
BETWEEN_SEQUENCE=$(eval "t=({$(($M_NUM+1))..$(($L_NUM-1))})"; echo ${t[*]}); # The supposed entries in between the first and last entries
if [ $(stat -c %Y $CASE3_DIR/$i) -lt $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then # Check if file has reached aging time
# If aging is not yet reached, the block below will check if call has complete file entries and move every file to the SUCCESS_DIR
if grep -qr $CALL_IDENTIFIER $POSSIBLE_SUCCESS; then
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS $CASE3_DIR | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs cat 2> /dev/null | awk NF | sort -t, -k25 -V >> $BETWEEN_DIR/urs_d_LongDurationCalls_placeholder_$CALL_IDENTIFIER;
sed -i '1 d' $BETWEEN_DIR/urs_d_LongDurationCalls_placeholder_$CALL_IDENTIFIER;
sed -i '$ d' $BETWEEN_DIR/urs_d_LongDurationCalls_placeholder_$CALL_IDENTIFIER;
CHECK_COUNTER=0; # FIX: initialize explicitly instead of relying on an unset variable evaluating to 0
for a in $BETWEEN_SEQUENCE; do
if cat $BETWEEN_DIR/urs_d_LongDurationCalls_placeholder_$CALL_IDENTIFIER | cut -f25 -d, | grep -q $a; then
CHECK_COUNTER=$(($CHECK_COUNTER+1));
fi;
done;
rm $BETWEEN_DIR/urs_d_LongDurationCalls_placeholder_$CALL_IDENTIFIER 2> /dev/null;
if [ $CHECK_COUNTER == $(echo $BETWEEN_SEQUENCE | tr " " "\n" | wc -l) ]; then
for b in $(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq); do
mv $b $SUCCESS_DIR;
done;
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE3_DIR/$i $SUCCESS_DIR;
fi;
CHECK_COUNTER=0;
fi;
elif [ $(stat -c %Y $CASE3_DIR/$i) -ge $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then # Check if file has reached aging time
# If aging has been reached, the block below will try to consolidated entries for the same identifier, sort them,
# and do the editing for Case3. Move the edited file to SUCCESS_DIR
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS $CASE3_DIR | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs cat 2> /dev/null | awk NF >> $CASE3_DIR/placeholder_$CALL_IDENTIFIER;
sort -t, -k25 -V $CASE3_DIR/placeholder_$CALL_IDENTIFIER > $CASE3_DIR/$i;
rm $CASE3_DIR/placeholder_$CALL_IDENTIFIER 2> /dev/null;
# FIX: was 'for a in $(nl $CASE3_DIR/$i | cut -c6)'; 'cut -c6' returns a single
# character, producing wrong line numbers for files with 10 or more lines.
for a in $(seq 1 $(wc -l < $CASE3_DIR/$i)); do
sed -i "$a s/$(sed -n ${a}p $CASE3_DIR/$i | cut -f25-31 -d,)/$a,$(sed -n ${a}p $CASE3_DIR/$i | cut -f26-31 -d,)/" $CASE3_DIR/$i;
done
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE3_DIR/$i $SUCCESS_DIR;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "The following files' contents have been merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs echo >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "Total number of records: $(cat $SUCCESS_DIR/$i 2> /dev/null | wc -l)" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "$(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | wc -l) file/s merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs -I {} mv {} $MERGE_DIR 2> /dev/null;
fi;
done;
echo "$(date "+%F %H:%M"): Case3 processing has finished" >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case3 files
# Process Case4 (File entries with a one line initial aCR only)
echo "$(date "+%F %H:%M"): Case4 processing has started..." >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case4 files
for i in $(ls $CASE4_DIR/ | grep .*.ftr$); do # Check each file in CASE4_DIR that has a file extension of .ftr
touch -m $CASE4_DIR/$i; # Update file timestamp
CALL_IDENTIFIER=$(head -n1 $CASE4_DIR/$i 2> /dev/null | cut -f31 -d,); # Save the call identifier in a variable
if [ $(stat -c %Y $CASE4_DIR/$i) -lt $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then # Check if file has reached aging time
# If aging is not yet reached, the block below will check if call has complete file entries and move every file to the SUCCESS_DIR
if grep -qr $(head -n1 $CASE4_DIR/$i | cut -f31 -d,) $POSSIBLE_SUCCESS; then
if grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f28-30 -d, | grep -q "true,1,false"; then
for a in $(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq); do
mv $a $SUCCESS_DIR;
done;
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE4_DIR/$i $SUCCESS_DIR;
fi;
fi;
elif [ $(stat -c %Y $CASE4_DIR/$i) -ge $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then # Check if file has reached aging time
# If aging has been reached, the block below will try to consolidated entries for the same identifier, sort them,
# and do the editing for Case4. Move the edited file to SUCCESS_DIR
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS $CASE4_DIR | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs cat 2> /dev/null | awk NF >> $CASE4_DIR/placeholder_$CALL_IDENTIFIER;
sort -t, -k25 -V $CASE4_DIR/placeholder_$CALL_IDENTIFIER > $CASE4_DIR/$i;
rm $CASE4_DIR/placeholder_$CALL_IDENTIFIER 2> /dev/null;
if [ $(cat $CASE4_DIR/$i 2> /dev/null | wc -l) == 1 ]; then
sed -i "$ s/false,2,true,$(tail -n1 $CASE4_DIR/$i | cut -f31 -d,)/false,0,false,$(tail -n1 $CASE4_DIR/$i | cut -f31 -d,)/" $CASE4_DIR/$i;
sed -i "$ s/$(tail -n1 $CASE4_DIR/$i | cut -f25-31 -d,)/,$(tail -n1 $CASE4_DIR/$i | cut -f26-31 -d,)/" $CASE4_DIR/$i;
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE4_DIR/$i $SUCCESS_DIR;
else
sed -i "$ s/true,1,true,$(tail -n1 $CASE4_DIR/$i | cut -f31 -d,)/true,1,false,$(tail -n1 $CASE4_DIR/$i | cut -f31 -d,)/" $CASE4_DIR/$i;
# FIX: was 'for a in $(nl $CASE4_DIR/$i | cut -c6)'; 'cut -c6' returns a single
# character, producing wrong line numbers for files with 10 or more lines.
for a in $(seq 1 $(wc -l < $CASE4_DIR/$i)); do
sed -i "$a s/$(sed -n ${a}p $CASE4_DIR/$i | cut -f25-31 -d,)/$a,$(sed -n ${a}p $CASE4_DIR/$i | cut -f26-31 -d,)/" $CASE4_DIR/$i;
done;
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE4_DIR/$i $SUCCESS_DIR;
fi;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "The following files' contents have been merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs echo >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "Total number of records: $(cat $SUCCESS_DIR/$i 2> /dev/null | wc -l)" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "$(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | wc -l) file/s merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs -I {} mv {} $MERGE_DIR 2> /dev/null;
fi;
done;
echo "$(date "+%F %H:%M"): Case4 processing has finished" >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case4 files
# Process Case5 (File entries with a one line middle aCR only)
echo "$(date "+%F %H:%M"): Case5 processing has started..." >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case5 files
for i in $(ls $CASE5_DIR/ | grep .*.ftr$); do # Check each file in CASE5_DIR that has a file extension of .ftr
touch -m $CASE5_DIR/$i; # Update file timestamp
CALL_IDENTIFIER=$(head -n1 $CASE5_DIR/$i 2> /dev/null | cut -f31 -d,); # Save the call identifier in a variable
if [ $(stat -c %Y $CASE5_DIR/$i) -lt $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then # Check if file has reached aging time
# If aging is not yet reached, the block below will check if call has complete file entries and move every file to the SUCCESS_DIR
if grep -qr $CALL_IDENTIFIER $POSSIBLE_SUCCESS; then
if grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f28-30 -d, | grep -q "true,1,false"; then
if grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f28-30 -d, | grep -q "false,2,true"; then
for a in $(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq); do
mv $a $SUCCESS_DIR;
done;
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE5_DIR/$i $SUCCESS_DIR;
fi;
fi;
fi;
elif [ $(stat -c %Y $CASE5_DIR/$i) -ge $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then # Check if file has reached aging time
# If aging has been reached, the block below will try to consolidated entries for the same identifier, sort them,
# and do the editing for Case5. Move the edited file to SUCCESS_DIR
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS $CASE5_DIR | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs cat 2> /dev/null | awk NF >> $CASE5_DIR/placeholder_$CALL_IDENTIFIER;
sort -t, -k25 -V $CASE5_DIR/placeholder_$CALL_IDENTIFIER > $CASE5_DIR/$i;
rm $CASE5_DIR/placeholder_$CALL_IDENTIFIER 2> /dev/null;
if [ $(cat $CASE5_DIR/$i 2> /dev/null | wc -l) == 1 ]; then
sed -i "$ s/true,1,true,$(tail -n1 $CASE5_DIR/$i | cut -f31 -d,)/false,0,false,$(tail -n1 $CASE5_DIR/$i | cut -f31 -d,)/" $CASE5_DIR/$i;
sed -i "$ s/$(tail -n1 $CASE5_DIR/$i | cut -f25-31 -d,)/,$(tail -n1 $CASE5_DIR/$i | cut -f26-31 -d,)/" $CASE5_DIR/$i;
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE5_DIR/$i $SUCCESS_DIR;
else
sed -i "1 s/true,1,true,$(head -n1 $CASE5_DIR/$i | cut -f31 -d,)/false,2,true,$(head -n1 $CASE5_DIR/$i | cut -f31 -d,)/" $CASE5_DIR/$i;
# FIX: was 'for a in $(nl $CASE5_DIR/$i | cut -c6)'; 'cut -c6' returns a single
# character, producing wrong line numbers for files with 10 or more lines.
for a in $(seq 1 $(wc -l < $CASE5_DIR/$i)); do
sed -i "$a s/$(sed -n ${a}p $CASE5_DIR/$i | cut -f25-31 -d,)/$a,$(sed -n ${a}p $CASE5_DIR/$i | cut -f26-31 -d,)/" $CASE5_DIR/$i;
done
sed -i "$ s/true,1,true,$(head -n1 $CASE5_DIR/$i | cut -f31 -d,)/true,1,false,$(head -n1 $CASE5_DIR/$i | cut -f31 -d,)/" $CASE5_DIR/$i;
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE5_DIR/$i $SUCCESS_DIR;
fi;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "The following files' contents have been merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs echo >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "Total number of records: $(cat $SUCCESS_DIR/$i 2> /dev/null | wc -l)" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "$(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | wc -l) file/s merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs -I {} mv {} $MERGE_DIR 2> /dev/null;
fi;
done;
echo "$(date "+%F %H:%M"): Case5 processing has finished" >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case5 files
# Process Case6 (File entries with a one line last aCR only)
echo "$(date "+%F %H:%M"): Case6 processing has started..." >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case6 files
for i in $(ls $CASE6_DIR/ | grep .*.ftr$); do # Check each file in CASE6_DIR that has a file extension of .ftr
touch -m $CASE6_DIR/$i; # Update file timestamp
CALL_IDENTIFIER=$(head -n1 $CASE6_DIR/$i 2> /dev/null | cut -f31 -d,); # Save the call identifier in a variable
if [ $(stat -c %Y $CASE6_DIR/$i) -lt $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then # Check if file has reached aging time
# If aging is not yet reached, the block below will check if call has complete file entries and move every file to the SUCCESS_DIR
if grep -qr $CALL_IDENTIFIER $POSSIBLE_SUCCESS; then
if grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f28-30 -d, | grep -q "false,2,true"; then
for a in $(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq); do
mv $a $SUCCESS_DIR;
done;
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE6_DIR/$i $SUCCESS_DIR;
fi;
fi;
elif [ $(stat -c %Y $CASE6_DIR/$i) -ge $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then # Check if file has reached aging time
# If aging has been reached, the block below will try to consolidated entries for the same identifier, sort them,
# and do the editing for Case6. Move the edited file to SUCCESS_DIR
# FIX: added '| uniq' after 'cut -f1 -d:' (present in every other case); without
# it, files with multiple grep hits were concatenated once per matching line.
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS $CASE6_DIR | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs cat 2> /dev/null | awk NF >> $CASE6_DIR/placeholder_$CALL_IDENTIFIER;
sort -t, -k25 -V $CASE6_DIR/placeholder_$CALL_IDENTIFIER > $CASE6_DIR/$i;
rm $CASE6_DIR/placeholder_$CALL_IDENTIFIER 2> /dev/null;
if [ $(cat $CASE6_DIR/$i 2> /dev/null | wc -l) == 1 ]; then
sed -i "$ s/true,1,false,$(tail -n1 $CASE6_DIR/$i | cut -f31 -d,)/false,0,false,$(tail -n1 $CASE6_DIR/$i | cut -f31 -d,)/" $CASE6_DIR/$i;
sed -i "$ s/$(tail -n1 $CASE6_DIR/$i | cut -f25-31 -d,)/,$(tail -n1 $CASE6_DIR/$i | cut -f26-31 -d,)/" $CASE6_DIR/$i;
sed -i "/$(head -n1 $CASE6_DIR/$i | cut -f31 -d,)/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE6_DIR/$i $SUCCESS_DIR;
else
sed -i "1 s/true,1,true,$CALL_IDENTIFIER/false,2,true,$CALL_IDENTIFIER/" $CASE6_DIR/$i;
# FIX: was 'for a in $(nl $CASE6_DIR/$i | cut -c6)'; 'cut -c6' returns a single
# character, producing wrong line numbers for files with 10 or more lines.
for a in $(seq 1 $(wc -l < $CASE6_DIR/$i)); do
sed -i "$a s/$(sed -n ${a}p $CASE6_DIR/$i | cut -f25-31 -d,)/$a,$(sed -n ${a}p $CASE6_DIR/$i | cut -f26-31 -d,)/" $CASE6_DIR/$i;
done
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE6_DIR/$i $SUCCESS_DIR;
fi;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "The following files' contents have been merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs echo >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "Total number of records: $(cat $SUCCESS_DIR/$i 2> /dev/null | wc -l)" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "$(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | wc -l) file/s merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs -I {} mv {} $MERGE_DIR 2> /dev/null;
fi;
done;
echo "$(date "+%F %H:%M"): Case6 processing has finished" >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case6 files
# Process Case9 (File entries with missing initial (false,2,true) and last (true,1,false) aCR)
echo "$(date "+%F %H:%M"): Case9 processing has started..." >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case9 files
for i in $(ls $CASE9_DIR/ | grep .*.ftr$); do # Check each file in CASE9_DIR that has a file extension of .ftr
touch -m $CASE9_DIR/$i; # Update file timestamp
CALL_IDENTIFIER=$(head -n1 $CASE9_DIR/$i 2> /dev/null | cut -f31 -d,); # Save the call identifier in a variable
if [ $(stat -c %Y $CASE9_DIR/$i) -lt $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then # Check if file has reached aging time
# If aging is not yet reached, the block below will check if call has complete file entries and move every file to the SUCCESS_DIR
if grep -qr $CALL_IDENTIFIER $POSSIBLE_SUCCESS; then
if grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f28-30 -d, | grep -q "true,1,false"; then
if grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f28-30 -d, | grep -q "false,2,true"; then
for a in $(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq); do
mv $a $SUCCESS_DIR;
done;
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE9_DIR/$i $SUCCESS_DIR;
fi;
fi;
fi;
elif [ $(stat -c %Y $CASE9_DIR/$i) -ge $(cat $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt 2> /dev/null | grep $i | cut -f3 -d" ") ]; then
# If aging has been reached, the block below will try to consolidated entries for the same identifier, sort them,
# and do the editing for Case9. Move the edited file to SUCCESS_DIR
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS $CASE9_DIR | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs cat 2> /dev/null | awk NF >> $CASE9_DIR/placeholder_$CALL_IDENTIFIER;
sort -t, -k25 -V $CASE9_DIR/placeholder_$CALL_IDENTIFIER > $CASE9_DIR/$i;
rm $CASE9_DIR/placeholder_$CALL_IDENTIFIER 2> /dev/null;
sed -i "1 s/true,1,true,$(head -n1 $CASE9_DIR/$i | cut -f31 -d,)/false,2,true,$(head -n1 $CASE9_DIR/$i | cut -f31 -d,)/" $CASE9_DIR/$i;
# FIX: was 'for a in $(nl $CASE9_DIR/$i | cut -c6)'; 'cut -c6' returns a single
# character, producing wrong line numbers for files with 10 or more lines.
for a in $(seq 1 $(wc -l < $CASE9_DIR/$i)); do
sed -i "$a s/$(sed -n ${a}p $CASE9_DIR/$i | cut -f25-31 -d,)/$a,$(sed -n ${a}p $CASE9_DIR/$i | cut -f26-31 -d,)/" $CASE9_DIR/$i;
done
sed -i "$ s/true,1,true,$(head -n1 $CASE9_DIR/$i | cut -f31 -d,)/true,1,false,$(head -n1 $CASE9_DIR/$i | cut -f31 -d,)/" $CASE9_DIR/$i;
sed -i "/$CALL_IDENTIFIER/d" $TSTAMP_DIR/urs_d_LongDurationCalls_call_aging_tstamp.txt;
mv $CASE9_DIR/$i $SUCCESS_DIR;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "The following files' contents have been merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs echo >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "Total number of records: $(cat $SUCCESS_DIR/$i 2> /dev/null | wc -l)" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "$(grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | wc -l) file/s merged to $SUCCESS_DIR/$i" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
echo "-------------------------------------------------------------------------------" >> $LOG_DIR/${NAMING_CONVENTION2}.log;
grep -r $CALL_IDENTIFIER $POSSIBLE_SUCCESS | cut -f1 -d: | uniq | xargs readlink -f 2> /dev/null | xargs -I {} mv {} $MERGE_DIR 2> /dev/null;
fi;
done;
echo "$(date "+%F %H:%M"): Case9 processing has finished" >> $LOG_DIR/$NAMING_CONVENTION2.log; # Logging for processing of Case9 files
# Logging end of LDC processing
echo "$(date "+%F %H:%M"): Processing of LDC files has finished" >> $LOG_DIR/$NAMING_CONVENTION2.log;
# EOF
| true
|
4a6577eb328c1c73b2e9c3f7c5ad00ade4b6acdd
|
Shell
|
water-rev/hk5demandsapi
|
/docker/development/build.sh
|
UTF-8
| 501
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# FIX: the original shebang was '#!/bin/sh' but the script uses bash-only
# features ('read -p' and '[[ ... =~ ... ]]'), which fail under dash/POSIX sh.
# Build the hk5demandsapi development Docker image, tagged with the short hash
# of the current git HEAD commit.
DOCKER_FILE=./docker/development/Dockerfile
PROJECT_NAME=hk5demandsapi-dev
# Short commit hash; empty when not inside a git repository.
LAST_COMMIT=$(git rev-parse --short HEAD 2> /dev/null | sed "s/\(.*\)/\1/")
IMAGE_TAG=${PROJECT_NAME}:${LAST_COMMIT}
echo "Docker Build Push Details"
echo "Image Tag: ${IMAGE_TAG}"
echo "Docker File: ${DOCKER_FILE}"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
    echo "Deploying..."
    # Build from the repository root so the Dockerfile's relative paths resolve.
    cd ../.. || exit 1
    docker build --rm -f ${DOCKER_FILE} -t ${IMAGE_TAG} .
else
    echo "Deployment Cancelled"
fi
| true
|
b261e574ebd0b5ca8294ff280b6a4da973558d09
|
Shell
|
arvl130/kiss32-uclibc-repo
|
/core/uclibc/build
|
UTF-8
| 149
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/sh
# Build script for uClibc: apply all local patches, build, and install into the
# staging directory given as $1, then flatten /lib into /usr/lib.
# FIX: '-e' moved from the shebang into 'set -e' so it is not lost when the
# script is invoked as 'sh build'.
set -e
for patch in *.patch; do
    # Skip the literal '*.patch' that remains when no patch files exist.
    [ -e "$patch" ] || continue
    patch -p1 < "$patch"
done
make all
make DESTDIR="$1" install
# Merge libraries installed under $1/lib into $1/usr/lib and drop the empty dir.
mv "$1/lib"/* "$1/usr/lib"
rmdir "$1/lib"
| true
|
a648cf394889a85867ae19afc60f3b89a830160b
|
Shell
|
FQNet/QUTAGDAQ
|
/RunFQNETDAQ.sh
|
UTF-8
| 1,121
| 2.78125
| 3
|
[] |
no_license
|
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:~/QUTAG-LX64-V1.1.6

# ------------------------------------------------------------------
# Configuration
# ------------------------------------------------------------------
# Coincidence window in picoseconds. Set to -1 to let the program
# compute an appropriate window automatically.
CoincidenceWindow=30000

# Fixed master rate (do not change).
MasterRate=1

# Length of one readout cycle in microseconds. This value multiplied
# by the signal rate must stay below the maximum buffer size,
# otherwise data will be lost.
CollectTime=10000

# Total data-taking time in seconds.
DAQTotalTime=10

# ------------------------------------------------------------------
# Derived values
# ------------------------------------------------------------------
# Number of readout cycles required to cover DAQTotalTime.
CollectRounds=$((DAQTotalTime * 1000000 / CollectTime))

~/QUTAG-LX64-V1.1.6/userlib/QUTAGDAQ/FQNETDAQ signal $CoincidenceWindow $MasterRate $CollectTime $CollectRounds
| true
|
d04f1cf28180955e8ddc67ad07220b9e120285e2
|
Shell
|
reginaldojunior/XU3EM
|
/Tools/check_temp.sh
|
UTF-8
| 1,693
| 3.46875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Sample CPU/GPU clock frequencies and thermal-zone temperatures in an endless
# loop and print one tab-separated record per reading.
# Arguments:
#   $1 - sampling interval in seconds (passed to sleep)
#   $2 - PID of the process to kill when any thermal zone overheats
echo -e "timestamp \tVIRTUAL_SCALING \tCPU0_FREQ \tCPU1_FREQ \tCPU2_FREQ \tCPU3_FREQ \tCPU4_FREQ \tCPU5_FREQ \tCPU6_FREQ \tCPU7_FREQ \tGPU_FREQ \tCPU_GOVERNOR \tCPU0_TEMP \tCPU1_TEMP \tCPU2_TEMP \tCPU3_TEMP \tGPU_TEMP"
# Main infinite loop
while true; do
    # Current CPU frequency governor
    CPU_GOVERNOR=$(cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor)
    #---- scaling frequency and virtual temp -----
    # Scaling frequency in MHz for each of the 8 cores
    for i in {0..7}
    do
        CPU0_FREQ[i]=$(($(cat /sys/devices/system/cpu/cpu$i/cpufreq/scaling_cur_freq)/1000))
    done
    # Virtual thermal-zone temperatures, converted to degrees C
    for i in {0..4}
    do
        TEMP[i]=$(($(cat /sys/devices/virtual/thermal/thermal_zone$i/temp)/1000))
        # FIX: TEMP is already divided by 1000 (degrees C), so the original
        # threshold of 100000 could never trigger; 100 degC is the intended limit.
        if (( ${TEMP[i]} > 100 )); then
            kill "$2"
        fi
    done
    GPU_FREQ=$(($(cat /sys/bus/platform/drivers/mali/11800000.mali/devfreq/devfreq0/cur_freq)/1000000))
    echo -e "$(date +'%s%N') \t1 \t${CPU0_FREQ[0]} \t${CPU0_FREQ[1]} \t${CPU0_FREQ[2]} \t${CPU0_FREQ[3]} \t${CPU0_FREQ[4]} \t${CPU0_FREQ[5]} \t${CPU0_FREQ[6]} \t${CPU0_FREQ[7]} \t$GPU_FREQ \t$CPU_GOVERNOR \t${TEMP[0]} \t${TEMP[1]} \t${TEMP[2]} \t${TEMP[3]} \t${TEMP[4]}"
    #---- hardware frequency and class temp -----
    # Hardware-reported frequency in MHz for each core
    for i in {0..7}
    do
        CPU0_FREQ[i]=$(($(cat /sys/devices/system/cpu/cpu$i/cpufreq/cpuinfo_cur_freq)/1000))
    done
    # Class thermal-zone temperatures, converted to degrees C
    for i in {0..4}
    do
        TEMP[i]=$(($(cat /sys/class/thermal/thermal_zone$i/temp)/1000))
        # FIX: same dead-code threshold as above (was 100000).
        if (( ${TEMP[i]} > 100 )); then
            kill "$2"
        fi
    done
    echo -e "$(date +'%s%N') \t0 \t${CPU0_FREQ[0]} \t${CPU0_FREQ[1]} \t${CPU0_FREQ[2]} \t${CPU0_FREQ[3]} \t${CPU0_FREQ[4]} \t${CPU0_FREQ[5]} \t${CPU0_FREQ[6]} \t${CPU0_FREQ[7]} \t$GPU_FREQ \t$CPU_GOVERNOR \t${TEMP[0]} \t${TEMP[1]} \t${TEMP[2]} \t${TEMP[3]} \t${TEMP[4]}"
    sleep "$1"
done
| true
|
ab54ebd8c488eb1573ffb0ad4c7b731e287548ac
|
Shell
|
jskeates/coder-addons
|
/install-scripts/kubeadm
|
UTF-8
| 399
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Ensure kubeadm v1.23.6 is installed at /usr/local/bin, then replace this
# process with kubeadm, forwarding all command-line arguments.
KUBEADM=/usr/local/bin/kubeadm
if [ ! -f "$KUBEADM" ]
then
    echo "kubeadm is not installed. Installing now... (if prompted, enter password for sudo)"
    sudo curl -Lo "$KUBEADM" https://storage.googleapis.com/kubernetes-release/release/v1.23.6/bin/linux/amd64/kubeadm
    sudo chmod +x "$KUBEADM"
fi
# FIX: use quoted "$@" so arguments containing spaces are forwarded intact
# (the original unquoted $@ re-split them); single exec replaces the
# duplicated exec in each branch.
exec "$KUBEADM" "$@"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.