blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4a913e028c5271cbd7c6d6e9e517d9fec4d5f9b3
|
Shell
|
ltgoslo/finegrained_modelling
|
/data/process_data.sh
|
UTF-8
| 2,631
| 2.765625
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# Download/extract the raw corpora, run every per-corpus processing script,
# then convert each processed corpus into the BIO-format datasets
# (targets / holders / expressions, with and without polarity).
# Run from the directory that contains the raw archives.

# Create new folder where you will keep all processed data
mkdir processed
cd processing_scripts
# Process mpqa data
python3 process_mpqa.py
# Process darmstadt data
cd ..
unzip DarmstadtServiceReviewCorpus.zip
cd DarmstadtServiceReviewCorpus
unzip services
unzip universities
# Raw ampersands break the downstream XML/markup parsing, so rewrite them.
grep -rl "&" universities/basedata | xargs sed -i 's/&/and/g'
cd ..
cd processing_scripts
python3 process_darmstadt.py
# Process semeval data
python3 process_semeval.py
# Process sentihood
python3 process_sentihood.py
# Process mitchell et al
cd ..
mkdir mitchell
tar -xvf MitchellEtAl-13-OpenSentiment.tgz -C mitchell
# A literal tab inside this token would be read as a column separator in the
# CoNLL files, so collapse it into a hyphenated token first.
grep -Prl "TELEPHONE\tNUMBER" mitchell/en/10-fold/* | xargs sed -iE 's/TELEPHONE\tNUMBER/TELEPHONE-NUMBER/g'
cd processing_scripts
python3 process_mitchell.py
# Process wang, et al.
cd ..
mkdir wangetal
unzip wangetal.zip -d wangetal
cd wangetal
tar -xvf annotations.tar.gz
tar -xvf tweets.tar.gz
cd ../processing_scripts
python3 process_wang.py
# Process Jiang et al.
python3 process_jiang.py
cd ..
# Create all of the conll datasets
for corpus in processed/darmstadt/services processed/darmstadt/universities processed/jiang processed/mitchell processed/mpqa processed/semeval/Laptop processed/semeval/Restaurant processed/wang; do
    # TARGETS
    python3 convert_to_bio.py --indir "$corpus" --outdir "$corpus"/targets --to_add Target --no_polarity
    python3 convert_to_bio.py --indir "$corpus" --outdir "$corpus"/targets_polarity --to_add Target
    # HOLDERS
    python3 convert_to_bio.py --indir "$corpus" --outdir "$corpus"/holders --to_add Source --no_polarity
    python3 convert_to_bio.py --indir "$corpus" --outdir "$corpus"/holders_polarity --to_add Source
    # EXPRESSIONS
    python3 convert_to_bio.py --indir "$corpus" --outdir "$corpus"/expressions --to_add Polar_expression --no_polarity
    # Fix: label was misspelled "Polar_expresion" here and in the
    # targets_expressions_polarity call below, which matches no annotation.
    python3 convert_to_bio.py --indir "$corpus" --outdir "$corpus"/expressions_polarity --to_add Polar_expression
    # TARGETS AND EXPRESSIONS
    python3 convert_to_bio.py --indir "$corpus" --outdir "$corpus"/targets_expressions --to_add Target Polar_expression --no_polarity
    python3 convert_to_bio.py --indir "$corpus" --outdir "$corpus"/targets_expressions_polarity --to_add Target Polar_expression
    # TARGETS AND HOLDERS
    python3 convert_to_bio.py --indir "$corpus" --outdir "$corpus"/targets_holders --to_add Target Source --no_polarity
    python3 convert_to_bio.py --indir "$corpus" --outdir "$corpus"/targets_holders_polarity --to_add Target Source
    # FULL
    python3 convert_to_bio.py --indir "$corpus" --outdir "$corpus"/full --no_polarity
    python3 convert_to_bio.py --indir "$corpus" --outdir "$corpus"/full_polarity
done
| true
|
776cc381b17b8c1dd77ce24702da297947dade3e
|
Shell
|
mateusvale/bash_script
|
/script-movimentacao-arquivos/script-criacao-arquivos.sh
|
UTF-8
| 240
| 2.65625
| 3
|
[] |
no_license
|
# Creates num_arquivos empty test files (teste1.txt .. teste20.txt) inside
# the "movimentacao" working directory and makes them group-writable (774).
movimentacao="/home/mateus/fabricas-de-scripts/script-movimentacao-arquivos/movimentacao";
num_arquivos=20;
for ((a=1; a <= num_arquivos ; a++ ));
do
    arquivo="$movimentacao/teste$a.txt"
    # Quote the path so the loop keeps working if the base directory ever
    # contains spaces (touch/chmod were unquoted before).
    touch "$arquivo"
    chmod 774 "$arquivo"
done
| true
|
0acb9adc1ab741a21341e1e2e5a8822128ab8ec2
|
Shell
|
Shaptic/dotfiles
|
/acpi/yoga-brightness.sh
|
UTF-8
| 621
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
# Step the Lenovo Yoga panel backlight by 41 units and mirror the change
# through xrandr's software brightness. Pass "down" to dim; any other (or
# no) argument brightens.
BR_DIR="/sys/devices/pci0000:00/0000:00:02.0/drm/card0/card0-eDP-1/intel_backlight/"
# Quietly do nothing on machines without this backlight device.
test -d "$BR_DIR" || exit 0
MIN=0
MAX=$(cat "$BR_DIR/max_brightness")
VAL=$(cat "$BR_DIR/brightness")
# Step the raw value in the requested direction.
case "$1" in
  down) VAL=$((VAL-41)) ;;
  *)    VAL=$((VAL+41)) ;;
esac
# Clamp to the hardware's valid range.
[ "$VAL" -lt $MIN ] && VAL=$MIN
[ "$VAL" -gt $MAX ] && VAL=$MAX
# Fraction of full brightness as a decimal, for xrandr.
PERCENT=$(echo "$VAL / $MAX" | bc -l)
export XAUTHORITY=/home/m0bius/.Xauthority
export DISPLAY=:0.0
# Log the exact xrandr invocation for debugging, then apply it and persist
# the raw value back to sysfs.
echo "xrandr --output eDP1 --brightness $PERCENT" > /tmp/yoga-brightness.log
xrandr --output eDP1 --brightness $PERCENT
echo $VAL > "$BR_DIR/brightness"
| true
|
bc15b8767a1ab6563c322c69f1692f700f690f4e
|
Shell
|
rmap-project/rmap-shell-installer
|
/rmap-installer/install_rmap.sh
|
UTF-8
| 8,069
| 3.890625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script installs an RMap server.
# The account used to run the script must have sudo privileges.
# Only include this file once
# (include-guard pattern: a second "source" of this file returns immediately)
[[ -n "$RMAP_RMAP_INCLUDED" ]] && return
RMAP_RMAP_INCLUDED=true
# install_common.sh and other initialization is performed in install_tomcat.sh
# (presumably this also provides the helpers used below: print_*, abort,
# remove, ensure_installed, set_owner_and_group, wait_for_url and the
# NOID_*/RMAP_*/TOMCAT_* variables -- confirm in install_tomcat.sh)
source install_tomcat.sh
print_bold_white "Installing RMap:"
################################################################################
# Perl modules and functions
ensure_installed cpanminus
# Install a Perl package if it is not yet installed.
# The name of the package is the only parameter.
function ensure_perl_installed
{
    NAME=$1
    # Probe by actually loading the module: "perl -e 'use X;'" exits non-zero
    # when X is missing. The original wrapped the command in `...` inside
    # [[ ]], which tests perl's *stdout* -- always empty here (errors go to
    # the discarded stderr) -- so the install branch could never run.
    if ! perl -e "use $NAME;" 2>/dev/null; then
        print_green "Installing '$NAME'..."
        cpanm $NAME &>> $LOGFILE \
            || abort "Could not install $NAME"
    fi
}
ensure_perl_installed YAML
ensure_perl_installed Getopt::Long
ensure_perl_installed ExtUtils::MakeMaker
ensure_perl_installed Text::ParseWords
ensure_perl_installed Fcntl
ensure_installed epel-release
ensure_installed perl-BerkeleyDB
################################################################################
# Set up NOID ID Minter
# Determine install type:
# There is either no previous installation (NEW),
# or there is a previous version that must be replaced (UPGRADE),
# or the current versios is already installed (NONE).
# Assume there can only be one previous version.
# NOTE(review): the Noid-* and *.back patterns are unquoted; if files
# matching them exist in the current directory, the shell expands them
# before find runs -- quote them to be safe.
installed_version=`find /rmap -maxdepth 1 -name Noid-* -not -name *.back`
if [[ $installed_version == "" ]]; then
print_green "Will perform initial NOID installation."
INSTALL_TYPE=NEW
elif [[ $installed_version != $NOID_PATH ]]; then
print_green "Will upgrade the NOID installation."
INSTALL_TYPE=UPGRADE
else
print_green "NOID installation is up to date."
INSTALL_TYPE=NONE
fi
if [[ $INSTALL_TYPE != "NONE" ]]; then
# For upgrades, save the current NOID folder as a backup
if [[ $INSTALL_TYPE == "UPGRADE" ]]; then
print_green "Backing up NOID data..."
BACKUP_PATH=$installed_version.back
if [[ -d $BACKUP_PATH ]]; then
remove $BACKUP_PATH
fi
mv $installed_version $BACKUP_PATH &>> $LOGFILE \
|| abort "Could not rename NOID folder"
fi
# Download NOID distribution file
if [[ -f $NOID_ZIP ]]; then
remove $NOID_ZIP
fi
print_green "Downloading NOID..."
wget --no-verbose $NOID_URI > /dev/null 2>> $LOGFILE \
|| abort "Could not download NOID"
# Unzip, tweak and install NOID distribution
print_green "Unzipping NOID..."
tar -xf $NOID_ZIP -C $PARENT_DIR &>> $LOGFILE \
|| abort "Could not unzip NOID"
# Install NOID for perl
cp $NOID_PATH/lib/Noid.pm /usr/share/perl5 &>> $LOGFILE \
|| abort "Could not install NOID perl library"
remove $NOID_ZIP
# Remove the "tainted" flag from the NOID script
# (rewrites the perl -Tw invocation inside the noid script to plain -w)
sed "s,-Tw,-w," < $NOID_PATH/noid > $NOID_PATH/noid.fixed 2>> $LOGFILE \
|| abort "Could not modify NOID script"
mv $NOID_PATH/noid.fixed $NOID_PATH/noid &>> $LOGFILE \
|| abort "Could not replace modified NOID script"
chmod 775 $NOID_PATH/noid &>> $LOGFILE \
|| abort "Could not change permissions on NOID script"
# Restore or create a NOID ID minter database, then test it
# If update install, move saved folders back
if [[ $INSTALL_TYPE == "UPGRADE" && -d $BACKUP_PATH/noiddb ]]; then
print_green "Restoring NOID database..."
cp -r $BACKUP_PATH/noiddb $NOID_PATH &>> $LOGFILE \
|| abort "Could not restore NOID database"
else # install type is NEW
print_green "Creating NOID database..."
mkdir $NOID_PATH/noiddb &>> $LOGFILE \
|| abort "Could not create NOID database folder"
pushd $NOID_PATH/noiddb &>> $LOGFILE
perl $NOID_PATH/noid dbcreate .reeeeeeeeek &>> $LOGFILE \
|| abort "Could not create NOID database"
popd &>> $LOGFILE
fi
# NOTE(review): this "initial test" can only abort when the minted output
# is literally the 5-character string "id:*" (the right-hand side is
# quoted, so * is not a pattern). A failing mint (empty output) passes the
# check unnoticed; the intent was probably [[ output == id:* ]] || abort.
[[ `perl $NOID_PATH/noid -f $NOID_PATH/noiddb mint 1 2>/dev/null` != "id:*" ]] \
|| abort "NOID minter failed initial test"
# Create the NOID web service
# TODO - Need to also do this when Tomcat is deleted and reinstalled.
print_green "Creating NOID web service..."
pushd $TOMCAT_PATH/webapps &>> $LOGFILE
wget https://github.com/rmap-project/rmap/releases/download/v1.0.0-beta/noid.war \
>> $LOGFILE 2>/dev/null \
|| abort "Could not download NOID web app"
set_owner_and_group noid.war
popd &>> $LOGFILE
sed "s,NOIDPATH,$NOID_PATH,g" < noid.sh > $NOID_PATH/noid.sh 2>> $LOGFILE \
|| abort "Could not install NOID script"
chmod 775 $NOID_PATH/noid.sh &>> $LOGFILE \
|| abort "Could not change permissions on NOID script"
# Update ownership of all NOID files
set_owner_and_group $NOID_PATH
fi
################################################################################
# RMap Configuration
print_green "Configuring RMap API and Account Manager web apps..."
# Make sure there is a properties folder
if [[ ! -d $RMAP_PROPS_FOLDER ]]; then
mkdir $RMAP_PROPS_FOLDER &>> $LOGFILE \
|| abort "Could not create RMap properties folder"
fi
# TODO - If MARIADB or GRAPHDB domain names are same as TOMCAT's, use "localhost"?
# Fill the property-file template: each placeholder token is replaced with
# the value gathered by the earlier install scripts.
sed " \
s,RMAPSERVERURL,$TOMCAT_DOMAIN_NAME,; \
s,MARIADBSERVERURL,$MARIADB_DOMAIN_NAME,; \
s,MARIADBDBNAME,$MARIADB_DBNAME,; \
s,MARIADBUSER,$MARIADB_USER,; \
s,MARIADBPASSWORD,$MARIADB_PASSWORD,; \
s,GRAPHDBSERVERURL,$GRAPHDB_DOMAIN_NAME,; \
s,GRAPHDBDBNAME,$GRAPHDB_DBNAME,; \
s,GRAPHDBUSER,$GRAPHDB_USER,; \
s,GRAPHDBPASSWORD,$GRAPHDB_PASSWORD,; \
s,GOOGLEOAUTHKEY,$GOOGLE_OAUTH_KEY,; \
s,GOOGLEOAUTHSECRET,$GOOGLE_OAUTH_SECRET,; \
" \
< $RMAP_PROPS_FILE > $RMAP_PROPS_FOLDER/$RMAP_PROPS_FILE 2>> $LOGFILE \
|| abort "Could not create RMap configuration file"
set_owner_and_group $RMAP_PROPS_FOLDER
################################################################################
# RMap API
# TODO - Read version property from API POM file (if it exists):
# /rmap/apache*/webapps/api/META_INF/maven/info.rmapproject/rmap-api/pom.properties
# Compare it to value in $RMAP_API_VERSION
# Install if file doesn't exist or if version is different.
print_green "Downloading RMap API web app..."
wget --no-verbose $RMAP_API_URI -O api.war 2>> $LOGFILE \
|| abort "Could not download RMap API web app"
print_green "Installing RMap API web app..."
mv api.war $TOMCAT_PATH/webapps &>> $LOGFILE \
|| abort "Could not install RMap API web app"
# Wait for WAR file to be processed and "api" folder to be created
print_yellow_noeol "Waiting for the RMap API (this can take several seconds)"
wait_for_url "http://$TOMCAT_DOMAIN_NAME/api/discos"
################################################################################
# RMap Account Manager
print_green "Downloading RMap Account Manager web app..."
wget --no-verbose $RMAP_APP_URI -O app.war 2>> $LOGFILE \
|| abort "Could not download RMap Account Manager web app"
print_green "Installing RMap Account Manager web app..."
# Deployed as ROOT.war so the account manager serves the site root.
mv app.war $TOMCAT_PATH/webapps/ROOT.war &>> $LOGFILE \
|| abort "Could not install RMap Account Manager web app"
# Wait for WAR file to be processed and "app" folder to be created
print_yellow_noeol "Waiting for the RMap Account Manager (this can take several seconds)"
wait_for_url "http://$TOMCAT_DOMAIN_NAME"
################################################################################
# Finalization
# Restart Tomcat so it reflects these changes
print_green "Restarting Tomcat..."
systemctl daemon-reload &>> $LOGFILE \
|| abort "Could not refresh services list"
systemctl stop tomcat &>> $LOGFILE \
|| abort "Could not stop Tomcat service"
systemctl start tomcat &>> $LOGFILE \
|| abort "Could not start Tomcat server"
if [[ -z $IS_UPGRADE ]]; then
print_bold_white "Done installing RMap!"
else
print_bold_white "Done upgrading RMap!"
fi
print_white "" # A blank line
| true
|
102676a7e908b1448421f21bc655c852d558a26d
|
Shell
|
inodb/scripts
|
/monitorMem.sh
|
UTF-8
| 314
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Serial version writes snapshots to $OUT.$i
# Capture 40 one-second memory/CPU snapshots, one file per iteration,
# named $OUT.<ii> with a zero-padded index.
OUT=$1
# Default to stdout when no output prefix was given.
# (Fix: the test was the unquoted "[ ! $OUT ]", which errors out when $1
# contains spaces; the dead "i=0" initializer was also removed.)
# NOTE(review): even with the default, output goes to "/dev/stdout.NN",
# not /dev/stdout -- this matches the original behavior; confirm intent.
if [ -z "$OUT" ]; then
    OUT=/dev/stdout
fi
for i in {1..40}; do
    # Zero-pad the index so snapshot files sort lexicographically.
    ip=$(printf "%02d" "$i")
    free -m > "$OUT.$ip"
    echo "==========================================================" >> "$OUT.$ip"
    # Two top iterations so the second one reflects steady-state CPU usage.
    top -b -n 2 >> "$OUT.$ip"
    sleep 1
done
| true
|
a312faf6e77c33abe5a0c693d620cd1f89b5a2fb
|
Shell
|
drequena/psapt
|
/psapt/bin/special_commands/capture.sh
|
UTF-8
| 1,488
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# DO NOT RUN THIS SCRIPT MANUALLY!
# Receives the scenario name and the link (interface) to be captured;
# starts a background tcpdump and records its pid in <run_dir>/<link>.run
# so the calling tooling can stop it later.
PSAPT_DIR="/opt/psapt"
CEN_DIR="$PSAPT_DIR/cenarios"
LOG_FILE="log"
TCPDUMP="/usr/sbin/tcpdump"
# NOTE(review): "psatp" looks like a transposition of "psapt" -- confirm
# the intended capture directory before changing it.
CAPTURE_DIR="/tmp/pub/psatp"
DATE="/usr/bin/date"
JOBS="/usr/bin/jobs"
arg_num=$#
# Exactly two arguments are required: <scenario> <link>.
if [ $arg_num -lt 2 ] || [ $arg_num -gt 2 ]; then
echo "Nao execute esse script via linha de comando!"
exit 15
fi
cen_to_operate=$1
link_capture=$2
cenario="$CEN_DIR/$cen_to_operate"
run_dir="$cenario/run"
echo "capture.sh: Checando existencia do diretorio $CAPTURE_DIR" >> $run_dir/$LOG_FILE
if [ ! -d $CAPTURE_DIR ]; then
echo "$CAPTURE_DIR nao existe, criando..." >> $run_dir/$LOG_FILE
mkdir -p $CAPTURE_DIR
fi
echo "Criando arquivo $link_capture.run" >> $run_dir/$LOG_FILE
touch $run_dir/$link_capture".run"
# Timestamped capture file: <dir>/<link>-MM-DD-YY-HHMMSS.cap
capture_prefix=$($DATE +%m-%d-%y-%H%M%S)
capture_prefix="$capture_prefix.cap"
CAPTURE_FILE_NAME="$CAPTURE_DIR/$link_capture-$capture_prefix"
# Full packets (-s 0), no name resolution (-n), run in the background.
$TCPDUMP -i $link_capture -n -s 0 -w $CAPTURE_FILE_NAME &
# NOTE(review): $? here is the status of *launching* the background job,
# which is almost always 0 -- a tcpdump startup failure (bad interface,
# missing privileges) is NOT caught by this branch.
if [ $? -ne 0 ]; then
echo "Erro ao iniciar captura do link $link_capture" >> $run_dir/$LOG_FILE
echo "Erro ao iniciar captura do link $link_capture"
echo "Apagando arquivo $link_capture.run" >> $run_dir/$LOG_FILE
rm $run_dir/$link_capture".run"
# NOTE(review): exit codes look inverted (0 on this error path, 1 on the
# success path below); presumably the caller expects exactly these values
# -- confirm before normalizing.
exit 0
fi
tcpdump_pid=$!
echo "Processo de captura do link $link_capture iniciado com sucesso. Executando sob pid: $tcpdump_pid" >> $run_dir/$LOG_FILE
echo $tcpdump_pid > $run_dir/$link_capture".run"
exit 1
| true
|
988add0f8e78f559a32f930f414315a896729159
|
Shell
|
FlorianHeigl/nagios
|
/check_mk/openssl_version/plugins/openssl_version
|
UTF-8
| 103
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
# check_mk agent plugin: emit the local OpenSSL version in check_mk's
# <<<section>>> format, but only when an openssl binary is available.
# "command -v" replaces the external "which" -- it is the POSIX-portable
# builtin and behaves identically for this presence test.
if command -v openssl >/dev/null ; then
    echo "<<<openssl_version>>>"
    openssl version
fi
| true
|
17a71b9a65b891d130e4c3b31212675a0b8d59d7
|
Shell
|
LARG/spl-release
|
/data/scripts/shorthand.sh
|
UTF-8
| 1,477
| 3.6875
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Shorthand control script for the robot services and wifi.
# Usage: shorthand.sh {start|stop|restart|status|connect <ssid>|disconnect <ssid>}
# Fixes: the original shebang was "#/bin/sh" (missing "!"), so the kernel
# had no interpreter line; "$1" is now quoted so running with no argument
# falls through cleanly to "Invalid command" instead of raising a test
# syntax error in every branch.
if [ "$1" = "start" ]; then
    ~/bin/villa-start.sh
elif [ "$1" = "stop" ]; then
    ~/bin/villa-stop.sh
elif [ "$1" = "restart" ]; then
    ~/bin/villa-stop.sh
    sleep 2
    ~/bin/villa-start.sh
elif [ "$1" = "status" ]; then
    systemctl --user status ut_motion
    systemctl --user status ut_vision
elif [ "$1" = "connect" ]; then
    # connman names services like wifi_<mac>_<hexssid>_managed_psk; map the
    # human-readable SSID ($2) to that service identifier.
    WIFI_NAME=$( connmanctl services | grep -o "$2 .*$" | grep -o 'wifi\w*managed_\(none\|psk\)$' )
    if [ -z "$WIFI_NAME" ]; then
        echo "SSID not found. Check spelling. Try 'connmanctl scan wifi'?"
    else
        # The robot's configured id becomes the last octet of its static IP.
        ROBOT_ID=$( cat /home/nao/data/config.yaml | grep -o "robot_id:.*$" | grep -o "[0-9]*" )
        echo "Connecting to $WIFI_NAME"
        sudo mkdir -p /var/lib/connman/$WIFI_NAME
        sudo printf "[$WIFI_NAME]\nType=wifi\nName=$2\nSecurity=wpa\nPassphrase=Nao?!Nao?!\nIPv4=10.0.1.$ROBOT_ID/255.255.0.0\nIPv6=off\n" | sudo tee /var/lib/connman/$WIFI_NAME/settings
        sudo connmanctl connect $WIFI_NAME
        # Retry config just in case it's not set
        sudo connmanctl config $WIFI_NAME --ipv4 manual 10.0.1.$ROBOT_ID 255.255.0.0
        sudo connmanctl config $WIFI_NAME autoconnect on
    fi
elif [ "$1" = "disconnect" ]; then
    WIFI_NAME=$( connmanctl services | grep -o "$2.*$" | grep -o 'wifi\w*managed_\(none\|psk\)' )
    sudo connmanctl config $WIFI_NAME autoconnect off
    echo "Attempting disconnect $WIFI_NAME"
    sudo connmanctl disconnect $WIFI_NAME
else
    echo "Invalid command"
fi
| true
|
6099b3a6708dd7191addb3805633d3618cc5d5c0
|
Shell
|
mrodriguez3313/True-Citizen-Science
|
/src_scripts/autoUpload.sh
|
UTF-8
| 407
| 3.0625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Watch the filesdump directory and pin every newly created or moved-in
# file to IPFS, publishing the resulting hash on the "Hashes" pubsub topic.
echo "initializing deamon to monitor and upload files to ipfs created in filesdump dir."
WATCH_DIR=~/Documents/InsightDC/filesdump
inotifywait -m "$WATCH_DIR" -e create -e moved_to |
while read -r path action file; do
    echo "The file '$file' appeared in directory '$path' via '$action'"
    # Fix: "$file" is now quoted so names containing spaces survive, and
    # read -r keeps backslashes intact. awk pulls the hash (second field)
    # out of ipfs's "added <hash> <name>" line.
    ipfs add "$WATCH_DIR/$file" | awk '{split($0,a); print a[2]}' | ipfs pubsub pub Hashes
done
| true
|
74d46484b43e11d0dad3d97c3f4ec9f5d3415286
|
Shell
|
jasonkeene/atomics-talk
|
/src/benchmarks/run_benchmarks.sh
|
UTF-8
| 587
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Run every pre-built benchmark binary once under the current
# GOMAXPROCS/GOROUTINES settings, printing a banner and per-binary timing.
run_benchmarks() {
    echo ====================================================
    echo GOMAXPROCS=$GOMAXPROCS
    echo GOROUTINES=$GOROUTINES
    # Fix: glob directly instead of parsing "ls" output. Note that without
    # "shopt -s globstar", the original "**/*.go" matched exactly one
    # directory level, i.e. it behaved as "*/*.go".
    for file in */*.go; do
        [ -e "$file" ] || continue  # no matches: skip, as the ls version did
        # Strip the extension to get the matching binary built by build.sh.
        base=${file%%.*}
        echo running "$base"
        time ./"$base"
        echo
    done
}
# Build the benchmark binaries, sweep every GOMAXPROCS x GOROUTINES
# combination, run the whole suite for each pair, then remove the binaries.
main() {
    echo building binaries
    ./build.sh
    echo
    for procs in 1 2 4 8 16 24; do
        export GOMAXPROCS=$procs
        for workers in 2 4 8 16 24; do
            export GOROUTINES=$workers
            run_benchmarks
        done
    done
    echo cleaning up
    ./clean.sh
}
main
| true
|
cb868f175deb87e252c0546cae3a27b245daef5c
|
Shell
|
Yangzhengtang/mspass
|
/scripts/tacc_examples/single_node.sh
|
UTF-8
| 1,375
| 3.234375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#SBATCH -J mspass # Job name
#SBATCH -o mspass.o%j # Name of stdout output file
#SBATCH -p skx-dev # Queue (partition) name
#SBATCH -N 1 # Total # of nodes (must be 1 for serial)
#SBATCH -n 1 # Total # of mpi tasks (should be 1 for serial)
#SBATCH -t 02:00:00 # Run time (hh:mm:ss)
#SBATCH -A MsPASS # Allocation name (req'd if you have more than 1)
# working directory
WORK_DIR=$SCRATCH/mspass/single_workdir
# directory where contains docker image
MSPASS_CONTAINER=$WORK2/mspass/mspass_latest.sif
# command that start the container
SING_COM="singularity run --home $WORK_DIR $MSPASS_CONTAINER"
# xalt wraps binaries and interferes with singularity on TACC systems,
# so unload it before loading the singularity module.
module unload xalt
module load tacc-singularity
module list
pwd
date
NODE_HOSTNAME=`hostname -s`
# Derive a per-node login port from the compute-node name.
# NOTE(review): the perl regex assumes Stampede2-style hostnames
# (cXYZ-ABB) -- confirm before reusing on another cluster.
LOGIN_PORT=`echo $NODE_HOSTNAME | perl -ne 'print (($2+1).$3.$1) if /c\d(\d\d)-(\d)(\d\d)/;'`
echo "got login node port $LOGIN_PORT"
# create reverse tunnel port to login nodes. Make one tunnel for each login so the user can just
# connect to stampede.tacc
for i in `seq 4`; do
ssh -q -f -g -N -R $LOGIN_PORT:$NODE_HOSTNAME:8888 login$i
done
echo "Created reverse ports on Stampede2 logins"
mkdir -p $WORK_DIR
cd $WORK_DIR
# database should be deployed as 'scratch' or 'tmp' mode
DB_PATH='scratch'
# Pass configuration into the container via SINGULARITYENV_* variables,
# then launch it (this blocks for the rest of the job).
SINGULARITYENV_MSPASS_DB_PATH=$DB_PATH \
SINGULARITYENV_MSPASS_WORK_DIR=$WORK_DIR $SING_COM
| true
|
3a9daa0116ae7bc559ac8c9ce69f6771937b4162
|
Shell
|
slimsevernake/Store-and-CMS-React-with-Redux-RESTful-API-
|
/docker_dev.sh
|
UTF-8
| 2,482
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive cleanup/build helper for the klumba dev docker stack:
# prune, remove dangling volumes, truncate container logs, remove images,
# and rebuild/restart the dev compose services -- each step gated by a
# y/n prompt.
services=(
bot-viber-dev
app-dev
cms-dev
api-dev
nginx-dev
postgres-dev
)
images=(
alexmasyukov/klumba_app:latest
alexmasyukov/klumba_api:latest
alexmasyukov/klumba_cms:latest
alexmasyukov/klumba_viber_bot
klumba_bot-viber-dev
klumba_app-dev
klumba_cms-dev
klumba_api-dev
# NOTE(review): klumba_bot-viber-dev is listed twice, so the removal loop
# prompts for it twice -- confirm whether that is intentional.
klumba_bot-viber-dev
)
# Truncate a container's json log file. Docker-for-Mac specific: the log
# lives inside the docker VM, so the rm command is typed into the VM's tty
# through a detached screen session.
clearDockerLog(){
dockerLogFile=$(docker inspect $1 | grep -G '\"LogPath\": \"*\"' | sed -e 's/.*\"LogPath\": \"//g' | sed -e 's/\",//g')
rmCommand="rm $dockerLogFile"
screen -d -m -S dockerlogdelete ~/Library/Containers/com.docker.docker/Data/vms/0/tty
# ($"..." is bash locale-translation quoting; with no catalog it behaves
# like a plain "$rmCommand")
screen -S dockerlogdelete -p 0 -X stuff $"$rmCommand"
screen -S dockerlogdelete -p 0 -X stuff $'\n'
screen -S dockerlogdelete -X quit
}
read -p "Run docker system prune? " yn
case $yn in
[Yy]* ) docker system prune ;;
esac
read -p "Remove volumes? " yn
case $yn in
[Yy]*) docker volume rm $(docker volume ls -qf dangling=true) ;;
esac
read -p "Clear logs? " yn
case $yn in
[Yy]*)
for srv in ${services[@]}; do
clearDockerLog ${srv}
# docker run -it --rm --privileged --pid=host ${srv} nsenter -t 1 -m -u -n -i -- sh -c 'truncate -s0 /var/lib/docker/containers/*/*-json.log'
done
;;
esac
read -p "Remove images? " yn
case $yn in
[Yy]*)
for img in ${images[@]}; do
ff=$(docker images ${img} -q)
echo " "
read -p "Remove ${img} ?" rimg
case $rimg in
[Yy]* ) docker rmi -f $ff; # break ;;
esac
done
esac
read -p "Build DEV images? " yn
case $yn in
[Yy]*)
for srv in ${services[@]}; do
read -p "Build image - ${srv} ?" rsrv
case $rsrv in
[Yy]* )
docker-compose --file=docker-compose.dev.yml stop ${srv};
docker system prune ;
docker rmi -f klumba_${srv};
docker-compose --file=docker-compose.dev.yml build --no-cache ${srv};
docker-compose --file=docker-compose.dev.yml up -d ${srv};
# break ;;
esac
done
esac
echo ' '
echo ' '
echo '### IMAGES ###'
docker images
| true
|
00e7bdf0812af79a9a9de143a920f925c057999b
|
Shell
|
drzaeus77/netflix-no-ipv6-dns-proxy
|
/netflixdns.rc
|
UTF-8
| 894
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# $FreeBSD$
#
# PROVIDE: netflixdns
# REQUIRE: LOGIN
#
# Add the following line to /etc/rc.conf[.local] to enable netflixdns
#
# netflixdns_enable (bool): Set to "NO" by default.
# Set it to "YES" to enable netflixdns.
# netflixdns_command (path): Path to the python script
. /etc/rc.subr
name=netflixdns
rcvar=netflixdns_enable
load_rc_config $name
# rc.conf defaults: disabled unless the admin opts in.
: ${netflixdns_enable="NO"}
: ${netflixdns_command="/usr/local/libexec/netflixdns"}
command="${netflixdns_command}"
# The daemon is a python script, so rc.subr must match the interpreter
# when looking for the running process.
command_interpreter="python"
pidfile="/var/run/${name}.pid"
start_cmd="${name}_start"
# Custom start: refuse to double-start, then launch under daemon(8) with
# /usr/local/bin appended to PATH so the script can find its tools.
netflixdns_start()
{
local pid
pid=$(check_pidfile $pidfile $command)
if [ -n "${pid}" ]; then
echo "${name} already running? (pid=${pid})."
return 1
fi
echo -n "Starting ${name}"
PATH="${PATH}:/usr/local/bin" /usr/sbin/daemon -f -p ${pidfile} ${command} ${netflixdns_flags}
echo '.'
}
run_rc_command "$1"
| true
|
a09a5684c57ac3eaa5b99fa8b477f2ad313891eb
|
Shell
|
zzj0402/natural-questions-environment
|
/prepare-data.sh
|
UTF-8
| 1,049
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Convert the 50 gzipped NQ training shards into BERT-joint tfrecords.
# Input shards are named nq-train-00.jsonl.gz .. nq-train-49.jsonl.gz and
# each output record carries a five-digit, zero-padded shard index.
counter=0
while [ $counter -le 49 ]
do
    echo Processing training shard $counter ...
    # Zero-pad once for the input name (2 digits) and once for the output
    # record index (5 digits). This replaces the two near-identical loops
    # that hand-built "0$counter"/"0000$counter" for 0-9 and
    # "$counter"/"000$counter" for 10-49 -- the generated names are
    # byte-identical to before.
    shard=$(printf "%02d" $counter)
    record=$(printf "%05d" $counter)
    python -m language.question_answering.bert_joint.prepare_nq_data \
      --logtostderr \
      --input_jsonl /root/data/v1.0/train/nq-train-$shard.jsonl.gz \
      --output_tfrecord ~/output_dir/nq-train.tfrecords-$record-of-00050 \
      --max_seq_length=512 \
      --include_unknowns=0.02 \
      --vocab_file=bert-joint-baseline/vocab-nq.txt
    ((counter++))
done
echo All $counter training data shards processed!
echo Making combined record ...
python -m combine-records
echo Finshed making records
| true
|
693b6a4dea4f923566829306c5440df455ad1694
|
Shell
|
omakoto/zenlog-legacy
|
/v4/zenlog-sh-helper
|
UTF-8
| 1,705
| 3.859375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Basic Zenlog bash helper functions.
# Install it with:
# . <(zenlog sh-helper)
# Everything below is printed verbatim (single-quoted here-doc: no
# expansion happens here); the caller sources this output, so the
# functions are defined in *that* shell against its own ZENLOG_* state.
cat <<'EOF'
# Same as "zenlog in_zenlog", except this one is faster.
function zenlog_in_zenlog() {
[[ $(tty 2>/dev/null) == $ZENLOG_TTY ]]
}
function in_zenlog() {
zenlog_in_zenlog
}
# Run a command without logging the output.
function _zenlog_nolog() {
"${@}"
}
alias 184=_zenlog_nolog
# Run a command with forcing log, regardless of ZENLOG_ALWAYS_184_COMMANDS.
function _zenlog_force_log() {
"${@}"
}
alias 186=_zenlog_force_log
function _zenlog_current_log_helper() {
local san_name=$1 raw_name=$2 env_name=$3
shift 3
local OPTIND
local pid=$ZENLOG_PID
local name=$san_name
while getopts "rep:" opt; do
case "$opt" in
p) pid="$OPTARG" ;;
r) name=$raw_name ;;
e) name=$env_name ;;
esac
done
shift $(($OPTIND - 1))
readlink "${ZENLOG_DIR}/pids/${pid}/${name}" 2>/dev/null
}
# Basically same as "zenlog current-log", except this one is faster.
function zenlog_current_log() {
_zenlog_current_log_helper P R E "$@"
}
# Basically same as "zenlog last-log", except this one is faster.
function zenlog_last_log() {
_zenlog_current_log_helper PP RR EE "$@"
}
# Print the current command's command line. Use with "zenlog start-command".
function bash_last_command() {
# Use echo to remove newlines.
HISTTIMEFORMAT= history 1 | sed -e 's/^ *[0-9][0-9]* *//'
}
function bash_dump_env() {
{
echo "PWD: $(pwd)"
echo "git HEAD: $(git rev-parse HEAD) $(git rev-parse --abbrev-ref HEAD)"
env # dump environmental variables.
# declare -p # this dumps shell variables too, but also functions, and may be too big.
} 2>/dev/null
}
EOF
| true
|
0a0c3749b577203b1844ef47aeb452393295d5f7
|
Shell
|
ktp-forked-repos/bash-utilities
|
/gco
|
UTF-8
| 546
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/sh
# |gco|: compile |@program@.c| [...] , 'outputting' to basename of first arg.
# written by Hugh McGuire

# Emit the usage message on stderr and abort (factored out of the two
# duplicated error paths).
usage() {
    echo Usage:\ `basename $0` '@program@.c' '[...]' >&2
    exit 1
}

# Need at least one source file.
[ $# -ge 1 ] || usage

dir=$(dirname "$1")
stem=$(basename "$1" .c)
full=$(basename "$1")

# The first argument must genuinely end in ".c".
[ "${stem}.c" = "${full}" ] || usage

# if debugging:
set -x # seems nice even if not debugging
# set -v
gcc -std=c99 -Wall -g "$@" -lm -o "$dir/$stem"
| true
|
9255020fc580a8a2ae186c92f6ba9d478475d189
|
Shell
|
stevleibelt/shell_config
|
/_source/function/media/book
|
UTF-8
| 9,560
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Only define the OCR helper when NAPS2 is actually installed.
if [[ -f /usr/bin/naps2 ]];
then
# Add an OCR text layer to a PDF via NAPS2.
# $1: source pdf; $2: OCR language (default: deu); $3: destination pdf
# (defaults to overwriting the source in place).
function net_bazzline_media_book_add_ocr_to_pdf ()
{
local DESTINATION_FILE_PATH
local SOURCE_FILE_PATH
# ref: https://www.loc.gov/standards/iso639-2/php/code_list.php
local OCR_LANGUAGE
DESTINATION_FILE_PATH="${3:-${1}}"
OCR_LANGUAGE="${2:-deu}"
SOURCE_FILE_PATH="${1}"
if [[ -f "${SOURCE_FILE_PATH}" ]];
then
# ref: https://www.naps2.com/doc/command-line
#
# --enableocr: enables usage of ocr
# --force: overwrite if output file path exists
# -i: import file path
# -n 0: scan 0 pages
# -o: output file path
# --ocrlang: defines ocr language
# --force is only needed when writing over the input file itself.
if [[ "${SOURCE_FILE_PATH}" == "${DESTINATION_FILE_PATH}" ]];
then
naps2 console -i "${SOURCE_FILE_PATH}" -n 0 --enableocr --ocrlang "${OCR_LANGUAGE}" --force -o "${DESTINATION_FILE_PATH}"
else
naps2 console -i "${SOURCE_FILE_PATH}" -n 0 --enableocr --ocrlang "${OCR_LANGUAGE}" -o "${DESTINATION_FILE_PATH}"
fi
else
echo ":: Invalid source file path provided"
echo " >>${SOURCE_FILE_PATH}<< does not exist"
fi
}
fi
####
# finds all *.md files in current working directory
# converts each md file in a pdf file
# merges all created pdf files into one file
####
# @param: FILE_PATTERN - default: *.md
# @param: MERGED_PDF_FILENAME - default: _merged.pdf
####
function net_bazzline_media_book_convert_markdown_files_to_one_pdf ()
{
    local CURRENT_WORKING_DIRECTORY
    local FILE_PATTERN
    local NUMBER_OF_FOUND_FILES
    local MERGED_PDF_FILE_NAME
    local PDF_DIRECTORY_PATH
    local TEMPORARY_DIRECTORY
    local TEMPORARY_FILE_PATH
    CURRENT_WORKING_DIRECTORY=$(pwd)
    FILE_PATTERN="${1-*.md}"
    MERGED_PDF_FILE_NAME="${2-_merged.pdf}"
    TEMPORARY_DIRECTORY=$(mktemp -d)
    PDF_DIRECTORY_PATH="${TEMPORARY_DIRECTORY}/_pdf"
    TEMPORARY_FILE_PATH="${TEMPORARY_DIRECTORY}/list_of_files.txt"
    # Fix: this file is sourced into interactive shells, so a failed
    # dependency check must >>return<<, not >>exit<< -- exit would close
    # the user's shell. This matches the convention already used by
    # net_bazzline_media_book_compress_pdf below.
    if [[ ! -f /usr/bin/pandoc ]];
    then
        echo ":: >>/usr/bin/pandoc<< is not a file."
        echo " pandoc is mandatory"
        return 1
    fi
    if [[ ! -f /usr/bin/gs ]];
    then
        echo ":: >>/usr/bin/gs<< is not a file."
        echo " gs is mandatory"
        return 2
    fi
    net_bazzline_core_echo_if_be_verbose ":: Dumping runtime environment"
    net_bazzline_core_echo_if_be_verbose " CURRENT_WORKING_DIRECTORY >>${CURRENT_WORKING_DIRECTORY}<<"
    net_bazzline_core_echo_if_be_verbose " FILE_PATTERN >>${FILE_PATTERN}<<"
    net_bazzline_core_echo_if_be_verbose " MERGED_PDF_FILE_NAME >>${MERGED_PDF_FILE_NAME}<<"
    net_bazzline_core_echo_if_be_verbose " PDF_DIRECTORY_PATH >>${PDF_DIRECTORY_PATH}<<"
    net_bazzline_core_echo_if_be_verbose " TEMPORARY_FILE_PATH >>${TEMPORARY_FILE_PATH}<<"
    #find all md files and put the file path into a temporary file
    # (the embedded sh -c 'printf "${0:2}\n"' strips the leading "./")
    find . -iname "${FILE_PATTERN}" -type f -exec sh -c 'printf "${0:2}\n"' {} \; > "${TEMPORARY_FILE_PATH}"
    NUMBER_OF_FOUND_FILES=$(wc -l < "${TEMPORARY_FILE_PATH}")
    if [[ ${NUMBER_OF_FOUND_FILES} -gt 0 ]];
    then
        net_bazzline_core_echo_if_be_verbose ":: Processing >>${NUMBER_OF_FOUND_FILES}<< *.md files from >>${CURRENT_WORKING_DIRECTORY}<< or below."
        /usr/bin/mkdir -p "${PDF_DIRECTORY_PATH}" || echo ":: Could not create directory >>${PDF_DIRECTORY_PATH}<<"
        local OUTPUT_FILE_PATH
        local FILE_BASEDIR
        local FILE_BASENAME
        local FILE_NAMES_TO_MERGE
        while read -r FILE_PATH;
        do
            # Flatten the relative path into a single file name
            # (slashes -> dashes) so all PDFs land in one directory
            # without collisions.
            OUTPUT_FILE_PATH=$(echo "${FILE_PATH}" | sed 's/\//-/g')
            FILE_BASEDIR=$(dirname "${FILE_PATH}")
            FILE_BASENAME=$(basename "${FILE_PATH}")
            net_bazzline_core_echo_if_be_verbose " FILE_BASEDIR >>${FILE_BASEDIR}<<"
            net_bazzline_core_echo_if_be_verbose " FILE_BASENAME >>${FILE_BASENAME}<<"
            net_bazzline_core_echo_if_be_verbose " OUTPUT_FILE_PATH >>${OUTPUT_FILE_PATH}<<"
            cd "${FILE_BASEDIR}" || echo "Could not change into directory >>${FILE_BASEDIR}<<."
            if [[ -f "${FILE_BASENAME}" ]];
            then
                net_bazzline_core_echo_if_be_verbose " Executing >>pandoc --standalone \"${FILE_BASENAME}\" --output=\"${PDF_DIRECTORY_PATH}/${OUTPUT_FILE_PATH}.pdf\"<<"
                pandoc --standalone "${FILE_BASENAME}" --output="${PDF_DIRECTORY_PATH}/${OUTPUT_FILE_PATH}.pdf"
            else
                echo " Skipping file >>${FILE_BASENAME}<<, does not exist."
            fi
            cd - || echo "Could not change into previous directory."
        done < "${TEMPORARY_FILE_PATH}"
        cd "${CURRENT_WORKING_DIRECTORY}" || echo "Could not change into directory >>${CURRENT_WORKING_DIRECTORY}<<"
        FILE_NAMES_TO_MERGE=$(ls "${PDF_DIRECTORY_PATH}"/*.pdf)
        net_bazzline_core_echo_if_be_verbose ":: Creating file >>${MERGED_PDF_FILE_NAME}<< by using all files from >>${PDF_DIRECTORY_PATH}<<."
        net_bazzline_core_echo_if_be_verbose " Executing >>gs -dNOPAUSE -sDEVICE=pdfwrite -sOUTPUTFILE=\"${MERGED_PDF_FILE_NAME}\" -dBATCH ${FILE_NAMES_TO_MERGE}<<"
        # shellcheck disable=SC2086
        gs -dNOPAUSE -sDEVICE=pdfwrite -sOUTPUTFILE="${MERGED_PDF_FILE_NAME}" -dBATCH ${FILE_NAMES_TO_MERGE}
    else
        echo ":: No files found for pattern >>${FILE_PATTERN}<< in >>${CURRENT_WORKING_DIRECTORY}<< or below."
    fi
    net_bazzline_core_echo_if_be_verbose ":: Removing temporary directory >>${TEMPORARY_DIRECTORY}<<."
    rm -fr "${TEMPORARY_DIRECTORY}"
}
####
# @param: <string: source_file_path>
# [@param: <string: destination_file_path=source_file_path.compressed.pdf>]
# [@param: <int: image resolution=150]
# [@param: <string: pdf setting=/ebook]
#
# @see: https://opensource.com/article/20/8/reduce-pdf
####
function net_bazzline_media_book_compress_pdf ()
{
    # NOTE(review): the previous body of this definition had been corrupted
    # by a stray paste of the markdown-merge logic (it referenced variables
    # like TEMPORARY_FILE_PATH that are never set here, inside the branch
    # taken when gs is *missing*), and only appeared to work because the
    # same function name is redefined correctly further below. Restored to
    # the intended implementation.
    #bo: user input
    DESTINATION_FILE_PATH=${2:-''}
    IMAGE_RESOLUTION=${3:-150}
    PDF_SETTINGS=${4:-'/ebook'}
    SOURCE_FILE_PATH=${1}
    #eo: user input
    #bo: input validation
    if [[ ${DESTINATION_FILE_PATH} == '' ]];
    then
        #assumed the file ends with .pdf, we are removing the last four
        # characters and adding >>.compressed.pdf<<.
        DESTINATION_FILE_PATH="${SOURCE_FILE_PATH:0:-4}.compressed.pdf"
    fi
    if [[ ! -f "${SOURCE_FILE_PATH}" ]];
    then
        echo ":: Provided source file path >>${SOURCE_FILE_PATH}<< does not exist."
        return 1
    fi
    # "file -b" prints e.g. "PDF document, version 1.5"; compare the prefix.
    SOURCE_FILE_PATH_TYPE=$(file -b "${SOURCE_FILE_PATH}");
    if [[ "${SOURCE_FILE_PATH_TYPE:0:12}" != "PDF document" ]];
    then
        echo ":: Provided source file path >>${SOURCE_FILE_PATH}<< is not a pdf document."
        return 2
    fi
    #eo: input validation
    # The substitution must be quoted: unquoted, a missing gs expands to
    # nothing and [[ ! -x ]] evaluates to false, so the guard never fires.
    if [[ ! -x "$(command -v gs)" ]];
    then
        echo ":: gs is not installed. Please install it and run this command again."
        return 3
    fi
    gs -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=${PDF_SETTINGS} -dNOPAUSE -dBATCH -dColorImageResolution=${IMAGE_RESOLUTION} -sOutputFile="${DESTINATION_FILE_PATH}" "${SOURCE_FILE_PATH}"
}
####
# @param: <string: source_file_path>
# [@param: <string: destination_file_path=source_file_path.compressed.pdf>]
# [@param: <int: image resolution=150]
# [@param: <string: pdf setting=/ebook]
#
# @see: https://opensource.com/article/20/8/reduce-pdf
####
# NOTE(review): this redefines the function of the same name declared
# earlier in this file; bash keeps the last definition, so this is the one
# that actually runs.
function net_bazzline_media_book_compress_pdf ()
{
#bo: user input
# NOTE(review): these are not declared "local", so they leak into the
# sourcing shell -- confirm nothing depends on that before adding local.
DESTINATION_FILE_PATH=${2:-''}
IMAGE_RESOLUTION=${3:-150}
PDF_SETTINGS=${4:-'/ebook'}
SOURCE_FILE_PATH=${1}
#eo: user input
#bo: input validation
if [[ ${DESTINATION_FILE_PATH} == '' ]];
then
#assumed the file ends with .pdf, we are removing the last four
# characters and adding >>.compressed.pdf<<.
DESTINATION_FILE_PATH="${SOURCE_FILE_PATH:0:-4}.compressed.pdf"
fi
if [[ ! -f "${SOURCE_FILE_PATH}" ]];
then
echo ":: Provided source file path >>${SOURCE_FILE_PATH}<< does not exist."
return 1
fi
# "file -b" prints e.g. "PDF document, version 1.5"; compare the prefix.
SOURCE_FILE_PATH_TYPE=$(file -b "${SOURCE_FILE_PATH}");
if [[ "${SOURCE_FILE_PATH_TYPE:0:12}" != "PDF document" ]];
then
echo ":: Provided source file path >>${SOURCE_FILE_PATH}<< is not a pdf document."
return 2
fi
#eo: input validation
# NOTE(review): with gs missing, the unquoted $(command -v gs) expands to
# nothing and [[ ! -x ]] evaluates to false, so this guard never triggers
# -- quote the substitution to make the check effective.
if [[ ! -x $(command -v gs) ]];
then
echo ":: gs is not installed. Please install it and run this command again."
return 3
fi
gs -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=${PDF_SETTINGS} -dNOPAUSE -dBATCH -dColorImageResolution=${IMAGE_RESOLUTION} -sOutputFile="${DESTINATION_FILE_PATH}" "${SOURCE_FILE_PATH}"
}
| true
|
bf7dd72fbbfcf94970b00aefb5fc4927b2d6a77a
|
Shell
|
concher009/enlive
|
/08_09_lowrank/knee/0_prep.sh
|
UTF-8
| 556
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Prepare the knee demo data: (optionally) download the fully sampled
# k-space, extract one slice, build an undersampling pattern and apply it.
set -euo pipefail
# defines $BART (path to the BART toolbox binary)
source ../../BART.sh
# one-time download + slice extraction; flip "false" to "true" to re-run it
if false;
then
cd data
wget http://old.mridata.org/knees/fully_sampled/p3/e1/s1/P3.zip
unzip P3.zip
cd ..
#extract single slice
$BART fft -u -i 1 data/kspace data/tmp_fft
$BART slice 0 160 data/tmp_fft data/single_slice
fi
# root-of-sum-of-squares over dimension bitmask 8 (presumably the coil
# dimension -- TODO confirm against the dataset layout)
$BART rss 8 data/single_slice data/tmp_rss
# threshold the rss image (-H 21: hard threshold -- confirm semantics) and
# turn the surviving locations into a binary sampling pattern
$BART threshold -H 21 data/tmp_rss data/tmp_pat
$BART pattern data/tmp_pat data/pat
# multiply the pattern onto the slice -> retrospectively undersampled k-space
$BART fmac data/pat data/single_slice data/tmp_full
#scale maximum to about 1
$BART scale 1e-8 data/tmp_full data/full
rm data/tmp_*
| true
|
961391155ff80482a0fc9d89b04d1ef58eccff03
|
Shell
|
ding-lab/SeqQEst
|
/src_qc_bulk/hlaQC.fq.call_hla.sh
|
UTF-8
| 3,089
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Hua Sun
# 2021-03-01;
# HLA QC step: fish HLA reads out of a paired fastq set and call the HLA
# genotype with OptiType.
# bash run.sh -T dna -N sampleName -O ./OUT -B sample.bam
# -C config.ini
# -P bwa/razers3/optitype
# -N sampleName
# -T dna/rna
# -B *.bam
# -O outdir
# output outdir/sample/...
# defaults; all of these can be overridden by the flags parsed below
CONFIG=""
TYPE="dna"
pipeline="bwa"
while getopts "C:P:N:T:1:2:O:" opt; do
  case $opt in
    C)
      CONFIG=$OPTARG
      ;;
    P)
      pipeline=$OPTARG
      ;;
    N)
      NAME=$OPTARG
      ;;
    T)
      TYPE=$OPTARG
      ;;
    1)
      FQ1=$OPTARG
      ;;
    2)
      FQ2=$OPTARG
      ;;
    O)
      OUTDIR=$OPTARG
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      exit 1
      ;;
  esac
done
# per-sample output directory: <outdir>/<sample name>
mkdir -p $OUTDIR
OUT=$OUTDIR/$NAME
mkdir -p $OUT
# load contig.ini -- provides tool paths ($BWA, $SAMTOOLS, $OptiTypePipeline),
# $HLA_REF_DNA/$HLA_REF_RNA and $config_for_optiType
source ${CONFIG}
if [ ! -e $FQ1 ]; then
    echo "[ERROR] The FastQ_1 of $NAME does not exist - $FQ1 ..." >&2
    exit 1
fi
if [ ! -e $FQ2 ]; then
    echo "[ERROR] The FastQ_2 of $NAME does not exist - $FQ2 ..." >&2
    exit 1
fi
# pick the HLA reference matching the sequencing type
HLA_FASTA=''
if [[ $TYPE == "dna" ]] || [[ $TYPE == "DNA" ]] || [[ $TYPE == "WES" ]] || [[ $TYPE == "WGS" ]];then HLA_FASTA=$HLA_REF_DNA; fi
if [[ $TYPE == "rna" ]] || [[ $TYPE == "RNA" ]] || [[ $TYPE == "RNA-Seq" ]];then HLA_FASTA=$HLA_REF_RNA; fi
##===========================##
## Set function
##===========================##

# Run OptiType on the fished fastq pair, then terminate the whole script.
# Reads globals: TYPE, OptiTypePipeline, OUT, NAME, config_for_optiType.
function run_optiType {
    local _mode=""
    case "$TYPE" in
        dna|DNA|WES|WGS) _mode="--dna" ;;
        rna|RNA|RNA-Seq) _mode="--rna" ;;
    esac
    if [[ -n $_mode ]]; then
        $OptiTypePipeline -i $OUT/$NAME.fished_1.fastq $OUT/$NAME.fished_2.fastq $_mode -o $OUT --config ${config_for_optiType}
    fi
    exit 0
}
##===========================##
## Main
##===========================##

##------------- OptiTypePipeline (only using it re-run optitype)
# NOTE(review): the -P help text says "optitype" while this comparison expects
# "optiType" -- confirm what callers actually pass.
if [[ $pipeline == "optiType" ]]; then
    # any existing *.tsv under $OUT means OptiType already produced a genotype;
    # 2>/dev/null keeps ls quiet when no result directory exists yet
    n=`ls $OUT/*/*.tsv 2>/dev/null | wc -l`
    # bugfix: [[ $n > 0 ]] compares *strings*; use an arithmetic test
    if [[ $n -gt 0 ]];then
        echo "[INFO] The $OUT/$NAME already exist HLA genotype ..." >&2
        exit 0
    fi
    run_optiType
    exit 0
fi

##------------- bwa
if [[ $pipeline == "bwa" ]]; then
    # R1: keep only reads mapping to the HLA reference (-F 4 drops unmapped),
    # remove duplicates, and convert back to fastq for OptiType
    $BWA mem -t 8 $HLA_FASTA $FQ1 | $SAMTOOLS view -Shb -F 4 -o $OUT/$NAME.fished_1.bam -
    $SAMTOOLS rmdup -s $OUT/$NAME.fished_1.bam $OUT/$NAME.fished_1.rmdup.bam
    $SAMTOOLS bam2fq $OUT/$NAME.fished_1.rmdup.bam > $OUT/$NAME.fished_1.fastq
    ## remove temp file
    rm -f $OUT/$NAME.fished_1.bam $OUT/$NAME.fished_1.rmdup.bam
    # R2: same fishing step for the second read file
    $BWA mem -t 8 $HLA_FASTA $FQ2 | $SAMTOOLS view -Shb -F 4 -o $OUT/$NAME.fished_2.bam -
    $SAMTOOLS rmdup -s $OUT/$NAME.fished_2.bam $OUT/$NAME.fished_2.rmdup.bam
    $SAMTOOLS bam2fq $OUT/$NAME.fished_2.rmdup.bam > $OUT/$NAME.fished_2.fastq
    ## remove temp file
    rm -f $OUT/$NAME.fished_2.bam $OUT/$NAME.fished_2.rmdup.bam
    # run OptiType (exits the script)
    run_optiType
fi

#rm -f $OUT/$NAME.fished_1.fastq $OUT/$NAME.fished_2.fastq
| true
|
674477b20f3a8f45c7a70589bbd27c71864cd5c8
|
Shell
|
flaccid/machine-image-tools
|
/scripts/qcow2_to_raw.sh
|
UTF-8
| 289
| 4
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/sh -e
# usage: qcow2_to_raw.sh [qcow2_image_path]
# Converts a qcow2 image into a raw image named <basename>.img next to the
# current working directory.
[ -z "$1" ] && echo 'no image file provided, exiting.' && exit 1
image="$1"
# bugfix: quote the path (and stop option parsing with --) so image files
# whose names contain spaces or start with '-' work
base="$(basename -- "$image")"
image_base="${base%.*}"
# convert qcow2 to raw; -O raw makes the (default) output format explicit
echo "Converting $base to raw..."
qemu-img convert -O raw "$image" "$image_base.img"
|
6ad65cc0530db5086d747dbf8e10c4f744fa50d7
|
Shell
|
tejas002/miniature-octo-engine
|
/octo-engine.sh
|
UTF-8
| 532
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Run every script found in the target folder, capturing each run's output
# into a per-run, timestamped log directory.
echo 'Starting the scripts'
#sleep 5
#folder path
for file in /home/Downloads/testing/scripts/m/*
do
# bugfix: when the folder is empty/missing the glob stays literal and the old
# code tried to execute the pattern itself -- skip non-existent entries
[ -e "$file" ] || continue
echo "File path - $file"
#generating the timestamp - DD-MM-YYYY-HH:MM:SS
dt=$(date '+%d-%m-%Y-%H:%M:%S');
#generating log file name
logfilename=$file$dt
echo "Current Time - $dt"
echo "Log File Path - $logfilename"
#making log directory (quoted so paths with spaces survive)
mkdir "$logfilename"
bash "$file" > "${logfilename}/$dt"
#sleep for 40 seconds
sleep 40
done
echo 'Done!!!'
| true
|
dc1c8b5f47ac00b24eb45045104d9811654a3416
|
Shell
|
santiagomok/dotfiles
|
/macos/config/zsh/zshrc
|
UTF-8
| 1,710
| 3.125
| 3
|
[] |
no_license
|
# slimzsh prompt/completion framework, sourced when present:
# git clone --recursive https://github.com/changs/slimzsh.git ~/.config/slimzsh
[ -f $HOME/.config/slimzsh/slim.zsh ] && source "$HOME/.config/slimzsh/slim.zsh"
# exit() {
# # TMUX override builtin exit() function
# if [[ -z $TMUX ]]; then
# builtin exit
# return
# fi
# panes=$(tmux list-panes | wc -l)
# windows=$(tmux list-windows | wc -l)
# count=$(($panes + $windows - 1))
# if [ $count -eq 1 ]; then
# tmux detach
# else
# builtin exit
# fi
# }
# Base16 Shell
# git clone https://github.com/chriskempson/base16-shell.git ~/.config/theme/base16-shell
# BASE16_SHELL="$HOME/.config/base16-shell/"
# [ -n "$PS1" ] && \
# [ -s "$BASE16_SHELL/profile_helper.sh" ] && \
# source "$BASE16_SHELL/profile_helper.sh"
# base16_default-dark
# base16_helios
# base16_onedark
# Local config: autoloadable functions plus per-topic export/alias files
config_path="${HOME}/.config/zsh"
fpath=( "$config_path/function" $fpath )
autoload -Uz $config_path/function/**/*
[ -f $config_path/export.zsh ] && source "$config_path/export.zsh"
[ -f $config_path/alias.zsh ] && source "$config_path/alias.zsh"
# fzf keybindings/completion, if installed
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/Users/santiago/Developer/miniconda/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
    eval "$__conda_setup"
else
    if [ -f "/Users/santiago/Developer/miniconda/etc/profile.d/conda.sh" ]; then
        . "/Users/santiago/Developer/miniconda/etc/profile.d/conda.sh"
    else
        export PATH="/Users/santiago/Developer/miniconda/bin:$PATH"
    fi
fi
unset __conda_setup
# <<< conda initialize <<<
| true
|
5b3a3adfa4f620f714343766466684fb73c37794
|
Shell
|
jdesfossez/tracers-benchmark
|
/scripts/calibrate/v3.0/calibrate.sh
|
UTF-8
| 1,630
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Read the benchmark settings out of ./calibrate.param and publish them as
# globals: event_rates, sample_size, change_cpus_governor_path, load_path.
# Each grep result is first captured as a word array; the leading label words
# are then sliced off with ${arr[@]:N} and the remainder collapses back into
# a single space-separated string (scalar assignment of an array expansion).
function load_params {
    event_rates=($(grep "event rates" < calibrate.param))
    event_rates=${event_rates[@]:3}
    sample_size=($(grep "sample size:" < calibrate.param))
    sample_size=${sample_size[@]:2}
    change_cpus_governor_path=($(grep "change cpu governor path" < calibrate.param))
    change_cpus_governor_path=${change_cpus_governor_path[@]:4}
    load_path=($(grep "load path:" < calibrate.param))
    load_path=${load_path[@]:2}
}
# Execute one calibration run of the load generator and append the measured
# mean to <load_name>_calibrate.csv.
# Globals read: load_name, delay, sample_size, no_thread.
# Consumes and removes the "sample" and "statistics" files the load writes.
function run
{
    # bugfix: the assignment previously carried a stray "> /dev/null", which
    # redirected nothing (assignments produce no output) and was misleading
    cmd="./$load_name -d $delay -s $sample_size -t $no_thread"
    echo $cmd
    $cmd > /dev/null
    # "statistics" holds a line matching "mean"; its second word is the value
    mean=($(grep mean < statistics))
    echo "mean: ${mean[1]}, delay: $delay, no_thread: $no_thread"
    echo "${mean[1]},$delay,$no_thread" >> ${load_name}_calibrate.csv
    rm sample
    rm statistics
}
# run from the script's own directory (all artifacts are relative paths)
cd $(dirname $0)
# "clean" mode: wipe generated artifacts and leave
if [ "$1" == "clean" ]; then
    rm *.pdf
    rm *.csv
    rm *.result
    exit
fi
no_cpus=$(nproc)
load_name=getuid_pthread
# read event_rates / sample_size / change_cpus_governor_path / load_path
load_params
# build the load generator if needed and stage it next to this script
if [ ! -f "$load_path/$load_name" ]; then make -C $load_path; fi
cp $load_path/$load_name .
# thread counts to test: powers of two up to the cpu count
no_threads=""
no_thread=1
while [ $no_thread -le $no_cpus ]; do
    no_threads="$no_threads $no_thread"
    no_thread=$(($no_thread * 2))
done
delays="1 4 16 64 256 1024 4096 16384"
# pin the cpu frequency while measuring
$change_cpus_governor_path performance
# pass 1: sweep every (thread count, delay) pair
echo "mean,delay,no_th" > ${load_name}_calibrate.csv
for no_thread in $no_threads; do
    for delay in $delays; do
        run
    done
done
# fit delays for the requested event rates (fit_delay.R presumably consumes
# the csv above and writes the calibrate_noth=*.result files -- confirm)
Rscript fit_delay.R $event_rates > /dev/null
# pass 2: re-run using the fitted delay (4th column of each .result line)
echo "mean,delay,no_th" > ${load_name}_calibrate.csv
for no_thread in $no_threads; do
    while read line; do
        line=($line)
        delay=${line[3]}
        run
    done < "calibrate_noth=${no_thread}.result"
done
$change_cpus_governor_path powersave
rm $load_name
| true
|
34133138447447680c55f5b6e56ca10cce39e834
|
Shell
|
ap-qio/queryio
|
/queryio-parent/bin/configure-linux.sh
|
UTF-8
| 5,573
| 3.15625
| 3
|
[] |
no_license
|
export LC_CTYPE=C
export LANG=C

USER_INSTALL_DIR="$(dirname "$( cd "$( dirname "$0" )" && pwd )")"
USER_PACKAGE_INSTALL_DIR='$HOME/QueryIOPackage'
echo "Installation Directory: $USER_INSTALL_DIR"

PROP_FILE=$USER_INSTALL_DIR/bin/qio-setup.properties
# pulls in the setup values (CustomDBPass, CustomDBUser, DB_PORT1, ...)
# substituted into the templates below
source $PROP_FILE

# best guess for the primary non-loopback IPv4 address of this host
LOCAL_IP=`ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1' | head -1`
if [ -z "$LOCAL_IP" ]
then
    LOCAL_IP='127.0.0.1'
fi
# give the operator ten seconds to override the detected IP
read -t 10 -p "Enter IP(Leave blank if the displayed IP is correct): $LOCAL_IP : " IP_TO_USE
IP_TO_USE=${IP_TO_USE:-$LOCAL_IP}
IP=$IP_TO_USE
SSH_HOSTNAME=$IP_TO_USE
DB_SSH_HOSTNAME=$IP_TO_USE

chmod -R +x $USER_INSTALL_DIR

# Replace every >>$NAME$<< placeholder in the deployable text files below the
# install dir with the current value of the shell variable NAME, then report.
# '~' is the sed delimiter, so values containing '~' (or '&') would still
# break the substitution -- same limitation as the original inline commands.
replace_placeholder()
{
    local placeholder="$1"
    local value="$2"
    find $USER_INSTALL_DIR -type f \( -name "*.properties" -or -name "*.sh" -or -name "*.script" -or -name "*.xml" -or -name "*.js" \) -print0 | xargs -0 sed -i 's~\$'"${placeholder}"'\$~'"${value}"'~g'
    echo "Done ${placeholder}"
}

# One call per placeholder, in the original substitution order.
# ${!name} is bash indirect expansion: the value of the variable named $name.
for name in USER_INSTALL_DIR USER_PACKAGE_INSTALL_DIR CustomDBPass CustomDBUser \
    DB_PORT1 DB_PORT2 IP IS_INSTALL QIO_EMAIL QIO_FNAME QIO_LNAME QIO_PASSWORD \
    QIO_USER SHUTDOWN_PORT STARTUP_PORT SysDBPass SysDBUser ADD_USER \
    SSH_HOSTNAME DB_SSH_HOSTNAME
do
    replace_placeholder "$name" "${!name}"
done

cp $USER_INSTALL_DIR/bin/.queryio.install $HOME
sh $USER_INSTALL_DIR/bin/mvScript.sh $USER_INSTALL_DIR
| true
|
c4a6e4ff7bd0edaa2eba4d85e0477687b4fa6d68
|
Shell
|
LeonardoRick/bash-scripts
|
/load_env.sh
|
UTF-8
| 291
| 3.71875
| 4
|
[] |
no_license
|
# Export the vars in .env into your shell:
# (grep -E replaces the deprecated egrep; NOTE: values containing spaces or
# '#' are not supported by this xargs-based approach)
export $(grep -Ev '^#' .env | xargs)

# read VERSION from .env
# bugfix: anchor the match so lines merely *containing* "VERSION"
# (e.g. APP_VERSION=...) can no longer be picked up instead
VERSION=$(grep '^VERSION=' .env | xargs)
IFS='=' read -ra VERSION <<< "$VERSION"
VERSION=${VERSION[1]}
if [[ -z "$VERSION" ]]; then
    echo 'set VERSION variable properly on .env file';
fi
| true
|
0daaf1bfd0580ac08b6eb684e635f4d8b7fcdc00
|
Shell
|
JJK96/dotfiles
|
/bin/hostnames.sh
|
UTF-8
| 423
| 3.515625
| 4
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
# Resolve every hostname listed (whitespace separated) in the given file and
# print name + resolved addresses for the ones that resolve.
if [ $# -lt 1 ]; then
    echo "Usage: $0 <hostnames_file>"
    exit 1
fi
hostnames="$1"
# intentionally unquoted: the file may hold several names per line
for u in $(cat "$hostnames"); do
    lookup=$(nslookup "$u" 2>&1)
    # resolution failed when nslookup reports "can't find"
    if ! printf '%s' "$lookup" | grep -qi "can't find"; then
        # everything after the "answer" marker; the Address lines hold the IPs
        answer=$(printf '%s' "$lookup" | grep -A 999999 "answer" | grep "Address" | cut -d " " -f 2)
        echo "$u"
        echo "$answer"
        echo "---------------------"
    fi
done
| true
|
a32d8cf9d1b41ba423179305294b1e4fd6d382b6
|
Shell
|
manuella/submissionScriptCSC261
|
/script/submissionScript
|
UTF-8
| 4,005
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Author: Evan Manuella
#
# Build a submission bundle from the files named on the command line:
#  * driver files (name ends in "]", e.g. prog.scm] / prog.c]) are executed,
#    and both their pretty-printed source and captured output become pdfs
#  * plain *.scm / *.c sources are pretty-printed to pdf
# All pdfs are concatenated into submission.pdf and archived together with
# the sources into submission.tar.

: <<'ExplainForLoop'
Every iteration, var is going to be a new element of the command line arguments
First, we check if it is has the "*]" pattern, if it does
-we compile it (depending on whether it is *scm or *c)
-store the output as a file
-convert the source to .pdf
Elif, we create change them to .pdfs
ExplainForLoop

counter_pdf=0
counter_source=0
status=0 #bugfix: initialize so the final "exit $status" is well defined on success

for var in "$@"
do
    file_name=$var
    #if a scheme driver
    if [ "${file_name%".scm]"}" != "$file_name" ] #if there is a "]" at the end (Driver)
    then
        driver_with_type=${file_name%]}
        driver=${driver_with_type%.scm}
        enscript -p - -Escheme -2 -r --color "$driver_with_type" | ps2pdf - "${driver}.pdf"
        touch "${driver}Transcript" #Storing the transcript here
        mzscheme -l lang/plt-pretty-big-text.ss -f "./${driver_with_type}" > "${driver}Transcript"
        enscript -p - -2 -r "${driver}Transcript" | ps2pdf - "${driver}Output.pdf"
        rm "${driver}Transcript"
        pdf_files[counter_pdf]="${driver}Output.pdf"
        ((counter_pdf++))
        pdf_files[counter_pdf]="${driver}.pdf"
        ((counter_pdf++))
        source_files[counter_source]="$driver.scm"
        ((counter_source++))
    #if a c driver
    elif [ "${file_name%.c]}" != "$file_name" ]
    then
        driver_with_type=${file_name%]}
        driver=${driver_with_type%.c} #bugfix: was ${file_name%.c]%}, which stripped nothing
        touch "${driver}Output" #output of .c file goes here
        gcc -o "compiled_${driver}" "./${driver_with_type}"
        ./"compiled_${driver}" > "${driver}Output" #bugfix: run via ./ instead of relying on PATH
        #pipe a2ps straight into ps2pdf instead of the old TempTOBEDELETED files
        a2ps "${driver}Output" -o - | ps2pdf - "${driver}Output.pdf"
        rm "compiled_${driver}"
        enscript -p - -2 -r "${driver_with_type}" | ps2pdf - "${driver}.pdf"
        rm "${driver}Output" #remove pesky temp file
        pdf_files[counter_pdf]="${driver}.pdf"
        ((counter_pdf++))
        pdf_files[counter_pdf]="${driver}Output.pdf"
        ((counter_pdf++))
        source_files[counter_source]=$driver_with_type
        ((counter_source++))
    elif [ "${file_name%.c}" != "$file_name" ] #then *.c
    then
        #bugfix: plain c sources were highlighted with -Escheme; use the c rules
        enscript -p - -Ec -2 -r --color "$file_name" | ps2pdf - "${file_name%.c}.pdf"
        pdf_files[counter_pdf]="${file_name%.c}.pdf"
        ((counter_pdf++))
        source_files[counter_source]=$file_name
        ((counter_source++))
    #if a scheme file
    elif [ "${file_name%.scm}" != "$file_name" ]; #Convert .scm file into .pdf and make sure it is not a driver
    then
        #bugfix: referenced the undefined $file_name_loc and incremented the
        # misspelled count_pdf, so plain .scm files were never bundled correctly
        enscript -p - -Escheme -2 -r --color "${file_name}" | ps2pdf - "${file_name%.scm}.pdf"
        pdf_files[counter_pdf]=${file_name%.scm}.pdf
        ((counter_pdf++))
        source_files[counter_source]=${file_name}
        ((counter_source++))
    #note: an unreachable duplicate *.c branch used to sit here and was removed
    else #file is not of recognized format
        echo "File ${file_name} does not match recognized formats, Ignored"
        status=1
    fi
done

echo "${pdf_files[@]}"
echo "${source_files[@]}"

pdfconcat -o submission.pdf "${pdf_files[@]}"
tar cf submission.tar submission.pdf "${source_files[@]}"

#Any formatting issues reported via $status
exit $status
| true
|
803c6107c1e04b6c7afe1f28fc050cf1a1e78953
|
Shell
|
AKArrow/ProblemStatements
|
/prob1.sh
|
UTF-8
| 681
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# bugfix: the shebang was "#!/bin/bash/ -x" -- a non-existent interpreter path.
# Roll a six-sided die ten times, tally the outcomes in an associative array,
# then report which face came up most and least often (ties resolved to the
# last matching face in iteration order).
declare -A aa
# start every face with a zero count
for((i=1;i<=6;i++))
do
aa+=([$i]=0)
done
# ten random rolls in 1..6
for((i=1;i<=10;i++))
do
die=$((RANDOM%6+1))
((aa[$die]++))
done
echo ${!aa[@]}
echo ${aa[@]}
# find the maximum count
max=0
for((i=1;i<=6;i++))
do
if [ ${aa[$i]} -gt $max ]
then
max=${aa[$i]};
fi
done
for i in "${!aa[@]}"; do
    if [[ "${aa[$i]}" = "${max}" ]]; then
        loc=${i};
    fi
done
echo $loc "Number Repeated" $max "Times That Maximum"
# find the minimum count (start from the known maximum)
min=$max
for((i=1;i<=6;i++))
do
if [ ${aa[$i]} -le $min ]
then
min=${aa[$i]};
fi
done
for i in "${!aa[@]}"; do
    if [[ "${aa[$i]}" = "${min}" ]]; then
        loc=${i};
    fi
done
echo $loc "Number Repeated" $min "Times That Minimum"
| true
|
132ba0ee2b479dba858c3697533889311c270a66
|
Shell
|
kjellalmgren/tflite1
|
/get_pi_requirements.sh
|
UTF-8
| 1,744
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the Raspberry Pi prerequisites for TFLite object detection:
# OpenCV native libraries, the optional Coral Edge TPU apt repo, and a
# TensorFlow wheel matching the installed Python 3 version.

# Get packages required for OpenCV
sudo apt-get -y install libjpeg-dev libtiff5-dev libjasper-dev libpng12-dev
sudo apt-get -y install libavcodec-dev libavformat-dev libswscale-dev libv4l-dev
sudo apt-get -y install libxvidcore-dev libx264-dev
sudo apt-get -y install qt4-dev-tools libatlas-base-dev
pip3 install opencv-python==3.4.6.27

##################################################
# Install edge TPU runtime
echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
sudo apt-get update
# Install standard
#sudo apt-get install libedgetpu1-std
# Install MAX performance
#sudo apt-get install libedgetpu1-max

##################################################
# Get packages required for TensorFlow
# For now, downloading the TensorFlow builds from lhelontra's "TensorFlow on ARM" repository
# Thanks lhelontra for being super awesome!
# Will change to just 'pip3 install tensorflow' once newer versions of TF are added to piwheels
#pip3 install tensorflow

# bugfix: query python3 explicitly -- "python" may resolve to Python 2 (or be
# absent), making both version checks below fail so no wheel was installed,
# even though the rest of this script targets Python 3 (pip3).
version=$(python3 -c 'import sys; print(".".join(map(str, sys.version_info[:2])))')

if [ "$version" == "3.7" ]; then
wget https://github.com/lhelontra/tensorflow-on-arm/releases/download/v2.0.0/tensorflow-2.0.0-cp37-none-linux_armv7l.whl
pip3 install tensorflow-2.0.0-cp37-none-linux_armv7l.whl
rm tensorflow-2.0.0-cp37-none-linux_armv7l.whl
fi

if [ "$version" == "3.5" ]; then
wget https://github.com/lhelontra/tensorflow-on-arm/releases/download/v1.14.0/tensorflow-1.14.0-cp35-none-linux_armv7l.whl
pip3 install tensorflow-1.14.0-cp35-none-linux_armv7l.whl
rm tensorflow-1.14.0-cp35-none-linux_armv7l.whl
fi
| true
|
ede131b79a5437804c1b91a756649dcf43ba92e6
|
Shell
|
heavysink/repo
|
/archlinuxcn/eka2l1-git/PKGBUILD
|
UTF-8
| 1,611
| 2.578125
| 3
|
[] |
no_license
|
# Maintainer: heavysink <winstonwu91@gmail.com>
_pkgname=eka2l1
pkgname="${_pkgname}-git"
# format: <commit count>.<short hash>; regenerated by the pkgver() function
pkgver=5287.e988060e1
pkgrel=1
pkgdesc="Experimental Symbian OS emulator (GIT version)"
arch=('x86_64')
url="https://github.com/EKA2L1/EKA2L1"
license=('GPL2')
makedepends=(
  'boost'
  'cmake'
  'git'
  'ccache'
  'glfw'
  'vulkan-headers'
  'python'
  'qt5-tools'
)
depends=(
  'boost-libs'
  'qt5-base'
  'qt5-svg'
  'freetype2'
  'pango'
  'vulkan-icd-loader'
  'gtk3'
  'sdl2'
)
provides=('eka2l1')
conflicts=('eka2l1')
# VCS checkout plus a local launcher script ("eka2l1")
source=(
  "${_pkgname}-git::git+https://github.com/EKA2L1/EKA2L1.git"
  "eka2l1"
)
# 'SKIP' for the VCS source; the checksum covers the launcher script
md5sums=('SKIP'
         '904f15dfd859ab3c10d7f1b9a78db41d')
# Derive the version as <commit count>.<short hash> from the checked out repo.
pkgver() {
  local _repo="${_pkgname}-git"
  local _count _hash
  _count=$(git -C "${_repo}" rev-list --count HEAD)
  _hash=$(git -C "${_repo}" rev-parse --short HEAD)
  printf "%s.%s" "${_count}" "${_hash}"
}
prepare() {
  cd "${srcdir}/${_pkgname}-git"
  # bundled third-party code lives in git submodules
  git submodule update --recursive --init
  # patch dynarmic's POSIX exception handler: change the constexpr stack size
  # into a runtime 'const' value (SIGSTKSZ is no longer a compile-time
  # constant on recent glibc) and disambiguate std::max via <size_t>
  sed -i 's/constexpr size_t signal_stack_size = std::max(SIGSTKSZ/const size_t signal_stack_size = std::max<size_t>(SIGSTKSZ/g' src/external/dynarmic/src/backend/x64/exception_handler_posix.cpp
}
build() {
  cd "${srcdir}/${_pkgname}-git"
  # out-of-source configure into ./build; terminal build and the Vulkan
  # backend are disabled, install prefix matches the package() layout
  cmake -B build -DCMAKE_BUILD_TYPE=Release -DEKA2L1_NO_TERMINAL=ON -DEKA2L1_ENABLE_UNEXPECTED_EXCEPTION_HANDLER=ON -DEKA2L1_BUILD_VULKAN_BACKEND=OFF -DCMAKE_INSTALL_PREFIX=/opt/eka2l1 .
  cd build
  make
}
package() {
  cd "${srcdir}/${_pkgname}-git/build"
  install -d -m 755 "${pkgdir}/opt"
  # NOTE(review): 777 makes /opt/eka2l1 world-writable, presumably so the
  # emulator can write its data there at runtime -- confirm this is intended
  install -d -m 777 "${pkgdir}/opt/eka2l1"
  install -d -m 755 "${pkgdir}/usr/bin"
  # ship the whole build output tree plus the launcher wrapper in /usr/bin
  cp -R "${srcdir}/${_pkgname}-git/build/bin/." "${pkgdir}/opt/eka2l1"
  install -m 755 ${srcdir}/eka2l1 "${pkgdir}/usr/bin/eka2l1"
}
| true
|
4f9ce0c51432fc5403550c0611a9e2cc027dc716
|
Shell
|
alfredoanton82/io.cfg
|
/scripts/installPhpSysInfo.sh
|
UTF-8
| 959
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# bugfix: the shebang was "#/bin/bash" (missing "!"), so the kernel never saw
# an interpreter line and the script ran under whatever shell invoked it.
# Download, unpack and wire up the latest PhpSysInfo release under /var/www.

# Define configuration path
CONFIG_PATH=/root/@server@.cfg/config

# abort when the web root is missing -- the rm/ln below must not run elsewhere
cd /var/www/ || exit 1

echo "Installing PhpSysInfo"
echo ""
# remove any previously unpacked release(s)
prevVer=$(ls -d phpsysinfo-*)
echo "Remove previous version: $prevVer"
# intentionally unquoted: $prevVer may hold several directories
rm -rf $prevVer

echo "Downloading PhpSysInfo..."
wget http://sourceforge.net/projects/phpsysinfo/files/latest/download?source=files --no-check-certificate -O phpsysinfo-latest.tar.gz

# Getting filename
dlfile=$(ls phpsysinfo*.tar.gz)
echo ""
echo "Uncompressing PhpSysInfo"
pv "$dlfile" | tar xfz -
rm -f "$dlfile"

# Getting folder
currVer=$(ls -d phpsysinfo-*)

# Copy configuration file
echo ""
echo "Setting configuration file"
cp -vf "$CONFIG_PATH/var/www/phpsysinfo/phpsysinfo.ini" "${currVer}/phpsysinfo.ini"

# Link folder to nginx default configuration
ln -sf "../$currVer" /var/www/html/phpsysinfo

# Update rights
chown -R www-data:www-data "$currVer"
chmod -R o-w "$currVer"

echo ""
echo "Restarting nginx"
systemctl restart nginx
systemctl status nginx
| true
|
7a2f7de9f0e86452ef06cf06aa60c018df64308f
|
Shell
|
lonord/docker-ss-http-proxy
|
/entrypoint.sh
|
UTF-8
| 1,475
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint: optionally seed the entropy pool (rngd), optionally
# start a privoxy http->socks5 bridge, then launch the requested shadowsocks
# module with its configuration.

# environment defaults; each can be overridden by a command line flag below
SS_CONFIG=${SS_CONFIG:-""}
SS_MODULE=${SS_MODULE:-"ss-server"}
RNGD_FLAG=${RNGD_FLAG:-"false"}
PRIVOXY_FLAG=${PRIVOXY_FLAG:-"false"}
PRIVOXY_LISTEN_PORT=${PRIVOXY_LISTEN_PORT:-""}
PRIVOXY_FORWARD_PORT=${PRIVOXY_FORWARD_PORT:-""}

while getopts "s:m:l:f:rp" opt; do
	case "$opt" in
		s) SS_CONFIG=$OPTARG ;;
		m) SS_MODULE=$OPTARG ;;
		l) PRIVOXY_LISTEN_PORT=$OPTARG ;;
		f) PRIVOXY_FORWARD_PORT=$OPTARG ;;
		r) RNGD_FLAG="true" ;;
		p) PRIVOXY_FLAG="true" ;;
	esac
done

if [[ "${RNGD_FLAG}" == "true" ]]; then
	echo -e "\033[32mUse /dev/urandom to quickly generate high-quality random numbers......\033[0m"
	rngd -r /dev/urandom
fi

# privoxy needs both ports to be configured before it can be started
if [[ "${PRIVOXY_FLAG}" == "true" && -n "${PRIVOXY_LISTEN_PORT}" && -n "${PRIVOXY_FORWARD_PORT}" ]]; then
	echo -e "\033[32mStarting privoxy......\033[0m"
	# start from the stock config, drop its listen-address lines, then append
	# our socks5 forward and listen settings
	cp /etc/privoxy/config ./privoxy_config
	sed -i '/^listen-address/d' privoxy_config
	{
		echo "forward-socks5 / 127.0.0.1:$PRIVOXY_FORWARD_PORT ."
		echo "listen-address 0.0.0.0:$PRIVOXY_LISTEN_PORT"
	} >> privoxy_config
	privoxy --no-daemon privoxy_config 2>&1 &
else
	echo -e "\033[33mPrivoxy not started......\033[0m"
fi

if [[ -n "${SS_CONFIG}" ]]; then
	echo -e "\033[32mStarting shadowsocks......\033[0m"
	${SS_MODULE} ${SS_CONFIG}
else
	echo -e "\033[31mError: SS_CONFIG is blank!\033[0m"
	exit 1
fi
| true
|
58626b0a21dcd82023f836e23b6bbe01ee155b4a
|
Shell
|
renowncoder/obs-deps
|
/CI/build-qt-macos.sh
|
UTF-8
| 3,468
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

##############################################################################
# macOS dependencies build script
##############################################################################
#
# This script pre-compiles all dependencies required to build OBS
#
# Parameters:
#   -h, --help                     : Print usage help
#   -q, --quiet                    : Suppress most build process output
#   -v, --verbose                  : Enable more verbose build process output
#   -a, --architecture             : Specify build architecture
#                                    (default: universal, alternative: x86_64
#                                    or arm64)\n"
#
##############################################################################

# Halt on errors (-E so ERR traps are inherited by functions and subshells)
set -eE

## SET UP ENVIRONMENT ##
_RUN_OBS_BUILD_SCRIPT=TRUE
PRODUCT_NAME="obs-qt"
# one entry per dependency: "<name> <version> <sha256>"
REQUIRED_DEPS=(
    "qt 5.15.2 3a530d1b243b5dec00bc54937455471aaa3e56849d2593edb8ded07228202240"
)

## MAIN SCRIPT FUNCTIONS ##
# Print the command line help text (see the file header for details).
print_usage() {
    echo -e "Usage: ${0}\n" \
        "-h, --help : Print this help\n" \
        "-q, --quiet : Suppress most build process output\n" \
        "-v, --verbose : Enable more verbose build process output\n" \
        "-a, --architecture : Specify build architecture (default: universal, alternative: x86_64 or arm64)\n" \
        "-s, --skip-dependency-checks : Skip Homebrew dependency checks (default: off)\n" \
        "--skip-unpack : Skip unpacking of Qt archive (default: off)\n"
}
####
# Main entry point: parse the command line flags, build every entry in
# REQUIRED_DEPS via its CI/macos/build_<name>.sh script, then pack the result
# into a tar.xz (per-architecture, or universal via a dedicated packager).
####
obs-qt-build-main() {
    # cleared again by --verbose below
    QMAKE_QUIET=TRUE
    CHECKOUT_DIR="$(/usr/bin/git rev-parse --show-toplevel)"
    # shared helpers: step/ensure_dir/caught_error/cleanup/_build_checks etc.
    source "${CHECKOUT_DIR}/CI/include/build_support.sh"
    source "${CHECKOUT_DIR}/CI/include/build_support_macos.sh"
    while true; do
        case "${1}" in
            -h | --help ) print_usage; exit 0 ;;
            -q | --quiet ) export QUIET=TRUE; shift ;;
            -v | --verbose ) export VERBOSE=TRUE; unset QMAKE_QUIET; shift ;;
            -a | --architecture ) ARCH="${2}"; shift 2 ;;
            -s | --skip-dependency-checks ) SKIP_DEP_CHECKS=TRUE; shift ;;
            --skip-unpack ) SKIP_UNPACK=TRUE; shift ;;
            -- ) shift; break ;;
            * ) break ;;
        esac
    done
    _build_checks
    ensure_dir "${CHECKOUT_DIR}"
    FILE_NAME="macos-deps-qt-${CURRENT_DATE}-${ARCH:-${CURRENT_ARCH}}.tar.xz"
    ORIG_PATH="${PATH}"
    for DEPENDENCY in "${REQUIRED_DEPS[@]}"; do
        # drop any hooks/state a previous dependency's build script defined
        unset -f _build_product
        unset -f _patch_product
        unset -f _install_product
        unset NOCONTINUE
        PATH="${ORIG_PATH}"
        # word-split the "name version hash" entry into ${1} ${2} ${3}
        # (intentionally unquoted)
        set -- ${DEPENDENCY}
        # report the failing dependency on any error (set -eE at file scope)
        trap "caught_error ${DEPENDENCY}" ERR
        # extra pinned pcre sources consumed only by the swig build script
        if [ "${1}" = "swig" ]; then
            PCRE_VERSION="8.44"
            PCRE_HASH="19108658b23b3ec5058edc9f66ac545ea19f9537234be1ec62b714c84399366d"
        fi
        PRODUCT_NAME="${1}"
        PRODUCT_VERSION="${2}"
        PRODUCT_HASH="${3}"
        source "${CHECKOUT_DIR}/CI/macos/build_${1}.sh"
    done
    if [ "${ARCH}" = "universal" ]; then
        source "${CHECKOUT_DIR}/CI/package-universal-qt-macos.sh"
    else
        cd "${CHECKOUT_DIR}/macos/obs-dependencies-qt-${ARCH}"
        cp -R "${CHECKOUT_DIR}/licenses" .
        step "Create archive ${FILE_NAME}"
        XZ_OPT=-T0 tar -cJf "${FILE_NAME}" *
        mv ${FILE_NAME} ..
    fi
    cleanup
}
# bugfix: forward the arguments verbatim -- $* re-splits quoted arguments
obs-qt-build-main "$@"
| true
|
a6f9d52db66a60f6fe2e561eb15460d052f16a0f
|
Shell
|
reshinto/dotfiles
|
/mac/.zshrc
|
UTF-8
| 11,378
| 3.03125
| 3
|
[] |
no_license
|
# get disk size
# NOTE(review): the device name /dev/disk1s4s1 is machine-specific -- on any
# other volume layout this awk filter matches nothing and prints no line.
df / | sed '1d' |
awk '
/^\/dev\/disk1s4s1/ {
size_byte = $2 * 512 # df uses 512 byte blocks
avail_byte = $4 * 512
total_size_gb = size_byte / 1000000000
total_avail_gb = avail_byte / 1000000000
printf "Size: %.1f GB  Free: %.1f GB\n", total_size_gb, total_avail_gb
}
'
# Display neofetch application
neofetch
# If you come from bash you might have to change your $PATH.
#export PATH=$HOME/bin:/usr/local/bin:$PATH
# Anaconda path
export PATH="/usr/local/anaconda3/bin:$PATH"
# Mac OS software update path
export PATH="/usr/sbin/:$PATH"
# Homebrew path
export PATH="/usr/local/sbin:$PATH"
export PATH="/usr/local/bin:$PATH"
# port path
export PATH="/opt/local/bin:$PATH"
export PATH="/opt/local/sbin:$PATH"
# Path to your oh-my-zsh installation.
export ZSH="$HOME/.oh-my-zsh"
# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
# ZSH_THEME="robbyrussell"
# custom theme file expected under $ZSH/themes (or custom/themes)
ZSH_THEME="mytheme"
# Set list of themes to pick from when loading at random
# Setting this variable when ZSH_THEME=random will cause zsh to load
# a theme from this variable instead of looking in ~/.oh-my-zsh/themes/
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
export UPDATE_ZSH_DAYS=1
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load?
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(
git
python
heroku
osx
zsh_reload
zsh-syntax-highlighting
zsh-autosuggestions
)
source $ZSH/oh-my-zsh.sh
# User configuration
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
export EDITOR=vim
# Set default git editor
export GIT_EDITOR=vim
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# ssh
# export SSH_KEY_PATH="~/.ssh/rsa_id"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# python3 path (installed via homebrew)
alias python3="/usr/local/bin/python3"
# update and upgrade everything
# automate sudo command without input password with "echo password | sudo -S command"
alias sue="sudo softwareupdate -i -a && brew update && brew upgrade && brew cleanup && brew cleanup -s && conda clean -ay"
# auto remove everything and clean
alias aur="brew cleanup && brew cleanup -s && conda clean -ay"
# Java home path
# export JAVA_HOME="`/usr/libexec/java_home -v 10`"
# email and password
source $HOME/Dropbox/share/.authentication.sh
# api keys
source $HOME/Dropbox/share/.apikeys.sh
# tor
alias tor="open -a 'tor browser'"
# Dev shortcut
alias sdev="cd /Volumes/Speed/dev"
alias dev="cd $HOME/dev"
# delete Spam emails
alias dspam="python $HOME/my_apps/delete-spam-automation/run.py"
alias pspam="python $HOME/my_apps/delete-spam-automation/run.py manual"
# tensorflow conda env shortcut
alias tf="source activate tf"
# visualization conda env shortcut
alias viz="source activate visualization"
# conda activate
alias sa="source activate"
# conda deactivate
# alias sdc="source deactivate" deprecated
alias cdc="conda deactivate"
# conda list env
alias cel="conda env list"
# youtube viewing and download
alias yt="mpsyt"
# create python2 virtual environment
alias ven2="virtualenv venv"
# create python3 virtual environment
alias ven="virtualenv -p python3 venv"
# activate python virtual environment
alias sva="source venv/bin/activate"
# deactivate python virtual environment
alias dc="deactivate"
# facebook messenger
#alias fb="sudo fb-messenger-cli"
# windscribe vpn connect
alias wsv="open -a windscribe"
# Chrome
alias chrome='open -a "Google Chrome"'
# IntelliJ Idea
alias idea='open -a "IntelliJ IDEA CE"'
# spacemacs
alias evim='emacs -nw'
# compile .cpp files (C++)
alias cppcompile='c++ -std=c++11 -stdlib=libc++'
# alarm clock
alias alarm="python $HOME/my_apps/alarm-volume-control/run_app.py"
# flask tutorial secret key and database
source $HOME/Dropbox/share/.flaskTutorialTemp.sh
# rename file or folder names
alias rename="python $HOME/dev/Python_projects/My_Projects/Automation/rename_files/rename_files.py"
#alias mongod='mongod --dbpath $HOME/data/db'
# webDev yelp-camp tutorial
# export DATABASEURL="mongodb://localhost:27017/yelp_camp"
# CLOUDINARY API NAME KEY AND SECRET
source $HOME/Dropbox/share/.CLOUDINARYAPI.sh
# github username
export githubUser="reshinto"
# python project default path
export pyProject="$HOME/dev/Python_projects/My_Projects/inDevelopment"
# web javascript project default path
export webProject="$HOME/dev/Web_projects/my_projects"
# create new project automatically
alias create="python $HOME/my_apps/Automate_Building_projects/run.py"
# reindent file automatically
alias re="python $HOME/my_apps/Reindent/main.py"
# run python server
alias pserver="python -m http.server"
# run html server
alias server="http-server"
# Sudo Update All (include spam deletion)
alias sua="sudo softwareupdate -i -a && brew update && brew upgrade && brew cleanup && brew cleanup -s && python3 $HOME/my_apps/delete-spam-automation/run.py && npm update -g && pip3 install --upgrade pip && pip3 list --outdated --format=freeze | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 pip3 install -U --user && conda install anaconda -y && conda update --all -y && conda clean -ay"
# mySQL
alias mysqlstart="brew services start mysql"
alias mysqlroot="mysql -u root -p"
# default way of logging in
# alias mysqlme="mysql -u springfield -p"
# Add host if used "identified with mysql_native_password" setting
alias mysqlme="mysql -u springfield -h localhost -p"
alias mysqlrestart="brew services restart mysql"
alias mysqlstop="brew services stop mysql"
# Change mySQL password
# mysqladmin -u root password 'yourpassword'
# postgresql
alias pgstart="brew services start postgresql"
alias pgstop="brew services stop postgresql"
# set default vim
alias vim="mvim -v"
# Anaconda python
alias pythonconda="/usr/local/anaconda3/bin/./python"
# Go to backup directory
alias backup="/Volumes/backup"
# Show current wifi password
alias wifipw="wifi-password"
# Play voice weather and time information
alias sw="python $HOME/my_apps/weather_app/weather.py"
# Search for solutions with cheat.sh
alias cs="cht.sh"
# shortcut for searching on google
alias goo="googler"
# email
alias em="neomutt"
# automate git clone, folder create, and cd
alias clone="source $HOME/my_apps/clone.sh"
# find current folder size
alias sz="du -sh"
# list all available node_modules
alias listnode="find . -name 'node_modules' -type d -prune | xargs du -chs"
# delete all node_modules
alias delnode="find . -name 'node_modules' -type d -prune -exec rm -rf '{}' +"
# login to cloud
#alias sshcloud="ssh reshinto@cloud_ip_address"
# docker node env
#alias dnode="docker run -it --rm -p 5000:5000 -v $(pwd):/app -w='/app' node bash"
# docker node env, arg1: expose port, arg2: docker port, arg3: path
function dnode() { docker run -it --rm -p $1:$2 -v $3:/app -w='/app' node bash }
# docker python env
#alias dpython="docker run -it --rm -p 5001:5001 -v $(pwd):/app -w='/app' python bash"
# docker python env, arg1: expose port, arg2: docker port, arg3: path
function dpython() { docker run -it --rm -p $1:$2 -v $3:/app -w='/app' python bash }
# docker java env
#alias djava="docker run -it --rm -p 5002:5002 -v $(pwd):/app -w='/app' openjdk bash"
# docker java env, arg1: expose port, arg2: docker port, arg3: path
function djava() { docker run -it --rm -p $1:$2 -v $3:/app -w='/app' openjdk bash }
# icloud path
alias icloud="cd ~/Library/Mobile\ Documents/com~apple~CloudDocs/"
# check which app is using port
function portcheck() { lsof -i :$@ }
# kill port with PID
alias killport="kill -9"
# Stop all docker containers (do not uncomment this when docker engine is not turned on)
#alias dstop="docker kill $(docker ps -q)"
# Remove all docker containers (do not uncomment this when docker engine is not turned on)
#alias drm="docker rm $(docker ps -a -q)"
# Remove all docker images (do not uncomment this when docker engine is not turned on)
#alias drmi="docker rmi -f $(docker images -aq)"
# Stop and remove all docker containers and images (do not uncomment this when docker engine is not turned on)
#alias dclearall="dstop && drm && drmi"
# rbenv path
export PATH="$HOME/.rbenv/bin:$PATH"
eval "$(rbenv init -)"
# Add .NET Core SDK tools
export PATH="$PATH:$HOME/.dotnet/tools"
# Add Android SDK Location
export ANDROID_SDK=$HOME/Library/Android/sdk
# Add platform-tools
export PATH=$HOME/Library/Android/sdk/platform-tools:$PATH
# Add java javajdk path
# export PATH="/usr/local/opt/openjdk/bin:$PATH"
# Use to manage different java versions
# jEnv configuration
export PATH="$HOME/.jenv/bin:$PATH"
eval "$(jenv init -)"
# required for nvm to manage node versions
export NVM_DIR=~/.nvm
source $(brew --prefix nvm)/nvm.sh
#THIS MUST BE AT THE END OF THE FILE FOR SDKMAN TO WORK!!!
export SDKMAN_DIR="$HOME/.sdkman"
[[ -s "$HOME/.sdkman/bin/sdkman-init.sh" ]] && source "$HOME/.sdkman/bin/sdkman-init.sh"
| true
|
427b44cb9a219d269cf70b4e3a7fe21e02fbf729
|
Shell
|
atong01/config
|
/setup.sh
|
UTF-8
| 1,118
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
#setup.sh
#Created By Alex Tong January 28, 2015
#Modified By Alex Tong February 16, 2015
#This script runs the config package to set up a remote linux machine
echo "Running ssh-config.sh"
./ssh-config.sh
echo "copying .vimrc"
cp .vimrc ~
###############################################################################
echo "changing prompt"
printf "Ok to change prompt?"
# -r keeps any backslashes in the reply literal
read -r X
case "$X" in
y*|Y*)
# Quote $SHELL and use the portable '=' operator inside [ ].
if [ "$SHELL" = /bin/tcsh ]; then
echo tcsh
echo "#tcsh prompt change by Alex Tong" >> ~/.cshrc
# Fix: the old \' escapes wrote literal backslashes into ~/.cshrc,
# producing an invalid "set prompt = \'%~ > \'" line.
echo "set prompt = '%~ > '" >> ~/.cshrc
elif [ "$SHELL" = /bin/bash ]; then
echo bash
echo "#bash prompt change by Alex Tong" >> ~/.bashrc
# Fix: the old code appended "PS1=\h:\w $ " -- a bare quoted string, not
# an assignment. Write a real PS1 assignment (single quotes keep the
# backslash escapes intact for bash to expand at prompt time).
echo 'PS1="\h:\w $ "' >> ~/.bashrc
else
echo shell not found
fi
;;
*)
esac
###############################################################################
echo "setup complete"
###############################################################################
#END OF FILE
###############################################################################
| true
|
df29770c5a6ee14349fe70895afb89d5fefd76ce
|
Shell
|
valhallasw/ts-puppet
|
/modules/solaris/files/profile
|
UTF-8
| 1,958
| 3.015625
| 3
|
[] |
no_license
|
# IMPORTANT!!! DO NOT EDIT THIS FILE
#
# Do not edit /etc/profile. Instead, edit the master copy in Puppet, and run
# puppetd -t. This will make sure the change is applied to all servers, and
# not lost if the system is reinstalled. For more information, see
# <https://wiki.toolserver.org/view/Admin:Puppet>.
#ident "@(#)profile 1.19 01/03/13 SMI" /* SVr4.0 1.3 */
# The profile that all logins get before using their own .profile.
# Ignore INT (2) and QUIT (3) while the profile runs so users cannot
# interrupt system setup mid-way.
trap "" 2 3
export LOGNAME PATH
if [ "$TERM" = "" ]
then
if /bin/i386
then
TERM=sun-color
else
TERM=sun
fi
export TERM
fi
# Login and -su shells get /etc/profile services.
# -rsh is given its environment in its .profile.
case "$0" in
-sh | -ksh | -jsh | -bash)
if [ ! -f .hushlogin ]
then
# Allow the user to break the Message-Of-The-Day only.
trap "trap '' 2" 2
/bin/cat -s /etc/motd
trap "" 2
# mail -E prints nothing; its exit status encodes mailbox state
# (0 = new mail, 2 = mail present).
/bin/mail -E
case $? in
0)
echo "You have new mail."
;;
2)
echo "You have mail."
;;
esac
[ -x /opt/local/bin/acctexp ] && /opt/local/bin/acctexp
[ -x /opt/local/bin/listlogins ] && /opt/local/bin/listlogins
[ -x /opt/local/bin/list_screens ] && /opt/local/bin/list_screens
if [ -x /opt/ts/bin/quota ]; then
/opt/ts/bin/quota
else
/usr/sbin/quota
fi
fi # .hushlogin
esac
MANPATH=/sge/GE/man:/opt/ts/gnu/share/man:/opt/ts/share/man:/opt/ts/perl/5.12/share/man:/opt/ts/python/2.7/share/man:/opt/ts/php/5.3/man:/opt/ts/ruby/1.8/share/man:/opt/ts/tcl/8.5/share/man:/usr/share/man:/usr/sfw/share/man:/usr/postgres/8.3/man:/usr/cluster/man
export MANPATH
if [ "$0" = "-bash" -a -r /etc/bash.bashrc ]; then
. /etc/bash.bashrc
fi
EDITOR=nano
export EDITOR
umask 022
# Restore default handling of INT and QUIT.
trap 2 3
# TS-430/MNT-75, set default $LANG on login unless it's already set.
if [ -z "$LANG" ]; then
LANG=en_US.UTF-8
export LANG
fi
# Set environment from user's $HOME/.environment
if [ -f "/opt/local/bin/setenv" ]; then
eval `/opt/local/bin/setenv`
fi
# DO NOT EDIT THIS FILE
| true
|
4d8ccee3db519167c357d717f13b7eabbd326f11
|
Shell
|
rtrouton/rtrouton_scripts
|
/rtrouton_scripts/Casper_Scripts/Jamf_Pro_Self_Service_Bookmark_Backup/Jamf_Pro_Self_Service_Bookmark_Backup.sh
|
UTF-8
| 2,816
| 4.3125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script is designed to do the following:
#
# 1. If necessary, create a directory for storing backup copies of Jamf Pro Self Service bookmark files.
# 2. Make copies of the Self Service bookmark files.
# 3. Name the copied files using the title of the Self Service bookmark.
# 4. Store the copied bookmarks in the specified directory.
#
# If you choose to specify a directory to save the Self Service bookmarks into,
# please enter the complete directory path into the SelfServiceBookmarkBackupDirectory
# variable below.
SelfServiceBookmarkBackupDirectory=""
# If the SelfServiceBookmarkBackupDirectory isn't specified above, a directory will be
# created and the complete directory path displayed by the script.
error=0
if [[ -z "$SelfServiceBookmarkBackupDirectory" ]]; then
   SelfServiceBookmarkBackupDirectory=$(mktemp -d)
   echo "A location to store downloaded bookmarks has not been specified."
   echo "Downloaded bookmarks will be stored in $SelfServiceBookmarkBackupDirectory."
fi
self_service_bookmark_file="$HOME/Library/Application Support/com.jamfsoftware.selfservice.mac/CocoaAppCD.storedata"
if [[ -r "$self_service_bookmark_file" ]]; then
   tmp_dir="/private/tmp/bookmark-workdir-$(date +%y%m%d%H%M%S)"
   mkdir -p "$tmp_dir"
   # Ensure a single trailing slash on tmp_dir: awk uses it as a filename prefix.
   [[ "${tmp_dir: -1}" != "/" ]] && tmp_dir="$tmp_dir/"
   # Split the store into one numbered XML file per SSBOOKMARK entry.
   sed -n '/SSBOOKMARK/,/object/p' "$self_service_bookmark_file" | awk -v a="$tmp_dir" '/SSBOOKMARK/{filename=a""++i".xml"}; {print >filename}' -
   # remove trailing slash if needed from the bookmark and tmp directories
   SelfServiceBookmarkBackupDirectory=${SelfServiceBookmarkBackupDirectory%%/}
   tmp_dir=${tmp_dir%%/}
   for file in "$tmp_dir"/*
   do
      # Add an XML declaration on the first line if missing, so xmllint can
      # format the XML in human-readable form.
      # Fix: the old grep pattern had broken quoting (the inner quotes closed
      # the string), so the check never matched and a duplicate declaration
      # could be prepended.
      if ! grep -q '<?xml version="1.0" encoding="UTF-8"?>' "$file"; then
         printf '<?xml version="1.0" encoding="UTF-8"?>\n%s\n' "$(cat "$file")" > "$file"
      fi
      # Bookmark title lives in the <... "name" ...> element's text.
      bookmark_name=$(awk -F '[<>]' '/"name"/{print $3}' "$file")
      # Treat either a failed xmllint or a failed mv as an error (the old code
      # only checked the mv).
      if xmllint --format "$file" > "${file}_formatted.xml" \
            && mv "${file}_formatted.xml" "$SelfServiceBookmarkBackupDirectory/$bookmark_name".xml; then
         echo "$bookmark_name.xml processed and stored in $SelfServiceBookmarkBackupDirectory."
      else
         echo "ERROR! Problem occurred when processing $self_service_bookmark_file file!"
         error=1
      fi
   done
   rm -rf "$tmp_dir"
else
   echo "Cannot read $self_service_bookmark_file"
fi
exit $error
| true
|
d43f71a58b45a824ba0a51ed27996017c5da6a31
|
Shell
|
rockry/PcapMonitor
|
/assets/shell/sniffer.sh
|
UTF-8
| 1,760
| 2.625
| 3
|
[] |
no_license
|
#!/system/bin/sh
#echo ""
#echo " This is for QCT monitor mode"
#echo ""
# "STOP" restores normal (non-monitor) mode and brings the interface back up.
if [ "$1" == "STOP" ]; then
echo 0 > /sys/module/wlan/parameters/con_mode
ifconfig wlan0 up
iwpriv wlan0 monitor 0
echo "QCT SNIFFER MODE EXIT"
# NOTE(review): `return` at the top level only works when this script is
# sourced (behavior of a plain-script `return` is shell-dependent) --
# confirm the invocation; `exit 0` would be the safe form for standalone use.
return
fi
# Channel and bandwidth come from the command line, with defaults below.
CHANNEL="$1"
BANDWIDTH="$2"
if [ "$CHANNEL" == "" ]; then
CHANNEL=149
fi
if [ "$BANDWIDTH" == "" ]; then
BANDWIDTH=40
fi
echo "sniffer.sh CHANNEL:$CHANNEL BANDWIDTH:$BANDWIDTH"
# Disable CPU hotplug and thermal management services so cores stay online
# at a fixed frequency while capturing.
stop mpdecision
stop thermald
stop thermal-engine
echo 1 > /sys/devices/system/cpu/cpu0/online
echo 1 > /sys/devices/system/cpu/cpu1/online
echo 1 > /sys/devices/system/cpu/cpu2/online
echo 1 > /sys/devices/system/cpu/cpu3/online
sleep 1
#scaling_available_frequencies (C70, H443)
#200000 400000 533333 800000 998400 1094400 1152000 1209600
#300000 422400 652800 729600 883200 960000 1036800 1190400 1267200 1497600 1574400 1728000 1958400 2150400 (T1)
# Pin min == max frequency to lock all four cores at 2265600 kHz.
echo 2265600 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
echo 2265600 > /sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq
echo 2265600 > /sys/devices/system/cpu/cpu2/cpufreq/scaling_max_freq
echo 2265600 > /sys/devices/system/cpu/cpu3/cpufreq/scaling_max_freq
echo 2265600 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
echo 2265600 > /sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq
echo 2265600 > /sys/devices/system/cpu/cpu2/cpufreq/scaling_min_freq
echo 2265600 > /sys/devices/system/cpu/cpu3/cpufreq/scaling_min_freq
# con_mode 4 presumably selects monitor mode on this QCT wlan driver -- confirm.
echo 4 > /sys/module/wlan/parameters/con_mode
ifconfig wlan0 up
iwpriv wlan0 MonitorModeConf $CHANNEL $BANDWIDTH 1 111 0
#iwpriv wlan0 MonitorFilter 00:11:22:34:57:31 1 0 0
iwpriv wlan0 monitor 1
echo "QCT SNIFFER MODE OK"
#setprop wlan.lge.sniffer.ipaddr 192.168.0.100
#setprop wlan.lge.sniffer.ssid .DIR815_5G
| true
|
9ca588b8390b5bcb9748bf0bae4756df8db93522
|
Shell
|
rainisto/jollarecovery
|
/root/init
|
UTF-8
| 995
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
# Minimal Android Debug initrd
# 2013-10-21 Thomas Perl <m@thp.io>
export PATH=/bin:/sbin
# Mount basic file systems
mount -t proc none /proc
mount -t sysfs none /sys
# Blue LED on: early-boot progress indicator.
echo 255 > /sys/class/leds/led:rgb_blue/brightness
sleep 3
# Prepare /dev
mount -t tmpfs -o size=64k,mode=0755 tmpfs /dev
mkdir /dev/pts
mount -t devpts devpts /dev/pts
# Populate /dev using Busybox mdev
echo /sbin/mdev > /proc/sys/kernel/hotplug
mdev -s
sleep 3
# Bring up USB Networking
echo 0 > /sys/class/android_usb/android0/enable
#echo 1 > /sys/class/android_usb/android0/f_rndis/on
echo rndis > /sys/class/android_usb/android0/functions
echo 1 > /sys/class/android_usb/android0/enable
# Blue LED off: USB gadget is configured.
echo 0 > /sys/class/leds/led:rgb_blue/brightness
sleep 3
# Configure USB Networking (IP address + DHCP server)
ifconfig rndis0 192.168.42.17
udhcpd /etc/udhcpd.conf
sleep 3
# Start telnet service (to connect: telnet 192.168.42.17)
echo 255 > /sys/class/leds/led:rgb_red/brightness
telnetd -p 23 -l /bin/sh -F
# Keep init alive so the kernel does not panic when PID 1 exits.
sleep 3600
| true
|
71f8334ee6e65fdc2dac9a1af2abc8582959603d
|
Shell
|
wiw/cutadapt_imcb
|
/unpack_combine2.sh
|
UTF-8
| 530
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Recompress every .fastq.bz2 in $dir as .fastq.gz, removing the originals.
dir=/home/anton/Doc/Documents/SequenceData/DamID_IBprot/export_not_combine/141110
# Abort if the data directory is missing instead of operating on the CWD.
cd "$dir" || exit 1
for fq in "$dir"/*.fastq.bz2; do
# With default globbing an unmatched pattern stays literal; skip it.
[ -e "$fq" ] || continue
fq_base=$(basename "$fq")
# Stream bz2 -> gz directly; no intermediate uncompressed file needed.
bzcat "$fq_base" | gzip -c > "${fq_base%.bz2}.gz"
rm -- "$fq_base"
done
#i=10
#while [ $i -le 10 ]; do
#cat 141023_HSGA.IMKB.IMKB${i}.fastq 141110_HSGA.IMKB.IMKB${i}.fastq > HSGA.IMKB.IMKB${i}.fastq
#gzip -c HSGA.IMKB.IMKB${i}.fastq > HSGA.IMKB.IMKB${i}.fastq.gz
#let i=i+1
#rm *.fastq.bz2 *.fastq
#done
| true
|
b1b86c5d3249a5c8dbcdb5a827c2c08d33aa74e1
|
Shell
|
cosmicd/miscellaneous-scripts
|
/bash/create-ghpages-repo.sh
|
UTF-8
| 995
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Purpose: Delete old and create a new gh-pages repo for static sites
GH_USERNAME=yourUsername
GH_PAGES_NAME=${GH_USERNAME}.github.io
GH_PAGES_REPO_PATH=/path/to/local/$GH_PAGES_NAME # this is the local copy of the build repo
# Optional repo description (was referenced but never defined).
REPODESC=${REPODESC:-}
doall(){
# Recreate the pages repo from scratch: drop remote and local, then create.
delete_remote
delete_local
create; # create new repo
}
delete_remote(){
curl -u "${GH_USERNAME}" -X DELETE "https://api.github.com/repos/${GH_USERNAME}/${GH_PAGES_NAME}"
}
delete_local(){ # You may not have want to delete local as it may have demos.
# ':?' aborts if the path is empty so this can never expand to 'rm -r /...'.
rm -r -- "${GH_PAGES_REPO_PATH:?}"
}
create(){
curl -u "${GH_USERNAME}" https://api.github.com/user/repos -d "{\"name\": \"${GH_PAGES_NAME}\", \"description\": \"${REPODESC}\"}"
mkdir -p "$GH_PAGES_REPO_PATH"
cd "$GH_PAGES_REPO_PATH" || return 1
echo '<a href="https://'${GH_PAGES_NAME}'">'${GH_PAGES_NAME}'</a>' >> README.md
git init
git add .
git commit -a -m "first commit"
git remote add origin "https://github.com/${GH_USERNAME}/${GH_PAGES_NAME}.git"
git push -u origin master
}
# Dispatch: run the function named by the command-line arguments
# (quoted so multi-word arguments survive).
"$@"
| true
|
96641586a76cf26cff55619935a9524cc5168341
|
Shell
|
cfieber/spinnaker-oss-setup
|
/install.sh
|
UTF-8
| 3,522
| 3.859375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Resolve the directory containing this script (robust to being run from elsewhere).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
# Print usage (including the workspace directory currently configured in
# files/env.sh) and exit with status 1.
function print_help {
local current_install_dir=$(source $DIR/files/env.sh && echo $SPINNAKER_WORKSPACE)
echo "usage: install.sh [-hu]"
echo ""
echo "-h show help and exit"
echo "-u proceed with install without GITHUB_TOKEN"
echo " you will be prompted for github"
echo " credentials to create an oauth token"
echo ""
echo ""
echo "Installs required system dependencies for Spinnaker development."
echo "Sets up a development workspace in"
echo " $current_install_dir"
echo ""
echo "Edit $DIR/files/env.sh to customize workspace directory"
exit 1
}
function validate_github {
# Require a GITHUB_TOKEN unless the caller explicitly opted out with -u;
# otherwise explain how to obtain one and show usage (print_help exits).
local opt="${1:-}"
if [[ -n "${GITHUB_TOKEN}" || "$opt" == "-u" ]]; then
return 0
fi
echo "GITHUB_TOKEN environment variable not set."
echo "A Github oauth token with repo scope is required"
echo "to use the github-cli (hub). Preferred is if"
echo "you create this yourself:"
echo ""
echo " https://github.com/settings/tokens "
echo ""
echo "You can proceed without a token, by supplying"
echo "the -u flag, and a token will be created and"
echo "saved after you supply your github credentials"
echo ""
echo "see also:"
echo " hub manpage (man hub)"
echo ""
print_help
}
# Fix: the old test used the numeric operator -ne, which arithmetically
# coerces both strings to 0, so the OS check never fired on Linux.
# Use a string comparison instead.
if [[ "$(uname)" != "Darwin" ]]; then
echo "Only supports OSX at this point (contribs welcome!)"
echo ""
echo ""
print_help
fi
if [ "$1" == "-h" ]; then
print_help
fi
validate_github "$1"
# Bootstrap homebrew and bork if they are not installed yet.
if ! type brew > /dev/null; then
echo "Dependency not met: homebrew. Installing..."
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
if ! type bork > /dev/null; then
echo "Dependency not met: bork, installing..."
brew install bork
fi
# Link the workspace env file into $HOME and load it.
bork do ok symlink $HOME/.spinnaker-env.sh $DIR/files/env.sh
source $HOME/.spinnaker-env.sh
bork satisfy satisfy/taps.sh
# need a JDK but can't bork cask this due to sudo
brew cask install zulu8
bork satisfy satisfy/osx.sh
bork satisfy satisfy/repos.sh
brew services start redis
brew services start mysql@5.7
# Node toolchain via nvm, then yarn.
export NVM_DIR="$HOME/.nvm"
bork do ok directory $NVM_DIR
source /usr/local/opt/nvm/nvm.sh
nvm install --lts
npm -g install yarn
bork do ok directory $HOME/.spinnaker
bork do ok symlink $HOME/.spinnaker/logback-defaults.xml $DIR/files/logback-defaults.xml
# Create the orca database schema.
/usr/local/opt/mysql@5.7/bin/mysql -u root < $SPINNAKER_WORKSPACE/orca/orca-sql-mysql/mysql-setup.sql
function update_shell_profile {
# Append $2 to $HOME/$1 unless an identical-looking line already exists
# (grep also creates-on-append semantics: a missing file simply fails the
# grep and the entry is written, creating the file).
local rc_file="$HOME/$1"
local entry="$2"
if ! grep -q "${entry}" "${rc_file}"; then
echo "${entry}" >> "${rc_file}"
fi
}
# Capture a multi-line nvm bootstrap snippet into CONFIGURE_NVM.
# NOTE: `read -d ''` returns non-zero at end-of-input; harmless here because
# `set -e` is not enabled in this script.
read -d '' CONFIGURE_NVM <<'EOF'
export NVM_DIR="$HOME/.nvm"
[ -s "/usr/local/opt/nvm/nvm.sh" ] && . "/usr/local/opt/nvm/nvm.sh" # This loads nvm
[ -s "/usr/local/opt/nvm/etc/bash_completion" ] && . "/usr/local/opt/nvm/etc/bash_completion" # This loads nvm bash_completion
EOF
# Single quotes on purpose: $HOME must stay literal in the user's rc file.
SOURCE_SPINNAKER_ENV='source $HOME/.spinnaker-env.sh'
# Append the env/nvm setup to the rc files of the user's login shell;
# unknown shells get manual instructions instead.
if [[ "$SHELL" =~ ^.*bash$ ]]; then
update_shell_profile ".bash_profile" "${SOURCE_SPINNAKER_ENV}"
update_shell_profile ".bashrc" "${CONFIGURE_NVM}"
elif [[ "$SHELL" =~ ^.*zsh$ ]]; then
update_shell_profile ".zshenv" "${SOURCE_SPINNAKER_ENV}"
update_shell_profile ".zshrc" "${CONFIGURE_NVM}"
else
echo "ACTIONS REQUIRED: "
echo "Add to your shell environment:"
echo "'${SOURCE_SPINNAKER_ENV}'"
echo ""
echo "Add to your shell rc:"
echo "${CONFIGURE_NVM}"
echo ""
fi
| true
|
d2aaa36bfca7225415cbd16c76afde8a11a36eb6
|
Shell
|
almoore/useful-scripts
|
/k8s/check-certs
|
UTF-8
| 726
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# List expiry dates of every kubernetes.io/tls secret in the cluster.
# All command-line arguments are forwarded verbatim to kubectl
# (e.g. --context / --kubeconfig).
ARGS="$@"
# ANSI color escape sequences (expanded later by printf).
GC="\033[1;32m"
YC="\033[1;33m"
EC="\033[1;0m"
printgreen() {
# Print all arguments on one line in bright green.
# %b expands the escape sequences stored in $GC/$EC; the message is passed
# as a %s argument so '%' characters in it print literally.
# (The old code embedded "$@" inside the printf FORMAT string -- a printf
# format injection -- and its %s never received an argument.)
printf '%b%s%b\n' "${GC}" "$*" "${EC}"
}
printyellow() {
# Print all arguments on one line in bright yellow.
# Same fix as printgreen: message goes through %s, never the format string.
printf '%b%s%b\n' "${YC}" "$*" "${EC}"
}
# get all tls type secrets
# (${ARGS} is expanded unquoted on purpose so multiple kubectl flags split
# into separate words)
OUTPUT=$(kubectl ${ARGS} get secret -A | grep "kubernetes.io/tls")
# get namespaces
NAMESPACES=$(echo "$OUTPUT" | awk '{print $1}'|uniq)
for namespace in ${NAMESPACES}; do
printyellow "Gathering Secrets in ${namespace}"
SECRETS=$(echo "$OUTPUT" | grep "^$namespace" | awk '{print $2}')
for secret in $SECRETS; do
printgreen "${namespace}/${secret}:"
# Extract the base64 tls.crt from the secret and print the certificate's
# notBefore/notAfter dates.
kubectl ${ARGS} -n ${namespace} get secret ${secret} -o yaml| \
yq '.data["tls.crt"]' -r |base64 -d|openssl x509 -noout -dates
done
done
| true
|
df0508b74fc491279187b8b3e64c63ed9a97bd1c
|
Shell
|
mkimartinez/practice-simple-bash-scripting
|
/system_monitoring/cpu_memory_swap_usage.sh
|
UTF-8
| 590
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a one-line CPU / memory / swap / uptime summary for this host.
echo "-------------------------------------------"
echo "Server_Name CPU(%) Memory(%) Swap(%) Uptime(%)"
echo "-------------------------------------------"
#for server in `more /opt/scripts/server-list.txt`
#do
# Fix: $server was never set (the loop above is commented out); default to
# this host's name so the summary line is complete.
server=$(hostname)
# CPU busy = (user+system)/(user+system+idle) from the aggregate "cpu" line.
# Fix: awk printf needs %% for a literal percent sign.
scpu=$(awk '/^cpu /{printf "%.2f%%\n", ($2+$4)*100/($2+$4+$5)}' /proc/stat)
smem=$(free | awk '/Mem/{printf "%.2f%%", $3/$2*100}')
# Guard against division by zero on hosts with no swap configured.
sswap=$(free | awk '/Swap/{if ($2 > 0) printf "%.2f%%", $3/$2*100; else printf "0.00%%"}')
suptime=$(uptime)
echo "$server $scpu $smem $sswap $suptime"
#done | column -t
echo "-------------------------------------------"
| true
|
13725d302bb36e9e582484f9899a6322e5ba197b
|
Shell
|
JaneliaSciComp/hortacloud
|
/vpc_stack/src/jacs/cleanup.sh
|
UTF-8
| 365
| 2.859375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -xe
# Update with optional user data that will run on instance start.
# Learn more about user-data: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
#
# Output is logged to /var/log/cloud-init-output.log
printf '%s\n' "Cleanup user data"
# Drop the user-data scripts cached by cloud-init so they cannot run again.
rm -rf /var/lib/cloud/instance/scripts/*
printf '%s\n' "Completed user data cleanup"
| true
|
ee1db900ec88741fc1bdbd337d7814016c0f5cec
|
Shell
|
ahpeng/acs-k8s-cassandra-multi-dc
|
/scripts/deploy-multidc.sh
|
UTF-8
| 3,732
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy two peered Kubernetes clusters (via acs-engine) and a multi-DC
# Cassandra ring across them.
# Usage: deploy-multidc.sh <resource-group-1> <resource-group-2>
set -e
RESOURCE_GROUP_1=${1}
RESOURCE_GROUP_2=${2}
CLUSTER_DEFINITION_1=./templates/kubernetes.east.json
CLUSTER_DEFINITION_2=./templates/kubernetes.west.json
VNET_NAME=KubernetesCustomVNET
SUBNET_NAME=KubernetesSubnet
VNET_1_FIRST_TWO=10.140
VNET_2_FIRST_TWO=10.240
LOCATION_1=westcentralus
LOCATION_2=westus2
# variables that get set in keys.env
SERVICE_PRINCIPAL=
SP_SECRET=
SSH_PUBLIC_KEY=
. ./scripts/keys.env
# --- Auto populated values. Change at your own risk
VNET_1_ADDRESS_PREFIX_1=${VNET_1_FIRST_TWO}.0.0/16
VNET_2_ADDRESS_PREFIX_1=${VNET_2_FIRST_TWO}.0.0/16
SUBNET_ADDRESS_PREFIX_1=${VNET_1_FIRST_TWO}.0.0/17
SUBNET_ADDRESS_PREFIX_2=${VNET_2_FIRST_TWO}.0.0/17
DNS_PREFIX_1=${RESOURCE_GROUP_1}
DNS_PREFIX_2=${RESOURCE_GROUP_2}
GWSUBNET_ADDRESS_PREFIX_1=${VNET_1_FIRST_TWO}.128.0/29
GWSUBNET_ADDRESS_PREFIX_2=${VNET_2_FIRST_TWO}.128.0/29
GATEWAY_1=GW-${LOCATION_1}
GATEWAY_2=GW-${LOCATION_2}
# --------------
# Helper libraries providing check_var_set, fixup_apimodel, deploy_cluster, etc.
. ./scripts/general-helpers.sh
. ./scripts/acs-helpers.sh
. ./scripts/network-helpers.sh
. ./scripts/cluster-helpers.sh
. ./scripts/gateway-helpers.sh
. ./scripts/cassandra-helpers.sh
check_prereq()
{
# Verify every required configuration variable is non-empty; check_var_set
# (from general-helpers.sh) aborts with a message on the first failure.
local required_var
for required_var in \
VNET_NAME SUBNET_NAME \
VNET_1_FIRST_TWO VNET_2_FIRST_TWO \
LOCATION_1 LOCATION_2 \
SUBSCRIPTION_ID \
RESOURCE_GROUP_1 RESOURCE_GROUP_2 \
CLUSTER_DEFINITION_1 CLUSTER_DEFINITION_2 \
SSH_PUBLIC_KEY SP_SECRET SERVICE_PRINCIPAL \
DNS_PREFIX_1 DNS_PREFIX_2 \
SUBNET_ADDRESS_PREFIX_1 SUBNET_ADDRESS_PREFIX_2; do
check_var_set "$required_var"
done
}
echo Active Subscription:
az account show -o table
SUBSCRIPTION_ID=$(az account show -o tsv --query "id")
check_prereq
fixup_apimodel ${RESOURCE_GROUP_1} ${DNS_PREFIX_1} ${SUBNET_ADDRESS_PREFIX_1} ${CLUSTER_DEFINITION_1}
fixup_apimodel ${RESOURCE_GROUP_2} ${DNS_PREFIX_2} ${SUBNET_ADDRESS_PREFIX_2} ${CLUSTER_DEFINITION_2}
ensure_acsengine
rebuild_armtemplates ${CLUSTER_DEFINITION_1}
rebuild_armtemplates ${CLUSTER_DEFINITION_2}
echo VNET SPACES ${VNET_1_ADDRESS_PREFIX_1} ${SUBNET_ADDRESS_PREFIX_1}
create_rg_vnet_and_gw ${RESOURCE_GROUP_1} ${LOCATION_1} ${VNET_1_ADDRESS_PREFIX_1} ${SUBNET_ADDRESS_PREFIX_1} ${GATEWAY_1} ${GWSUBNET_ADDRESS_PREFIX_1}
create_rg_vnet_and_gw ${RESOURCE_GROUP_2} ${LOCATION_2} ${VNET_2_ADDRESS_PREFIX_1} ${SUBNET_ADDRESS_PREFIX_2} ${GATEWAY_2} ${GWSUBNET_ADDRESS_PREFIX_2}
deploy_cluster ${RESOURCE_GROUP_1} ${LOCATION_1} ${DNS_PREFIX_1}
deploy_cluster ${RESOURCE_GROUP_2} ${LOCATION_2} ${DNS_PREFIX_2}
# Fix: the second wait previously repeated RESOURCE_GROUP_1/DNS_PREFIX_1, so
# the script never actually waited for the second cluster's deployment.
wait_for_cluster ${RESOURCE_GROUP_1} deploy-${DNS_PREFIX_1}
wait_for_cluster ${RESOURCE_GROUP_2} deploy-${DNS_PREFIX_2}
wait_for_vnet_gateway ${RESOURCE_GROUP_1} ${GATEWAY_1}
wait_for_vnet_gateway ${RESOURCE_GROUP_2} ${GATEWAY_2}
# Connect the two VNet gateways in both directions.
deploy_connection ${RESOURCE_GROUP_1} ${GATEWAY_1} ${RESOURCE_GROUP_2} ${GATEWAY_2}
deploy_connection ${RESOURCE_GROUP_2} ${GATEWAY_2} ${RESOURCE_GROUP_1} ${GATEWAY_1}
# Install Cassandra on DC 1 first, record its seed IP, then join DC 2.
set_ssh_exec ${RESOURCE_GROUP_1}
install_helm
branchName=cassandra-multi-dc
get_charts ${branchName}
install_cassandra
set_seed_ip ${RESOURCE_GROUP_1}
set_ssh_exec ${RESOURCE_GROUP_2}
install_helm
get_charts ${branchName}
update_seeds
install_cassandra
# problem is that the ILB currently doesn't work ith CNI clusters.
# Need to revisit or manually configure the ILB
echo Final status
set_ssh_exec ${RESOURCE_GROUP_1}
${SSH_EXEC} 'kubectl exec -it $(kubectl get pods -o jsonpath="{ .items[0].metadata.name }") /usr/local/apache-cassandra-3.11.0/bin/nodetool status'
|
a9d3e833007bf632433d99e0c5de76dacebb2fb1
|
Shell
|
Janeiro2022/JMR_CKF
|
/archive/scripts/process_iteration_plots.sh
|
UTF-8
| 489
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a MATLAB cell array of zero-padded trial IDs ({'010','011',...}) and
# run the iteration-plot batch job over them.
DATE="20200610";
EXP_CODES="{'A', 'B', 'C', 'D', 'E', 'F'}";
START=10
STOP=25
# Assemble TRIALS directly with an arithmetic for-loop; the old
# `eval echo {$START..$STOP}` brace-expansion trick needed eval on
# interpolated variables, which is fragile.
TRIALS="{"
for (( i = START; i <= STOP; i++ )); do
printf -v trial '%03d' "$i"
if (( i == START )); then
TRIALS="${TRIALS}'${trial}'"
else
TRIALS="${TRIALS},'${trial}'"
fi
done
TRIALS="${TRIALS}}"
matlab -nodisplay -nosplash -r "process_iteration_plots('$DATE', $TRIALS, $EXP_CODES);exit;"
# matlab -nodisplay -nosplash -nojvm -r "process_iteration_plots('$DATE', $TRIALS, $EXP_CODES);exit;"
| true
|
e169d4605b2506b5181a3217d86964f7f9322a09
|
Shell
|
vzaicev/bash
|
/2022/07_ernest_knurov/lab03/lab03_z77.sh
|
UTF-8
| 202
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Lab 3, task 77: remove all blank lines at the END of the file. Blank lines
# in the middle of the file must be preserved.
if [ -n "$1" ] && [ "$1" == "-task" ];
then
echo "Задание"
echo "77. Удалить все пустые строки в конце файла"
echo
echo
fi
# Buffer runs of empty lines and only flush them when a non-empty line
# follows; empty lines remaining at EOF are therefore dropped.
# (Fix: the old `sed '/^$/d'` deleted EVERY empty line, not just trailing ones.)
strip_trailing_blank_lines() {
awk '{ if ($0 == "") { n++ } else { while (n > 0) { print ""; n-- }; print } }' "$1"
}
strip_trailing_blank_lines file.txt
| true
|
f4706f43025fdb17e8b7bc4e8e89662e277f8edb
|
Shell
|
gdubw/gradle-distribution-server
|
/upload-to-nexus.sh
|
UTF-8
| 657
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# shellcheck disable=SC2164
cd "$(dirname "${BASH_SOURCE[0]}")"
# Upload all packages to a Nexus3 server, with a raw repository named 'gradle-distribution
NEXUS_SERVER="${NEXUS_SERVER:-127.0.0.1}"
NEXUS_RAW_REPO="${NEXUS_RAW_REPO:-gradle-distribution}"
DIST_DIR="${1:-build/distributions}"
for file in "${DIST_DIR}"/*; do
    # Fix: with default globbing an unmatched pattern stays literal, so the
    # old loop tried to upload a nonexistent "*" file; skip missing entries.
    [ -e "${file}" ] || continue
    file_name="$(basename "${file}")"
    curl -v --user "${NEXUS_USER:-admin}:${NEXUS_PASSWORD:-admin123}" --upload-file \
        "${file}" \
        "http://${NEXUS_SERVER}/repository/${NEXUS_RAW_REPO}/${file_name}"
done
echo "open http://${NEXUS_SERVER}/service/rest/repository/browse/${NEXUS_RAW_REPO}/" for file lists.
| true
|
802edf98306e75d80a069994055fc42d741e4a98
|
Shell
|
bingli7/gittest
|
/lib/launchers/env_scripts/configure_env.sh
|
UTF-8
| 21,789
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
set -x
# NOTE(review): the sense of this helper looks inverted -- it returns 0
# (success) as soon as one argument is NOT "1"/"true", and returns 1 only
# when every argument matches. Callers elsewhere in this file may depend on
# that convention, so it is documented rather than changed; confirm before
# "fixing".
function is_true()
{
for arg
do
[[ x$arg =~ x(1|true) ]] || return 0
done
return 1
}
# NOTE(review): like is_true above, the return convention looks inverted --
# returns 0 as soon as one argument is NOT "0"/"false", and 1 when every
# argument matches. Confirm caller expectations before changing.
function is_false()
{
for arg
do
[[ x$arg =~ x(0|false) ]] || return 0
done
return 1
}
function wait_cloud_init()
{
# Block until cloud-init has finished on this boot.
# Nothing to wait for when cloud-init never ran or the unit is disabled.
[ -f /var/log/cloud-init.log ] || return 0
systemctl is-enabled cloud-init.service || return 0
# Poll once per second until either the unit reports it has exited or the
# log tail contains a "Cloud-init ... finished" line.
until systemctl show cloud-init | grep -q SubState=exited ||
    tail -n5 /var/log/cloud-init.log | grep -q 'Cloud-init .* finished'; do
    sleep 1
done
}
# yum_install_or_exit PKG...
# Install the given packages with yum, retrying a few times before giving up
# and terminating the whole script with an actionable message.
function yum_install_or_exit()
{
    echo "Openshift V3: yum install $*"
    # Local retry counter (the old global Count leaked between calls).
    local count=0
    while true
    do
        # "$@" preserves each package name as one word; the old $* relied
        # on re-splitting the joined string.
        yum install -y "$@"
        if [ $? -eq 0 ]; then
            return
        elif [ $count -gt 3 ]; then
            echo "Openshift V3: Command fail: yum install $*"
            echo "Openshift V3: Please ensure relevant repos are configured"
            exit 1
        fi
        count=$((count + 1))
    done
}
# Install the BIND name server package (provides named/rndc); exits the
# script via yum_install_or_exit if the install cannot succeed.
function install_named_pkg()
{
    yum_install_or_exit bind
}
# Configure BIND (named) as the DNS server for the OpenShift domains:
# regenerate rndc keys, set up forwarding, install a fresh named.conf,
# create the dynamic zones, then enable and start the service.
# NOTE(review): assumes a root shell on a RHEL-family host (restorecon,
# chkconfig, service are used) — confirm before reuse elsewhere.
function configure_bind()
{
    rndc-confgen -a -r /dev/urandom
    restorecon /etc/rndc.* /etc/named.*
    chown root:named /etc/rndc.key
    chmod 640 /etc/rndc.key
    # Set up DNS forwarding if so directed.
    echo "forwarders { ${nameservers} } ;" > /var/named/forwarders.conf
    restorecon /var/named/forwarders.conf
    chmod 644 /var/named/forwarders.conf
    # Install the configuration file for the OpenShift Enterprise domain
    # name.
    rm -rf /var/named/dynamic
    mkdir -p /var/named/dynamic
    chgrp named -R /var/named
    chown named -R /var/named/dynamic
    restorecon -rv /var/named
    # Replace named.conf.
    cat <<EOF > /etc/named.conf
// named.conf
//
// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS
// server as a caching only nameserver (as a localhost DNS resolver only).
//
// See /usr/share/doc/bind*/sample/ for example named configuration files.
//
options {
listen-on port 53 { any; };
directory "/var/named";
dump-file "/var/named/data/cache_dump.db";
statistics-file "/var/named/data/named_stats.txt";
memstatistics-file "/var/named/data/named_mem_stats.txt";
allow-query { any; };
allow-transfer { "none"; }; # default to no zone transfers
/* Path to ISC DLV key */
bindkeys-file "/etc/named.iscdlv.key";
forward only;
include "forwarders.conf";
recursion yes;
};
logging {
channel default_debug {
file "data/named.run";
severity dynamic;
};
};
// use the default rndc key
include "/etc/rndc.key";
controls {
inet 127.0.0.1 port 953
allow { 127.0.0.1; } keys { "rndc-key"; };
};
include "/etc/named.rfc1912.zones";
EOF
    chown root:named /etc/named.conf
    chcon system_u:object_r:named_conf_t:s0 -v /etc/named.conf
    # actually set up the domain zone(s)
    # bind_key is used if set, created if not. both domains use same key.
    if ! $USE_OPENSTACK_DNS; then
        configure_named_zone ${CONF_HOST_DOMAIN}
        add_infra_records
    fi
    configure_named_zone ${CONF_APP_DOMAIN}
    add_route_records
    chkconfig named on
    # Start named so we can perform some updates immediately.
    service named restart
}

# Create (or reuse) the TSIG key for a zone, write its key file and initial
# dynamic zone database, register the zone in named.conf, and open port 53.
# Reads/sets the global $bind_key so both domains share one key.
function configure_named_zone()
{
    zone="$1"
    if [ "x$bind_key" = x ]; then
        # Generate a new secret key
        zone_tolower="${zone,,}"
        rm -f /var/named/K${zone_tolower}*
        dnssec-keygen -a HMAC-SHA256 -b 256 -n USER -r /dev/urandom -K /var/named ${zone}
        # $zone may have uppercase letters in it. However the file that
        # dnssec-keygen creates will have the zone in lowercase.
        bind_key="$(grep Key: /var/named/K${zone_tolower}*.private | cut -d ' ' -f 2)"
        rm -f /var/named/K${zone_tolower}*
    fi
    # Install the key where BIND and oo-register-dns expect it.
    cat <<EOF > /var/named/${zone}.key
key ${zone} {
algorithm "HMAC-SHA256";
secret "${bind_key}";
};
EOF
    # Create the initial BIND database.
    cat <<EOF > /var/named/dynamic/${zone}.db
\$ORIGIN .
\$TTL 1 ; 1 seconds (for testing only)
${zone} IN SOA ns1.$zone. hostmaster.$zone. (
2011112904 ; serial
60 ; refresh (1 minute)
15 ; retry (15 seconds)
1800 ; expire (30 minutes)
10 ; minimum (10 seconds)
)
IN NS $named_hostname.
IN MX 10 mail.$zone.
\$ORIGIN ${zone}.
ns1 IN A ${CONF_DNS_IP}
EOF
    if ! grep ${zone} /etc/named.conf >/dev/null; then
        # Add a record for the zone to named conf
        cat <<EOF >> /etc/named.conf
include "${zone}.key";
zone "${zone}" IN {
type master;
file "dynamic/${zone}.db";
allow-update { key ${zone} ; } ;
};
EOF
    fi
    # Open DNS (53/udp and 53/tcp) by inserting rules after the loopback rule.
    sed -i "/lo -j ACCEPT/a -A INPUT -m state --state NEW -m udp -p udp --dport 53 -j ACCEPT" /etc/sysconfig/iptables
    sed -i "/lo -j ACCEPT/a -A INPUT -m state --state NEW -m tcp -p tcp --dport 53 -j ACCEPT" /etc/sysconfig/iptables
}

# Point this host's resolver at the local named: rewrite /etc/resolv.conf,
# pin the settings for dhclient so DHCP does not undo them, and bounce the
# network service.
function configure_dns_resolution()
{
    sed -i -e "/search/ d; 1i# The named we install for our OpenShift PaaS must appear first.\\nsearch ${CONF_HOST_DOMAIN}.\\nnameserver ${CONF_DNS_IP}\\n" /etc/resolv.conf
    cat <<EOF > /etc/dhcp/dhclient-$interface.conf
prepend domain-name-servers ${CONF_DNS_IP};
prepend domain-search "${CONF_HOST_DOMAIN}";
EOF
    systemctl restart network.service
}

# Append one record per "name:value" pair in CONF_HOST_LIST to the host
# domain's zone DB.  A value containing letters is treated as a CNAME
# target, a purely numeric one as an A-record IP.
function add_infra_records()
{
    for host in ${CONF_HOST_LIST//,/ }; do
        key=$(echo $host|awk -F":" '{print $1}')
        value=$(echo $host|awk -F":" '{print $2}')
        if [[ "$value" == *[A-Za-z]* ]]; then
            REC_TYPE=CNAME
            # CNAME targets must be fully qualified — append the root dot.
            value+=.
        else
            REC_TYPE=A
        fi
        echo "${key} IN $REC_TYPE ${value}" >>/var/named/dynamic/${CONF_HOST_DOMAIN}.db
    done
}

# Add wildcard ("*") records for the application domain, pointing at the
# node(s) that run the router.  On a single-node deployment every entry
# qualifies; on multi-node only entries matching CONF_ROUTER_NODE_TYPE do.
function add_route_records()
{
    # Commas stripped from the list; non-empty means more than one entry.
    multinode=${CONF_IP_LIST//[^,]}
    for host in ${CONF_IP_LIST//,/ }; do
        key=$(echo $host|awk -F":" '{print $1}')
        value=$(echo $host|awk -F":" '{print $2}')
        if [[ "$value" == *[A-Za-z]* ]]; then
            REC_TYPE=CNAME
            value+=.
        else
            REC_TYPE=A
        fi
        # routers will run on masters or nodes, point DNS at them
        if [ "$key" == "$CONF_ROUTER_NODE_TYPE" ] ||
            [ x"$multinode" == x"" ]; then
            echo "* IN $REC_TYPE ${value}" >>/var/named/dynamic/${CONF_APP_DOMAIN}.db
        fi
    done
}

# Write a yum repo file pointing at the RHEL 7 base/extras/HA trees under
# CONF_RHEL_BASE_REPO (gpgcheck disabled — internal CI mirrors).
function configure_repos()
{
    cat <<EOF >/etc/yum.repos.d/rhel-jenkins.repo
[rhel-7]
name=RHEL-7
baseurl=${CONF_RHEL_BASE_REPO}/os/
enabled=1
gpgcheck=0
[rhel-7-extra]
name=RHEL-7-extra
baseurl=${CONF_RHEL_BASE_REPO}/extras/os/
enabled=1
gpgcheck=0
[rhel-7-highavailability]
name=RHEL-7-highavailability
baseurl=${CONF_RHEL_BASE_REPO}/highavailability/os/
enabled=1
gpgcheck=0
EOF
}

# DESTRUCTIVE: remove every configured yum repository and drop the RHSM
# subscription.  Only call right before configure_repos re-creates them.
function clean_repos()
{
    rm -rf /etc/yum.repos.d/*
    subscription-manager unregister
}

# Deploy the integrated docker registry.  The router-creation steps below
# are retained (commented out) for reference; only the registry is created.
function create_router_registry()
{
    oadm registry --credentials=$CONF_CRT_PATH/master/openshift-registry.kubeconfig --images="$CONF_IMAGE_PRE"
    #CA=$CONF_CRT_PATH/master
    #oadm ca create-server-cert --signer-cert=$CA/ca.crt --signer-key=$CA/ca.key --signer-serial=$CA/ca.serial.txt --hostnames="*.${CONF_APP_DOMAIN}" --cert=cloudapps.crt --key=cloudapps.key
    #cat cloudapps.crt cloudapps.key $CA/ca.crt > cloudapps.router.pem
    #oc get scc privileged -o yaml >privileged.yaml
    #grep "system:serviceaccount:default:default" privileged.yaml || echo "- system:serviceaccount:default:default" >> privileged.yaml
    #oc replace -f privileged.yaml
    #tmpStr=${CONF_HOST_LIST//[^,]}
    #if [ x"$tmpStr" == x"" ]; then
    # nodeNum=1
    #else
    # nodeNum=${#tmpStr}
    #fi
    #oadm router --default-cert=cloudapps.router.pem --credentials=$CONF_CRT_PATH/master/openshift-router.kubeconfig --images="$CONF_IMAGE_PRE" --replicas=$nodeNum --service-account=default
}
# Add one /etc/hosts entry per "name:ip" pair in CONF_HOST_LIST (comma
# separated), unless an entry for that IP is already present.
# Fixes of the previous version:
#   * $tmpKey/$tmpIp were assigned but the lowercase $tmpkey/$tmpip were
#     referenced (bash variables are case sensitive), so empty values were
#     grepped for and written;
#   * the escaped \" wrote literal quote characters into /etc/hosts.
function configure_hosts()
{
    local host key ip
    for host in ${CONF_HOST_LIST//,/ }; do
        key=$(echo $host|awk -F":" '{print $1}')
        ip=$(echo $host|awk -F":" '{print $2}')
        # NOTE(review): $DOMAIN_NAME is never set in this script;
        # CONF_HOST_DOMAIN looks like the intended variable, but it is kept
        # as-is pending confirmation with the launcher environment.
        grep "$ip" /etc/hosts || echo -e "$ip\t$key.$DOMAIN_NAME" >>/etc/hosts
    done
}
# Re-run zone configuration on a host that shares an already-installed BIND:
# (re)creates the host and app zones and restarts named.  Mirrors the zone
# steps at the end of configure_bind without reinstalling named.conf.
function configure_shared_dns()
{
    if ! $USE_OPENSTACK_DNS; then
        configure_named_zone ${CONF_HOST_DOMAIN}
        add_infra_records
    fi
    configure_named_zone ${CONF_APP_DOMAIN}
    add_route_records
    service named restart
}
# Register every "name:ip" pair from CONF_HOST_LIST in etcd so skydns can
# resolve <name>.cluster.local.
# Fix: the curl payload referenced the undefined $ip — the parsed address
# lives in $value — so empty Host entries were written.
function add_skydns_hosts()
{
    local host key value
    for host in ${CONF_HOST_LIST//,/ }; do
        key=$(echo $host|awk -F":" '{print $1}')
        value=$(echo $host|awk -F":" '{print $2}')
        curl --cacert $CONF_CRT_PATH/master/ca.crt --cert $CONF_CRT_PATH/master/master.etcd-client.crt --key $CONF_CRT_PATH/master/master.etcd-client.key -XPUT https://master.cluster.local:4001/v2/keys/skydns/local/cluster/$key -d value="{\"Host\": \"$value\"}"
    done
}
# Rewrite the stock example templates to use this deployment's application
# domain instead of the upstream default, then push them back into the
# 'openshift' namespace.
function replace_template_domain()
{
    for file in $(grep -rl "openshiftapps.com" /usr/share/openshift/examples/*); do
        sed -i "s/openshiftapps.com/$CONF_APP_DOMAIN/" $file
        oc replace -n openshift -f $file
    done
}

# Export /var/export/regvol over NFS for the HA registry: create the export,
# enable the NFS services, pin the helper daemons to fixed ports, and open
# those ports in iptables so clients can mount across the firewall.
function configure_nfs_service()
{
    yum_install_or_exit nfs-utils
    mkdir -p /var/export/regvol
    chown nfsnobody:nfsnobody /var/export/regvol
    chmod 700 /var/export/regvol
    # add no_wdelay as workaround for BZ#1277356
    echo "/var/export/regvol *(rw,sync,all_squash,no_wdelay)" >> /etc/exports
    systemctl enable rpcbind nfs-server
    systemctl restart rpcbind nfs-server nfs-lock
    systemctl restart nfs-idmap
    # Dedicated chain inserted just before the first REJECT rule so the NFS
    # allowances take effect ahead of the default deny.
    iptables -N OS_NFS_ALLOW
    rulenum=$(iptables -L INPUT --line-number|grep REJECT|head -n 1|awk '{print $1}')
    iptables -I INPUT $rulenum -j OS_NFS_ALLOW
    iptables -I OS_NFS_ALLOW -p tcp -m state --state NEW -m tcp --dport 111 -j ACCEPT
    iptables -I OS_NFS_ALLOW -p tcp -m state --state NEW -m tcp --dport 2049 -j ACCEPT
    iptables -I OS_NFS_ALLOW -p tcp -m state --state NEW -m tcp --dport 20048 -j ACCEPT
    iptables -I OS_NFS_ALLOW -p tcp -m state --state NEW -m tcp --dport 50825 -j ACCEPT
    iptables -I OS_NFS_ALLOW -p tcp -m state --state NEW -m tcp --dport 53248 -j ACCEPT
    # save rules and make sure iptables service is active and enabled
    /usr/libexec/iptables/iptables.init save || exit 1
    systemctl is-enabled iptables && systemctl is-active iptables || exit 1
    # Fix mountd/statd/lockd to the ports opened above.
    sed -i 's/RPCMOUNTDOPTS=.*/RPCMOUNTDOPTS="-p 20048"/' /etc/sysconfig/nfs
    sed -i 's/STATDARG=.*/STATDARG="-p 50825"/' /etc/sysconfig/nfs
    echo "fs.nfs.nlm_tcpport=53248" >>/etc/sysctl.conf
    echo "fs.nfs.nlm_udpport=53248" >>/etc/sysctl.conf
    sysctl -p
    systemctl restart nfs
    setsebool -P virt_use_nfs=true
}

# Back the docker registry with the NFS export: create a PV/PVC pair for
# /var/export/regvol, mount it into the registry deployment, and scale to
# two replicas for HA.
function configure_registry_to_ha()
{
    cat >${CONF_CRT_PATH}/pv.json <<EOF
{
"apiVersion": "v1",
"kind": "PersistentVolume",
"metadata": {
"name": "registry-volume"
},
"spec": {
"capacity": {
"storage": "17Gi"
},
"accessModes": [ "ReadWriteMany" ],
"nfs": {
"path": "/var/export/regvol",
"server": "$(hostname -f)"
}
}
}
EOF
    cat >${CONF_CRT_PATH}/pvc.json<<EOF
{
"apiVersion": "v1",
"kind": "PersistentVolumeClaim",
"metadata": {
"name": "registry-claim"
},
"spec": {
"accessModes": [ "ReadWriteMany" ],
"resources": {
"requests": {
"storage": "17Gi"
}
}
}
}
EOF
    oc create -f ${CONF_CRT_PATH}/pv.json
    oc create -f ${CONF_CRT_PATH}/pvc.json
    oc volume dc/docker-registry --add --overwrite -t persistentVolumeClaim --claim-name=registry-claim --name=registry-storage
    oc scale --replicas=2 dc/docker-registry
}

# Build the "basicauthurl" docker image used as an authentication proxy.
# NOTE(review): despite the name, this bakes Kerberos settings (KDC, admin
# server, keytab) into the image, not an LDAP source — confirm the intent.
function configure_ldap_source()
{
    rm -rf basicauthurl
    mkdir -p basicauthurl/
    cp /etc/yum.repos.d/rhel.repo basicauthurl/
    cat >basicauthurl/Dockerfile <<EOF
FROM $CONF_KERBEROS_BASE_DOCKER_IMAGE
ADD rhel.repo /etc/yum.repos.d/rhel.repo
RUN yum install -y wget mod_ldap tar httpd mod_ssl php mod_auth_kerb mod_auth_mellon mod_authnz_pam
RUN sed -i "/\[realms/,/\[/ s/kdc =.*/kdc = $CONF_KERBEROS_KDC/" /etc/krb5.conf
RUN sed -i "/\[realms/,/\[/ s/admin_server =.*/admin_server = $CONF_KERBEROS_ADMIN/" /etc/krb5.conf
RUN sed -i "s/^#//" /etc/krb5.conf
RUN wget $CONF_KERBEROS_KEYTAB_URL -O /etc/http.keytab
EOF
    docker build -t docker.io/basicauthurl basicauthurl/
}

# Repoint the stock image streams at a test registry ($1) and mark them as
# insecure so imports work without TLS.  Handles both the v3.1.z and v3.2
# JSON layouts.
function modify_IS_for_testing()
{
    if [ $# -lt 1 ]; then
        # NOTE(review): $0 here is the script path, which is reasonable for
        # the CLI dispatch at the bottom, but misleading if called directly.
        echo "Usage: $0 [registry-server]"
        exit 1
    fi
    registry_server="${1}"
    # Drop existing image streams and images so the re-import is clean.
    cmd="oc delete is --all -n openshift"
    echo "Command: $cmd"
    eval "$cmd"
    cmd="oc delete images --all"
    echo "Command: $cmd"
    eval "$cmd"
    # The example JSON location moved between releases; probe both.
    if [ -d "/usr/share/openshift/examples/" ]; then
        IS_json_base="/usr/share/openshift/examples/"
    elif [ -d "/etc/origin/examples/" ]; then
        IS_json_base="/etc/origin/examples/"
    else
        echo "No valid Image Stream json file dir found!"
        exit 1
    fi
    file1="${IS_json_base}/xpaas-streams/jboss-image-streams.json"
    file2="${IS_json_base}/image-streams/image-streams-rhel7.json"
    # Keep pristine copies so repeated runs start from the originals.
    [ ! -f "${file1}.bak" ] && cp "${file1}" "${file1}.bak"
    [ ! -f "${file2}.bak" ] && cp "${file2}" "${file2}.bak"
    #for line_num in $(grep -n 'name' ${file1} | grep -v 'latest' | grep -v '[0-9]",' | grep -v 'jboss-image-streams' | awk -F':' '{print $1}'); do
    # sed -i "${line_num}s|\(.*\)|\1,\"annotations\": { \"openshift.io/image.insecureRepository\": \"true\"}|g" ${file1}
    #done
    #for line_num in $(grep -n 'name' ${file2} | grep -v 'latest' | grep -v '[0-9]",' | awk -F':' '{print $1}'); do
    #for line_num in $(grep -n 'name' ${file2} | grep -v 'latest' | grep -v '"[0-9]' | awk -F':' '{print $1}'); do
    # sed -i "${line_num}s|\(.*\)|\1\"annotations\": { \"openshift.io/image.insecureRepository\": \"true\"},|g" ${file2}
    #done
    if openshift version | grep -q "openshift v3.1.[0-9].[0-9]-"; then
        # when it is v3.1.z version
        for file in ${file1} ${file2}; do
            sed -i "s/registry.access.redhat.com/${registry_server}/g" ${file}
            oc create -f ${file} -n openshift
        done
        for i in ruby nodejs perl php python mysql postgresql mongodb jenkins jboss-amq-62 jboss-datagrid65-openshift jboss-decisionserver62-openshift jboss-eap64-openshift jboss-webserver30-tomcat7-openshift jboss-webserver30-tomcat8-openshift; do
            oc patch is ${i} -p '{"metadata":{"annotations":{"openshift.io/image.insecureRepository":"true"}}}' -n openshift
        done
    else
        # when it is v3.2 version
        oc create -f ${file1} -n openshift
        sed -i "s/registry.access.redhat.com/${registry_server}/g" ${file2}
        sed -i '/from/i\"importPolicy\"\:\ \{\"insecure\"\:\ true\}\,' ${file2}
        oc create -f ${file2} -n openshift
    fi
}
# Set up an Apache reverse proxy that authenticates users with Kerberos and
# forwards them to the OpenShift master's OAuth endpoint: installs httpd and
# the auth modules, mints an authproxy client certificate, writes the vhost
# config and krb5.conf, fetches the login pages and keytab, and restarts
# httpd.
# NOTE(review): the misspelled name ("confiugre") is kept deliberately — the
# CLI dispatch at the bottom of this script uses the same spelling.
function confiugre_kerberos()
{
    yum install httpd mod_ssl apr-util-openssl mod_auth_kerb -y
    # Client credentials the proxy uses to talk to the master's OAuth API.
    oadm create-api-client-config --certificate-authority='/etc/origin/master/ca.crt' \
        --client-dir='/etc/origin/master/authproxy' \
        --signer-cert='/etc/origin/master/ca.crt' \
        --signer-key='/etc/origin/master/ca.key' \
        --signer-serial='/etc/origin/master/ca.serial.txt' \
        --user='authproxy'
    host_name=$(hostname)
    pushd /etc/origin/master
    # \cp bypasses any cp alias (e.g. cp -i) so the copies never prompt.
    \cp master.server.crt /etc/pki/tls/certs/localhost.crt
    \cp master.server.key /etc/pki/tls/private/localhost.key
    \cp ca.crt /etc/pki/CA/certs/ca.crt
    cat authproxy/authproxy.crt \
        authproxy/authproxy.key > \
        /etc/pki/tls/certs/authproxy.pem
    popd
    cat >/etc/httpd/conf.d/openshift.conf <<EOF
#LoadModule auth_form_module modules/mod_auth_form.so
#LoadModule session_module modules/mod_session.so
#LoadModule request_module modules/mod_request.so
# Nothing needs to be served over HTTP. This virtual host simply redirects to
# HTTPS.
<VirtualHost *:80>
DocumentRoot /var/www/html
RewriteEngine On
RewriteRule ^(.*)$ https://%{HTTP_HOST}$1 [R,L]
</VirtualHost>
<VirtualHost *:443>
# This needs to match the certificates you generated. See the CN and X509v3
# Subject Alternative Name in the output of:
# openssl x509 -text -in /etc/pki/tls/certs/localhost.crt
ServerName $host_name
DocumentRoot /var/www/html
SSLEngine on
SSLCertificateFile /etc/pki/tls/certs/localhost.crt
SSLCertificateKeyFile /etc/pki/tls/private/localhost.key
SSLCACertificateFile /etc/pki/CA/certs/ca.crt
SSLProxyEngine on
SSLProxyCACertificateFile /etc/pki/CA/certs/ca.crt
# It's critical to enforce client certificates on the Master. Otherwise
# requests could spoof the X-Remote-User header by accessing the Master's
# /oauth/authorize endpoint directly.
SSLProxyMachineCertificateFile /etc/pki/tls/certs/authproxy.pem
# Send all requests to the console
RewriteEngine On
RewriteRule ^/console(.*)$ https://%{HTTP_HOST}:8443/console$1 [R,L]
# In order to using the challenging-proxy an X-Csrf-Token must be present.
RewriteCond %{REQUEST_URI} ^/challenging-proxy
RewriteCond %{HTTP:X-Csrf-Token} ^$ [NC]
RewriteRule ^.* - [F,L]
<Location /challenging-proxy/oauth/authorize>
# Insert your backend server name/ip here.
ProxyPass https://$host_name:8443/oauth/authorize
</Location>
<Location /login-proxy/oauth/authorize>
# Insert your backend server name/ip here.
ProxyPass https://$host_name:8443/oauth/authorize
# mod_auth_form providers are implemented by mod_authn_dbm, mod_authn_file,
# mod_authn_dbd, mod_authnz_ldap and mod_authn_socache.
AuthType Kerberos
AuthName openshift
ErrorDocument 401 /login.html
</Location>
<ProxyMatch /oauth/authorize>
AuthType Kerberos
KrbMethodNegotiate on
KrbMethodK5Passwd on
KrbServiceName Any
KrbAuthRealms EXAMPLE.COM
Krb5Keytab /etc/origin/http.keytab
KrbSaveCredentials off
AuthName openshift
Require valid-user
RequestHeader set X-Remote-User %{REMOTE_USER}s
# For ldap:
# AuthBasicProvider ldap
# AuthLDAPURL "ldap://ldap.example.com:389/ou=People,dc=my-domain,dc=com?uid?sub?(objectClass=*)"
# It's possible to remove the mod_auth_form usage and replace it with
# something like mod_auth_kerb, mod_auth_gsspai or even mod_auth_mellon.
# The former would be able to support both the login and challenge flows
# from the Master. Mellon would likely only support the login flow.
# For Kerberos
# yum install mod_auth_gssapi
# AuthType GSSAPI
# GssapiCredStore keytab:/etc/httpd.keytab
</ProxyMatch>
</VirtualHost>
RequestHeader unset X-Remote-User
EOF
    pushd /var/www/html/
    wget https://raw.githubusercontent.com/openshift/openshift-extras/master/misc/form_auth/login.html
    wget https://raw.githubusercontent.com/openshift/openshift-extras/master/misc/form_auth/loggedout.html
    chmod -Rf 0777 *
    popd
    wget $CONF_KERBEROS_KEYTAB_URL -O /etc/origin/http.keytab
    chown apache:apache /etc/origin/http.keytab
    cat > /etc/krb5.conf <<EOF
[logging]
default = FILE:/var/log/krb5libs.log
kdc = FILE:/var/log/krb5kdc.log
admin_server = FILE:/var/log/kadmind.log
[libdefaults]
dns_lookup_realm = false
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
rdns = false
# default_realm = EXAMPLE.COM
default_ccache_name = KEYRING:persistent:%{uid}
[realms]
EXAMPLE.COM = {
kdc = $CONF_KERBEROS_KDC
admin_server = $CONF_KERBEROS_ADMIN
}
[domain_realm]
.example.com = EXAMPLE.COM
example.com = EXAMPLE.COM
EOF
    systemctl restart httpd
}
# --- Configuration variables expected from the launcher environment ---
#CONF_HOST_LIST=value
#CONF_IP_LIST=value
#CONF_HOST_DOMAIN=value
#CONF_APP_DOMAIN=value
#CONF_RHEL_BASE_REPO=value
#CONF_INTERFACE=value
#CONF_DNS_IP=value
#USE_OPENSTACK_DNS=value
#CONF_AUTH_TYPE=value
#CONF_CRT_PATH=value
#CONF_IMAGE_PRE=value
#CONF_KERBEROS_KDC=value
#CONF_KERBEROS_ADMIN=value
#CONF_KERBEROS_KEYTAB_URL=value
#CONF_KERBEROS_BASE_DOCKER_IMAGE=value
#CONF_ROUTER_NODE_TYPE=value
#CONF_PUDDLE_REPO=value

# Globals used by the DNS helpers above.
interface="${CONF_INTERFACE:-eth0}"
# Collect the currently-configured resolvers as a "ip; ip; " list for the
# BIND forwarders file.
nameservers="$(awk '/nameserver/ { printf "%s; ", $2 }' /etc/resolv.conf)"
named_hostname=ns1.$CONF_HOST_DOMAIN

# Configure the development puddle repo, then update the installer playbook
# RPMs (or install them on first run).
function update_playbook_rpms()
{
    cat <<EOF >/etc/yum.repos.d/ose-devel.repo
[ose-devel]
name=ose-devel
baseurl=${CONF_PUDDLE_REPO}
enabled=1
gpgcheck=0
EOF
    rpm -q atomic-openshift-utils && yum update openshift-ansible* -y || yum install atomic-openshift-utils -y
}
# Install an hourly cron job that prunes old builds and deployments.
# Fixes: /etc/cron.hourly is a DIRECTORY, so the original redirection
# `cat << EOF > /etc/cron.hourly` failed outright; run-parts also requires
# the part file to have a shebang and the executable bit.
function garbage_clean_up()
{
    cat << EOF > /etc/cron.hourly/openshift-prune
#!/bin/bash
oadm prune builds --keep-complete=5 --keep-failed=1 --keep-younger-than=1h0m0s --orphans=true --confirm=true
oadm prune deployments --keep-complete=5 --keep-failed=1 --keep-younger-than=1h0m0s --orphans=true --confirm=true
EOF
    chmod 755 /etc/cron.hourly/openshift-prune
}
# CLI dispatch: the first argument selects which configuration step to run.
# Unknown actions fall through to the error message at the bottom.
case $1 in
    wait_cloud_init)
        wait_cloud_init
        ;;
    configure_dns)
        #configure_repos
        install_named_pkg
        configure_bind
        #clean_repos
        ;;
    configure_dns_resolution)
        configure_dns_resolution
        ;;
    create_router_registry)
        create_router_registry
        ;;
    configure_hosts)
        configure_hosts
        ;;
    configure_shared_dns)
        configure_shared_dns
        ;;
    add_skydns_hosts)
        add_skydns_hosts
        ;;
    replace_template_domain)
        replace_template_domain
        ;;
    configure_nfs_service)
        configure_nfs_service
        ;;
    configure_registry_to_ha)
        configure_registry_to_ha
        ;;
    configure_repos)
        # Full repo refresh: wipe, rewrite, then update the system.
        clean_repos
        configure_repos
        yum update -y
        ;;
    modify_IS_for_testing)
        # Second CLI argument is the replacement registry server.
        modify_IS_for_testing "$2"
        ;;
    update_playbook_rpms)
        update_playbook_rpms
        ;;
    garbage_clean_up)
        garbage_clean_up
        ;;
    confiugre_kerberos)
        # Action name intentionally matches the (misspelled) function name.
        confiugre_kerberos
        ;;
    *)
        echo "Invalid Action: $1"
esac
| true
|
ffcf6f5f657340e61b4c7a9bcdf33963d673c338
|
Shell
|
bumplzz69/openshift-tools
|
/ansible/roles/openshift_firstboot_scripts/files/insights-register.sh
|
UTF-8
| 328
| 2.8125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
# Register this host with Red Hat Insights under a display name built from
# the cluster name, node type and the EC2 internal hostname.
#
# Options:
#   -c <name>  cluster name (also used as the Insights group)
#   -t <type>  node type (e.g. master / node)
while getopts c:t: option
do
    case "${option}"
    in
        c) CLUSTERNAME=${OPTARG};;
        t) NODETYPE=${OPTARG};;
    esac
done
# AWS EC2 instance-metadata endpoint; only reachable on EC2 instances.
INTERNAL_HOSTNAME=$(curl 169.254.169.254/latest/meta-data/local-hostname)
# Expansions are quoted so empty or space-containing values cannot split the
# command line (the original passed them unquoted).
nohup /usr/bin/redhat-access-insights --register --display-name="$CLUSTERNAME-$NODETYPE-$INTERNAL_HOSTNAME" --group="$CLUSTERNAME" &
| true
|
c10bd7e61041cf65934db4d97828cda4cd20f05b
|
Shell
|
Tubbz-alt/LLNMS
|
/src/core/assets/llnms-scan-asset.sh
|
UTF-8
| 6,518
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# File: llnms-scan-asset.sh
# Author: Marvin Smith
# Date: 12/13/2013
#
# Purpose: Runs the scanners on the specified asset.
#
#----------------------------------------#
#- Print Usage Instructions -#
#----------------------------------------#
usage(){
    # Emit the help text in one here-doc rather than a chain of echoes.
    cat <<USAGE_TEXT
$(basename "$0") [options]

 options:
 -h, --help : Print usage instructions.
 -v, --version : Print version information.
 -V, --verbose : Print with verbose output.

 -a, --asset [asset hostname] : Asset hostname to run scans against.

USAGE_TEXT
}
#-------------------------------------#
#- Warning Function -#
#- -#
#- $1 - Warning Message -#
#- $2 - Line Number (Optional). -#
#- $3 - File Name (Optional). -#
#-------------------------------------#
warning(){
    # Dispatch on the argument count instead of an if/elif chain:
    # 1 arg -> message only; 2 args -> message + line (this script's name);
    # otherwise -> message + line + explicit file name.
    case $# in
        1) echo "warning: $1." ;;
        2) echo "warning: $1. Line: $2, File: `basename $0`" ;;
        *) echo "warning: $1. Line: $2, File: $3" ;;
    esac
}
#-------------------------------------#
#- Error Function -#
#- -#
#- $1 - Error Message -#
#- $2 - Line Number (Optional). -#
#- $3 - File Name (Optional). -#
#-------------------------------------#
error(){
    # Same shape as warning(): dispatch on how many details were supplied.
    case $# in
        1) echo "error: $1." ;;
        2) echo "error: $1. Line: $2, File: `basename $0`" ;;
        *) echo "error: $1. Line: $2, File: $3" ;;
    esac
}
#-------------------------------------#
# Version Function #
#-------------------------------------#
version(){
    # Report the LLNMS version; the LLNMS_* variables come from the
    # llnms-info file sourced by the main script body.
    printf '%s\n' "$0 Information"
    printf '\n'
    printf '%s\n' " LLNMS Version ${LLNMS_MAJOR}.${LLNMS_MINOR}.${LLNMS_SUBMINOR}"
}
#------------------------------------------#
#- Given an id, get the scanner path -#
#------------------------------------------#
get_scanner_path_from_id(){
    # Walk every registered scanner file and print the path of the first
    # one whose id matches $1; print nothing when no scanner matches.
    for scanner_file in `llnms-list-scanners -f -l`; do
        scanner_id=`llnms-print-scanner-info -f $scanner_file -i`
        if [ "$scanner_id" = "$1" ]; then
            echo $scanner_file
            return
        fi
    done
}
#---------------------------------#
#- Main Function -#
#---------------------------------#
# Source llnms home
if [ "$LLNMS_HOME" = "" ]; then
    # Default install location when the environment does not provide one.
    LLNMS_HOME="/var/tmp/llnms"
fi
# Import the version info
. $LLNMS_HOME/config/llnms-info
# Import asset utilities
. $LLNMS_HOME/config/llnms-config
# Asset name and path
ASSET_HOSTNAME=''
ASSET_PATH=''
# ASSET_FLAG=1 means the next positional argument is the asset hostname.
ASSET_FLAG=0
VERBOSE_FLAG=0
# Parse Command-Line Options
# Hand-rolled parser: flags set state, and any non-flag word immediately
# after -a/--asset is consumed as the asset hostname.
for OPTION in "$@"; do
    case $OPTION in
        # Print usage instructions
        '-h' | '--help' )
            usage
            exit 1
            ;;
        # Print verbose output
        '-V' | '--verbose' )
            VERBOSE_FLAG=1
            ;;
        # Print version information
        '-v' | '--version' )
            version
            exit 1
            ;;
        # Set the asset flag
        '-a' | '--asset' )
            ASSET_FLAG=1
            ;;
        # Process flag values or print error message
        *)
            # Grab the asset hostname
            if [ $ASSET_FLAG -eq 1 ]; then
                ASSET_FLAG=0
                ASSET_HOSTNAME=$OPTION
            # otherwise, throw the error for an unknown option
            else
                error "Unknown option $OPTION" "$LINENO"
                usage
                exit 1
            fi
            ;;
    esac
done
#-----------------------------------------------#
#- If no asset specified, then throw error -#
#-----------------------------------------------#
if [ "$ASSET_HOSTNAME" = '' ]; then
    error "No asset specified." "$LINENO"
    usage
    exit 1
fi

#-------------------------------------#
#- Make sure the asset exists -#
#-------------------------------------#
# get a list of assets
ASSET_LIST=`llnms-list-assets -l -path`
for ASSET_FILE in $ASSET_LIST; do
    # check the hostname. if they match, then retrieve the asset filename
    # (the last matching file wins if hostnames are duplicated).
    if [ "`llnms-print-asset-info -f $ASSET_FILE -host`" = "$ASSET_HOSTNAME" ]; then
        ASSET_PATH=$ASSET_FILE
    fi
done

# if the asset path is blank, then the asset was not found.
if [ "$ASSET_PATH" = '' ]; then
    error "Asset with hostname ($ASSET_HOSTNAME) does not exist." "$LINENO"
    usage
    exit 1
fi
#-----------------------------------------------------#
#- Get the list of scanners and run each -#
#-----------------------------------------------------#
# - get a list of registered scanners
ASSET_SCANNERS=`llnms-print-asset-info -f $ASSET_PATH -s`
for ASSET_SCANNER in $ASSET_SCANNERS; do
    # get the file pathname for the scanner
    SCANNER_PATH=`get_scanner_path_from_id $ASSET_SCANNER`
    # Get the command we have to run
    SCANNER_CMD=`llnms-print-scanner-info -f $SCANNER_PATH -c`
    SCANNER_BASE_PATH=`llnms-print-scanner-info -f $SCANNER_PATH -b`
    # Get the number of arguments to query
    NUMARGS=`llnms-print-scanner-info -f $SCANNER_PATH -num`
    # Build the scanner's "--flag value" argument list.  A POSIX while loop
    # replaces the original bash-only "for (( ... ))" — this script runs
    # under /bin/sh.
    ARGC=`llnms-print-asset-info -f $ASSET_PATH -sac $ASSET_SCANNER`
    ARGLIST=''
    x=1
    while [ "$x" -le "$ARGC" ]; do
        # Get the arg value
        ARGFLG=`llnms-print-asset-info -f $ASSET_PATH -san $ASSET_SCANNER $x`
        ARGVAL=`llnms-print-asset-info -f $ASSET_PATH -sav $ASSET_SCANNER $x`
        ARGLIST="$ARGLIST --$ARGFLG $ARGVAL"
        x=$((x + 1))
    done
    # merge all variables into a single command
    COMMAND_RUN="$SCANNER_BASE_PATH/$SCANNER_CMD $ARGLIST"
    # Run the scanner and capture its exit status IMMEDIATELY: the original
    # read $? only after the log-file echo below, so it tested the echo's
    # status instead of the scanner's and always reported PASSED.
    echo "Running $COMMAND_RUN"
    CMD_OUTPUT=`$COMMAND_RUN`
    RESULT="$?"
    # "&>" is a bashism; redirect stdout and stderr portably instead.
    echo $CMD_OUTPUT > $LLNMS_HOME/log/llnms-scan-asset.log 2>&1
    if [ "$VERBOSE_FLAG" = '1' ]; then
        echo "$CMD_OUTPUT"
    else
        if [ "$RESULT" = '0' ]; then
            echo 'PASSED'
        else
            echo 'FAILED'
        fi
    fi
done
| true
|
f76848a6bbdc4c24563b9f728ad4ec7b4d5e99a3
|
Shell
|
apereo/cas
|
/ci/tests/puppeteer/scenarios/delegated-login-saml2-with-sso/init.sh
|
UTF-8
| 339
| 2.671875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
# Reset the SAML metadata directory for this puppeteer scenario, then make
# the SAML test server launcher executable and run it.
scenario_md="${PWD}/ci/tests/puppeteer/scenarios/${SCENARIO}/saml-md"
saml_server="${PWD}/ci/tests/saml2/run-saml-server.sh"
echo -e "Removing previous SAML metadata directory, if any"
rm -Rf "$scenario_md"
echo -e "Creating SAML metadata directory"
mkdir "$scenario_md"
chmod +x "$saml_server"
"$saml_server"
| true
|
7ea1e81950e794dd89499e80f111516c207185b9
|
Shell
|
karlzheng/vimrc
|
/install.sh
|
UTF-8
| 712
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash -
#===============================================================================
#
# FILE: install.sh
#
# USAGE: ./install.sh
#
# DESCRIPTION: Symlink this repository's .vimrc and .vim into $HOME
# unless they already exist there.
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: Karl Zheng (), ZhengKarl#gmail.com
# COMPANY: Meizu
# CREATED: 2013年12月02日 20时03分52秒 CST
# REVISION: ---
#===============================================================================

set -o nounset # Treat unset variables as an error

# $(pwd) and ${HOME} are quoted so a checkout path (or home directory)
# containing spaces cannot break the tests or the ln commands; the original
# left them unquoted.
if [ ! -f "${HOME}/.vimrc" ];then
    ln -s "$(pwd)/.vimrc" "${HOME}/.vimrc"
fi

if [ ! -d "${HOME}/.vim" ];then
    ln -s "$(pwd)/.vim" "${HOME}/.vim"
fi
| true
|
bfbb70ae175b56159845e6aacffe3bedb8300653
|
Shell
|
obino/appscale
|
/util/gen_release_notes.sh
|
UTF-8
| 1,295
| 4.25
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Helper script to gather all git log messages from the last tag
# and construct a preliminary RELEASE file for the release.
#
# Assumes the new release from the VERSION file.
#
# Must run from the repository root, where RELEASE and VERSION live.
if [ ! -e RELEASE ]; then
    echo "Unable to locate RELEASE file, is your working dir the top level of the repo"
    exit 1
fi
echo "Generating RELEASE file (changelog)"
# header
# Preserve the first 7 lines (the standing header) into a scratch file.
head -7 RELEASE > t_release
echo "New Release version: $(tail -1 VERSION) - released $(date +'%B %Y')"
# release line
echo "$(tail -1 VERSION), released $(date +'%B %Y')" >> t_release
echo "Highlights of features and defects fixed in this release:" >> t_release
echo -n "Gathering git logs"
# Git logs from last tag (eg 3.4.0)
# Keep only merge-commit subject lines, stripped of commit/author noise and
# blank lines, and indent each as a bullet.
git log $(git describe --tags --abbrev=0)..HEAD | grep -A5 'Merge pull request' | grep -v 'commit ' | grep -v Author: | grep -v -- '--' | grep -v "Merge" | grep -v -e '^[[:space:]]*$' | sed 's/[[:space:]]*/ - /' >> t_release
echo "" >> t_release
echo "" >> t_release
echo "...done"
echo "Known Issues:" >> t_release
echo "" >> t_release
echo -n "Appending old release notes"
# Everything after the 7-line header of the old RELEASE is carried forward.
tail -n+7 RELEASE >> t_release
echo "...done"
echo -n "Constructing new RELEASE file"
cp t_release RELEASE
rm -f t_release
echo "...done"
echo "Be sure to read through the RELEASE file before commiting the changes"
| true
|
75b70a40edeaf5c0e86f20526804c5c3e8fe88ee
|
Shell
|
yucca-t-k/togoid-config
|
/config-suspend/hint-uniprot/update.sh
|
UTF-8
| 719
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build the hint-to-uniprot ID mapping by querying the med2rdf SPARQL
# endpoint and writing the stripped result rows to link.tsv.
# SPARQL query
QUERY='PREFIX hnt: <http://purl.jp/10/hint/>
PREFIX bp3: <http://www.biopax.org/release/biopax-level3.owl#>
PREFIX uni: <http://identifiers.org/uniprot/>
PREFIX obo: <http://purl.obolibrary.org/obo/>
SELECT (replace(str(?hint), "http://purl.jp/10/hint/", "") as ?hint_id) (replace(str(?uniprot), "http://identifiers.org/uniprot/", "") as ?uniprot_id)
FROM <http://med2rdf.org/graph/hint>
where{
?hint a bp3:MolecularInteraction ;
bp3:participant / obo:BFO_0000051 ?uniprot .
}
ORDER BY ?hint_id'
# curl -> format -> delete header
# NOTE(review): the first sed turns CSV commas into SPACES, yet the output
# file is named link.tsv — confirm whether a tab separator was intended.
curl -s -H "Accept: text/csv" --data-urlencode "query=$QUERY" http://sparql.med2rdf.org/sparql | sed -e 's/\"//g; s/,/\ /g' | sed -e '1d' > link.tsv
| true
|
c60ac7e65ccaa3571cc0b5a32efd8c24cc834dd1
|
Shell
|
Jf-Chen/FRN-main
|
/trained_model_weights/download_weights.sh
|
UTF-8
| 453
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Download the pretrained model weights archive from Google Drive and
# unpack it if the checksum matches.
# Helpers: gdownload (Google Drive fetch) and conditional_tar (extract only
# when the md5 checksum of the archive matches the expected value).
. ../utils/gdownload.sh
. ../utils/conditional.sh

# this section is for downloading the trained_model_weights.tar
# md5sum for the downloaded trained_model_weights.tar should be 0dde61f0b520ae9c17c9aae6dcc64b55
echo "downloading trained_model_weights..."
gdownload 1n6zg5Bkj5FzSHpRbJdzlUkSVDtmjgobm trained_model_weights.tar
conditional_tar trained_model_weights.tar 0dde61f0b520ae9c17c9aae6dcc64b55
echo "trained_model_weights downloaded"
| true
|
4853eafc5923469a7cf5393934ea8f20dd504d3d
|
Shell
|
tobieniepodam/apache-ssl
|
/view
|
UTF-8
| 355
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Pretty-print an SSL/TLS artifact (.key, .crt or .csr) with openssl.
if [ "$#" -lt 1 ]; then
    echo "Usage: $0 file-to-view"
    exit 1
fi

if [ ! -f "$1" ]; then
    echo "File '$1' doesn't exist!"
    exit 1
fi

# Dispatch on the file extension.  "$1" is quoted in the openssl calls so
# paths containing spaces work (the original passed it unquoted).
case ${1##*.} in
    key) openssl rsa -noout -text -in "$1" ;;
    crt) openssl x509 -noout -text -in "$1" ;;
    csr) openssl req -noout -text -in "$1" ;;
    *)
        echo 'Unknown file format' >&2
        exit 1
esac
| true
|
9dcfc8f06256dd319e678d601931abc177af6d2f
|
Shell
|
tholum/phpjs
|
/_tools/compile_node.sh
|
UTF-8
| 520
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Concatenate every php.js function file under functions/*/ into a single
# CommonJS module for Node.
output="nodejs_php.js"
echo "Starting to compile all Functions, it may take a minute or two"
echo "module.exports = { " > $output
# Iterate with a shell glob instead of parsing `ls` output — word splitting
# and re-globbing of `ls` results breaks on unusual file names.
for file in functions/*/*; do
    # Skip non-files (also covers an unmatched glob left as a literal).
    [ -f "$file" ] || continue
    # Rewrite only the first `function name(` into `name: function(`.
    sed '0,/function/s/function\s\(.*\)(/\1: function(/g' "$file" >> $output
    echo "," >> $output
done
# Drop the trailing comma line and close the exports object.
sed -i '$ d' $output
echo "} " >> $output
echo "Compiler Script by Tim Holum"
echo "nodejs Compatible file $output"
| true
|
fd6045f8cc0903d9e897f9682903d6a46e2f9998
|
Shell
|
ExpediaGroup/styx
|
/travis/deploy.sh
|
UTF-8
| 2,191
| 3.390625
| 3
|
[
"Apache-2.0",
"EPL-1.0",
"MIT",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Copyright (C) 2013-2021 Expedia Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -x

# Release deployment: verify credentials, stage the GPG keyrings, publish
# signed artifacts to Sonatype, push the docker image, and build the macOS
# bundle for the GitHub release page.
function deployRelease() {
    RELEASE_VARS='GPG_PUBLIC_KEYS GPG_SECRET_KEYS GPG_KEY_NAME GPG_PASSPHRASE DOCKER_USERNAME DOCKER_PASSWORD SONATYPE_JIRA_USERNAME SONATYPE_JIRA_PASSWORD'
    # Fail fast when any required credential is missing.  "${!var}" is
    # quoted: the original bare [ -z ${!var} ] mis-parses values containing
    # spaces and collapses to [ -z ] (always true) logic errors.
    for var in $RELEASE_VARS
    do
        if [ -z "${!var}" ] ; then
            echo "Variable $var is not set. Cannot continue with release. Needed vars: $RELEASE_VARS"
            exit 1
        fi
    done

    export GPG_DIR="$(pwd)/gpg-temp"
    mkdir -p "$GPG_DIR"
    echo "$GPG_PUBLIC_KEYS" | base64 --decode >> "${GPG_DIR}/pubring.gpg"
    echo "$GPG_SECRET_KEYS" | base64 --decode >> "${GPG_DIR}/secring.gpg"

    echo "Deploying Release to sonatype and docker hub"

    #Ensure a correct version was configured in the pom files.
    mvn versions:set -DnewVersion=$TRAVIS_TAG

    #Deploy to sonatype
    mvn deploy --settings travis/mvn-settings.xml -B -U -P sonatype-oss-release,linux -DskipTests=true -Dmaven.test.skip=true

    #Deploy to dockerhub
    echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
    mvn install -f distribution/pom.xml -B -U -P docker -Dstyxcore.docker.image=hotelsdotcom/styx
    docker push hotelsdotcom/styx

    #Prepare macosx bundle for github releases
    mvn install -B -U -P macosx,release -DskipTests=true -Dmaven.test.skip=true -Dgpg.skip=true
}
function deploySnapshot() {
# Publish a SNAPSHOT build to the Sonatype OSS repository (no GPG signing,
# tests already ran earlier in the CI pipeline).
echo "Deploying snapshot to sonatype"
mvn deploy --settings travis/mvn-settings.xml -B -U -P sonatype-oss-release,linux -DskipTests=true -Dmaven.test.skip=true -Dgpg.skip=true -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn
}
# A git tag present in the Travis build means this is a release; otherwise
# publish a snapshot.
if [[ -n "$TRAVIS_TAG" ]]; then
deployRelease
else
deploySnapshot
fi
| true
|
dc7d46613168914c0c805908ccf9014cdb86ac44
|
Shell
|
djheisterberg/test-jobs
|
/abaqus/rsh
|
UTF-8
| 226
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Choose a remote-shell command from the first letter of the short
# hostname: hosts starting with "n" or "r" use classic rsh, everything
# else uses ssh. Logs the invocation, then replaces this process with
# the chosen command.
prefix=`hostname -s`
prefix=${prefix:0:1}
case "$prefix" in
  n|r) RCMD=/usr/bin/rsh ;;
  *)   RCMD=/usr/bin/ssh ;;
esac
echo ">>> $RCMD $(date) $@"
exec $RCMD "$@"
| true
|
b4c82a24e6f659e4734772efa80a59e651b45876
|
Shell
|
havleoto/kplts
|
/run/icbink.sh
|
UTF-8
| 219
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/sh
#ICBINK=~/src/kernel/icbink/entry_point-c
# Allow the user to point at their own interpreter build via $ICBINK.
# Quoted + default expansion: the original unquoted `[ -z $ICBINK ]`
# misbehaves when the value contains whitespace.
if [ -z "${ICBINK:-}" ] ; then
ICBINK=~/klisp/icbink/entry_point-c
fi
# Run the compatibility suite from the tests directory; abort if the
# directory cannot be entered instead of running from the wrong cwd.
cd "$(dirname "$0")/../tests" || exit 1
"$ICBINK" ../run/icbink-compatibility.k
| true
|
1fff5d47d20c3e32acb53b62e3605b2b26822120
|
Shell
|
jamesmacdoel/Scripts
|
/Zimbra-LockAccountBySendRate.sh
|
UTF-8
| 559
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Lock any Zimbra account that has sent more than 1500 messages to foo.com
# (crude outbound-spam throttle based on zmmsgtrace output).
# FIX: `2> /dev/nul` previously sent stderr to a *regular file* named
# /dev/nul instead of discarding it — corrected to /dev/null.
RATE=$(/opt/zimbra/libexec/zmmsgtrace -s "@foo.com" 2> /dev/null |grep "foo.com \-\->" |sort |uniq -c)
RATE=$(echo $RATE |sed -e 's/ --> /,/g')
OLDIFS=$IFS
# Split the joined trace output on commas into one entry per sender.
IFS=','
aRATE=($RATE)
for line in "${aRATE[@]}" ;do
COUNT=$(echo $line |awk '{print $1}')
EMAIL=$(echo $line |awk '{print $2}')
# Default COUNT to 0 so an empty entry does not break the numeric test.
if [ ${COUNT:-0} -gt 1500 ]; then
echo lock account $EMAIL
/opt/zimbra/bin/zmprov ma $EMAIL zimbraAccountStatus lock
fi
done
IFS=$OLDIFS
| true
|
bbddc56cf34cb681eef5367aced149b50ed72f1c
|
Shell
|
jonathanwilsonami/MySQL-Student-Grades-Project
|
/SQL/HW8/createTxt8.sh
|
UTF-8
| 571
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Assemble the hw8.txt submission by concatenating the homework artifacts
# with numbered section headers.
printf '1. This is load_students.sh\n\n' > hw8.txt
cat load_students.sh >> hw8.txt
printf '2. This is insert_template_students.awk\n\n' >> hw8.txt
cat insert_template_students.awk >> hw8.txt
printf '3. This is table_schema_students.sql\n\n' >> hw8.txt
cat table_schema_students.sql >> hw8.txt
printf '4. This is read_domain.sql.\n\n' >> hw8.txt
printf 'Head\n\n' >> hw8.txt
# Skip the 16-line header, then keep the next 10 lines
# (was the useless-cat pipeline `cat f | tail | head`).
tail -n +17 read_students.sql | head -n 10 >> hw8.txt
printf 'Tail\n\n' >> hw8.txt
tail -n2 read_students.sql >> hw8.txt
| true
|
4c3ff7b6a42dc21360f067b88aa415eda2ddccef
|
Shell
|
SheldonRay/lab_2_3308
|
/#lab2_scripts.sh#
|
UTF-8
| 379
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Authors : Sheldon Ray
# Date: 1/30/2019
# Problem 1: prompt for a regular expression and a file name, then write
# numbered matches into per-category result files.
echo "Please enter a regular expression along with a text file name"
read -r regularEx
read -r fileName
# FIXES: the original used the *input* redirection `<` where output `>`
# was clearly intended (the "..._results.txt" files were never written),
# referenced an undefined lowercase $filename on the last search, and
# left the variables unquoted.
grep -n "$regularEx" "$fileName" > command_results.txt
grep -n '@geocities.com' "$fileName" > email_results.txt
grep -n '303-' "$fileName" > phone_results.txt
| true
|
6db8b4ff54042f5414b07f736e3c565afcd12eea
|
Shell
|
andrei4ka/fuel-main-redhat
|
/utils/jenkins/fuel-ci/update_jobs.sh
|
UTF-8
| 138
| 2.546875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Update every Jenkins job definition found under the directory given as $1.
# NUL-delimited traversal so paths containing spaces or newlines survive;
# `read -r` keeps backslashes literal.
find "$1" -type f -print0 | while IFS= read -r -d '' FILENAME; do
    jenkins-jobs --conf /etc/jenkins_jobs/jenkins_jobs.ini update "$FILENAME"
done
| true
|
dd8d89864ffbb3dadebe837602b73d5f1ceb404e
|
Shell
|
jifeng/git-script
|
/basic.sh
|
UTF-8
| 2,517
| 2.953125
| 3
|
[] |
no_license
|
# git Eclipse plugin
eclipse git plugin
http://www.eclipse.org/egit/download/
git on windows
Step 0
# Git Bash command-line tool
# Download the Windows client: http://windows.github.com/
# Works like the Linux command line, but a few commands are unavailable (e.g. $> ll) and window contents cannot be copied (right-click does nothing)
Step 1
# After installing, set the account name and email that commits will show.
git config --global user.name "jifeng"
git config --global user.email "wade428@163.com"
# Check the current settings
git config --get user.name
git config --get user.email
Step 2
# Create an SSH key used to authenticate when connecting to the server
cd .ssh
# Make sure the files id_rsa and id_rsa.pub under .ssh are unique; back them up first if they already exist.
mkdir back_rsa
cp id_rsa* back_rsa
rm id_rsa*
ssh-keygen -t rsa -C "wade428@163.com"
# Then just press Enter through all the prompts
# Copy the contents of id_rsa.pub and add it to GitHub
# Log in to GitHub, click (top-right) Account Settings ---> SSH Public keys ---> add another public keys
# Paste your locally generated key into the key text box and click add key
# Test whether the connection works
ssh -t git@github.com
Hi coolme200/top! You've successfully authenticated, but GitHub does not provide shell access.'
Step 3
# Create the working directory
mkdir helloworld
cd helloworld (到workspace的项目目录下执行)
# Initialize, otherwise this directory is not treated as a git project
git init
# Set the remote repository address for the project
# Example 1 (for GitHub)
git remote add origin master http://github.com/coolme200/hello
# Example 2 (for GitLab)
git remote add origin master git@gitlab.taobao.ali.com:varworld.git
# Possible error:
fatal: remote origin already exists.
# Fix:
git remote rm origin
Step 4
# Fetch the code; don't be lazy here — a bare `git pull` fetches master and every branch
git pull origin master
Step 5
# Modify the code
# Commit to the local repository; files already tracked still need `add` after being changed
git add test.js
git commit -m 'commit'
# Or, to skip the separate add step
git commit -a
Step 6
# Push to the server; don't be lazy here either, or every branch gets pushed along with master
git push origin master
# Possible error:
error: failed to push som refs to ........
# Fix: pull first, then push
git pull origin master
ex:1
git push origin
default to pushing only the current branch to <code> origin </code> use <code> git config remote.origin.push HEAD </code>.
| true
|
84b61870684586b20cf94ae563103d6d5aa52994
|
Shell
|
ccoachecu/csp2101
|
/workshops/ws4/ws4e.sh
|
UTF-8
| 198
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Retrieve all lines that end in IP http://192.168.5.162/ and write them
# to a file called 162attempts.txt.
# (grep reads the file directly — the previous `cat file | grep` was a
# useless use of cat.)
grep 'http://192.168.5.162/$' sampledata.txt > 162attempts.txt
exit 0
| true
|
b5406153f2b6f73732282a1f94c0c39f4bb9f337
|
Shell
|
ethersphere/bee-staging
|
/install.sh
|
UTF-8
| 238
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
set -euxo pipefail
# Default to empty so the emptiness check below prints its friendly error;
# previously a missing NAMESPACE tripped `set -u` ("unbound variable")
# before the message could be shown.
if [[ -z "${NAMESPACE:-}" ]]; then
    echo "Environment variable NAMESPACE is required."
    echo "Example: NAMESPACE=test ./install.sh"
    exit 1
fi
echo "INSTALLING BEE NODES"
helmsman -apply -f bee.yaml
| true
|
a1eedb2f74ead8919e15cdc5f8c30d9d396185d7
|
Shell
|
redhat-cip/edeploy-roles
|
/puppet-master.install
|
UTF-8
| 6,904
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Copyright (C) 2013-2014 eNovance SAS <licensing@enovance.com>
#
# Author: Emilien Macchi <emilien.macchi@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Puppetmaster node (passenger ready)
#
# Positional arguments supplied by the eDeploy build driver.
src="$1"
dir="$2"
version="$3"
ROLE=puppet-master
ORIG=$(cd $(dirname $0); pwd)
# Shared helper functions (do_chroot, install_packages_disabled, ...).
. ${ORIG}/functions
rh_install_passenger () {
# Build the Puppet-master-under-Passenger stack for RHEL/CentOS inside the
# chroot at $dir: install packages, build passenger from gems (it is not
# packaged for el7), lay out the rack app, and drop a *disabled* Apache
# vhost on port 8140 (no certificates exist yet at image-build time).
# Install all dependencies to run Puppet in passenger mode
install_packages_disabled $dir puppet-server git augeas ntp httpd puppetdb-terminus python-pip mod_wsgi apr-util-devel apr-devel httpd-devel zlib-devel openssl-devel libcurl-devel gcc-c++ gcc mod_ssl ruby-devel
# passenger is not packaged in el7, let's install & configure it
do_chroot ${dir} gem install rack passenger
do_chroot ${dir} passenger-install-apache2-module -a
mkdir -p ${dir}/usr/share/puppet/rack/puppetmasterd
mkdir -p ${dir}/usr/share/puppet/rack/puppetmasterd/public ${dir}/usr/share/puppet/rack/puppetmasterd/tmp
cp ${dir}/usr/share/puppet/ext/rack/config.ru ${dir}/usr/share/puppet/rack/puppetmasterd/
do_chroot ${dir} chown puppet:puppet /usr/share/puppet/rack/puppetmasterd/config.ru
# Bug https://tickets.puppetlabs.com/browse/PUP-1386
cat >> ${dir}/usr/share/puppet/rack/puppetmasterd/config.ru <<EOF
Encoding.default_external = Encoding::UTF_8
Encoding.default_internal = Encoding::UTF_8
EOF
# Derive the installed passenger gem version so the vhost can point at the
# gem's compiled Apache module.
passenger_version=$(do_chroot ${dir} gem list | grep passenger | awk '{print $2}' |sed 's/[)(]//g')
# do not enable by default as without cert it'll fail
cat > ${dir}/etc/httpd/conf.d/puppetmaster.conf.disabled <<EOF
LoadModule passenger_module /usr/local/share/gems/gems/passenger-${passenger_version}/buildout/apache2/mod_passenger.so
PassengerRoot /usr/local/share/gems/gems/passenger-${passenger_version}
PassengerRuby /usr/bin/ruby
PassengerHighPerformance on
PassengerMaxPoolSize 12
PassengerPoolIdleTime 1500
PassengerStatThrottleRate 120
Listen 8140
<VirtualHost *:8140>
SSLEngine on
SSLProtocol ALL -SSLv2
SSLCipherSuite ALL:!aNULL:!eNULL:!DES:!3DES:!IDEA:!SEED:!DSS:!PSK:!RC4:!MD5:+HIGH:+MEDIUM:!LOW:!SSLv2:!EXP
SSLHonorCipherOrder on
SSLCertificateFile /var/lib/puppet/ssl/certs/mycert.pem
SSLCertificateKeyFile /var/lib/puppet/ssl/private_keys/mycert.pem
SSLCertificateChainFile /var/lib/puppet/ssl/certs/ca.pem
SSLCACertificateFile /var/lib/puppet/ssl/certs/ca.pem
SSLVerifyClient optional
SSLVerifyDepth 1
SSLOptions +StdEnvVars +ExportCertData
RequestHeader unset X-Forwarded-For
RequestHeader set X-SSL-Subject %{SSL_CLIENT_S_DN}e
RequestHeader set X-Client-DN %{SSL_CLIENT_S_DN}e
RequestHeader set X-Client-Verify %{SSL_CLIENT_VERIFY}e
DocumentRoot /usr/share/puppet/rack/puppetmasterd/public/
RackBaseURI /
<Directory /usr/share/puppet/rack/puppetmasterd/>
Options None
AllowOverride None
Order allow,deny
allow from all
</Directory>
</VirtualHost>
EOF
}
install_puppet () {
# Distro-specific installation of the puppet master stack: Debian/Ubuntu
# use the puppetlabs apt repository and the packaged passenger
# integration; RHEL/CentOS build passenger via rh_install_passenger.
case "$OS" in
"Debian")
local repository=$(add_main_repository $DIST)
cat > ${dir}/etc/apt/sources.list.d/$RELEASE-backport.list <<EOF
deb $repository ${RELEASE}-backports main
EOF
;;
"Ubuntu")
;;
esac
update_repositories $dir
case "$OS" in
"Debian" | "Ubuntu")
do_chroot ${dir} wget --no-verbose http://apt.puppetlabs.com/puppetlabs-release-$RELEASE.deb
do_chroot ${dir} dpkg -i puppetlabs-release-$RELEASE.deb
do_chroot ${dir} rm puppetlabs-release-$RELEASE.deb
do_chroot ${dir} apt-get update
PACKAGES="puppetmaster puppetmaster-passenger puppet augeas-tools git ntp puppetdb-terminus"
install_packages_disabled $dir $PACKAGES
do_chroot ${dir} a2dissite puppetmaster
;;
"RedHatEnterpriseServer")
add_puppet_repository $DIST
add_epel_repository $DIST
attach_pool_rh_cdn $dir $RHN_CDN_POOL_ID
add_rh_cdn_repo $dir rhel-7-server-optional-rpms
rh_install_passenger
remove_epel_repository $DIST
remove_puppet_repository $DIST
;;
"CentOS")
add_puppet_repository $DIST
add_epel_repository $DIST
rh_install_passenger
remove_epel_repository $DIST
remove_puppet_repository $DIST
;;
*)
# NOTE(review): $(RELEASE) runs RELEASE as a *command*; ${RELEASE} was
# almost certainly intended here.
fatal_error "OS ($OS) or Release $(RELEASE) not supported"
;;
esac
# Wipe any prebuilt SSL state; certificates are generated on first run.
# `|| :` tolerates the path not existing.
do_chroot ${dir} rm -rf /var/lib/puppet/ssl/* || :
}
install_puppet
# Puppet Board
# Deploy Puppetboard (a PuppetDB web dashboard) as a WSGI app on port 82.
mkdir -p $dir/var/www/puppetboard
cat > $dir/var/www/puppetboard/wsgi.py <<EOF
from __future__ import absolute_import
import os
# Needed if a settings.py file exists
os.environ['PUPPETBOARD_SETTINGS'] = '/var/www/puppetboard/settings.py'
from puppetboard.app import app as application
EOF
case "$OS" in
"Debian" | "Ubuntu")
install_packages $dir python-pip libapache2-mod-wsgi
do_chroot $dir pip install puppetboard
cat > $dir/etc/apache2/sites-available/puppetboard <<EOF
NameVirtualHost *:82
Listen 82
<VirtualHost *:82>
WSGIDaemonProcess puppetboard user=www-data group=www-data threads=5
WSGIScriptAlias / /var/www/puppetboard/wsgi.py
ErrorLog /var/log/apache2/puppetboard.error.log
CustomLog /var/log/apache2/puppetboard.access.log combined
Alias /static /usr/local/lib/python2.7/dist-packages/puppetboard/static
<Directory /usr/local/lib/python2.7/dist-packages/puppetboard>
WSGIProcessGroup puppetboard
WSGIApplicationGroup %{GLOBAL}
Order deny,allow
Allow from all
</Directory>
</VirtualHost>
EOF
;;
"CentOS"|"RedHatEnterpriseServer")
# TODO FL [Sat Jun 7 17:13:39 2014]
# need to find a way to install mod-wsgi and pip
# NOTE(review): written without a .conf suffix — stock httpd only includes
# conf.d/*.conf, so this vhost may never be loaded; confirm.
cat > $dir/etc/httpd/conf.d/puppetboard <<EOF
NameVirtualHost *:82
Listen 82
<VirtualHost *:82>
WSGIDaemonProcess puppetboard user=apache group=apache threads=5
WSGIScriptAlias / /var/www/puppetboard/wsgi.py
ErrorLog /var/log/httpd/puppetboard.error.log
CustomLog /var/log/httpd/puppetboard.access.log combined
Alias /static /usr/lib/python2.6/site-packages/puppetboard/static
<Directory /usr/lib/python2.6/site-packages/puppetboard>
WSGIProcessGroup puppetboard
WSGIApplicationGroup %{GLOBAL}
Require all granted
</Directory>
</VirtualHost>
EOF
;;
esac
| true
|
009c0f213579ffc641cb59e5237fb94e5480d9c6
|
Shell
|
soma115/wikikracja
|
/scripts/post_install.sh
|
UTF-8
| 2,435
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Post-install tasks for a wikikracja instance: run Django migrations,
# build translations, load fixtures, set the site domain, request a TLS
# certificate and restart the application under supervisord.
DOMAIN=`grep ALLOWED_HOSTS zzz/settings_custom.py | cut -d "'" -f 2`
HOME=/home/user/${DOMAIN}/
# Helper: run manage.py from the project directory with the venv python.
manage_py() {
cd ${HOME}/${DOMAIN}; ${HOME}/venv/bin/python ${HOME}/${DOMAIN}/manage.py "$@";
}
# DEPLOY DB
for app in article chat customize elibrary glosowania obywatele; do
manage_py makemigrations ${app}
done
manage_py makemigrations
manage_py migrate
for lang in 'en' 'pl'; do
manage_py makemessages -l ${lang} --ignore=.git/* --ignore=static/* --ignore=.mypy_cache/*
done
manage_py compilemessages --ignore=.git/* --ignore=static/* --ignore=.mypy_cache/*
# FIXTURES
# Careful - loading fixtures a second time duplicates existing records,
# especially if you use: "pk": null,
# TODO: grep settings.py to find out which language should be deployed
# Dump: ./manage.py dumpdata customize.customize > customize.json # customize.customize is a table
# ./manage.py loaddata article/fixtures/articles.json
manage_py loaddata ${HOME}/${DOMAIN}/customize/fixtures/customize.json
# SITE NAME
# Point the django_site record at the actual domain name.
sqlite3 db.sqlite3 "update django_site set name='${DOMAIN}', domain='${DOMAIN}'"
# CERTBOT
certbot --nginx --quiet --agree-tos --domains ${DOMAIN}
# VOTINGS
# Fixtures with the initial wiki votings.
manage_py loaddata ${HOME}/${DOMAIN}/glosowania/fixtures/votings.json
# COLLECT STATIC
manage_py collectstatic --no-input -v 0 --no-post-process -i *bootstrap.css
# RESTART INSTANCE
supervisorctl restart ${DOMAIN}:asgi0
| true
|
f78a7b9289e70bdf9a76852a6000740217143a67
|
Shell
|
tnici/hetzner-k8s-builder
|
/scripts/download-cluster-config.sh
|
UTF-8
| 2,949
| 4.125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#-------------------------------------------------------------------------------
#
# Downloads the cluster settings from Rancher and place them into ~/.kube/config.
#
#-------------------------------------------------------------------------------
# Stop immediately if any of the deployments fail (set -e aborts the
# script on the first non-zero exit status).
set -e
# ------------------------------------------------------------
checkResult() {
    # Abort with "<label> failed" when the *previous* command failed.
    # $? is captured on entry, before any command in this function runs.
    local status=$?
    if [ "$status" -ne 0 ]; then
        echo "${1} failed"
        exit 1
    fi
}
echoSection() {
    # Print the given title between two horizontal rules, padded with
    # blank lines, as a visual section marker in the script output.
    local rule='-----------------------------------------------------'
    echo
    echo "${rule}"
    echo ${1}
    echo "${rule}"
    echo
}
# ------------------------------------------------------------
# Stop immediately if any of the operations fail
set -e
echo "Loading variables"
echoSection "Validating parameters"
# Required environment: RANCHER_API_TOKEN, RANCHER_CLUSTER_ID, RANCHER_HOST.
if [[ ! ${RANCHER_API_TOKEN} ]]
then
echo "ERROR: RANCHER_API_TOKEN env var is not defined."
echo "Please, define it with the token generated in Rancher in the form of 'token-iu6fg:lhvhildfkgjdlfkgjdfdfágládfdfgpxp5vb'."
exit 1
fi
if [[ ! ${RANCHER_CLUSTER_ID} ]]
then
echo "ERROR: RANCHER_CLUSTER_ID env var is not defined."
echo "Please, define it with the cluster id of the Terraform output in the form of 'c-hmmwr'. Originally, it was printed by apply-cluster.sh."
exit 1
fi
if [[ ! ${RANCHER_HOST} ]]
then
echo "ERROR: RANCHER_HOST env var is not defined."
echo "Please, define it with the FQN of your Rancher install in the form of 'rancher.example.com'. (Host must be accessible via https)"
exit 1
fi
# ------------------------------------------------------------
# Backing up the current Kubectl config
if [[ -f ~/.kube/config ]]
then
echo "Backing up current kubectl config"
# NOTE(review): --iso-8601 is GNU date only; not available on BSD/macOS.
TS="$(date --iso-8601=seconds)"
mv ~/.kube/config ~/.kube/config_${TS}
fi
echoSection "Downloading the new Kubectl config file"
# POST to Rancher's generateKubeconfig action; the kubeconfig arrives
# embedded in a JSON response and is unwrapped by the sed calls below.
curl -u "${RANCHER_API_TOKEN}" \
-X POST \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
-d '{}' \
-o ~/.kube/config \
"https://${RANCHER_HOST}/v3/clusters/${RANCHER_CLUSTER_ID}?action=generateKubeconfig"
# The response in JSON is with encoded characters and extra unneeded content.
# - Needs to be decoded
# - only the text in the "config" attribute is needed
#
sed -i 's/{"baseType":"generateKubeConfigOutput","config":"//g' ~/.kube/config
sed -i 's/\\n/\n/g' ~/.kube/config
sed -i 's/\\"/"/g' ~/.kube/config
sed -i 's/","type":"generateKubeConfigOutput"}//g' ~/.kube/config
sed -e '1h;2,$H;$!d;g' -e 's/\\\\\n //g' ~/.kube/config > /tmp/kubeconfig
mv /tmp/kubeconfig ~/.kube/config
# ------------------------------------------------------------------------------
echoSection "Testing kubectl with a node query"
kubectl get nodes
echo "---------------------------------------------------"
echo
echo "SUCCESS. "
echo
echo "Check the above node list, it should contain the nodes of the new cluster."
echo
| true
|
35cf67a479e284755bc5b37c6bd7294f17e5d6ba
|
Shell
|
tectronics/zakboekje
|
/src/main/scripts/zakboekje.sh
|
UTF-8
| 147
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
# Regenerate a PDF for every SVG with Inkscape, then run the Java build.
rm pdf/*.pdf
for fn in svg/*.svg; do
# Quote paths so filenames containing spaces survive word splitting
# (the originals were unquoted).
pdf=`basename "$fn" ".svg"`
echo "$pdf"
inkscape --export-pdf="pdf/$pdf.pdf" "$fn"
done
cd java
ant
| true
|
c809fa009f52327da2730453011f1320d9192284
|
Shell
|
davidjsanders/cowbull-k8s
|
/scripts/validate-configmap.sh
|
UTF-8
| 763
| 3.703125
| 4
|
[
"Apache-2.0"
] |
permissive
|
# Ensure the ConfigMap named $1 exists in the `cowbull` namespace; when it
# is missing, try to create it from a local manifest (.local/$1.yaml),
# otherwise print what the manifest must contain and fail.
# NOTE(review): relies on a `short_banner` function defined by the calling
# script — this file must be sourced/invoked from a context providing it.
config_map=$(kubectl -n cowbull get configmaps --no-headers $1 2> /dev/null)
ret_stat=$?
if [ "$ret_stat" != "0" ]
then
if [ -f .local/$1.yaml ]
then
short_banner "Loading $1 configuration from local manifest"
kubectl apply -n cowbull -f .local/$1.yaml &> /dev/null
if [ "$?" != "0" ]
then
short_banner "Unable to apply configuration map $1 manifest!"
exit 1
fi
else
short_banner "Local manifest for $1 was not found."
short_banner "It needs to exist before running the loader as a configmap or a file: .local/$1.yaml"
short_banner "It must contain..."
cat examples/$1.example
exit $ret_stat
fi
else
short_banner "Found configmap: $1"
fi
| true
|
9ebac962da41de7a0426716a109f082054b18e62
|
Shell
|
bentsherman/pbs-toolkit
|
/modules/install-fastqc.sh
|
UTF-8
| 1,091
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Install FastQC into ~/software/<name>/<version> and generate a matching
# environment-modules modulefile under ~/modules.
MODULEDIR="${HOME}/modules"
SOFTWAREDIR="${HOME}/software"
MODULE_NAME="fastqc"
MODULE_VERSION="0.11.7"
MODULE_PATH="${SOFTWAREDIR}/${MODULE_NAME}/${MODULE_VERSION}"
# Archive name derived from MODULE_VERSION instead of being hard-coded
# in four separate places.
ARCHIVE="fastqc_v${MODULE_VERSION}.zip"
# make sure user is not on the login node
if [ ${HOSTNAME} = "login001" ]; then
    echo "error: please use a compute node to install this module"
    # `exit -1` is not a valid POSIX exit status; use 1.
    exit 1
fi
# download and extract fastQC
mkdir -p ${SOFTWAREDIR}/${MODULE_NAME}
wget -q "https://www.bioinformatics.babraham.ac.uk/projects/fastqc/${ARCHIVE}"
unzip -q "${ARCHIVE}"
# HACK: fix fastqc permissions
chmod 755 FastQC/fastqc
rm -rf ${MODULE_PATH}
mv FastQC ${MODULE_PATH}
rm -rf "${ARCHIVE}"
# create modulefile
mkdir -p ${MODULEDIR}/${MODULE_NAME}
cat > "${MODULEDIR}/${MODULE_NAME}/${MODULE_VERSION}" <<EOF
#%Module1.0
##
## ${MODULE_NAME}/${MODULE_VERSION} modulefile
##
module-whatis "Set up environment for ${MODULE_NAME}"
# for Tcl script use only
set version "${MODULE_VERSION}"
# Make sure no other hpc modulefiles are loaded before loading this module
eval set [ array get env MODULESHOME ]
prepend-path PATH ${MODULE_PATH}
EOF
| true
|
269ee9849f88ef9ebde0664f25201b4993516a7b
|
Shell
|
0x78f1935/BotZilla_LEGACY
|
/database/build-design.sh
|
UTF-8
| 292
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build src/00_base.sql from the YAML schema design using pg-db-tools,
# inside a throwaway virtualenv that is removed on exit.
ENV=`mktemp -d`
# Quote $ENV so cleanup and activation work even if the temp path
# contains spaces (the originals were unquoted).
trap 'rm -rf "$ENV"' EXIT
virtualenv --python=python3 "$ENV"
{
. "$ENV/bin/activate"
python --version
pip install git+git://github.com/hendrikx-itc/pg-db-tools
compile-db-schema sql --if-not-exists design/schema-design.yml > src/00_base.sql
}
exit 0
| true
|
44fe355901892e372b7066b3512ab72e16e61081
|
Shell
|
janost/locutus
|
/locutus.sh
|
UTF-8
| 5,945
| 4.25
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# -E: inherit ERR trap; -e: exit on error; -u: unset vars are errors;
# pipefail: a pipeline fails if any stage fails.
set -Eeuo pipefail
# Settings that must be supplied by the .env file; validated by check_config.
CONFIG_VARS=(REPO_DIR PW_FILE BACKUP_NAME BACKUP_LIST BORG_CREATE_OPTIONS BORG_PRUNE_OPTIONS PWGEN_CMD BORG_INIT_CMD BORG_PASSCOMMAND REPO_SYNC_COMMAND)
print_info() {
    # Informational message: green "[INFO]" tag when stdout is a terminal,
    # plain text otherwise (logs, pipes).
    local text="$1"
    if [ -t 1 ]; then
        echo -e "\033[0;32m[INFO]\033[0m ${text}"
    else
        echo "[INFO] ${text}"
    fi
}
print_err() {
    # Error message: red "[ERROR]" tag when stdout is a terminal, plain
    # text otherwise. (Writes to stdout, matching the rest of the script.)
    local text="$1"
    if [ -t 1 ]; then
        echo -e "\033[0;31m[ERROR]\033[0m ${text}"
    else
        echo "[ERROR] ${text}"
    fi
}
print_warn() {
    # Warning message: yellow "[WARN]" tag when stdout is a terminal.
    # FIX: the non-terminal branch previously printed "[ERROR]" instead of
    # "[WARN]", mislabelling warnings in logs and pipes.
    local YELLOW='\033[0;33m'
    local NC='\033[0m'
    local MSG="$1"
    if [ -t 1 ]; then
        echo -e "${YELLOW}[WARN]${NC} ${MSG}"
    else
        echo "[WARN] ${MSG}"
    fi
}
check_dependencies() {
    # Abort unless all required external tools are on PATH.
    local tool
    for tool in borg pwgen; do
        if ! command -v "$tool" > /dev/null 2>&1; then
            print_err "You need $tool to use this utility."
            exit 1
        fi
    done
}
load_config() {
    # Load configuration from the .env file that sits next to this script,
    # exporting every variable it defines (allexport).
    # FIX: the existence check used ${SCRIPT_DIR}/.env but the file was
    # then sourced from the *current working directory*; both now use
    # SCRIPT_DIR so the script works regardless of where it is run from.
    SCRIPT_DIR="$(dirname "$BASH_SOURCE")"
    if [ -f "${SCRIPT_DIR}/.env" ]; then
        set -o allexport
        source "${SCRIPT_DIR}/.env"
        set +o allexport
    else
        print_err "You must configure this script via a .env file."
        exit 1
    fi
}
check_var() {
    # Die when the variable whose *name* is passed in $1 is unset/empty.
    local var_name="$1"
    # ${!var_name}: indirect expansion — the value of the named variable.
    if [ -z "${!var_name}" ]; then
        print_err "Configuration error: ${var_name} needs to be set."
        exit 1
    fi
}
check_config() {
    # Validate every required setting listed in CONFIG_VARS.
    # nounset is suspended while probing variables that may not exist.
    set +u
    local name
    for name in "${CONFIG_VARS[@]}"; do
        check_var "$name"
    done
    set -u
}
generate_password() {
# Create the repository passphrase file at PW_FILE using PWGEN_CMD.
# Globals read: PW_FILE, PWGEN_CMD. Exits the script on failure.
print_info "Password file doesn't exists at ${PW_FILE}, generating..."
PW_DIR="$(dirname "${PW_FILE}")"
mkdir -p "${PW_DIR}"
chmod 0700 "${PW_DIR}"
if ${PWGEN_CMD} > "${PW_FILE}"; then
print_info "Password successfully generated to ${PW_FILE}."
else
print_err "Failed to generate password. Aborting."
exit 1
fi
# Read-only so the passphrase cannot be modified accidentally.
chmod 0400 "${PW_FILE}"
print_warn "Password has been generated to ${PW_FILE}. You might want to save it in a password manager."
}
init_repo() {
# Initialise a new borg repository at REPO_DIR via BORG_INIT_CMD.
# Globals read: REPO_DIR, BORG_INIT_CMD. Exits the script on failure.
print_info "Borg repo doesn't exists at ${REPO_DIR}, creating..."
mkdir -p "${REPO_DIR}"
chmod 0700 "${REPO_DIR}"
if ${BORG_INIT_CMD}; then
print_info "Borg repo has been created in ${REPO_DIR} using keyfile encryption."
print_warn "You might want to backup the repo key in a password manager: \`borg key export ${REPO_DIR} <file>\`."
else
print_err "Failed to create borg repository in ${REPO_DIR}. Aborting."
exit 1
fi
}
borg_backup() {
# Create one archive named $1 containing the paths listed in BACKUP_LIST.
# BORG_CREATE_OPTIONS and BACKUP_LIST are intentionally left unquoted so
# they word-split into multiple arguments. Exits the script on failure.
local BACKUP_NAME="$1"
print_info "Creating backup ${BACKUP_NAME} with borg..."
if borg create ${BORG_CREATE_OPTIONS} "${REPO_DIR}::${BACKUP_NAME}" ${BACKUP_LIST}; then
print_info "Backup has been created successfully."
else
print_err "Failed to create backup with borg. Aborting."
exit 1
fi
}
sync_repo() {
# Push the repository off-site using the configured REPO_SYNC_COMMAND
# (e.g. an rsync/rclone invocation). Exits the script on failure.
print_info "Syncing borg repo to remote storage..."
if ${REPO_SYNC_COMMAND}; then
print_info "Backup repository has been synced to remote storage successfully."
else
print_err "Failed to sync backup repository to remote storage. Aborting."
exit 1
fi
}
borg_list() {
    # Without arguments: list all archives in the repository.
    # With an archive name: list that archive's contents.
    if [ "$#" -eq 0 ]; then
        print_info "Listing backups..."
        borg list "${REPO_DIR}"
    else
        print_info "Listing contents of backup \"$1\"..."
        borg list "${REPO_DIR}"::"$1"
    fi
}
borg_delete() {
    # Remove a single archive ($1) from the repository, printing stats.
    local archive="$1"
    print_info "Deleting backup ${archive} from repo ${REPO_DIR}"
    borg delete --stats "${REPO_DIR}::${archive}"
}
borg_info() {
    # Show repository information, or archive information when a name is
    # given as the first argument.
    case "$#" in
        0) borg info "${REPO_DIR}" ;;
        *) borg info "${REPO_DIR}"::"$1" ;;
    esac
}
borg_prune() {
    # Drop archives outside the retention policy in BORG_PRUNE_OPTIONS
    # (unquoted on purpose so the options word-split).
    print_info "Pruning backups..."
    if ! borg prune ${BORG_PRUNE_OPTIONS} "${REPO_DIR}"; then
        print_err "Failed to prune backups. Aborting."
        exit 1
    fi
    print_info "Backup repository has been pruned."
}
borg_check() {
    # Verify repository consistency including archive data integrity.
    print_info "Checking backup repository for corruption..."
    if ! borg check --verify-data "${REPO_DIR}"; then
        print_err "Backup repository seems to be corrupted."
        exit 1
    fi
    print_info "Backup repository has been checked, no errors found."
}
borg_mount() {
    # FUSE-mount the repository at $1 in the foreground; CTRL+C unmounts.
    local mountpoint="$1"
    print_info "Mounting backup in foreground... Press CTRL+C to unmount the repository."
    borg mount --foreground "${REPO_DIR}" "${mountpoint}"
}
borg_export_tar() {
    # Export archive $1 to the tar file $2 (compression inferred from the
    # target extension via --tar-filter=auto).
    local archive="$1"
    local target="$2"
    print_info "Exporting tar archive from backup ${archive} to file ${target}..."
    if ! borg export-tar --tar-filter=auto "${REPO_DIR}"::"${archive}" "${target}"; then
        print_err "Error during tar file export. Aborting."
        exit 1
    fi
    print_info "Tar file has been successfully exported."
}
# CLI entry: the first argument selects the action.
if [ "$#" -lt 1 ]; then
print_info "Please specify the action you want to perform."
print_info "Valid actions: create, list, list <BACKUPNAME>, delete <BACKUPNAME>, info, info <BACKUPNAME>, check, prune, sync, mount <MOUNTPOINT>, export-tar <BACKUPNAME> <FILENAME>."
exit 0
fi
check_dependencies
load_config
check_config
# First-run bootstrap: generate the passphrase / initialise the repository
# when they do not exist yet.
if [ ! -f "${PW_FILE}" ]; then
generate_password
fi
if [ ! -d "${REPO_DIR}" ]; then
init_repo
fi
case "$1" in
create)
# Full cycle: snapshot, prune old archives, sync off-site.
borg_backup "${BACKUP_NAME}"
borg_prune
sync_repo
;;
list)
if [ "$#" -gt 1 ]; then
borg_list "$2"
else
borg_list
fi
;;
delete)
if [ ! "$#" -eq 2 ]; then
print_err "Please provide a backup to delete."
exit 1
fi
borg_delete "$2"
;;
prune)
borg_prune
;;
sync)
sync_repo
;;
info)
if [ "$#" -gt 1 ]; then
borg_info "$2"
else
borg_info
fi
;;
check)
borg_check
;;
mount)
if [ ! "$#" -eq 2 ]; then
print_err "Please provide a mountpoint. Example: locutus.sh mount /mount/point"
exit 1
fi
borg_mount "$2"
;;
export-tar)
if [ ! "$#" -eq 3 ]; then
print_err "Please provide a backup archive name and a target file. Example: locutus.sh 20190223-121420 /target/file/name.tar.xz"
exit 1
fi
borg_export_tar "$2" "$3"
;;
*)
print_err "Unknown action: $1"
;;
esac
| true
|
678cef277722e46432461c1c4df15d879bce14d8
|
Shell
|
eushaun/nine-board-tic-tac-toe
|
/testing.sh
|
UTF-8
| 355
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Play agent against specified program 100 times
# Example:
# ./playpy.sh lookt 12345
if [ "$#" -ne 2 ]; then
    echo "Usage: $0 <player> <port>" >&2
    exit 1
fi
# p is the starting port number.
# FIX: it was hard-coded to 11300, silently ignoring the <port> argument
# that the usage message requires; now it honours $2.
p=$2
i=0
while ((i < 100))
do
    ./servt -p $p & sleep 0.1
    ./agent.py -p $p & sleep 0.1
    "./$1" -p $p
    ((p++))
    ((i++))
done
| true
|
3fcf720610fe66d6fd490802808f96cbd8673fc0
|
Shell
|
OProtalk/Protalk
|
/Script-sh/ble_host.sh
|
UTF-8
| 2,500
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# ble host setup
# written by nanite 2017-03-04
# ref: https://developer.nordicsemi.com/nRF5_IoT_SDK/doc/0.9.0/html/a00092.html
# ANSI color codes used in the instructional output below.
NC='\033[0m' # no color
GREEN='\033[0;32m'
CYAN='\033[0;36m'
YELLOW='\033[1;33m'
echo -e ${CYAN}-------------- `basename "$0"` -------------${NC}
# Mount the debugfs file system to /sys/kernel/debug. You can ls to check the contents of the folder.
#sudo mount -t debugfs none /sys/kernel/debug
#ls /sys/kernel/debug
# Load 6LoWPAN module. You might want to lsmod to verify that the module is indeed loaded.
sudo modprobe bluetooth_6lowpan
#lsmod | grep bluetooth_6lowpan # Enable the bluetooth 6lowpan module. The PSM value should be set to 0x23(35) to enable the module, if using Linux kernel version less than 4.0.
#echo 35 | sudo tee /sys/kernel/debug/bluetooth/6lowpan_psm
echo 1 | sudo tee /sys/kernel/debug/bluetooth/6lowpan_enable > /dev/null
# HCI commands such as hciconfig and hcitool are used to configure Bluetooth devices.
# The device name hciX is assigned to the (Bluetooth) device installed in the system.
# Everything below only prints step-by-step usage instructions for the
# operator; no further system changes are made.
echo
echo -e "${YELLOW}Instructions${NC}"
echo
echo -e "To discover all advertising devices, scan using following hcitool command."
echo -e "${YELLOW}>>> ${GREEN}sudo hcitool lescan${NC}"
echo -e "Then connect to the device; an IPv6 router sending out RA messages."
echo -e "${YELLOW}>>> ${GREEN}echo \"connect ${CYAN}(Bluetooth Device Address; MAC address)${GREEN} 1\" | sudo tee /sys/kernel/debug/bluetooth/6lowpan_control${NC}"
echo -e "List all connected BLE devices after that, to see the connection to the router has successfully been made."
echo -e "${YELLOW}>>> ${GREEN}sudo hcitool con${NC}"
echo
echo -e "Check if you have established a connection. You should be able to find out your IPv6 address on bt0 interface."
echo -e "${YELLOW}>>> ${GREEN}ifconfig${NC}"
echo -e "Try to ping the device."
echo -e "${YELLOW}>>> ${GREEN}ping6 -I bt0 ${CYAN}(IPv6 address of the device)${GREEN} -c 5${NC}"
echo -e "Or you can do the multicast."
echo -e "${YELLOW}>>> ${GREEN}ping6 -I bt0 ff02::1${NC}"
echo
echo -e "To disconnect from the device."
echo -e "${YELLOW}>>> ${GREEN}echo \"disconnect ${CYAN}(Bluetooth Device Address; MAC address)${GREEN}\" | sudo tee /sys/kernel/debug/bluetooth/6lowpan_control${NC}"
echo -e "Check if there are active connections left."
echo -e "${YELLOW}>>> ${GREEN}ifconfig${NC}"
echo
echo "There you go. Good luck."
echo
echo -e ${CYAN}---------- end of `basename "$0"` ----------${NC}
| true
|
e125c05e8f4f36c6de8f7d0579a5a42df3f88db0
|
Shell
|
isliulin/disp
|
/shell/killsysxl.sh
|
UTF-8
| 484
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Forcefully stop every running sysxlj process by PID.
echo "$(date '+%Y/%m/%d %k:%M:%S') system killprocess BEGIN..."
pidof_bin="/usr/sbin/pidof"
[ ! -f "$pidof_bin" ] && echo "$pidof_bin is not exist" && exit 1;
# -x also matches scripts run under an interpreter.
pids=$($pidof_bin -x sysxlj)
if [ -z "$pids" ]; then
echo "$(date '+%Y/%m/%d %k:%M:%S') system is not start..,exit";
exit 0;
fi
for i in $pids
do
kill -9 $i
# FIX: the log message claimed SIGTERM while the script actually sends
# SIGKILL (-9); the message now matches the behavior.
echo "$(date '+%Y/%m/%d %k:%M:%S') send SIGKILL to $i and it is stopped.."
done
sleep 1;
echo "$(date '+%Y/%m/%d %k:%M:%S') system kill OK..."
exit 0
| true
|
91d1b01664ca3bae16aa3ce4ac1799f9d2071ab4
|
Shell
|
satago/aws-candy-tools
|
/src/main/bundle/bin/fleet
|
UTF-8
| 2,248
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
# Resolve this script's directory; group definition files live in ../fleet.
SCRIPT_PATH=$( cd "$(dirname "$0")" ; pwd -P )
FLEET_DIR=${SCRIPT_PATH}/../fleet
CMD=$1
GROUP=$2
# Sub-commands recurse through this same script via $0:
# groups -> stacks -> deploy/run.
case "$CMD" in
groups)
# Expand a group name, a "<prefix>-all" wildcard, or "all" into the
# matching group file names under FLEET_DIR.
if [[ -z ${GROUP} ]]; then
$0 groups -all
elif [[ "${GROUP}" == "all" ]]; then
$0 groups -all
elif [[ ${GROUP} =~ (.*)-all ]]; then
GROUP=${BASH_REMATCH[1]}
for name in $(find ${FLEET_DIR} -type f -name "${GROUP}*"); do
$0 groups $(basename ${name})
done
elif [[ -f ${FLEET_DIR}/${GROUP} ]]; then
echo ${GROUP}
fi
;;
stacks)
# Collect the unique stack names for the group: the file's lines, or the
# file's own name when the group file is empty.
STACKS=()
for name in $($0 groups ${GROUP}); do
FILE=${FLEET_DIR}/${name}
if [[ -s ${FILE} ]]; then
for stack in $(cat ${FILE}); do
if [[ ! " ${STACKS[@]} " =~ " ${stack} " ]]; then
STACKS+=(${stack})
fi
done
else
# File is empty, its name is the same as name of a stack
stack=$(basename ${FILE})
if [[ ! " ${STACKS[@]} " =~ " ${stack} " ]]; then
STACKS+=(${stack})
fi
fi
done
for stack in "${STACKS[@]}"; do
echo ${stack}
done
;;
deploy)
if [[ "${GROUP}" =~ (.*)all ]]; then
echo Multigroup deployments not supported as order of stacks cannot be strictly defined
echo Define separate group for deployment if needed
exit 1
fi
for stack in $($0 stacks ${GROUP}); do
${SCRIPT_PATH}/deploy push ${stack}
done
;;
run)
# "${@:3}": forward every argument after <group> to the stack runner.
for stack in $($0 stacks ${GROUP}); do
${SCRIPT_PATH}/stack run ${stack} "${@:3}"
done
;;
*)
echo $"Usage: $0 <command> <args>
$0 deploy <group-name>
$0 groups [[<filter>]-all]
$0 run [[<filter>]-all] <command>
$0 stacks [[<filter>]-all]"
exit 1
;;
esac
| true
|
e8261d7ef6742caace5ad8ec05e374ba735e9c5d
|
Shell
|
marssystems/bitcoin-tutorials
|
/electrs/electrum_wallet.sh
|
UTF-8
| 1,890
| 3.375
| 3
|
[] |
no_license
|
# Download and run this script to the Linux desktop:
# $ wget https://raw.githubusercontent.com/openoms/bitcoin-tutorials/master/electrs/4_electrum_install && bash 4_electrum_install.sh
# https://electrum.org/#download
# Installs the Electrum 3.3.8 desktop wallet and points it at a RaspiBlitz
# electrs server on the local network.
# Install dependencies:
sudo apt-get install -y python3-pyqt5
# Download package:
wget https://download.electrum.org/3.3.8/Electrum-3.3.8.tar.gz
# Verify signature:
wget https://raw.githubusercontent.com/spesmilo/electrum/master/pubkeys/ThomasV.asc
gpg --import ThomasV.asc
wget https://download.electrum.org/3.3.8/Electrum-3.3.8.tar.gz.asc
verifyResult=$(gpg --verify Electrum-3.3.8.tar.gz.asc 2>&1)
# Quoted so the multi-line gpg output is preserved for counting.
goodSignature=$(echo "${verifyResult}" | grep -c 'Good signature')
echo "goodSignature(${goodSignature})"
if [ "${goodSignature}" -lt 1 ]; then
echo ""
echo "!!! BUILD FAILED --> PGP Verify not OK / signature(${goodSignature})"
exit 1
fi
# Run without installing: tar -xvf Electrum-3.3.8.tar.gz
# python3 Electrum-3.3.8/run_electrum
# Install with PIP:
sudo apt-get install -y python3-setuptools python3-pip
python3 -m pip install --user Electrum-3.3.8.tar.gz[fast]
# add install dir to PATH (and make persist)
PATH=$PATH:~/.local/bin
touch ~/.profile
export PATH
# FIX: source the profile — the original tried to *execute* ~/.profile,
# which fails because the file is not executable.
. ~/.profile
echo "Type the LAN IP ADDRESS of your RaspiBlitz followed by [ENTER]:"
read RASPIBLITZ_IP
# Make Electrum config persist (editing ~/.electrum/config)
# sudo nano ~/.electrum/config
# "rpcuser": "raspibolt",
# "server": "192.168.1.239:50001:t",
electrum setconfig oneserver true
electrum setconfig server $RASPIBLITZ_IP:50001:t
electrum --oneserver --server $RASPIBLITZ_IP:50001:t
echo "To start again: run \`electrum\` in the terminal."
echo "To connect through SSL:"
# FIX: escape the placeholder so the hint prints "$YOUR_DOMAIN" literally
# instead of expanding an undefined variable to nothing.
echo "Run: \`electrum --oneserver --server \$YOUR_DOMAIN:50002:s\`"
echo "edit ~/.electrum/config: \"server\": \"<your_domain_or_dynDNS>:50002:s\""
# FIX: removed the duplicated trailing `electrum --oneserver ...` launch —
# the wallet was already started above, and relaunching it after the usage
# hints were printed was almost certainly unintentional.
| true
|
2f0af4472f3158de6c70c3bdf5fd721525fdd97c
|
Shell
|
NeilBetham/dotfiles
|
/bootstrap
|
UTF-8
| 1,489
| 4.21875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# bootstrap — for every module directory in this repo, symlink its *.link
# files into $HOME as dotfiles and execute its *.run bootstrap scripts.
set -euf -o pipefail

# Check which platform we are on
echo -n "Detected OS: "
if [ "$(uname)" == "Darwin" ]; then
  echo "MacOS"
  export MACOS=1
elif [ "$(uname)" == "Linux" ]; then
  echo "Linux"
  export LINUX=1
fi

# Check if the shell is interactive
if [ -t 0 ]; then
  echo "Running with Shell"
  export INTERACTIVE=1
fi

# Find the current directory location of the dotfiles repo so that we can link to it later
export CWD="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export UTILS="${CWD}/utils"

# Directories matching this pattern (relative-path artifacts, .git) are not modules.
MODULE_REGEX="^[\./]+$|\.git"

# sym_link <target> <link-name> — force-create a symlink (quoted: paths may
# contain spaces; the original passed $1/$2 unquoted).
sym_link() {
  ln -sf "$1" "$2"
}

# get_modules <dir> — list candidate module directories under <dir>.
# 'grep -E' replaces the deprecated 'egrep'.
get_modules() {
  find "$1" ! -path "${CWD}" -type d | grep -E -v "${MODULE_REGEX}"
}

# link_files <dir> — symlink every FOO.link in <dir> to ~/.FOO.
link_files() {
  FILES_TO_LINK=$(find "$1" -regex '.*\.link$')
  for FILE in ${FILES_TO_LINK}
  do
    DEST="${HOME}/.$(basename "${FILE}" .link)"
    echo "Linking ${FILE} to ${DEST}"
    sym_link "${FILE}" "${DEST}"
  done
}

# link_modules <dir> — run link_files over every module.
link_modules() {
  for MODULE in $(get_modules "$1")
  do
    link_files "${MODULE}"
  done
}

# bootstrap_module <dir> — execute the module's *.run scripts in version
# order; a failing script is reported but does not abort the whole bootstrap.
bootstrap_module() {
  BOOTSTRAPS=$(find "$1" -regex '.*\.run$' | sort -V)
  for BOOTSTRAP in ${BOOTSTRAPS}
  do
    echo -n "Running bootstrap $BOOTSTRAP... "
    if "$BOOTSTRAP"; then
      echo "completed"
    else
      echo "failed"
    fi
  done
}

# bootstrap_modules <dir> — run bootstrap_module over every module.
bootstrap_modules() {
  for MODULE in $(get_modules "$1")
  do
    bootstrap_module "${MODULE}"
  done
}

link_modules "$CWD"
bootstrap_modules "$CWD"
# Add flag for dealing with git signatures
| true
|
ba378fde6241c26c3ca4cce891ece2ec3206166d
|
Shell
|
msgpo/wok
|
/get-algobox/stuff/get-algobox
|
UTF-8
| 4,179
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# get-package - create and install SliTaz package algobox
#
# (C) 2020 SliTaz - GNU General Public License v3.
# Author : unknown
# modified by HGT on 2020-02-10
#
# === Initialisations ===
PKGS_DB="/var/lib/tazpkg" # packages database directory
PACKAGE="algobox"
WEB_SITE="https://www.xm1math.net/algobox/"
CATEGORY="misc"
SHORT_DESC="Algorithm creation tool for education."
DEPENDS="libQtWebkit libQtXml libQtGui libQtCore gcc-lib-base"
URL="${WEB_SITE}download.html"
# Declare functions check_root, status, ...
. /lib/libtaz.sh
# and make commandline options (if any) available as variables
# is_installed — return 0 when $PACKAGE has an entry in the tazpkg database
# under $ROOT (i.e. the package is deemed installed), non-zero otherwise.
is_installed()
{
	test -d $ROOT$PKGS_DB/installed/$PACKAGE
}
# Show commandline options, if requested by --help
if [ "$help" == "yes" ]
then
echo "Commandline options:
$0
--version=<version>
--root=<path-to-root>
--install=yes|no
--keep=no|yes
--tmpdir=<directory-to-build-package>"
exit
fi
# Check for system administrator privileges
check_root
title "Package $PACKAGE will be build as SliTaz package and installed"
# Fetch latest version, unless version is set by option --version
[ -z "$version" ] && version="latest"
# Install SliTaz package, unless inhibited by option --install=no
[ -z "$install" ] && install="yes"
# Delete SliTaz package file $PACKAGE-$VERSION.tazpkg after installation,
# unless option --keep=yes is given
[ -z "$keep" ] && keep="no"
# Directory for temporary files
TMP_DIR="$tmpdir"
[ -z "$tmpdir" ] && TMP_DIR="/tmp/get-$PACKAGE"
# Logging file (unused by now)
LOG=$TMP_DIR/get-$PACKAGE.log
cat <<EOT
Options in use:
root : $root/
version : $version
install package: $install
keep tazpkg : $keep
build directory: $TMP_DIR
EOT
separator; newline
# === Remove package, if installed ===
if is_installed
then
action "Removing installed version..."
tazpkg remove $PACKAGE --root="$root/"
# BUGFIX: '[ ! is_installed ]' tested the non-empty literal string
# "is_installed" (always false), so a failed removal was never detected.
# Call the function itself: if the package is still installed, abort.
is_installed &&
die "Can't remove installed version. Exiting."
fi
# === Fetch archive file, if not existing ===
if [ "$version" == "latest" ]
then
FILE=$(wget -q -O - $URL | sed '/debian_10_amd64.deb/!d; s|.*href=.\([^"]*\).*|\1|')
WGET_URL="${WEB_SITE}$FILE"
FILE=$(basename $FILE)
else
die "Can fetch latest version only. Exiting."
fi
CUR_DIR=$(pwd)
mkdir -p $TMP_DIR
cd $TMP_DIR
if [ -f $FILE ]
then
echo "Using existing archive file $FILE"
else
action "Fetching the archive"
newline
wget --no-check-certificate $WGET_URL
if [ ! -f $FILE ]
then
cd $CUR_DIR
rm -rf $TMP_DIR
echo "Could not transfer $FILE from $URL. Exiting."
exit 1
fi
fi
# === Extract files from archive ===
action "Extracting the archive"
mkdir $PACKAGE
# Extract metadata from Debian package
dpkg-deb -e $FILE $PACKAGE/meta
# Extract files from Debian package
dpkg-deb -x $FILE $PACKAGE/fs
status
# Remove archive file
rm -f $FILE
# === Create SliTaz package ===
# Prepare metadata for SliTaz package
if ! grep -q "insert long description" $PACKAGE/meta/control
then
sed '/^Description:/,$!d;s/^Description://' \
< $PACKAGE/meta/control > $PACKAGE/description.txt
fi
SHORT_DESC="$(sed '/^Description:/!d; s/.*: //' $PACKAGE/meta/control)"
MAINTAINER="$(sed '/^Maintainer:/!d; s/.*: //' $PACKAGE/meta/control)"
VERSION="$( sed '/^Version:/!d; s/.*: //' $PACKAGE/meta/control)"
# rename build directory
mv $PACKAGE $PACKAGE-$VERSION
cd $PACKAGE-$VERSION
# Create recipe for SliTaz package
cat > receipt <<EOT
# SliTaz package receipt.
PACKED_SIZE=""
UNPACKED_SIZE=""
PACKAGE="$PACKAGE"
VERSION="$VERSION"
CATEGORY="$CATEGORY"
TAGS="$TAGS"
SHORT_DESC="$SHORT_DESC"
MAINTAINER="$MAINTAINER"
LICENSE="non-free"
WEB_SITE="$WEB_SITE"
DEPENDS="$DEPENDS"
EOT
action "Creating the package $PACKAGE..."
# Pack
cd ..
tazpkg pack $PACKAGE-$VERSION
# Remove package tree
rm -rf $PACKAGE-$VERSION
# === Install the SliTaz package ===
[ "$install" == "yes" ] &&
tazpkg install $PACKAGE-$VERSION.tazpkg --root="$root"
# === Cleanup ===
# Preserve package file, if requested
[ "$keep" == "yes" ] && mv $PACKAGE-$VERSION.tazpkg $CUR_DIR
# Remove temporary build directory
cd $CUR_DIR
rm -rf $TMP_DIR
| true
|
1a5379ec38a674e9ee85f7292b031728f5ac772c
|
Shell
|
ArqueologiaDigital/Software_de_um_Minicomputador__Sistema_Basico_de_Controle__BenicioJSouza_1976
|
/PDF/make_pdf.sh
|
UTF-8
| 349
| 2.78125
| 3
|
[] |
no_license
|
echo "Generating lowres pages:"
for f in ../originais/*.jpg
do
echo "Converting $f ..."
convert $f -rotate 90 -resize 800 $f.lowres.jpg
done
echo "And now generating the PDF..."
convert ../originais/*.lowres.jpg Software_de_um_Minicomputador__Sistema_Basico_de_Controle__BenicioJSouza_1976.pdf
echo "done."
rm ../originais/*.lowres.jpg -f
| true
|
00b7634fdaead8588d2d264a79e3a54e896bbc48
|
Shell
|
nagya11/resflash
|
/host/upgrade.sh
|
UTF-8
| 4,009
| 3.84375
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/sh
# Upgrade the inactive root partition with a new filesystem and activate it
# Copyright Brian Conway <bconway@rcesoftware.com>, see LICENSE for details
set -o errexit
set -o nounset
if set -o|fgrep -q pipefail; then
set -o pipefail
fi
BINDIR=$(dirname ${0})
. ${BINDIR}/resflash.sub
MACHINE=$(machine)
set_attr_by_machine ${MACHINE}
# Parse out disks and partitions
duid=$(fgrep ' / ' /etc/fstab|awk -safe '{ print $1 }'|awk -F . -safe \
'{ print $1 }')
currpart=$(fgrep ' / ' /etc/fstab|awk -safe '{ print $1 }'|awk -F . -safe \
'{ print $2 }')
if [ ${currpart} == 'd' ]; then
newpart=e
else
newpart=d
fi
diskpart=$(mount|fgrep ' on / '|awk -safe '{ print $1 }'|awk -F / -safe \
'{ print $3 }')
currdisk=${diskpart%?}
# Leave one set of logs for debugging
rm -rf /tmp/resflash.??????
MNTPATH=$(mktemp -t -d resflash.XXXXXX)
# Write filesystem to the inactive partition
echo 'Writing filesystem to inactive partition...'
(tee /dev/fd/3|dd of=/dev/r${currdisk}${newpart} ibs=8k obs=1m >> \
${MNTPATH}/00.upgrade.dd 2>&1;) 3>&1|cksum -a ${ALG} -q|cut -c 1-80
# Verify the newly written partition
echo 'Checking filesystem...'
if ! fsck -fp /dev/${currdisk}${newpart}; then
echo 'Filesystem failure, was the filesystem correctly sized?'
exit 1
fi
# Update fstab for the current duid and new partition
mkdir -p ${MNTPATH}/fs
mount -o noatime /dev/${currdisk}${newpart} ${MNTPATH}/fs
mount -s /mbr
trap "sync; umount ${MNTPATH}/fs; umount /mbr; exit 1" ERR INT
echo 'Updating fstab...'
fsduid=$(fgrep ' / ' ${MNTPATH}/fs/etc/fstab|awk -safe '{ print $1 }'|awk -F . \
-safe '{ print $1 }')
sed -e "s/${fsduid}/${duid}/" \
-e "/^${duid}.d/s/${duid}.d/${duid}.${newpart}/" \
${MNTPATH}/fs/etc/fstab >> ${MNTPATH}/fstab.new
cp ${MNTPATH}/fstab.new ${MNTPATH}/fs/etc/fstab
# Update MBR, biosboot(8), boot(8) on amd64/i386, kernels on octeon/macppc, and
# boot.conf most places
if [ -f ${MNTPATH}/fs/usr/mdec/mbr ]; then
echo 'Updating MBR...'
fdisk -uy -f ${MNTPATH}/fs/usr/mdec/mbr ${currdisk} >> \
${MNTPATH}/01.upgrade.fdisk 2>&1
fi
if [ ${MACHINE} == 'amd64' -o ${MACHINE} == 'i386' ]; then
echo 'Updating biosboot(8) and boot(8)...'
installboot -r /mbr ${duid} ${MNTPATH}/fs/usr/mdec/biosboot \
${MNTPATH}/fs/usr/mdec/boot >> ${MNTPATH}/02.upgrade.installboot 2>&1
elif [ ${MACHINE} == 'octeon' ]; then
mount -s /${DOSMNT}
echo "Updating /${DOSMNT} kernels..."
cp ${MNTPATH}/fs/bsd.${newpart} /${DOSMNT}
cp ${MNTPATH}/fs/bsd.${newpart} /${DOSMNT}/bsd
rm -f ${MNTPATH}/fs/bsd
ln ${MNTPATH}/fs/bsd.${newpart} ${MNTPATH}/fs/bsd
if [ -f ${MNTPATH}/fs/bsd.rd ]; then
cp ${MNTPATH}/fs/bsd.rd /${DOSMNT}
fi
sync
umount /${DOSMNT}
elif [ ${MACHINE} == 'macppc' ]; then
echo 'Updating /mbr kernels and regenerating random.seed...'
cp -p ${MNTPATH}/fs/bsd.${newpart} /mbr
rm -f ${MNTPATH}/fs/bsd
ln ${MNTPATH}/fs/bsd.${newpart} ${MNTPATH}/fs/bsd
if [ -f ${MNTPATH}/fs/bsd.rd ]; then
cp -p ${MNTPATH}/fs/bsd.rd /mbr
fi
dd if=/dev/random of=/mbr/etc/random.seed bs=512 count=1 status=none
fi
# Update ${DOSBOOTBIN} bootloader, if applicable
if disklabel ${currdisk}|grep -q 'i:.*MSDOS' && fgrep -q /${DOSMNT} /etc/fstab \
&& [ -f ${MNTPATH}/fs/usr/mdec/${DOSBOOTBIN} ]; then
mount -s /${DOSMNT}
echo "Updating ${DOSBOOTBIN} bootloader..."
cp ${MNTPATH}/fs/usr/mdec/${DOSBOOTBIN} /${DOSMNT}/${DOSBOOTDIR}
sync
umount /${DOSMNT}
fi
sync
umount ${MNTPATH}/fs
# Set the new partition active
echo 'Everything looks good, setting the new partition active...'
if [ ${MACHINE} == 'amd64' -o ${MACHINE} == 'i386' ]; then
sed "/^set device hd0/s/hd0[a-p]/hd0${newpart}/" /mbr/etc/boot.conf >> \
${MNTPATH}/boot.conf.new
cp ${MNTPATH}/boot.conf.new /mbr/etc/boot.conf
elif [ ${MACHINE} == 'macppc' ]; then
sed "/^set image/s/bsd.[de]/bsd.${newpart}/" /mbr/etc/boot.conf >> \
${MNTPATH}/boot.conf.new
cp ${MNTPATH}/boot.conf.new /mbr/etc/boot.conf
fi
sync
umount /mbr
echo 'Upgrade complete!'
| true
|
2412d36e935bf71d5a4f61c2c6e3a133b2b6f987
|
Shell
|
kimleeju/JellyFishDB
|
/src_cpp/perf_result/perf_result_count/1_cas_failure.sh
|
UTF-8
| 1,932
| 2.828125
| 3
|
[] |
no_license
|
# Extract per-thread "CAS failure" counts for each skip-list implementation
# from perf result files and tabulate them into gnuplot-friendly .dat files
# (one row per thread count, one column per skip list).
#srcfile="perf_1000000_uni.rslt"
skiplists="BlockedSpinSkipList BlockedCVSkipList ConcurrentSkipList JellyFishSkipList"
#skiplists="ConcurrentSkipList JellyFishSkipList"
threads="1 2 4 8 16"
# NOTE(review): 'ops' is assigned three times; only the last value ("get")
# takes effect — the earlier lines look like leftover toggles.
ops="put get range_query"
ops="put get"
ops="get"
num="1000000"
workloads="uni zipf_1.2"
for op in $ops; do
for wk in $workloads; do
srcfile="perf_"$op"_"$wk"_"$num".rslt"
echo "$srcfile"
datafile="perf_"$op"_"$wk"_"$num".dat"
# Header row: thread column followed by one column per skip list.
echo "thread $skiplists" > $datafile
for th in $threads; do
echo "$th" | tr "\n" " " >>$datafile
for sk in $skiplists; do
#line=$line`grep $sk $srcfile | grep "run" | awk -v sl="$sk" '$1==sl && $7=='"${th}"' {print $NF, s}'`
#grep $sk $srcfile | grep "run" | awk '$7=='"${th}"' && $1=='"${sk}"' {print $1}'
#line=$line grep $sk $srcfile | grep "comparator" | awk '{print $5}'
#cat "$srcfile" | awk '{if($5 == "th=$th")}'
# The .rslt file is assumed to contain a fixed 24-line section per thread
# count, in the order 1,2,4,8,16 — hence head/tail with 24-line offsets.
# Field 6 of the "CAS failure" line is the count.
if [ "$th" -eq 1 ]; then
head -n 24 "$srcfile" | grep "CAS failure" | grep "$sk" |awk '{print $6}' | tr "\n" " " >> $datafile
elif [ "$th" -eq 2 ]; then
head -n 48 "$srcfile" | tail -n 24 | grep "CAS failure" | grep "$sk" |awk '{print $6}' | tr "\n" " " >> $datafile
elif [ "$th" -eq 4 ]; then
head -n 72 "$srcfile" | tail -n 24 | grep "CAS failure" | grep "$sk" |awk '{print $6}' | tr "\n" " " >> $datafile
elif [ "$th" -eq 8 ]; then
head -n 96 "$srcfile" | tail -n 24 | grep "CAS failure" | grep "$sk" |awk '{print $6}' | tr "\n" " " >> $datafile
elif [ "$th" -eq 16 ]; then
head -n 120 "$srcfile" | tail -n 24 | grep "CAS failure" | grep "$sk" |awk '{print $6}' | tr "\n" " " >> $datafile
fi
#cat "$srcfile" | grep "comparator" | grep "$sk" | awk '{print $5}' | tr "\n" " " >> $datafile
#line+= cat "$srcfile" | grep "comparator" | awk '{print $1 $5}'
#echo $line >> $datafile
done
#echo $line >> $datafile
# Terminate the data row for this thread count.
echo "" >> $datafile
done
cat $datafile
done
done
| true
|
8034690727b78d7e666690ce969cd58eeb6443db
|
Shell
|
i1i1/dotfiles
|
/bin/ddict
|
UTF-8
| 928
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
# ddict — dmenu front-end for sdcv/stardict: pick a word (pre-filled from the
# clipboard), look it up, and show each dictionary's entry as a notification.

# ask <prompt> <initial-text> — dmenu wrapper that returns the user's choice.
ask() { dmenu -i -l 6 -fn Go-Mono -p "$1" -it "$2"; }
# NOTE(review): $'\n' is a bashism under a #!/bin/sh shebang — this assumes
# sh is bash/ksh-compatible on the host.
export IFS=$'\n'
cachedir=~/.cache/dict/
mkdir -p $cachedir
# Build (once per dictionary) a cached word list from each stardict .idx file.
for d in $STARDICT_DATA_DIR/dic/*.idx; do
c=$cachedir/$(basename "$d" .idx)
[ -f "$c" ] || generdictcache "$d" >$c
done
# Offer all cached words in dmenu, seeded with the clipboard contents, and
# log the chosen word for history.
word="$(cat $cachedir/* | ask 'Translate:' $(xclip -out -selection clipboard))"
echo "$word" >> $cachedir/searched
# sdcv output: drop the 4-line header, then parse records of the form
# <dictionary name> / <definition body ...> separated by blank lines; emit one
# notify-send per dictionary, truncating bodies to 400 characters.
sdcv -e -n "$word" | awk 'NR > 4' | awk '
function push(arr, ent) { arr[++arr[0]] = ent }
function pop(arr) { return arr[arr[0]--] }
BEGIN { names[0] = bodys[0] = 0 }
/^$/ { push(names, name); push(bodys, a); a = name = ""; next }
name == "" { name=$0; next }
a == "" { a=$0; next }
{ a=a"\n"$0; next }
END {
maxln = 400
while (names[0]) {
body = pop(bodys)
if (length(body) > maxln) body=substr(body, 1, maxln)"..."
system("notify-send -u critical \""pop(names)"\" \""body"\"")
}
}
'
| true
|
aeb63bcf5675f96758d343434afb2c9e0f19645c
|
Shell
|
ebi-gene-expression-group/scxa-control-workflow
|
/bin/deriveCellMetadata.sh
|
UTF-8
| 3,524
| 3.96875
| 4
|
[] |
no_license
|
#!/usr/bin/sh

# deriveCellMetadata — build cell_metadata.tsv for an experiment.
#
# Arguments:
#   $1  expName             experiment accession (e.g. E-MTAB-1234)
#   $2  isDroplet           'True' for droplet protocols, 'False' for SMART
#   $3  barcodesFile        one cell ID per line (droplet IDs: SAMPLE-BARCODE)
#   $4  sampleMetadataFile  TSV with the sample/run identifier in column 1
#   $5  dropletMetadataFile optional (currently unused)
#
# Environment: SCXA_WORKFLOW_ROOT locates the optional <exp>.cells.txt file.

expName=$1
isDroplet=$2
barcodesFile=$3
sampleMetadataFile=${4}
dropletMetadataFile=${5:-''}

if [ "$isDroplet" = 'False' ]; then

    # For SMART experiments, grep out the metadata lines we need for each run.
    # If matching metadata can't be found then there's something wrong.
    # BUGFIX: 'exit 1' inside the piped while-loop only leaves the subshell,
    # so '|| exit 1' is appended to propagate the failure to the script.
    head -n 1 $sampleMetadataFile > cell_metadata.tsv.tmp && cat $barcodesFile | while read -r l; do
        grep -P "^$l\t" $sampleMetadataFile
        if [ $? -ne 0 ]; then
            echo "Missing metadata for $l" 1>&2
            exit 1
        fi
    done >> cell_metadata.tsv.tmp || exit 1

else
    # For droplet experiments life is complicated. Cell IDs are of the form
    # SAMPLE-BARCODE, so we split the sample ID out and use that to
    # duplicate sample info per row. Where a .cells.tsv file is available, this
    # can be used to add cell-wise metadata.

    # 1. Duplicate library-wise metadata across component cells, looking for
    # the sample identifier of each cell ID in the first column of the sample
    # metadata.

    echo "Copying library-wise metadata across cells..."
    # BUGFIX: the error message printed the literal '\$l'; report the actual ID.
    head -n 1 $sampleMetadataFile > sample_metadata.tsv && cat $barcodesFile | awk -F'-' '{print $1}' | while read -r l; do
        grep -P "^$l\t" $sampleMetadataFile
        if [ $? -ne 0 ]; then
            echo "Missing metadata for $l" 1>&2
            exit 1
        fi
    done >> sample_metadata.tsv || exit 1

    # 2. Derive a cell list that will be the first column of the final output,
    # and paste in front of the sample metadata.
    echo "cell" > cells.tsv.tmp
    cat $barcodesFile >> cells.tsv.tmp

    # BUGFIX: the count was read from the non-existent 'cells.tsv' (always 0),
    # so this consistency check could never pass; count cells.tsv.tmp.
    if [ "$(cat cells.tsv.tmp | wc -l)" = "$(cat sample_metadata.tsv | wc -l)" ]; then
        paste -d "\t" cells.tsv.tmp sample_metadata.tsv > cells_samples.tsv.tmp
        rm -f cells.tsv.tmp
    else
        echo "Number of cells not equal to number of lines in cell-expanded sample metadata" 1>&2
        exit 1
    fi

    # 3. Now take cell-wise metadata found in the *.cells.tsv file (where
    # present), find the matching lines for each cell identifier, and add the
    # resulting columns to the output.

    type=$(echo $expName | awk -F'-' '{print $2}')
    cells_file_name="$SCXA_WORKFLOW_ROOT/metadata/$type/${expName}/${expName}.cells.txt"

    if [ -e "$cells_file_name" ]; then
        echo "Found cell metadata at $cells_file_name, adding to output metadata table..."

        # Barcodes without entries in the annotation: print the right number of
        # delimiters such that we get empty fields.
        emptystring=$(head -n 1 $cells_file_name | sed s/[^\\t]//g)

        head -n 1 $cells_file_name > droplet_cell_metadata.tsv.tmp && cat $barcodesFile | while read -r l; do
            grep -P "^$l\t" $cells_file_name
            if [ $? -ne 0 ]; then
                # BUGFIX: the variable was misspelled 'emtpyString', so missing
                # rows were emitted as blank lines with no tab padding.
                echo -e "$emptystring"
            fi
        done >> droplet_cell_metadata.tsv.tmp || exit 1

        # BUGFIX: this compared against cell_metadata.tsv.tmp, which does not
        # exist yet at this point; compare the two files being pasted instead.
        if [ "$(cat cells_samples.tsv.tmp | wc -l)" = "$(cat droplet_cell_metadata.tsv.tmp | wc -l)" ]; then
            paste -d "\t" cells_samples.tsv.tmp droplet_cell_metadata.tsv.tmp > cell_metadata.tsv.tmp
            rm droplet_cell_metadata.tsv.tmp
        else
            echo "Inconsistent number of lines derived from cell metadata" 1>&2
            exit 1
        fi
    else
        cp cells_samples.tsv.tmp cell_metadata.tsv.tmp
        # BUGFIX: this message was missing 'echo' and was executed as a command.
        echo "No cells file present at $cells_file_name"
    fi

    rm cells_samples.tsv.tmp
fi

mv cell_metadata.tsv.tmp cell_metadata.tsv
| true
|
993137c0ec1004808cc067097441d5aefb757425
|
Shell
|
Fahima1994/USP_Tutorials
|
/unix2/args.sh
|
UTF-8
| 164
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Show the number of arguments passed to this script and the argument list.
printf 'Running: %s\n' "$0"
printf 'Arguments count = %s\n' "$#"
printf 'Arguments list: %s\n' "$*"
| true
|
71147a7634c21924284db9b8dee79833ba7f1830
|
Shell
|
maximejf42/Netron
|
/netron
|
UTF-8
| 438
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build and start Netron as a Python web server from the local source tree.
source=$(dirname "${0}")
pushd "${source}" > /dev/null
# Install JS dependencies once; rebuild the Python package from scratch.
[ -d node_modules ] || npm install
rm -rf ./build/python
python ./setup.py --quiet build
popd > /dev/null
PYTHON_VERSION=$(python -c "import sys; print('.'.join(str(x) for x in sys.version_info[:2]))")
# BUGFIX: quote "$@" — bare $@ re-splits any argument containing whitespace
# (e.g. a model file path), and quote the script path for the same reason.
PYTHONPATH=${source}/build/python/lib python "${source}/build/python/scripts-${PYTHON_VERSION}/netron" "$@"
| true
|
9b711ea46cdcc440d8c0af69f9bcd0b812103ca9
|
Shell
|
shdowofdeath/core
|
/jenkins/schedule/madcore_schedule_del.sh
|
UTF-8
| 159
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Delete the Jenkins schedule job files created for schedule "$Name".
echo "Delete job name: '$Name'"
JOBS_DIR="/opt/jenkins/schedules"
JOB_BASE_NAME="madcore_schedule_${Name}"
# Quote the fixed prefix ($Name may contain spaces or glob characters) and
# keep the trailing * outside the quotes so it still globs; '--' guards
# against names starting with a dash.
rm -- "${JOBS_DIR}/${JOB_BASE_NAME}"*
| true
|
bf2b8b74cfc8aeebbc57dff8060c5ae01eab3c43
|
Shell
|
rubixlinux/rubixlinux
|
/l/perl-dbd-mysql/PKGBUILD
|
UTF-8
| 1,373
| 2.75
| 3
|
[] |
no_license
|
# Maintainer: Joshua Rubin <joshua@rubixlinux.org>
# PKGBUILD recipe: builds the Perl DBD::mysql driver against perl 5.8.8
# and installs it into $startdir/pkg for packaging.
PERL=5.8.8
pkgname=perl-dbd-mysql
pkgver=3.0002
pkgrel=1
pkgdesc="DBD::mSQL / DBD::mysql - mSQL and mysql drivers for the Perl5 Database Interface (DBI)"
url="http://search.cpan.org/~jwied/Msql-Mysql-modules-1.2219/mysql/lib/DBD/mysql.pm"
depends=('perl-dbi' 'mysql')
source=(http://search.cpan.org/CPAN/authors/id/C/CA/CAPTTOFU/DBD-mysql-$pkgver.tar.gz)
md5sums=('df70ba084c97f5f7c2a997c3de2f0ad0')
## Todo:
## None
## Notes:
## None
## Changelog:
## rel1: upgraded to 3.002, rebuilt for perl 5.8.8 and mysql 5.0 2006.03.06
## rel1: upgraded to 2.9008 and rebuilt for perl 5.8.7 2005.06.13
## rel2: Removed .packlist file 2005.05.12
## rel1: Initial Rubix release 2005.04.25
build() {
cd $startdir/src/DBD-mysql-$pkgver
perl Makefile.PL || return 1
make || return 1
#make test || return 1
make install DESTDIR=$startdir/pkg
# Ship docs under /usr/doc/perl-<ver>/DBD-mysql-<ver>.
mkdir -p $startdir/pkg/usr/doc/perl-$PERL/DBD-mysql-$pkgver
cp -a INSTALL.html README TODO $startdir/pkg/usr/doc/perl-$PERL/DBD-mysql-$pkgver
chmod 644 $startdir/pkg/usr/doc/perl-$PERL/DBD-mysql-$pkgver/*
# Drop core-perl tree that 'make install' duplicates into the package.
rm -r $startdir/pkg/usr/lib/perl5/$PERL
# Normalise read-only permissions to the distro convention (644/755).
find $startdir/pkg -perm 444 -exec chmod 644 {} \;
find $startdir/pkg -perm 555 -exec chmod 755 {} \;
# Move man pages from /usr/share/man to /usr/man and remove .packlist.
mv $startdir/pkg/usr/share/man $startdir/pkg/usr
rmdir $startdir/pkg/usr/share
find $startdir/pkg -name .packlist -exec rm {} \;
}
| true
|
af6b22483492900c24414f0378a0530526b49673
|
Shell
|
silky/godothecorrectthing
|
/godothecorrectthing.sh
|
UTF-8
| 1,242
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/sh
set -e
set -u
set -x
editor=subl
guesscwdwithmagic () {
cwd=$HOME
wintitle=$(xdotool getactivewindow getwindowname)
case $wintitle in
nixos:*:*)
cwd=`echo $wintitle | cut -d : -f 3-`
;;
*Sublime\ Text)
cwd=`echo $wintitle | cut -d ' ' -f 1`
cwd=`dirname $cwd`
;;
esac
case $cwd in
~*)
cwd=$HOME/$(echo $cwd | cut -c 2-)
;;
esac
echo $cwd
}
cwd=$(guesscwdwithmagic)
manualexpand () {
# hack to expand from string without eval
case $1 in
/*)
echo $1
;;
~*)
echo $HOME/$(echo $1 | cut -c 2-)
;;
*)
echo $cwd/$1
;;
esac
}
cwd=$(manualexpand $cwd)
text=$(xclip -o | head -n 1)
case $text in
http://* | https://*)
exec xdg-open $text
;;
esac
if echo $text | grep -q -E '^[a-zA-Z/~ \.]+(:[0-9]*)*:?'
then
fwithpos=$(manualexpand $text)
# strip trailing :, go error messages are one place this happens
case $(echo $fwithpos | rev) in
:*)
fwithpos=$(echo $fwithpos | rev | cut -c 2- | rev)
;;
esac
fnopos=$fwithpos
if echo $fwithpos | grep -q -E ':'
then
fnopos=`echo $fnopos | cut -d : -f 1`
fi
if test -f $fnopos
then
case $fnopos in
*)
exec $editor $fwithpos
;;
esac
fi
if test -d $fnopos
then
exec xdg-open $fnopos
fi
fi
| true
|
35f55dfc61f2c4610a7fa962ed7df649ec54d26b
|
Shell
|
ocaml/oasis2opam
|
/.travis-ci.sh
|
UTF-8
| 664
| 3.03125
| 3
|
[] |
no_license
|
# Travis CI driver for oasis2opam: set up an opam switch, build, test
# (on OCaml > 3.12.1), then verify the package can be pinned and removed.
OPAM_PKGS="oasis base-bytes"
OPAM_PKGS_TEST="ounit qcheck"
export OPAMYES=1
# Reuse a cached opam root if present; otherwise initialise a fresh one.
if [ -f "$HOME/.opam/config" ]; then
opam update
opam upgrade
else
opam init
fi
# Select the compiler requested by the build matrix, if any.
if [ -n "${OPAM_SWITCH}" ]; then
opam switch ${OPAM_SWITCH}
fi
eval `opam config env`
opam install $OPAM_PKGS
# Print OCaml backtraces on uncaught exceptions.
export OCAMLRUNPARAM=b
oasis setup
# The test dependencies do not build on 3.12.1, so skip tests there.
if [ "$OPAM_SWITCH" = "3.12.1" ]; then
ocaml setup.ml -configure
ocaml setup.ml -build
else
opam install $OPAM_PKGS_TEST
ocaml setup.ml -configure --enable-tests
ocaml setup.ml -build
ocaml setup.ml -test
fi
# Smoke-test installability: pin, run, then remove and confirm it is gone.
opam pin add oasis2opam .
oasis2opam --version
opam remove oasis2opam
[ -z "`which oasis2opam`" ] || exit 1
| true
|
cb9ec29fd554ccd674ff1b22a9b83189e33dc98d
|
Shell
|
AliciaBentley-NOAA/verf_precip.v4.4.0
|
/scripts/exverf_precip_fss_06h.sh.ecf
|
UTF-8
| 4,314
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/ksh
###############################################################################
# Name of Script: exverf_precip_verfgen_06h.sh.sms
# Purpose of Script: To generate 06h precipitation Fractions
# Skill Scores statistics for various operational models to be used by the
# Forecast Verification System
# Arguments: exverf_precip_fss_06h.sh.sms $yyyy$mm$dd ($vday)
# Make FSS computations for 06h analysis (CCPA) ending at 00/06/12/18Z $vday
# note that the 18Z analysis would be made/remade a day later than 00/06/12Z,
# Called by jobs/JVERF_PRECIP_FSS_06H
# 1) make a list of fss06 domain parm files that contain only those
# models to be verified for this 06h FSS job
# 2) Remove existing 06h FSS records from model_$vday.vsdb
# 3) Submit POE job (using exverf_precip_fss_06h_poe.sh.ecf as basis for
# POE script)
# 4) For each model, assemble the individual pieces of VSDBs into a single
# fss06 vsdb file, then add it to the existing model_$vday.vsdb in the
# VSDB directory.
#
# Log history:
# 2015-06-29 Copied over from exverf_precip_fss_24h.sh.ecf
###############################################################################
set -x
# Create a file called fss.domains that contains domain info of only the models
# to be verified for FSS_06H in this job:
cat $PARMverf_precip/verf_precip_fss_06h.domains |while read tmp
do
first_char=`echo $tmp |cut -c1`
if [ ! "$first_char" = "#" ] # Skip comment line
then
modnam=`echo $tmp |awk -F"|" '{print $1}'`
let "runmod=run_$modnam"
if [ $runmod = 1 ]; then echo $tmp >> fss06.domains; fi
fi
done
# Create a list of models to be verified ('sort -u' ensures that model names
# will be unique - in case of multiple entries in the domain parm file
# (e.g. nam 00/12Z cycles have different bucket length from the 06/18Z cycles)
cat fss06.domains | awk -F"|" '{print $1}' | sort -u > fss06.model.list
# Go to VSDB directory to remove lines containing 'FSS' and 'APCP/06':
for modnam in `cat fss06.model.list`
do
if ! [ -d $COMVSDB/$modnam ]
then
mkdir -p $COMVSDB/$modnam
elif [ -s $COMVSDB/$modnam/${modnam}_${vday}.vsdb ]
then
sed -e "/FSS<.*APCP\/06/d" $COMVSDB/$modnam/${modnam}_${vday}.vsdb >$COMVSDB/$modnam/${modnam}_${vday}.vsdb1
mv $COMVSDB/$modnam/${modnam}_${vday}.vsdb1 $COMVSDB/$modnam/${modnam}_${vday}.vsdb
fi
done # looping through fss06.model.list
export vgrid=240
#
# Create a script to be poe'd for 6-hours ending at 00/06/12/18:
if [ -e $DATA/poescript ]; then
rm $DATA/poescript
fi
vhours="00 06 12 18"
for vhr in $vhours
do
echo $HOMEverf_precip/scripts/exverf_precip_fss_06h_poe.sh.ecf $vhr >> $DATA/poescript
done
echo
echo Here is the poescript for fss06:
cat $DATA/poescript
echo
#############################################################
# Execute the script.
#############################################################
mpirun -l cfp poescript
if [ "$envir" != dev ]; then
export err=$?; err_chk
fi
# Echo each validation hour's program output into the job log, with command
# tracing suspended so the banner lines stay readable.
for vhr in $vhours
do
set +x
echo "######################################"
# BUGFIX: was '${vday}$vhr}' — the stray '}' was printed literally.
echo " BEGIN FSS06 PROGRAM OUTPUT for validation at ${vday}${vhr}"
echo "######################################"
set -x
cat $pgmout.fss06.$vhr
set +x
echo "######################################"
echo " END FSS06 PROGRAM OUTPUT for validation at ${vday}${vhr}"
echo "######################################"
set -x
done
mkdir -p $DATA/vsdb
cd $DATA
for model in `cat fss06.model.list`
do
for vhr in 00 06 12 18
do
cat $vhr/vsdb/subtotal.${vhr}z/${model}_${vday}.vsdb >> $DATA/vsdb/${model}_${vday}.vsdb
done
cat $DATA/vsdb/${model}_${vday}.vsdb >> $COMVSDB/$model/${model}_${vday}.vsdb
done
cd $DATA/vsdb
tar cvf ../vsdbfss06.$vday.tar .
cd ..
if [ $SENDCOM = YES ]
then
cp vsdbfss06.$vday.tar ${COMOUT}.${vday}/.
if [ $RUN_ENVIR = dev ]
then
if [ ! -d $ARCH45DAY ]; then mkdir -p $ARCH45DAY; fi
cp vsdbfss06.$vday.tar $ARCH45DAY/.
fi
fi
#####################################################################
# GOOD RUN
set +x
echo "**************$job COMPLETED NORMALLY on `date`"
set -x
#####################################################################
msg="HAS COMPLETED NORMALLY!"
echo $msg
postmsg "$jlogfile" "$msg"
############## END OF SCRIPT #######################
| true
|
d85c88113bba1a915b394d66a7be084964a1a2c7
|
Shell
|
rsk0315/codefolio
|
/ShellScript/latest
|
UTF-8
| 348
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
# -*- mode: sh; sh-shell: bash -*-
# ~/bin/latest
# Print the name of the newest regular file in the current directory,
# optionally filtered by extension ($1, an alternation like "sh|txt") and by
# required permission letters ($2, any of r/w/x; default: any). Prints
# nothing when no file matches.
# We shall fail with names that contain whitespaces
# If you need it, use GNU ls, sed, xargs, etc.
# Build an extension regex only when $1 is given: "\.(EXT)$".
ext="${1:+\.(}$1${1:+)$}"
# Sanitise $2 down to [rwx.-] and anchor at line start so it matches the
# leading mode field of 'ls -l' output ('-' = regular file).
perm="-${2:-.}"
perm="^${perm//[^rwx.-]/}" # do not forget '^'
# ls -ltA sorts newest first; 'sed q' keeps the first match; awk '$0=$9'
# prints only the file-name column.
res=$(ls -ltA | grep -E "$ext" | grep -E -- "$perm" | sed q | awk '$0=$9')
[[ -f "$res" ]] && echo "$res"
| true
|
df2832bd8f7f8ec12439117e71cb9928199b33b5
|
Shell
|
udaiyamnmGITHUB/retail-app-in-MEAN-stack
|
/northwind-mongodb-master/scripts/mongo-import-json.sh
|
UTF-8
| 196
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
# Import every *.json file in the current directory into the Northwind
# database, one collection per file (collection name = file name without
# its .json extension).
for json_file in *.json
do
base=$(basename "$json_file")
extension="${base##*.}"
collection="${base%.*}"
mongoimport -d Northwind -c "$collection" --type json --jsonArray --file "$json_file"
done
| true
|
428c6f17a2e1df3175535ff69ca0511d847d1128
|
Shell
|
dssquared/VideoStreaming
|
/fs-overlay/build-fs-overlay.sh
|
UTF-8
| 528
| 3.4375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Package the ./fs overlay directory into fs-overlay.tar.gz with the
# in-image ownership baked in (root for /, uid/gid 1000 ("pi") for
# /home/pi), then hand the working tree back to the invoking user.
CURRENT_UID=$(id -u)
CURRENT_GID=$(id -g)
echo "Setting root ownership of ./fs..."
sudo chown -R 0:0 ./fs
echo -e "\nSetting pi:pi ownership of ./fs/home/pi..."
sudo chown -R 1000:1000 ./fs/home/pi
echo -e "\nCreating tarball..."
# -C ./fs … .  archives the overlay relative to its own root.
sudo bsdtar -C ./fs -cvzf ./fs-overlay.tar.gz .
echo -e "\nSetting proper tarball permissions..."
sudo chown -R $CURRENT_UID:$CURRENT_GID ./fs-overlay.tar.gz
echo -e "\nChanging ownership of ./fs back to current user/group..."
sudo chown -R $CURRENT_UID:$CURRENT_GID ./fs
| true
|
c374141b86e20afbda2dc0a48a76ee60c960acc6
|
Shell
|
lgrellie/gnl_tester
|
/run_tests.sh
|
UTF-8
| 442
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build get_next_line with the requested BUFFER_SIZE, run it over the
# generated test inputs and diff each input against the produced output.
if [[ "$#" -ne 1 ]]; then
echo "./run_tests \$BUFFER_SIZE";
else
./clean.sh
gcc -fsanitize=address main.c get_next_line.c get_next_line_utils.c -D BUFFER_SIZE=$1;
./generate_tests.sh;
[ -d output ] || mkdir output;
./a.out tests/*.txt;
# Start from a clean report: 'diffs' is appended to below, so a leftover
# file from a previous run would pollute this run's results.
rm -f diffs
for file in tests/*.txt; do
diff -q "$file" "output/output_$(basename "$file")" >> diffs;
done
# Use a real if/else: the old '[ -s diffs ] && cat diffs || echo ...' would
# also print the success message whenever 'cat' itself failed.
if [ -s diffs ]; then
cat diffs
else
echo "No diffs found between input and output. GG <3 !!"
fi
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.