blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
1fce8fa15624cbb27edf7819d34305d8675b049f
|
Shell
|
aryantaheri/scripts
|
/network/create-inf.sh
|
UTF-8
| 1,213
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Append "ex-secnet<a>" manual-interface stanzas to /etc/network/interfaces
# for every network number <a> in the range [from, to].
#
# eth1 ip address: 255.255.255.x
# network num: a
# interface ip address: 172.16.a.x
#
# Usage: create-inf.sh <from> <to>

from=$1
to=$2
cidr="172.16"
netmask="24"

# Fail early on missing or non-numeric bounds instead of feeding garbage
# to seq and generating broken stanzas.
case "$from$to" in
    ''|*[!0-9]*)
        echo "Usage: $0 <from> <to>  (numeric network-number range)" >&2
        exit 1
        ;;
esac

# Last octet of eth1's IPv4 address; the optional "addr:" group keeps this
# working with both net-tools ("inet addr:x.y.z.w") and iproute2
# ("inet x.y.z.w") output formats.
x=$(ip a show eth1 | sed -nr 's/.*inet (addr:)?(([0-9]*\.){3}([0-9]*)).*/\4/p')
output="/etc/network/interfaces"

## append template to /etc/network/interfaces
#iface ex-secnet1 inet manual
# up ip link set up dev ex-secnet1
# up ip addr add 192.168.1.21/24 brd + dev ex-secnet1
## up ip route add default via w.x.y.z dev ex-secnet1
# down ip addr del 192.168.1.21/24 dev ex-secnet1
# down ip link set down dev ex-secnet1

for a in $(seq "$from" "$to"); do
    echo "$a"
    ipa="$cidr.$a.$x/$netmask"
    echo "$ipa"
    # Emit one stanza per interface (text kept identical to the original).
    echo "" >> "$output"
    echo "#--------- Virtual Interfaces for OS+ODL Dedicated Bridges ---------#" >> "$output"
    echo "iface ex-secnet$a inet manual" >> "$output"
    echo " up ip link set up dev ex-secnet$a" >> "$output"
    echo " up ip addr add $ipa brd + dev ex-secnet$a" >> "$output"
    echo "# up ip route add default via w.x.y.z dev ex-secnet$a" >> "$output"
    echo " down ip addr del $ipa dev ex-secnet$a" >> "$output"
    echo " down ip link set down dev ex-secnet$a" >> "$output"
    echo "" >> "$output"
done
| true
|
cb806c38f02030d40a5246066427b79f5f59a02a
|
Shell
|
thorgate/django-project-template
|
/{{cookiecutter.repo_name}}/scripts/deploy/create-sentry-release.sh
|
UTF-8
| 359
| 2.78125
| 3
|
[
"ISC"
] |
permissive
|
#!/usr/bin/env bash
# Create a new Sentry release named after the current CI tag.
#
# Expects sentry-env.sh (next to this script) to define SENTRY_URL,
# SENTRY_ORGANIZATION, SENTRY_PROJECT and the echo_cyan helper;
# CI_COMMIT_TAG comes from the CI environment.
set -e

# Directory containing this script, regardless of the caller's CWD.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# Quoted so a checkout living under a path with spaces still sources cleanly
# (the original unquoted ${DIR} would word-split).
. "${DIR}/sentry-env.sh"

echo_cyan "Creating new release ${CI_COMMIT_TAG} for ${SENTRY_ORGANIZATION}/${SENTRY_PROJECT}"
echo_cyan " Api root: ${SENTRY_URL}"

sentry-cli releases --org="${SENTRY_ORGANIZATION}" new -p "${SENTRY_PROJECT}" "${CI_COMMIT_TAG}"
| true
|
f023d272ad8a26cfa145e841a33e69f5b90d6603
|
Shell
|
bah-insignia/zcmd
|
/devutils/s3get-sudo.sh
|
UTF-8
| 752
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copy an object from the shared S3 bucket to a local path via sudo,
# optionally handing ownership of the result back to the invoking user.
#
# Usage: s3get-sudo.sh SOURCE_OBJECT_KEY DESTINATION_FILEPATH [SET_OWNER_GROUPNAME]
#
# Exit codes: 1 = bad arguments, 2 = copy or chown failure.

# Accept 2 OR 3 arguments: the usage line advertises an optional group name,
# but the original `[ ! $# -eq 2 ]` rejected the documented 3-argument form.
if [ $# -lt 2 ] || [ $# -gt 3 ]; then
    echo "$0 SOURCE_OBJECT_KEY DESTINATION_FILEPATH [SET_OWNER_GROUPNAME]"
    exit 1
fi

KEY=$1
LOCALNAME=$2
SET_OWNER_GROUPNAME=$3

# Provides SHARED_S3_BUCKET (and related docker-env defaults).
source "$HOME/zcmd/devutils/default-docker-env.txt"

#Get the path now because SUDO ROOT MIGHT not have it on the path!
AWS_PATH=$(command -v aws)

CMD="sudo $AWS_PATH s3 cp ${SHARED_S3_BUCKET}/${KEY} ${LOCALNAME}"
echo "COPY COMMAND: $CMD"
if ! eval "$CMD"; then
    echo "ERROR ON $CMD"
    exit 2
fi

WHOAMI=$(whoami)
if [ -n "$SET_OWNER_GROUPNAME" ]; then
    # Hand the downloaded file back to the invoking user and requested group.
    if ! sudo chown "$WHOAMI:$SET_OWNER_GROUPNAME" "${LOCALNAME}"; then
        echo "ERROR ON changing ownership of ${LOCALNAME} to user=$WHOAMI group=$SET_OWNER_GROUPNAME"
        exit 2
    fi
fi
| true
|
0bd8ec0124d194a87cb3a9eb03480e0b33e5b3bc
|
Shell
|
c0sco/zsh-ezcolors
|
/zsh-ezcolors.plugin.zsh
|
UTF-8
| 344
| 2.71875
| 3
|
[] |
no_license
|
# From https://pthree.org/2009/12/18/add-colors-to-your-zsh-scripts/
# Exports convenience variables ($RED, $BOLD_RED, ..., $RESET) holding the
# terminal escape sequences that zsh's `colors` function provides, so plain
# scripts can use them without loading the colors machinery themselves.
autoload colors
# Only initialize the color arrays when the terminal reports more than
# 8 colors.
if [[ "$terminfo[colors]" -gt 8 ]]; then
    colors
fi
for COLOR in RED GREEN YELLOW BLUE MAGENTA CYAN BLACK WHITE; do
    # eval defers expansion of the single-quoted RHS until assignment time,
    # after $COLOR has been substituted into the variable name;
    # ${(L)COLOR} lowercases the color name to index fg_no_bold/fg_bold.
    eval export $COLOR='$fg_no_bold[${(L)COLOR}]'
    eval export BOLD_$COLOR='$fg_bold[${(L)COLOR}]'
done
eval export RESET='$reset_color'
| true
|
241c2905b7219f6a46892aaed166e6f799498dc7
|
Shell
|
sshao/549-gusty
|
/fg_bg_segm/end-end-lat.sh
|
UTF-8
| 437
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Measure end-to-end latency: perform ten one-minute capture runs of
# blobdetect, recording "sending" samples per run and averaging column 4
# into a per-run avg.txt.

mkdir -p end-end-lat

for w in $(seq 1 10)
do
    mkdir -p end-end-lat/run-$w
    ./blobdetect | awk '$1=="sending"{print $4,$5,$6,$7,$8}' >> end-end-lat/run-$w/run.txt & sleep 60; pkill blobdetect; sleep 5
    # YO KEEYOUNG
    # idk why but that sleep 5 at the end fixed things
    # runs just fine now, no NaN
    awk '{sum+=$4}END{print sum/NR}' end-end-lat/run-$w/run.txt >> end-end-lat/run-$w/avg.txt
done
| true
|
e39344403f58c65d1270668e2bd3b5433beb7e87
|
Shell
|
huzhifeng/express-gallary
|
/tools/thumbnailer.sh
|
UTF-8
| 1,067
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate thumbnails for photos (*.jpg via epeg) and videos (*.mp4 via
# ffmpeg) found under a photo directory, placing each thumbnail in a
# "_thumbnail" subdirectory next to its source file.
#
# Usage: thumbnailer.sh [photo_dir]   (defaults to the current directory)

photo_dir='.'
if test $# -gt 0; then
    photo_dir=$1
fi

thumbnail_dir='_thumbnail'

# Create thumbnail for Images by epeg.
# while-read over find's output (instead of `for f in $(find ...)`) keeps
# paths containing whitespace intact.
find "$photo_dir" -type f -name "*.jpg" | while IFS= read -r file
do
    if [[ "$file" == *thumbnail* ]]; then continue; fi
    thumbnail_file="${file%/*}/${thumbnail_dir}/${file##*/}"
    if [ -e "$thumbnail_file" ]; then continue; fi
    if [ ! -e "${file%/*}/${thumbnail_dir}" ]; then mkdir -p "${file%/*}/${thumbnail_dir}"; fi
    echo "Create thumbnail for image $file"
    # Run the tool directly; the original wrapped this in backticks, which
    # pointlessly tried to execute the (empty, redirected) command output.
    epeg -w 30% -h 30% -q 80 "$file" "$thumbnail_file" > /dev/null 2>&1
done

# Create thumbnail for Videos by ffmpeg
find "$photo_dir" -type f -name "*.mp4" | while IFS= read -r file
do
    if [[ "$file" == *thumbnail* ]]; then continue; fi
    thumbnail_file="${file%/*}/${thumbnail_dir}/${file##*/}.jpg"
    if [ -e "$thumbnail_file" ]; then continue; fi
    if [ ! -e "${file%/*}/${thumbnail_dir}" ]; then mkdir -p "${file%/*}/${thumbnail_dir}"; fi
    echo "Create thumbnail for video $file"
    # </dev/null stops ffmpeg from reading (and swallowing) the file list
    # that the while-read loop is consuming on stdin.
    ffmpeg -i "$file" -ss 00:00:01.000 -f image2 -vframes 1 "$thumbnail_file" < /dev/null > /dev/null 2>&1
done
| true
|
ea578477db1e5cb76dac58f11bf30656e878d5d9
|
Shell
|
KyWa/dotfiles
|
/install.sh
|
UTF-8
| 1,931
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Install dotfiles: back up any existing configs, symlink the versions in
# ~/dotfiles into $HOME, fetch git bash-completion, and optionally clone
# the KyWa repositories.

# Check for existing Bashrc
if [[ -f $HOME/.bashrc ]];then
    mv $HOME/.bashrc $HOME/.bashrc.bak
fi

# Check for existing Vimrc
if [[ -f $HOME/.vimrc ]];then
    mv $HOME/.vimrc $HOME/.vimrc.bak
fi

# Check for existing Ansiblecfg
if [[ -f $HOME/.ansible.cfg ]];then
    mv $HOME/.ansible.cfg $HOME/.ansible.cfg.bak
fi

# Check for existing TMUX Config
if [[ -f $HOME/.tmux.conf ]];then
    mv $HOME/.tmux.conf $HOME/.tmux.conf.bak
fi

# Check for existing bash_profile
if [[ -f $HOME/.bash_profile ]];then
    mv $HOME/.bash_profile $HOME/.bash_profile.bak
fi

# Check for existing k8s Prompt
# (fixed: this previously tested for .tmux.conf — a copy/paste slip — so the
# backup ran at the wrong time and errored when .k8sprompt.sh was absent)
if [[ -f $HOME/.k8sprompt.sh ]];then
    mv $HOME/.k8sprompt.sh $HOME/.k8sprompt.sh.bak
fi

## Check and setup for Mac OS
os=$(uname)
if [[ ${os} == Darwin ]];then
    ./mac_setup.sh
fi

# Create symlinks to ~/dotfiles
ln -sv ~/dotfiles/.vimrc ~
ln -sv ~/dotfiles/.vim ~
ln -sv ~/dotfiles/.bashrc ~
ln -sv ~/dotfiles/.bash_profile ~
ln -sv ~/dotfiles/.tmux.conf ~
ln -sv ~/dotfiles/.k8sprompt.sh ~
ln -sv ~/dotfiles/.ansible.cfg ~

# Get Git Bash Completion
curl https://raw.githubusercontent.com/git/git/master/contrib/completion/git-completion.bash -o ~/.git-completion.bash

# Add KyWa repositories
# Also assumes you pulled this repo with an ssh key
echo "Would you like to add KyWa repositories?: y/n"
read repo_install
case $repo_install in
    # fixed: pattern was "[yY][eE][sS][|[yY])" — a malformed bracket
    # expression that matched "yesy" but not a plain "yes"
    [yY][eE][sS]|[yY])
        mkdir -p ~/Working/kywa
        cd ~/Working/kywa
        git clone git@github.com:KyWa/dockerbuilds
        git clone git@github.com:KyWa/kywa.git
        git clone git@github.com:KyWa/kywa-ahoy.git
        git clone git@github.com:KyWa/kywa-argo
        git clone git@github.com:KyWa/kywa-lab
        git clone git@github.com:KyWa/kywa-learn
        git clone git@github.com:KyWa/kywa-website
        git clone git@github.com:KyWa/yamlzone
        echo "Repos cloned and all done!"
        ;;
    [nN][oO]|[nN])
        echo "All done!"
        exit
        ;;
esac

source ~/.bash_profile
| true
|
57e97fe72fc69890fd5eb2c854d120f2b0a09fb7
|
Shell
|
bigharshrag/FileSync
|
/lsyncScript.sh
|
UTF-8
| 489
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install lsyncd, prepare its log/config directories, fill in the local
# custom.lua template from user input, and start the service.
sudo apt-get install lsyncd

# make the log files
sudo mkdir /var/log/lsyncd
sudo touch /var/log/lsyncd/lsyncd.{log,status}
sudo mkdir /etc/lsyncd

# Substitute the placeholders (replacesource / replacename / replaceuser)
# in the custom.lua template; '#' is the sed delimiter so paths containing
# '/' work unescaped.
echo -n "Enter the path to the source folder to sync :"
read source_path
sed -i "s#replacesource#$source_path#g" custom.lua
read -p "Enter SSH username :" user_name
sed -i "s#replacename#$user_name#g" custom.lua
sed -i "s#replaceuser#$USER#g" custom.lua

# Install the filled-in config and start syncing.
sudo mv custom.lua /etc/lsyncd/lsyncd.conf.lua
sudo service lsyncd start
| true
|
21e3cf5937bb4f5273073d9c7f4a87a6cba742ba
|
Shell
|
rkhozinov/vyos-ami-build-scripts
|
/push-image-to-aws
|
UTF-8
| 1,474
| 3.703125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Push a local VHD image to EC2 as an imported volume, poll until the
# conversion completes, then hand off to ./volume-to-ami.
#
# Requires AWS_ACCESS_KEY / AWS_SECRET_KEY in the environment and the
# ec2-api-tools on PATH.
set -u
set -e

# Check $# before touching $1: under `set -u`, a bare $1 with no argument
# would abort with "unbound variable" instead of printing the usage line.
if [ $# -lt 1 ] || [ ! -f "$1" ]; then
    echo "Usage: $0 <image.vhd>"
    exit 1
fi
image=$1

# How to push a VHD file to EC2
# TODO: make an S3 bucket; I'm assuming that trickv-vyos-import exists
import_output=$(mktemp /tmp/import_output.XXXXXX)
ec2-import-volume "$image" -f VHD --region ap-southeast-1 -z ap-southeast-1b -b trickv-vyos-import --prefix "$image" -o "$AWS_ACCESS_KEY" -w "$AWS_SECRET_KEY" | tee "$import_output"
import_id=$(grep 'The disk image for import-vol' "$import_output" | awk '{print $5}')

# Wait ages - this is where the VHD is sent to Amazon and can take an hour.
# Looking for string: The disk image for import-vol-fg53puyf has been uploaded to Amazon S3
# set +e so the polling fgrep may fail without killing the script.
set +e
while true; do
    echo "Waiting for $import_id to be ready"
    ec2-describe-conversion-tasks "$import_id" | fgrep completed
    if [ $? -eq 0 ]; then
        break
    fi
    sleep 10
done
set -e

#ec2-describe-conversion-tasks | grep import-vol-fg53puyf
#TaskType IMPORTVOLUME TaskId import-vol-fg53puyf ExpirationTime 2014-02-26T03:58:38Z Status completed
#DISKIMAGE DiskImageFormat VHD DiskImageSize 255921664 VolumeId vol-fdab9af2 VolumeSize 3 AvailabilityZone ap-southeast-1aApproximateBytesConverted 255921664
volume_id=$(ec2-describe-conversion-tasks "$import_id" | grep VolumeId | awk '{print $7}')
./volume-to-ami "$volume_id" "$image"
# At this point, you can just run ./volume-to-ami
| true
|
0951f71aba54f4012386469864c41002f7a899c5
|
Shell
|
wcventure/PERIOD
|
/evaluation/NEW/lrzip/build.sh
|
UTF-8
| 2,287
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build lrzip (pinned to commit 465afe8) for concurrency-bug evaluation:
# clone and patch the source, build once (plain) for static analysis, then
# rebuild with AddressSanitizer and produce an instrumented binary.

# For Mac
if [ $(command uname) == "Darwin" ]; then
    # greadlink (GNU readlink) supplies `readlink -f` semantics on macOS.
    if ! [ -x "$(command -v greadlink)" ]; then
        brew install coreutils
    fi
    BIN_PATH=$(greadlink -f "$0")
    # Repository root is four directory levels above this script.
    ROOT_DIR=$(dirname $(dirname $(dirname $(dirname $BIN_PATH))))
# For Linux
else
    BIN_PATH=$(readlink -f "$0")
    ROOT_DIR=$(dirname $(dirname $(dirname $(dirname $BIN_PATH))))
fi

# Tooling (clang+llvm, SVF) lives relative to the repository root.
export ROOT_DIR=${ROOT_DIR}
export PATH=${ROOT_DIR}/clang+llvm/bin:${ROOT_DIR}/tool/SVF/Release-build/bin:$PATH
export LD_LIBRARY_PATH=${ROOT_DIR}/clang+llvm/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}
echo "Environment variables is OK. Everything's fine!"

cd ${ROOT_DIR}/evaluation/NEW/lrzip/

# First-time setup: clone the pinned revision and patch runzip.c in place.
# NOTE: the sed line numbers below are only valid for commit 465afe8.
if ! [ -d "${ROOT_DIR}/evaluation/NEW/lrzip/lrzip" ]; then
    git clone https://github.com/ckolivas/lrzip.git lrzip
    cd lrzip
    git checkout 465afe8
    cd $ROOT_DIR/evaluation/NEW/lrzip/lrzip/
    # Insert a braced sleep(1) block after line 305 — presumably to widen a
    # race window for the evaluation; confirm against the PERIOD paper/docs.
    sed -i '305a{' runzip.c
    sed -i '306asleep(1);' runzip.c
    sed -i '308a}' runzip.c
    # Replace the direct clear_rulist(control) call with a short-lived
    # pthread that runs it after a 50ms usleep, then joins.
    sed -i '386c //clear_rulist(control);' runzip.c
    sed -i '386ausleep(50000);' runzip.c
    sed -i '387apthread_t Dele;' runzip.c
    sed -i '388aif (unlikely(!create_pthread(control, &Dele, NULL, clear_rulist, control))) {' runzip.c
    sed -i '389areturn -1;' runzip.c
    sed -i '390a}' runzip.c
    sed -i '391apthread_join(Dele, NULL);' runzip.c
fi

set -ux

cd ${ROOT_DIR}/evaluation/NEW/lrzip/
./cleanDIR.sh

# Build 1: plain -g -O0 static binary compiled with wllvm so whole-program
# bitcode can be extracted for the static-analysis pass.
cd $ROOT_DIR/evaluation/NEW/lrzip/lrzip/
./autogen.sh
CC=wllvm CXX=wllvm++ CFLAGS="-g -O0" CXXFLAGS="-g -O0" ./configure --enable-static-bin
make
extract-bc ./lrzip
$ROOT_DIR/tool/staticAnalysis/staticAnalysis.sh lrzip

# Keep only stream.c/runzip.c entries in the concurrency config, then
# prepend the source locations of interest (1a inserts after line 1, so the
# last sed line ends up first).
sed -i '/stream.c\|runzip.c/!d' ConConfig.lrzip
sed -i '1astream.c:449' ConConfig.lrzip
sed -i '1astream.c:456' ConConfig.lrzip
sed -i '1astream.c:457' ConConfig.lrzip
sed -i '1arunzip.c:255' ConConfig.lrzip
sed -i '1arunzip.c:256' ConConfig.lrzip

# Build 2: identical configuration plus AddressSanitizer.
make distclean
rm -rf *.bc
CC=wllvm CXX=wllvm++ CFLAGS="-g -O0 -fsanitize=address" CXXFLAGS="-g -O0 -fsanitize=address" ./configure --enable-static-bin
make
extract-bc ./lrzip

# Instrument the sanitized bitcode into the final lrzip binary.
export Con_PATH=$ROOT_DIR/evaluation/NEW/lrzip/lrzip/ConConfig.lrzip
$ROOT_DIR/tool/staticAnalysis/DBDS-INSTRU/dbds-clang-fast++ -g -o lrzip lrzip.bc -lpthread -lm -lz -lbz2 -llzo2 -llz4 -lasan

# test
# $ROOT_DIR/tool/DBDS/run_PDS.py -d 3 -t 5 LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libasan.so.4:$LD_PRELOAD ./lrzip -t -p2 ../POC
| true
|
a9a90aed56d9154f064177faad085085e10202f7
|
Shell
|
shunlir/opengrok-docker
|
/fs/opt/bin/grok_index
|
UTF-8
| 743
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# TODO: --noIndex makes the webinterface project list empty
#
# Run the OpenGrok indexer over /var/opengrok/src, showing an "indexing in
# progress" banner in the web UI unless suppressed.

show_info=true
if [ "$1" = "-h" ]; then
    echo "Usage: $(basename $0) [--noIndex]"
    exit 0
# NOTE(review): the usage text advertises --noIndex but this branch matches
# --init — confirm which flag name is actually intended.
elif [ "$1" == "--init" ]; then
    show_info=false
    shift
fi

# Source the sibling "init" script if present; presumably it defines
# indexer_exit_if_running, grok_add_info/grok_remove_info and $R_ARG —
# confirm, none are defined in this file.
[ -f "$(dirname $0)/init" ] && . "$(dirname $0)/init"

indexer_exit_if_running
[ "$show_info" = "false" ] || grok_add_info main "indexing in progress..."

# Remaining CLI arguments ($*) are passed through to the indexer.
opengrok-indexer -l debug -J=-Djava.util.logging.config.file=/var/opengrok/etc/logging.properties -J=-d64 -J=-server \
    -a $GROK_JAR \
    -- \
    -s /var/opengrok/src -d /var/opengrok/data \
    -H -P -S -G \
    -W /var/opengrok/etc/configuration.xml -U http://localhost:8080/source $R_ARG $*

[ "$show_info" = "false" ] || grok_remove_info main
| true
|
bb423187b568f30c305e78974b879c88c05f8f28
|
Shell
|
agolovanov/Quill
|
/quill3d/run.sh
|
UTF-8
| 1,059
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launch a quill3d simulation for config suffix "$1": validate the config,
# extract the data folder and thread count from it, then run quill under
# mpirun with timestamped, tee-logged output.

# we do not want the script to continue if anything fails
set -e

if [ ! -f ../quill3d-conf/quill.conf"$1" ]; then
    echo "Config file ../quill3d-conf/quill.conf$1 not found! Create it or copy from ../quill3d-conf/example"
    exit 1
fi

# Create data folder if not exist; copy config file into it
folder=`./parse.py "$1" | grep -A2 data_folder | tail -n1`
echo "Data folder: $folder"
sleep 1.5 # so the user has time to see the data folder

# Determine the number of threads specified in the config
threads=`./parse.py "$1" | grep -A1 n_sr | tail -n1`
if [[ -z "$threads" ]]
then
    # Default when the config does not specify n_sr.
    threads=8
fi
echo "Threads: $threads"

mkdir -p $folder
cp ../quill3d-conf/quill.conf"$1" $folder/

# 1. Parsing config file (parse.sh)
# 2. Running quill with input from step 1
# 3. Adding timestamps to the Quill's console output
# 4. Duplicating output to a log file (quill_log.txt)
mpirun -n $threads bash -c "stdbuf -o 0 ./parse.py \"$1\" | { ./quill; } 2>&1" | awk '{ print strftime("%Y-%m-%d %H:%M:%S\t"), $0; fflush(); }' | tee $folder/quill_log.txt
| true
|
ad174d99f8581b6f0324b6b84184cb44e5759d86
|
Shell
|
beaukode/docker-cron-backup
|
/dobackup.sh
|
UTF-8
| 304
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Orchestrate one backup run: dump, compress, send, then clean up the
# timestamped temp workspace. /env.sh supplies the environment the
# sub-scripts need.
source /env.sh

# Timestamped prefix names this backup run and its temp directory;
# exported so /dodump.sh, /docompress.sh and /dosend.sh can use them.
export BACKUP_PREFIX=$(date +%Y-%m-%d_%H-%M-%S)
export BACKUP_TMP="/tmp/$BACKUP_PREFIX"

echo "[$(date)] Starting backup"
echo "[$(date)] Création temp directory : $BACKUP_TMP"
mkdir "$BACKUP_TMP"

/dodump.sh
/docompress.sh
/dosend.sh

rm -Rf "$BACKUP_TMP"
echo "[$(date)] Backup done"
| true
|
5340a7fbd17902bc46d30a50ae38047f41bde705
|
Shell
|
fsx950223/addons
|
/tools/ci_build/builds/release_linux.sh
|
UTF-8
| 1,642
| 2.8125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Build and package a release wheel for each supported Python version, then
# verify the built wheels.
set -e -x

PYTHON_VERSIONS="python2.7 python3.5 python3.6 python3.7"
ln -sf /usr/bin/python3.5 /usr/bin/python3 # Py36 has issues with add-apt
curl -sSOL https://bootstrap.pypa.io/get-pip.py
add-apt-repository -y ppa:deadsnakes/ppa
apt-get -y -qq update

for version in ${PYTHON_VERSIONS}; do
    # Install the interpreter from deadsnakes plus a matching pip.
    export PYTHON_VERSION=${version}
    apt-get -y -qq install ${PYTHON_VERSION}
    ${PYTHON_VERSION} get-pip.py -q
    ${PYTHON_VERSION} -m pip --version

    #Link TF dependency
    yes 'y' | ./configure.sh --quiet

    # Build
    bazel build \
        --noshow_progress \
        --noshow_loading_progress \
        --verbose_failures \
        --test_output=errors \
        build_pip_pkg

    # Package Whl
    bazel-bin/build_pip_pkg artifacts --nightly

    # Uncomment and use this command for release branches
    #bazel-bin/build_pip_pkg artifacts
done

# Clean up
rm get-pip.py

# Verify Wheels
./tools/ci_build/builds/wheel_verify.sh
| true
|
f78813bf26788eedbe99098784ef6da9cac28ce7
|
Shell
|
alexanderzgg/Project1
|
/makefile.sh
|
UTF-8
| 811
| 3.5
| 4
|
[] |
no_license
|
#makefile for lab 2 pass off cases
# Runs ./lab$(NUM) over the 80- and 100-point pass-off inputs and diffs each
# result against the expected answer file.
NUM?=2
numbers_80=0 1 2 3 4 5 6 7 8
numbers_100=0 1 2
tests=Lab$(NUM)PassOffCases

.SILENT:run
# The whole recipe is one backslash-continued shell line, so every `done`
# must be followed by `;` — the original `done \` joined directly onto the
# next command ("done (echo ...)", "done rm ..."), which is a shell syntax
# error.
run:
	(echo "BUCKET 80") ; \
	for number in $(numbers_80) ; \
	do \
		echo "Running input$$number" ; \
		./lab$(NUM) $(tests)/$(NUM)-80/input$$number.txt > $(tests)/out.txt ; \
		diff -w $(tests)/$(NUM)-80/answer$$number.txt $(tests)/out.txt || (echo "diff failed on test $$number \n") ; \
	done ; \
	(echo "BUCKET 100") ; \
	for number in $(numbers_100) ; \
	do \
		echo "Running input$$number" ; \
		./lab$(NUM) $(tests)/$(NUM)-100/input$$number.txt > $(tests)/out.txt ; \
		diff -w $(tests)/$(NUM)-100/answer$$number.txt $(tests)/out.txt || (echo "diff failed on test $$number \n") ; \
	done ; \
	rm $(tests)/out.txt

compile:
	g++ -Wall -Werror -std=c++17 -g *.cpp -o lab$(NUM)
| true
|
dc322116c5bff15e23dea42f373bdcaab3ea3742
|
Shell
|
petr-tik/petr-tik.github.io
|
/content-org/remote_dev.sh
|
UTF-8
| 2,151
| 3.640625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Helpers for deciding whether to build a Rust project on a remote VPS.

# Probe whether the VPS accepts SSH connections.
# NOTE(review): nc -z prints nothing on stdout, so this echoes an empty
# line regardless of result; relies on $VPS_IP being set by the caller —
# confirm where VPS_IP comes from.
is_connected () {
    echo $(nc -z $VPS_IP 22)
}

# Print "true" if the project's Rust LoC exceeds the remote-compile
# threshold, else "false".
is_project_big () {
    # get a list of directories with interesting rust code - either src or tests
    DIRECTORIES=$(find . -maxdepth 1 -type d | grep "src\|test" | cut -c 3-);
    # get the total LoC in any .rs files in these directories
    # (wc -l total line; field 3 of the "  NNN total" summary)
    LOC_IN_RUST_FILES=$(find $DIRECTORIES -type f -name "*.rs" | xargs wc -l | tail -1 | cut -d" " -f3);
    THRESHOLD_TO_COMPILE_REMOTELY=25000;
    if [ $LOC_IN_RUST_FILES -ge $THRESHOLD_TO_COMPILE_REMOTELY ]; then
        echo "true"
    else
        echo "false"
    fi
}
# One-time remote toolchain setup: install build tools and rustup on the
# server, then record the server identity in the local lockfile.
# NOTE(review): each remote command is prefixed with "echo", so they are
# printed, not executed, on the remote side — confirm whether this dry-run
# behavior is intentional. Relies on m_ssh, $ID, $IP and $LOCAL_LOCKFILE
# being defined elsewhere.
server_setup() {
    echo "Setting up server..."
    UTILS_CMD="
echo apt-get install build-essential -y
echo curl https://sh.rustup.rs -sSf \\| sh
echo /root/.cargo/bin/rustup toolchain add stable
echo /root/.cargo/bin/rustup default stable
"
    echo "$UTILS_CMD" | m_ssh "$ID" "$IP" " bash -ls"
    echo "$ID $IP" > "$LOCAL_LOCKFILE"
}

# Sync the current project directory to the remote host, then run the
# given command ("$@") inside it there. Depends on assert_lockfile_present,
# m_rsync, m_ssh, $ID and $IP defined elsewhere.
cmd_cmd() {
    assert_lockfile_present
    DIRNAME=$(basename "$PWD")
    # sync files over
    m_rsync $ID $IP $DIRNAME
    CMD=" cd $DIRNAME; $@"
    m_ssh $ID $IP " bash -ls -c \"$CMD\""
}
#!/usr/bin/env bash
# NOTE(review): stray second shebang — the file looks like two scripts
# concatenated; harmless here since mid-file it is just a comment.

# rsync the buildable subset of a Rust project (Cargo.toml, rust_toolchain,
# src/, tests/). Currently only performs a --list-only dry run; the real
# transfer invocation is commented out below.
m_rsync () {
    declare -a FLAGS
    FLAGS+=("-r" "-t" "-p" "-R" "-a" "-i" "-z") # t - timestamps, p - permissions
    ## You have to include first before excluding everything else. Order matters!
    # Copy all the files needed to build
    # Cargo.toml
    # rust_toolchain
    # src/*
    # tests/*
    # FLAGS+=("--exclude=.git*")
    FLAGS+=("--include=Cargo.toml")
    FLAGS+=("--include=rust_toolchain")
    FLAGS+=("--include=src/***")
    if [ -d tests ]; then
        FLAGS+=("--include=tests/***")
    fi
    # Exclude EVERYTHING else
    FLAGS+=("--exclude=*")
    rsync "${FLAGS[@]}" --list-only . | tail -20
    # if [ "$FILE_CHANNEL_REUSE" != "0" ]; then
    # FLAGS+=("-e"
    # "ssh -o ControlMaster=auto -o ControlPersist=600 -o ControlPath=~/.ssh/master-$1 -i $KEY_PATH")
    # fi
    # rsync "${FLAGS[@]}"
    # FLAGS+=("." "root@$2:~/cloudy/")
}

# Kick off a background sync, then run init.
# NOTE(review): run_init, $ID and $IP are not defined in this file —
# presumably sourced from elsewhere; confirm.
sync_files_over() {
    m_rsync $ID $IP &
    run_init
}
| true
|
b76c80c86a7ac8c68fe82a9297353476999e62ef
|
Shell
|
williangringo/ngx_pagespeed
|
/test/nginx_system_test.sh
|
UTF-8
| 89,960
| 3.515625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: jefftk@google.com (Jeff Kaufman)
#
#
# Runs pagespeed's generic system test and nginx-specific system tests. Not
# intended to be run on it's own; use run_tests.sh instead.
#
# Exits with status 0 if all tests pass.
# Exits with status 1 immediately if any test fails.
# Exits with status 2 if command line args are wrong.
# Exits with status 3 if all failures were expected.
# Exits with status 4 if instructed not to run any tests.
# Inherits the following from environment variables:
# (the ${VAR:?msg} form aborts immediately with msg if VAR is unset/empty)
: ${USE_VALGRIND:?"Set USE_VALGRIND to true or false"}
: ${NATIVE_FETCHER:?"Set NATIVE_FETCHER to off or on"}
: ${PRIMARY_PORT:?"Set PRIMARY_PORT"}
: ${SECONDARY_PORT:?"Set SECONDARY_PORT"}
: ${MOD_PAGESPEED_DIR:?"Set MOD_PAGESPEED_DIR"}
: ${NGINX_EXECUTABLE:?"Set NGINX_EXECUTABLE"}
PRIMARY_HOSTNAME="localhost:$PRIMARY_PORT"
SECONDARY_HOSTNAME="localhost:$SECONDARY_PORT"
SERVER_ROOT="$MOD_PAGESPEED_DIR/src/install/"
# We need check and check_not before we source SYSTEM_TEST_FILE that provides
# them.

# Print the failure marker and abort the entire test run.
handle_failure_simple() {
  printf '%s\n' "FAIL"
  exit 1
}
# Announce and run a command; abort the whole run (via
# handle_failure_simple) if it exits non-zero.
check_simple() {
  echo " check" "$@"
  if ! "$@"; then
    handle_failure_simple
  fi
}
# Announce and run a command that is EXPECTED to fail; if it unexpectedly
# succeeds, abort via handle_failure_simple.
# NOTE(review): in the expected-failure case the `&&` makes this function's
# own exit status non-zero (the command's status) — callers relying on that,
# or running under `set -e`, depend on this exact form.
function check_not_simple() {
  echo " check_not" "$@"
  "$@" && handle_failure_simple
}
# Argument list:
# host_name, path, post-data
# Runs 5 keepalive requests both with and without gzip for a few times.
# Curl will use keepalive when running multiple request with one command.
# When post-data is empty, a get request will be executed.
# Afterwards, asserts that neither curl's stderr log nor the vhost's nginx
# error log contains anything beyond known-benign chatter.
function keepalive_test() {
  HOST_NAME=$1
  URL="$SECONDARY_HOSTNAME$2"
  # Per-vhost log file names derive from the host name argument.
  CURL_LOG_FILE="$1.curl.log"
  NGX_LOG_FILE="$1.error.log"
  POST_DATA=$3
  # 100 iterations x (identity + gzip); each curl invocation hits the same
  # URL five times over one keep-alive connection, stderr (-v) appended to
  # the curl log.
  for ((i=0; i < 100; i++)); do
    for accept_encoding in "" "gzip"; do
      if [ -z "$POST_DATA" ]; then
        curl -m 2 -S -s -v -H "Accept-Encoding: $accept_encoding" \
          -H "Host: $HOST_NAME" $URL $URL $URL $URL $URL > /dev/null \
          2>>"$TEST_TMP/$CURL_LOG_FILE"
      else
        curl -X POST --data "$POST_DATA" -m 2 -S -s -v \
          -H "Accept-Encoding: $accept_encoding" -H "Host: $HOST_NAME"\
          $URL $URL $URL $URL $URL > /dev/null \
          2>>"$TEST_TMP/$CURL_LOG_FILE"
      fi
    done
  done
  # Filter the curl output from unimportant messages
  OUT=$(cat "$TEST_TMP/$CURL_LOG_FILE"\
    | grep -v "^[<>]"\
    | grep -v "^{ \\[data not shown"\
    | grep -v "^\\* About to connect"\
    | grep -v "^\\* Closing"\
    | grep -v "^\\* Connected to"\
    | grep -v "^\\* Re-using"\
    | grep -v "^\\* Connection.*left intact"\
    | grep -v "^} \\[data not shown"\
    | grep -v "^\\* upload completely sent off"\
    | grep -v "^\\* connected"\
    | grep -v "^\\* Trying.*\\.\\.\\.")
  # Nothing should remain after that.
  # (`check` is provided by the sourced system test file.)
  check [ -z "$OUT" ]
  # Filter the nginx log from our vhost from unimportant messages.
  OUT=$(cat "$TEST_TMP/$NGX_LOG_FILE"\
    | grep -v "closed keepalive connection$" \
    | grep -v ".*Cache Flush.*")
  # Nothing should remain after that.
  check [ -z "$OUT" ]
}
# Directory containing this script; all test artifacts live beneath it.
this_dir="$( cd $(dirname "$0") && pwd)"

# stop nginx
killall nginx

# Scratch directory for this run, recreated from scratch each time.
TEST_TMP="$this_dir/tmp"
rm -r "$TEST_TMP"
check_simple mkdir "$TEST_TMP"
PROXY_CACHE="$TEST_TMP/proxycache"
TMP_PROXY_CACHE="$TEST_TMP/tmpproxycache"
ERROR_LOG="$TEST_TMP/error.log"
ACCESS_LOG="$TEST_TMP/access.log"

# Check that we do ok with directories that already exist.
FILE_CACHE="$TEST_TMP/file-cache"
check_simple mkdir "$FILE_CACHE"

# And directories that don't.
SECONDARY_CACHE="$TEST_TMP/file-cache/secondary/"
SHM_CACHE="$TEST_TMP/file-cache/intermediate/directories/with_shm/"

VALGRIND_OPTIONS=""

# Under valgrind, nginx must stay in the foreground (daemon off) so valgrind
# supervises the actual worker process.
if $USE_VALGRIND; then
  DAEMON=off
else
  DAEMON=on
fi

# The @@RESOLVER@@ template slot only gets a resolver directive when the
# native fetcher is enabled.
if [ "$NATIVE_FETCHER" = "on" ]; then
  RESOLVER="resolver 8.8.8.8;"
else
  RESOLVER=""
fi
# set up the config file for the test
PAGESPEED_CONF="$TEST_TMP/pagespeed_test.conf"
PAGESPEED_CONF_TEMPLATE="$this_dir/pagespeed_test.conf.template"

# check for config file template
check_simple test -e "$PAGESPEED_CONF_TEMPLATE"

# create PAGESPEED_CONF by substituting on PAGESPEED_CONF_TEMPLATE
# NOTE(review): `echo` does not read stdin, so this here-doc is discarded
# and the file starts with a single blank line — possibly deliberate, since
# the un-commented banner text would not be valid nginx config; confirm
# (the intended form may have been `cat > $PAGESPEED_CONF <<EOF` with
# commented lines).
echo > $PAGESPEED_CONF <<EOF
This file is automatically generated from $PAGESPEED_CONF_TEMPLATE"
by nginx_system_test.sh; don't edit here."
EOF
# Fill in every @@PLACEHOLDER@@ in the template; '#' is the sed delimiter
# so the substituted paths may contain '/'.
cat $PAGESPEED_CONF_TEMPLATE \
  | sed 's#@@DAEMON@@#'"$DAEMON"'#' \
  | sed 's#@@TEST_TMP@@#'"$TEST_TMP/"'#' \
  | sed 's#@@PROXY_CACHE@@#'"$PROXY_CACHE/"'#' \
  | sed 's#@@TMP_PROXY_CACHE@@#'"$TMP_PROXY_CACHE/"'#' \
  | sed 's#@@ERROR_LOG@@#'"$ERROR_LOG"'#' \
  | sed 's#@@ACCESS_LOG@@#'"$ACCESS_LOG"'#' \
  | sed 's#@@FILE_CACHE@@#'"$FILE_CACHE/"'#' \
  | sed 's#@@SECONDARY_CACHE@@#'"$SECONDARY_CACHE/"'#' \
  | sed 's#@@SHM_CACHE@@#'"$SHM_CACHE/"'#' \
  | sed 's#@@SERVER_ROOT@@#'"$SERVER_ROOT"'#' \
  | sed 's#@@PRIMARY_PORT@@#'"$PRIMARY_PORT"'#' \
  | sed 's#@@SECONDARY_PORT@@#'"$SECONDARY_PORT"'#' \
  | sed 's#@@NATIVE_FETCHER@@#'"$NATIVE_FETCHER"'#' \
  | sed 's#@@RESOLVER@@#'"$RESOLVER"'#' \
  >> $PAGESPEED_CONF

# make sure we substituted all the variables
check_not_simple grep @@ $PAGESPEED_CONF
# start nginx with new config
if $USE_VALGRIND; then
  # Run nginx under valgrind in the background; the EXIT trap tears
  # valgrind down when this script ends.
  (valgrind -q --leak-check=full --gen-suppressions=all \
    --show-possibly-lost=no --log-file=$TEST_TMP/valgrind.log \
    --suppressions="$this_dir/valgrind.sup" \
    $NGINX_EXECUTABLE -c $PAGESPEED_CONF) & VALGRIND_PID=$!
  trap "echo 'terminating valgrind!' && kill -s sigterm $VALGRIND_PID" EXIT

  echo "Wait until nginx is ready to accept connections"
  # Startup under valgrind is slow; poll until the example page responds.
  while ! curl -I "http://$PRIMARY_HOSTNAME/mod_pagespeed_example/" 2>/dev/null; do
    sleep 0.1;
  done
  echo "Valgrind (pid:$VALGRIND_PID) is logging to $TEST_TMP/valgrind.log"
else
  # Plain run: capture config-load output so failures can be diagnosed.
  TRACE_FILE="$TEST_TMP/conf_loading_trace"
  $NGINX_EXECUTABLE -c $PAGESPEED_CONF >& "$TRACE_FILE"
  if [[ $? -ne 0 ]]; then
    echo "FAIL"
    cat $TRACE_FILE
    # Give a hint for the most common cause of a config-load failure.
    if [[ $(grep -c "unknown directive \"proxy_cache_purge\"" $TRACE_FILE) == 1 ]]; then
      echo "This test requires proxy_cache_purge. One way to do this:"
      echo "Run git clone https://github.com/FRiCKLE/ngx_cache_purge.git"
      echo "And compile nginx with the additional ngx_cache_purge module."
    fi
    rm $TRACE_FILE
    exit 1
  fi
fi
# $RUN_TESTS is expected from the environment (set by run_tests.sh —
# presumably; not defined in this file). When false, leave nginx running
# for manual testing and exit with status 4.
if $RUN_TESTS; then
  echo "Starting tests"
else
  if $USE_VALGRIND; then
    # Clear valgrind trap
    trap - EXIT
    echo "To end valgrind, run 'kill -s quit $VALGRIND_PID'"
  fi
  echo "Not running tests; commence manual testing"
  exit 4
fi

# run generic system tests
SYSTEM_TEST_FILE="$MOD_PAGESPEED_DIR/src/net/instaweb/system/system_test.sh"
if [ ! -e "$SYSTEM_TEST_FILE" ] ; then
  echo "Not finding $SYSTEM_TEST_FILE -- is mod_pagespeed not in a parallel"
  echo "directory to ngx_pagespeed?"
  exit 2
fi
PSA_JS_LIBRARY_URL_PREFIX="ngx_pagespeed_static"

# An expected failure can be indicated like: "~In-place resource optimization~"
PAGESPEED_EXPECTED_FAILURES="
~IPRO-optimized resources should have fixed size, not chunked.~
"

# Some tests are flakey under valgrind. For now, add them to the expected failures
# when running under valgrind.
if $USE_VALGRIND; then
  PAGESPEED_EXPECTED_FAILURES+="
~combine_css Maximum size of combined CSS.~
~prioritize_critical_css~
~IPRO flow uses cache as expected.~
~IPRO flow doesn't copy uncacheable resources multiple times.~
"
fi

# The existing system test takes its arguments as positional parameters, and
# wants different ones than we want, so we need to reset our positional args.
set -- "$PRIMARY_HOSTNAME"
source $SYSTEM_TEST_FILE

# $HOSTNAME is presumably set by the sourced system test file — confirm.
STATISTICS_URL=http://$HOSTNAME/ngx_pagespeed_statistics

# Define a mechanism to start a test before the cache-flush and finish it
# after the cache-flush. This mechanism is preferable to flushing cache
# within a test as that requires waiting 5 seconds for the poll, so we'd
# like to limit the number of cache flushes and exploit it on behalf of
# multiple tests.

# Variable holding a space-separated lists of bash functions to run after
# flushing cache.
post_cache_flush_test=""
# Register a function (by name) to be run after the next cache flush; the
# registry is the space-separated list in $post_cache_flush_test.
on_cache_flush() {
  post_cache_flush_test="${post_cache_flush_test} $1"
}
# Invoke, in registration order, every function queued via on_cache_flush.
run_post_cache_flush() {
  local fn
  for fn in $post_cache_flush_test; do
    "$fn"
  done
}
# nginx-specific system tests
start_test Test pagespeed directive inside if block inside location block.
URL="http://if-in-location.example.com/"
URL+="mod_pagespeed_example/inline_javascript.html"
# When we specify the X-Custom-Header-Inline-Js that triggers an if block in the
# config which turns on inline_javascript.
WGET_ARGS="--header=X-Custom-Header-Inline-Js:Yes"
http_proxy=$SECONDARY_HOSTNAME \
  fetch_until $URL 'grep -c document.write' 1
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $WGET_ARGS $URL)
check_from "$OUT" fgrep "X-Inline-Javascript: Yes"
check_not_from "$OUT" fgrep "inline_javascript.js"
# Without that custom header we don't trigger the if block, and shouldn't get
# any inline javascript.
WGET_ARGS=""
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $WGET_ARGS $URL)
check_from "$OUT" fgrep "X-Inline-Javascript: No"
check_from "$OUT" fgrep "inline_javascript.js"
check_not_from "$OUT" fgrep "document.write"
# Tests related to rewritten response (downstream) caching.
if [ "$NATIVE_FETCHER" = "on" ]; then
  echo "Native fetcher doesn't support PURGE requests and so we can't use or"
  echo "test downstream caching."
else
  CACHABLE_HTML_LOC="${SECONDARY_HOSTNAME}/mod_pagespeed_test/cachable_rewritten_html"
  TMP_LOG_LINE="proxy_cache.example.com GET /purge/mod_pagespeed_test/cachable_rewritten_"
  PURGE_REQUEST_IN_ACCESS_LOG=$TMP_LOG_LINE"html/downstream_caching.html.*(200)"
  # Number of downstream cache purges should be 0 here.
  CURRENT_STATS=$($WGET_DUMP $STATISTICS_URL)
  check_from "$CURRENT_STATS" egrep -q \
    "downstream_cache_purge_attempts:[[:space:]]*0"
  # The 1st request results in a cache miss, non-rewritten response
  # produced by pagespeed code and a subsequent purge request.
  start_test Check for case where rewritten cache should get purged.
  WGET_ARGS="--header=Host:proxy_cache.example.com"
  OUT=$($WGET_DUMP $WGET_ARGS $CACHABLE_HTML_LOC/downstream_caching.html)
  check_not_from "$OUT" egrep -q "pagespeed.ic"
  check_from "$OUT" egrep -q "X-Cache: MISS"
  fetch_until $STATISTICS_URL \
    'grep -c downstream_cache_purge_attempts:[[:space:]]*1' 1
  # The purge request is issued asynchronously, so poll the access log until
  # it shows up before asserting on its count.
  while [ x"$(grep "$PURGE_REQUEST_IN_ACCESS_LOG" $ACCESS_LOG)" == x"" ] ; do
    echo "waiting for purge request to show up in access log"
    sleep .2
  done
  check [ $(grep -ce "$PURGE_REQUEST_IN_ACCESS_LOG" $ACCESS_LOG) = 1 ];
  # The 2nd request results in a cache miss (because of the previous purge),
  # rewritten response produced by pagespeed code and no new purge requests.
  start_test Check for case where rewritten cache should not get purged.
  BLOCKING_WGET_ARGS=$WGET_ARGS" --header=X-PSA-Blocking-Rewrite:psatest"
  OUT=$($WGET_DUMP $BLOCKING_WGET_ARGS \
    $CACHABLE_HTML_LOC/downstream_caching.html)
  check_from "$OUT" egrep -q "pagespeed.ic"
  check_from "$OUT" egrep -q "X-Cache: MISS"
  CURRENT_STATS=$($WGET_DUMP $STATISTICS_URL)
  check_from "$CURRENT_STATS" egrep -q \
    "downstream_cache_purge_attempts:[[:space:]]*1"
  check [ $(grep -ce "$PURGE_REQUEST_IN_ACCESS_LOG" $ACCESS_LOG) = 1 ];
  # The 3rd request results in a cache hit (because the previous response is
  # now present in cache), rewritten response served out from cache and not
  # by pagespeed code and no new purge requests.
  start_test Check for case where there is a rewritten cache hit.
  OUT=$($WGET_DUMP $WGET_ARGS $CACHABLE_HTML_LOC/downstream_caching.html)
  check_from "$OUT" egrep -q "pagespeed.ic"
  check_from "$OUT" egrep -q "X-Cache: HIT"
  fetch_until $STATISTICS_URL \
    'grep -c downstream_cache_purge_attempts:[[:space:]]*1' 1
  check [ $(grep -ce "$PURGE_REQUEST_IN_ACCESS_LOG" $ACCESS_LOG) = 1 ];
fi
start_test Check for correct default X-Page-Speed header format.
OUT=$($WGET_DUMP $EXAMPLE_ROOT/combine_css.html)
check_from "$OUT" egrep -q \
  '^X-Page-Speed: [0-9]+[.][0-9]+[.][0-9]+[.][0-9]+-[0-9]+'
start_test pagespeed is defaulting to more than PassThrough
fetch_until $TEST_ROOT/bot_test.html 'grep -c \.pagespeed\.' 2
start_test 404s are served and properly recorded.
NUM_404=$(scrape_stat resource_404_count)
echo "Initial 404s: $NUM_404"
WGET_ERROR=$($WGET -O /dev/null $BAD_RESOURCE_URL 2>&1)
check_from "$WGET_ERROR" fgrep -q "404 Not Found"
# Check that the stat got bumped.
NUM_404_FINAL=$(scrape_stat resource_404_count)
echo "Final 404s: $NUM_404_FINAL"
check [ $(expr $NUM_404_FINAL - $NUM_404) -eq 1 ]
# Check that the stat doesn't get bumped on non-404s.
URL="http://$HOSTNAME/mod_pagespeed_example/styles/"
URL+="W.rewrite_css_images.css.pagespeed.cf.Hash.css"
OUT=$(wget -O - -q $URL)
check_from "$OUT" grep background-image
NUM_404_REALLY_FINAL=$(scrape_stat resource_404_count)
check [ $NUM_404_FINAL -eq $NUM_404_REALLY_FINAL ]
start_test Non-local access to statistics fails.
# This test only makes sense if you're running tests against localhost.
if [ "$HOSTNAME" = "localhost:$PRIMARY_PORT" ] ; then
  # NOTE(review): this parses 'inet addr:' which is the old Linux net-tools
  # ifconfig format; it yields an empty NON_LOCAL_IP on systems with the
  # newer 'inet x.x.x.x' output -- confirm on the target test machines.
  NON_LOCAL_IP=$(ifconfig | egrep -o 'inet addr:[0-9]+.[0-9]+.[0-9]+.[0-9]+' \
    | awk -F: '{print $2}' | grep -v ^127 | head -n 1)
  # Make sure pagespeed is listening on NON_LOCAL_IP.
  URL="http://$NON_LOCAL_IP:$PRIMARY_PORT/mod_pagespeed_example/styles/"
  URL+="W.rewrite_css_images.css.pagespeed.cf.Hash.css"
  OUT=$(wget -O - -q $URL)
  check_from "$OUT" grep background-image
  # Make sure we can't load statistics from NON_LOCAL_IP.
  ALT_STAT_URL=$(echo $STATISTICS_URL | sed s#localhost#$NON_LOCAL_IP#)
  echo "wget $ALT_STAT_URL >& $TEMPDIR/alt_stat_url.$$"
  # wget exit status 8 means "server issued an error response", i.e. the
  # statistics handler correctly refused the non-local request.
  wget $ALT_STAT_URL >& "$TEMPDIR/alt_stat_url.$$"
  check [ $? = 8 ]
  rm -f "$TEMPDIR/alt_stat_url.$$"
  ALT_CE_URL="$ALT_STAT_URL.pagespeed.ce.8CfGBvwDhH.css"
  wget -O - $ALT_CE_URL >& "$TEMPDIR/alt_ce_url.$$"
  check [ $? = 8 ]
  wget -O - --header="Host: $HOSTNAME" $ALT_CE_URL >& "$TEMPDIR/alt_ce_url.$$"
  check [ $? = 8 ]
  rm -f "$TEMPDIR/alt_ce_url.$$"
fi
start_test Accept bad query params and headers
# The examples page should have this EXPECTED_EXAMPLES_TEXT on it.
EXPECTED_EXAMPLES_TEXT="PageSpeed Examples Directory"
OUT=$(wget -O - $EXAMPLE_ROOT)
check_from "$OUT" grep "$EXPECTED_EXAMPLES_TEXT"
# It should still be there with bad query params.
OUT=$(wget -O - $EXAMPLE_ROOT?PageSpeedFilters=bogus)
check_from "$OUT" grep "$EXPECTED_EXAMPLES_TEXT"
# And also with bad request headers.
OUT=$(wget -O - --header=PageSpeedFilters:bogus $EXAMPLE_ROOT)
echo $OUT
check_from "$OUT" grep "$EXPECTED_EXAMPLES_TEXT"
# Test that loopback route fetcher works with vhosts not listening on
# 127.0.0.1
start_test IP choice for loopback fetches.
HOST_NAME="loopbackfetch.example.com"
URL="$HOST_NAME/mod_pagespeed_example/rewrite_images.html"
http_proxy=127.0.0.2:$SECONDARY_PORT \
  fetch_until $URL 'grep -c .pagespeed.ic' 2
# When we allow ourself to fetch a resource because the Host header tells us
# that it is one of our resources, we should be fetching it from ourself.
start_test "Loopback fetches go to local IPs without DNS lookup"
# If we're properly fetching from ourself we will issue loopback fetches for
# /mod_pagespeed_example/combine_javascriptN.js, which will succeed, so
# combining will work. If we're taking 'Host:www.google.com' to mean that we
# should fetch from www.google.com then those fetches will fail because
# google.com won't have /mod_pagespeed_example/combine_javascriptN.js and so
# we'll not rewrite any resources.
URL="$HOSTNAME/mod_pagespeed_example/combine_javascript.html"
URL+="?PageSpeed=on&PageSpeedFilters=combine_javascript"
fetch_until "$URL" "fgrep -c .pagespeed." 1 --header=Host:www.google.com
# If this accepts the Host header and fetches from google.com it will fail with
# a 404. Instead it should use a loopback fetch and succeed.
URL="$HOSTNAME/mod_pagespeed_example/.pagespeed.ce.8CfGBvwDhH.css"
check wget -O /dev/null --header=Host:www.google.com "$URL"
# test_filter (from the sourced system test) sets URL/FETCHED/WGET_ARGS for
# the named filter's example page.
test_filter combine_css combines 4 CSS files into 1.
fetch_until $URL 'grep -c text/css' 1
check run_wget_with_args $URL
test_resource_ext_corruption $URL $combine_css_filename
test_filter extend_cache rewrites an image tag.
fetch_until $URL 'grep -c src.*91_WewrLtP' 1
check run_wget_with_args $URL
echo about to test resource ext corruption...
test_resource_ext_corruption $URL images/Puzzle.jpg.pagespeed.ce.91_WewrLtP.jpg
test_filter outline_javascript outlines large scripts, but not small ones.
check run_wget_with_args $URL
check egrep -q '<script.*large.*src=' $FETCHED # outlined
check egrep -q '<script.*small.*var hello' $FETCHED # not outlined
start_test compression is enabled for rewritten JS.
JS_URL=$(egrep -o http://.*[.]pagespeed.*[.]js $FETCHED)
echo "JS_URL=\$\(egrep -o http://.*[.]pagespeed.*[.]js $FETCHED\)=\"$JS_URL\""
JS_HEADERS=$($WGET -O /dev/null -q -S --header='Accept-Encoding: gzip' \
  $JS_URL 2>&1)
echo JS_HEADERS=$JS_HEADERS
check_from "$JS_HEADERS" egrep -qi 'HTTP/1[.]. 200 OK'
check_from "$JS_HEADERS" fgrep -qi 'Content-Encoding: gzip'
check_from "$JS_HEADERS" fgrep -qi 'Vary: Accept-Encoding'
check_from "$JS_HEADERS" egrep -qi '(Etag: W/"0")|(Etag: W/"0-gzip")'
check_from "$JS_HEADERS" fgrep -qi 'Last-Modified:'
WGET_ARGS="" # Done with test_filter, so clear WGET_ARGS.
start_test Respect X-Forwarded-Proto when told to
FETCHED=$OUTDIR/x_forwarded_proto
URL=$SECONDARY_HOSTNAME/mod_pagespeed_example/?PageSpeedFilters=add_base_tag
HEADERS="--header=X-Forwarded-Proto:https --header=Host:xfp.example.com"
check $WGET_DUMP -O $FETCHED $HEADERS $URL
# When enabled, we respect X-Forwarded-Proto and thus list base as https.
check fgrep -q '<base href="https://' $FETCHED
# Test RetainComment directive.
test_filter remove_comments retains appropriate comments.
URL="$SECONDARY_HOSTNAME/mod_pagespeed_example/$FILE"
check run_wget_with_args $URL --header=Host:retaincomment.example.com
check grep -q retained $FETCHED # RetainComment directive
# Make sure that when in PreserveURLs mode that we don't rewrite URLs. This is
# non-exhaustive, the unit tests should cover the rest.
# Note: We block with psatest here because this is a negative test. We wouldn't
# otherwise know how many wget attempts should be made.
WGET_ARGS="--header=X-PSA-Blocking-Rewrite:psatest"
WGET_ARGS+=" --header=Host:preserveurls.example.com"
start_test PreserveURLs on prevents URL rewriting
FILE=preserveurls/on/preserveurls.html
URL=$SECONDARY_HOSTNAME/mod_pagespeed_test/$FILE
FETCHED=$OUTDIR/preserveurls.html
check run_wget_with_args $URL
WGET_ARGS=""
check_not fgrep -q .pagespeed. $FETCHED
# When PreserveURLs is off do a quick check to make sure that normal rewriting
# occurs. This is not exhaustive, the unit tests should cover the rest.
start_test PreserveURLs off causes URL rewriting
WGET_ARGS="--header=Host:preserveurls.example.com"
FILE=preserveurls/off/preserveurls.html
URL=$SECONDARY_HOSTNAME/mod_pagespeed_test/$FILE
FETCHED=$OUTDIR/preserveurls.html
# Check that style.css was inlined.
fetch_until $URL 'egrep -c big.css.pagespeed.' 1
# Check that introspection.js was inlined.
fetch_until $URL 'grep -c document\.write(\"External' 1
# Check that the image was optimized.
fetch_until $URL 'grep -c BikeCrashIcn\.png\.pagespeed\.' 1
# When Cache-Control: no-transform is in the response make sure that
# the URL is not rewritten and that the no-transform header remains
# in the resource.
start_test HonorNoTransform cache-control: no-transform
WGET_ARGS="--header=X-PSA-Blocking-Rewrite:psatest"
WGET_ARGS+=" --header=Host:notransform.example.com"
URL="$SECONDARY_HOSTNAME/mod_pagespeed_test/no_transform/image.html"
FETCHED=$OUTDIR/output
# Fetch twice: the first request primes the rewriter, the second observes
# whether anything was (incorrectly) transformed.
wget -O - $URL $WGET_ARGS > $FETCHED
sleep .1 # Give pagespeed time to transform the image if it's going to.
wget -O - $URL $WGET_ARGS > $FETCHED
# Make sure that the URLs in the html are not rewritten
check_not fgrep -q '.pagespeed.' $FETCHED
URL="$SECONDARY_HOSTNAME/mod_pagespeed_test/no_transform/BikeCrashIcn.png"
wget -O - -S $URL $WGET_ARGS &> $FETCHED
# Make sure that the no-transform header is still there
check grep -q 'Cache-Control:.*no-transform' $FETCHED
WGET_ARGS=""
start_test respect vary user-agent
WGET_ARGS=""
URL="$SECONDARY_HOSTNAME/mod_pagespeed_test/vary/index.html"
URL+="?PageSpeedFilters=inline_css"
FETCH_CMD="$WGET_DUMP --header=Host:respectvary.example.com $URL"
OUT=$($FETCH_CMD)
# We want to verify that css is not inlined, but if we just check once then
# pagespeed doesn't have long enough to be able to inline it.
sleep .1
OUT=$($FETCH_CMD)
check_not_from "$OUT" fgrep "<style>"
# If DisableRewriteOnNoTransform is turned off, verify that the rewriting
# applies even if Cache-control: no-transform is set.
start_test rewrite on Cache-control: no-transform
URL=$TEST_ROOT/disable_no_transform/index.html?PageSpeedFilters=inline_css
fetch_until -save -recursive $URL 'grep -c style' 2
WGET_ARGS=""
start_test ShardDomain directive in location block
# fetch_until -save leaves the last fetched page in $FETCH_FILE for the
# follow-up greps below.
fetch_until -save $TEST_ROOT/shard/shard.html 'grep -c \.pagespeed\.' 4
check [ $(grep -ce href=\"http://shard1 $FETCH_FILE) = 2 ];
check [ $(grep -ce href=\"http://shard2 $FETCH_FILE) = 2 ];
start_test LoadFromFile
URL=$TEST_ROOT/load_from_file/index.html?PageSpeedFilters=inline_css
fetch_until $URL 'grep -c blue' 1
# The "httponly" directory is disallowed.
fetch_until $URL 'fgrep -c web.httponly.example.css' 1
# Loading .ssp.css files from file is disallowed.
fetch_until $URL 'fgrep -c web.example.ssp.css' 1
# There's an exception "allow" rule for "exception.ssp.css" so it can be loaded
# directly from the filesystem.
fetch_until $URL 'fgrep -c file.exception.ssp.css' 1
start_test statistics load
OUT=$($WGET_DUMP $STATISTICS_URL)
check_from "$OUT" grep 'VHost-Specific Statistics'
start_test scrape stats works
# This needs to be before reload, when we clear the stats.
check test $(scrape_stat image_rewrite_total_original_bytes) -ge 10000
# Test that ngx_pagespeed keeps working after nginx gets a signal to reload the
# configuration. This is in the middle of tests so that significant work
# happens both before and after.
start_test "Reload config"
check wget $EXAMPLE_ROOT/styles/W.rewrite_css_images.css.pagespeed.cf.Hash.css \
  -O /dev/null
check_simple "$NGINX_EXECUTABLE" -s reload -c "$PAGESPEED_CONF"
check wget $EXAMPLE_ROOT/styles/W.rewrite_css_images.css.pagespeed.cf.Hash.css \
  -O /dev/null
start_test LoadFromFileMatch
URL=$TEST_ROOT/load_from_file_match/index.html?PageSpeedFilters=inline_css
fetch_until $URL 'grep -c blue' 1
start_test Custom headers remain on HTML, but cache should be disabled.
URL=$TEST_ROOT/rewrite_compressed_js.html
echo $WGET_DUMP $URL
HTML_HEADERS=$($WGET_DUMP $URL)
check_from "$HTML_HEADERS" egrep -q "X-Extra-Header: 1"
# The extra header should only be added once, not twice.
check_not_from "$HTML_HEADERS" egrep -q "X-Extra-Header: 1, 1"
check_from "$HTML_HEADERS" egrep -q 'Cache-Control: max-age=0, no-cache'
start_test ModifyCachingHeaders
URL=$TEST_ROOT/retain_cache_control/index.html
OUT=$($WGET_DUMP $URL)
check_from "$OUT" grep -q "Cache-Control: private, max-age=3000"
check_from "$OUT" grep -q "Last-Modified:"
start_test ModifyCachingHeaders with DownstreamCaching enabled.
URL=$TEST_ROOT/retain_cache_control_with_downstream_caching/index.html
echo $WGET_DUMP -S $URL
OUT=$($WGET_DUMP -S $URL)
check_not_from "$OUT" grep -q "Last-Modified:"
check_from "$OUT" grep -q "Cache-Control: private, max-age=3000"
test_filter combine_javascript combines 2 JS files into 1.
start_test combine_javascript with long URL still works
URL=$TEST_ROOT/combine_js_very_many.html?PageSpeedFilters=combine_javascript
fetch_until $URL 'grep -c src=' 4
# "aris" below = AvoidRenamingIntrospectiveJavascript.
start_test aris disables js combining for introspective js and only i-js
URL="$TEST_ROOT/avoid_renaming_introspective_javascript__on/"
URL+="?PageSpeedFilters=combine_javascript"
fetch_until $URL 'grep -c src=' 2
start_test aris disables js combining only when enabled
URL="$TEST_ROOT/avoid_renaming_introspective_javascript__off.html?"
URL+="PageSpeedFilters=combine_javascript"
fetch_until $URL 'grep -c src=' 1
test_filter inline_javascript inlines a small JS file
start_test aris disables js inlining for introspective js and only i-js
URL="$TEST_ROOT/avoid_renaming_introspective_javascript__on/"
URL+="?PageSpeedFilters=inline_javascript"
fetch_until $URL 'grep -c src=' 1
start_test aris disables js inlining only when enabled
URL="$TEST_ROOT/avoid_renaming_introspective_javascript__off.html"
URL+="?PageSpeedFilters=inline_javascript"
fetch_until $URL 'grep -c src=' 0
test_filter rewrite_javascript minifies JavaScript and saves bytes.
start_test aris disables js cache extention for introspective js and only i-js
URL="$TEST_ROOT/avoid_renaming_introspective_javascript__on/"
URL+="?PageSpeedFilters=rewrite_javascript"
# first check something that should get rewritten to know we're done with
# rewriting
fetch_until -save $URL 'grep -c "src=\"../normal.js\""' 0
check [ $(grep -c "src=\"../introspection.js\"" $FETCH_FILE) = 1 ]
start_test aris disables js cache extension only when enabled
URL="$TEST_ROOT/avoid_renaming_introspective_javascript__off.html"
URL+="?PageSpeedFilters=rewrite_javascript"
fetch_until -save $URL 'grep -c src=\"normal.js\"' 0
check [ $(grep -c src=\"introspection.js\" $FETCH_FILE) = 0 ]
# Check that no filter changes urls for introspective javascript if
# avoid_renaming_introspective_javascript is on
start_test aris disables url modification for introspective js
URL="$TEST_ROOT/avoid_renaming_introspective_javascript__on/"
URL+="?PageSpeedFilters=testing,core"
# first check something that should get rewritten to know we're done with
# rewriting
fetch_until -save $URL 'grep -c src=\"../normal.js\"' 0
check [ $(grep -c src=\"../introspection.js\" $FETCH_FILE) = 1 ]
start_test aris disables url modification only when enabled
URL="$TEST_ROOT/avoid_renaming_introspective_javascript__off.html"
URL+="?PageSpeedFilters=testing,core"
fetch_until -save $URL 'grep -c src=\"normal.js\"' 0
check [ $(grep -c src=\"introspection.js\" $FETCH_FILE) = 0 ]
start_test HTML add_instrumentation lacks '&' and does not contain CDATA
$WGET -O $WGET_OUTPUT $TEST_ROOT/add_instrumentation.html\
?PageSpeedFilters=add_instrumentation
check [ $(grep -c "\&" $WGET_OUTPUT) = 0 ]
# In mod_pagespeed this check is that we *do* contain CDATA. That's because
# mod_pagespeed generally runs before response headers are finalized so it has
# to assume the page is xhtml because the 'Content-Type' header might just not
# have been set yet. See RewriteDriver::MimeTypeXhtmlStatus(). In
# ngx_pagespeed response headers are already final when we're processing the
# body, so we know whether we're dealing with xhtml and in this case know we
# don't need CDATA.
check [ $(grep -c '//<\!\[CDATA\[' $WGET_OUTPUT) = 0 ]
start_test XHTML add_instrumentation also lacks '&' but contains CDATA
$WGET -O $WGET_OUTPUT $TEST_ROOT/add_instrumentation.xhtml\
?PageSpeedFilters=add_instrumentation
check [ $(grep -c "\&" $WGET_OUTPUT) = 0 ]
check [ $(grep -c '//<\!\[CDATA\[' $WGET_OUTPUT) = 1 ]
start_test cache_partial_html enabled has no effect
$WGET -O $WGET_OUTPUT $TEST_ROOT/add_instrumentation.html\
?PageSpeedFilters=cache_partial_html
check [ $(grep -c '<html>' $WGET_OUTPUT) = 1 ]
check [ $(grep -c '<body>' $WGET_OUTPUT) = 1 ]
check [ $(grep -c 'pagespeed.panelLoader' $WGET_OUTPUT) = 0 ]
start_test flush_subresources rewriter is not applied
URL="$TEST_ROOT/flush_subresources.html?\
PageSpeedFilters=flush_subresources,extend_cache_css,\
extend_cache_scripts"
# Fetch once with X-PSA-Blocking-Rewrite so that the resources get rewritten and
# property cache (once it's ported to ngx_pagespeed) is updated with them.
wget -O - --header 'X-PSA-Blocking-Rewrite: psatest' $URL > $TEMPDIR/flush.$$
# Fetch again. The property cache has (would have, if it were ported) the
# subresources this time but flush_subresources rewriter is not applied. This is
# a negative test case because this rewriter does not exist in ngx_pagespeed
# yet.
check [ `wget -O - $URL | grep -o 'link rel="subresource"' | wc -l` = 0 ]
rm -f $TEMPDIR/flush.$$
WGET_ARGS=""
start_test Respect custom options on resources.
IMG_NON_CUSTOM="$EXAMPLE_ROOT/images/xPuzzle.jpg.pagespeed.ic.fakehash.jpg"
IMG_CUSTOM="$TEST_ROOT/custom_options/xPuzzle.jpg.pagespeed.ic.fakehash.jpg"
# Identical images, but in the location block for the custom_options directory
# we additionally disable core-filter convert_jpeg_to_progressive which gives a
# larger file.
fetch_until $IMG_NON_CUSTOM 'wc -c' 98276 "" -le
fetch_until $IMG_CUSTOM 'wc -c' 102902 "" -le
# Test our handling of headers when a FLUSH event occurs.
start_test PHP is enabled.
echo "This test requires php. One way to set up php is with:"
echo " php-cgi -b 127.0.0.1:9000"
# Always fetch the first file so we can check if PHP is enabled.
FILE=php_withoutflush.php
URL=$TEST_ROOT/$FILE
FETCHED=$WGET_DIR/$FILE
check $WGET_DUMP $URL -O $FETCHED
# If PHP isn't running, nginx serves the raw script; '<?php' in the body
# means the test environment is misconfigured.
check_not grep -q '<?php' $FETCHED
start_test Headers are not destroyed by a flush event.
check [ $(grep -c '^X-Page-Speed:' $FETCHED) = 1 ]
check [ $(grep -c '^X-My-PHP-Header: without_flush' $FETCHED) = 1 ]
# mod_pagespeed doesn't clear the content length header if there aren't any
# flushes, but ngx_pagespeed does. It's possible that ngx_pagespeed should also
# avoid clearing the content length, but it doesn't and I don't think it's
# important, so don't check for content-length.
# check [ $(grep -c '^Content-Length: [0-9]' $FETCHED) = 1 ]
FILE=php_withflush.php
URL=$TEST_ROOT/$FILE
FETCHED=$WGET_DIR/$FILE
$WGET_DUMP $URL > $FETCHED
check [ $(grep -c '^X-Page-Speed:' $FETCHED) = 1 ]
check [ $(grep -c '^X-My-PHP-Header: with_flush' $FETCHED) = 1 ]
# Test fetching a pagespeed URL via Nginx running as a reverse proxy, with
# pagespeed loaded, but disabled for the proxied domain. As reported in
# Issue 582 this used to fail in mod_pagespeed with a 403 (Forbidden).
start_test Reverse proxy a pagespeed URL.
PROXY_PATH="http://modpagespeed.com/styles"
ORIGINAL="${PROXY_PATH}/yellow.css"
FILTERED="${PROXY_PATH}/A.yellow.css.pagespeed.cf.KM5K8SbHQL.css"
WGET_ARGS="--save-headers"
# We should be able to fetch the original ...
echo http_proxy=$SECONDARY_HOSTNAME $WGET --save-headers -O - $ORIGINAL
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET --save-headers -O - $ORIGINAL 2>&1)
check_from "$OUT" fgrep " 200 OK"
# ... AND the rewritten version.
echo http_proxy=$SECONDARY_HOSTNAME $WGET --save-headers -O - $FILTERED
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET --save-headers -O - $FILTERED 2>&1)
check_from "$OUT" fgrep " 200 OK"
start_test MapProxyDomain
# depends on MapProxyDomain in pagespeed_test.conf.template
URL=$EXAMPLE_ROOT/proxy_external_resource.html
echo Rewrite HTML with reference to a proxyable image.
fetch_until -save -recursive $URL?PageSpeedFilters=-inline_images \
  'grep -c 1.gif.pagespeed' 1
# To make sure that we can reconstruct the proxied content by going back
# to the origin, we must avoid hitting the output cache.
# Note that cache-flushing does not affect the cache of rewritten resources;
# only input-resources and metadata. To avoid hitting that cache and force
# us to rewrite the resource from origin, we grab this resource from a
# virtual host attached to a different cache.
#
# With the proper hash, we'll get a long cache lifetime.
SECONDARY_HOST="http://mpd.example.com/gstatic_images"
PROXIED_IMAGE="$SECONDARY_HOST/$(basename $WGET_DIR/*1.gif.pagespeed*)"
WGET_ARGS="--save-headers"
start_test $PROXIED_IMAGE expecting one year cache.
http_proxy=$SECONDARY_HOSTNAME fetch_until $PROXIED_IMAGE \
  "grep -c max-age=31536000" 1
# With the wrong hash, we'll get a short cache lifetime (and also no output
# cache hit.
WRONG_HASH="0"
PROXIED_IMAGE="$SECONDARY_HOST/1.gif.pagespeed.ce.$WRONG_HASH.jpg"
start_test Fetching $PROXIED_IMAGE expecting short private cache.
http_proxy=$SECONDARY_HOSTNAME fetch_until $PROXIED_IMAGE \
  "grep -c max-age=300,private" 1
WGET_ARGS=""
# This is dependent upon having a /ngx_pagespeed_beacon handler.
test_filter add_instrumentation beacons load.
# Nginx won't sent a Content-Length header on a 204, and while this is correct
# per rfc 2616 wget hangs. Adding --no-http-keep-alive fixes that, as wget will.
# send 'Connection: close' in its request headers, which will make nginx
# respond with that as well. Check that we got a 204.
BEACON_URL="http%3A%2F%2Fimagebeacon.example.com%2Fmod_pagespeed_test%2F"
OUT=$(wget -q --save-headers -O - --no-http-keep-alive \
  "http://$HOSTNAME/ngx_pagespeed_beacon?ets=load:13&url=$BEACON_URL")
check_from "$OUT" grep '^HTTP/1.1 204'
# The $'...' tells bash to interpret c-style escapes, \r in this case.
check_from "$OUT" grep $'^Cache-Control: max-age=0, no-cache\r$'
start_test server-side includes
fetch_until -save $TEST_ROOT/ssi/ssi.shtml?PageSpeedFilters=combine_css \
  'grep -c \.pagespeed\.' 1
check [ $(grep -ce $combine_css_filename $FETCH_FILE) = 1 ];
start_test Embed image configuration in rewritten image URL.
# The apache test names these virtual hosts as embed_config_*, which is
# unfortunate as underscores are not allowed in domain names. Apache doesn't
# care, and nginx doesn't either, except when you're proxying. So you can do:
#
#   GET /embed_config.html HTTP/1.1
#   Host: embed_config_html.example.com
#
# and it will work fine, but if you do:
#
#   GET http://embed_config_html.example.com/embed_config.html HTTP/1.1
#
# then nginx will close the connection before you can even give it a Host
# header. I've modified this test code to replace embed_config_ with
# embed-config-, but the html file on disk has the underscore versions. Let's
# make a new html file that has the hyphen version:
cat "$SERVER_ROOT/mod_pagespeed_test/embed_config.html" | \
  sed s/embed_config_/embed-config-/g > \
  "$SERVER_ROOT/mod_pagespeed_test/embed-config.html"
# The embedded configuration is placed between the "pagespeed" and "ic", e.g.
# *xPuzzle.jpg.pagespeed.gp+jp+pj+js+rj+rp+rw+ri+cp+md+iq=73.ic.oFXPiLYMka.jpg
# We use a regex matching "gp+jp+pj+js+rj+rp+rw+ri+cp+md+iq=73" rather than
# spelling it out to avoid test regolds when we add image filter IDs.
WGET_ARGS="--save-headers"
http_proxy=$SECONDARY_HOSTNAME fetch_until -save -recursive \
    http://embed-config-html.example.com/embed-config.html \
    'fgrep -c .pagespeed.' 3
# with the default rewriters in vhost embed-config-resources.example.com
# the image will be >200k. But by enabling resizing & compression 73
# as specified in the HTML domain, and transmitting that configuration via
# image URL query param, the image file (including headers) is 8341 bytes.
# We check against 10000 here so this test isn't sensitive to
# image-compression tweaks (we have enough of those elsewhere).
check_file_size "$WGET_DIR/256x192xPuz*.pagespeed.*iq=*.ic.*" -lt 10000
# The CSS file gets rewritten with embedded options, and will have an
# embedded image in it as well.
check_file_size "$WGET_DIR/*rewrite_css_images.css.pagespeed.*+ii+*+iq=*.cf.*" \
  -lt 600
# The JS file is rewritten but has no related options set, so it will
# not get the embedded options between "pagespeed" and "jm".
check_file_size "$WGET_DIR/rewrite_javascript.js.pagespeed.jm.*.js" -lt 500
# Count how many bytes there are of body, skipping the initial headers
# saved at the top of the file by wget --save-headers.
#   $1 - file containing HTTP headers followed by the body
# Outputs the body byte count on stdout.  Relies on extract_headers (from the
# sourced system test helpers) to report how many header lines to skip.
function body_size {
  # Quote the filename (original broke on paths with spaces) and keep the
  # variables local so they don't leak into the script's global scope.
  local fname="$1"
  local header_lines
  header_lines=$(extract_headers "$fname" | wc -l)
  tail -n +$((header_lines + 1)) -- "$fname" | wc -c
}
# One flaw in the above test is that it short-circuits the decoding
# of the query-params because when pagespeed responds to the recursive
# wget fetch of the image, it finds the rewritten resource in the
# cache. The two vhosts are set up with the same cache. If they
# had different caches we'd have a different problem, which is that
# the first load of the image-rewrite from the resource vhost would
# not be resized. To make sure the decoding path works, we'll
# "finish" this test below after performing a cache flush, saving
# the encoded image and expected size.
EMBED_CONFIGURATION_IMAGE="http://embed-config-resources.example.com/images/"
EMBED_CONFIGURATION_IMAGE_TAIL=$(ls $WGET_DIR | grep 256x192xPuz | grep iq=)
EMBED_CONFIGURATION_IMAGE+="$EMBED_CONFIGURATION_IMAGE_TAIL"
EMBED_CONFIGURATION_IMAGE_LENGTH=$(
  body_size "$WGET_DIR/$EMBED_CONFIGURATION_IMAGE_TAIL")
# Grab the URL for the CSS file.
EMBED_CONFIGURATION_CSS_LEAF=$(ls $WGET_DIR | \
  grep '\.pagespeed\..*+ii+.*+iq=.*\.cf\..*')
EMBED_CONFIGURATION_CSS_LENGTH=$(
  body_size $WGET_DIR/$EMBED_CONFIGURATION_CSS_LEAF)
EMBED_CONFIGURATION_CSS_URL="http://embed-config-resources.example.com/styles"
EMBED_CONFIGURATION_CSS_URL+="/$EMBED_CONFIGURATION_CSS_LEAF"
# Grab the URL for that embedded image; it should *also* have the embedded
# configuration options in it, though wget/recursive will not have pulled
# it to a file for us (wget does not parse CSS) so we'll have to request it.
EMBED_CONFIGURATION_CSS_IMAGE=$WGET_DIR/*images.css.pagespeed.*+ii+*+iq=*.cf.*
EMBED_CONFIGURATION_CSS_IMAGE_URL=$(egrep -o \
  'http://.*iq=[0-9]*\.ic\..*\.jpg' \
  $EMBED_CONFIGURATION_CSS_IMAGE)
# fetch that file and make sure it has the right cache-control
http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP \
  $EMBED_CONFIGURATION_CSS_IMAGE_URL > "$WGET_DIR/img"
CSS_IMAGE_HEADERS=$(head -10 "$WGET_DIR/img")
check_from "$CSS_IMAGE_HEADERS" fgrep -q "Cache-Control: max-age=31536000"
EMBED_CONFIGURATION_CSS_IMAGE_LENGTH=$(body_size "$WGET_DIR/img")
# Finishes the embedded-configuration tests after the shared cache flush.
# Queued via on_cache_flush below; reads the EMBED_CONFIGURATION_* globals
# captured above.
function embed_image_config_post_flush() {
  # Finish off the url-params-.pagespeed.-resource tests with a clear
  # cache. We split the test like this to avoid having multiple
  # places where we flush cache, which requires sleeps since the
  # cache-flush is poll driven.
  start_test Embed image/css configuration decoding with clear cache.
  WGET_ARGS=""
  echo Looking for $EMBED_CONFIGURATION_IMAGE expecting \
    $EMBED_CONFIGURATION_IMAGE_LENGTH bytes
  http_proxy=$SECONDARY_HOSTNAME fetch_until "$EMBED_CONFIGURATION_IMAGE" \
    "wc -c" $EMBED_CONFIGURATION_IMAGE_LENGTH
  echo Looking for $EMBED_CONFIGURATION_CSS_IMAGE_URL expecting \
    $EMBED_CONFIGURATION_CSS_IMAGE_LENGTH bytes
  http_proxy=$SECONDARY_HOSTNAME fetch_until \
    "$EMBED_CONFIGURATION_CSS_IMAGE_URL" \
    "wc -c" $EMBED_CONFIGURATION_CSS_IMAGE_LENGTH
  echo Looking for $EMBED_CONFIGURATION_CSS_URL expecting \
    $EMBED_CONFIGURATION_CSS_LENGTH bytes
  http_proxy=$SECONDARY_HOSTNAME fetch_until \
    "$EMBED_CONFIGURATION_CSS_URL" \
    "wc -c" $EMBED_CONFIGURATION_CSS_LENGTH
}
on_cache_flush embed_image_config_post_flush
# Several cache flushing tests.
start_test Touching cache.flush flushes the cache.
# If we write fixed values into the css file here, there is a risk that
# we will end up seeing the 'right' value because an old process hasn't
# invalidated things yet, rather than because it updated to what we expect
# in the first run followed by what we expect in the second run.
# So, we incorporate the timestamp into RGB colors, using hours
# prefixed with 1 (as 0-123 fits the 0-255 range) to get a second value.
# A one-second precision is good enough since there is a sleep 2 below.
COLOR_SUFFIX=`date +%H,%M,%S\)`
COLOR0=rgb\($COLOR_SUFFIX
COLOR1=rgb\(1$COLOR_SUFFIX
# We test on three different cache setups:
#
#   1. A virtual host using the normal FileCachePath.
#   2. Another virtual host with a different FileCachePath.
#   3. Another virtual host with a different CacheFlushFilename.
#
# This means we need to repeat many of the steps three times.
echo "Clear out our existing state before we begin the test."
check touch "$FILE_CACHE/cache.flush"
check touch "$FILE_CACHE/othercache.flush"
check touch "$SECONDARY_CACHE/cache.flush"
sleep 1
CSS_FILE="$SERVER_ROOT/mod_pagespeed_test/update.css"
echo ".class myclass { color: $COLOR0; }" > "$CSS_FILE"
URL_PATH="mod_pagespeed_test/cache_flush_test.html"
URL="$SECONDARY_HOSTNAME/$URL_PATH"
# Populate all three vhost caches with the COLOR0 version of the page.
CACHE_A="--header=Host:cache_a.example.com"
fetch_until $URL "grep -c $COLOR0" 1 $CACHE_A
CACHE_B="--header=Host:cache_b.example.com"
fetch_until $URL "grep -c $COLOR0" 1 $CACHE_B
CACHE_C="--header=Host:cache_c.example.com"
fetch_until $URL "grep -c $COLOR0" 1 $CACHE_C
# All three caches are now populated.
# Track how many flushes were noticed by pagespeed processes up till this point
# in time. Note that each process/vhost separately detects the 'flush'.
# A helper function just used here to look up the cache flush count for each
# cache.
# Print the cache_flush_count statistic for one test vhost.
#
# Arguments: $1 - cache letter: a, b, or c (selects cache_$1.example.com)
# Globals:   SECONDARY_HOSTNAME, WGET_DUMP (read)
# Outputs:   the numeric counter value on stdout
#
# NOTE: every caller invokes this via $(...), i.e. in a subshell, so the
# previous global assignments never leaked — but they would have clobbered
# the caller's $URL if invoked directly.  The variables are now 'local' so
# the function is safe either way.
function cache_flush_count_scraper {
  local cache_letter=$1  # a, b, or c
  local url="$SECONDARY_HOSTNAME/ngx_pagespeed_statistics"
  local host="--header=Host:cache_${cache_letter}.example.com"
  # egrep is deprecated; grep -E is the standard spelling.  The statistic may
  # be reported as "cache_flush_count: N" or "cache_flush_count N".
  $WGET_DUMP $host $url | grep -E "^cache_flush_count:? " | awk '{print $2}'
}
# Baseline per-vhost flush counters before any flush is triggered.
NUM_INITIAL_FLUSHES_A=$(cache_flush_count_scraper a)
NUM_INITIAL_FLUSHES_B=$(cache_flush_count_scraper b)
NUM_INITIAL_FLUSHES_C=$(cache_flush_count_scraper c)

# Now change the file to $COLOR1.
echo ".class myclass { color: $COLOR1; }" > "$CSS_FILE"

# We expect to have a stale cache for 5 seconds, so the result should stay
# $COLOR0. This only works because we have only one worker process. If we had
# more than one then the worker process handling this request might be different
# than the one that got the previous one, and it wouldn't be in cache.
OUT="$($WGET_DUMP $CACHE_A "$URL")"
check_from "$OUT" fgrep $COLOR0
OUT="$($WGET_DUMP $CACHE_B "$URL")"
check_from "$OUT" fgrep $COLOR0
OUT="$($WGET_DUMP $CACHE_C "$URL")"
check_from "$OUT" fgrep $COLOR0

# Flush the cache by touching a special file in the cache directory. Now
# css gets re-read and we get $COLOR1 in the output. Sleep here to avoid
# a race due to 1-second granularity of file-system timestamp checks. For
# the test to pass we need to see time pass from the previous 'touch'.
#
# The three vhosts here all have CacheFlushPollIntervalSec set to 1.
sleep 2
check touch "$FILE_CACHE/cache.flush"
sleep 1

# Check that CACHE_A flushed properly.
fetch_until $URL "grep -c $COLOR1" 1 $CACHE_A

# Cache was just flushed, so it should see exactly one flush and the other
# two should see none.
NUM_MEDIAL_FLUSHES_A=$(cache_flush_count_scraper a)
NUM_MEDIAL_FLUSHES_B=$(cache_flush_count_scraper b)
NUM_MEDIAL_FLUSHES_C=$(cache_flush_count_scraper c)
check [ $(($NUM_MEDIAL_FLUSHES_A - $NUM_INITIAL_FLUSHES_A)) -eq 1 ]
check [ $NUM_MEDIAL_FLUSHES_B -eq $NUM_INITIAL_FLUSHES_B ]
check [ $NUM_MEDIAL_FLUSHES_C -eq $NUM_INITIAL_FLUSHES_C ]

start_test Flushing one cache does not flush all caches.
# Check that CACHE_B and CACHE_C are still serving a stale version.
OUT="$($WGET_DUMP $CACHE_B "$URL")"
check_from "$OUT" fgrep $COLOR0
OUT="$($WGET_DUMP $CACHE_C "$URL")"
check_from "$OUT" fgrep $COLOR0

start_test Secondary caches also flush.
# Now flush the other two files so they can see the color change.
check touch "$FILE_CACHE/othercache.flush"
check touch "$SECONDARY_CACHE/cache.flush"
sleep 1

# Check that CACHE_B and C flushed properly.
fetch_until $URL "grep -c $COLOR1" 1 $CACHE_B
fetch_until $URL "grep -c $COLOR1" 1 $CACHE_C

# Now cache A should see no flush while caches B and C should each see a flush.
NUM_FINAL_FLUSHES_A=$(cache_flush_count_scraper a)
NUM_FINAL_FLUSHES_B=$(cache_flush_count_scraper b)
NUM_FINAL_FLUSHES_C=$(cache_flush_count_scraper c)
check [ $NUM_FINAL_FLUSHES_A -eq $NUM_MEDIAL_FLUSHES_A ]
check [ $(($NUM_FINAL_FLUSHES_B - $NUM_MEDIAL_FLUSHES_B)) -eq 1 ]
check [ $(($NUM_FINAL_FLUSHES_C - $NUM_MEDIAL_FLUSHES_C)) -eq 1 ]

# Clean up update.css from mod_pagespeed_test so it doesn't leave behind
# a stray file not under source control.
rm -f $CSS_FILE
# connection_refused.html references modpagespeed.com:1023/someimage.png.
# Pagespeed will attempt to connect to that host and port to fetch the input
# resource using serf. We expect the connection to be refused. Relies on
# "pagespeed Domain modpagespeed.com:1023" in the config. Also relies on
# running after a cache-flush to avoid bypassing the serf fetch, since pagespeed
# remembers fetch-failures in its cache for 5 minutes.
start_test Connection refused handling

# Monitor the log starting now. tail -F will catch log rotations.
FETCHER_REFUSED_PATH=$TEMPDIR/instaweb_fetcher_refused.$$
rm -f $FETCHER_REFUSED_PATH
LOG="$TEST_TMP/error.log"
echo LOG = $LOG
tail --sleep-interval=0.1 -F $LOG > $FETCHER_REFUSED_PATH &
TAIL_PID=$!

# Wait for tail to start.  (-s: the capture file exists and is non-empty,
# i.e. tail has copied at least one byte of log output.)
echo -n "Waiting for tail to start..."
while [ ! -s $FETCHER_REFUSED_PATH ]; do
  sleep 0.1
  echo -n "."
done
echo "done!"

# Actually kick off the request.
echo $WGET_DUMP $TEST_ROOT/connection_refused.html
echo checking...
check $WGET_DUMP $TEST_ROOT/connection_refused.html > /dev/null
echo check done

# If we are spewing errors, this gives time to spew lots of them.
sleep 1

# Wait up to 10 seconds for the background fetch of someimage.png to fail.
# The expected log message differs by fetcher implementation.
if [ "$NATIVE_FETCHER" = "on" ]; then
  EXPECTED="111: Connection refused"
else
  EXPECTED="Serf status 111"
fi
for i in {1..100}; do
  ERRS=$(grep -c "$EXPECTED" $FETCHER_REFUSED_PATH)
  if [ $ERRS -ge 1 ]; then
    break;
  fi;
  echo -n "."
  sleep 0.1
done;
echo "."

# Kill the log monitor silently.
kill $TAIL_PID
wait $TAIL_PID 2> /dev/null
check [ $ERRS -ge 1 ]

# TODO(jefftk): when we support ListOutstandingUrlsOnError uncomment the below
#
## Make sure we have the URL detail we expect because ListOutstandingUrlsOnError
## is on in the config file.
#echo Check that ListOutstandingUrlsOnError works
#check grep "URL http://modpagespeed.com:1023/someimage.png active for " \
#  $FETCHER_REFUSED_PATH
# http://code.google.com/p/modpagespeed/issues/detail?id=494 -- test
# that fetching a css with embedded relative images from a different
# VirtualHost, accessing the same content, and rewrite-mapped to the
# primary domain, delivers results that are cached for a year, which
# implies the hash matches when serving vs when rewriting from HTML.
#
# This rewrites the CSS, absolutifying the embedded relative image URL
# reference based on the main server host.
WGET_ARGS=""
start_test Relative images embedded in a CSS file served from a mapped domain
DIR="mod_pagespeed_test/map_css_embedded"
URL="http://www.example.com/$DIR/issue494.html"
MAPPED_PREFIX="$DIR/A.styles.css.pagespeed.cf"
# Wait until the HTML references the rewritten (mapped-to-cdn) stylesheet.
http_proxy=$SECONDARY_HOSTNAME fetch_until $URL \
  "grep -c cdn.example.com/$MAPPED_PREFIX" 1
# Extract the full rewritten CSS path (including its content hash).
MAPPED_CSS=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $URL | \
  grep -o "$MAPPED_PREFIX..*.css")

# Now fetch the resource using a different host, which is mapped to the first
# one. To get the correct bytes, matching hash, and long TTL, we need to
# apply the domain mapping in the CSS resource fetch.
URL="http://origin.example.com/$MAPPED_CSS"
echo http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $URL
CSS_OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $URL)
# max-age=31536000 seconds == one year.
check_from "$CSS_OUT" fgrep -q "Cache-Control: max-age=31536000"

# Test ForbidFilters, which is set in the config for the VHost
# forbidden.example.com, where we've forbidden remove_quotes, remove_comments,
# collapse_whitespace, rewrite_css, and resize_images; we've also disabled
# inline_css so the link doesn't get inlined since we test that it still has all
# its quotes.
FORBIDDEN_TEST_ROOT=http://forbidden.example.com/mod_pagespeed_test
# Fetch the forbidden-filters test page and verify that the forbidden filters
# (remove_quotes, remove_comments, collapse_whitespace) were NOT applied:
# the stylesheet link keeps its quotes, an HTML comment survives, and the
# whitespace before <li> is intact.
#
# Arguments: $1 - query-parameter suffix attempting to enable the filters
#            $2 - wget request-header argument attempting the same
# Globals:   assigns QUERYP/HEADER/URL/OUTFILE (intentionally not local,
#            matching the rest of this script); reads FORBIDDEN_TEST_ROOT,
#            TEMPDIR, SECONDARY_HOSTNAME, WGET.
function test_forbid_filters() {
  HEADER="$2"
  QUERYP="$1"
  OUTFILE="$TEMPDIR/test_forbid_filters.$$"
  URL="$FORBIDDEN_TEST_ROOT/forbidden.html"
  echo http_proxy=$SECONDARY_HOSTNAME $WGET $HEADER $URL$QUERYP
  http_proxy=$SECONDARY_HOSTNAME $WGET -q -O $OUTFILE $HEADER $URL$QUERYP
  # Every one of these patterns must still be present in the response.
  for must_remain in '<link rel="stylesheet' '<!--' ' <li>'; do
    check egrep -q "$must_remain" $OUTFILE
  done
  rm -f $OUTFILE
}
start_test ForbidFilters baseline check.
test_forbid_filters "" ""
start_test ForbidFilters query parameters check.
QUERYP="?PageSpeedFilters="
QUERYP="${QUERYP}+remove_quotes,+remove_comments,+collapse_whitespace"
test_forbid_filters $QUERYP ""
start_test "ForbidFilters request headers check."
HEADER="--header=PageSpeedFilters:"
HEADER="${HEADER}+remove_quotes,+remove_comments,+collapse_whitespace"
test_forbid_filters "" $HEADER

start_test ForbidFilters disallows direct resource rewriting.
FORBIDDEN_EXAMPLE_ROOT=http://forbidden.example.com/mod_pagespeed_example
FORBIDDEN_STYLES_ROOT=$FORBIDDEN_EXAMPLE_ROOT/styles
FORBIDDEN_IMAGES_ROOT=$FORBIDDEN_EXAMPLE_ROOT/images
# .ce. is allowed  (extend_cache is not a forbidden filter)
ALLOWED="$FORBIDDEN_STYLES_ROOT/all_styles.css.pagespeed.ce.n7OstQtwiS.css"
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET -O /dev/null $ALLOWED 2>&1)
check_from "$OUT" fgrep -q "200 OK"
# .cf. is forbidden  (rewrite_css is in the ForbidFilters list)
FORBIDDEN=$FORBIDDEN_STYLES_ROOT/A.all_styles.css.pagespeed.cf.UH8L-zY4b4.css
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET -O /dev/null $FORBIDDEN 2>&1)
check_from "$OUT" fgrep -q "404 Not Found"

# The image will be optimized but NOT resized to the much smaller size,
# so it will be >200k (optimized) rather than <20k (resized).
# Use a blocking fetch to force all -allowed- rewriting to be done.
RESIZED=$FORBIDDEN_IMAGES_ROOT/256x192xPuzzle.jpg.pagespeed.ic.8AB3ykr7Of.jpg
HEADERS="$WGET_DIR/headers.$$"
http_proxy=$SECONDARY_HOSTNAME $WGET -q --server-response -O /dev/null \
  --header 'X-PSA-Blocking-Rewrite: psatest' $RESIZED >& $HEADERS
# Strip everything up to and including the colon to get the header value.
LENGTH=$(grep '^ *Content-Length:' $HEADERS | sed -e 's/.*://')
check test -n "$LENGTH"
check test $LENGTH -gt 200000
CCONTROL=$(grep '^ *Cache-Control:' $HEADERS | sed -e 's/.*://')
check_from "$CCONTROL" grep -w max-age=300
check_from "$CCONTROL" grep -w private
start_test Blocking rewrite enabled.
# We assume that blocking_rewrite_test_dont_reuse_1.jpg will not be
# rewritten on the first request since it takes significantly more time to
# rewrite than the rewrite deadline and it is not already accessed by
# another request earlier.
BLOCKING_REWRITE_URL="$TEST_ROOT/blocking_rewrite.html"
BLOCKING_REWRITE_URL+="?PageSpeedFilters=rewrite_images"
OUTFILE=$WGET_DIR/blocking_rewrite.out.html
OLDSTATS=$WGET_DIR/blocking_rewrite_stats.old
NEWSTATS=$WGET_DIR/blocking_rewrite_stats.new
# Snapshot statistics before and after the blocking fetch so we can assert
# on the deltas.
$WGET_DUMP $STATISTICS_URL > $OLDSTATS
check $WGET_DUMP --header 'X-PSA-Blocking-Rewrite: psatest'\
  $BLOCKING_REWRITE_URL -O $OUTFILE
$WGET_DUMP $STATISTICS_URL > $NEWSTATS
check_stat $OLDSTATS $NEWSTATS image_rewrites 1
check_stat $OLDSTATS $NEWSTATS cache_hits 0
check_stat $OLDSTATS $NEWSTATS cache_misses 2
check_stat $OLDSTATS $NEWSTATS cache_inserts 3
# TODO(sligocki): There is no stat num_rewrites_executed. Fix.
#check_stat $OLDSTATS $NEWSTATS num_rewrites_executed 1

start_test Blocking rewrite enabled using wrong key.
# With the wrong blocking-rewrite key the request must NOT block, so the
# first response contains no rewritten (.pagespeed.) URLs.
URL="blocking.example.com/mod_pagespeed_test/blocking_rewrite_another.html"
OUTFILE=$WGET_DIR/blocking_rewrite.out.html
http_proxy=$SECONDARY_HOSTNAME check $WGET_DUMP \
  --header 'X-PSA-Blocking-Rewrite: junk' \
  $URL > $OUTFILE
check [ $(grep -c "[.]pagespeed[.]" $OUTFILE) -lt 1 ]

# Rewriting still happens in the background; eventually it shows up.
http_proxy=$SECONDARY_HOSTNAME fetch_until $URL \
  'grep -c [.]pagespeed[.]' 1

run_post_cache_flush

# Test ForbidAllDisabledFilters, which is set in the config for
# /mod_pagespeed_test/forbid_all_disabled/disabled/ where we've disabled
# remove_quotes, remove_comments, and collapse_whitespace. We fetch 3 times
# trying to circumvent the forbidden flag: a normal fetch, a fetch using a query
# parameter to try to enable the forbidden filters, and a fetch using a request
# header to try to enable the forbidden filters.
# Verify that filters disabled under forbid_all_disabled/disabled/ cannot be
# re-enabled via query parameter or request header, while a merely-disabled
# filter (inline_css) still can be.
#
# Arguments: $1 - query-parameter suffix attempting to enable the filters
#            $2 - wget request-header argument attempting the same
# Globals:   sets QUERYP/HEADER/INLINE_CSS/URL/OUTFILE; temporarily sets
#            WGET_ARGS and clears it again on exit (other tests rely on
#            WGET_ARGS being empty).
function test_forbid_all_disabled() {
  QUERYP="$1"
  HEADER="$2"
  # -inline_css must ride along with whatever enabling attempt we make:
  # append to an existing query string, otherwise start one.
  if [ -n "$QUERYP" ]; then
    INLINE_CSS=",-inline_css"
  else
    INLINE_CSS="?PageSpeedFilters=-inline_css"
  fi
  WGET_ARGS="--header=X-PSA-Blocking-Rewrite:psatest"
  URL=$TEST_ROOT/forbid_all_disabled/disabled/forbidden.html
  OUTFILE="$TEMPDIR/test_forbid_all_disabled.$$"
  # Fetch testing that forbidden filters stay disabled: quotes, comments and
  # whitespace must all survive.
  echo $WGET $HEADER $URL$QUERYP$INLINE_CSS
  $WGET $WGET_ARGS -q -O $OUTFILE $HEADER $URL$QUERYP$INLINE_CSS
  check egrep -q '<link rel="stylesheet' $OUTFILE
  check egrep -q '<!--' $OUTFILE
  check egrep -q ' <li>' $OUTFILE
  # Fetch testing that enabling inline_css works: the stylesheet is inlined.
  echo $WGET $HEADER $URL
  $WGET $WGET_ARGS -q -O $OUTFILE $HEADER $URL
  check egrep -q '<style>.yellow' $OUTFILE
  rm -f $OUTFILE
  WGET_ARGS=""
}
start_test ForbidAllDisabledFilters baseline check.
test_forbid_all_disabled "" ""
start_test ForbidAllDisabledFilters query parameters check.
QUERYP="?PageSpeedFilters="
QUERYP="${QUERYP}+remove_quotes,+remove_comments,+collapse_whitespace"
test_forbid_all_disabled $QUERYP ""
start_test ForbidAllDisabledFilters request headers check.
HEADER="--header=PageSpeedFilters:"
HEADER="${HEADER}+remove_quotes,+remove_comments,+collapse_whitespace"
test_forbid_all_disabled "" $HEADER

# Test that we work fine with an explicitly configured SHM metadata cache.
start_test Using SHM metadata cache
HOST_NAME="http://shmcache.example.com"
URL="$HOST_NAME/mod_pagespeed_example/rewrite_images.html"
http_proxy=$SECONDARY_HOSTNAME fetch_until $URL 'grep -c .pagespeed.ic' 2

# Fetch a test resource repeatedly from the target host and verify that the
# statistics change as expected.
#
# $1: hostname
# $2: 1 if an lru cache was explicitly configured for this vhost, 0 otherwise
# $3: 1 if a shared memory metadata cache was, 0 otherwise
# Fetch a test resource twice from one vhost and verify that each cache level
# (shm / lru / file / memcached) records exactly the expected hits, misses,
# and inserts for this configuration.
#
# Arguments: $1 - vhost name prefix (requests go to $1.example.com)
#            $2 - 1 if an LRU cache was explicitly configured, else 0
#            $3 - 1 if an SHM metadata cache was explicitly configured, else 0
function test_cache_stats {
  TEST_NAME=$1
  LRU_CONFIGURED=$2
  SHM_CONFIGURED=$3

  # Expected cache participation for this configuration.  *_DATA flags cover
  # resource payloads; *_METADATA flags cover rewrite metadata entries.
  SHARED_MEMORY_METADATA=$SHM_CONFIGURED
  # There's a global shared memory cache enabled by default, which means our
  # testing vhosts will use it even if one wasn't explicitly configured.
  SHARED_MEMORY_METADATA=1
  TEST_NAME+="-defaultshm"

  FILECACHE_DATA=1
  FILECACHE_METADATA=1
  MEMCACHED_DATA=0
  MEMCACHED_METADATA=0
  TEST_NAME+="-fc"

  LRUCACHE_DATA=$LRU_CONFIGURED
  LRUCACHE_METADATA=$LRU_CONFIGURED

  if [ $SHARED_MEMORY_METADATA -eq 1 ]; then
    # If both an LRU cache and an SHM cache are configured, we only use the SHM
    # cache for metadata.
    LRUCACHE_METADATA=0
  fi

  if [ $SHM_CONFIGURED -eq 1 ]; then
    # When the shared memory cache is explicitly configured we don't write
    # metadata through to the file cache.
    FILECACHE_METADATA=0
  fi

  # For hits we have to know which cache is L1 and which is L2. The shm and lru
  # caches are always L1 if they're present, but if they're not the file or memc
  # cache is effectively L1.
  FILECACHE_DATA_L1=0
  MEMCACHED_DATA_L1=0
  FILECACHE_METADATA_L1=0
  MEMCACHED_METADATA_L1=0
  if [ $LRUCACHE_DATA -eq 0 ]; then
    # No L1 data cache, so the memcache or filecache will serve data reads.
    FILECACHE_DATA_L1=$FILECACHE_DATA
    MEMCACHED_DATA_L1=$MEMCACHED_DATA
  fi
  if [ $SHARED_MEMORY_METADATA -eq 0 -a $LRUCACHE_METADATA -eq 0 ]; then
    # No L1 metadata cache, so the memcache or filecache will serve meta reads.
    FILECACHE_METADATA_L1=$FILECACHE_METADATA
    MEMCACHED_METADATA_L1=$MEMCACHED_METADATA
  fi

  start_test "Cache configuration $TEST_NAME"

  # We don't want this to be in cache on repeat runs.
  CACHEBUSTER="$RANDOM$RANDOM"
  # Deliberately use a wrong hash ("0") so the second request misses on the
  # output resource but hits metadata plus the correctly-hashed output.
  IMAGE_PATH="http://$1.example.com/mod_pagespeed_example/styles/"
  IMAGE_PATH+="A.blue.css,qcb=$CACHEBUSTER.pagespeed.cf.0.css"

  GLOBAL_STATISTICS="ngx_pagespeed_global_statistics?PageSpeed=off"
  GLOBAL_STATISTICS_URL="http://$1.example.com/$GLOBAL_STATISTICS"

  OUTDIR_CSTH="$OUTDIR/$1"
  mkdir -p "$OUTDIR_CSTH"
  # Statistics snapshots: before first fetch, between fetches, after second.
  STATS_A="$OUTDIR_CSTH/$GLOBAL_STATISTICS"
  STATS_B="$OUTDIR_CSTH/$GLOBAL_STATISTICS.1"
  STATS_C="$OUTDIR_CSTH/$GLOBAL_STATISTICS.2"

  # Curl has much deeper debugging output, but we don't want to add a dependency
  # Use it if it exists, otherwise fall back to wget.
  #
  # These will be pipelined and served all from the same persistent connection
  # to one process. This is needed to test the per-process LRU cache.
  #
  # TODO(jefftk): The ipv4 restriction is because on one test system I was
  # consistently seeing one instead of two data cache inserts on first load when
  # using ipv6.
  if type $CURL &> /dev/null ; then
    echo "Using curl."
    set -x
    http_proxy=$SECONDARY_HOSTNAME $CURL -4 -v \
      -o "$STATS_A" $GLOBAL_STATISTICS_URL \
      -o /dev/null $IMAGE_PATH \
      -o "$STATS_B" $GLOBAL_STATISTICS_URL \
      -o /dev/null $IMAGE_PATH \
      -o "$STATS_C" $GLOBAL_STATISTICS_URL
    set +x
  else
    echo "Using wget."
    set -x
    http_proxy=$SECONDARY_HOSTNAME $WGET \
      --header='Connection: Keep-Alive' \
      --directory=$OUTDIR_CSTH \
      --prefer-family=IPv4 \
      $GLOBAL_STATISTICS_URL \
      $IMAGE_PATH \
      $GLOBAL_STATISTICS_URL \
      $IMAGE_PATH \
      $GLOBAL_STATISTICS_URL
    set +x
  fi
  check [ -e $STATS_A ]
  check [ -e $STATS_B ]
  check [ -e $STATS_C ]

  # Dump the expectation matrix for easier debugging of failures.
  echo " shm meta: $SHARED_MEMORY_METADATA"
  echo " lru data: $LRUCACHE_DATA"
  echo " lru meta: $LRUCACHE_METADATA"
  echo " file data: $FILECACHE_DATA"
  echo " file data is L1: $FILECACHE_DATA_L1"
  echo " file meta: $FILECACHE_METADATA"
  echo " file meta is L1: $FILECACHE_METADATA_L1"
  echo " memc data: $MEMCACHED_DATA"
  echo " memc data is L1: $MEMCACHED_DATA_L1"
  echo " memc meta: $MEMCACHED_METADATA"
  echo " memc meta is L1: $MEMCACHED_METADATA_L1"

  # There should be no deletes from any cache.
  ALL_CACHES="shm_cache lru_cache file_cache memcached"
  for cachename in $ALL_CACHES; do
    check_stat "$STATS_A" "$STATS_B" "${cachename}_deletes" 0
    check_stat "$STATS_B" "$STATS_C" "${cachename}_deletes" 0
  done

  # We should miss in all caches on the first try, and insert when we miss:
  # requests:
  # - output resource: miss
  # - metadata entry: miss
  # - input resource: miss
  # inserts:
  # - input resource
  # - output resource under correct hash
  # - metadata entry
  for cachename in $ALL_CACHES; do
    check_stat "$STATS_A" "$STATS_B" "${cachename}_hits" 0
  done
  # Two misses for data, one for meta.
  check_stat "$STATS_A" "$STATS_B" "shm_cache_misses" $SHARED_MEMORY_METADATA
  check_stat "$STATS_A" "$STATS_B" "lru_cache_misses" \
    $(($LRUCACHE_METADATA + 2*$LRUCACHE_DATA))
  check_stat "$STATS_A" "$STATS_B" "file_cache_misses" \
    $(($FILECACHE_METADATA + 2*$FILECACHE_DATA))
  check_stat "$STATS_A" "$STATS_B" "memcached_misses" \
    $(($MEMCACHED_METADATA + 2*$MEMCACHED_DATA))
  # Two inserts for data, one for meta.
  check_stat "$STATS_A" "$STATS_B" "shm_cache_inserts" $SHARED_MEMORY_METADATA
  check_stat "$STATS_A" "$STATS_B" "lru_cache_inserts" \
    $(($LRUCACHE_METADATA + 2*$LRUCACHE_DATA))
  check_stat "$STATS_A" "$STATS_B" "file_cache_inserts" \
    $(($FILECACHE_METADATA + 2*$FILECACHE_DATA))
  check_stat "$STATS_A" "$STATS_B" "memcached_inserts" \
    $(($MEMCACHED_METADATA + 2*$MEMCACHED_DATA))

  # Second try. We're requesting with a hash mismatch so the output resource
  # will always miss.
  # requests:
  # - output resource: miss
  # - metadata entry: hit
  # - output resource under correct hash: hit
  for cachename in $ALL_CACHES; do
    check_stat "$STATS_B" "$STATS_C" "${cachename}_inserts" 0
  done
  # One hit for data, one hit for meta.
  check_stat "$STATS_B" "$STATS_C" "shm_cache_hits" $SHARED_MEMORY_METADATA
  check_stat "$STATS_B" "$STATS_C" "lru_cache_hits" \
    $(($LRUCACHE_METADATA + $LRUCACHE_DATA))
  check_stat "$STATS_B" "$STATS_C" "file_cache_hits" \
    $(($FILECACHE_METADATA_L1 + $FILECACHE_DATA_L1))
  check_stat "$STATS_B" "$STATS_C" "memcached_hits" \
    $(($MEMCACHED_METADATA_L1 + $MEMCACHED_DATA_L1))
  # One miss for data, none for meta.
  check_stat "$STATS_B" "$STATS_C" "shm_cache_misses" 0
  check_stat "$STATS_B" "$STATS_C" "lru_cache_misses" $LRUCACHE_DATA
  check_stat "$STATS_B" "$STATS_C" "file_cache_misses" $FILECACHE_DATA
  check_stat "$STATS_B" "$STATS_C" "memcached_misses" $MEMCACHED_DATA
}
# Exercise every combination of explicitly-configured LRU and SHM caches.
test_cache_stats lrud-lrum 1 0   # lru=yes, shm=no
test_cache_stats lrud-shmm 1 1   # lru=yes, shm=yes
test_cache_stats noned-shmm 0 1  # lru=no, shm=yes
test_cache_stats noned-nonem 0 0 # lru=no, shm=no

# Test max_cacheable_response_content_length. There are two Javascript files
# in the html file. The smaller Javascript file should be rewritten while
# the larger one shouldn't.
start_test Maximum length of cacheable response content.
HOST_NAME="http://max-cacheable-content-length.example.com"
DIR_NAME="mod_pagespeed_test/max_cacheable_content_length"
HTML_NAME="test_max_cacheable_content_length.html"
URL=$HOST_NAME/$DIR_NAME/$HTML_NAME
RESPONSE_OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP --header \
  'X-PSA-Blocking-Rewrite: psatest' $URL)
check_from "$RESPONSE_OUT" fgrep -qi small.js.pagespeed.
check_not_from "$RESPONSE_OUT" fgrep -qi large.js.pagespeed.

# This test checks that the PageSpeedXHeaderValue directive works.
start_test PageSpeedXHeaderValue directive
RESPONSE_OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP \
  http://xheader.example.com/mod_pagespeed_example)
check_from "$RESPONSE_OUT" fgrep -q "X-Page-Speed: UNSPECIFIED VERSION"

# This test checks that the DomainRewriteHyperlinks directive
# can turn off. See mod_pagespeed_test/rewrite_domains.html: it has
# one <img> URL, one <form> URL, and one <a> url, all referencing
# src.example.com. Only the <img> url should be rewritten.
start_test RewriteHyperlinks off directive
HOST_NAME="http://domain-hyperlinks-off.example.com"
RESPONSE_OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP \
  $HOST_NAME/mod_pagespeed_test/rewrite_domains.html)
MATCHES=$(echo "$RESPONSE_OUT" | fgrep -c http://dst.example.com)
check [ $MATCHES -eq 1 ]

# This test checks that the DomainRewriteHyperlinks directive
# can turn on. See mod_pagespeed_test/rewrite_domains.html: it has
# one <img> URL, one <form> URL, and one <a> url, all referencing
# src.example.com. They should all be rewritten to dst.example.com.
start_test RewriteHyperlinks on directive
HOST_NAME="http://domain-hyperlinks-on.example.com"
RESPONSE_OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP \
  $HOST_NAME/mod_pagespeed_test/rewrite_domains.html)
MATCHES=$(echo "$RESPONSE_OUT" | fgrep -c http://dst.example.com)
check [ $MATCHES -eq 3 ]

# Test to make sure dynamically defined url-valued attributes are rewritten by
# rewrite_domains. See mod_pagespeed_test/rewrite_domains.html: in addition
# to having one <img> URL, one <form> URL, and one <a> url it also has one
# <span src=...> URL, one <hr imgsrc=...> URL, and one <hr src=...> URL, all
# referencing src.example.com. The first three should be rewritten because of
# hardcoded rules, the span.src and hr.imgsrc should be rewritten because of
# UrlValuedAttribute directives, and the hr.src should be left
# unmodified. The rewritten ones should all be rewritten to dst.example.com.
HOST_NAME="http://url-attribute.example.com"
TEST="$HOST_NAME/mod_pagespeed_test"
REWRITE_DOMAINS="$TEST/rewrite_domains.html"
UVA_EXTEND_CACHE="$TEST/url_valued_attribute_extend_cache.html"
UVA_EXTEND_CACHE+="?PageSpeedFilters=core,+left_trim_urls"

start_test Rewrite domains in dynamically defined url-valued attributes.

RESPONSE_OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $REWRITE_DOMAINS)
MATCHES=$(echo "$RESPONSE_OUT" | fgrep -c http://dst.example.com)
check [ $MATCHES -eq 5 ]
MATCHES=$(echo "$RESPONSE_OUT" | \
  fgrep -c '<hr src=http://src.example.com/hr-image>')
check [ $MATCHES -eq 1 ]

start_test Additional url-valued attributes are fully respected.
# Count every occurrence of a literal string on stdin.
#
# Needed because "fgrep -c" counts lines with matches, not pure matches;
# -o emits each individual match on its own line so wc -l yields the total.
#
# Arguments: $1 - literal (fixed) string to count
# Outputs:   the match count on stdout (0 when nothing matches)
function count_exact_matches() {
  # fgrep is deprecated; grep -F is the standard spelling.  "--" lets
  # patterns that begin with a dash (e.g. "-n") be counted instead of being
  # misparsed as grep options.
  grep -F -o -- "$1" | wc -l
}
# There are nine resources that should be optimized
http_proxy=$SECONDARY_HOSTNAME \
  fetch_until $UVA_EXTEND_CACHE 'count_exact_matches .pagespeed.' 9

# Make sure <custom d=...> isn't modified at all, but that everything else is
# recognized as a url and rewritten from ../foo to /foo. This means that only
# one reference to ../mod_pagespeed should remain, <custom d=...>.
http_proxy=$SECONDARY_HOSTNAME \
  fetch_until $UVA_EXTEND_CACHE 'grep -c d=.[.][.]/mod_pa' 1
http_proxy=$SECONDARY_HOSTNAME \
  fetch_until $UVA_EXTEND_CACHE 'fgrep -c ../mod_pa' 1

# There are nine images that should be optimized.
http_proxy=$SECONDARY_HOSTNAME \
  fetch_until $UVA_EXTEND_CACHE 'count_exact_matches .pagespeed.ic' 9

# Test the experiment framework (Furious).

start_test PageSpeedExperiment cookie is set.
EXP_EXAMPLE="http://experiment.example.com/mod_pagespeed_example"
EXP_EXTEND_CACHE="$EXP_EXAMPLE/extend_cache.html"
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $EXP_EXTEND_CACHE)
check_from "$OUT" fgrep "PageSpeedExperiment="
# The cookie must be set exactly once.
MATCHES=$(echo "$OUT" | grep -c "PageSpeedExperiment=")
check [ $MATCHES -eq 1 ]

start_test PageSpeedFilters query param should disable experiments.
URL="$EXP_EXTEND_CACHE?PageSpeed=on&PageSpeedFilters=rewrite_css"
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $URL)
check_not_from "$OUT" fgrep 'PageSpeedExperiment='

start_test If the user is already assigned, no need to assign them again.
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP --header='Cookie: PageSpeedExperiment=2' \
  $EXP_EXTEND_CACHE)
check_not_from "$OUT" fgrep 'PageSpeedExperiment='

start_test The beacon should include the experiment id.
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP --header='Cookie: PageSpeedExperiment=2' \
  $EXP_EXTEND_CACHE)
BEACON_CODE="pagespeed.addInstrumentationInit('/ngx_pagespeed_beacon', 'load',"
BEACON_CODE+=" '&exptid=2', 'http://experiment.example.com/"
BEACON_CODE+="mod_pagespeed_example/extend_cache.html');"
check_from "$OUT" grep "$BEACON_CODE"
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP --header='Cookie: PageSpeedExperiment=7' \
  $EXP_EXTEND_CACHE)
BEACON_CODE="pagespeed.addInstrumentationInit('/ngx_pagespeed_beacon', 'load',"
BEACON_CODE+=" '&exptid=7', 'http://experiment.example.com/"
BEACON_CODE+="mod_pagespeed_example/extend_cache.html');"
check_from "$OUT" grep "$BEACON_CODE"

start_test The no-experiment group beacon should not include an experiment id.
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP --header='Cookie: PageSpeedExperiment=0' \
  $EXP_EXTEND_CACHE)
check_not_from "$OUT" grep 'pagespeed_beacon.*exptid'

# We expect id=7 to be index=a and id=2 to be index=b because that's the
# order they're defined in the config file.
start_test Resource urls are rewritten to include experiment indexes.
# WGET_ARGS carries the experiment cookie into fetch_until's fetches.
http_proxy=$SECONDARY_HOSTNAME \
  WGET_ARGS="--header Cookie:PageSpeedExperiment=7" fetch_until $EXP_EXTEND_CACHE \
  "fgrep -c .pagespeed.a.ic." 1
http_proxy=$SECONDARY_HOSTNAME \
  WGET_ARGS="--header Cookie:PageSpeedExperiment=2" fetch_until $EXP_EXTEND_CACHE \
  "fgrep -c .pagespeed.b.ic." 1
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP --header='Cookie: PageSpeedExperiment=7' \
  $EXP_EXTEND_CACHE)
check_from "$OUT" fgrep ".pagespeed.a.ic."
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP --header='Cookie: PageSpeedExperiment=2' \
  $EXP_EXTEND_CACHE)
check_from "$OUT" fgrep ".pagespeed.b.ic."

start_test Images are different when the url specifies different experiments.
# While the images are the same, image B should be smaller because in the config
# file we enable convert_jpeg_to_progressive only for id=2 (side B). Ideally we
# would check that it was actually progressive, by checking whether "identify
# -verbose filename" produced "Interlace: JPEG" or "Interlace: None", but that
# would introduce a dependency on imagemagick. This is just as accurate, but
# more brittle (because changes to our compression code would change the
# computed file sizes).
IMG_A="$EXP_EXAMPLE/images/xPuzzle.jpg.pagespeed.a.ic.fakehash.jpg"
IMG_B="$EXP_EXAMPLE/images/xPuzzle.jpg.pagespeed.b.ic.fakehash.jpg"
http_proxy=$SECONDARY_HOSTNAME fetch_until $IMG_A 'wc -c' 102902 "" -le
http_proxy=$SECONDARY_HOSTNAME fetch_until $IMG_B 'wc -c' 98276 "" -le
start_test Analytics javascript is added for the experimental group.
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP --header='Cookie: PageSpeedExperiment=2' \
  $EXP_EXTEND_CACHE)
check_from "$OUT" fgrep -q 'Experiment: 2'
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP --header='Cookie: PageSpeedExperiment=7' \
  $EXP_EXTEND_CACHE)
check_from "$OUT" fgrep -q 'Experiment: 7'

start_test Analytics javascript is not added for the no-experiment group.
# Experiment id 0 is the control / unassigned group.
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP --header='Cookie: PageSpeedExperiment=0' \
  $EXP_EXTEND_CACHE)
check_not_from "$OUT" fgrep -q 'Experiment:'

start_test Analytics javascript is not added for any group with Analytics off.
EXP_NO_GA_EXTEND_CACHE="http://experiment.noga.example.com"
EXP_NO_GA_EXTEND_CACHE+="/mod_pagespeed_example/extend_cache.html"
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP --header='Cookie: PageSpeedExperiment=2' \
  $EXP_NO_GA_EXTEND_CACHE)
check_not_from "$OUT" fgrep -q 'Experiment:'
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP --header='Cookie: PageSpeedExperiment=7' \
  $EXP_NO_GA_EXTEND_CACHE)
check_not_from "$OUT" fgrep -q 'Experiment:'
OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP --header='Cookie: PageSpeedExperiment=0' \
  $EXP_NO_GA_EXTEND_CACHE)
check_not_from "$OUT" fgrep -q 'Experiment:'

# check_failures_and_exit will actually call exit, but we don't want it to.
# Specifically we want it to call exit 3 instead of exit 1 if it finds
# something. Reimplement it here:
#
# TODO(jefftk): change this in mod_pagespeed and push it out, then remove this
# modified copy.
# Report accumulated test failures and terminate the run.
# Reads the $FAILURES file written by earlier checks; if it exists, the
# failing test names are printed (indented) and we exit with status 3,
# otherwise we print "PASS." and exit 0.
check_failures_and_exit() {
  if [ ! -e $FAILURES ] ; then
    echo "PASS."
    exit 0
  fi
  echo Failing Tests:
  sed 's/^/  /' $FAILURES
  echo "FAIL."
  exit 3
}
start_test Make sure nostore on a subdirectory is retained
URL=$TEST_ROOT/nostore/nostore.html
HTML_HEADERS=$($WGET_DUMP $URL)
check_from "$HTML_HEADERS" egrep -q \
'Cache-Control: max-age=0, no-cache, no-store'
start_test Custom headers remain on resources, but cache should be 1 year.
URL="$TEST_ROOT/compressed/hello_js.custom_ext.pagespeed.ce.HdziXmtLIV.txt"
echo $WGET_DUMP $URL
RESOURCE_HEADERS=$($WGET_DUMP $URL)
check_from "$RESOURCE_HEADERS" egrep -q 'X-Extra-Header: 1'
# The extra header should only be added once, not twice.
check_not_from "$RESOURCE_HEADERS" egrep -q 'X-Extra-Header: 1, 1'
check [ "$(echo "$RESOURCE_HEADERS" | grep -c '^X-Extra-Header: 1')" = 1 ]
check_from "$RESOURCE_HEADERS" egrep -q 'Cache-Control: max-age=31536000'
# Test critical CSS beacon injection, beacon return, and computation. This
# requires UseBeaconResultsInFilters() to be true in rewrite_driver_factory.
# NOTE: must occur after cache flush on a repeat run. All repeat runs now
# run the cache flush test.
test_filter prioritize_critical_css
fetch_until -save $URL 'fgrep -c pagespeed.criticalCssBeaconInit' 1
check [ $(fgrep -o ".very_large_class_name_" $FETCH_FILE | wc -l) -eq 36 ]
CALL_PAT=".*criticalCssBeaconInit("
SKIP_ARG="[^,]*,"
CAPTURE_ARG="'\([^']*\)'.*"
BEACON_PATH=$(sed -n "s/${CALL_PAT}${CAPTURE_ARG}/\1/p" $FETCH_FILE)
ESCAPED_URL=$(sed -n "s/${CALL_PAT}${SKIP_ARG}${CAPTURE_ARG}/\1/p" $FETCH_FILE)
OPTIONS_HASH=$( \
sed -n "s/${CALL_PAT}${SKIP_ARG}${SKIP_ARG}${CAPTURE_ARG}/\1/p" $FETCH_FILE)
NONCE=$( \
sed -n "s/${CALL_PAT}${SKIP_ARG}${SKIP_ARG}${SKIP_ARG}${CAPTURE_ARG}/\1/p" \
$FETCH_FILE)
BEACON_URL="http://${HOSTNAME}${BEACON_PATH}?url=${ESCAPED_URL}"
BEACON_DATA="oh=${OPTIONS_HASH}&n=${NONCE}&cs=.big,.blue,.bold,.foo"
# See the comments about 204 responses and --no-http-keep-alive above.
OUT=$(wget -q --save-headers -O - --no-http-keep-alive \
--post-data "$BEACON_DATA" "$BEACON_URL")
check_from "$OUT" grep '^HTTP/1.1 204'
# Now make sure we see the correct critical css rules.
fetch_until $URL \
'grep -c <style>[.]blue{[^}]*}</style>' 1
fetch_until $URL \
'grep -c <style>[.]big{[^}]*}</style>' 1
fetch_until $URL \
'grep -c <style>[.]blue{[^}]*}[.]bold{[^}]*}</style>' 1
fetch_until -save $URL \
'grep -c <style>[.]foo{[^}]*}</style>' 1
# The last one should also have the other 3, too.
check [ `grep -c '<style>[.]blue{[^}]*}</style>' $FETCH_UNTIL_OUTFILE` = 1 ]
check [ `grep -c '<style>[.]big{[^}]*}</style>' $FETCH_UNTIL_OUTFILE` = 1 ]
check [ `grep -c '<style>[.]blue{[^}]*}[.]bold{[^}]*}</style>' \
$FETCH_UNTIL_OUTFILE` = 1 ]
# This test checks that the ClientDomainRewrite directive can turn on.
start_test ClientDomainRewrite on directive
HOST_NAME="http://client-domain-rewrite.example.com"
RESPONSE_OUT=$(http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP \
$HOST_NAME/mod_pagespeed_test/rewrite_domains.html)
MATCHES=$(echo "$RESPONSE_OUT" | grep -c pagespeed\.clientDomainRewriterInit)
check [ $MATCHES -eq 1 ]
# Verify rendered image dimensions test.
WGET_ARGS=""
start_test resize_rendered_image_dimensions with critical images beacon
HOST_NAME="http://renderedimagebeacon.example.com"
URL="$HOST_NAME/mod_pagespeed_test/image_rewriting/image_resize_using_rendered_dimensions.html"
http_proxy=$SECONDARY_HOSTNAME\
fetch_until -save -recursive $URL 'fgrep -c "pagespeed_url_hash"' 1 \
'--header=X-PSA-Blocking-Rewrite:psatest'
check [ $(grep -c "^pagespeed\.criticalImagesBeaconInit" \
$WGET_DIR/image_resize_using_rendered_dimensions.html) = 1 ];
OPTIONS_HASH=$(awk -F\' '/^pagespeed\.criticalImagesBeaconInit/ {print $(NF-3)}' \
$WGET_DIR/image_resize_using_rendered_dimensions.html)
NONCE=$(awk -F\' '/^pagespeed\.criticalImagesBeaconInit/ {print $(NF-1)}' \
$WGET_DIR/image_resize_using_rendered_dimensions.html)
# Send a beacon response using POST indicating that OptPuzzle.jpg is
# critical and has rendered dimensions.
BEACON_URL="$HOST_NAME/ngx_pagespeed_beacon"
BEACON_URL+="?url=http%3A%2F%2Frenderedimagebeacon.example.com%2Fmod_pagespeed_test%2F"
BEACON_URL+="image_rewriting%2Fimage_resize_using_rendered_dimensions.html"
BEACON_DATA="oh=$OPTIONS_HASH&n=$NONCE&ci=1344500982&rd=%7B%221344500982%22%3A%7B%22renderedWidth%22%3A150%2C%22renderedHeight%22%3A100%2C%22originalWidth%22%3A256%2C%22originalHeight%22%3A192%7D%7D"
OUT=$(env http_proxy=$SECONDARY_HOSTNAME \
$WGET_DUMP --post-data "$BEACON_DATA" "$BEACON_URL")
check_from "$OUT" egrep -q "HTTP/1[.]. 204"
http_proxy=$SECONDARY_HOSTNAME \
fetch_until -save -recursive $URL \
'fgrep -c 150x100xOptPuzzle.jpg.pagespeed.ic.' 1
# Verify that we can send a critical image beacon and that lazyload_images
# does not try to lazyload the critical images.
WGET_ARGS=""
start_test lazyload_images,rewrite_images with critical images beacon
HOST_NAME="http://imagebeacon.example.com"
URL="$HOST_NAME/mod_pagespeed_test/image_rewriting/rewrite_images.html"
# There are 3 images on rewrite_images.html. Check that they are all
# lazyloaded by default.
http_proxy=$SECONDARY_HOSTNAME\
fetch_until -save -recursive $URL 'fgrep -c pagespeed_lazy_src=' 3
check [ $(grep -c "^pagespeed\.criticalImagesBeaconInit" \
$WGET_DIR/rewrite_images.html) = 1 ];
# We need the options hash and nonce to send a critical image beacon, so
# extract them from the injected beacon JS. The beaconInit call's arguments
# are single-quoted, so with -F\' the hash is field NF-3 and the nonce NF-1.
# (A second grep|awk recomputation of OPTIONS_HASH was removed here: it
# produced the identical value and only clobbered the first assignment.)
OPTIONS_HASH=$(awk -F\' '/^pagespeed\.criticalImagesBeaconInit/ {print $(NF-3)}' \
  $WGET_DIR/rewrite_images.html)
NONCE=$(awk -F\' '/^pagespeed\.criticalImagesBeaconInit/ {print $(NF-1)}' \
  $WGET_DIR/rewrite_images.html)
# Send a beacon response using POST indicating that Puzzle.jpg is a critical
# image.
BEACON_URL="$HOST_NAME/ngx_pagespeed_beacon"
BEACON_URL+="?url=http%3A%2F%2Fimagebeacon.example.com%2Fmod_pagespeed_test%2F"
BEACON_URL+="image_rewriting%2Frewrite_images.html"
BEACON_DATA="oh=$OPTIONS_HASH&n=$NONCE&ci=2932493096"
# See the comments about 204 responses and --no-http-keep-alive above.
OUT=$(env http_proxy=$SECONDARY_HOSTNAME \
wget -q --save-headers -O - --no-http-keep-alive \
--post-data "$BEACON_DATA" "$BEACON_URL")
echo $OUT
check_from "$OUT" egrep -q "HTTP/1[.]. 204"
# Now only 2 of the images should be lazyloaded, Cuppa.png should not be.
http_proxy=$SECONDARY_HOSTNAME \
fetch_until -save -recursive $URL 'fgrep -c pagespeed_lazy_src=' 2
# Now test sending a beacon with a GET request, instead of POST. Indicate that
# Puzzle.jpg and Cuppa.png are the critical images. In practice we expect only
# POSTs to be used by the critical image beacon, but both code paths are
# supported. We add query params to URL to ensure that we get an instrumented
# page without blocking.
URL="$URL?id=4"
http_proxy=$SECONDARY_HOSTNAME\
fetch_until -save -recursive $URL 'fgrep -c pagespeed_lazy_src=' 3
check [ $(grep -c "^pagespeed\.criticalImagesBeaconInit" \
"$WGET_DIR/rewrite_images.html?id=4") = 1 ];
OPTIONS_HASH=$(awk -F\' '/^pagespeed\.criticalImagesBeaconInit/ {print $(NF-3)}' \
"$WGET_DIR/rewrite_images.html?id=4")
NONCE=$(awk -F\' '/^pagespeed\.criticalImagesBeaconInit/ {print $(NF-1)}' \
"$WGET_DIR/rewrite_images.html?id=4")
BEACON_URL="$HOST_NAME/ngx_pagespeed_beacon"
BEACON_URL+="?url=http%3A%2F%2Fimagebeacon.example.com%2Fmod_pagespeed_test%2F"
BEACON_URL+="image_rewriting%2Frewrite_images.html%3Fid%3D4"
BEACON_DATA="oh=$OPTIONS_HASH&n=$NONCE&ci=2932493096"
# Add the hash for Cuppa.png to BEACON_DATA, which will be used as the query
# params for the GET.
BEACON_DATA+=",2644480723"
OUT=$(env http_proxy=$SECONDARY_HOSTNAME \
$WGET_DUMP "$BEACON_URL&$BEACON_DATA")
check_from "$OUT" egrep -q "HTTP/1[.]. 204"
# Now only BikeCrashIcn.png should be lazyloaded.
http_proxy=$SECONDARY_HOSTNAME \
fetch_until -save -recursive $URL 'fgrep -c pagespeed_lazy_src=' 1
start_test keepalive with html rewriting
keepalive_test "keepalive-html.example.com"\
"/mod_pagespeed_example/rewrite_images.html" ""
start_test keepalive with serving resources
keepalive_test "keepalive-resource.example.com"\
"/mod_pagespeed_example/combine_javascript2.js+combine_javascript1.js+combine_javascript2.js.pagespeed.jc.0.js"\
""
BEACON_URL="http%3A%2F%2Fimagebeacon.example.com%2Fmod_pagespeed_test%2F"
start_test keepalive with beacon get requests
keepalive_test "keepalive-beacon-get.example.com"\
"/ngx_pagespeed_beacon?ets=load:13&url=$BEACON_URL" ""
BEACON_DATA="url=http%3A%2F%2Fimagebeacon.example.com%2Fmod_pagespeed_test%2F"
BEACON_DATA+="image_rewriting%2Frewrite_images.html"
BEACON_DATA+="&oh=$OPTIONS_HASH&ci=2932493096"
start_test keepalive with beacon post requests
keepalive_test "keepalive-beacon-post.example.com" "/ngx_pagespeed_beacon"\
"$BEACON_DATA"
start_test keepalive with static resources
keepalive_test "keepalive-static.example.com"\
"/ngx_pagespeed_static/js_defer.0.js" ""
# Test for MaxCombinedCssBytes. The html used in the test, 'combine_css.html',
# has 4 CSS files in the following order.
# yellow.css : 36 bytes
# blue.css : 21 bytes
# big.css : 4307 bytes
# bold.css : 31 bytes
# Because the threshold was chosen as '57', only the first two CSS files
# are combined.
test_filter combine_css Maximum size of combined CSS.
QUERY_PARAM="PageSpeedMaxCombinedCssBytes=57"
URL="$URL?$QUERY_PARAM"
# Make sure that we have got the last CSS file and it is not combined.
fetch_until -save $URL 'grep -c styles/bold.css\"' 1
# Now check that the 1st and 2nd CSS files are combined, but the 3rd
# one is not.
check [ $(grep -c 'styles/yellow.css+blue.css.pagespeed.' \
$FETCH_UNTIL_OUTFILE) = 1 ]
check [ $(grep -c 'styles/big.css\"' $FETCH_UNTIL_OUTFILE) = 1 ]
# Test to make sure we have a sane Connection Header. See
# https://code.google.com/p/modpagespeed/issues/detail?id=664
#
# Note that this bug is dependent on seeing a resource for the first time in the
# InPlaceResourceOptimization path, because in that flow we are caching the
# response-headers from the server. The reponse-headers from Serf never seem to
# include the Connection header. So we have to pick a JS file that is not
# otherwise used after cache is flushed in this block.
WGET_ARGS=""
start_test Sane Connection header
URL="$TEST_ROOT/normal.js"
fetch_until -save $URL 'grep -c W/\"PSA-aj-' 1 --save-headers
CONNECTION=$(extract_headers $FETCH_UNTIL_OUTFILE | fgrep "Connection:")
check_not_from "$CONNECTION" fgrep -qi "Keep-Alive, Keep-Alive"
check_from "$CONNECTION" fgrep -qi "Keep-Alive"
test_filter ngx_pagespeed_static defer js served with correct headers.
# First, determine which hash js_defer is served with. We need a correct hash
# to get it served up with an Etag, which is one of the things we want to test.
URL="$HOSTNAME/mod_pagespeed_example/defer_javascript.html?PageSpeed=on&PageSpeedFilters=defer_javascript"
OUT=$($WGET_DUMP $URL)
HASH=$(echo $OUT \
| grep --only-matching "/js_defer\\.*\([^.]\)*.js" | cut -d '.' -f 2)
JS_URL="$HOSTNAME/ngx_pagespeed_static/js_defer.$HASH.js"
JS_HEADERS=$($WGET -O /dev/null -q -S --header='Accept-Encoding: gzip' \
$JS_URL 2>&1)
check_from "$JS_HEADERS" egrep -qi 'HTTP/1[.]. 200 OK'
check_from "$JS_HEADERS" fgrep -qi 'Content-Encoding: gzip'
check_from "$JS_HEADERS" fgrep -qi 'Vary: Accept-Encoding'
# Nginx's gzip module clears etags, which we don't want. Make sure we have it.
check_from "$JS_HEADERS" egrep -qi 'Etag: W/"0"'
check_from "$JS_HEADERS" fgrep -qi 'Last-Modified:'
start_test PageSpeedFilters response headers is interpreted
URL=$SECONDARY_HOSTNAME/mod_pagespeed_example/
OUT=$($WGET_DUMP --header=Host:response-header-filters.example.com $URL)
check_from "$OUT" egrep -qi 'addInstrumentationInit'
OUT=$($WGET_DUMP --header=Host:response-header-disable.example.com $URL)
check_not_from "$OUT" egrep -qi 'addInstrumentationInit'
start_test IPRO flow uses cache as expected.
# TODO(sligocki): Use separate VHost instead to separate stats.
STATS=$OUTDIR/blocking_rewrite_stats
IPRO_ROOT=http://ipro.example.com/mod_pagespeed_test/ipro
URL=$IPRO_ROOT/test_image_dont_reuse2.png
IPRO_STATS_URL=http://ipro.example.com/ngx_pagespeed_statistics?PageSpeed=off
# Initial stats.
http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $IPRO_STATS_URL > $STATS.0
# First IPRO request.
http_proxy=$SECONDARY_HOSTNAME check $WGET_DUMP $URL -O /dev/null
http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $IPRO_STATS_URL > $STATS.1
# Resource not in cache the first time.
check_stat $STATS.0 $STATS.1 cache_hits 0
check_stat $STATS.0 $STATS.1 cache_misses 1
check_stat $STATS.0 $STATS.1 ipro_served 0
check_stat $STATS.0 $STATS.1 ipro_not_rewritable 0
# So we run the ipro recorder flow and insert it into the cache.
check_stat $STATS.0 $STATS.1 ipro_not_in_cache 1
check_stat $STATS.0 $STATS.1 ipro_recorder_resources 1
check_stat $STATS.0 $STATS.1 ipro_recorder_inserted_into_cache 1
# Image doesn't get rewritten the first time.
# TODO(sligocki): This should change to 1 when we get image rewrites started
# in the Apache output filter flow.
check_stat $STATS.0 $STATS.1 image_rewrites 0
# Second IPRO request.
http_proxy=$SECONDARY_HOSTNAME check $WGET_DUMP $URL -O /dev/null
# Wait for image rewrite to finish.
sleep 1
# TODO(sligocki): Replace sleep with some sort of reasonable check.
# Unfortunately bash has thwarted my every effort to compose a reasonable
# check. Both the below checks do not run:
#fetch_until $IPRO_STATS_URL \
# 'grep image_ongoing_rewrites | egrep -o "[0-9]"' 0
#fetch_until $IPRO_STATS_URL \
# "sed -ne 's/^.*image_ongoing_rewrites: *\([0-9]*\).*$/\1/p'" 0
http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $IPRO_STATS_URL > $STATS.2
# Resource is found in cache the second time.
check_stat $STATS.1 $STATS.2 cache_hits 1
check_stat $STATS.1 $STATS.2 ipro_served 1
check_stat $STATS.1 $STATS.2 ipro_not_rewritable 0
# So we don't run the ipro recorder flow.
check_stat $STATS.1 $STATS.2 ipro_not_in_cache 0
check_stat $STATS.1 $STATS.2 ipro_recorder_resources 0
# Image gets rewritten on the second pass through this filter.
# TODO(sligocki): This should change to 0 when we get image rewrites started
# in the Apache output filter flow.
check_stat $STATS.1 $STATS.2 image_rewrites 1
http_proxy=$SECONDARY_HOSTNAME check $WGET_DUMP $URL -O /dev/null
http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $IPRO_STATS_URL > $STATS.3
check_stat $STATS.2 $STATS.3 cache_hits 1
check_stat $STATS.2 $STATS.3 ipro_served 1
check_stat $STATS.2 $STATS.3 ipro_recorder_resources 0
check_stat $STATS.2 $STATS.3 image_rewrites 0
start_test "IPRO flow doesn't copy uncacheable resources multiple times."
URL=$IPRO_ROOT/nocache/test_image_dont_reuse.png
# Initial stats.
http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $IPRO_STATS_URL > $STATS.0
# First IPRO request.
http_proxy=$SECONDARY_HOSTNAME check $WGET_DUMP $URL -O /dev/null
http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $IPRO_STATS_URL > $STATS.1
# Resource not in cache the first time.
check_stat $STATS.0 $STATS.1 cache_hits 0
check_stat $STATS.0 $STATS.1 cache_misses 1
check_stat $STATS.0 $STATS.1 ipro_served 0
check_stat $STATS.0 $STATS.1 ipro_not_rewritable 0
# So we run the ipro recorder flow, but the resource is not cacheable.
check_stat $STATS.0 $STATS.1 ipro_not_in_cache 1
check_stat $STATS.0 $STATS.1 ipro_recorder_resources 1
check_stat $STATS.0 $STATS.1 ipro_recorder_not_cacheable 1
# Uncacheable, so no rewrites.
check_stat $STATS.0 $STATS.1 image_rewrites 0
check_stat $STATS.0 $STATS.1 image_ongoing_rewrites 0
# Second IPRO request.
http_proxy=$SECONDARY_HOSTNAME check $WGET_DUMP $URL -O /dev/null
http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $IPRO_STATS_URL > $STATS.2
check_stat $STATS.1 $STATS.2 cache_hits 0
# Note: This should load a RecentFetchFailed record from cache, but that
# is reported as a cache miss.
check_stat $STATS.1 $STATS.2 cache_misses 1
check_stat $STATS.1 $STATS.2 ipro_served 0
check_stat $STATS.1 $STATS.2 ipro_not_rewritable 1
# Important: We do not record this resource the second and third time
# because we remember that it was not cacheable.
check_stat $STATS.1 $STATS.2 ipro_not_in_cache 0
check_stat $STATS.1 $STATS.2 ipro_recorder_resources 0
check_stat $STATS.1 $STATS.2 image_rewrites 0
check_stat $STATS.1 $STATS.2 image_ongoing_rewrites 0
http_proxy=$SECONDARY_HOSTNAME check $WGET_DUMP $URL -O /dev/null
http_proxy=$SECONDARY_HOSTNAME $WGET_DUMP $IPRO_STATS_URL > $STATS.3
# Same as second fetch.
check_stat $STATS.2 $STATS.3 cache_hits 0
check_stat $STATS.2 $STATS.3 cache_misses 1
check_stat $STATS.2 $STATS.3 ipro_not_rewritable 1
check_stat $STATS.2 $STATS.3 ipro_recorder_resources 0
check_stat $STATS.2 $STATS.3 image_rewrites 0
check_stat $STATS.2 $STATS.3 image_ongoing_rewrites 0
start_test IPRO-optimized resources should have fixed size, not chunked.
URL="$EXAMPLE_ROOT/images/Puzzle.jpg"
URL+="?PageSpeedJpegRecompressionQuality=75"
fetch_until -save $URL "wc -c" 90000 "--save-headers" "-lt"
check_from "$(extract_headers $FETCH_UNTIL_OUTFILE)" fgrep -q 'Content-Length:'
CONTENT_LENGTH=$(extract_headers $FETCH_UNTIL_OUTFILE | \
awk '/Content-Length:/ {print $2}')
check [ "$CONTENT_LENGTH" -lt 90000 ];
check_not_from "$(extract_headers $FETCH_UNTIL_OUTFILE)" \
fgrep -q 'Transfer-Encoding: chunked'
# Test handling of large HTML files. We first test with a cold cache, and verify
# that we bail out of parsing and insert a script redirecting to
# ?PageSpeed=off. This should also insert an entry into the property cache so
# that the next time we fetch the file it will not be parsed at all.
start_test Handling of large files.
# Add a timestamp to the URL to ensure it's not in the property cache.
FILE="max_html_parse_size/large_file.html?value=$(date +%s)"
URL=$TEST_ROOT/$FILE
# Enable a filter that will modify something on this page, since we are testing
# this page should not be rewritten.
WGET_ARGS="--header=PageSpeedFilters:rewrite_images"
WGET_EC="$WGET_DUMP $WGET_ARGS"
echo $WGET_EC $URL
LARGE_OUT=$($WGET_EC $URL)
check_from "$LARGE_OUT" grep -q window.location=".*&ModPagespeed=off"
# The file should now be in the property cache so make sure that the page is no
# longer parsed. Use fetch_until because we need to wait for a potentially
# non-blocking write to the property cache from the previous test to finish
# before this will succeed.
fetch_until -save $URL 'grep -c window.location=".*&ModPagespeed=off"' 0
check_not fgrep -q pagespeed.ic $FETCH_FILE
start_test messages load
OUT=$($WGET_DUMP "$HOSTNAME/ngx_pagespeed_message")
check_not_from "$OUT" grep "Writing to ngx_pagespeed_message failed."
check_from "$OUT" grep -q "/mod_pagespeed_example"
start_test Check keepalive after a 304 responses.
# '-m 2' specifies that the whole operation is allowed to take 2 seconds max.
curl -vv -m 2 http://$PRIMARY_HOSTNAME/foo.css.pagespeed.ce.0.css \
-H 'If-Modified-Since: Z' http://$PRIMARY_HOSTNAME/foo
check [ $? = "0" ]
start_test Date response header set
OUT=$($WGET_DUMP $EXAMPLE_ROOT/combine_css.html)
check_not_from "$OUT" egrep -q '^Date: Thu, 01 Jan 1970 00:00:00 GMT'
OUT=$($WGET_DUMP --header=Host:date.example.com \
http://$SECONDARY_HOSTNAME/mod_pagespeed_example/combine_css.html)
check_from "$OUT" egrep -q '^Date: Fri, 16 Oct 2009 23:05:07 GMT'
if $USE_VALGRIND; then
kill -s quit $VALGRIND_PID
wait
# Clear the previously set trap, we don't need it anymore.
trap - EXIT
start_test No Valgrind complaints.
check_not [ -s "$TEST_TMP/valgrind.log" ]
fi
check_failures_and_exit
| true
|
f6111e2b5b63cd03b83a14a113e6cbbfe651be8d
|
Shell
|
cy20lin/crouton_backup
|
/crouton/bin/unmount-chroot
|
UTF-8
| 18,675
| 4.375
| 4
|
[] |
no_license
|
#!/bin/sh -e
# Copyright (c) 2016 The crouton Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -e
APPLICATION="${0##*/}"
ALLCHROOTS=''
BINDIR="`dirname "\`readlink -f -- "$0"\`"`"
CHROOTS="`readlink -m -- "$BINDIR/../chroots"`"
EXCLUDEROOT=''
FORCE=''
PRINT=''
SIGNAL='TERM'
TRIES=5
YES=''
ROOT="`readlink -m -- '/var/run/crouton'`"
USAGE="$APPLICATION [options] name [...]
Unmounts one or more chroots, optionally killing any processes still running
inside them.
By default, it will run in interactive mode where it will ask to kill any
remaining processes if unable to unmount the chroot within 5 seconds.
Options:
-a Unmount all chroots in the CHROOTS directory.
-c CHROOTS Directory the chroots are in. Default: $CHROOTS
-f Forces a chroot to unmount, potentially breaking or killing
other instances of the same chroot.
-k KILL Send the processes SIGKILL instead of SIGTERM.
-p Print to STDOUT the processes stopping a chroot from unmounting.
-t TRIES Number of seconds to try before signalling the processes.
Use -t inf to be exceedingly patient. Default: $TRIES
-x Keep the root directory of the chroot mounted.
-y Signal any remaining processes without confirmation.
Automatically escalates from SIGTERM to SIGKILL."
# Common functions
# Exits the script with exit code $1, spitting out message $@ to stderr
# Print a message to stderr and terminate with the given exit status.
# Status 1 (user-facing errors) is highlighted in red via echo_color; any
# other status is printed plain.
# $1: exit status code
# $@ (remaining): message text
error() {
    local status="$1"
    shift
    if [ "$status" -ne 1 ]; then
        echo "$*" 1>&2
    else
        echo_color "r" "$*" 1>&2
    fi
    exit "$status"
}
# Setup trap ($1) in case of interrupt or error.
# Traps explicitly do not exit on command error.
# Traps are first disabled to avoid executing clean-up commands twice.
# In the case of interrupts, exit is called to avoid the script continuing.
# $1 must either be empty or end in a semicolon.
# Install the interrupt/exit handlers.
# $1: cleanup commands to run; must be empty or end in a semicolon.
settrap() {
    # On interrupt: disable errexit, clear the traps (so cleanup cannot
    # re-trigger them), run the cleanup commands, then exit 2.
    trap "set +e; trap - INT HUP TERM 0; $1 exit 2" INT HUP TERM
    # On normal exit (trap 0): same, but preserve the script's own exit flow.
    trap "set +e; trap - INT HUP TERM 0; $1" 0
}
# Prepend a command to the existing $TRAP
addtrap() {
    # Save the current trap string so undotrap can roll this change back.
    OLDTRAP="$TRAP"
    # Prepend: newest cleanup runs first.
    TRAP="$1;$TRAP"
    settrap "$TRAP"
}
# Revert the last trap change
undotrap() {
    # Restore the trap state saved by the most recent addtrap call.
    # NOTE(review): only one level of undo is kept (OLDTRAP is a single slot).
    TRAP="$OLDTRAP"
    settrap "$TRAP"
}
# Works mostly like built-in getopts but silently coalesces positional arguments.
# Does not take parameters. Set getopts_string prior to calling.
# Sets getopts_var and getopts_arg.
# $@ will be left with the positional arguments, so you should NOT shift at all.
# In bash, enables alias expansion, but that shouldn't impact anything.
shopt -q -s expand_aliases 2>/dev/null || true
# Running count of the number of positional arguments
# We're done processing if all of the remaining arguments are positional.
getopts_npos=0
getopts_dashdash=''
alias getopts_nextarg='getopts_ret=1
while [ "$#" -gt "$getopts_npos" ]; do
if [ -z "$getopts_dashdash" ] && getopts "$getopts_string" getopts_var; then
if [ "$(($#+1-OPTIND))" -lt "$getopts_npos" ]; then
# Bad parameter usage ate a positional argument.
# Generate the proper error message by abusing getopts.
set -- "-$getopts_var"
getopts "$getopts_var:" getopts_var
shift
fi
getopts_arg="$OPTARG"
getopts_ret=0
# Avoid -- confusion by shifting if OPTARG is set
if [ -n "$OPTARG" ]; then
shift "$((OPTIND-1))"
OPTIND=1
fi
break
fi
# Do not let getopts consume a --
if [ "$OPTIND" -gt 1 ]; then
shift "$((OPTIND-2))"
if [ "$1" != "--" ]; then
shift
fi
fi
OPTIND=1
if [ -z "$getopts_dashdash" -a "$1" = "--" ]; then
# Still need to loop through to fix the ordering
getopts_dashdash=y
else
set -- "$@" "$1"
getopts_npos="$((getopts_npos+1))"
fi
shift
done
[ "$getopts_ret" = 0 ]'
# Compares $RELEASE to the specified releases, assuming $DISTRODIR/releases is
# sorted oldest to newest. Every two parameters are considered criteria that are
# ORed together. The first parameter is the comparator, as provided to "test".
# The second parameter is the release to compare to. A comparison against a
# release from a different distro always fails. Since either $DISTRODIR/releases
# has to be readable or the release list has to be embedded, and RELEASE has to
# be set properly, this function should only be used in the context of targets.
# Returns non-zero if the release doesn't match
# Example: release -ge quantal -ge wheezy
release() {
    # Criteria must come in (comparator, release) pairs.
    if [ "$(($# % 2))" -ne 0 ]; then
        error 3 "$(echo_color "y" "invalid parameters to release(): $*")"
    fi
    # Load up the list of releases; this will be replaced with a literal list
    local releases="`cat "$DISTRODIR/releases" 2>/dev/null`"
    if [ -z "$releases" ]; then
        error 3 "$(echo_color "y" "list of releases for $DISTRO not found")"
    fi
    # End-of-word regex for awk
    local eow='([^a-z]|$)'
    # Ordinal (line number) of $RELEASE in the oldest-to-newest list; all
    # comparisons below are done on these ordinals.
    local relnum="`echo "$releases" | awk "/^$RELEASE$eow/ {print NR; exit}"`"
    if [ -z "$relnum" ]; then
        error 3 "$(echo_color "y" "$RELEASE not found in $DISTRO")"
    fi
    # OR together each (comparator, release) criterion. A release belonging
    # to a different distro is absent from the list, so $cmp stays empty and
    # that criterion simply fails, as documented.
    while [ "$#" -ge 2 ]; do
        local cmp="`echo "$releases" | awk "/^$2$eow/ {print NR; exit}"`"
        if [ -n "$cmp" ] && test "$relnum" "$1" "$cmp"; then
            return 0
        fi
        shift 2
    done
    return 1
}
# Large writes to slow devices (e.g. SD card or USB stick) can cause a task to
# be stuck for longer than 120 seconds, which triggers a kernel panic (and an
# immediate reboot). Instead of disabling the timeout altogether, we just make
# sure the kernel does not panic (this is the default configuration of a vanilla
# kernel). See crbug.com/260955 for details.
disablehungtask() {
    # 0 = warn only: never panic (and thus reboot) when a task has been
    # stuck in uninterruptible sleep for longer than the hung-task timeout.
    echo 0 > /proc/sys/kernel/hung_task_panic
}
# Run an awk program, without buffering its output.
# unbuffered_awk 'program' [argument ...]
# In the awk code, all "print" calls must be replaced by "output"
#
# - Detects whether to run mawk or gawk (mawk is preferred as it is faster),
# - Prepends the definition of the "output" function to the awk program
# - Run the modified awk program.
# mawk needs '-W interactive' to flush the output properly (fflush does not
# work as documented, but apparently this behaviour is intentional, see
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=593504).
# Furthermore, fflush is not POSIX so we cannot use any other awk flavor.
# Run an awk program with unbuffered (line-flushed) output.
# The program must call output() wherever it would normally call print;
# the output() helper (print + fflush) is prepended to the program text.
# mawk is preferred for speed but needs '-W interactive' to actually flush;
# gawk is the fallback. POSIX awk lacks fflush, so nothing else is tried.
# $1: awk program text
# $@ (remaining): extra arguments passed through to awk
unbuffered_awk() {
    local interpreter=''
    if hash mawk 2>/dev/null; then
        interpreter='mawk -W interactive'
    elif hash gawk 2>/dev/null; then
        interpreter='gawk'
    fi
    if [ -z "$interpreter" ]; then
        echo "Cannot find mawk or gawk." 1>&2
        return 1
    fi
    local body="$1"
    shift
    $interpreter '
    function output(s) {
        print s
        fflush()
    }
    '"$body" "$@"
}
# Validate a chroot name: It cannot contain any /, and cannot be ".", ".." nor
# an empty string.
# Check that a chroot name is safe to use as a single path component:
# it must be non-empty, must not contain '/', and must not be "." or "..".
# $1: candidate name
# Returns non-zero if the name is invalid.
validate_name() {
    case "$1" in
        ''|.|..|*/*) return 1;;
    esac
    return 0
}
# Returns the mountpoint a path is on. The path doesn't need to exist.
# $1: the path to check
# outputs on stdout
getmountpoint() {
    # Canonicalize first; the path itself need not exist.
    mp="`readlink -m -- "$1"`"
    # Walk up the ancestry until stat can report the mountpoint of an
    # existing directory; ${mp:-/} anchors the loop at "/" once the path
    # components are exhausted. Result is printed by stat on success.
    # NOTE(review): mp is deliberately not local — it is left set for the
    # caller; confirm before changing.
    while ! stat -c '%m' "${mp:-/}" 2>/dev/null; do
        mp="${mp%/*}"
    done
}
# Echos to stderr, or /dev/tty if stdin is a tty but stderr is not
# Print a message where the user can see it: normally stderr, but when
# stdin is a terminal while stderr has been redirected away, fall back to
# writing directly to the controlling terminal.
echo_tty() {
    if [ ! -t 0 ] || [ -t 2 ]; then
        echo "$@" 1>&2
        return
    fi
    echo "$@" 1>/dev/tty
}
# Outputs colored text to stdout
# usage: echo_color [l][color] [colored string] [uncolored string]
# Specifying "t" (thick) uses bold text.
# Color can be red, green, yellow, blue, magenta, cyan.
# Color can be specified with just the first letter.
# example: echo_color "tr" "bold red" "normal text"
echo_color() {
    # If not outputting to a tty print no colors.
    if [ ! -t 2 ]; then
        shift
        echo "$@"
        return
    fi
    # Begin the ANSI SGR escape sequence.
    printf "\033["
    local c="$1"
    # A leading "t" (thick) selects bold (SGR 1); otherwise normal (SGR 0).
    if [ "${c#t}" != "$c" ]; then
        printf "1;"
        c="${c#t}"
    else
        printf "0;"
    fi
    # Map the first letter of the color name to its ANSI foreground code;
    # anything unrecognized falls back to white.
    case "$c" in
        r*) printf "31m";;
        g*) printf "32m";;
        y*) printf "33m";;
        b*) printf "34m";;
        m*) printf "35m";;
        c*) printf "36m";;
        *) printf "37m";;
    esac
    shift
    # Optional -n after the color suppresses the trailing newline.
    local s='\n'
    if [ "$1" = '-n' ]; then
        s=''
        shift
    fi
    # echo treats '-n' as a flag even when quoted, so these print their
    # argument with no trailing newline; color is reset between the colored
    # and uncolored parts.
    echo '-n' "$1"
    printf "\033[0m"
    shift
    echo '-n' "$*"
    printf "$s"
}
# Websocket interface
PIPEDIR='/tmp/crouton-ext'
CRIATDISPLAY="$PIPEDIR/kiwi-display"
CROUTONLOCKDIR='/tmp/crouton-lock'
# Write a command to croutonwebsocket, and read back response
# Write a command to croutonwebsocket via the FIFO pair in $PIPEDIR and
# echo the response to stdout. On any transport problem an "EError ..."
# line is produced instead, so callers can always parse the first character.
websocketcommand() {
    # Check that $PIPEDIR and the FIFO pipes exist
    if ! [ -d "$PIPEDIR" -a -p "$PIPEDIR/in" -a -p "$PIPEDIR/out" ]; then
        echo "EError $PIPEDIR/in or $PIPEDIR/out are not pipes."
        exit 0
    fi
    # Serialize request/response round-trips via flock on fd 5; give up
    # after 3 seconds so a dead extension cannot hang us.
    if ! timeout 3 \
            sh -c "flock 5; cat > '$PIPEDIR/in';
                   cat '$PIPEDIR/out'" 5>"$PIPEDIR/lock"; then
        echo "EError timeout"
    fi
    # Best-effort: keep the pipe directory usable by the crouton group.
    chmod -Rf g+rwX "$PIPEDIR" || true
    chgrp -Rf crouton "$PIPEDIR" || true
}
# Process arguments
getopts_string='ac:fkpt:xy'
while getopts_nextarg; do
    case "$getopts_var" in
    a) ALLCHROOTS='y';;
    c) CHROOTS="`readlink -m -- "$getopts_arg"`";;
    f) FORCE='y';;
    k) SIGNAL="KILL";;
    p) PRINT='y';;
    t) TRIES="$getopts_arg";;
    x) EXCLUDEROOT='y';;
    y) YES='a';;
    \?) error 2 "$USAGE";;
    esac
done

# Need at least one chroot listed, or -a; not both.
if [ $# = 0 -a -z "$ALLCHROOTS" ] || [ $# != 0 -a -n "$ALLCHROOTS" ]; then
    error 2 "$USAGE"
fi

# Make sure TRIES is valid; -1 internally means "retry forever" (-t inf).
if [ "$TRIES" = inf ]; then
    TRIES=-1
elif [ "$TRIES" -lt -1 ]; then
    error 2 "$USAGE"
fi

# We need to run as root (either name or uid check may apply).
if [ "$USER" != root -a "$UID" != 0 ]; then
    error 2 "$APPLICATION must be run as root."
fi
# Check if a chroot is running with this directory. We detect the
# appropriate commands by checking if the command's parent root is not equal
# to the pid's root. This avoids not unmounting due to a lazy-quitting
# background application within the chroot. We also don't consider processes
# that have a parent PID of 1 or that of session_manager's (which would mean an
# orphaned process in this case), as enter-chroot never orphans its children.
# $1: $base; the canonicalized base path of the chroot
# Returns: non-zero if the chroot is in use.
checkusage() {
    # -f skips the in-use check entirely.
    if [ -n "$FORCE" ]; then
        return 0
    fi
    local b="${1%/}/" pid ppid proot prootdir root rootdir pids=''
    # session_manager's pid (or 1 if not found): its direct children are
    # treated like orphans below.
    local smgrpid="`pgrep -o -u 0 -x session_manager || echo 1`"
    for root in /proc/*/root; do
        if [ ! -r "$root" ]; then
            continue
        fi
        rootdir="`readlink -f -- "$root"`"
        rootdir="${rootdir%/}/"
        # Skip processes whose root is not inside the chroot base.
        if [ "${rootdir#"$b"}" = "$rootdir" ]; then
            continue
        fi
        pid="${root#/proc/}"
        pid="${pid%/root}"
        ppid="`ps -p "$pid" -o ppid= 2>/dev/null | sed 's/ //g'`"
        # Ignore orphans: enter-chroot never orphans its children, so a
        # parent of init/session_manager means a lazily-quitting leftover.
        if [ -z "$ppid" ] || [ "$ppid" -eq 1 -o "$ppid" -eq "$smgrpid" ]; then
            continue
        fi
        proot="/proc/$ppid/root"
        # Ignore processes whose parent shares the same chroot root; only a
        # process launched from outside the chroot counts as "in use".
        if [ -r "$proot" ]; then
            prootdir="`readlink -f -- "$proot"`"
            if [ "${prootdir%/}/" = "$rootdir" ]; then
                continue
            fi
        fi
        # With -p, collect pids for display instead of bailing early.
        if [ -n "$PRINT" ]; then
            pids="$pids $pid"
            continue
        fi
        return 1
    done
    if [ -n "$PRINT" -a -n "$pids" ]; then
        ps -p "${pids# }" -o pid= -o cmd= || true
        return 1
    fi
    return 0
}
# If we specified all chroots, bring in all chroots.
if [ -n "$ALLCHROOTS" ]; then
    if [ ! -d "$CHROOTS" ]; then
        error 1 "$CHROOTS not found."
    fi
    # Replace the positional parameters with every entry under $CHROOTS.
    set -- "$CHROOTS/"*
fi
# Follows and fixes dangerous symlinks, returning the canonicalized path.
# Canonicalize a path inside $CHROOT, re-rooting any absolute symlink
# targets back under the chroot so resolution cannot escape to the host.
# $1: path relative to the chroot root
# Prints the fixed absolute path on stdout.
fixabslinks() {
    local current="$CHROOT/$1"
    local resolved
    # Re-resolve until the path is stable (or readlink fails).
    while resolved="`readlink -m -- "$current"`" \
            && [ "$resolved" != "$current" ]; do
        current="$CHROOT${resolved#"$CHROOT"}"
    done
    echo "$current"
}
# Unmount (or, when EXCLUDEROOT is set, prune the inner mounts of) the
# chroot named $1.
# Reads globals: CHROOTS, ROOT, TRIES, SIGNAL, YES, PRINT, ALLCHROOTS,
# EXCLUDEROOT, CROUTON_UNMOUNT_RESPONSE.
# Sets the global 'ret' to 1 on failure paths instead of returning non-zero.
# Sets 'oldstyle' if the chroot was unmounted in an old location; if
# 'oldstyle' is set upon entry, skips the check for old-style mounts.
unmount() {
    NAME="${1#"$CHROOTS/"}"

    # Check for existence; only complain when chroots were named explicitly
    CHROOT="$CHROOTS/$NAME"
    if [ ! -d "$CHROOT" ]; then
        if [ -z "$ALLCHROOTS" ]; then
            echo "$CHROOT not found." 1>&2
            ret=1
        fi
        return 0
    fi

    # Switch to the true mount point, but sort of support old-style mounted
    # chroots with minimal false-positives to ease transition. Don't unmount
    # old-style twice in a row, though.
    CHROOTSRC="$CHROOT"
    oldencr="$CHROOTS/.secure/$NAME"
    if mountpoint -q "$oldencr" \
            && [ -d "$oldencr/etc/crouton" -a -z "$oldstyle" ]; then
        # Old encrypted chroots
        oldstyle='y'
        CHROOT="$oldencr"
        echo "$CHROOTSRC appears to be mounted in $CHROOT" 1>&2
    elif mountpoint -q "$CHROOT" \
            && [ -d "$CHROOT/etc/crouton" -a -z "$oldstyle" ]; then
        # Keep the chroot the same
        oldstyle='y'
        echo "$CHROOTSRC appears to be mounted in place" 1>&2
    else
        oldstyle=''
        CHROOT="$ROOT/${CHROOT#/}"
        if [ ! -d "$CHROOT" ]; then
            # Not mounted
            return 0
        fi
    fi

    base="`readlink -f -- "$CHROOT"`"
    # Never pull mounts out from under another running instance
    if ! checkusage "$base"; then
        echo "Not unmounting $CHROOTSRC as another instance is using it." 1>&2
        ret=1
        return 0
    fi

    # Kill the chroot's system dbus if one is running; failure is fine
    env -i chroot "$CHROOT" su -s '/bin/sh' -c '
        pidfile="/var/run/dbus/pid"
        if [ ! -f "$pidfile" ]; then
            exit 0
        fi
        pid="`cat "$pidfile"`"
        if ! grep -q "^dbus-daemon" "/proc/$pid/cmdline" 2>/dev/null; then
            exit 0
        fi
        kill $pid' - root 2>/dev/null || true

    # Unmount all mounts
    ntries=0
    if [ -z "$EXCLUDEROOT" ]; then
        echo "Unmounting $CHROOTSRC..." 1>&2
    else
        echo "Pruning $CHROOTSRC mounts..." 1>&2
    fi
    # Escape spaces so mount points survive the space-delimited pipeline below
    baseesc="`echo "$base" | sed 's= =//=g'`"
    # Define the mountpoint filter to only unmount specific mounts.
    # The filter is run on the escaped version of the mountpoint.
    filter() {
        if [ -z "$EXCLUDEROOT" ]; then
            grep "^$baseesc\\(/.*\\)\\?\$"
        else
            # Don't include the base directory
            grep "^$baseesc/."
        fi
    }

    # Sync for safety
    sync

    # Make sure the chroot's system media bind-mount is marked as slave to avoid
    # unmounting devices system-wide. We still want to unmount locally-mounted
    # media, though.
    media="`fixabslinks '/var/host/media'`"
    if mountpoint -q "$media"; then
        mount --make-rslave "$media"
    fi

    # Some /proc/mounts entries may end with \040(deleted), in that case, try to
    # umount them with and without the suffix (in the unlikely case the mount
    # point actually ends with ' (deleted)')
    # umount has a bug and may return 0 when many mount points cannot be
    # unmounted, so we call it once per mount point ('-n 1')
    while ! sed "s=\\\\040=//=g" /proc/mounts | cut -d' ' -f2 | filter \
            | sed -e 's=//= =g;s/^\(\(.*\) (deleted)\)$/\1\n\2/' \
            | sort -r | xargs --no-run-if-empty -d '
' -n 1 umount 2>/dev/null; do
        if [ "$ntries" -eq "$TRIES" ]; then
            # Send signal to all processes running under the chroot
            # ...but confirm first.
            printonly=''
            if [ "${YES#[Aa]}" = "$YES" ]; then
                echo -n "Failed to unmount $CHROOTSRC. Kill processes? [a/k/y/p/N] " 1>&2
                if [ -n "$CROUTON_UNMOUNT_RESPONSE" ]; then
                    YES="$CROUTON_UNMOUNT_RESPONSE"
                    echo "$YES" 1>&2
                else
                    read -r YES
                fi
                if [ "${YES#[Kk]}" != "$YES" ]; then
                    SIGNAL='KILL'
                elif [ "${YES#[Pp]}" != "$YES" ]; then
                    printonly=y
                elif [ "${YES#[AaYy]}" = "$YES" ]; then
                    echo "Skipping unmounting of $CHROOTSRC" 1>&2
                    ret=1
                    break
                fi
            fi
            if [ -z "$printonly" ]; then
                echo "Sending SIG$SIGNAL to processes under $CHROOTSRC..." 1>&2
            fi
            # Walk /proc to find processes whose root is inside this chroot
            for root in /proc/*/root; do
                if [ ! -r "$root" ] \
                        || [ ! "`readlink -f -- "$root"`" = "$base" ]; then
                    continue
                fi
                pid="${root#/proc/}"
                pid="${pid%/root}"
                if [ -n "${printonly:-"$PRINT"}" ]; then
                    ps -p "$pid" -o pid= -o cmd= || true
                fi
                if [ -z "$printonly" ]; then
                    kill "-$SIGNAL" "$pid" 2>/dev/null || true
                fi
            done
            # Escalate
            if [ "${YES#[Aa]}" != "$YES" ]; then
                SIGNAL='KILL'
            fi
            if [ -z "$printonly" ]; then
                ntries=0
            fi
        else
            ntries="$((ntries+1))"
        fi
        sleep 1
        if ! checkusage "$base"; then
            echo "Aborting unmounting $CHROOTSRC as another instance has begun using it." 1>&2
            ret=1
            break
        fi
    done

    # More sync for more safety
    sync
}
# Unmount each chroot named on the command line.
ret=0
for NAME in "$@"; do
    if [ -z "$NAME" ]; then
        continue
    fi
    oldstyle=''
    unmount "$NAME"
    # If we unmounted old-style, do it again in case the new-style was also mounted.
    if [ -n "$oldstyle" ]; then
        unmount "$NAME"
    fi
done

# HACK: restart debugd when running tests to avoid namespace issues.
# This will go away when we start using mount namespaces.
if [ -n "$CROUTON_UNMOUNT_RESTART_DEBUGD" ]; then
    restart debugd >/dev/null || true
fi

# Re-disable USB persistence (the Chromium OS default) if we no longer
# have chroots running with a root in removable media
if checkusage "$ROOT/media"; then
    for usbp in /sys/bus/usb/devices/*/power/persist; do
        if [ -e "$usbp" ]; then
            echo 0 > "$usbp"
        fi
    done
fi

exit $ret
| true
|
54aab35206f24c726b73c660836e451a47d2cde9
|
Shell
|
aeiouaoeiuv/wifi_rootfs_XiaoMi
|
/usr/sbin/guest_ssid_network
|
UTF-8
| 2,855
| 3.375
| 3
|
[] |
no_license
|
#! /bin/sh
# use this script to prepare firewall and dhcp for guest wifi
# Print usage information for this script and terminate with status 1.
usage () {
    printf '%s start <ipaddr> <netmask>\n' "$0"
    printf '%s stop\n' "$0"
    exit 1
}
# Create the guest Wi-Fi network: VLAN 3 on eth0, a bridged 'guest'
# interface with the given static address, a DHCP pool, and a locked-down
# firewall zone that only allows DNS, DHCP and the "Hello wifi" service
# ports (8999, 8300, 7080), with forwarding to WAN.
# $1 - IPv4 address for the guest bridge interface
# $2 - netmask for the guest network
start_guest() {
    local ip="$1"
    local mask="$2"

    # Apply everything atomically in one uci batch; the heredoc body must
    # stay unindented so the EOF terminator is recognised.
    uci -q batch <<EOF
set network.eth0_3=switch_vlan
set network.eth0_3.device=eth0
set network.eth0_3.vlan=3
set network.eth0_3.ports=5
set network.guest=interface
set network.guest.type=bridge
set network.guest.ifname=eth0.3
set network.guest.proto=static
set network.guest.ipaddr=$ip
set network.guest.netmask=$mask
set dhcp.guest=dhcp
set dhcp.guest.interface=guest
set dhcp.guest.leasetime=12h
set dhcp.guest.force=1
set dhcp.guest.dhcp_option_force='43,XIAOMI_ROUTER'
set dhcp.guest.start=100
set dhcp.guest.limit=50
set firewall.guest_zone=zone
set firewall.guest_zone.name=guest
set firewall.guest_zone.network=guest
set firewall.guest_zone.input=REJECT
set firewall.guest_zone.forward=REJECT
set firewall.guest_zone.output=ACCEPT
set firewall.guest_forward=forwarding
set firewall.guest_forward.src=guest
set firewall.guest_forward.dest=wan
set firewall.guest_dns=rule
set firewall.guest_dns.name='Allow Guest DNS Queries'
set firewall.guest_dns.src=guest
set firewall.guest_dns.dest_port=53
set firewall.guest_dns.proto=tcpudp
set firewall.guest_dns.target=ACCEPT
set firewall.guest_dhcp=rule
set firewall.guest_dhcp.name='Allow Guest DHCP request'
set firewall.guest_dhcp.src=guest
set firewall.guest_dhcp.src_port='67-68'
set firewall.guest_dhcp.dest_port='67-68'
set firewall.guest_dhcp.proto='udp'
set firewall.guest_dhcp.target=ACCEPT
set firewall.guest_8999=rule
set firewall.guest_8999.name='Hello wifi 8999'
set firewall.guest_8999.src=guest
set firewall.guest_8999.proto=tcp
set firewall.guest_8999.dest_port=8999
set firewall.guest_8999.target=ACCEPT
set firewall.guest_8300=rule
set firewall.guest_8300.name='Hello wifi 8300'
set firewall.guest_8300.src=guest
set firewall.guest_8300.proto=tcp
set firewall.guest_8300.dest_port=8300
set firewall.guest_8300.target=ACCEPT
set firewall.guest_7080=rule
set firewall.guest_7080.name='Hello wifi 7080'
set firewall.guest_7080.src=guest
set firewall.guest_7080.proto=tcp
set firewall.guest_7080.dest_port=7080
set firewall.guest_7080.target=ACCEPT
commit
EOF

    # Restart services so the new interface/zone/pool take effect
    /etc/init.d/network restart
    /etc/init.d/firewall restart
    /etc/init.d/dnsmasq restart
    /etc/init.d/trafficd restart
}
# Tear down the guest network: delete every uci section created by
# start_guest and restart the affected services.
stop_guest() {
    uci -q batch <<EOF
delete network.eth0_3
delete network.guest
delete dhcp.guest
delete firewall.guest_zone
delete firewall.guest_forward
delete firewall.guest_dns
delete firewall.guest_dhcp
delete firewall.guest_8999
delete firewall.guest_8300
delete firewall.guest_7080
commit
EOF

    # Restart services so the removal takes effect
    /etc/init.d/network restart
    /etc/init.d/firewall restart
    /etc/init.d/dnsmasq restart
}
# Dispatch: "start <ipaddr> <netmask>" builds the guest network,
# "stop" removes it; anything else prints usage and exits 1.
case "$1" in
start)
    shift
    start_guest "$1" "$2"
    ;;
stop)
    stop_guest
    ;;
*)
    usage
    ;;
esac
| true
|
ead44f93bd2a77e78562100f495b817cf859f121
|
Shell
|
hyraxZK/pws
|
/experiments/plot_all.sh
|
UTF-8
| 643
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run the process/plot steps of the three experiments concurrently, each in
# its own process substitution held open on fds 3-5; the trailing 'cat's
# drain those fds and so act as the join point.
exec 3< <( cd lanczos2 ; ./process.sh ; ./plot.sh )
exec 4< <( cd matmult ; ./process.sh ; ./plot.sh )
exec 5< <( cd SHA256 ; ./process.sh ; ./plot.sh )

# Build the shared legend PDF while the experiments run
LEGEND="-L Hyrax-$\nicefrac{1}{2}$ -L Hyrax-$\nicefrac{1}{3}$ -L Hyrax-naive -L BCCGP-sqrt -L Bulletproofs -L ZKB++ -L Ligero"
FILES="-f SHA256/out/merkle.out -f SHA256/out/merkle.out $(for i in unopt bccgp bullet zkbpp ligero; do echo "-f SHA256/out/merkle_${i}.out" ; done | xargs)"
./plot.py ${LEGEND} ${FILES} -H -o legend.pdf
# Crop twice: first to a fixed bounding box, then tightly
pdfcrop --bbox "60 896 1410 955" legend.pdf legend.pdf
pdfcrop legend.pdf legend.pdf

# wait for the subshells to finish
cat <&3
cat <&4
cat <&5
| true
|
810da1f77f51a64e9852b628772111c592c91a33
|
Shell
|
toshiakiasakura/backup_vim_bash
|
/.bash_aliases
|
UTF-8
| 3,544
| 2.890625
| 3
|
[] |
no_license
|
# --- General convenience aliases ---
# for convenient purpose
alias open="xdg-open"
alias jpt_lab="jupyter lab"
alias pwdcp="pwd | pbcopy"
alias ctf="cd ~/Desktop/programming/ctf"
alias searchKokushi="cd /Users/Toshiaki/Desktop/medicine/prj_kokushi/src && python kokushiSearch.py"
alias o.="open ."
alias snippets="cd ~/.local/share/jupyter/snippets"
alias cdoutyou="cd /home/toshiaki/Desktop/prj_doutyou/prj_D_analysis"

# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
alias c.="cd .."
alias c..="cd ../.."
alias t1="tree -L 1"
alias t2="tree -L 2"
alias t3="tree -L 3"

# git/github alias
alias git_push_easy="bash /Users/Toshiaki/Desktop/programming/bash/gitPush.sh"
alias gCM="git checkout main"
alias gC="git checkout"
alias gPull="git pull origin"
alias gPullM="git pull origin master"
alias gPush="git push origin"
alias gPushM="git push origin master"
alias gB="git branch"
alias gS="git status"
# Pretty git-log graphs: lg1/lg2 decorated formats, lg3 compact one-liner
alias lg1="git log --graph --abbrev-commit --decorate --format=format:'%C(bold blue)%h%C(reset) - %C(bold green)(%ar)%C(reset) %C(white)%s%C(reset) %C(dim white)- %an%C(reset)%C(bold yellow)%d%C(reset)' --all"
alias lg2="git log --graph --abbrev-commit --decorate --format=format:'%C(bold blue)%h%C(reset) - %C(bold cyan)%aD%C(reset) %C(bold green)(%ar)%C(reset)%C(bold yellow)%d%C(reset)%n'' %C(white)%s%C(reset) %C(dim white)- %an%C(reset)' --all"
alias lg3="git log --graph --pretty=oneline --abbrev-commit "
# Write a template .gitignore into the current repo
alias g_ignore="cat ~/Desktop/programming/python/snippets/src/gitignore.py > ./.gitignore"
# Three-way merge a single file from three branches without touching the
# index: writes the merge result (conflict markers included, if any) to the
# working-tree copy of <target-file>.
#   $1 - target file (prefix with './' for a file in the current directory)
#   $2 - branch holding "ours"
#   $3 - common base branch
#   $4 - branch holding "theirs"
# Fixed: expansions are now quoted (paths with spaces previously broke every
# redirection and git invocation) and working variables are 'local' so they
# no longer leak into the interactive shell.
function git_merge_one_file(){
    if [ $# -ne 4 ]; then
        echo -e "\nUsage : git_merge_one_file <target-file> <current-branch> <base-branch> <other-branch>"
        echo " <target-file> should include './' for a prefix"
        echo " if you want to select a file in the currenct directry."
    else
        local file_name="$1"
        local ours="$2"
        local base="$3"
        local theirs="$4"
        # Materialize each branch's version of the file next to the target
        git show "${ours}:${file_name}" > "${file_name}.ours"
        git show "${base}:${file_name}" > "${file_name}.base"
        git show "${theirs}:${file_name}" > "${file_name}.theirs"
        # Merge them, overwrite the target, then clean up the temporaries
        git merge-file -p "${file_name}.ours" "${file_name}.base" "${file_name}.theirs" > "${file_name}"
        rm "${file_name}.ours" "${file_name}.base" "${file_name}.theirs"
    fi
}
# Render a markdown file ($1) and page it in lynx
function mdview() {
    markdown $1 | lynx -stdin
}

# for specific path
alias 3CSRTT="cd /home/toshiaki/Desktop/study/3CSRTT/prj_SD_add_202006"

# pyqt5 designer
alias craku="cd /home/toshiaki/Desktop/prj_doutyou/rakutto_collect_project/"
alias crakuInput="cd /home/toshiaki/Desktop/rakutto_collect_project/prj_input/src_input_gui/"
alias cDAna="cd /home/toshiaki/Desktop/prj_doutyou/prj_D_analysis/"
# Convert a Qt Designer .ui file ($1) to a runnable Python module ($2)
function pyqtConv(){
    python -m PyQt5.uic.pyuic -x $1 -o $2
}

# alias for pycharm
alias pycharm="sh /home/toshiaki/pycharm-community-2020.1.3/bin/pycharm.sh"

# make alias for my cipher
Path="/home/toshiaki/Desktop/programming/python/binary"
file="fibonaCipher.py"
alias encryption="python $Path/$file -e"
alias decryption="python $Path/$file -d"

# list size of each file (total)
alias lsize='for f in * ; do du -hs "$f" ; done'

# for dictionary: look up $1 in the GENE dictionary (whole word, one line of
# trailing context); optional extra grep options in $2/$3
function dict() {
    grep $1 ~/Desktop/programming/linux/gene-utf8.txt -A 1 $2 $3 -wi --color
}

# for word search: grep the medicine notes file; $2 optionally overrides the
# default context option (-C10)
function med_grep(){
    echo "-------------------------------"
    C_op=${2:--C10}
    grep $1 /home/toshiaki/Desktop/medicine/words_connections.txt $C_op -i
}
alias med_edit="gedit /home/toshiaki/Desktop/medicine/words_connections.txt"

# for smartgit
alias smartgit="bash /home/toshiaki/custom_items/smartgit/bin/smartgit.sh"
| true
|
74b69845504d6339079d872c6fa23ed1e89d0c3b
|
Shell
|
arendtio/mdl.sh
|
/development/module/dependency/updatable/spec/assets/updateable-by-recursion-1.0.0.sh
|
UTF-8
| 452
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh
set -eu

# check number of arguments (this module takes none); 64 = EX_USAGE
if [ "$#" -ne 0 ]; then
    printf 'Invalid number of arguments\n' >&2
    exit 64
fi

# the purpose of this module is to reference a module that references an outdated version

# referencing the updateable-by-outdated-reference-1.0.0 module
# NOTE(review): 'module' is presumably provided by the mdl.sh loader that
# sources this file -- it is not defined here
module "updateableByOutdatedReference" "https://mdl.sh/development/module/dependency/updatable/spec/assets/updateable-by-outdated-reference-1.0.0.sh" "cksum-1153090430"
| true
|
270adbffa562f90f97586a75a00adb440e814c5e
|
Shell
|
Sixtease/CorpusMakoni
|
/scripts/delete-unused-splits.sh
|
UTF-8
| 390
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# For each stem read from stdin (one per line), delete split files in storage
# whose basenames are not referenced in split-meta/<stem>.jsonp.
# $1 - optional listing command (default: gsutil ls of the splits bucket)
# $2 - optional removal command (default: gsutil -m rm)
ls_cmd="${1:-gsutil ls gs://karel-makon-splits/splits/}"; shift
rm_cmd="${1:-gsutil -m rm}"; shift
while read stem; do
    # Word-splitting of $ls_cmd/$rm_cmd is deliberate so the default
    # multi-word gsutil commands work; URLs are collected space-separated
    torm=$(
        $ls_cmd$stem/*/ | while read url; do
            bn=`basename "$url"`
            # Keep only URLs whose basename is NOT mentioned in the metadata
            if grep -q "$bn" split-meta/"$stem".jsonp; then :
            else
                echo -n " $url"
            fi
        done
    )
    $rm_cmd$torm
done
| true
|
26766c94f1fe7b01894aae6dfe937ea3f02ab474
|
Shell
|
FH-Potsdam/tombola
|
/shell/tombola.sh
|
UTF-8
| 702
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/zsh

main(){
    # the attendees
    arr=("Duffy Duck" "Some Body" )

    # check if homebrew is installed
    if hash brew 2>/dev/null; then
        echo "homebrew is installed"
    else
        # no homebrew? install it
        echo "installing homebrew"
        ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
        brew doctor
    fi

    # check if coreutils are installed (provides gshuf)
    if hash gshuf 2>/dev/null; then
        echo "coreutils are installed"
    else
        # no coreutils? install them
        echo "installing coreutils"
        brew install coreutils
    fi

    # draw one random index in 1-2 (zsh arrays are 1-based)
    # NOTE(review): the range is hard-coded for the two attendees above --
    # widen 'gshuf -i 1-N -n K' when the attendee list grows
    entries=($(gshuf -i 1-2 -n 1))

    # print the attendee at each drawn index
    for i in ${entries[@]}
    do
        # output to console
        echo ${arr[$i]}
    done
}

main
| true
|
6045b49cb5c3c5e06c7cde8ca180435e532665ee
|
Shell
|
rchain/rchain-testnet-node
|
/rundeck-scripts/dump-java-heap
|
UTF-8
| 236
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Dump the Java heap of the running node process into the diagnostics
# directory and sync that directory to its remote destination.
set -e -o pipefail

# Provides get_node_pid, check_diag_directory, DIAG_DIR,
# get_current_timestamp, logcmd and sync-diag-dir
source "$(dirname $0)/functions"

node_pid="$(get_node_pid)"
check_diag_directory

# Timestamped .hprof so repeated dumps do not overwrite each other
out_file="$DIAG_DIR/heapdump.$(get_current_timestamp).hprof"
logcmd jcmd $node_pid GC.heap_dump "$out_file"
sync-diag-dir
| true
|
10996af242221938732c4623c803a5bc18af8e0c
|
Shell
|
powerpak/pathogendb-pipeline
|
/scripts/pathogendb-upload-illumina-data.sh
|
UTF-8
| 520
| 2.625
| 3
|
[] |
no_license
|
#!/bin/sh

# 17.03.2019   12:56:56 EDT
# Harm van Bakel <hvbakel@gmail.com>

# Load the toolchain required by the RAST/IGB conversion scripts
module purge all
module load python/2.7.6
module load py_packages/2.7
module load blat
module load bioperl
module load ucsc-utils/2015-04-07
module load openssl/1.0.2

# For every prokka output directory (ER*prokka): import the annotated
# genome into IGB, then register it in PathogenDB.
for i in ER*prokka
do
   cwd=`pwd`
   # Strip the '_prokka' suffix to recover the sample name
   name=`basename $i _prokka`
   cd $i
   rast2igb.pl  -f ${i}.gbk -g E_faecalis_${name} -i /sc/orga/projects/InfectiousDisease/igb -r ~/opt/pathogendb-pipeline/
   igb2pathogendb.pl -i /sc/orga/projects/InfectiousDisease/igb/E_faecalis_${name}
   cd $cwd
done
| true
|
1b30b20cf36d072b40a7aadbf7069a3fd363eda8
|
Shell
|
brookingcharlie/kubernetes-notes
|
/basics/run.sh
|
UTF-8
| 1,539
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Kubernetes-basics walkthrough: install tooling, deploy a sample web app
# with a ConfigMap and Secret, expose it, and install ingress-nginx.
# NOTE(review): this reads as a command log rather than an unattended
# script -- the Docker-for-Mac / minikube / Helm ingress steps below are
# alternatives, not a sequence.

# Install kubectl (with man pages and bash/zsh completion)
brew install kubernetes-cli
# Remove existing symlink from /usr/local/bin/kubectl to Docker Desktop's bundled kubectl.
brew link --overwrite kubernetes-cli

brew install minikube

kubectl apply -f configmap.yaml
kubectl create secret generic web --from-literal=password=helloWorld42+
kubectl apply -f deployment.yaml

# Verify env vars and the mounted config inside the running pod
kubectl exec deploy/web -- bash -c 'echo $APP_NAME'
kubectl exec deploy/web -- bash -c 'echo $PASSWORD'
kubectl exec deploy/web -- bash -c 'cat /etc/config/password'

kubectl apply -f service.yaml
kubectl port-forward service/web 8080:80

#brew install helm
#helm repo add stable https://kubernetes-charts.storage.googleapis.com
#helm search repo metabase
#helm install metabase stable/metabase

# Install ingress-nginx (see https://kubernetes.github.io/ingress-nginx/deploy/)
# If on Docker for Mac:
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.35.0/deploy/static/provider/cloud/deploy.yaml
# If on minikube:
minikube addons enable ingress
# Using Helm:
kubectl create namespace ingress-nginx
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm install ingress-nginx ingress-nginx/ingress-nginx -n ingress-nginx

kubectl apply -f ingress.yaml

# to see metrics
kubectl port-forward -n ingress-nginx $(kubectl get pod -n ingress-nginx -l app.kubernetes.io/component=controller -o jsonpath='{.items[0].metadata.name}') 10254:10254
curl http://kubernetes.docker.internal:10254/metrics
| true
|
142cc21a609c024a7d690b8d4353b97116157864
|
Shell
|
taqtiqa-admin/oci-jupyter-base
|
/bob/scripts/alpine/3.7/user/install.sh
|
UTF-8
| 304
| 2.75
| 3
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env sh
# Create an unprivileged login user in the 'wheel' group on Alpine.
# Name comes from $1, else $OCI_USER, else 'bob'; the password is set
# non-interactively to '<name><name>'.
# No Requires

# Load profile snippets (PATH tweaks etc.)
# NOTE(review): 'source' is a bashism; portable sh uses '.', though BusyBox
# ash accepts 'source' -- confirm the target shell
for f in /etc/profile.d/*; do source $f; done

OCI_USER=${OCI_USER:-bob}
user_name=${1:-$OCI_USER}

# -S: system account; shell is ash (Alpine default)
adduser -h /home/${user_name} -G 'wheel' -S -s /bin/ash ${user_name}
passwd -d ${user_name}
# Feed the new password twice (entry + confirmation) via heredoc
passwd ${user_name}<<EOF
${user_name}${user_name}
${user_name}${user_name}
EOF
| true
|
eea72af345e53f747016ac8a891faec565990339
|
Shell
|
joetm/jonaso
|
/reading_list/metadata-shell.BAK.sh
|
UTF-8
| 1,965
| 3.625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Scan a fixed set of literature folders for PDFs and build a pipe-separated
# reading list (readinglist.csv) from pdfinfo metadata.

# the folders to search for
BASEPATHS[0]="/home/jonas/OULU/Literatur/"
BASEPATHS[1]="/home/jonas/FU/IKON/Literatur"
BASEPATHS[2]="/home/jonas/FU/Misc/"

echo "Mining pdfs"
# echo "Mining pdfs in the following folders:"
# printf "   %s\n" "${BASEPATHS[@]}"

# reset reading list
truncate -s 0 readinglist.csv
# do stuff with each pdf file: extract metadata and append one
# pipe-separated row to readinglist.csv. Skips directories.
# $1 - path to the PDF (quoted throughout so paths with spaces work;
#      the previous version passed 'pdfinfo $1' unquoted and broke on them,
#      and ran pdfinfo four times per file instead of once)
handle_pdf () {
    # skip folders
    if [ -d "$1" ]
    then
        return 0
    fi

    # filename and containing directory
    fname=$(basename -- "$1")
    dir=$(dirname -- "$1")

    # Run pdfinfo once and reuse its output for every field
    info=$(pdfinfo "$1")

    # For each field: take everything after the last ':', squeeze the
    # surrounding whitespace via unquoted expansion (intentional), and
    # strip stray CR characters pdfinfo emits on some platforms
    authorline=$(grep 'Author' <<<"$info")
    author=$( echo ${authorline##*:} | sed -e 's/\r//g')

    titleline=$(grep 'Title' <<<"$info")
    title=$( echo ${titleline##*:} | sed -e 's/\r//g')

    keywordsline=$(grep 'Keywords' <<<"$info")
    keywords=$( echo ${keywordsline##*:} | sed -e 's/\r//g')

    numPagesline=$(grep 'Pages' <<<"$info")
    numPages=$( echo ${numPagesline##*:} | sed -e 's/\r//g')

    echo "$fname|$title|$author|$keywords|$numPages|$dir|$1" >> readinglist.csv

    return 0
}
# Export so the xargs-spawned bash subshells can call it
export -f handle_pdf

# write header row
echo "filename | title | author | keywords | numPages | directory | path" >> readinglist.csv

# Walk every base path and hand each PDF to handle_pdf one at a time;
# -print0/-0 keep paths with spaces intact across the pipe
for i in "${BASEPATHS[@]}"
do
   :
   # go through the pdf files in this folder
   find $i -regextype sed -regex ".*/+.*\.pdf$" -print0 | xargs -0 -n 1 -P 1 -I {} bash -c 'handle_pdf "$@"' _ {}
done

# csvtool setcolumns 7 tmp.csv > readinglist.csv
# rm tmp.csv
# ./bugfix.py
| true
|
8e96e090f88bf09a884d13fed73864e2484f8e31
|
Shell
|
anusree/tools
|
/openstack/list_all_users_ina_project.sh
|
UTF-8
| 267
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
# List all users that belong to the given OpenStack project.
# $1 - project name
# Fixed: the shebang was missing the '!' ('#/bin/sh'), and the existence
# check queried the misspelled $PROJRCT_NAME -- always empty, so 'openstack
# project show' never received the project name.
if [ $# -eq 0 ]
then
    echo "Please enter a project name"
    exit 1
fi

PROJECT_NAME=$1

# Verify the project exists before listing its users
PROJECT_INFO=$(openstack project show $PROJECT_NAME)
if [ $? -eq 0 ]
then
    openstack user list --project $PROJECT_NAME
else
    echo "Project $PROJECT_NAME does not exist"
fi
| true
|
8387068ca8401c9fadc1d823a5290c0dac831486
|
Shell
|
wedevelopnl/silverstripe-cypress
|
/scripts/provision-assets.sh
|
UTF-8
| 175
| 2.609375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Copy the Docker-provisioned Silverstripe assets into the public webroot.
set -e

mkdir -p ./public/assets

# NOTE(review): this condition is always true because the directory was just
# created above; if a guard is wanted it should test the *source* directory
# ./dev/docker/silverstripe/assets/ instead
if [ -d "./public/assets/" ]; then
  cp -r "./dev/docker/silverstripe/assets/." "./public/assets/"
fi

echo "All assets copied."
| true
|
0e315a052188373751bd6353e2ed0ee2b5cd49df
|
Shell
|
wing-888/MOR_X5_FROM_VM
|
/x5/maintenance/test_fix_scripts/asterisk/etc_asterisk_extensions_mor_conf_h323_include.sh
|
UTF-8
| 2,282
| 3.734375
| 4
|
[] |
no_license
|
#! /bin/sh

# Author: Mindaugas Mardosas
# Year:  2010
# About:
#
#  This script checks that:
#
#1. /etc/asterisk/extensions_mor_h323.conf must be included into extensions_mor.conf
#2. /etc/asterisk/extensions_mor.conf must be included in extensions.conf
#3. /etc/asterisk/asterisk.conf must have uncommented line: execincludes=yes
#4. /etc/asterisk/h323.conf must have a line at the end: #exec /usr/local/mor/mor_ast_h323
#

# Shared helpers: report, separator, asterisk_is_running,
# asterisk_include_directive, check_if_setting_match_fix
. /usr/src/mor/x5/framework/bash_functions.sh

#------VARIABLES-------------

#----- FUNCTIONS ------------
mor_ast_h323_exec_directive()
{
    # Author: Mindaugas Mardosas
    # Year: 2010
    # About: This function checks if config /etc/asterisk/h323.conf has
    # '#exec /usr/local/mor/mor_ast_h323' line present. If missing, it
    # appends the line and re-checks, reporting 0 (already present),
    # 4 (added now) or 1 (failed to add) via the shared 'report' helper.

    # Extract the #exec target, ignoring anything after an Asterisk ';' comment
    Directive=`grep "#exec" /etc/asterisk/h323.conf | awk -F";" '{print $1}' | grep '/usr/local/mor/mor_ast_h323' | awk '{print $2}'`
    if [ "$Directive" == "/usr/local/mor/mor_ast_h323" ]; then
        report "/etc/asterisk/h323.conf: #exec /usr/local/mor/mor_ast_h323" 0
    else
        echo '#exec /usr/local/mor/mor_ast_h323' >> /etc/asterisk/h323.conf

        # Re-check after appending
        Directive=`grep "#exec" /etc/asterisk/h323.conf | awk -F";" '{print $1}' | grep '/usr/local/mor/mor_ast_h323' | awk '{print $2}'`
        if [ "$Directive" == "/usr/local/mor/mor_ast_h323" ]; then
            report "/etc/asterisk/h323.conf: #exec /usr/local/mor/mor_ast_h323" 4
        else
            report "/etc/asterisk/h323.conf: #exec /usr/local/mor/mor_ast_h323" 1
        fi
    fi
}
#--------MAIN -------------
# Nothing to check unless Asterisk is running
asterisk_is_running
if [ "$?" != "0" ]; then
    exit 0;
fi

separator "Checking various Asterisk includes, exec's"

# Checks 1-2: the dialplan include chain must be intact
asterisk_include_directive /etc/asterisk/extensions_mor.conf "extensions_mor_h323.conf"
report "/etc/asterisk/extensions_mor.conf: #include extensions_mor_h323.conf" "$?"

asterisk_include_directive /etc/asterisk/extensions.conf "extensions_mor.conf"
report "/etc/asterisk/extensions.conf: #include extensions_mor.conf" "$?"

# Check 3: #exec directives must be enabled for the h323 hook to work
check_if_setting_match_fix /etc/asterisk/asterisk.conf "execincludes" "execincludes=yes"
report "/etc/asterisk/asterisk.conf: execincludes=yes" "$?"

# check if #exec /usr/local/mor/mor_ast_h323 exists in /etc/asterisk/h323.conf
#------------------------
mor_ast_h323_exec_directive
| true
|
0d6516097486f3e1458b76308df917e82831c27e
|
Shell
|
Tell1/ml-impl
|
/com.github.tellmp.ml-algorithms/hmm_matlab/nilmbee-master/sw/firmware/batchprogram.sh
|
UTF-8
| 738
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Program one node, or a consecutive range of nodes: write each node id into
# nodeid.h, wait for a keypress, then run 'make program'.
# $1 - first node id; $2 - optional last node id (inclusive)
# Changed: the loop counter now uses shell arithmetic instead of spawning
# the external 'expr' per iteration, and expansions are quoted.
if [ "$#" -lt 1 ]; then
    echo "Usage: batchprogram.sh node_id [last_node_id]"
    echo "Program a single node or multiple nodes from node_id to last_node_id (inclusive)"
else
    s="$1"
    if [ -z "$2" ]; then
        # Single-node mode
        echo "const uint8_t NODE_ID = $s;" > nodeid.h
        echo "Press any key to programming node #$s ..."
        read -n1 -s
        make program
        echo "Finished"
    else
        # Range mode: program each id from $1 through $2
        echo "Programming node #$s - #$2 ..."
        while [ "$s" -le "$2" ]; do
            echo "const uint8_t NODE_ID = $s;" > nodeid.h
            echo "Press any key to programming node #$s ..."
            read -n1 -s
            make program
            s=$((s + 1))
        done
        echo "Finished"
    fi
fi
| true
|
3f4bf3b9dd3cbbc5d68639f3d939e7a520452159
|
Shell
|
justcompile/midgard
|
/scripts/build-plugins.sh
|
UTF-8
| 246
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build every Go plugin found directly under plugins/ into <name>.so
# in the repository root.

cwd="${PWD}"

# Always return to the starting directory, even on error
function finish {
  cd "$cwd"
}
trap finish EXIT ERR

# NOTE(review): 'find plugins -d 1 -type d' relies on BSD find's '-d depth'
# syntax; GNU find would need '-mindepth 1 -maxdepth 1' -- confirm platform
find plugins -d 1 -type d | while read line; do
    name=$(basename $line)
    GO111MODULE=auto go build -buildmode=plugin -o "${name}.so" "$cwd/$line"
done
| true
|
53618a343419f07210e8d4d38f00160dc68753d6
|
Shell
|
dey600r/MTM
|
/Utils/Versions/deploy-release-android.sh
|
UTF-8
| 1,535
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build, sign, zipalign and archive a release Android App Bundle for MtM,
# along with native-library debug symbols and the R8 retrace mapping.
# $1 - repo root; $2 - version string; $3 - build configuration name
#      (suffixed with '-android' below); $4 - keystore password; $5 - alias

path=$1
version=$2
prod=$3-android
pass=$4
alias=$5
free="_"

# Free builds get a '_Free_' marker in every artifact name
case $prod in
  *free*)
    free="_Free_";
esac

echo "----> BUILDING ANDROID ON VERSION $version USING $prod WITH PATH $path AND $free <----";

# Output and intermediate locations
path_version=$path/Utils/Versions/Android/MtM$free$version
path_key=$path/Utils/Versions/Android
path_release=$path/app/platforms/android/app/build/outputs/bundle/release
path_native_lib=$path/app/platforms/android/app/build/intermediates/merged_native_libs/release/out/lib
path_retrace_r8=$path/app/platforms/android/app/build/outputs/mapping/release
path_trace_store=$path_version/trace-store

# Start from a clean per-version archive directory
cd $path/app;
rm -f -r $path_version;
mkdir $path_version;

# Build, then sign and zipalign the bundle into the archive directory
ionic cordova build android --release --configuration=$prod && \
echo "--- SIGNING BUNDLE ANDROID ---"
jarsigner -verbose -sigalg SHA1withRSA -digestalg SHA1 -storepass "$pass" -keystore $path_key/mtm-release-prod-key.keystore $path_release/app-release.aab $alias && \
echo "--- ZIPPING BUNDLE ANDROID ---"
zipalign -v -f 4 $path_release/app-release.aab $path_version/MtM$free$version.aab;

# Archive native symbols: full set first, then without armeabi
echo "--- SAVING BUNDLE NATIVE LIBS ---"
cd $path_native_lib
mkdir $path_trace_store
zip -rv native-libs-symbols-all.zip *
mv native-libs-symbols-all.zip $path_trace_store
rm -r $path/app/platforms/android/app/build/intermediates/merged_native_libs/release/out/lib/armeabi
zip -rv native-libs-symbols.zip *
mv native-libs-symbols.zip $path_trace_store

# Keep the R8/ProGuard mapping for later crash retracing
echo "--- SAVING BUNDLE RETRACE R8 ---"
cp $path_retrace_r8/* $path_trace_store

echo "----> END ANDROID $prod <----";
| true
|
04af0656cb560cce568efe01d0027753af97584e
|
Shell
|
MISAKIGA/general_scripts
|
/linux_shell/安装docker/auto-install-docker-program.sh
|
UTF-8
| 1,675
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install docker-compose applications bundled in ./docker-in.tar:
# unpack into /usr/local/docker/ and bring up each contained project.

unzipDir="/usr/local/docker/"

# Initialization: verify ./docker-in.tar exists, then unpack it into $unzipDir.
# Fixed: the old check `if [ ! \`ls | grep docker-in.tar\`];then` was missing
# the space before ']' (a runtime syntax error) and misbehaved on empty
# grep output; test for the file directly instead.
init(){
    echo "init"
    if [ ! -f ./docker-in.tar ]; then
        echo "请检查当前路径是否存在docker-in.tar"
        exit 2
    fi

    echo "解压到$unzipDir"
    tar -zxvf ./docker-in.tar -C "$unzipDir"
    if [ ! $? -eq 0 ];then
        echo "解压失败!"
        exit 2
    fi
    echo "解压成功!"
}
# Scan $unzipDir for per-application directories and install each one.
# Fixed: the index 'j' was used uninitialized, and the empty-list guard
# `[ ! -n tempDir ]` tested the literal string 'tempDir' (never empty),
# so it could never fire; use the array length instead.
scanning(){
    cd "$unzipDir"
    tbDir=()
    j=0
    echo ${tbDir[*]}
    for i in `ls -1`
    do
        if [ -d "$i" ];then
            echo "即将在docker上安装以下软件$i"
            tbDir[$j]=$i
            j=$((j + 1))
        fi
    done

    if [ ${#tbDir[@]} -eq 0 ];
    then
        echo "安装目录为空!"
        exit 2
    fi

    echo "待安装:${tbDir[*]}"
    for i in ${tbDir[*]}
    do
        echo "aaa$i"
        tbInstall $i
        if [ ! $? -eq 0 ];then
            echo "$i安装失败!"
            continue
        fi
    done
}
# Bring up one application with docker-compose.
# $1 - application directory; must contain docker-compose.yml.
# Fixed: the old presence check `[ ! \`ls $1 |grep docker-compose.yml\` ]`
# only worked by accident (a bare '!' is a non-empty one-arg test);
# test for the compose file directly.
tbInstall(){
    echo "准备安装 $1 !"
    tbIn=$1/docker-compose.yml
    if [ ! -f "$tbIn" ];then
        echo "$tbIn 没有找到docker-compose.yml"
        return
    fi
    docker-compose -f $1/docker-compose.yml up -d
    if [ ! $? -eq 0 ];then
        exit 3
    fi
    echo "安装完成!"
}
# Entry point: unpack the bundle, then install every application found.
main()
{
    init
    scanning
}
main
#main > ./run.log 2>&1
| true
|
f6d1c7434e312a664e9cede62aaa5852742a645e
|
Shell
|
apetrisor/nox-kit
|
/cli.js
|
UTF-8
| 630
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# nox-kit CLI: "dev" runs PostCSS + sapper in dev mode; "build" produces a
# production build. Environment is loaded from .env for every command.
# NOTE(review): `env $(cat .env | xargs)` breaks if .env values contain
# spaces or quotes -- assumes simple KEY=VALUE lines; confirm.

if [ "$1" = "dev" ]; then
    echo "Starting dev environment..."
    env $(cat .env | xargs) ./node_modules/.bin/postcss ./src/assets/global.pcss -o ./static/css/global.css --config ./node_modules/@apetrisor/nox-kit
    env $(cat .env | xargs) ./node_modules/.bin/sapper dev --build-dir ./nox/dev
elif [ "$1" = "build" ]; then
    echo "Starting build..."
    env $(cat .env | xargs) NODE_ENV=production ./node_modules/.bin/postcss src/assets/global.pcss -o static/css/global.css --config ./node_modules/@apetrisor/nox-kit
    env $(cat .env | xargs) NODE_ENV=production ./node_modules/.bin/sapper build ./nox/build --legacy
fi
| true
|
53ccc971c78e040b7fed34eeefd66663cca080b3
|
Shell
|
HiramHe/FACS
|
/.travis/travis_before_install.sh
|
UTF-8
| 411
| 3.25
| 3
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
#!/usr/bin/env bash
# Travis before_install step: ensure miniconda3 is available (the install
# directory is expected to be cached between CI runs).

export PATH=$HOME/miniconda3/bin:$PATH

if test -e $HOME/miniconda3/bin; then
    echo "miniconda already installed."
else
    # Fresh install: wipe any partial/stale cache first
    rm -rf $HOME/miniconda3
    wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda3.sh
    chmod +x miniconda3.sh
    # -b: batch (no prompts); -p: install prefix
    ./miniconda3.sh -b -p $HOME/miniconda3
    conda update --yes conda --quiet
fi

# For debugging:
conda info -a
| true
|
70effe9c538db5e183dab8777f3672f80d8b5abf
|
Shell
|
uk-gov-mirror/ministryofjustice.opg-lpa
|
/scripts/non_live_seeding/seed_environment.sh
|
UTF-8
| 2,809
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/env sh

# The following variables are set from docker-compose when run locally,
# from ECS container definitions when run as a task in AWS,
# or from scripts/non_live_seeding/.envrc when executed manually from a SSH terminal.

# OPG_LPA_STACK_NAME
# OPG_LPA_STACK_ENVIRONMENT
# OPG_LPA_POSTGRES_HOSTNAME
# OPG_LPA_POSTGRES_PORT
# OPG_LPA_POSTGRES_NAME - database name
# OPG_LPA_POSTGRES_USERNAME
# OPG_LPA_POSTGRES_PASSWORD

# NOTE(review): set but not exported here -- verify the AWS CLI actually
# sees this value
AWS_DEFAULT_REGION=eu-west-1

# Common psql connection options (word-split on purpose where used)
API_OPTS="--host=${OPG_LPA_POSTGRES_HOSTNAME} --username=${OPG_LPA_POSTGRES_USERNAME}"
# Check the LPA database exists; returns 0 if so, 1 otherwise.
# NOTE(review): this connects *to* ${OPG_LPA_POSTGRES_NAME} in order to ask
# whether that same database exists -- if the database is missing the
# connection itself fails, which still yields a non-'1' result, but
# querying via the maintenance 'postgres' database would be conventional.
check_db_exists()
{
    ret_val=0

    if [ "$( PGPASSWORD=${OPG_LPA_POSTGRES_PASSWORD} psql ${API_OPTS} ${OPG_LPA_POSTGRES_NAME} -tAc "SELECT 1 FROM pg_database WHERE datname='${OPG_LPA_POSTGRES_NAME}'" )" = '1' ]
    then
        echo "LPA Database exists. Can continue"
    else
        echo "LPA Database does not exist. Seeding will fail"
        ret_val=1
    fi

    return $ret_val
}
# returns 0 if tables are ready, 1 otherwise.
# Polls (up to ~1 minute, 5s apart) until both the 'users' and
# 'applications' tables exist.
# Fixed: the loop used the bash-only '[[ ... ]]' test, which is a runtime
# error under the '#!/usr/bin/env sh' shebang on dash/ash; replaced with
# the POSIX '[ ... ]' form (same for the $(($tries+1)) arithmetic).
check_tables_exist()
{
    count_tables=0
    tries=0

    sql="SELECT COUNT(*) FROM (
        SELECT FROM pg_tables
        WHERE schemaname = 'public'
        AND tablename = 'users' OR tablename = 'applications'
    ) AS tables;"

    # expect two tables
    while [ "$count_tables" -ne 2 ] ; do
        tries=$((tries+1))

        count_tables=$(PGPASSWORD=${OPG_LPA_POSTGRES_PASSWORD} psql ${API_OPTS} ${OPG_LPA_POSTGRES_NAME} -tAc "$sql")

        # error codes mean there are no tables
        if [ "$?" -ne "0" ] ; then
            count_tables=0
        fi

        # one minute
        if [ $tries -gt 12 ] ; then
            break
        fi

        sleep 5
    done

    ret_val=1
    if [ "$count_tables" -eq 2 ] ; then
        ret_val=0
    fi

    return $ret_val
}
# Refuse to run against production.
# (fixed: '==' inside '[ ]' is a bashism that fails on dash/ash; use the
# POSIX '=' operator to match the '#!/usr/bin/env sh' shebang)
if [ "$OPG_LPA_STACK_ENVIRONMENT" = "production" ]; then
    echo "These scripts must not be run on production."
    exit 1
fi

# Poll pg_isready until postgres accepts connections (max 90s)
echo "Waiting for postgres to be ready"
timeout 90s sh -c 'pgready=1; until [ ${pgready} -eq 0 ]; do pg_isready -h ${OPG_LPA_POSTGRES_HOSTNAME} -d ${OPG_LPA_POSTGRES_NAME}; pgready=$? ; sleep 5 ; done'

echo "Checking database exists"
check_db_exists
if [ "$?" -ne "0" ] ; then
    echo "ERROR: database does not exist"
    exit 1
fi

echo "Waiting for tables to be ready"
check_tables_exist
if [ "$?" -ne "0" ] ; then
    echo "ERROR: Seeding aborted; database tables not ready in a timely fashion"
    exit 1
fi

# Wipe existing rows, then load the test users and applications
echo "Seeding data"
PGPASSWORD=${OPG_LPA_POSTGRES_PASSWORD} psql ${API_OPTS} \
    ${OPG_LPA_POSTGRES_NAME} \
    -f clear_tables.sql

PGPASSWORD=${OPG_LPA_POSTGRES_PASSWORD} psql ${API_OPTS} \
    ${OPG_LPA_POSTGRES_NAME} \
    -f seed_test_users.sql

PGPASSWORD=${OPG_LPA_POSTGRES_PASSWORD} psql ${API_OPTS} \
    ${OPG_LPA_POSTGRES_NAME} \
    -f seed_test_applications.sql
| true
|
e8de0bb79b88c17467956c4b5dd5d5d5c4ff8fa8
|
Shell
|
ray-project/ray
|
/ci/run/run_bazel_test_with_sharding.sh
|
UTF-8
| 934
| 3.796875
| 4
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# shellcheck disable=SC2046

# Shard bazel tests and then run bazel test
# Passes arguments through
set -x

# Split argv into: the --test_tag_filters value, other flags, and targets
test_tag_filters=""
optional_args=()
targets=()

for arg in "$@"; do
  shift
  if [[ "${arg:0:19}" == "--test_tag_filters=" ]]
  then
    test_tag_filters="${arg:19}"
  elif [[ "${arg:0:1}" == "-" ]]
  then
    optional_args+=("$arg")
  else
    targets+=("$arg")
  fi
done

# Ask the sharding helper which targets belong to this Buildkite shard
SHARD=$(python ./ci/ray_ci/bazel_sharding.py --exclude_manual --index "${BUILDKITE_PARALLEL_JOB}" --count "${BUILDKITE_PARALLEL_JOB_COUNT}" --tag_filters="$test_tag_filters" "${targets[@]}")

# If no targets are assigned to this shard, skip bazel test run.
# Otherwise, pass the list of targets to `bazel test`
if [[ -z "$SHARD" ]]
then
  echo "No targets found for this shard, exiting"
else
  echo "$SHARD"
  echo "$SHARD" | xargs bazel test --test_tag_filters="$test_tag_filters" "${optional_args[@]}"
fi
| true
|
272e0280a7d32e2944eb237a346a330038a79bf4
|
Shell
|
bonnetb/quickstart
|
/ArjunaJTS/standalone/run.sh
|
UTF-8
| 931
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# Run the JTS standalone quickstart twice: once with JacOrb (the default)
# and once with the JDK ORB (selected via the system properties below).

# ALLOW JOBS TO BE BACKGROUNDED
set -m

# System properties that switch ORB portability to the JDK (javaidl) ORB
JDKORBPROPS="-DOrbPortabilityEnvironmentBean.orbDataClassName=com.arjuna.orbportability.internal.orbspecific.versions.javaidl_1_4 -DOrbPortabilityEnvironmentBean.orbImpleClassName=com.arjuna.orbportability.internal.orbspecific.javaidl.orb.implementations.javaidl_1_4"

echo "Running jts standalone quickstart using JacOrb"
mvn clean compile exec:java -Dexec.cleanupDaemonThreads=false -Dexec.mainClass=org.jboss.narayana.jta.quickstarts.TransactionExample
if [ "$?" != "0" ]; then
    # NOTE(review): 'exit -1' is not portable; most shells map it to 255
    echo jts standalone using JacOrb quickstart failed
    exit -1
fi

echo "Running jts standalone quickstart using JdkOrb"
mvn exec:java -Dexec.cleanupDaemonThreads=false -Dexec.mainClass=org.jboss.narayana.jta.quickstarts.TransactionExample $JDKORBPROPS
if [ "$?" != "0" ]; then
    echo jts standalone using JdkOrb quickstart failed
    exit -1
fi

echo "JTS standalone example succeeded"
| true
|
c349fb192300e4d6f4b0b681f9178f0aff049382
|
Shell
|
therealromster/crux-ports-romster
|
/games/pingus-levels/pingus-levels
|
UTF-8
| 586
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/sh
# Level chooser for Pingus: repeatedly prompt for a level file via zenity,
# remember the last choice in ~/.pingus.last, and launch pingus on it.

# Locate the pingus installation (system-wide, else relative to this script)
PINGUS_DIR=/usr/share
[ -x $PINGUS_DIR/games/pingus ] || PINGUS_DIR=$(pwd)/$(dirname $0)/..
[ -x $PINGUS_DIR/games/pingus ] || PINGUS_DIR=$(dirname $0)/..

# NOTE(review): '[ -f /$(type -P zenity) ]' tests '/'+path; the leading '/'
# makes the missing-zenity case work only by accident -- confirm intent
[ -f /$(type -P zenity) ] || echo zenity not found

cd $PINGUS_DIR/games/pingus/levels/ok
# Silence all further output (pingus/zenity chatter)
exec >/dev/null 2>&1

# Loop until the user cancels the file dialog (empty selection)
f=1
while [ "$f" != "" ]
do
    f="$(zenity --title="Last Level: $(cat ~/.pingus.last)" --file-selection)"
    [ "$f" != "" ] && echo $(basename $f) > ~/.pingus.last
    cd $PINGUS_DIR/games
    # Demo files replay a recording; regular files start the level directly
    if grep -q "<demo>" $f /dev/null
    then
        pingus -r "" -p $f "$@"
    else
        pingus $f "$@"
    fi
    cd $(dirname $f)
done
| true
|
b865ae26d67948ae2109ef68563a8d286a77f86e
|
Shell
|
ab08028/OtterExomeProject
|
/scripts/scripts_20180521/analyses/TREEMIX/treemix_Step_1_runTreemix.snp7.GoodParameters.sh
|
UTF-8
| 2,619
| 2.671875
| 3
|
[] |
no_license
|
######## can run in the shell (is very fast) ########
# after a lot of testing of treemix paramters (see sandbox scripts)
# I have found that the paramters that yield the most sensical tree are:
# -k 500 (or any amount of ld pruning); -global (from Gauntiz paper); use sample size correction (don't disable with -noss);
#### Also want to use files that have excluded 3 close relatives from the data set to not skew allele frequencies
#### Also want to do with and without baja
module load treemix

# Paths and input-file naming for this genotype date
gitdir=/u/home/a/ab08028/klohmueldata/annabel_data/OtterExomeProject/scripts/scripts_20180521/
scriptdir=$gitdir/analyses/TREEMIX/
genotypeDate=20181119
vcfdir=/u/flashscratch/a/ab08028/captures/vcf_filtering/${genotypeDate}_filtered/
treeFileDir=$vcfdir/treemixFormat/
header=snp_7_maxNoCallFrac_0.2_passingBespoke_passingAllFilters_postMerge_raw_variants
### using snp7 because it contains admixed individuals
### and I want those migration edges to show up
### it does also contain relatives which isn't amazing but shouldn't make a huge difference
### can also do with snp9a and see the difference
#header=snp_9a_forPCAetc_maxHetFilter_0.75_rmRelatives_rmAdmixed_passingBespoke_maxNoCallFrac_0.2_passingBespoke_passingAllFilters_postMerge_raw_variants

wd=/u/flashscratch/a/ab08028/captures/analyses/TREEMIX/$genotypeDate/snp7 # update if using snp7 8 etc
mkdir -p $wd
k=500
marker="sepCA-BAJ.exclRelatives"
infile=${header}.${marker}.frq.strat.treemixFormat.gz
root='CA,BAJ'# root is both
for m in {0..10}
do
outdir="root.${root}.mig.${m}.k.${k}.global.${marker}.treemix"
mkdir -p $wd/$outdir
treemix -i $treeFileDir/$infile -m ${m} -root ${root} -k ${k} -global -o $wd/$outdir/$outdir
done
######### CA Only (no Baja, no relatives) #############
k=500
marker="noBAJA.exclRelatives"
infile=${header}.${marker}.frq.strat.treemixFormat.gz #
root='CA'
for m in {0..10}
do
outdir="root.${root}.mig.${m}.k.${k}.global.${marker}.treemix"
mkdir -p $wd/$outdir
treemix -i $treeFileDir/$infile -m ${m} -root ${root} -k ${k} -global -o $wd/$outdir/$outdir
done
################ Experiments #######################
###### expt: try with AK as root : #########
######### CA Only (no Baja, no relatives) #############
k=500
marker="noBAJA.exclRelatives"
infile=${header}.${marker}.frq.strat.treemixFormat.gz #
root='AK'
for m in {0..3}
do
outdir="root.${root}.mig.${m}.k.${k}.global.${marker}.treemix"
mkdir -p $wd/$outdir
treemix -i $treeFileDir/$infile -m ${m} -root ${root} -k ${k} -global -o $wd/$outdir/$outdir
done
## try removing any site with missing data:
| true
|
1241a1c6d3eda0daec87fcad7915da6342931aaa
|
Shell
|
PhilipJLudlam/horde
|
/bin/quicklyGenerateHordeEBuilds.sh
|
UTF-8
| 3,214
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Regenerate Gentoo ebuilds for every Horde package listed on the cached
# pear.horde.org index page, then rebuild the package.use and
# package.keywords helper files from the temp logs the generator leaves
# in /tmp/generateHordeEBuilds.
rm -rf /tmp/generateHordeEBuilds/temp_*
mkdir -p /tmp/generateHordeEBuilds
# Cached copy of the Horde PEAR channel index; fetched by the full
# generateHordeEBuilds.sh run — this "quick" variant refuses to guess.
indexpage="/tmp/generateHordeEBuilds/pear.horde.org-index.html"
if [ ! -e ${indexpage} ]; then
echo "Information from the Horde website has not been downloaded."
echo "Please run generateHordeEBuilds.sh first."
exit 1
fi
# Every "<h3>Name" heading on the index page corresponds to one package.
tot=`grep -o "<h3>[A-Za-z0-9_]*" $indexpage | wc -l`
pos=0
for i in `grep -o "<h3>[A-Za-z0-9_]*" $indexpage`
do
# Strip the leading "<h3>" to get the bare package name.
j=`echo $i | cut -c 5- `
hordepackage="horde/$j"
category=""
# NOTE(review): `cut -c 6-` takes the *tail* of the name and compares it
# to "horde_"; presumably this is meant to detect a horde_ prefix —
# confirm against epearForHorde.php's expectations.
k=`echo $j | cut -c 6- `
if [ "$k" != "horde_" ]; then
category="--wwwapps"
fi
pos=$(($pos+1))
echo "Creating ebuild for $hordepackage ($pos of $tot)"
/usr/bin/php ./epearForHorde.php --force $category $hordepackage
done
#Clean up the exceptions after the build:
# 1. Remove any PHPUnit builds
# as they are not part of our concern
rm -rf /usr/local/horde/horde/dev-php/phpunit-*
# 2. Sort out the log files generated
# 2.a. The USE/IUSE field can use a lot of packages
# So sort them, remove duplicates and pretty it up
cat /tmp/generateHordeEBuilds/temp_iuse | sort | uniq > /tmp/generateHordeEBuilds/temp_iuse2
grep -v "phpunit-" /tmp/generateHordeEBuilds/temp_iuse2 \
> /tmp/generateHordeEBuilds/temp_iuse3
# Write the package.use header (heredoc), then the sorted flag list,
# then a trailing separator.
cat > ./../package.use << EOF
package.use for Horde
=====================
Below is a list of use flags used in this repository.
Enabling a use flag will mostly likely pull in other packages (the ebuilds for which are either supplied here or are in the main Gentoo repository).
Please review the list before using it.
EOF
cat >> ./../package.use < /tmp/generateHordeEBuilds/temp_iuse3
cat >> ./../package.use << EOF
----
EOF
# 2.b. The Keywords files need the same treatment
cat /tmp/generateHordeEBuilds/temp_keywords | sort | uniq > /tmp/generateHordeEBuilds/temp_keywords2
cat /tmp/generateHordeEBuilds/temp_keywords_with_version | sort | uniq > /tmp/generateHordeEBuilds/temp_keywords_with_version2
# Drop packages we never want keyworded (story runner, old APC pecl).
grep -v "dev-php/phpunit-phpunit_story" /tmp/generateHordeEBuilds/temp_keywords2 | \
grep -v "dev-php/pecl-apc" \
>/tmp/generateHordeEBuilds/temp_keywords3
grep -v "dev-php/phpunit-phpunit_story" /tmp/generateHordeEBuilds/temp_keywords_with_version2 |\
grep -v "dev-php/pecl-apc" \
>/tmp/generateHordeEBuilds/temp_keywords_with_version3
# package.keywords: header, version-less atoms, separator, versioned
# atoms, trailing separator.
cat > ./../package.keywords << EOF
package.keywords for Horde
==========================
Below is a list of package atoms in this repository.
By default, they are tagged as "unstable".
If you run stable by default, then the list below will assist in setting the exceptions you need in your package.keywords file
The first list is a generic excpetion for the package, regardless of version number.
The second list is for the specific package atoms supplied in this repository.
Please review the list before using it.
EOF
cat >> ./../package.keywords < /tmp/generateHordeEBuilds/temp_keywords3
cat >> ./../package.keywords << EOF
-----
Below is the second list for the specific package atoms supplied in this repository.
EOF
cat >> ./../package.keywords < /tmp/generateHordeEBuilds/temp_keywords_with_version3
cat >> ./../package.keywords << EOF
----
EOF
| true
|
72a58b4975b9ddf89f30f6eec1136dfe3462b2f7
|
Shell
|
mitsu9/dotfiles
|
/zsh/zshrc
|
UTF-8
| 5,961
| 2.828125
| 3
|
[] |
no_license
|
################
## キーバインド ##
################
bindkey -e
##################
## 文字コード関係 ##
##################
# 文字コードの設定
export LANG=ja_JP.UTF-8
# 日本語ファイル名を表示可能にする
setopt print_eight_bit
###########
## 色関係 ##
###########
# 色の使用できるようにする
autoload -U colors; colors
# PROMPT変数内で変数参照する
setopt prompt_subst
# プロンプト表示
local number_of_jobs="%(1j.%F{208} / %f%F{226}%B%j%b%f.)"
PROMPT='%F{yellow}%* %F{green}%n@%m %F{reset_color}at %F{cyan}$(pwd | sed -e "s,^$HOME,~," | perl -pe "s/~\/(.ghq|.go\/src)\/.+?\//ghq:/")%F{reset_color}${number_of_jobs}'
PROMPT=$PROMPT' ${vcs_info_msg_0_} %F{cyan}%F{reset_color}
>>> '
SPROMPT='%F{red}??? もしかして %r のこと? [No/Yes/Abort/Edit]: %F{reset_color}'
# vcs_infoロード
autoload -Uz vcs_info
# vcsの表示
zstyle ':vcs_info:*' formats "on %F{magenta}%b %c %u %f" #通常
zstyle ':vcs_info:*' actionformats "on %F{magenta}%b%F{white}|%F{red}%a " #rebase 途中,merge コンフリクト等 formats 外の表示
zstyle ':vcs_info:git:*' check-for-changes true #formats 設定項目で %c,%u が使用可
zstyle ':vcs_info:git:*' stagedstr "%F{green}✚" #commit されていないファイルがある
zstyle ':vcs_info:git:*' unstagedstr "%F{blue}✹" #add されていないファイルがある
# プロンプト表示直前にvcs_info呼び出し
precmd() { vcs_info }
# ls時の色
export CLICOLOR=true
#############
## 補完関係 ##
#############
# 補完候補を一覧で表示
setopt auto_list
# 補完キー連打で候補順に自動で補完
setopt auto_menu
# コマンドで可能なオプションの補完
autoload -U compinit
compinit
# 補完で小文字でも大文字にマッチさせる
zstyle ':completion:*' matcher-list 'm:{a-z}={A-Z}'
#############
## 履歴関連 ##
############
# 履歴を保存するファイル
HISTFILE=~/.zsh_history
# メモリに保存する履歴の件数
HISTSIZE=1000000
# ファイルに保存する履歴の件数
SAVEHIST=1000000
# 直前と重複するコマンドを無視
setopt hist_ignore_dups
# 履歴と重複するコマンドを保存しない
setopt hist_save_no_dups
# zsh間で履歴を共有
setopt share_history
###########
## その他 ##
###########
# ディレクトリ名のみで移動
setopt auto_cd
# cd時に自動でpushd
setopt auto_pushd
# 間違えてコマンド入力した時に修正してくれる
setopt correct
# ビープを鳴らさなk
setopt nobeep
# Rename the current iTerm2 tab/window: emits the xterm title escape
# sequence (ESC ]0; <text> BEL) with the given arguments as the text.
function title {
  local esc_open esc_close
  esc_open='\033]0;'
  esc_close='\007'
  echo -ne ${esc_open}$*${esc_close}
}
###########
## alias ##
###########
alias ..=".."
alias ...="../.."
alias ts="tig status"
alias c="clear"
alias gc="git commit -m"
alias gps="git push"
alias gf="git fetch -p"
alias gpl="git pull"
alias gco="git checkout"
alias gs="git switch"
alias be="bundle exec"
alias h="history -30"
alias g="git"
alias gls='cd $(ghq list -p | fzf --preview "cat {}/README.*")'
alias d='docker'
alias dc='docker-compose'
alias tf='terraform'
alias t='tmux'
#############
## history ##
#############
# ZLE widget: fuzzy-search the shell history with peco and place the
# chosen command on the edit buffer (bound to Ctrl-R below).
function peco-history-selection() {
# history -nr 1: full history, newest first, without event numbers.
# awk '!a[$0]++' keeps only the first (most recent) occurrence of each
# command; peco filters interactively seeded with the current buffer;
# sed re-expands the literal "\n" zsh stores for multi-line commands.
BUFFER="$(history -nr 1 | awk '!a[$0]++' | peco --query "$LBUFFER" | sed 's/\\n/\n/')"
# Move the cursor to the end of the inserted command and redraw.
CURSOR=$#BUFFER
zle reset-prompt
}
zle -N peco-history-selection
bindkey '^R' peco-history-selection
###################
## hub extension ##
###################
alias search-issues='hub browse -- issues/$(hub issue | peco | tr -s '\'' '\'' '\'' '\'' | cut -d'\'' '\'' -f 2 | cut -c 2-)'
## fzf
export FZF_TMUX=1
export FZF_DEFAULT_OPTS="--height 50% --layout=reverse --border"
#############
## パス設定 ##
############
# brew
ARCH=$(uname -m)
if [[ $ARCH == arm64 ]]; then
eval $(/opt/homebrew/bin/brew shellenv)
elif [[ $ARCH == x86_64 ]]; then
eval $(/usr/local/bin/brew shellenv)
fi
# 重複する要素を自動的に削除
typeset -U path cdpath fpath manpath
# for brew
export PATH=/usr/local/sbin:$PATH
# for vim(/usr/binより/usr/local/binを優先的に見に行くようにする)
export PATH=/usr/local/bin:$PATH
# go
export PATH=$PATH:/usr/local/go/bin
export GOPATH=$HOME/.go:$HOME/.ghq
export PATH=$PATH:$HOME/.go/bin
# awscli
export PATH=~/.local/bin:$PATH
# rust
export PATH=$HOME/.cargo/env:$PATH
# git
export PATH=/usr/local/bin/git:$PATH
# gcloud
source "$(brew --prefix)/Caskroom/google-cloud-sdk/latest/google-cloud-sdk/path.zsh.inc"
source "$(brew --prefix)/Caskroom/google-cloud-sdk/latest/google-cloud-sdk/completion.zsh.inc"
# direnv
eval "$(direnv hook zsh)"
# mysql@5.7
export PATH=/usr/local/opt/mysql@5.7/bin/:$PATH
###########
## ctags ##
###########
alias ctags="`brew --prefix`/bin/ctags"
# asdf
#autoload -Uz compinit && compinit
export PATH=$HOME/.asdf/bin:$PATH
export PATH=$PATH:$HOME/.asdf/shim
source $HOME/.asdf/asdf.sh
source $HOME/.asdf/completions/asdf.bash
# flutter SDK
export PATH=$PATH:$HOME/development/flutter/bin
######################
## useful functions ##
######################
# Prompt for an MFA device ARN and code, call AWS STS get-session-token,
# and export the temporary credentials into the current shell session.
function set-aws-sts-session-token {
# Drop any stale credentials so the STS call uses the base profile.
unset AWS_ACCESS_KEY_ID
unset AWS_SECRET_ACCESS_KEY
unset AWS_SESSION_TOKEN
# zsh-style prompted reads (read NAME\?'prompt').
read SERIAL_NUMBER\?'Input MFA Serial Number: '
read TOKEN_CODE\?'Input MFA Code: '
OUTPUT=`aws sts get-session-token \
--serial-number ${SERIAL_NUMBER} \
--token-code ${TOKEN_CODE}`
# Extract the three credential fields from the JSON response.
AWS_ACCESS_KEY_ID=`echo $OUTPUT | jq -r .Credentials.AccessKeyId`
AWS_SECRET_ACCESS_KEY=`echo $OUTPUT | jq -r .Credentials.SecretAccessKey`
AWS_SESSION_TOKEN=`echo $OUTPUT | jq -r .Credentials.SessionToken`
export AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID
export AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
export AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN
# NOTE(review): the echoes below print the secret key and session token
# to the terminal (and scrollback) — confirm that is acceptable.
echo Set envs
echo AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID
echo AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
echo AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN
}
###################
## local setting ##
###################
# githubにあげたくない設定を読み込む
source ~/.zshrc_local
| true
|
41c539e5d18b137c584ae0e8287073b287c7c86c
|
Shell
|
outsideris/.vim
|
/dotfiles/extensions/tmux-powerline/segments/np_mpd_simple.sh
|
UTF-8
| 688
| 3.578125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Simple now-playing script for mpd. Works with streams!
# Only tested on OS X... should work the same way on other platforms though.
# Prints "♫ <track>" and exits 0 when mpd is playing; exits 1 otherwise.

trim_method="trim" # Can be {trim or roll}.
max_len=40 # Trim output to this length.
roll_speed=2 # Roll speed in characters per second.

segment_path=$(dirname $0)
source "$segment_path/../lib.sh"

np=$(mpc current 2>&1)
if [ $? -eq 0 ] && [ -n "$np" ]; then
	# Hide the segment while playback is paused.
	mpc | grep "paused" > /dev/null
	if [ $? -eq 0 ]; then
		exit 1
	fi
	case "$trim_method" in
	"roll")
		# BUGFIX: the arguments here were garbled ("$65,378 2"); pass the
		# configured length and speed declared above instead — presumably
		# lib.sh's roll_text(text, max_len, speed). TODO: confirm the
		# roll_text signature against lib.sh.
		np=$(roll_text "${np}" "${max_len}" "${roll_speed}")
		;;
	"trim")
		np=${np:0:max_len}
		;;
	esac
	echo "♫ ${np}"
	exit 0
fi
exit 1
| true
|
3dcdda349d82f405367925f8be19856e6747af79
|
Shell
|
hpchud/vcc-torque
|
/hooks/headnode.sh
|
UTF-8
| 604
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Torque head-node service hook.
# $1 carries the value of the service key — here, the torque server name.
# Images can customise this hook to perform different tasks.

mom_config=/var/spool/torque/mom_priv/config

# Point this node at the torque server named by the service key.
echo $1 > /var/spool/torque/server_name

# Write the MOM daemon configuration in one grouped redirection.
{
echo "\$pbsserver $1"
echo "\$logevent 255"
echo "\$mom_host vnode_$(hostname)"
} > "$mom_config"

# The first run of this hook should happen before services start.
# For a context update, we should handle restarting the service somehow.
| true
|
63ccdc5f3a6cbcfce3f169676f0997a9884ee949
|
Shell
|
adibodas/CS2043_PS2
|
/wordfreq.sh
|
UTF-8
| 779
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Summarise the tweets in ./tweets/*.txt: write the maximum, minimum and
# average word counts (max_words.txt, min_words.txt, avg_words.txt) and
# the same three statistics for character counts (*_chars.txt).

# ---- word counts ----
# Per-file word counts, numerically sorted (smallest first).
find ./tweets -maxdepth 1 -type f -name "*.txt" -exec wc -w {} \; | sort -n > freqlist.txt
awk 'END {print $1}' freqlist.txt > max_words.txt
awk 'NR == 1 {print $1}' freqlist.txt > min_words.txt
rm freqlist.txt

# Average = total words across all tweets / number of tweet files.
total=$(cd tweets && wc -w *.txt | tail -n 1 | awk '{print $1}')
count=$(cd tweets && ls -l *.txt | wc -l)
echo $((total / count)) > avg_words.txt

# ---- character counts ----
find ./tweets -maxdepth 1 -type f -name "*.txt" -exec wc -c {} \; | sort -n > freqlist.txt
awk 'END {print $1}' freqlist.txt > max_chars.txt
awk 'NR == 1 {print $1}' freqlist.txt > min_chars.txt
rm freqlist.txt

total=$(cd tweets && wc -c *.txt | tail -n 1 | awk '{print $1}')
count=$(cd tweets && ls -l *.txt | wc -l)
echo $((total / count)) > avg_chars.txt
| true
|
23e2aee69715a6de2c94bb37a7e02217bcfc61b2
|
Shell
|
bveratudela/audit
|
/sqoop/sqoop-hue.sh
|
UTF-8
| 1,866
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
# Import Hue user/group audit data into HDFS with sqoop (-i), or remove
# the landing directory (-c). Connection settings come from the shared
# properties file and sqoop-hue.options.
source ../common.properties
TABLE_NAME=hue
LANDING_ZONE=${PARENT_ZONE}/landing/${TABLE_NAME}
# Run the sqoop import into the dated landing zone.
# NOTE(review): DB_QUERY is assigned but the actual sqoop call below uses
# only the options file — confirm whether the query belongs there.
function import() {
# DB_QUERY='SELECT "0.0.0.0", a.username, "hue", UNIX_TIMESTAMP(a.last_login) from hue.auth_user a, hue.auth_user_groups b, hue.auth_group c WHERE $CONDITIONS AND a.id = b.user_id and b.group_id = c.id order by a.id;'
#DB_QUERY="SELECT a.username,'hue', a.last_login from hue.auth_user a, hue.auth_user_groups b, hue.auth_group c WHERE a.id = b.user_id and b.group_id = c.id AND \$CONDITIONS order by a.id"
DB_QUERY="SELECT '0.0.0.0', a.username,'hue', (a.last_login) from hue.auth_user a, hue.auth_user_groups b, hue.auth_group c WHERE a.id = b.user_id and b.group_id = c.id AND \$CONDITIONS order by a.id"
echo "Importing ${TABLE_NAME} ..."
#sqoop import --connect jdbc:mysql://${DB_HOST}/hue --username ${DB_USER} --password ${DB_PASS} --query "${DB_QUERY}" --target-dir hdfs://${LANDING_ZONE}/dt=${INGEST_YEAR}${INGEST_MONTH}${INGEST_DAY} -m 1 --hive-import --hive-table ${TABLE_NAME} --create-hive-table --fields-terminated-by "|"
#sqoop import --connect jdbc:oracle:thin:@//${DB_HOST} --username ${DB_USER} --password ${DB_PASS} --query "${DB_QUERY}" --target-dir hdfs://${LANDING_ZONE}/dt=${INGEST_YEAR}${INGEST_MONTH}${INGEST_DAY} -m 1 --hive-import --hive-table OPS_TEAM_AUDIT.${TABLE_NAME} --create-hive-table --fields-terminated-by "|"
sqoop import --options-file sqoop-hue.options --target-dir hdfs://${LANDING_ZONE}/
}
# Delete the landing zone recursively from HDFS.
function clean() {
echo "Removing ${LANDING_ZONE} ..."
hdfs dfs -rm -R -f hdfs://${LANDING_ZONE}
}
# Option handling: -c clean, -i import; anything else prints usage.
while getopts ":ci" opt
do
case ${opt} in
c ) clean
;;
i ) import
;;
\?) echo "Usage: ${0} [-c] [-i]"
;;
: ) echo "Invalid option: ${OPTARG} requires an argument" 1>&2
;;
esac
done
shift $((OPTIND -1))
| true
|
da4e5e090fa29ca943b5dca21620b1e3c95d5385
|
Shell
|
DevKleber/DotFiles
|
/fonts/roboto.sh
|
UTF-8
| 461
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Download the Roboto font family from Google Fonts and install it into
# /usr/share/fonts, unless it is already installed.
echo "Instalando Roboto"

# BUGFIX: the original used "$pwd" (an undefined lowercase variable), so
# the working path collapsed to "/fonts/files". $PWD is the shell's
# current directory.
path="$PWD/fonts/files"
name_folder="Roboto"
folder_zip="$name_folder.zip"

# Skip everything if the font directory is already present system-wide.
if [ ! -d "/usr/share/fonts/$name_folder" ]; then
	wget -c "https://fonts.google.com/download?family=Roboto" --output-document="$path/$folder_zip"
	unzip "$path/$folder_zip" -d "$path/$name_folder"
	sudo mv "$path/$name_folder" /usr/share/fonts

	echo "Removendo fonte"
	sleep 3
	# Clean up the downloaded archive.
	# NOTE(review): the extracted folder was already moved above, so this
	# rm -r targets a path that no longer exists — confirm if intended.
	rm "$path/$folder_zip"
	rm -r "$path/$name_folder"
fi
| true
|
d014b4a643597b6f71e42d867088413625b620e1
|
Shell
|
chrissikath/C.Elegans
|
/20211022_EasySpliceVariantAnalyses/visualization_splice_variants.sh
|
UTF-8
| 2,425
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/sh
#Author Christina Kuhn @github: https://github.com/chrissikath
# pre-used scripts from Alex
# Shell script that finds splice variants from RNA-Seq files, part 2
# R version: 3.6
# BiocManager::install(version = "3.10")
# install R packages

#1.0 files
GENOME="/home/horn/2021_Celegans/20210902_alex_data/genome/wormbase/caenorhabditis_elegans.PRJNA13758.WBPS10.genomic.fa"
exon_information_script="/home/christina/C_elegans/analysis/20211022_EasySpliceVariantAnalyses/exon-information.r"
packages_functions_script="/home/christina/C_elegans/analysis/20211022_EasySpliceVariantAnalyses/packages_functions_Alex.R"

#2.0 files
motifs_gene_file="motifs_adgrl1"
set_working_dir="/home/christina/C_elegans/analysis/20211022_EasySpliceVariantAnalyses/"
visualization_splice_variants_script="/home/christina/C_elegans/analysis/20211022_EasySpliceVariantAnalyses/visualization_splice_variants.R"

#Gene information
# NOTE(review): gene, start, stop and strand are declared but not used in
# the shell part below — confirm whether they belong in the R calls.
gene="Lat-1" # define gene name
gene_name="ADGRL1"
start="8896841" # define start
stop="8908666" # define stop
chr="II" #define chromosome
strand="+" #define strand

############################## 1.0 shell part ###################
# exon-based comparison
cd results
R --vanilla < $exon_information_script $gene_name $packages_functions_script \
`ls | awk -F '/' '{print $NF}' | awk -F '.' '{print $1}'`
cd ..
# concat all appended files
cat results/*_append.gtf > all_transcripts.gtf
# make bed file from exons.gtfs
cat results/unique_exons.gtf | awk 'OFS="\t" {print $1,$4-1,$5,$12,$7}' | tr -d '";' > results/unique_exons.bed
bedtools getfasta -fi ${GENOME} \
-bed results/unique_exons.bed \
-fo exon_seqs.fa -name
# get sequence of whole locus
sed -e 1b -e '$!d' results/unique_exons.bed > locus.tmp
first=`cat locus.tmp | head -1 | awk -v x=2 '{print $x}'`
last=`cat locus.tmp | sed -n 2p | awk -v x=3 '{print $x}'`
rm locus.tmp
# BUGFIX: the original wrote "$chromosome", which is never defined (the
# variable declared above is $chr), so whole_locus.bed was written with
# an empty chromosome column. Use $chr.
echo $chr $first $last > whole_locus.tmp
cat whole_locus.tmp | awk -v OFS='\t' '{print $1, $2,$3}' > whole_locus.bed
rm whole_locus.tmp
bedtools getfasta -fi ${GENOME} \
-bed whole_locus.bed \
-fo whole_locus.fa -name
############################# 2.0 R Part ##########################################
# Important output files:
# transcripts variants pdf
# sequence_and_orf_analysis pdf
# orfs.fa
R --vanilla < $visualization_splice_variants_script $gene_name $motifs_gene_file $packages_functions_script $set_working_dir
| true
|
6608f855ea265a38d326922fdfc3c3615137087d
|
Shell
|
omniti-labs/omniti-ms
|
/build/nginx/build.sh
|
UTF-8
| 2,475
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, Version 1.0 only
# (the "License"). You may not use this file except in compliance
# with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2011-2013 OmniTI Computer Consulting, Inc. All rights reserved.
# Use is subject to license terms.
#
# Build script for the omniti/server/nginx IPS package: downloads the
# nginx source, configures/compiles a 64-bit build and packages it,
# using the shared helpers from lib/functions.sh.

# Load support functions
. ../../lib/functions.sh

# Package identity and metadata consumed by the shared build framework.
PROG=nginx
VER=1.11.5
VERHUMAN=$VER
PKG=omniti/server/nginx
SUMMARY="nginx web server"
DESC="nginx is a high-performance HTTP(S) server and reverse proxy"

PREFIX=/opt/nginx

BUILD_DEPENDS_IPS="library/security/openssl library/pcre"
DEPENDS_IPS=$BUILD_DEPENDS_IPS

# 64-bit only build; configure flags select the optional nginx modules.
BUILDARCH=64

CONFIGURE_OPTS_64=" \
    --with-ipv6 \
    --with-threads \
    --with-http_v2_module \
    --with-http_ssl_module \
    --with-http_addition_module \
    --with-http_xslt_module \
    --with-http_flv_module \
    --with-http_gzip_static_module \
    --with-http_mp4_module \
    --with-http_random_index_module \
    --with-http_realip_module \
    --with-http_secure_link_module \
    --with-http_stub_status_module \
    --with-http_sub_module \
    --with-http_dav_module \
    --prefix=$PREFIX \
"

# Link against (and embed a runpath to) the 64-bit /opt/omni libraries.
LDFLAGS64="$LDFLAGS64 -L/opt/omni/lib/$ISAPART64 -R/opt/omni/lib/$ISAPART64"
CFLAGS64="$CFLAGS64"

# Install the SMF service manifest/method and preserve the stock
# nginx.conf as nginx.conf.dist so upgrades don't clobber local config.
add_extra_files() {
    logmsg "--- Copying SMF manifest"
    logcmd mkdir -p ${DESTDIR}/lib/svc/manifest/network
    logcmd cp $SRCDIR/files/http-nginx.xml ${DESTDIR}/lib/svc/manifest/network
    logcmd mkdir -p ${DESTDIR}/lib/svc/method/
    logcmd cp $SRCDIR/files/http-nginx ${DESTDIR}/lib/svc/method
    logmsg "Installing custom files and scripts"
    logcmd mv $DESTDIR$PREFIX/conf/nginx.conf $DESTDIR$PREFIX/conf/nginx.conf.dist
}

# Standard framework driver sequence: fetch, patch, build, package.
init
download_source $PROG $PROG $VER
patch_source
prep_build
build
make_isa_stub
add_extra_files
make_package
clean_up

# Vim hints
# vim:ts=4:sw=4:et:
| true
|
d6872b00e3053c389bbb6561128ccd57c80e1c98
|
Shell
|
jdidat/Assembly-projects
|
/BashScripting/hello_world.sh
|
UTF-8
| 94
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Minimal demo: store a message in a variable, then print it.

# STRING holds the text to display.
STRING="Hello World"

# Write the message (plus a trailing newline) to stdout.
printf '%s\n' "$STRING"
| true
|
f47179b7a653a8869cc1cb1ea7a60bacc4615a03
|
Shell
|
XamLua/itmo-3
|
/OSP/l3/13.bash
|
UTF-8
| 1,815
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# For every regular file listed in directory $1, inspect its NFSv4 ACL
# (Solaris `ls -V` / nawk) and print the files on which the current user
# ($USER) is granted write permission by an "allow" entry.
shopt -s expand_aliases
# Use the XPG4 id(1), whose options behave as expected on Solaris.
alias id='/usr/xpg4/bin/id'
tres=""   # candidate file from the current iteration
res=""    # accumulated result list
user="$USER"
# Unknown user: nothing to report.
if ! getent passwd "$user" > /dev/null ; then
echo "No such user"
exit 0
fi
# NOTE(review): iterating over `ls $1` word-splits names, and the -f test
# below checks paths relative to the CWD rather than $1 — confirm callers
# run this from inside the target directory.
for file in $(ls $1)
do
if [ -f "$file" ]; then
# Numeric owner uid and gid of the file.
ouid=$(ls -n "$file" | nawk '{print $3}')
ogid=$(ls -n "$file" | nawk '{print $4}')
# Collect ACL entries, de-duplicate per principal (first entry wins),
# and keep only entries ending in a write grant.
acl=$(ls -V "$file" | tail +2 | egrep '.*:.-.p..........:......:(allow|deny)$' | tr -d " \t")
acl=$(echo "$acl" | sed 's/\(.*\):\(.*:..............:......:.*\)/\1;\2/p' | nawk -F ":" '!_[$1]++')
acl=$(echo "$acl" | sed 's/\(.*\);\(.*:..............:......:.*\)/\1:\2/p' | egrep "w$" | uniq)
exec_acl=$(echo "$acl" | nawk -F ":" '{print $1,$2}')
# NOTE(review): exiting here aborts the whole scan as soon as one file
# has no matching ACL entries; `continue` may have been intended.
if [ -z "$exec_acl" ]; then
exit 0
fi
IFS=$'\n';
for temp in $exec_acl
do
fp=$(echo $temp | cut -d " " -f1)   # principal class (owner@/group@/user/group/everyone@)
sp=$(echo $temp | cut -d " " -f2)   # named user or group, if any
case $fp in
"owner@" )
# BUGFIX: was `[ "$(id -u $user)"=="$ouid" ]` — without spaces the test
# sees a single non-empty string and is always true.
if [ "$(id -u "$user")" = "$ouid" ]; then
tres="$file"
fi
;;
"group@" )
# Membership in the file's owning group: primary-group members from
# passwd plus supplementary members from the group entry.
members="$(getent passwd | nawk -v "gid=$ogid" -F ':' '$4==gid { printf "%s\\n",$1 }')"
members+="$(getent group $ogid | cut -d ":" -f4 | nawk -v RS=',' '{ printf "%s\\n",$1 }')"
if echo -e $members | grep "$user" > /dev/null ; then
tres="$file"
fi
;;
"everyone@" )
tres="$file"
;;
"user" )
# BUGFIX: was `[ "$sp"=="$user" ]` — same always-true problem as above.
if [ "$sp" = "$user" ]; then
tres="$file"
fi
;;
"group" )
# Named group: resolve its gid, then check membership as for group@.
sgid="$(getent group $sp | cut -d ":" -f3)"
members="$(getent passwd | nawk -v "gid=$sgid" -F ':' '$4==gid { print $1 }')"
members+="$(getent group $sgid | cut -d ":" -f4 | nawk -v RS=',' '{print $1 }')"
if echo -e $members | grep "$user" > /dev/null ; then
tres="$file"
fi
;;
esac
done
if [ ! -z "$tres" ]; then
res+="$tres\n"
fi
fi
done
# Print the collected files, one per line, dropping blanks.
echo -e $res | uniq | sed '/^$/d'
exit 0
| true
|
6e118f525fcf7c458f92d1918176b5600bbe5f4a
|
Shell
|
me176c-dev/android_device_asus_K013
|
/firmware/fingerprint/addond.sh
|
UTF-8
| 448
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/sbin/sh
#
# ADDOND_VERSION=1
#
# /system/addon.d/51-asus-fingerprint.sh
# Change build fingerprint to the one from the stock ROM to pass Google Play certification.
#
# New fingerprint: %FINGERPRINT%
#
# %FINGERPRINT% is a template placeholder substituted at build time.
# $S is supplied by the addon.d environment — presumably the mounted
# /system path; TODO confirm against the recovery's backuptool.
set -e
case "$1" in
post-restore)
# After the OTA restores /system, rewrite both fingerprint properties
# in place so they match the stock ROM value.
sed -i -e "s@^\(ro\.build\.fingerprint\)=.*@\1=%FINGERPRINT%@" "$S/build.prop"
sed -i -e "s@^\(ro\.vendor\.build\.fingerprint\)=.*@\1=%FINGERPRINT%@" "$S/vendor/build.prop"
;;
esac
| true
|
a401ed5348227292c8d5e101cf825f7cc16d1715
|
Shell
|
Vicchio/palmetto_bin
|
/phase-finder.sh
|
UTF-8
| 971
| 3.71875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Stephen Patrick Vicchio
# 2019-11-11
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Identifies the phase of an older job!
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Given a zc-#######.JOBID file, looks up the node the job ran on from
# the PBS output file and writes the node's "phase" (hardware group) to
# zp-phase.PHASE in the current directory.
WORKDIR=$PWD
input_file=${1}
# Validate the argument: must be present and end in .JOBID.
if [[ -z ${input_file} ]]; then
echo ''
echo 'Please specify a zc-#######.JOBID to copy'
echo ''
elif [[ ${input_file} != *.JOBID ]]; then
echo ''
echo 'Please specify a zc-#######.JOBID to copy'
echo ''
else
# Strip the .JOBID extension, then the "zc-" prefix, to get the job id.
JOBID_NAME=${input_file%.*}
JOBID_MOVE=$(echo ${JOBID_NAME} | cut -c 4- )
# Node name: third field of the first exec_host line in the PBS output
# file (*o<jobid>), truncated to 8 characters.
node_info_cut=$(grep -m 1 "exec_host" *o${JOBID_MOVE} | awk '{print $3}' | cut -c1-8)
# Ask PBS for the node's advertised phase.
line_number_resources_availible=$(pbsnodes ${node_info_cut} | grep resources_available.phase)
echo -e 'Node: ' ${node_info_cut} > ${WORKDIR}/zp-phase.PHASE
echo -e 'The calculation is currently running on: \n' ${line_number_resources_availible} >> ${WORKDIR}/zp-phase.PHASE
fi
| true
|
9fc5d4ed4ce0fdadeac0153f84ab55a32ca47727
|
Shell
|
Glasslock/LiteCloud
|
/contrib/init/ltcdd.init
|
UTF-8
| 1,263
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# ltcld The ltcl core server.
#
#
# chkconfig: 345 80 20
# description: ltcld
# processname: ltcld
#
# SysV init script: start/stop/status/restart the ltcld daemon using the
# distribution's init helper functions and a subsys lock file.

# Source function library.
. /etc/init.d/functions

# you can override defaults in /etc/sysconfig/ltcld, see below
if [ -f /etc/sysconfig/ltcld ]; then
        . /etc/sysconfig/ltcld
fi

RETVAL=0

prog=ltcld
# you can override the lockfile via BITCOIND_LOCKFILE in /etc/sysconfig/ltcld
lockfile=${BITCOIND_LOCKFILE-/var/lock/subsys/ltcld}

# ltcld defaults to /usr/bin/ltcld, override with BITCOIND_BIN
bitcoind=${BITCOIND_BIN-/usr/bin/ltcld}

# ltcld opts default to -disablewallet, override with BITCOIND_OPTS
bitcoind_opts=${BITCOIND_OPTS}

# Launch the daemon and, on success, create the subsys lock so the
# system knows the service is running.
start() {
    echo -n $"Starting $prog: "
    daemon $DAEMONOPTS $bitcoind $bitcoind_opts
    RETVAL=$?
    echo
    [ $RETVAL -eq 0 ] && touch $lockfile
    return $RETVAL
}

# Terminate the daemon and remove the subsys lock on success.
stop() {
    echo -n $"Stopping $prog: "
    killproc $prog
    RETVAL=$?
    echo
    [ $RETVAL -eq 0 ] && rm -f $lockfile
    return $RETVAL
}

# Dispatch on the requested action.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        status $prog
        ;;
    restart)
        stop
        start
        ;;
    *)
        echo "Usage: service $prog {start|stop|status|restart}"
        exit 1
        ;;
esac
| true
|
05458196e14df3ca05bd3cbd1912ae5360f46389
|
Shell
|
ManticoreProject/manticore
|
/config/make-file-cache.sh
|
UTF-8
| 527
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# COPYRIGHT (c) 2007 The Manticore Project (http://manticore.cs.uchicago.edu)
# All rights reserved.
#
# Create a cache file for HLOp-definition and rewriting-rule files.
#
# Usage: make-file-cache <absolute-dir>
# Writes the list of *.hlop / *.hlrw files under <dir> to <dir>/.cache.

if test $# != "1" ; then
  echo "usage: make-file-cache <dir>"
  exit 1
fi

# The directory must be an absolute path so cached names are stable.
case "$1" in
  /*) ;;
  *) echo "make-file-cache: directory must be absolute path"; exit 1;;
esac

if test -d "$1" ; then
  # BUGFIX: "$1" is now quoted so directory names containing spaces do
  # not word-split in the find arguments or the output redirection.
  find "$1" \( -name "*.hlop" -o -name "*.hlrw" \) -print > "$1/.cache"
  exit 0
else
  echo "make-file-cache: \"$1\" is not a directory"
  exit 1
fi
| true
|
262d0d4f42a46826834bd4bdc7cc6c594cb069b5
|
Shell
|
isahakukamil/circleci-101
|
/PassRestriction.sh
|
UTF-8
| 11,398
| 3.484375
| 3
|
[] |
no_license
|
echo "************************************************************"
echo "*******Script to Set Password Restiction on IN nodes********"
echo "*********Name:Issahaku Kamil | UserID : EKAMISS*************"
echo "************************************************************"

# --- Backups ---------------------------------------------------------------
# Create a backup directory, extract and append timestamp to backup filename
# and copy files to new backup file.
# FIX: the original ran `grep -Fxq "LoginDefsBackup" /tmp`, which can never
# succeed (grep cannot look up a directory entry inside /tmp); test for the
# directory itself with -d instead.
if [ -d /tmp/LoginDefsBackup ]
then
  echo ".........................................................................."
  echo "...Backup of /etc/login.defs is stored in /tmp/LockoutBackup directory..."
  echo ".........................................................................."
else
  mkdir /tmp/LoginDefsBackup
  echo ".........................................................................."
  echo "...Backup of /etc/login.defs is stored in /tmp/LockoutBackup directory..."
  echo ".........................................................................."
fi
if [ -d /tmp/pamConfBackup ]
then
  echo "......................................................................................."
  echo "...Backup of /etc/security/pwquality.conf is stored in /tmp/pamConfBackup directory..."
  echo "......................................................................................."
else
  mkdir /tmp/pamConfBackup
  echo "......................................................................................."
  echo "...Backup of /etc/security/pwquality.conf is stored in /tmp/pamConfBackup directory..."
  echo "......................................................................................."
fi

ExtrTimeStamp=$(date "+%Y-%m-%d_%H-%M-%S")
echo "............................................................."
echo "Note the Date-Time-Stamp in case of a rollback:$ExtrTimeStamp"
echo "............................................................."

# Timestamped backup copies used for the rollback below.
# FIX: the original `touch`ed /tmp/pamConfBackcup (typo) before copying; cp
# creates the destination file itself, so the touch calls were dropped.
LoginDefsBackupFile="/tmp/LoginDefsBackup/LoginDefsBackups.$ExtrTimeStamp"
PamConfBackupFile="/tmp/pamConfBackup/PamConfBackups.$ExtrTimeStamp"
cp -r /etc/login.defs "$LoginDefsBackupFile"
cp -r /etc/security/pwquality.conf "$PamConfBackupFile"
#end

# apply_setting FILE BACKUP SED_EXPRESSION OK_MESSAGE FAIL_MESSAGE
# Applies the sed edit in place. On success prints OK_MESSAGE; on failure
# prints FAIL_MESSAGE and restores FILE from BACKUP.
# FIX: the original compared `[[ $status="0" ]]` (no spaces), which is a
# non-empty-string test that is always true, so the failure/rollback branches
# were unreachable; the rollback also copied from a non-existent /etc/tmp
# path and, for pwquality.conf, copied in the wrong direction (it would have
# overwritten the backup with the broken file instead of restoring it).
apply_setting() {
  local file="$1" backup="$2" expr="$3" ok="$4" fail="$5"
  if sed -i "$expr" "$file"
  then
    echo ".................................................................."
    echo "$ok"
    echo ".................................................................."
  else
    echo "........................................"
    echo "$fail"
    echo "........................................"
    cp -r "$backup" "$file"
    echo "...........Rollback Initiated..........."
    echo "........................................"
  fi
}

#Set password restictions
apply_setting /etc/login.defs "$LoginDefsBackupFile" \
  '/^PASS_MAX_DAYS[ \t]\+\w\+$/{s//PASS_MAX_DAYS 90/g;}' \
  "....Duration before the next password change is set to 90 days...." \
  "....Could not set password max days....."

apply_setting /etc/login.defs "$LoginDefsBackupFile" \
  '/^PASS_WARN_AGE[ \t]\+\w\+$/{s//PASS_WARN_AGE 45/g;}' \
  "..........Password Change Warning duration set to 45 days........." \
  "....Could not set password warn age....."

apply_setting /etc/login.defs "$LoginDefsBackupFile" \
  '/^PASS_MIN_DAYS[ \t]\+\w\+$/{s//PASS_MIN_DAYS 0/g;}' \
  "....Minimum Number of days before password change is set to 0 days...." \
  "....Could not set password min days....."

apply_setting /etc/login.defs "$LoginDefsBackupFile" \
  's/.* PASS_MIN_LEN .*/PASS_MIN_LEN 8/g;' \
  ".....The minimum length of characters that must be used to set a password is set to 8........." \
  "..........Could not set minimum length to 8.........."

apply_setting /etc/security/pwquality.conf "$PamConfBackupFile" \
  's/.* difok .*/difok = 2/g;' \
  ".....The minimum number of characters that must be different from old password is set to 2....." \
  "..........Could not set difok to 2.........."

apply_setting /etc/security/pwquality.conf "$PamConfBackupFile" \
  's/.* ocredit .*/ocredit = 2/g;' \
  ".....The number of special characters that can be used when setting password is set to .2....." \
  "..........Could not set ocredit to 2.........."

apply_setting /etc/security/pwquality.conf "$PamConfBackupFile" \
  's/.* dcredit .*/dcredit = 2/g;' \
  ".....The number of Number characters that can be used when setting a new passwod is set to 2....." \
  "..........Could not set dcredit to 2.........."

apply_setting /etc/security/pwquality.conf "$PamConfBackupFile" \
  's/.* ucredit .*/ucredit = 2/g;' \
  ".....The number of upper-case characters that can be used when setting a new password is set to 2....." \
  "..........Could not set ucredit to 2.........."

# FIX: the original failure message said "lcredit to 2" although the edit
# sets lcredit = 50; the message now matches the applied value.
apply_setting /etc/security/pwquality.conf "$PamConfBackupFile" \
  's/.* lcredit .*/lcredit = 50/g;' \
  ".....The number of lower-case characters that can be used when setting a new password is set to 50....." \
  "..........Could not set lcredit to 50.........."

apply_setting /etc/security/pwquality.conf "$PamConfBackupFile" \
  's/.* minlen .*/minlen = 8/g;' \
  ".....The minimum length of characters a password can have is set to 8....." \
  "..........Could not set minlen to 8.........."
| true
|
e7a337144734d1c9c0e8f660a9332fadbd8918d2
|
Shell
|
jovanlanik/dotfiles
|
/stow/i3/.local/bin/i3-swap.sh
|
UTF-8
| 275
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
# i3 swap helper: temporarily enter a binding mode (default: 'drag') with
# enlarged inner gaps, then fall back to the default mode and gap size
# after a short delay, and clear the 'swap' mark.

delay=1.6
if [ -n "$1" ]; then
  swap_mode=$1
else
  swap_mode='drag'
fi

i3-msg "mode $swap_mode"
i3-msg "gaps inner all set 8"
sleep "$delay"
i3-msg "mode default"
i3-msg "gaps inner all set 2"
i3-msg unmark swap
| true
|
98da78eaf54ff85ce82346bfead2c5d9830291ff
|
Shell
|
AvanzaBank/astrix
|
/scripts/cd/build-and-test.sh
|
UTF-8
| 314
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Script that executes integration tests (i.e. mvn verify) in a deterministic environment.

# Move to the repository root (two levels above this script's directory).
pushd "${BASH_SOURCE%/*}/../.."
echo "Executing Maven command in: '$PWD'"
# FIX: quote "$PWD" so the docker bind mount survives paths containing spaces.
docker run -it --rm -v "$PWD":/opt/javabuild avanzabank/ubuntu-openjdk8:0.1.2 /bin/bash -c "cd /opt/javabuild; ./mvnw clean verify"
| true
|
4462093ac41f80b9caa39839c50626238777b7e1
|
Shell
|
xianlinfeng/Documents
|
/scip/applications/CycleClustering/check/configuration_tmpfile_setup_scip.sh
|
UTF-8
| 5,498
| 3.578125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
#* *
#* This file is part of the program and library *
#* SCIP --- Solving Constraint Integer Programs *
#* *
#* Copyright (C) 2002-2019 Konrad-Zuse-Zentrum *
#* fuer Informationstechnik Berlin *
#* *
#* SCIP is distributed under the terms of the ZIB Academic License. *
#* *
#* You should have received a copy of the ZIB Academic License *
#* along with SCIP; see the file COPYING. If not email to scip@zib.de. *
#* *
#* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
### resets and fills a batch file TMPFILE to run SCIP with
### sets correct limits, reads in settings, and controls
### display of the solving process
# environment variables passed as arguments
INSTANCE=$1 # instance name to solve
SCIPPATH=$2 # - path to working directory for test (usually, the check subdirectory)
TMPFILE=$3 # - the batch file to control SCIP
SETNAME=$4 # - specified basename of settings-file, or 'default'
SETFILE=$5 # - instance/settings specific set-file
THREADS=$6 # - the number of LP solver threads to use
SETCUTOFF=$7 # - should optimal instance value be used as objective limit (0 or 1)?
FEASTOL=$8 # - feasibility tolerance, or 'default'
TIMELIMIT=$9 # - time limit for the solver
MEMLIMIT=${10} # - memory limit for the solver
NODELIMIT=${11} # - node limit for the solver
LPS=${12} # - LP solver to use
DISPFREQ=${13} # - display frequency for chronological output table
REOPT=${14} # - true if we use reoptimization, i.e., using a difflist file instead if an instance file
OPTCOMMAND=${15} # - command that should per executed after reading the instance, e.g. optimize, presolve or count
CLIENTTMPDIR=${16}
SOLBASENAME=${17}
VISUALIZE=${18}
SOLUFILE=${19} # - solu file, only necessary if $SETCUTOFF is 1
#args=("$@")
#for ((i=0; i < $#; i++)) {
# echo "argument $((i+1)): ${args[$i]}"
#}
# new environment variables after running this script
# -None
#set solfile
SOLFILE=$CLIENTTMPDIR/${USER}-tmpdir/$SOLBASENAME.sol
# reset TMPFILE
echo > $TMPFILE
# read in settings (even when using default, see bugzilla 600)
SETTINGS=$SCIPPATH/../settings/$SETNAME.set
if test $SETNAME == "default"
then
# create empty settings file
test -e $SETTINGS || touch $SETTINGS
fi
echo set load $SETTINGS >> $TMPFILE
# set non-default feasibility tolerance
if test $FEASTOL != "default"
then
echo set numerics feastol $FEASTOL >> $TMPFILE
fi
# if permutation counter is positive add permutation seed (0 = default)
# NOTE(review): $p and $s (and GLBSEEDSHIFT / OBJECTIVEVAL / OUTPUTDIR below)
# are not parameters of this script; they appear to be inherited from the
# calling environment — confirm against the invoking test harness.
if test $p -gt 0
then
echo set randomization permutationseed $p >> $TMPFILE
fi
# set random seed shift
SEED=$(($s + $GLBSEEDSHIFT))
if test $SEED -gt 0
then
echo set randomization randomseedshift $SEED >> $TMPFILE
fi
# avoid solving LPs in case of LPS=none
if test "$LPS" = "none"
then
echo set lp solvefreq -1 >> $TMPFILE
fi
# set reference value
if test "$OBJECTIVEVAL" != ""
then
#echo "Reference value $OBJECTIVEVAL"
echo set misc referencevalue $OBJECTIVEVAL >> $TMPFILE
fi
# global limits and display settings written into the batch file
echo set limits time $TIMELIMIT >> $TMPFILE
echo set limits nodes $NODELIMIT >> $TMPFILE
echo set limits memory $MEMLIMIT >> $TMPFILE
echo set lp advanced threads $THREADS >> $TMPFILE
echo set timing clocktype 1 >> $TMPFILE
echo set display freq $DISPFREQ >> $TMPFILE
# avoid switching to dfs - better abort with memory error
echo set memory savefac 1.0 >> $TMPFILE
echo set save $SETFILE >> $TMPFILE
if test "$VISUALIZE" = true
then
BAKFILENAME="`basename $TMPFILE .tmp`.dat"
echo visualization output set to "$BAKFILENAME"
echo set visual bakfilename "$OUTPUTDIR/${BAKFILENAME}" >> $TMPFILE
fi
if test "$REOPT" = false
then
# read and solve the instance
echo read $INSTANCE >> $TMPFILE
# set objective limit: optimal solution value from solu file, if existent
if test $SETCUTOFF = 1 || test $SETCUTOFF = true
then
# the leading "" keeps `test` well-formed even when OBJECTIVEVAL is unset
if test ""$OBJECTIVEVAL != ""
then
echo set limits objective $OBJECTIVEVAL >> $TMPFILE
echo set heur emph off >> $TMPFILE
fi
fi
echo display parameters >> $TMPFILE
echo $OPTCOMMAND >> $TMPFILE
echo display statistics >> $TMPFILE
echo checksol >> $TMPFILE
else
# read the difflist file
cat $INSTANCE >> $TMPFILE
fi
# currently, the solution checker only supports .mps-files.
# compare instance name (without .gz) to instance name stripped by .mps.
#if they are unequal, we have an mps-file
TMPINSTANCE=`basename $INSTANCE .gz`
TMPINSTANCEB=`basename $TMPINSTANCE .mps`
if test "$TMPINSTANCEB" != "$TMPINSTANCE"
then
echo write sol $SOLFILE >> $TMPFILE
fi
echo quit >> $TMPFILE
| true
|
f704482f936cdd1ceee2f02151fd7c60d293b82f
|
Shell
|
healthy8701/Caffe_Yolov3_Inference
|
/setup.sh
|
UTF-8
| 453
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch the YOLOv3 weights and caffemodel into ./convert, skipping any
# artifact that is already present on disk.

pushd convert
download_base="http://video.cambricon.com/models/CambriconECO/Caffe_Yolov3_Inference"
for artifact in yolov3.weights yolov3.caffemodel; do
  if [ -f "$artifact" ]; then
    echo "$artifact exists."
  else
    wget -O "$artifact" "$download_base/$artifact"
  fi
done
popd
| true
|
cab5182e04e7f3bc4d3b409ee129c2a8af3da147
|
Shell
|
jimwangzx/KOZ1OLMENU
|
/K0Z1OLMENU.sh
|
UTF-8
| 2,189
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# MAIN MENU
# Interactive launcher: pulls the latest version of this repo, shows a banner
# and dispatches to one of several helper scripts under ./metasploit.
spinner=( '|' '/' '-' '\')
# Print a short animated "checking for updates" progress line.
spin(){
echo -n "Sprawdzam dostępność aktualizacji"
for i in "${spinner[@]}"
do
echo -ne "."
sleep 0.2
done
}
echo "$(tput setaf 3)
 ____________ _______ ______ _____ __ __ _____ ____ _______ __
 / __/ ___/ _ \/ _/ _ /_ __/ / _ \ \/ / / //_/ _ /_ /< / __ \/ /
_\ \/ /__/ , __/ // ___// / / _ |\ / / ,< / // // /_/ / /_/ / /__
/___/\___/_/|_/___/_/ /_/ /____/ /_/ /_/|_|\___//___/_/\____/____/
"
spin
git pull
sleep 5
clear
echo "$(tput setaf 3)
 ____________ _______ ______ _____ __ __ _____ ____ _______ __
 / __/ ___/ _ \/ _/ _ /_ __/ / _ \ \/ / / //_/ _ /_ /< / __ \/ /
_\ \/ /__/ , __/ // ___// / / _ |\ / / ,< / // // /_/ / /_/ / /__
/___/\___/_/|_/___/_/ /_/ /____/ /_/ /_/|_|\___//___/_/\____/____/
"
echo "IF YOU CAN'T FIND VULNERABILITY YOU MUST MAKE IT !!!"
echo "Which option do you choose ?"
echo -e "1. METASPLOIT LOW DETACTION WINDOWS PAYLOAD \n2. METASPLOIT EXE PAYLOAD INJECT\n3. DOS SCRIPT \n4. NMAP TOOL \n5. PHISHING TOOL \n6. MAC CHANGER \n7. START NGROK"
echo "If you want stop script click ctrl+c"
# Menu loop: read a full line and dispatch on it.
# NOTE(review): patterns such as [1][eE][sS]|[1] match the 3-character
# strings "1es"/"1ES"... or the single character "1" — the first alternative
# was presumably meant to be [yY][eE][sS]; confirm intent before changing.
while true
do
read -r -p "Which option do you choose ? - " input
case $input in
[1][eE][sS]|[1])
echo "Starting script... "
cd /home/kali/Desktop/KOZ1OLMENU/metasploit
sudo ./metasploit_bat_shell.sh
break
;;
[2][oO]|[2])
echo "Starting script... "
cd /home/kali/Desktop/KOZ1OLMENU/metasploit
sudo ./metasploit_exe_injection.sh
break
;;
[3][oO]|[3])
echo "Starting script... "
cd /home/kali/Desktop/KOZ1OLMENU/metasploit
sudo ./DOS.sh
break
;;
[4][oO]|[4])
echo "Starting script... "
cd /home/kali/Desktop/KOZ1OLMENU/metasploit
sudo ./nmap.sh
break
;;
[5][oO]|[5])
echo "Starting script... "
cd /home/kali/Desktop/KOZ1OLMENU/metasploit
./pishing.sh
break
;;
[6][oO]|[6])
echo "Starting script... "
cd /home/kali/Desktop/KOZ1OLMENU/metasploit
./macchanger.sh
break
;;
[7][oO]|[7])
echo "Starting script... "
cd /home/kali/Desktop/KOZ1OLMENU/metasploit
./servicengrok.sh
break
;;
*)
echo "Invalid input..."
;;
esac
done
| true
|
2f03324d4f628aee5a53be9b1564fcfe851d5075
|
Shell
|
myrao/adt-tools-base
|
/profiler/deploy_perfd.sh
|
UTF-8
| 1,376
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# Deploy a locally built perfd binary onto an attached Android device
# (via the same adb that Studio uses) and start it.
#
# Usage: deploy_perfd.sh <host> [<build-type>]
#   <host>       one of: armeabi-v7a, arm64-v8a, x86
#   <build-type> one of: debug, release (default: release)
#
# FIX: every error path previously ended with a bare `exit`, which exits
# with the status of the preceding `echo` (0), so failures looked like
# success to callers. All error paths now exit 1.

if [[ "$PWD" != */tools/base/profiler ]]; then
  echo "You must call this script from .../tools/base/profiler"
  exit 1
fi

if [[ -z "$ANDROID_HOME" ]]; then
  echo "You must define the ANDROID_HOME enviroment variable"
  echo "This ensures we use the same adb binary that Studio uses"
  exit 1
fi

adb_exe="$ANDROID_HOME/platform-tools/adb"
if [[ ! -e "$adb_exe" ]]; then
  echo "adb binary not found at: $adb_exe"
  echo "Verify ANDROID_HOME?"
  exit 1
fi

host=$1
if [[ -z "$host" ]]; then
  script_name=$(basename "$0")
  echo "Usage: $script_name <host> [<build-type>]"
  echo "Run again and specify the required arguments:"
  echo "<host>: Required, one of: armeabi-v7a, arm64-v8a, x86"
  echo "<build-type>: Optional, one of: debug, release (default: release)"
  exit 1
fi

build_type=$2
if [[ -z "$build_type" ]]; then
  build_type="release"
fi

perfd_path="../../../out/studio/native/out/$build_type/$host/perfd"
if [[ ! -e "$perfd_path" ]]; then
  echo "Perfd binary not found at: $perfd_path"
  echo "Verify host, perhaps rebuild perfd? (see profiler/native/README.md)"
  exit 1
fi

perfd_install_path="/data/local/tmp/perfd/"
echo "Installing perfd onto device: $perfd_install_path"
"$adb_exe" shell mkdir -p "$perfd_install_path"
"$adb_exe" push "$perfd_path" "$perfd_install_path" && "$adb_exe" shell "$perfd_install_path/perfd"
| true
|
9083ec4a88298c49f7166835dc8fce1e08d627d4
|
Shell
|
imsky/scripts
|
/firefoxbackup/firefoxbackup.sh
|
UTF-8
| 946
| 2.8125
| 3
|
[] |
no_license
|
# Back up the important pieces of a Firefox profile directory.
#   $1 - existing Firefox profile directory
#   $2 - destination backup directory (created if missing)
#
# FIX: the usage error previously ended with a bare `exit` (status 0, i.e.
# "success"); it now exits 1. All $1/$2 expansions are quoted so profile
# paths containing spaces work.
if [ $# -ne 2 ]
then
  echo "Usage: firefoxbackup.sh [profile] [backup]"
  echo "Where [profile] is the existing Firefox profile directory and [backup] is the desired backup directory."
  exit 1
fi

mkdir "$2"

echo Copying Toolbar Customizations
cp "$1"/localstore.rdf "$2"

echo Copying Bookmarks
cp "$1"/places.sqlite "$2"

echo Copying Passwords
cp "$1"/key3.db "$2"/
cp "$1"/signons.sqlite "$2"/

echo Copying Site Preferences
cp "$1"/permissions.sqlite "$2"/

echo Copying Search Engines
cp "$1"/search.sqlite "$2"/
cp "$1"/search.json "$2"/
cp -R "$1"/searchplugins "$2"/

echo Copying Search Options
cp "$1"/search-metadata.json "$2"/

echo Copying Personal Dictionary
cp "$1"/persdict.dat "$2"/

echo Copying Security Certificate Settings
cp "$1"/cert8.db "$2"/

echo Copying Extensions
cp -R "$1"/extensions "$2"/
cp "$1"/extensions.sqlite "$2"/

echo Copying Preferences
cp "$1"/prefs.js "$2"/
echo Delete "$2"/prefs.js if your preferences were corrupt

# Adblock Plus data is optional; only copy it when present.
if [ -e "$1"/adblockplus ]
then
  cp -R "$1"/adblockplus "$2"/
fi
| true
|
cd246ad54b87075a28b641ec58c9c8f21f304535
|
Shell
|
xenolog/kargo-multirack
|
/CI/playbook_check_container_version.sh
|
UTF-8
| 896
| 3.828125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Fail fast: abort on command errors, unset variables and pipeline failures.
set -o errexit
set -o nounset
set -o pipefail
# Verify that every bgpd_container_tag in cluster.yaml matches the branch
# under test: 'latest' on master, the branch name on release-* branches.
# Reads TRAVIS_PULL_REQUEST_BRANCH / TRAVIS_BRANCH from the environment.
# Exits 0 when the check passes (or is skipped for pushes), 1 on mismatch.
function check_container_version {
  # Only meaningful for pull-request builds; plain pushes are skipped.
  if [ "$TRAVIS_PULL_REQUEST_BRANCH" == "" ]; then
    echo "This check only for PR allowed, not for push."
    exit 0
  fi
  local branch=$TRAVIS_BRANCH
  # FIX: single awk pass replaces the original `cat | grep | awk | sort | uniq`
  # pipeline; comparisons are quoted so empty/odd tags cannot break `[`.
  for TAG in $(awk '/bgpd_container_tag/ {print $2}' cluster.yaml | sort -u) ; do
    if [ "$branch" == "master" ]; then
      if [ "$TAG" != 'latest' ]; then
        echo "cluster.yaml should contains 'bgpd_container_tag: latest', instead '$TAG' for branch '$branch'"
        exit 1
      fi
    elif [ "${branch:0:8}" == "release-" ]; then
      if [ "$TAG" != "$branch" ]; then
        echo "cluster.yaml should contains 'bgpd_container_tag: $branch', instead '$TAG' for branch '$branch'"
        exit 1
      fi
    fi
  done
}
check_container_version
| true
|
6022f4da5069b9fbcf254863c772633a4bfa6abe
|
Shell
|
AdrianLopezGue/UNIVERSITY
|
/PAS/PRACTICA 1/ejercicio2.sh
|
UTF-8
| 623
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# List files under a directory that are larger than a given size (in 512-byte
# blocks, per find's default -size unit), printing one semicolon-separated
# record per file, sorted numerically by the 5th field.
#   $1 - directory to search (required)
#   $2 - minimum size threshold (optional, default 0)

# Validate the arguments passed in (1 or 2 expected).
if [ $# -gt 0 ] && [ $# -lt 3 ]
then
  if [ $# -eq 1 ]
  then
    numerobytes=0
  else
    numerobytes=$2
  fi
else
  echo "Tienes que introducir al menos un argumento"
  exit 1
fi

nombredirectorio=$1

echo "Nombre, LongitudUsuario, FechaModificacion, FechaAcceso, Tamano, Bloques, Permisos, Ejecutable"
# FIX: iterate with a while/read loop instead of `for i in $(find ...)`,
# which word-splits and breaks on paths containing spaces; all expansions
# are quoted for the same reason. (Paths containing newlines still break.)
find "$nombredirectorio" -size +"$numerobytes" | while IFS= read -r i
do
  echo -n "$(basename "$i"); $(echo -n "$LOGNAME" | wc -c); $(stat -c %w "$i") $(stat -c %x "$i"); $(stat -c %s "$i"); $(stat -c %b "$i"); $(stat -c %A "$i");"
  # Final field: 1 if the entry is executable, 0 otherwise.
  if [ -x "$i" ]
  then
    echo 1
  else
    echo 0
  fi
done | sort -nk 5
| true
|
1649b38903cda18948deaf791cc049d88eff7077
|
Shell
|
malkomich/unix-exercices
|
/pr3/2/ej1.sh
|
UTF-8
| 136
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Exercise 1, practical 3 (part 2): print size and name of the regular files
# in the given directory, then show disk usage for the same directory.
# FIX: quote the argument (defaulting to ".") so directory names with spaces
# work; with no argument the behaviour matches the original (current dir).
dir=${1:-.}
ls -l "$dir" | grep ^- | tr -s " " | cut -d " " -f 5,9
echo ""
echo "Otra forma:"
du -cah "$dir"
| true
|
fb8c51340b5cd4e501193db8dbe6736101dd5ddd
|
Shell
|
carlosroman/infrastructure-agent
|
/build/package/deb/postinst-sysv.sh
|
UTF-8
| 2,325
| 4
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# DEB and SYSV distros: Debian 7 'Wheezy'
# Post-install script: configures ownership, capabilities and the init
# script for the newrelic-infra agent according to the requested run mode.
serviceFile=/etc/init.d/newrelic-infra
# check the run mode (written by the installer; absent file -> default)
userMode=$(cat /tmp/nria_mode 2>/dev/null)
# check usermode is set
if [ -z "$userMode" ]; then
userMode="ROOT"
fi
# check the user mode
if [ "$userMode" != "ROOT" ] && [ "$userMode" != "PRIVILEGED" ] && [ "$userMode" != "UNPRIVILEGED" ]; then
# user mode is not valid so we set it by default: ROOT
userMode="ROOT"
fi
if [ "$userMode" = "PRIVILEGED" ] || [ "$userMode" = "UNPRIVILEGED" ]; then
runDir=/var/run/newrelic-infra
installDir=/var/db/newrelic-infra
logDir=/var/log/newrelic-infra
configDir=/etc/newrelic-infra
tmpDir=/tmp/nr-integrations
# Give nri-agent ownership over it's folder
chown -R nri-agent:nri-agent ${runDir}
chown -R nri-agent:nri-agent ${installDir}
chown -R nri-agent:nri-agent ${logDir}
chown -R nri-agent:nri-agent ${configDir}
chown -R nri-agent:nri-agent ${tmpDir} 2>/dev/null || true
if [ "$userMode" = "PRIVILEGED" ]; then
failFlag=0
# Give the Agent kernel capabilities if setcap command exists
# (look it up on PATH first, then fall back to /sbin/setcap)
setCap=$(command -v setcap) || setCap="/sbin/setcap" && [ -f $setCap ] || setCap=""
if [ ! -z $setCap ]; then
eval "$setCap CAP_SYS_PTRACE,CAP_DAC_READ_SEARCH=+ep /usr/bin/newrelic-infra" || failFlag=1
else
failFlag=1
fi
if [ $failFlag -eq 1 ]; then
(>&2 echo "Error setting PRIVILEGED mode. Fallbacking to UNPRIVILEGED mode")
fi
fi
if [ -e "$serviceFile" ]; then
# If the user or group is set to root, change it to nri-agent
# If no user or group is set, set it to nri-agent
if grep 'USER=root' $serviceFile >/dev/null ; then
sed -i 's/USER=root/USER=nri-agent/g' "$serviceFile"
elif ! grep 'USER=' $serviceFile >/dev/null ; then
sed -i '/### END INIT INFO/aUSER=nri-agent' "$serviceFile"
fi
fi
fi
# Previous versions had an incorrect `prerm` that didn't stop the service
# because it couldn't detect it was running, for that reason we have to make
# sure that there is not an older version running.
oldPid=/var/run/newrelic-infra.pid
if [ -e "$oldPid" ] ; then
. /lib/lsb/init-functions
killproc -p $oldPid /usr/bin/newrelic-infra
rm $oldPid
fi
# Register and start the service (SysV) when its init script is installed.
if [ -e "$serviceFile" ]; then
insserv newrelic-infra || exit $?
${serviceFile} start || exit $?
fi
| true
|
149908381714e836bfe665216eb26892ffa627c7
|
Shell
|
sinkingpoint/personal-infra
|
/cookbooks/prometheus/templates/default/backup_data.sh.erb
|
UTF-8
| 232
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Archive the Prometheus data directory and upload it to S3.

set -e

to_backup=$(mktemp)
# FIX: with `set -e`, a failing tar/aws would previously skip the trailing
# `rm` and leak the temp file; the trap removes it on every exit path.
trap 'rm -f -- "$to_backup"' EXIT

docker restart prometheus

tar -zcvf "${to_backup}" /opt/prometheus/storage
aws s3 cp --region eu-west-2 "${to_backup}" s3://sinking-database-backups/prometheus/"latest.bak"
| true
|
a9b58b8ee1f35fdd9453709993770c49f8eff5d2
|
Shell
|
pieterdavid/bamboo-delphesexample
|
/DelphesIO/getSources.sh
|
UTF-8
| 1,057
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Download the Delphes sources needed by this package for a given Delphes
# version tag, then apply the local patch.
#   $1 - Delphes version/tag (required)
if [ $# != 1 ]; then
  echo "One argument, the Delphes version, is required"
  exit 1
fi
delphestag="${1}"
wget -q -P include "https://raw.githubusercontent.com/delphes/delphes/${delphestag}/classes/DelphesClasses.h"
wget -q -P include "https://raw.githubusercontent.com/delphes/delphes/${delphestag}/classes/DelphesFactory.h"
wget -q -P include "https://raw.githubusercontent.com/delphes/delphes/${delphestag}/classes/SortableObject.h"
wget -q -P src "https://raw.githubusercontent.com/delphes/delphes/${delphestag}/classes/DelphesClasses.cc"
wget -q -P src "https://raw.githubusercontent.com/delphes/delphes/${delphestag}/classes/DelphesFactory.cc"
wget -q -P src "https://raw.githubusercontent.com/delphes/delphes/${delphestag}/external/ExRootAnalysis/ExRootTreeBranch.h"
wget -q -P src "https://raw.githubusercontent.com/delphes/delphes/${delphestag}/external/ExRootAnalysis/ExRootTreeBranch.cc"
wget -q -P src "https://raw.githubusercontent.com/delphes/delphes/${delphestag}/classes/ClassesLinkDef.h"
# FIX: quote the dirname expansion so a script path containing spaces works.
patch -p1 -i "$(dirname "$0")/sources.patch"
| true
|
44359593f2be92e8322de9ae5ae13b501b66d629
|
Shell
|
odin-lang/Odin
|
/build_odin.sh
|
UTF-8
| 5,218
| 3.578125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
set -eu
# Build configuration: each variable can be overridden from the environment.
: ${CXX=clang++}
: ${CPPFLAGS=}
: ${CXXFLAGS=}
: ${LDFLAGS=}
: ${ODIN_VERSION=dev-$(date +"%Y-%m")}
: ${GIT_SHA=}
CXXFLAGS="$CXXFLAGS -std=c++14"
LDFLAGS="$LDFLAGS -pthread -lm -lstdc++"
# Derive the version string and commit hash from git when available.
# NOTE(review): under `set -e` a failing `git show` aborts the script, so the
# `$? -eq 0` check below is effectively always true — confirm intent.
if [ -d ".git" ] && [ $(which git) ]; then
versionTag=( $(git show --pretty='%cd %h' --date=format:%Y-%m --no-patch --no-notes HEAD) )
if [ $? -eq 0 ]; then
ODIN_VERSION="dev-${versionTag[0]}"
GIT_SHA="${versionTag[1]}"
CPPFLAGS="$CPPFLAGS -DGIT_SHA=\"$GIT_SHA\""
fi
fi
CPPFLAGS="$CPPFLAGS -DODIN_VERSION_RAW=\"$ODIN_VERSION\""
DISABLED_WARNINGS="-Wno-switch -Wno-macro-redefined -Wno-unused-value"
OS=$(uname)
# Print the given message on stdout and abort the script with status 1.
panic() {
  local message="$1"
  printf '%s\n' "$message"
  exit 1
}
version() { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }
# Configure compiler/linker flags for macOS; enforces the supported
# llvm-config version range (13 on arm64, 11.1–14.x on x86/amd64).
config_darwin() {
local ARCH=$(uname -m)
: ${LLVM_CONFIG=llvm-config}
# allow for arm only llvm's with version 13
if [ "${ARCH}" == "arm64" ]; then
MIN_LLVM_VERSION=("13.0.0")
else
# allow for x86 / amd64 all llvm versions beginning from 11
MIN_LLVM_VERSION=("11.1.0")
fi
if [ $(version $($LLVM_CONFIG --version)) -lt $(version $MIN_LLVM_VERSION) ]; then
if [ "${ARCH}" == "arm64" ]; then
panic "Requirement: llvm-config must be base version 13 for arm64"
else
panic "Requirement: llvm-config must be base version greater than 11 for amd64/x86"
fi
fi
MAX_LLVM_VERSION=("14.999.999")
if [ $(version $($LLVM_CONFIG --version)) -gt $(version $MAX_LLVM_VERSION) ]; then
echo "Tried to use " $(which $LLVM_CONFIG) "version" $($LLVM_CONFIG --version)
panic "Requirement: llvm-config must be base version smaller than 15"
fi
# macOS-specific libraries, then LLVM compile/link flags.
LDFLAGS="$LDFLAGS -liconv -ldl -framework System"
CXXFLAGS="$CXXFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
LDFLAGS="$LDFLAGS -lLLVM-C"
}
# Configure compiler/linker flags for FreeBSD; locates one of the
# versioned llvm-config binaries (11–13) unless LLVM_CONFIG is preset.
config_freebsd() {
: ${LLVM_CONFIG=}
if [ ! "$LLVM_CONFIG" ]; then
if [ -x "$(command -v llvm-config11)" ]; then
LLVM_CONFIG=llvm-config11
elif [ -x "$(command -v llvm-config12)" ]; then
LLVM_CONFIG=llvm-config12
elif [ -x "$(command -v llvm-config13)" ]; then
LLVM_CONFIG=llvm-config13
else
panic "Unable to find LLVM-config"
fi
fi
CXXFLAGS="$CXXFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
LDFLAGS="$LDFLAGS $($LLVM_CONFIG --libs core native --system-libs)"
}
# Configure compiler/linker flags for OpenBSD; uses the packaged
# llvm-config under /usr/local unless LLVM_CONFIG is preset.
config_openbsd() {
: ${LLVM_CONFIG=/usr/local/bin/llvm-config}
LDFLAGS="$LDFLAGS -liconv"
CXXFLAGS="$CXXFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
LDFLAGS="$LDFLAGS $($LLVM_CONFIG --libs core native --system-libs)"
}
# Configure compiler/linker flags for Linux; locates a supported
# llvm-config (base version 11–14) and stages the LLVM shared library
# next to the binary for compiler-explorer builds.
config_linux() {
: ${LLVM_CONFIG=}
if [ ! "$LLVM_CONFIG" ]; then
if [ -x "$(command -v llvm-config)" ]; then
LLVM_CONFIG=llvm-config
elif [ -x "$(command -v llvm-config-11)" ]; then
LLVM_CONFIG=llvm-config-11
elif [ -x "$(command -v llvm-config-11-64)" ]; then
LLVM_CONFIG=llvm-config-11-64
elif [ -x "$(command -v llvm-config-14)" ]; then
LLVM_CONFIG=llvm-config-14
else
panic "Unable to find LLVM-config"
fi
fi
MIN_LLVM_VERSION=("11.0.0")
if [ $(version $($LLVM_CONFIG --version)) -lt $(version $MIN_LLVM_VERSION) ]; then
echo "Tried to use " $(which $LLVM_CONFIG) "version" $($LLVM_CONFIG --version)
panic "Requirement: llvm-config must be base version greater than 11"
fi
MAX_LLVM_VERSION=("14.999.999")
if [ $(version $($LLVM_CONFIG --version)) -gt $(version $MAX_LLVM_VERSION) ]; then
echo "Tried to use " $(which $LLVM_CONFIG) "version" $($LLVM_CONFIG --version)
panic "Requirement: llvm-config must be base version smaller than 15"
fi
LDFLAGS="$LDFLAGS -ldl"
CXXFLAGS="$CXXFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
LDFLAGS="$LDFLAGS $($LLVM_CONFIG --libs core native --system-libs --libfiles) -Wl,-rpath=\$ORIGIN"
# Creates a copy of the llvm library in the build dir, this is meant to support compiler explorer.
# The annoyance is that this copy can be cluttering the development folder. TODO: split staging folders
# for development and compiler explorer builds
cp $(readlink -f $($LLVM_CONFIG --libfiles)) ./
}
# Compile the Odin compiler binary.
#   $1 - build mode: debug | release | release-native | nightly
# Uses the CXX/CPPFLAGS/CXXFLAGS/LDFLAGS set up by the config_* functions.
build_odin() {
case $1 in
debug)
EXTRAFLAGS="-g"
;;
release)
EXTRAFLAGS="-O3"
;;
release-native)
local ARCH=$(uname -m)
if [ "${ARCH}" == "arm64" ]; then
# Use preferred flag for Arm (ie arm64 / aarch64 / etc)
EXTRAFLAGS="-O3 -mcpu=native"
else
# Use preferred flag for x86 / amd64
EXTRAFLAGS="-O3 -march=native"
fi
;;
nightly)
EXTRAFLAGS="-DNIGHTLY -O3"
;;
*)
panic "Build mode unsupported!"
;;
esac
# Echo the exact compile command (set -x) around the single compile step.
set -x
$CXX src/main.cpp src/libtommath.cpp $DISABLED_WARNINGS $CPPFLAGS $CXXFLAGS $EXTRAFLAGS $LDFLAGS -o odin
set +x
}
# Smoke-test the freshly built compiler by running the demo example.
run_demo() {
./odin run examples/demo/demo.odin -file
}
# Abort early when the `which` utility is unavailable, since the
# platform configuration steps rely on it.
have_which() {
  command -v which > /dev/null 2>&1 || panic "Could not find \`which\`"
}
# Entry point: verify prerequisites, pick the platform configuration,
# then build (and, for most invocations, run the demo).
have_which
case $OS in
Linux)
config_linux
;;
Darwin)
config_darwin
;;
OpenBSD)
config_openbsd
;;
FreeBSD)
config_freebsd
;;
*)
panic "Platform unsupported!"
;;
esac
# No arguments: debug build followed by the demo.
if [[ $# -eq 0 ]]; then
build_odin debug
run_demo
exit 0
fi
# One argument: either `report` (build if needed, then report) or a
# build mode passed through to build_odin.
if [[ $# -eq 1 ]]; then
case $1 in
report)
if [[ ! -f "./odin" ]]; then
build_odin debug
fi
./odin report
exit 0
;;
*)
build_odin $1
;;
esac
run_demo
exit 0
else
panic "Too many arguments!"
fi
| true
|
e9395a08e6bb6e7a39b850bca06fd2401d286e7f
|
Shell
|
Xeuhua/usb-live-linux
|
/config/FSFW-Uni-Stick_KDE_jessie_amd64/config
|
UTF-8
| 3,021
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# live-build configuration for the FSFW-Uni-Stick image (KDE, jessie, amd64).
set -e
DISTRIBUTION=jessie
DESKTOP=KDE
FSFW_UNI_STICK_VERSION="$(echo "$(../tools/calc-version-number.sh)")"
lb config noauto \
--distribution ${DISTRIBUTION} \
--architectures amd64 \
--archive-areas "main contrib non-free" \
--binary-images iso-hybrid \
--updates true --backports=true --security=true \
--parent-mirror-bootstrap http://ftp.de.debian.org/debian/ \
--parent-mirror-chroot http://ftp.de.debian.org/debian/ \
--parent-mirror-chroot-security http://security.debian.org/ \
--parent-mirror-binary http://ftp.de.debian.org/debian/ \
--parent-mirror-binary-security http://security.debian.org/ \
--mirror-chroot http://ftp.de.debian.org/debian/ \
--mirror-chroot-security http://ftp.de.debian.org/debian-security/ \
--mirror-bootstrap http://ftp.de.debian.org/debian/ \
--mirror-binary http://ftp.de.debian.org/debian/ \
--mirror-binary-security http://ftp.de.debian.org/debian-security/ \
--bootappend-live "boot=live components locales=de_DE.UTF-8 keyboard-layouts=de vga=current" \
--bootappend-live-failsafe "boot=live components memtest noapic noapm nodma nomce nolapic nomodeset nosmp nosplash vga=normal single" \
--linux-packages linux-image \
--linux-flavours "amd64" \
--keyring-packages "debian-keyring deb-multimedia-keyring" \
--chroot-filesystem squashfs \
--binary-filesystem ext2 \
--initsystem systemd \
--initramfs live-boot \
--image-name FSFW-Uni-Stick_${FSFW_UNI_STICK_VERSION}_${DESKTOP}_${DISTRIBUTION} \
--apt-source-archives false \
--apt-recommends false \
--cache-packages true \
--source false \
"${@}"
# Embed the git branch name in the image name (for non-master branches).
# The parameter expansions normalise "/" and "-" to "_" and strip the
# "* " marker and anything after the first space from `git branch` output.
nbranch=$(git branch --column)
nbranch=${nbranch////_}
nbranch=${nbranch//-/_}
nbranch=${nbranch##*"* "}
nbranch=${nbranch%% *}
if [[ ! "${nbranch}" = "master" ]]; then
lb config noauto \
--image-name FSFW-Uni-Stick_${FSFW_UNI_STICK_VERSION}__${nbranch}__${DESKTOP}_${DISTRIBUTION} \
"${@}"
fi
# Link the shared hooks.
# mkdir config/hooks/live
# mkdir config/hooks/normal
# Detect which Debian release the build host runs and adjust the config.
if cat /etc/os-release | grep -q jessie ; then
echo "Releas jessie - config wird angepasst "
lb config noauto \
--bootloader grub2 \
"${@}"
ln -s ../binary_local-hooks/1000-grub2-hotfix.hook.binary config/hooks/1000-grub2-hotfix.hook.binary
ln -s ../chroot_local-hooks/1050-install-recent-ipython-and-configure-it.hook.chroot config/hooks/1050-install-recent-ipython-and-configure-it.hook.chroot
fi
if cat /etc/os-release | grep -q stretch ; then
echo "Releas stretch - config wird angepasst "
lb config noauto \
--bootloader grub-pc \
"${@}"
ln -s ../../binary_local-hooks/1000-stretch-boot-hotfix.hook.binary config/hooks/normal/1000-stretch-hotfix.hook.binary
ln -s ../../chroot_local-hooks/1050-install-recent-ipython-and-configure-it.hook.chroot config/hooks/normal/1050-install-recent-ipython-and-configure-it.hook.chroot
fi
| true
|
8f5d5c80da19ebe2e102db33293c629c30fb1357
|
Shell
|
peter0749/BashScript
|
/LeetCode_193_Valid_Phone_Numbers.sh
|
UTF-8
| 164
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
## Valid Number: (xxx) xxx-xxxx or xxx-xxx-xxxx for all x in [0-9]
# FIX: quote "$1" — unquoted, an unset/empty argument disappears entirely
# and grep silently blocks reading stdin instead of failing fast.
grep -P '^(\([[:digit:]]{3}\)\ |[[:digit:]]{3}\-)[[:digit:]]{3}\-[[:digit:]]{4}$' "$1"
| true
|
d32083443c0f7ec23e686e30f4dd8859cf766357
|
Shell
|
RHayabusa87/CYB6004_Scripting_Languages
|
/Portfolio/Week3/Before_Comments.sh
|
UTF-8
| 1,087
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive menu shown after a successful password check; dispatches to
# the helper scripts from Weeks 2 and 3.
bash ../Week2/PasswordCheck.sh
# https://stackoverflow.com/questions/13567947/run-bash-commands-from-txt-file
# NOTE(review): `$?` inside the loop reflects the most recently executed
# command (including the sub-scripts launched below), not only
# PasswordCheck.sh — confirm that this re-check on every iteration is
# intentional.
while true; do
if [[ $? -eq 0 ]] ; then
echo -e "\e[31m
1. Create a folder
2. Copy a folder
3. Set a password
4. Calculator
5. Create Week Folders
6. Check Filenames
7. Download a File
8. Exit\e[0m"
echo
read -p "Please press corresponding number and press enter >" BeforeMessage
case "$BeforeMessage" in
"1")
echo
bash ../Week2/FolderCreator.sh
;;
"2")
echo
bash ../Week2/FolderCopier.sh
;;
"3")
echo
bash ../Week2/SetPassword.sh
;;
"4")
echo
bash ../Week3/Placeholder_File.sh
;;
"5")
echo
bash ../Week3/Placeholder_File.sh
;;
"6")
echo
bash ../Week3/Placeholder_File.sh
;;
"7")
bash ../Week3/Placeholder_File.sh
;;
"8")
echo -e "\e[45mSorry to see you go? Whatever, I don't need you anyway...\e[0m"
exit
;;
*)
echo "Please type a number from 1-8 and press enter."
;;
esac
else
echo "You make me sick! Goodbye!"
exit 1
fi
done
exit 0
| true
|
3a9c884fde4ce93c1732c497079c6d4832f9a0fd
|
Shell
|
sensedata1/script-skip
|
/all_functions.sh
|
UTF-8
| 480
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Runs a fixed sequence of functions (loaded from `some_functions`), asking
# for a y/n confirmation before each one.
source some_functions
# Ask "<prompt> [y/n]" and read a single key.
# Returns 0 for y/Y, 255 for n/N; re-prompts on any other key.
prompt_confirm() {
while true; do
read -r -n 1 -p "${1:-Continue?} [y/n]: " REPLY
case $REPLY in
[yY]) echo ; return 0 ;;
[nN]) echo ; return 255 ;;
*) printf " \033[31m %s \n\033[0m" "invalid input"
esac
done
}
# The function names to run, in order; they are expected to be defined by
# the sourced `some_functions` file.
declare -a arr=(func1 \
func2 \
func3 \
func4)
for i in "${arr[@]}"
do
prompt_confirm "Proceed with $i?"
if [ "$?" -eq 0 ];
then
"$i"
else
echo "$i aborted"
fi
done
| true
|
b5e3c68a86cf8a1f09a75e4ea8a6ad8a48dacfd2
|
Shell
|
ksarkar/dotfiles
|
/.bashrc
|
UTF-8
| 1,730
| 2.78125
| 3
|
[] |
no_license
|
# Load fzf keybindings/completion if installed.
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
# General fzf settings.
# NOTE(review): ctrl-e is bound twice (select-all+accept, then
# half-page-up — the later binding wins), and some lines below are
# missing the trailing backslash, leaving literal newlines inside the
# string; fzf tolerates whitespace-separated options. Confirm intent.
export FZF_DEFAULT_OPTS=" \
--inline-info \
--reverse \
--extended \
--height=70% \
--preview='bat --color "always" --style "numbers" {}' \
--preview-window=right:60%:wrap \
--color=fg+:#F8F8F8,bg+:#515559,pointer:#F8F8F8,marker:226 \
--bind=ctrl-e:select-all+accept \
--bind=ctrl-d:half-page-down \
--bind=ctrl-e:half-page-up
--bind=ctrl-t:toggle+down
--bind=ctrl-b:toggle+up
--bind=ctrl-g:select-all+accept \
"
# ANSI color helpers used by the functions below.
RED=`tput setaf 1`
GREEN=`tput setaf 2`
YELLOW=`tput setaf 3`
BLUE=`tput setaf 4`
MAGENTA=`tput setaf 5`
CYAN=`tput setaf 6`
RESET=`tput sgr0`
# d opens the fuzzy finder and changes to the selected directory.
# Dosen't work inside citc clients.
function d() {
 # to exclude hidden files/directories use the command below
 # local dir=$(find ${1:-.} -path '*/\.*' -prune -o -type d -print 2> /dev/null | fzf +m --preview-window=:hidden)
 # Prune backup/system trees to keep the candidate list fast.
 local dir=$(find ${1:-.} -path '*/ksark-macbookair\.restore\.2019-09-23-10-37-22*' -prune \
 -o -path '*/\.Trash*' -prune \
 -o -path '*/Library*' -prune \
 -o -path '*/Applications*' -prune \
 -o -type d -print 2> /dev/null \
 | fzf +m --preview="tree -C -L 2 {}" --preview-window=50%)
 echo "${YELLOW}cd $dir${RESET}"
 cd "$dir"
}
# e opens the fuzzy finder and then opens the selected file with vim.
function e() {
 local f=$(find ${1:-.} -path '*/ksark-macbookair\.restore\.2019-09-23-10-37-22*' -prune \
 -o -path '*/\.Trash*' -prune \
 -o -path '*/Library*' -prune \
 -o -path '*/Applications*' -prune \
 -o -type f -print 2> /dev/null | fzf --preview="bat --color 'always' --style 'numbers' {}")
 echo "${YELLOW}vim $f${RESET}"
 vim "$f"
}
| true
|
1a7a77ea9d7847f893deff19bed4b5ec3475a2aa
|
Shell
|
SYSC-4001-OS/assignment2
|
/testBench.bash
|
UTF-8
| 2,504
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Test bench driving the four scheduler executables (see --help).
# With -w/--write, the known-good PCB data sets below are (re)written
# to the input files before each scheduler runs; otherwise whatever
# is already in the files is used.
arg=$1
# BUG FIX: the original used `write=$false`, which expands to the
# empty string, so every later `if $write` ran the empty command and
# succeeded — write mode was effectively always enabled. Use the
# literal strings true/false and test them explicitly.
write=false
case $arg in
-h | --help)
echo -e "This test bench tests four scripts:\n
fcfsScheduler
fcfsSchedulerIO
priorityQueueScheduler
priorityQueueSchedulerIO\n\n
The -w or --write option specifies that whatever is in testBench.bash (this script) should be written to the test files, those are:\n
1 22 0\n2 11 9\n3 12 12\n4 13 11\n5 14 17\n6 70 40\n10 2 15\nFor fcfsScheduler\n\n
1 22 10 1 0\n2 12 8 2 10\n4 15 5 1 45\n10 100 20 8 80\n12 8 5 1 20\nFor fcfsSchedulerIO\n\n
1 22 4 0\n2 11 6 9\n3 12 3 12\n4 13 12 11\n5 14 8 17\n6 70 10 40\n10 2 45 15\nFor priorityQueueScheduler\n\n
1 40 10 2 1 10\n2 100 20 10 2 0\n3 25 5 2 3 0\n10 50 12 6 10 0\n12 20 10 50 5 0\n8 25 10 12 5 0\nFor priorityQueueSchedulerIO\n
See the README for more information on what these numbers mean\n\n
This can be useful for cleaning up files\nOtherwise, whatever is in files PCBData.fcfs, PCBData.fcfsx, PCBData.dat and PCBData.datx is used."
exit 0 ;;
-w | --write)
echo "*******************************
write enabled, writing to files
*******************************"
write=true
;;
*)
;;
esac
if [ "$write" = true ] ; then
NONIOPCBDATA="1 22 0\n2 11 9\n3 12 12\n4 13 11\n5 14 17\n6 70 40\n10 2 15"
echo -e "writing PCB data to PCBdata.fcfs\n"
echo -e "$NONIOPCBDATA" > PCBdata.fcfs
echo -e "Data written, starting ./fcfsScheduler\n\n"
else
echo -e "Using data from PCBData.fcfs"
fi
./fcfsScheduler #run it, must be executable
if [ "$write" = true ] ; then
IOPCBDATA="1 22 10 1 0\n2 12 8 2 10\n4 15 5 1 45\n10 100 20 8 80\n12 8 5 1 20"
echo -e "\n\nwriting PCB data to PCBdata.fcfsx\n"
echo -e "$IOPCBDATA" > PCBdata.fcfsx
echo -e "Data written, starting ./fcfsSchedulerIO\n\n"
else
echo -e "Using data from PCBData.fcfsx, starting ./fcfsSchedulerIO\n\n"
fi
./fcfsSchedulerIO #run the IO one
if [ "$write" = true ] ; then
NONIOPCBDATA="1 22 4 0\n2 11 6 9\n3 12 3 12\n4 13 12 11\n5 14 8 17\n6 70 10 40\n10 2 45 15"
echo -e "\n\nwriting PCB data to PCBdata.dat\n"
echo -e "$NONIOPCBDATA" > PCBdata.dat
echo -e "Data written, starting ./priorityQueueScheduler\n\n"
else
echo -e "Using data from PCBData.dat, starting ./priorityQueueScheduler\n\n"
fi
./priorityQueueScheduler
if [ "$write" = true ] ; then
IOPCBDATA="1 40 10 2 1 10\n2 100 20 10 2 0\n3 25 5 2 3 0\n10 50 12 6 10 0\n12 20 10 50 5 0\n8 25 10 12 5 0"
echo -e "\n\nwriting PCB data to PCBdata.datx\n"
# BUG FIX: the original wrote this data set to PCBdata.fcfsx,
# clobbering the fcfsSchedulerIO input instead of creating
# PCBdata.datx (which the log message and the reader expect).
echo -e "$IOPCBDATA" > PCBdata.datx
echo -e "Data written, starting ./prioritySchedulerIO\n\n"
else
echo -e "Using data from PCBData.datx"
fi
./priorityQueueSchedulerIO
| true
|
f82a424042c2d57481a41f53af4ac30736944e12
|
Shell
|
mikegriffin/holland-prepost
|
/holland_post_rename_dir
|
UTF-8
| 626
| 3.796875
| 4
|
[] |
no_license
|
#! /bin/bash
# Script assumes that "backups-to-keep = 0" and "purge-policy = before-backup"
# Be sure to "chmod +x holland_post_rename_dir"
# example:
# after-backup-command="/usr/local/bin/holland_post_rename_dir ${backupdir}"
[[ "${#}" -eq 1 && -n "${1}" ]] || { echo "Bad arguments. Expecting Holland to invoke with \${backupdir} as the only argument." >&2; exit 1; }
bkdir=${1}
# Everything before the first '2' (the leading year digit of the
# timestamped backup directory) is the parent directory.
nightly=$(echo "${bkdir}" | sed 's/2.*//')
## Backup this script itself
cp -a "${0}" "${bkdir}" || exit 1
# Quote the paths and terminate option parsing with `--` so directory
# names containing spaces or leading dashes cannot break the rename.
mv -- "${bkdir}" "${nightly}/nightly" && echo "Successfully renamed ${bkdir} to ${nightly}/nightly" || { echo "$? failed" >&2; exit 1; }
| true
|
a6aae7af173fa00400363b4098610f9bdd0072ff
|
Shell
|
genus-machina/recruit
|
/bin/make-image
|
UTF-8
| 251
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash -e
# make-image: download a base image and run the numbered setup
# scripts (scripts/S*.sh) against it.
# Usage: make-image <image>
DIRECTORY=$(dirname "${BASH_SOURCE[0]}")
IMAGE="${1}"

if [ -z "${IMAGE}" ]
then
  # Diagnostics belong on stderr.
  echo "An image is required" >&2
  exit 1
fi
shift

# Quote ${DIRECTORY}: a checkout path containing spaces would
# otherwise split the command. The S*.sh glob stays unquoted on
# purpose so each matching script is passed as its own argument.
"${DIRECTORY}/download-image" "${IMAGE}"
"${DIRECTORY}/prepare-image" "${IMAGE}" "${DIRECTORY}"/../scripts/S*.sh
| true
|
93f3dad7294eea090b00a19c777aeeff5ea1096a
|
Shell
|
naveen230/docker-uhttpd
|
/mac2.sh
|
UTF-8
| 283
| 2.71875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# This script generates a MAC address and saves it to output.txt.
# BUG FIX: the shebang must be the very first line of the file; the
# original put a comment first, so the kernel ignored the shebang.
# Pick a random vendor prefix — order of vendors: DELL IBM HP CISCO INTEL.
front="$(echo 00:0D:56: 00:0D:60: 00:0D:9D: 00:0D:28: 00:24:D6: | xargs shuf -n1 -e)"
end="7B:0C:9F"
NEW_MAC=$front$end
# Quote the expansion so the value is written verbatim.
echo "$NEW_MAC" > "/root/output.txt"
| true
|
26a106e1e9ace331bf1c8e2b9e93b5df0cdaf47d
|
Shell
|
elvisassis/Docker-PostGIS
|
/Nimble_Newt/packages/run.sh
|
UTF-8
| 1,737
| 4.28125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Docker entrypoint: on "run_default", initialise the PostgreSQL
# datastore the first time (empty /data), then exec the server as the
# postgres user; any other command line is exec'd verbatim.
set -e
# Append a timestamped message to the container log file.
log(){
echo "$(date +"%Y-%m-%d %T") > $1" >> /log.txt
}
# Generate locale
LANG=${LOCALE}.${ENCODING}
locale-gen ${LANG} > /dev/null
log "Locale ${LOCALE}.${ENCODING} generated"
# Check if command is just "run_default"
if [ "$1" = 'run_default' ]; then
log "Running server"
# Check if data folder is empty. If it is, configure the dataserver
if [ -z "$(ls -A "/data/")" ]; then
log "Initializing datastore..."
# Create datastore
su postgres -c "initdb --encoding=${ENCODING} --locale=${LANG} --lc-collate=${LANG} --lc-monetary=${LANG} --lc-numeric=${LANG} --lc-time=${LANG} -D /data/"
log "Datastore created..."
# Create log folder
mkdir -p /data/logs
chown postgres:postgres /data/logs
log "Log folder created..."
# Erase default configuration and initialize it
# NOTE(review): pg_hba_conf / postgresql_conf appear to be helper
# commands shipped in this image ("a" presumably appends the
# '#'-separated entries) — confirm against the image's packages.
su postgres -c "rm /data/pg_hba.conf"
su postgres -c "pg_hba_conf a \"${PG_HBA}\""
# Modify basic configuration
su postgres -c "rm /data/postgresql.conf"
PG_CONF="${PG_CONF}#lc_messages='${LANG}'#lc_monetary='${LANG}'#lc_numeric='${LANG}'#lc_time='${LANG}'"
su postgres -c "postgresql_conf a \"${PG_CONF}\""
# Establish postgres user password and run the database
su postgres -c "pg_ctl -w -D /data/ start"
su postgres -c "psql -h localhost -U postgres -p 5432 -c \"alter role postgres password '${POSTGRES_PASSWD}';\""
log "Configurating and adding postgres user to the database..."
log "Stopping the server..."
# Stop the server (it is restarted below in the foreground).
su postgres -c "pg_ctl -w -D /data/ stop"
else
log "Datastore already exists..."
fi
log "Starting the server..."
# Start the database as PID 1 via exec so signals reach postgres.
exec gosu postgres postgres -D /data/
else
exec env "$@"
fi
| true
|
81278cad9c391e5db2e4069736365d8218dbb54d
|
Shell
|
krasin/qemu-cortex-m3-exp
|
/hello/test.sh
|
UTF-8
| 498
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
set -ue
# This script expects qemu-system-arm 1.4.0+ installed
# Also, you will need Sourcery CodeBench Lite Edition toolchain
# http://www.mentor.com/embedded-software/sourcery-tools/sourcery-codebench/editions/lite-edition/
# Don't forget to add toolchain's /bin directory to $PATH
# Cross-compile hello.c for Cortex-M3 (thumb) using the hosted linker
# script, then boot it under QEMU with semihosting enabled so the
# program's output and exit status reach the host.
arm-none-eabi-gcc -o hello.elf hello.c -g -mcpu=cortex-m3 -mthumb -T generic-m-hosted.ld
qemu-system-arm -cpu cortex-m3 -nographic -monitor null -serial null -semihosting -kernel hello.elf
# Only reached when both steps above succeeded (set -e).
echo "OK"
| true
|
6aae34c8e0bf664a279e8c68d515b0de70335029
|
Shell
|
jan-kaspar/analysis_elastic.1380GeV.beta11
|
/simulation/run_multiple_predefined
|
UTF-8
| 3,370
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
datasets=(
"DS1"
)
list=(
"none"
# #"si_th_y-1_th_x-1"
# "si_th_y-1_th_x+0"
# #"si_th_y-1_th_x+1"
# "si_th_y+1_th_x+0"
#
# "de_p+1"
#
# "de_x+1+1"
# "de_x-1+1"
# "de_x+1-1"
#
# "de_y+1+1"
# "de_y+1-1"
# "de_y-1-1"
#
# "tilt+1+1"
# "tilt+1-1"
#
## "L_y+1+1"
## "L_y-1+1"
#
## "L_x+1+1"
## "L_x+1-1"
#
## "v_x+1+1"
## "v_x+1-1"
#
# "opt+1m1"
# "opt+1m2"
# "opt+1m3"
# "opt+1m4"
# "opt+1m5"
# "opt+1m6"
# "opt+1m7"
# "opt+1m8"
)
events="1E6"
useLxBatch="0"
queue="1nd"
bsub_options=""
#----------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------
subdir="predefined_scenarios_eb"
#----------------------------------------------------------------------------------------------------
function RunCompile()
{
	# Copy mc.cc into the dataset directory and compile it there with
	# ROOT + Minuit. Returns non-zero when the compile fails.
	mkdir -p "$dataset"
	cp "mc.cc" "$dataset/mc.cc"
	# BUG FIX: compile inside a subshell so the caller's working
	# directory is unchanged even when g++ fails — the original
	# returned early after `cd "$dataset"`, leaving the script inside
	# the dataset directory for all subsequent loop iterations.
	(
		cd "$dataset" || exit 1
		g++ `root-config --libs` -lMinuit `root-config --cflags` -O3 -Wall -Wextra mc.cc -o mc
	) || return 1
	return 0
}
#----------------------------------------------------------------------------------------------------
# Write an lxbatch-ready job script for the current $dataset/$scenario
# into $subdir and make it executable. Reads globals: subdir, scenario,
# events. Sets globals: dir, job_file (consumed by RunOneJob*).
function MakeJobFile()
{
dir="$(pwd)/$subdir"
job_file="$dir/$scenario.job"
# The job script sets up the CERN/CMSSW environment, cds back to this
# working directory, then runs the compiled ./mc for one scenario.
(
echo "export HOME=\"/afs/cern.ch/exp/totem/scratch/jkaspar\""
echo "export RFIO_USE_CASTOR_V2=YES"
echo "export STAGE_HOST=castorpublic"
echo "export STAGE_SVCCLASS=default"
echo "source \"/afs/cern.ch/cms/cmsset_default.sh\""
echo "cd \"/afs/cern.ch/exp/totem/scratch/jkaspar/software/offline/704/src\""
echo "eval \`scram runtime -sh\`"
cwd=`pwd`
echo "work_dir=\"$cwd\""
echo "cd \"\$work_dir\""
#echo ""
#echo "uname -a &> \"$dir/$scenario.debug_log\""
echo ""
echo "./mc \\"
echo "	-generator-mode \"weights\"\\"
echo "	-model-file \"$dir/../../input_distributions/t-distributions,pp,2760GeV.root\"\\"
echo "	-model-object \"full range/petrov (3p) [02]/PH/differential cross-section\"\\"
#echo "	-model-file \"$dir/../../input_distributions/fit_DS4_cdf.root\"\\"
#echo "	-model-object \"df\"\\"
echo "	-scenario-type \"predefined\"\\"
echo "	-scenario-label \"$scenario\"\\"
echo "	-seed \"1\"\\"
echo "	-output \"$dir/$scenario.root\"\\"
echo "	-events \"$events\"\\"
echo "	-binning \"eb\"\\"
echo "	-diff-plots \\"
echo "	&> \"$dir/$scenario.log\""
) > "$job_file"
chmod u+x "$job_file"
}
#----------------------------------------------------------------------------------------------------
RunOneJobLocal()
{
	# Execute the generated job script synchronously, then report
	# which dataset/scenario finished.
	"$job_file"
	printf '%s\n' ">> done: $dataset, $scenario"
}
#----------------------------------------------------------------------------------------------------
# Prepare and launch one $dataset/$scenario job: generate its job
# file, then either run it locally in the background or submit it to
# LSF via bsub, depending on $useLxBatch.
function RunOneJob()
{
cd "$dataset" || return 1
mkdir -p "$subdir"
echo ">> RunOneJob: $dataset, $scenario"
MakeJobFile
if [ $useLxBatch -eq 0 ]
then
RunOneJobLocal &
else
result=`bsub -R "$bsub_options" -q $queue -o /dev/null -e /dev/null "$job_file"`
echo "	$result"
fi
cd - > /dev/null
}
#----------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------
# Main: compile the MC once per dataset, then launch every configured
# scenario for it. A failed compile skips the whole dataset.
for dataset in "${datasets[@]}"
do
	echo "* $dataset"
	RunCompile || continue

	for scenario in "${list[@]}"
	do
		RunOneJob
	done
done
| true
|
3c0b31e99f93c3c97fe6fe4d192cec42dca45fe5
|
Shell
|
passwordlandia/nagios
|
/syslogfinalserver.sh
|
UTF-8
| 713
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Configure rsyslog as a central syslog server: enable the UDP and
# TCP listeners on port 514, then register a local yum repository.
yum -y install net-tools
sed -i 's/#$ModLoad imudp/$ModLoad imudp/g' /etc/rsyslog.conf
sed -i 's/#$UDPServerRun 514/$UDPServerRun 514/g' /etc/rsyslog.conf
sed -i 's/#$ModLoad imtcp/$ModLoad imtcp/g' /etc/rsyslog.conf
sed -i 's/#$InputTCPServerRun 514/$InputTCPServerRun 514/g' /etc/rsyslog.conf
systemctl restart rsyslog.service
# BUG FIX: the original appended this block with a double-quoted
# echo, so the shell expanded the (unset) variable $basearch to an
# empty string instead of leaving the literal "$basearch" yum repo
# variable in the file. A quoted heredoc writes the text verbatim.
cat >> /etc/yum.repos.d/NTI-320.repo <<'EOF'
[nti-320]
name=Extra Packages for Centos from NTI-320 7 - $basearch
#baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch <- example epel repo
# Note, this is putting repodata at packages instead of 7 and our path is a hack around that.
baseurl=http://10.142.0.2/centos/7/extras/x86_64/Packages/
enabled=1
gpgcheck=0

EOF
| true
|
cbbf7252054afb0e215cba506959f00d1a374a3b
|
Shell
|
herry13/smartfrog-lang
|
/haskell/Bin/runSF.sh
|
UTF-8
| 2,012
| 4.21875
| 4
|
[
"BSD-3-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
#!/bin/sh
# run an external SF compiler on the specified source file
# put the JSON output in the specified destination file
# munge any error messages and include them in the output file
# (so that they can be more easily compared with messages from hsf)
COMPILER=$1
SRCFILE=$2
DSTFILE=$3
CPATH=$4
# Print usage and exit with status 2.
usage() {
echo "usage: runSF scala|ocaml source dest path-to-compiler" >&2
exit 2
}
# Each doXxx helper runs one compiler. "$DSTFILE#" (the literal
# trailing '#' is part of the name) is a scratch file beside the real
# output; it collects stderr and is merged then removed.
doScala() {
$CPATH -json $SRCFILE 2>$DSTFILE# | grep -v '^(' >$DSTFILE
cat $DSTFILE# >>$DSTFILE || exit 2
if grep -q 'Exception' $DSTFILE ; then
grep Exception <$DSTFILE >$DSTFILE# || exit 2
sed 's/.*Exception: //' <$DSTFILE# >$DSTFILE || exit 2
fi
rm -f $DSTFILE# || exit 2
}
doOCaml() {
$CPATH -json $SRCFILE 2>$DSTFILE# >$DSTFILE
cat $DSTFILE# >>$DSTFILE || exit 2
rm -f $DSTFILE# || exit 2
}
# The HP compiler is noisy: strip banner and stack-trace lines.
doHP() {
$CPATH -v $SRCFILE 2>&1 \
|grep -v 'at org.smartfrog' \
|grep -v 'Parser - SmartFrog' \
|grep -v '(C) Copyright' \
|grep -v 'SFHOME undefined' \
|grep -v '^\s*at org.smartfrog' \
|grep -v '^\s*$' \
>$DSTFILE
}
# Report a missing compiler binary plus the env var that configures it.
noCompiler() {
case $COMPILER in
scala) TYPE=scala ; VAR=SF_SCALA_COMPILER ;;
ocaml) TYPE=ocaml ; VAR=SF_OCAML_COMPILER ;;
hp) TYPE=hp ; VAR=SF_HP_COMPILER ;;
esac
echo "can't find executable \"$1\" for external $TYPE compiler" >&2
echo "try setting $VAR in options.mk or the environment" >&2
exit 2
}
test -z "$SRCFILE" && usage
test -z "$DSTFILE" && usage
test -z "$CPATH" && usage
test ! -x "$CPATH" && noCompiler "$CPATH"
# change to the directory containing the source file
# so that #includes get interpreted in the same way
cd `dirname $SRCFILE`
# then use the relative filename
SRCFILE=`basename $SRCFILE`
# check for excluded tests
# this is necessary because some compilers fail to terminate
# for some inputs (eg. the HP compiler on recursive includes)
if grep "<<<< not $COMPILER >>>>" $SRCFILE >$DSTFILE ; then
exit 0
fi
# execute the compiler
case $COMPILER in
scala) doScala ;;
ocaml) doOCaml ;;
hp) doHP ;;
*) usage ;;
esac
exit 0
| true
|
171785eebe19d2ae6abe62049457c3d06bf982a4
|
Shell
|
plus3it/misc-tools
|
/bash/getglacierobjects.sh
|
UTF-8
| 2,521
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Really ugly script to perform ad hoc storage-reporting against
# S3 buckets with lifecycle storage policies attached
#
#################################################################
REGION=${REGION:-us-east-1}
PROFILE=${PROFILE:-default}
PAGESZ=${PAGESZ:-10000}
MAXITM=${MAXITM:-100000}
BUCKET=${QUERYBUCKET:-UNDEF}
COUNT=0
# Recursively page through the bucket's objects, summing the sizes of
# GLACIER-class objects one MAXITM-sized chunk at a time.
#   $1 - continuation token from the previous page ("" on first call)
#   $2 - running total of bytes counted so far (absent on first call)
# Recursion stops once a page contributes 0 bytes.
GlacierCompute() {
COUNT=$((${COUNT} + 1))
if [ "${1}" = "" ]
then
# First page: fetch the continuation token and this chunk's
# GLACIER byte total, then recurse.
NXTOKN=$(
aws --profile ${PROFILE} --region ${REGION} s3api list-objects-v2 \
--page-size ${PAGESZ} --max-items ${MAXITM} --bucket ${BUCKET} \
--query 'NextToken' | sed 's/"//g'
)
SIZE=$(
aws --profile ${PROFILE} --region ${REGION} s3api list-objects-v2 \
--page-size ${PAGESZ} --max-items ${MAXITM} --bucket ${BUCKET} \
--query 'Contents[?StorageClass==`GLACIER`].Size' |
sed -e 's/,//' -e '/[][]/d' -e 's/^[ ][ ]//' -e '/^$/d' |
awk '{ sum += $1 } END { print sum }'
)
printf "Chunk #%s: \t%15s\n" "${COUNT}" "${SIZE}"
GlacierCompute "${NXTOKN}" "${SIZE}"
else
# Subsequent pages: resume from the token.
# NOTE(review): this branch reads the global NXTOKN rather than
# "$1"; they hold the same value because the recursive call passes
# NXTOKN, but "$1" would be the cleaner choice.
SIZE=$(
aws --profile ${PROFILE} --region ${REGION} s3api list-objects-v2 \
--starting-token "${NXTOKN}" \
--page-size ${PAGESZ} --max-items ${MAXITM} --bucket ${BUCKET} \
--query 'Contents[?StorageClass==`GLACIER`].Size' |
sed -e 's/,//' -e '/[][]/d' -e 's/^[ ][ ]//' -e '/^$/d' |
awk '{ sum += $1 } END { print sum }'
)
NXTOKN=$(
aws --profile ${PROFILE} --region ${REGION} s3api list-objects-v2 \
--starting-token "${NXTOKN}" \
--page-size ${PAGESZ} --max-items ${MAXITM} --bucket ${BUCKET} \
--query 'NextToken' | sed 's/"//g'
)
# An empty page sums to "" — treat it as zero so arithmetic works.
if [ "${SIZE}" = "" ]
then
SIZE=0
fi
TOTSIZE=$( expr $2 + ${SIZE} )
TOTSIZEINK=$( expr ${TOTSIZE} / 1024 )
TOTSIZEINM=$( expr ${TOTSIZEINK} / 1024 )
TOTSIZEING=$( expr ${TOTSIZEINM} / 1024 )
printf "Chunk #%s: \t" "${COUNT}"
printf "%15s + %15s + %15s (%sGiB)\n" "$2" "${SIZE}" "${TOTSIZE}" "${TOTSIZEING}"
# A zero-byte chunk means the listing is exhausted.
if [[ ${SIZE} -eq 0 ]]
then
echo "All bucket-objects queried."
else
GlacierCompute "${NXTOKN}" "${TOTSIZE}"
fi
fi
}
#######
## Main
#######
# BUG FIX: refuse to run without a bucket — the original printed
# "Exiting." but then fell through and ran the query loop anyway.
if [[ ${BUCKET} = UNDEF ]]
then
   echo "Please set the QUERYBUCKET environmental to proceed. Exiting." >&2
   exit 1
fi
echo "Chunking at ${MAXITM} objects per query"
GlacierCompute ""
| true
|
3f914568629edcb2fbdb00b36108653c48d244ec
|
Shell
|
xpaulz/jhipster-devbox
|
/scripts/setup.sh
|
UTF-8
| 6,583
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
# Provision the JHipster devbox: Java 8, Maven, Node/JHipster, a GUI
# desktop, and assorted developer tooling.
# NOTE: `sudo cmd >> file` does NOT elevate the redirection — the
# shell opens the file as the calling user. All writes to /etc below
# therefore go through `sudo tee`.

# update the system
sudo apt-get update
sudo apt-get upgrade

################################################################################
# This is a port of the JHipster Dockerfile,
# see https://github.com/jhipster/jhipster-docker/
################################################################################
export JAVA_VERSION='8'
export JAVA_HOME='/usr/lib/jvm/java-8-oracle'
export MAVEN_VERSION='3.3.9'
export MAVEN_HOME='/usr/share/maven'
export PATH=$PATH:$MAVEN_HOME/bin
export LANGUAGE='en_US.UTF-8'
export LANG='en_US.UTF-8'
export LC_ALL='en_US.UTF-8'
sudo locale-gen en_US.UTF-8
sudo dpkg-reconfigure locales

# we need to update to assure the latest version of the utilities
sudo apt-get update
sudo apt-get install -y git-core
sudo apt-get install --nodeps -y etckeeper

# install utilities (the original had a stray "install" package name here)
sudo apt-get install -y vim git sudo zip bzip2 fontconfig curl

# install Java 8
echo 'deb http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main' | sudo tee -a /etc/apt/sources.list > /dev/null
echo 'deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main' | sudo tee -a /etc/apt/sources.list > /dev/null
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys C2518248EEA14886
sudo apt-get update
echo oracle-java-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
sudo apt-get install -y --force-yes oracle-java${JAVA_VERSION}-installer
sudo update-java-alternatives -s java-8-oracle

# install maven
sudo curl -fsSL http://archive.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.tar.gz | sudo tar xzf - -C /usr/share && sudo mv /usr/share/apache-maven-${MAVEN_VERSION} /usr/share/maven && sudo ln -s /usr/share/maven/bin/mvn /usr/bin/mvn

# install node.js (curl itself needs no root; bash does)
curl -sL https://deb.nodesource.com/setup_4.x | sudo bash -
sudo apt-get install -y nodejs unzip python g++ build-essential

# update npm
sudo npm install -g npm
# install yeoman grunt bower grunt gulp
sudo npm install -g yo bower grunt-cli gulp
# install JHipster
sudo npm install -g generator-jhipster@2.26.1

################################################################################
# Install the graphical environment
################################################################################

# force encoding — append as root via tee
echo 'LANG=en_US.UTF-8' | sudo tee -a /etc/environment > /dev/null
echo 'LANGUAGE=en_US.UTF-8' | sudo tee -a /etc/environment > /dev/null
echo 'LC_ALL=en_US.UTF-8' | sudo tee -a /etc/environment > /dev/null
echo 'LC_CTYPE=en_US.UTF-8' | sudo tee -a /etc/environment > /dev/null
sudo locale-gen en_US en_US.UTF-8

# run GUI as non-privileged user (overwrite, not append)
echo 'allowed_users=anybody' | sudo tee /etc/X11/Xwrapper.config > /dev/null

# install Ubuntu desktop and VirtualBox guest tools
sudo apt-get install -y ubuntu-desktop virtualbox-guest-dkms virtualbox-guest-utils virtualbox-guest-x11
sudo apt-get install -y gnome-session-flashback

################################################################################
# Install the development tools
################################################################################

# install Spring Tool Suite
export STS_VERSION='3.7.2.RELEASE'
cd /opt && wget http://dist.springsource.com/release/STS/${STS_VERSION}/dist/e4.5/spring-tool-suite-${STS_VERSION}-e4.5.1-linux-gtk-x86_64.tar.gz
cd /opt && tar -zxvf spring-tool-suite-${STS_VERSION}-e4.5.1-linux-gtk-x86_64.tar.gz
cd /opt && rm -f spring-tool-suite-${STS_VERSION}-e4.5.1-linux-gtk-x86_64.tar.gz
sudo chown -R vagrant:vagrant /opt
cd /home/vagrant

# install Chromium Browser
sudo apt-get install -y chromium-browser

# install MySQL with default password as 'vagrant'
export DEBIAN_FRONTEND=noninteractive
#echo 'mysql-server mysql-server/root_password password vagrant' | sudo debconf-set-selections
#echo 'mysql-server mysql-server/root_password_again password vagrant' | sudo debconf-set-selections
#sudo apt-get install -y mysql-server mysql-workbench

# install Postgres with default password as 'vagrant'
sudo apt-get install -y postgresql postgresql-client postgresql-contrib libpq-dev
sudo -u postgres psql -c "CREATE USER admin WITH PASSWORD 'vagrant';"
# (removed a bare `sudo apt-get install -y` with no package arguments)

# install Heroku toolbelt — the installer needs root, not wget
wget -O- https://toolbelt.heroku.com/install-ubuntu.sh | sudo sh

# install Cloud Foundry client
cd /opt && sudo curl -L "https://cli.run.pivotal.io/stable?release=linux64-binary&source=github" | tar -zx
sudo ln -s /opt/cf /usr/bin/cf
cd /home/vagrant

#install Guake
sudo apt-get install -y guake
sudo cp /usr/share/applications/guake.desktop /etc/xdg/autostart/

# install Atom
wget https://github.com/atom/atom/releases/download/v1.3.2/atom-amd64.deb
sudo dpkg -i atom-amd64.deb
rm -f atom-amd64.deb
sudo dpkg --configure -a

# provide m2
mkdir -p /home/vagrant/.m2
git clone https://github.com/jhipster/jhipster-travis-build /home/vagrant/jhipster-travis-build
mv /home/vagrant/jhipster-travis-build/repository /home/vagrant/.m2/
rm -Rf /home/vagrant/jhipster-travis-build

# create shortcuts
sudo mkdir /home/vagrant/Desktop
ln -s /opt/sts-bundle/sts-${STS_VERSION}/STS /home/vagrant/Desktop/STS
sudo chown -R vagrant:vagrant /home/vagrant

# AWS tools
sudo apt-get install -y ec2-api-tools ec2-ami-tools
sudo apt-get install -y iamcli rdscli moncli ascli elasticache aws-cloudformation-cli elbcli

# install other tools
sudo apt-get install -y bash-completion byobu tmux cdargs htop lsof ltrace strace zsh tofrodos ack-grep
sudo apt-get install -y exuberant-ctags
sudo apt-get install -y unattended-upgrades
sudo apt-get install -y pssh clusterssh

# jq is a json formatter
sudo apt-get install -y jq
# install csv2json
sudo apt-get install -y golang-go
go get github.com/jehiah/json2csv
# install csvkit
sudo apt-get install -y python3-csvkit xmlstarlet
sudo npm install -g xml2json-command

# No screensaver on a VM as host will lock things down
gsettings set org.gnome.desktop.screensaver idle-activation-enabled false
sudo apt-get remove -y gnome-screensaver

# jekyll blogging
curl -L https://get.rvm.io | sudo bash -s stable --ruby=2.0.0
sudo gem install jekyll capistrano

# secure the system (later)
# http://www.howtogeek.com/121650/how-to-secure-ssh-with-google-authenticators-two-factor-authentication/
sudo apt-get remove -y libpam-google-authenticator
# these system files need root to modify — the original plain
# `echo >>` appends failed with "Permission denied"
echo 'auth required pam_google_authenticator.so' | sudo tee -a /etc/pam.d/sshd > /dev/null
echo 'ChallengeResponseAuthentication yes' | sudo tee -a /etc/ssh/sshd_config > /dev/null
sudo service ssh restart
## TODO: Each user still has to run the 'google-authenticator' tool on their own account

# clean the box — zero free space so the exported image compresses well
sudo apt-get clean
sudo dd if=/dev/zero of=/EMPTY bs=1M > /dev/null 2>&1
sudo rm -f /EMPTY
|
e4926f485c6fa9ac75ba3286e209cfc69667bb93
|
Shell
|
sviande/NuxConfig
|
/zsh/aliases.zsh
|
UTF-8
| 555
| 2.546875
| 3
|
[] |
no_license
|
# Interactive-shell aliases and helpers.
alias dmesg='dmesg -L'
alias egrep='egrep --color=auto'
alias fgrep='fgrep --color=auto'
alias grep='grep --color=auto'
alias c='clear'
alias l='ls -CF'
alias la='ls -A'
alias ll='ls -la --color'
alias ls='ls --color=auto'
alias vi="vim"
alias svi="sudo -E vim"
alias pong='ping free.fr -i 10'
alias dit='docker exec -i -t'
alias emoj="emoji-fzf preview | fzf --preview 'emoji-fzf get --name {1}' | cut -d \" \" -f 1 | emoji-fzf get"
alias ssh='TERM=xterm-256color \ssh'
# Re-run the given command every time a file below the current
# directory is modified. BUG FIX: "$@" (not bare $@) keeps arguments
# containing spaces intact on each re-invocation.
watchFiles() { while inotifywait --exclude .swp -e modify -r .; do "$@"; done; }
| true
|
cffb543363fbac3e8879f1fc4b441469364c166b
|
Shell
|
setaman/diva
|
/core/web-client/entrypoint.sh
|
UTF-8
| 1,231
| 3.4375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Container entrypoint for the prebuilt web client: patch runtime
# configuration into the compiled JS bundles (the build baked in
# development defaults), re-hash each bundle, fix the references in
# index.html, then start nginx in the foreground.
index_html="./index.html"
echo "Setting ENV's..."
for file in ./js/app.*.js*;
do
echo "Processing $file";
# Each sed swaps a build-time placeholder for the runtime value.
echo "VUE_APP_API_GATEWAY_URL: ${VUE_APP_API_GATEWAY_URL}"
sed -i 's|http://localhost:8000|'$VUE_APP_API_GATEWAY_URL'|g' $file
echo "VUE_APP_KEYCLOAK_URL: ${VUE_APP_KEYCLOAK_URL}"
sed -i 's|http://172.17.0.1:7000/auth|'$VUE_APP_KEYCLOAK_URL'|g' $file
echo "VUE_APP_KEYCLOAK_REALM: ${VUE_APP_KEYCLOAK_REALM}"
sed -i 's|diva-kc-realm|'$VUE_APP_KEYCLOAK_REALM'|g' $file
echo "VUE_APP_KEYCLOAK_CLIENT_ID: ${VUE_APP_KEYCLOAK_CLIENT_ID}"
sed -i 's|diva-kc-client|'$VUE_APP_KEYCLOAK_CLIENT_ID'|g' $file
echo "VUE_APP_REGISTER_AVAILABLE: ${VUE_APP_REGISTER_AVAILABLE}"
sed -i 's|register_available|'$VUE_APP_REGISTER_AVAILABLE'|g' $file
# The bundle filename carries a content hash; recompute it after
# patching so browsers don't serve a stale cached copy.
echo "Hashing $file"
hash=$(md5sum "$file" | cut -c1-8)
echo "$hash"
new_name="./js/app.$hash.js"
if [ "$file" = "$new_name" ]; then
echo "Nothing changed"
else
echo "Rename $file to $new_name"
mv "$file" "$new_name"
# ${var/./} drops the first '.' (the leading "./") so the names
# match how index.html references them ("/js/app.<hash>.js").
# NOTE(review): ${var/pat/} is not POSIX sh — works under bash or
# busybox ash; confirm the base image's /bin/sh supports it.
updated_file=${file/./}
updated_new_name=${new_name/./}
echo "Update $index_html"
sed -i 's|'$updated_file'|'$updated_new_name'|g' $index_html
fi
done
echo "Starting Nginx"
nginx -g 'daemon off;'
| true
|
caff925e78fbae853d89a03c92f432d250547603
|
Shell
|
scala/scala
|
/tools/pathResolver
|
UTF-8
| 159
| 2.796875
| 3
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# Locate the installed `scala` launcher and run the PathResolver
# diagnostic tool against its sibling lib directory.
WHICH=`which scala`
BASE=`dirname "$WHICH"`
LIBDIR=$BASE/../lib
echo "Using ${WHICH}."
echo
# BUG FIX: forward arguments with "$@" so each caller argument stays
# a single word; the original's unquoted $* re-split arguments that
# contained spaces.
java -cp "${LIBDIR}/*" scala.tools.util.PathResolver "$@"
| true
|
52fcb3cbb8f7506f1e1e8ccb92ac9302e2a17b0e
|
Shell
|
RichardBruskiewich/RTX
|
/code/kg2/extract-semmeddb.sh
|
UTF-8
| 1,889
| 3.78125
| 4
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# extract-semmeddb.sh: download the SemMedDB release and convert it to a tuple-list JSON file
# Copyright 2019 Stephen A. Ramsey <stephen.ramsey@oregonstate.edu>
set -o nounset -o pipefail -o errexit
if [[ "${1:-}" == "--help" || "${1:-}" == "-h" ]]; then
echo Usage: "$0 <output_file.json> [test]"
exit 2
fi
# Usage: extract-semmeddb.sh <output_file.json>
echo "================= starting extract-semmeddb.sh ================="
date
# Shared build configuration (BUILD_DIR, CODE_DIR, s3 settings, mysql_conf).
config_dir=`dirname "$0"`
source ${config_dir}/master-config.shinc
semmed_output_file=${1:-"${BUILD_DIR}/kg2-semmeddb-tuplelist.json"}
## supply a default value for the build_flag string
build_flag=${2:-""}
semmed_ver=VER42
semmed_year=2020
semmed_dir=${BUILD_DIR}/semmeddb
semmed_output_dir=`dirname "${semmed_output_file}"`
semmed_sql_file=semmed${semmed_ver}_${semmed_year}_R_WHOLEDB.sql
mysql_dbname=semmeddb
mkdir -p ${semmed_dir}
mkdir -p ${semmed_output_dir}
## estimate amount of system ram, in GB
# NOTE(review): mem_gb is computed but not referenced below —
# presumably informational or leftover; confirm before removing.
mem_gb=`${CODE_DIR}/get-system-memory-gb.sh`
# Fetch the compressed SQL dump from the project's S3 bucket.
${s3_cp_cmd} s3://${s3_bucket}/${semmed_sql_file}.gz ${semmed_dir}/
## if a "semmeddb" database already exists, delete it
mysql --defaults-extra-file=${mysql_conf} \
     -e "DROP DATABASE IF EXISTS ${mysql_dbname}"
## create the "semmeddb" database
mysql --defaults-extra-file=${mysql_conf} \
     -e "CREATE DATABASE IF NOT EXISTS ${mysql_dbname} CHARACTER SET utf8 COLLATE utf8_unicode_ci"
# Stream-decompress the dump straight into mysql (no temp .sql file).
zcat ${semmed_dir}/${semmed_sql_file}.gz | mysql --defaults-extra-file=${mysql_conf} --database=${mysql_dbname}
# In test builds, pass --test through to the converter.
if [[ "${build_flag}" == "test" || "${build_flag}" == 'alltest' ]]
then
   test_arg=" --test"
else
   test_arg=""
fi
${VENV_DIR}/bin/python3 ${CODE_DIR}/semmeddb_mysql_to_tuple_list_json.py \
	   ${test_arg} \
	   ${mysql_conf} \
	   ${mysql_dbname} \
	   ${semmed_output_file}
date
echo "================= finished extract-semmeddb.sh ================="
| true
|
af1267fae7bd07180f0be629cb29e7bd184e928e
|
Shell
|
annishoelzel/five
|
/e/easyvdr-installer-4.95.0/installer/build/init
|
UTF-8
| 443
| 2.640625
| 3
|
[] |
no_license
|
#! /bin/bash
# Rebuild and re-sign the local APT repository metadata.
# Never run this inside the chroot filesystem: the private key needed
# for signing is only available outside of it.
BASE="/home/ubuntu-builder/FileSystem/var/cache/apt-local-repo"
# BUG FIX: abort if the repo directory is missing instead of scanning
# (and signing) whatever the current directory happens to be.
cd "$BASE/" || exit 1
# -f: don't error when a metadata file doesn't exist yet.
rm -f Packages Packages.gz Release Release.gpg
dpkg-scanpackages . /dev/null > Packages && gzip -9c Packages > Packages.gz
apt-ftparchive release . > Release
gpg --local-user CC5B84A0 --output Release.gpg -ba Release
# CC5B84A0: easyVDR key for local repos.
| true
|
9164e204b850e5470233d90a3e78d8a8db6e5134
|
Shell
|
connectthefuture/dotfiles-16
|
/scripts/install-stow.sh
|
UTF-8
| 355
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
# Download, build, and install GNU Stow into ~/.local.
# Fail fast: without this, a failed cd/download/extract would let the
# following steps run in the wrong directory or on a missing tarball.
set -eu

VERSION='2.2.2'
BUILD_DIR='/tmp/build_stow'
INSTALL_DIR="${HOME}/.local"

FOLDER="stow-${VERSION}"
FILE="${FOLDER}.tar.gz"
URL="https://ftp.gnu.org/gnu/stow/${FILE}"

mkdir -p "${BUILD_DIR}"
mkdir -p "${INSTALL_DIR}"

cd "${BUILD_DIR}"
wget "${URL}"
tar xzf "${FILE}"

cd "${FOLDER}"
./configure --prefix="${INSTALL_DIR}"
make -j
make install
| true
|
e1754a5f11a24d50d34d77e50126d797f1282f7b
|
Shell
|
boriphuth/Ansible-2-for-Configuration-Management
|
/setup/create.sh
|
UTF-8
| 142
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the playbook that lives next to this script against localhost,
# forwarding any extra command-line arguments to ansible-playbook.
MY_DIR="$(cd "${0%/*}" 2>/dev/null; echo "$PWD")"
PLAYBOOK=${MY_DIR}/playbook.yaml
# BUG FIX: "$@" (not unquoted $*) keeps forwarded arguments that
# contain spaces intact; the playbook path is quoted for the same reason.
ansible-playbook -i localhost, "${PLAYBOOK}" "$@"
| true
|
3e90cf666b31901fb023d04e29eb585821673761
|
Shell
|
AshyIsMe/bash-fun
|
/sysloggraph.sh
|
UTF-8
| 1,230
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Graph a daily count of syslog messages
SCRIPTBASENAME=`basename $0`
# NOTE(review): "error" is not defined in this script — if mktemp
# fails, this line itself fails with "command not found"; presumably
# a helper from the author's wider environment.
TMPFILE=`mktemp /tmp/${SCRIPTBASENAME}.XXXXXX` || error "Error openening temp file"
# Data feed:
# <Count> <Date>
# eg. 42 "Aug 20"
#syslog | cut -c 1-6 | uniq -c > $TMPFILE
#syslog | sed -E '/^(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)/p' | head -n 10
# Keep only lines starting with a "Mon DD" timestamp, reduced to that prefix.
filterOnlyDates() {
  cut -c 1-6 | grep -E '^(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) ([[:digit:]]+)'
}
#syslog | cut -c 1-6 | grep -E '^(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) ([[:digit:]]+)'
# Count consecutive lines per day -> "<count> <MonDD>" rows for gnuplot.
syslog | filterOnlyDates | uniq -c | awk -F' ' '{ print $1 " " $2 $3; }' > $TMPFILE
# Size the dumb-terminal plot to the current window (minus room for labels).
cols=$(tput cols);
rows=$(tput lines); let "rows -= 8";
script="set terminal dumb ${cols} ${rows}; "
#The X labels don't print vertically in terminal unfortunately.
#Uncomment the next line and gnuplot will show the chart in a gui window
#script=""
script="$script set xtics rotate;"
#script="$script plot 'FILE' using 1:xtic(2) with impulses title 'syslog messages per day';"
script="$script plot 'FILE' using 1 with impulses title 'syslog messages per day';"
#script="plot 'data-file.dat'"
# FILE is substituted here; the second substitution at the gnuplot
# call below is a harmless no-op on the already-resolved string.
script="${script/FILE/$TMPFILE}"
gnuplot -persist -e "${script/FILE/$TMPFILE}"
rm $TMPFILE
| true
|
084eb6ea3bcaa411a9abcc586d8f9377aa7051bb
|
Shell
|
wzc314/gem5-script
|
/run-x86.sh
|
UTF-8
| 1,287
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run one SPEC2006 benchmark on a gem5 mesh of rows*rows X86 cores and
# print the resulting IPC.
#
# Options:
#   -r N     mesh rows (number of cores = N*N; default 4)
#   -b NAME  SPEC benchmark name (default bzip2)
#   -o DIR   output directory (default m5out-<benchmark>)
#   -m N     maximum instructions to simulate (default 20000000)
set -u

rows=4
benchmark=bzip2
maxinsts=20000000
outdir=

while getopts r:b:o:m: opt; do
  case $opt in
    r) rows=$OPTARG ;;
    b) benchmark=$OPTARG ;;
    o) outdir=$OPTARG ;;
    m) maxinsts=$OPTARG ;;
    *) echo "Unknown option: $opt" >&2 ;;
  esac
done

# Default the output directory to the benchmark name.
if [ -z "$outdir" ]; then
  outdir=m5out-$benchmark
fi

# Total core count; $(( )) replaces the deprecated $[ ] form.
num=$(( rows * rows ))

# Comma-separated list repeating the benchmark once per core
# (clearer than the original awk/sed construction, same result).
benchmarks=$benchmark
for (( i = 1; i < num; i++ )); do
  benchmarks+=",$benchmark"
done

# Create the output directory, or empty it if it already has contents.
if [ ! -d "$outdir" ]; then
  mkdir -p "$outdir"
elif [ -n "$(ls -A "$outdir")" ]; then
  rm -f "$outdir"/*
fi

# Keep a copy of this script alongside the results for reproducibility.
cp "$0" "$outdir"

./build/X86_MESI_Two_Level/gem5.opt \
  --outdir="$outdir" configs/example/run_spec2006.py \
  --benchmarks="$benchmarks" \
  --cpu-type=detailed --ruby --num-cpus="$num" \
  --caches --cacheline_size=128 \
  --l1i_size=16kB --l1i_assoc=2 \
  --l1d_size=16kB --l1d_assoc=2 \
  --l2cache --l2_size=128kB --l2_assoc=4 --num-l2caches="$num" \
  --topology=MeshDirCorners_XY --mesh-rows="$rows" \
  --vcs-per-vnet=4 \
  --num-dirs=4 --mem-size=8GB \
  --sys-clock=1GHz --ruby-clock=2GHz --cpu-clock=2GHz \
  --output="$outdir" --errout="$outdir" \
  --maxinsts="$maxinsts" \
  --network=garnet2.0 &> "$outdir/runscript.log"

# Line 10 of stats.txt carries the IPC figure; second whitespace field.
# NOTE(review): this line-number dependency is fragile across gem5 versions.
IPC=$(sed -n 10p "$outdir/stats.txt" | tr -s ' ' | cut -d ' ' -f 2)
printf "%-15s %12s %10.4f\n" "$(basename "$(pwd)")" "$benchmark" "$IPC"
tput bel
| true
|
6c8207de8f64cd65937f66c15cec13bebf9b70e9
|
Shell
|
igodorogea/neos-vagrant-demo
|
/provisioning/windows.sh
|
UTF-8
| 532
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant provisioner: install Ansible from the upstream PPA on first run,
# then apply the shared playbook locally.
set -e

# `command -v` is the portable presence test; the original parsed the
# output of `which` inside backticks.
if ! command -v ansible-playbook >/dev/null 2>&1; then
  sudo apt install -y software-properties-common
  sudo apt-add-repository ppa:ansible/ansible -y
  sudo apt update
  sudo apt upgrade -y
  sudo apt autoremove
  sudo apt autoclean
  sudo apt install -y ansible
fi

# Setup Ansible for Local Use and Run
# cp /vagrant/inventories/dev /etc/ansible/hosts -f
# chmod 666 /etc/ansible/hosts
# sudo ansible-playbook /vagrant/playbook.yml -e hostname=$1 -c local
sudo ansible-playbook /vagrant/playbook.yml -i "localhost," -c local
| true
|
ad9c5abcbfe22c0b112b571318a32a64369e9d18
|
Shell
|
kyrisu/desk
|
/bash_completion.sh
|
UTF-8
| 487
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
# Bash programmable completion for the `desk` command.
#
# Completes subcommand names after a leading dash, and desk names (files in
# ~/.desk/desks/ with their extensions stripped) after `.`, `go`, `e`, `edit`.
_desk() {
    # All working variables are local: this file is sourced into interactive
    # shells, and the original leaked cur/prev/opts — and, worse, set
    # IFS=$'\n' permanently (an assignment prefix before another assignment
    # takes effect in the current shell).
    local cur prev opts
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    prev="${COMP_WORDS[COMP_CWORD-1]}"
    opts="init list ls . go help version"

    case "${prev}" in
        .|go|e|edit)
            # Split the desk listing on newlines only, so desk names
            # containing spaces stay intact.
            local IFS=$'\n'
            local -a tmp
            tmp=( $(compgen -W "$(ls ~/.desk/desks/ 2>/dev/null)" -- "${COMP_WORDS[$COMP_CWORD]}") )
            # Strip the file extension from each candidate.
            COMPREPLY=( "${tmp[@]%.*}" )
            return 0
            ;;
        *)
            ;;
    esac

    # Only offer subcommand completions when the user typed a leading dash.
    if [[ ${cur} == -* ]]; then
        COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
        return 0
    fi
}
complete -F _desk desk
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.