blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
5534ff2348503c713a7afaa337f2aa8359714bc2
|
Shell
|
vatadepalli/bash
|
/assignments/Assign_1.sh
|
UTF-8
| 2,372
| 3.328125
| 3
|
[] |
no_license
|
# 1
# Create some files and sub directories.
mkdir shell
cd shell
touch one two three
mkdir cat1
mkdir cat2
cd cat1
touch four five six
cd ..
# fix: was "cd cat2C" — a typo; the directory created above is "cat2"
cd cat2
touch seven eight nine
cd ..
# a) display files row wise, column wise, 1 per row
# rowwise
ls -l
# b) sort the files in ascending, descending order
# c) Create some hidden files and sub directories and display them using "ls -a"
# d) Change time stamp of some files and directories using touch command
# and display the files using ls -t ls -rt
# e) Recursive display contents of your home directory.
# f) display all attributes of files and directories using ls -l
# g) display attributes of a directory using ls -ld
# 2.
# Create three directory a b and c and create a.txt,b.txt and c.txt
# in each directory respectively and then copy c directory into the a.
mkdir a
mkdir b
mkdir c
cd a
touch a.txt
cd ..
cd b
touch b.txt
cd ..
cd c
touch c.txt
cd ..
mv c a
# 3.
# Move Directory b to c.
# fix: removed a stray "cd .." here — a, b and a/c all live in the current
# ("shell") directory, so climbing one level made the mv below fail
mv b ./a/c/
# 4
# Create alias of ls -lh command to your name.
alias dac51='ls -lh'
unalias dac51
# 5
# Change Directory name a to cdac.
mv a cdac
# 6
# Create five files file1.txt , file2.txt ,file3.txt file4.txt and
# file5.txt with some text inside it. Search for 's' character
# inside all the files using grep command.
# Also Use cat to view all file content together.
mkdir 6
cd 6
touch file1.txt file2.txt file3.txt file4.txt file5.txt
echo "Hello one" > file1.txt
echo "Hello two" > file2.txt
echo "Hello three" > file3.txt
echo "Hello four" > file4.txt
echo "Hello five" > file5.txt
grep -R 'ello'
# fix: return to the parent directory so the later sections run where expected
cd ..
# 7
# Create file.txt using cat and edit that using nano editor.
cat /dev/null > catnull.txt
echo 'mrow' > catnull.txt
nano catnull.txt
# 8
# Create 5 empty files using touch command.
mkdir 8
cd 8
touch file1.txt file2.txt file3.txt file4.txt file5.txt
# fix: leave "8" again so the rm below can see "cdac"
cd ..
# 9
# Remove previously created directory a , b and c.
# fix: option before operand ("rm cdac -r" relied on GNU argument reordering)
rm -r cdac
# 10
# Explore the following commands with various options, refer man pages for
# further help a) date b) cal c) bc d) echo e) who f) whoami g) logname g) uname h)
# seq i) clear
date # a
# 11 < FOR ADITYA >
# Create a file with some content using vi editor
# a) Display the file using cat command
# b) Display line no.s along with content
# c) Display the file in reverse order using tac
# d) Combine multiple files using cat command
| true
|
ba33bd60fc842f21de217e7369cd3c1462182416
|
Shell
|
jbsparks/katacode
|
/hpc-container-training-scenario/backgroundInstall.sh
|
UTF-8
| 709
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install podman (and prerequisites for singularity) from the openSUSE Kubic
# repository on an Ubuntu host, then record completion for a polling watcher.
echo "Installing prerequisit container runtime alternates, podman and singularity"
# Source /etc/os-release to get VERSION_ID (the Ubuntu release number) for the repo URL.
. /etc/os-release
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add -
# NOTE(review): apt-get runs without sudo while tee/apt-key above use it —
# presumably this script runs as root (katacoda-style sandbox); confirm.
apt-get update
apt-get install -y build-essential libssl-dev uuid-dev libgpgme11-dev squashfs-tools libseccomp-dev pkg-config podman
echo "get installed versions of docker and podman..."
docker --version
podman --version
# Marker file consumed by whatever waits on this background install.
echo "done" >> /opt/.backgroundfinished
| true
|
d2525a90a4cf944af1afa46416d555090855647e
|
Shell
|
UAVWorks/groundsystem-client
|
/build-scripts/install_opencv.sh
|
UTF-8
| 719
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install OpenCV from source: fetch build dependencies, clone the repo,
# configure with CMake and install system-wide (prefix /usr).
# fix: abort on the first failure — previously a failed clone or cd let the
# script keep running (and potentially `sudo make install`) in the wrong place.
set -e
INITIAL_DIR=$(pwd)
cd "$HOME"
sudo apt-get -y install build-essential
sudo apt-get -y install cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
sudo apt-get -y install python-dev python-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev libv4l-dev
echo "UAS Installation: OpenCV dependencies complete"
git clone https://github.com/opencv/opencv.git
mv opencv OpenCV
cd OpenCV/platforms
# Start from a clean build directory.
if [ -d build-desktop ]
then
rm -rf build-desktop
fi
mkdir build-desktop
cd build-desktop
cmake ../.. -DCMAKE_INSTALL_PREFIX=/usr
make -j 5
sudo make install
cd "$INITIAL_DIR"
echo "UAS Installation: OpenCV install script complete"
| true
|
4739119be5ab07f8caa17f9acae9dc9fd6caeeb7
|
Shell
|
calh/octopus
|
/sample_app/script/ci_build
|
UTF-8
| 359
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CI build driver: resolve the Rails root relative to this script, install
# gems into a local bundle, create per-stage Postgres databases, run cucumber.
# Requires POSTGRES_HOST / POSTGRES_USER / POSTGRES_PASSWORD in the environment.
# fix: $(...) instead of backticks, quoted expansions, checked cd.
dir=$(readlink -f "$0")
dir="$(dirname "$dir")/.."
RAILS_ROOT=$(readlink -f "$dir")
cd "$RAILS_ROOT" || exit 1
bundle install --path="$RAILS_ROOT/.bundle"
for stage in test development production; do
PGPASSWORD=$POSTGRES_PASSWORD psql -h "$POSTGRES_HOST" -U "$POSTGRES_USER" -c "create database octopus_sample_app_$stage"
done
# Not working...
bundle exec cucumber
| true
|
4d021663c7d8a2e62bdf72e912f68d6cc08b7613
|
Shell
|
EricCousineau-TRI/repro
|
/python/bindings/pybind_clang/tools/bazel
|
UTF-8
| 171
| 2.609375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Wrapper placed on PATH as `bazel`: sets up the workspace venv, then execs
# the real bazel binary with the original arguments.
set -eu
# fix: quote ${BASH_SOURCE[0]} and the dirname result so paths containing
# spaces do not break the resolution of the workspace root.
workspace_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)
# Ensure that the venv is setup.
"${workspace_dir}/setup.sh"
# Run bazel.
exec bazel-real "$@"
| true
|
dc3a0876dacd4f4a222914a39db5557a00777e06
|
Shell
|
kontena/ruby-packer
|
/.travis/install_deps.sh
|
UTF-8
| 267
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Install build prerequisites for Travis CI: Homebrew packages on macOS,
# apt packages on the (Ubuntu) Linux workers. Same behavior as before,
# expressed as a case dispatch on $TRAVIS_OS_NAME.
set -uex
case "$TRAVIS_OS_NAME" in
osx)
brew update
brew install squashfs
brew install texinfo
brew install openssl
;;
*)
sudo apt-get update
sudo apt-get install -y -q openssl squashfs-tools curl install-info info texinfo texi2html
;;
esac
| true
|
6302c519f08b8223080fb40839d3e02336fdce6e
|
Shell
|
samuelkarp/amazon-ecs-init
|
/ecs-init
|
UTF-8
| 4,252
| 3.65625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the
# "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and
# limitations under the License.
#
# description: Starts an Amazon EC2 Container Service Agent container
# config: /etc/ecs/ecs.config
# --- Configuration: ECS agent file locations and remote artifact URLs ---
confdir="/etc/ecs"
ecs_config="${confdir}/ecs.config"
ecs_json_config="${confdir}/ecs.config.json"
docker="/usr/bin/docker"
agent_tarball="/var/cache/ecs/ecs-agent.tar"
agent_remote_tarball="https://s3.amazonaws.com/amazon-ecs-agent/ecs-agent-latest.tar"
agent_remote_tarball_md5="https://s3.amazonaws.com/amazon-ecs-agent/ecs-agent-latest.tar.md5"
image_name="amazon/amazon-ecs-agent"
logdir="/var/log/ecs"
initlogfile="${logdir}/ecs-init.log"
agentlogfile="ecs-agent.log"
datadir="/var/lib/ecs/data"
# Log a timestamped message to stdout and append it to the init log file.
printt() {
[ -d ${logdir} ] || mkdir -p ${logdir}
echo "ecs-init [$(date)]: $1" | tee -a "${initlogfile}"
}
# Log an error and abort the whole script with status 1.
throw_error() {
printt "ERROR $1"
exit 1
}
# Download the agent tarball plus its .md5 file, verify the checksum, and
# move the tarball into the cache location on success.
download_tarball() {
printt "Downloading Amazon EC2 Container Service Agent from ${agent_remote_tarball}"
tmp_tarball="$(mktemp)"
tmp_md5="$(mktemp)"
curl "${agent_remote_tarball}" -o "${tmp_tarball}"
curl "${agent_remote_tarball_md5}" -o "${tmp_md5}"
download_md5="$(md5sum ${tmp_tarball} | sed 's/ .*//')"
expected_md5="$(cat ${tmp_md5})"
if [ ! "${download_md5}" = "${expected_md5}" ]; then
rm ${tmp_tarball}
rm ${tmp_md5}
throw_error "Downloaded tarball with md5sum ${download_md5} did not match ${expected_md5}"
fi
mv ${tmp_tarball} ${agent_tarball}
rm ${tmp_md5}
# NOTE(review): this returns the status of the rm above, not of the mv.
return "$?"
}
# Return 0 (via grep) when the cached tarball's md5 matches the remote ETag,
# i.e. the server answers "304 Not Modified".
check_latest() {
agent_md5="$(md5sum ${agent_tarball} | sed 's/ .*//')"
curl -I "${agent_remote_tarball}" -H "If-None-Match:${agent_md5}" | grep '304 Not Modified'
return "$?"
}
# Ensure the agent image exists in the local docker image store, downloading
# and `docker load`-ing the cached tarball if it is absent. Returns 3 when
# the docker binary is missing/not executable.
pre_start() {
printt "pre-start"
[ -x ${docker} ] || return 3
if [ -z "$(docker images -q ${image_name})" ]; then
[ -e $agent_tarball ] || download_tarball
printt "Loading Amazon EC2 Container Service Agent from file ${agent_tarball}"
docker load <${agent_tarball}
[ "$?" -eq "0" ] || throw_error "Cannot load Amazon EC2 Container Service Agent ${agent_tarball} into docker"
fi
}
# Run the agent container in the foreground: create config/log/data dirs,
# remove any stale "ecs-agent" container, then `docker run` the image.
# Returns 2 if the config file is unreadable, 3 if docker is missing.
start() {
printt "start"
[ -e ${ecs_config} ] || touch ${ecs_config}
[ -e ${ecs_json_config} ] || touch ${ecs_json_config}
[ -d ${logdir} ] || mkdir -p ${logdir}
[ -d ${datadir} ] || mkdir -p ${datadir}
[ -r ${ecs_config} ] || return 2
[ -x ${docker} ] || return 3
printt "Starting Amazon EC2 Container Service Agent"
existing_agent_container_id="$(docker ps -a | awk '$NF ~ /^ecs-agent$/ {print $1;}')"
if [ -n "${existing_agent_container_id}" ]; then
printt "Removing existing agent container ID: ${existing_agent_container_id}"
docker rm -f ${existing_agent_container_id} 2>&1 | tee -a ${initlogfile}
sleep 1
fi
docker run \
--name ecs-agent \
-v /var/run/docker.sock:/var/run/docker.sock \
-v ${logdir}:/log \
-v ${datadir}:/data \
-v ${confdir}:${confdir} \
-p 127.0.0.1:51678:51678 \
--env-file ${ecs_config} \
-e ECS_LOGFILE=/log/${agentlogfile} \
-e ECS_DATADIR=/data \
-e ECS_AGENT_CONFIG_FILE_PATH=${ecs_json_config} \
${image_name}:latest
printt "Exited $?"
}
# Ask the running agent container to stop gracefully.
pre_stop() {
printt "pre-stop"
[ -x ${docker} ] || return 3
printt "Stopping Amazon EC2 Container Service Agent"
docker stop ecs-agent
}
# Refresh the cached tarball: download it when missing, or re-download when
# the remote has a newer version than the cache.
update_cache() {
printt "update-cache"
if [ ! -r ${agent_tarball} ]; then
printt "Could not find cached Amazon EC2 Container Service Agent"
download_tarball
return "$?"
fi
check_latest
if [ "$?" -ne "0" ]; then
printt "Cached Amazon EC2 Container Service Agent does not match latest at ${agent_remote_tarball}"
download_tarball
fi
return "$?"
}
# Dispatch on the sub-command given by the init system.
case "$1" in
pre-start)
pre_start
;;
start)
start
;;
pre-stop)
pre_stop
;;
update-cache)
update_cache
;;
*)
echo "Usage: $0 {pre-start|start|pre-stop|update-cache}"
exit 1
esac
exit $?
| true
|
02ecba61b34b19e2f906e946f8daaa0ee64b4b46
|
Shell
|
hyperion-ml/hyperion
|
/egs/sre19-cmn2/v1/steps_kaldi_xvec/run_xvector_3a.1_adapt.sh
|
UTF-8
| 7,123
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2019 Johns Hopkins University (Author: Jesus Villalba)
# 2017 David Snyder
# 2017 Johns Hopkins University (Author: Daniel Garcia-Romero)
# 2017 Johns Hopkins University (Author: Daniel Povey)
#
# Apache 2.0.
# This script adapts F-TDNN 3a
. ./cmd.sh
set -e
# --- Defaults; all of these can be overridden on the command line and are
# --- re-parsed by utils/parse_options.sh below.
stage=1
train_stage=0
use_gpu=true
remove_egs=false
num_epochs=3
nodes=b0
storage_name=$(date +'%m_%d_%H_%M')
data=data/train
init_nnet_dir=exp/xvector_nnet_x
init_nnet_file=final.raw
nnet_dir=exp/xvector_nnet_x
egs_dir=exp/xvector_nnet_x/egs
lr=0.001
final_lr=0.0001
batch_size=128
num_repeats=16
frames_per_iter=100000000
. ./path.sh
. ./cmd.sh
# Parse --option value pairs into the variables declared above.
. ./utils/parse_options.sh
# Now we create the nnet examples using steps_kaldi_xvec/get_egs.sh.
# The argument --num-repeats is related to the number of times a speaker
# repeats per archive. If it seems like you're getting too many archives
# (e.g., more than 200) try increasing the --frames-per-iter option. The
# arguments --min-frames-per-chunk and --max-frames-per-chunk specify the
# minimum and maximum length (in terms of number of frames) of the features
# in the examples.
#
# To make sense of the egs script, it may be necessary to put an "exit 1"
# command immediately after stage 3. Then, inspect
# exp/<your-dir>/egs/temp/ranges.* . The ranges files specify the examples that
# will be created, and which archives they will be stored in. Each line of
# ranges.* has the following form:
# <utt-id> <local-ark-indx> <global-ark-indx> <start-frame> <end-frame> <spk-id>
# For example:
# 100304-f-sre2006-kacg-A 1 2 4079 881 23
# If you're satisfied with the number of archives (e.g., 50-150 archives is
# reasonable) and with the number of examples per speaker (e.g., 1000-5000
# is reasonable) then you can let the script continue to the later stages.
# Otherwise, try increasing or decreasing the --num-repeats option. You might
# need to fiddle with --frames-per-iter. Increasing this value decreases the
# the number of archives and increases the number of examples per archive.
# Decreasing this value increases the number of archives, while decreasing the
# number of examples per archive.
# Stage 6: create training examples (egs). On the CLSP grid the egs storage
# is spread across cluster nodes selected by $nodes via create_split_dir.pl.
if [ $stage -le 6 ]; then
echo "$0: Getting neural network training egs";
# dump egs.
if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $egs_dir/storage ]; then
dir_name=$USER/hyp-data/kaldi-xvector/$storage_name/egs/storage
if [ "$nodes" == "b0" ];then
utils/create_split_dir.pl \
/export/b{04,05,06,07,08,09}/$dir_name $egs_dir/storage
elif [ "$nodes" == "b1" ];then
utils/create_split_dir.pl \
/export/b{14,15,16,17}/$dir_name $egs_dir/storage
elif [ "$nodes" == "fs01" ];then
utils/create_split_dir.pl \
/export/fs01/$dir_name $egs_dir/storage
elif [ "$nodes" == "c0" ];then
utils/create_split_dir.pl \
/export/c{06,07,08,09}/$dir_name $egs_dir/storage
elif [ "$nodes" == "bc" ];then
utils/create_split_dir.pl \
/export/{b07,b08,b10,b15,b16,b17,b19,c04,c05,c08,c09,c10}/$dir_name $egs_dir/storage
fi
fi
steps_kaldi_xvec/get_egs.sh --cmd "$train_cmd" \
--nj 8 \
--stage 0 \
--frames-per-iter $frames_per_iter \
--frames-per-iter-diagnostic 100000 \
--min-frames-per-chunk 300 \
--max-frames-per-chunk 400 \
--num-diagnostic-archives 3 \
--num-repeats $num_repeats \
"$data" $egs_dir
fi
# he first step is to create the example egs for your data. That means running the sre16/v2 recipe (using your data), until you reach stage 4 of local/nnet3/xvector/tuning/run_xvector_1a.sh. Even though you're using your data, you might still want to perform some kind of augmentation (e.g., with MUSAN noises and music, and with reverberation).
# Once your examples are created, you'll need to do the following:
# 1. Look for a file called "pdf2num" in your new egs directory. This is the number of speakers in your egs. Let's call this value num_speakers.
# 2. Create an nnet3 config file (let's call it your_nnet_config), that looks similar to the following. Replace num_speakers with the actual number of speakers in your training egs.
# component name=output.affine type=NaturalGradientAffineComponent input-dim=512 output-dim=num_speakers param-stddev=0.0 bias-stddev=0.0 max-change=1.5
# component-node name=output.affine component=output.affine input=tdnn7.batchnorm
# component name=output.log-softmax type=LogSoftmaxComponent dim=num_speakers
# component-node name=output.log-softmax component=output.log-softmax input=output.affine
# output-node name=output input=output.log-softmax objective=linear
# 3. Run the following command:
# nnet3-copy --nnet-config=your_nnet_config exp/xvector_nnet_1a/final.raw exp/your_experiment_dir/0.raw
# 0.raw should be identical to the pretrained model, but the final layer has been reinitialized, and resized to equal the number of speakers in your training data.
# 4. Now, run local/nnet3/xvector/tuning/run_xvector_1a.sh from --stage 6 with --train-stage 0. If everything went smoothly, this should start training the pretrained DNN further, using your egs.
# eso me lo dijo David
# Stage 7: build a config that reinitializes the output layer so its size
# matches the number of speakers in the new egs, then splice it into the
# pretrained network with nnet3-copy to produce the starting model 0.raw.
if [ $stage -le 7 ]; then
echo "$0: creating config file to reinit last layer";
num_targets=$(wc -w $egs_dir/pdf2num | awk '{print $1}')
feat_dim=$(cat $egs_dir/info/feat_dim)
mkdir -p $nnet_dir/configs
cp $init_nnet_dir/*_chunk_size $nnet_dir
cp $init_nnet_dir/*.config $nnet_dir
cp -r $init_nnet_dir/configs/* $nnet_dir/configs
cat <<EOF > $nnet_dir/configs/adapt.config
component name=output.affine type=NaturalGradientAffineComponent input-dim=512 output-dim=${num_targets} param-stddev=0.0 bias-stddev=0.0 max-change=1.5
component-node name=output.affine component=output.affine input=tdnn12.batchnorm
component name=output.log-softmax type=LogSoftmaxComponent dim=${num_targets}
component-node name=output.log-softmax component=output.log-softmax input=output.affine
output-node name=output input=output.log-softmax objective=linear
EOF
nnet3-copy --nnet-config=$nnet_dir/configs/adapt.config $init_nnet_dir/$init_nnet_file $nnet_dir/configs/ref.raw
cp $nnet_dir/configs/ref.raw $nnet_dir/0.raw
fi
# Dropout rises from 0 to 0.1 between 20% and 50% of training, then decays to 0.
dropout_schedule='0,0@0.20,0.1@0.50,0'
srand=123
# Stage 8: fine-tune the reinitialized network (0.raw) on the adaptation egs.
if [ $stage -le 8 ]; then
python2 steps/nnet3/train_raw_dnn.py --stage=$train_stage \
--cmd="$train_cmd" \
--trainer.optimization.proportional-shrink 10 \
--trainer.optimization.momentum=0.5 \
--trainer.optimization.num-jobs-initial=3 \
--trainer.optimization.num-jobs-final=8 \
--trainer.optimization.initial-effective-lrate=$lr \
--trainer.optimization.final-effective-lrate=$final_lr \
--trainer.optimization.minibatch-size=$batch_size \
--trainer.srand=$srand \
--trainer.max-param-change=2 \
--trainer.num-epochs=$num_epochs \
--trainer.dropout-schedule="$dropout_schedule" \
--trainer.shuffle-buffer-size=1000 \
--egs.frames-per-eg=1 \
--egs.dir="$egs_dir" \
--cleanup.remove-egs $remove_egs \
--cleanup.preserve-model-interval=10 \
--use-gpu=true \
--dir=$nnet_dir || exit 1;
fi
exit 0;
| true
|
918196ba8b5613639953a131de0f4badfe3d08d8
|
Shell
|
Ahjema/docker_scripts
|
/jenkins_update.sh
|
UTF-8
| 592
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#this script will update (or install and update after) a jenkins server. It uses a docker volume to retain settings/configs)
#stops current jenkins (if named jenkins)
docker stop jenkins
#for science
sleep 5
#Delete stopped jenkins container
docker rm jenkins
#Delete all 'untagged/dangling' (<none>) images - comment this out if it makes you nervous
# fix: only run `docker rmi` when there actually are dangling images —
# with an empty argument list it fails with "requires at least 1 argument".
dangling=$(docker images -q -f dangling=true)
if [ -n "$dangling" ]; then
# $dangling intentionally unquoted: word-splitting yields one arg per image id
docker rmi $dangling
fi
#Downloads and starts the new LTS jenkins container
docker run -p 8080:8080 -p 50000:50000 -v jenkins_home:/var/jenkins_home --restart=always --name jenkins jenkins/jenkins:lts
| true
|
3bcde467f3e8f075978e18f59ab717139c671925
|
Shell
|
zandbelt/ping-scripts
|
/pingfed-admin-api/sp-connection-add-verification-cert.sh
|
UTF-8
| 970
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# sample script to add a new verification certificate to a SP Connection
ADM_USER=administrator
ADM_PWD=2Federate
PF_API=https://localhost:9999/pf-admin-api/v1
# NOTE: FLAGS is fed through `echo | xargs curl` below so the escaped space
# inside the X-XSRF-Header value survives word splitting.
FLAGS="-k -s -u ${ADM_USER}:${ADM_PWD} --header X-XSRF-Header:\ pingfed"
SPID=$2
#SPID=iIsFjLr8PxZhoE1h8zFEG4Rre3P
PEM=$3
case $1 in
list)
echo ${FLAGS} | xargs curl ${PF_API}/idp/spConnections
;;
add)
# Strip the BEGIN/END armor lines and all newlines from the PEM body.
# fix: $(...) instead of backticks throughout.
DATA=$(cat "${PEM}" | sed '$d' | sed '1,1d' | tr -d '\n')
JSON_DATA=$(cat <<JSON
{
"primaryVerificationCert": false,
"secondaryVerificationCert": false,
"x509File": {
"fileData": "${DATA}"
},
"activeVerificationCert": false,
"encryptionCert": false
}
JSON
)
# GET the connection, append the new cert object to credentials.certs,
# then PUT the modified document back.
echo ${FLAGS} | xargs curl ${PF_API}/idp/spConnections/${SPID} | \
jq ".credentials.certs += [ ${JSON_DATA} ]" | \
curl ${FLAGS} -H "Content-Type: application/json" -X PUT -d @- ${PF_API}/idp/spConnections/${SPID}
;;
*)
# fix: the usage message was missing its closing bracket
echo "Usage: $0 [ list | add <sp-connection-id> <pem-filename> ]"
;;
esac
| true
|
0115cca1aa2f524a49b7aa0b5c481f3470f7713c
|
Shell
|
GrowSense/Index
|
/view-garden-devices-info.sh
|
UTF-8
| 546
| 3.5
| 4
|
[] |
no_license
|
# Print a summary (label, name, type, port) for every device directory
# found under ./devices; each device dir holds type/name/label/port .txt files.
DEVICES_DIR="devices"
DIR=$PWD
echo ""
echo "Garden devices info..."
echo ""
if [ -d "$DEVICES_DIR" ]; then
for d in "$DEVICES_DIR"/*; do
# fix: quote "$d" so device directories containing spaces don't break cat
DEVICE_TYPE=$(cat "$d/type.txt")
DEVICE_NAME=$(cat "$d/name.txt")
DEVICE_LABEL=$(cat "$d/label.txt")
DEVICE_PORT=$(cat "$d/port.txt")
echo "$DEVICE_LABEL"
echo "  Name: $DEVICE_NAME"
echo "  Type: $DEVICE_TYPE"
echo "  Port: $DEVICE_PORT"
echo ""
done
else
echo "No device info found in $DEVICES_DIR"
fi
echo ""
|
0c6e38a4557e560bc258fe7bf20075a6da7bf19b
|
Shell
|
lovebaicai/SmallScript
|
/auto_install_python3.sh
|
UTF-8
| 1,551
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Build and install CPython from source on a yum-based system, create
# python3/pip3 symlinks, and configure pip to use the Aliyun mirror.
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin
export PATH
# Check if user is root (fix: the identical second check later in the
# original script was redundant and has been removed)
if [ $(id -u) != "0" ]; then
echo "Error: You must be root to run this script, please use root to initialization OS."
exit 1
fi
version="3.7.5"
echo "Please enter the version number you need:"
# fix: read -r so backslashes in input are taken literally
read -r -p "(Default version: 3.7.5):" version
if [ "$version" = "" ];then
version="3.7.5"
fi
name="Python"
pyfile="$name-$version.tgz"
yum -y install wget gcc gcc-c++ make openssl-devel bzip2-devel libffi-devel readline-devel zlib-devel ncurses-devel sqlite-devel autoconf bison automake zlib* fiex* libxml* ncurses-devel libmcrypt* libtool-ltdl-devel*
# Reuse a previously downloaded tarball if present and non-empty.
if [ -s "$pyfile" ];then
echo -e "\033[40;31m $pyfile [found]\033[40;37m"
else
wget https://www.python.org/ftp/python/$version/$pyfile
fi
tar zxf "$pyfile"
cd "$name-$version"
./configure --prefix=/usr/local/python3 --enable-optimizations
make altinstall
# NOTE(review): the 3.7 binary names are hard-coded; installing a non-3.7.x
# version would leave these links dangling — confirm before changing $version.
ln -s /usr/local/python3/bin/python3.7 /usr/bin/python3
ln -s /usr/local/python3/bin/pip3.7 /usr/bin/pip3
# Point pip at the Aliyun mirror unless a pip config already exists.
if [ -d /root/.pip ];then
echo -e "\033[40;31m file is [found]\033[40;37m"
else
mkdir ~/.pip
cat > ~/.pip/pip.conf <<EOF
[global]
trusted-host=mirrors.aliyun.com
index-url=https://mirrors.aliyun.com/pypi/simple/
EOF
fi
pip3 install --upgrade pip
echo -e "\nInstalled Python and pip version is ... "
python3 -V && pip3 -V
echo -e "\033[32m \nInstall Successfully! \033[0m"
| true
|
8140490dfae2d741e81acea03118e11578d0dd73
|
Shell
|
sullenel/dotfiles
|
/zsh/functions/splac
|
UTF-8
| 572
| 3.609375
| 4
|
[] |
no_license
|
#
# Split a flac file into multiple files/tracks
# Usage: $0 <cue file> <flac file> <directory to save tracks>
#
# NOTE: this is a zsh autoload-function file (see the vim modeline below) —
# the whole file is a function body, which is why `local` is legal here and
# why `echo "\n..."` prints a real newline (zsh echo interprets escapes).
if [[ $# -eq 3 ]]; then
local cuefile="$1"
local flacfile="$2"
local dir="$3"
echo "-> Creating '$dir'"
mkdir -p $dir
echo "\n-> Splitting $flacfile"
# Track filenames are <number>_<performer>_<title>; output format flac.
shnsplit -w -f $cuefile -t '%n_%p_%t' -o 'flac flac' -d $dir -P spin -- $flacfile
# Copy the cue sheet's tags onto the freshly split tracks.
cuetag.sh $cuefile $dir/*.flac
echo "\n-> Deleting"
# -i: prompt before removing the source files.
rm -i $cuefile
rm -i $flacfile
echo "\n-> Done"
else
echo "Usage: $0 <cue> <flac> <directory>"
fi
# vim: set ft=zsh:
| true
|
ca7e3c4fafb03ff7e19a345a72806d588b08c8f9
|
Shell
|
Artemigos/Dotfiles
|
/verify_software
|
UTF-8
| 158
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Verify that every program listed in required_software.txt is on PATH.
# Prints "Missing <prog>" for each absent tool; exits non-zero if any is missing.
result=0
for prog in $(cat required_software.txt) ; do
# fix: POSIX `command -v` instead of the non-standard `which`
if ! command -v "$prog" >/dev/null 2>&1; then echo "Missing $prog"; result=1; fi
done
exit $result
| true
|
a26b18099f9931f486e56043b144f0aa53931edb
|
Shell
|
spiritsree/docker-torrent-client
|
/app/scripts/health.sh
|
UTF-8
| 257
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container health check: ping the configured host once; exit 0 when the
# network is reachable, 1 otherwise. Host defaults to www.google.com.
# http://www.tldp.org/LDP/abs/html/parameter-substitution.html
health_host=${HEALTH_CHECK_HOST:-www.google.com}
if ping -c 1 "${health_host}" > /dev/null 2>&1; then
echo "Network up"
exit 0
else
echo "Network down"
exit 1
fi
| true
|
eb324a225ee1001181f00f450fa7560ac66e68c7
|
Shell
|
eurunuela/EuskalIBUR_dataproc
|
/04.first_level_analysis/07.compute_rsfc.sh
|
UTF-8
| 1,524
| 3.65625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
if_missing_do() {
# Helper with two modes:
#   if_missing_do mkdir <dir...>      -> mkdir the dir(s) if $2 is absent
#   if_missing_do copy|mask <src> <dst> -> create <dst> from <src> if absent
#     (copy = cp, mask = fslmaths -bin)
if [ "$1" == 'mkdir' ]
then
  if [ ! -d "$2" ]
  then
    mkdir "${@:2}"
  fi
elif [ ! -e "$3" ]
then
  printf "%s is missing, " "$3"
  case $1 in
    copy ) echo "copying $2"; cp "$2" "$3" ;;
    # fix: the default arm was missing `echo`, so the message string itself
    # was executed as a command instead of being printed
    mask ) echo "binarising $2"; fslmaths "$2" -bin "$3" ;;
    * ) echo "and you shouldn't see this"; exit ;;
  esac
fi
}
replace_and() {
# Recreate $2 from scratch: remove any existing *directory* at that path,
# then make a fresh directory (mode "mkdir") or an empty file (mode "touch").
local mode="$1" target="$2"
if [ "$mode" = "mkdir" ]; then
  if [ -d "$target" ]; then rm -rf "$target"; fi
  mkdir "$target"
elif [ "$mode" = "touch" ]; then
  if [ -d "$target" ]; then rm -rf "$target"; fi
  touch "$target"
fi
}
# Positional args: subject id, session id, optional working dir, optional tmp root.
sub=$1
ses=$2
wdr=${3:-/data}
tmp=${4:-.}
### print input
printline=$( basename -- $0 )
echo "${printline} " "$@"
######################################
######### Script starts here #########
######################################
cwd=$(pwd)
# BIDS-style per-subject/session paths; mask comes from the ses-01 registration.
fdir=${wdr}/sub-${sub}/ses-${ses}/func_preproc
flpr=sub-${sub}_ses-${ses}
mask=${wdr}/sub-${sub}/ses-01/reg/sub-${sub}_sbref_brain_mask
tmp=${tmp}/tmp.${flpr}_07cr
# Fresh scratch dir for this subject/session (wiped at the end).
replace_and mkdir ${tmp}
cd ${wdr} || exit
if_missing_do mkdir Mennes_replication
if_missing_do mkdir Mennes_replication/fALFF Mennes_replication/RSFA
cd Mennes_replication
# Four rest runs, zero-padded 01..04.
for run in $( seq -f %02g 1 4 )
do
input=00.${flpr}_task-rest_run-${run}_optcom_bold_native_processed
# AFNI 3dRSFC in the 0.01-0.1 Hz band; keep only fALFF/RSFA outputs.
3dRSFC -input ${fdir}/${input}.nii.gz -band 0.01 0.1 \
-mask ${mask}.nii.gz -no_rs_out -nodetrend \
-prefix ${tmp}/${input}
# Convert the +orig BRIK outputs to NIfTI in the group folders.
3dresample -input ${tmp}/${input}_fALFF+orig -prefix fALFF/${flpr}_task-rest_run-${run}_fALFF.nii.gz
3dresample -input ${tmp}/${input}_RSFA+orig -prefix RSFA/${flpr}_task-rest_run-${run}_RSFA.nii.gz
done
rm -rf ${tmp}
cd ${cwd}
| true
|
87252b582c9f66c6e8983e9ecc3cd7e85bf50a5b
|
Shell
|
simpsonw/dotfiles
|
/bash/install.sh
|
UTF-8
| 376
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install kube-ps1 and wire every *.bash file under the current tree into
# ~/.bashrc (idempotently, via grep -q guards).
git clone git@github.com:jonmosco/kube-ps1.git
# fix: quote the -name pattern — unquoted *.bash is glob-expanded by the
# shell in the current directory before find ever sees it
find "$(pwd)" -name '*.bash' -exec echo "source" {} \; > bash/dotfiles.bashrc
chmod +x "$PWD/bash/dotfiles.bashrc"
grep -q -F "source $PWD/bash/dotfiles.bashrc" ~/.bashrc || echo "source $PWD/bash/dotfiles.bashrc" >> ~/.bashrc
grep -q -F "export PATH=\$PATH:$PWD/bin/" ~/.bashrc || echo "export PATH=\$PATH:$PWD/bin/" >> ~/.bashrc
| true
|
ac59a67fcbd25d7528d523ef8eb5fe861e8c414d
|
Shell
|
halfhappy/checkout-js-old-structure
|
/fs_watch.sh
|
UTF-8
| 2,103
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Sync a changed file into the serving tree and poke the Stencil theme so its
# hot-module reload notices the change.
SOURCE_FILE=$1 # $1 absolute source file
SOURCE_PATH=$2 # $2 source path
DEST_PATH=$3 # $3 target path root
ROOT_PATH=$4 # $4 root path of the project
RELATIVE_STENCIL_PATH="../BigCommerceStencil/base/templates"
# Check source file against source path and do nothing if matching
SOURCE_PATH_LEN=${#ROOT_PATH} # Length of the root path (character count)
# NOTE(review): ${var:offset} is a bashism under #!/bin/sh — works where sh
# is bash, but not under dash; confirm the target platform.
TARGET_FILE=${DEST_PATH}${SOURCE_FILE:${SOURCE_PATH_LEN}} # Target file path = dest path with the source file with the root path length trimmed
# Remove the comment to debug.
# echo "\n\n---------------------------------------------------------------------------------------------------------------"
# echo "SYNC FILE\n"
# echo "----"
# echo "SOURCE_FILE: ${SOURCE_FILE}"
# echo "SOURCE_PATH: ${SOURCE_PATH}"
# echo "DEST_PATH: ${DEST_PATH}"
# echo "ROOT_PATH: ${ROOT_PATH}"
# echo "TARGET_FILE: ${TARGET_FILE}"
# Touch a marker file in the Stencil theme so its watcher reloads, then
# remove the marker after 10s (in the background) once rsync has seen it.
stencilHotReload() {
if [ -d "${RELATIVE_STENCIL_PATH}" ]
then
echo "\nRunning force update for hot-module reload in BrandCollectiveStencil\n"
touch "$RELATIVE_STENCIL_PATH/.checkout-js-sync.html" &
(
sleep 10 && # Give time to ensure other process picks up on polling rsync
if [ -f "${RELATIVE_STENCIL_PATH}/.checkout-js-sync.html" ]
then
echo "\nRemoving .checkout-js-sync.html file\n" &
rm "$RELATIVE_STENCIL_PATH/.checkout-js-sync.html" &
fi
) &
fi
}
# fix: the original used single quotes — [ '$SOURCE_PATH' = '$DEST_PATH' ]
# compares the two literal strings, so the guard could never fire.
if [ "$SOURCE_PATH" = "$DEST_PATH" ] # Note: This should never occur due to the fswatch exclusion of the serving path / brand collective extension path
then
echo "Source path matches target path, ignoring copying file."
else
# fix: quote both paths so filenames with spaces copy correctly
cp "${SOURCE_FILE}" "${TARGET_FILE}"
echo "\nCopying..."
echo "From: ${SOURCE_FILE}"
echo "To:   ${TARGET_FILE}"
stencilHotReload;
fi
exit
| true
|
43d8fdc6951fc37efde09a53142811832681f640
|
Shell
|
mamewotoko/text2qr
|
/bin/text2qr.sh
|
UTF-8
| 137
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/sh
# Render text to a QR code image via the bundled jar.
# $1: text
# $2: output filename
# fix: quote the dirname/realpath results so an install path containing
# spaces does not break the jar lookup
MYDIR=$(realpath "$(dirname "$0")")
java -jar "$MYDIR/../build/libs/text2qr-all.jar" "$1" "$2"
| true
|
2bf9d4689fb5f790af12cea3dceb722cea43b58b
|
Shell
|
fluffybunnies/sire
|
/raptor/chef-config.sh
|
UTF-8
| 491
| 2.546875
| 3
|
[] |
no_license
|
# Pull configs down from waglabs/chef-deploy
#
#
git clone git@github.com:waglabs/chef-deploy.git /tmp/chef-deploy
# @todo: pick based on $CHEF_ENV instead of hardcoding DEV
# Copy each DEV config (shell and JSON) into the install dir when present.
for ext in sh json; do
  src="/tmp/chef-deploy/raptor/files/DEV.config.local.$ext"
  if [ -f "$src" ]; then
    cp -f "$src" "$installDir/config.local.$ext"
  fi
done
| true
|
ac2dcfb0f238c00c8fb1769e5d9d0c6b48cd5205
|
Shell
|
tofu-James/KIC-SSH_AutoLogin
|
/KIC_SSH-Darwin.sh
|
UTF-8
| 2,686
| 3.765625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
waitTime=10
VMNAME="$1"
# Default to the first registered VirtualBox VM when no name was given.
# fix: the original `if [$VMNAME == ""]` was a syntax error — `[` needs
# surrounding spaces and the variable must be quoted.
if [ "$VMNAME" = "" ]; then
VMNAME="$(vboxmanage list vms |grep -e ""| sed -e 's/"//g' -e 's/ .*//')"
fi
VMNAME_C="$VMNAME"
PROTOCODE="ssh"
# Extract the NAT port-forwarding host ip/port for the "ssh" rule.
HOST_IP="$(vboxmanage showvminfo $VMNAME | grep -e "name = $PROTOCODE" | sed -e 's/.*host ip = \(.*\)\, host port.*/\1/')"
HOST_PORT="$(vboxmanage showvminfo $VMNAME | grep -e "name = $PROTOCODE" | sed -e 's/.*host port = \(.*\)\, guest ip.*/\1/')"
# Query VirtualBox for the VM's power state and cache it in INFO_State
# (e.g. "running", "powered off"); also print it.
check_status (){
INFO_State="$(vboxmanage showvminfo $VMNAME | grep -e "State:" | tr -s ' ' | sed -e 's/.*State: \(.*\)\ (since.*/\1/')"
echo "Status = $INFO_State"
}
# Draw the interactive menu (VM info + login choices) and hand control to
# wait_for_input. Relies on INFO_State being set by check_status.
main_menu(){
clear
echo ------------------------------------
echo " KIC VM SSH AUTO LOGIN SYSTEM "
echo ------------------------------------
echo "VM Name = $VMNAME_C"
echo "Protocode = $PROTOCODE"
echo "Host IP = $HOST_IP"
echo "Host Port = $HOST_PORT"
check_status
echo ------------------------------------
echo " [1] linux"
echo " [2] sql"
# Only offer the power-off option when the VM is not already off.
if [ "$INFO_State" != "powered off" ]; then
echo " [q] turn off $VMNAME_C";
fi
echo ------------------------------------
wait_for_input
}
# Boot the VM headless if it is not running, wait $waitTime seconds, then
# drive an ssh login with expect using $USER/$PASSWORD set by the caller.
ssh_login (){
if [ "$INFO_State" != "running" ]; then
expect -c "
spawn VBoxManage startvm $VMNAME --type headless;
expect {
\"Waiting\" { exp_continue; }
\"successfully\" { puts \"Expecting $waitTime seconds to boot\"; } }
";
for (( i=0 ; i<$waitTime ; i++ )); do
sleep 1; echo ".";
done;
echo "";
fi
# Auto-answer the password prompt, then hand the session to the user.
expect -c "
set timeout 20;
puts \"Waiting for $PROTOCODE connection... Expecting 15 seconds.\";
spawn $PROTOCODE $USER@$HOST_IP -p$HOST_PORT;
expect {
\"assword:\" { send \"$PASSWORD\r\"; interact; } }
"
main_menu;
}
# Read one menu choice: 1/2 select credentials and log in, q powers the VM
# off via ACPI (after confirmation), r refreshes the status display.
wait_for_input(){
read Input
case "$Input" in
1)
USER="linux";
PASSWORD="penguin";
ssh_login;;
2)
USER="sql";
PASSWORD="sql";
ssh_login;;
"q")
if [ "$INFO_State" != "powered off" ]; then
echo "You are going to push the Acpi Power Button of $VMNAME_C";
echo "Are you sure ? (Y/N) (Default=No)";
read ConfirmInput;
case "$ConfirmInput" in
"q" | "yes"| "Yes" | "Y" | "y")
VBoxManage controlvm $VMNAME acpipowerbutton;
echo "Waiting for $VMNAME_C to shutdown. Expecting $waitTime seconds";
for (( i=0 ; i<$waitTime ; i++ )); do
sleep 1; echo ".";
done;;
*) ;;
esac
else check_status; sleep 1;
fi
main_menu ;;
"r")
check_status;
main_menu ;;
*) ;;
esac
}
# Entry point: loop inside main_menu until the user falls through.
main_menu;
# NOTE(review): bash's builtin echo does not interpret \n without -e, so the
# escapes below likely print literally — confirm intended output.
echo "\n~~~~~~~~~~~~~~~~~~~~ END ~~~~~~~~~~~~~~~~~~~~\n"
sleep 1;
# macOS-only: close the Terminal window this script ran in.
osascript -e 'tell application "Terminal"
quit
end tell'
exit 1;
| true
|
1ee318fec7c6f5531c9fdc959d87dff7946b6e62
|
Shell
|
eshack94/dotfiles-1
|
/bash_profile
|
UTF-8
| 885
| 3.59375
| 4
|
[] |
no_license
|
# ~/.bash_profile — sourced by login shells; delegates to .bashrc, then does
# per-OS keyboard / ssh-agent setup. NOTE: the top-level `return` below is
# only valid because this file is sourced, never executed.
. ~/.bashrc
# $OS and $DISPLAY are assumed to be set by the environment/.bashrc — confirm.
if [[ $OS = Linux && $DISPLAY ]]; then
# Configure the CAPSLOCK key to be ESC when pressed alone or CONTROL
# when held with another key.
setxkbmap -option 'caps:ctrl_modifier'
xcape -e 'Caps_Lock=Escape'
# Use Karabiner Elements to achieve the same on macOS.
elif [[ $OS = Darwin ]]; then
# On Mac we want to use the Homebrew SSH agent (newer and better) in
# preference to the system supplied SSH agent (older and worse).
# Use an existing Homebrew SSH agent if it is running and available.
if [[ -f ~/.ssh-agent-env.sh ]]; then
. ~/.ssh-agent-env.sh
# Check the recorded agent PID is still an ssh-agent process.
ps $SSH_AGENT_PID | grep ssh-agent > /dev/null
if [[ $? -eq 0 ]]; then
return
fi
fi
# Homebrew SSH agent is not running, must start it and source it.
# (>| overrides noclobber when overwriting the env file.)
ssh-agent | grep SSH_ >| ~/.ssh-agent-env.sh
. ~/.ssh-agent-env.sh
fi
| true
|
f1820c41aa0c8e3629e4734892a2da538cefb324
|
Shell
|
cyber-labrat/pos-install-ubuntu-dde
|
/programs_to_install_with_apt.sh
|
UTF-8
| 259
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Post-install helper: install each listed package via apt, one at a time
# (so a single failing package does not stop the rest).
APT_PROGRAMAS=(
git
git-flow
deepin-screen-recorder
deepin-calculator
ubuntu-restricted-extras
unrar
gimp
curl
inkscape
vim
)
# fix: quote the array expansion so entries are not re-split or glob-expanded
for program in "${APT_PROGRAMAS[@]}"; do
sudo apt install "$program" -y
done
| true
|
b3537cbea2c4fc76536a42c18ea3e23d14573b24
|
Shell
|
stephenmathieson/release.sh
|
/release.sh
|
UTF-8
| 833
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# release
#
# Copyright (c) 2014 Stephen Mathieson
# MIT licensed
#
# tag to release
TAG="$1"
# dependencies
REQUIRED="git-release git-changelog"
# places to increment version number
FILES="package.json component.json"
# must be in a git repo
[ -d ".git" ] || {
echo >&2 "not in a git repo";
exit 1
}
# must specify a tag (fix: quote $TAG — unquoted, [ -z ] misparses odd input)
[ -z "$TAG" ] && {
echo >&2 "tag required";
exit 1
}
# check required bins
for e in $REQUIRED; do
command -v "$e" >/dev/null 2>&1 || {
echo >&2 "$e must be installed.";
exit 1
}
done
# populate changelog
git changelog --tag "$TAG" || true
# find changelog (fix: grep -E -i replaces the deprecated egrep; $() over backticks)
CHANGELOG=$(ls | grep -Ei 'change|history')
git add $CHANGELOG
# open files to bump version numbers
for file in $FILES; do
test -f "$file" && {
$EDITOR "$file";
git add "$file"
}
done
# actually release
git release "$TAG"
| true
|
64553d97f244bdeabacffa4c22a9c5770d66b2ea
|
Shell
|
blajos/puppet-openstack.tsm
|
/profiles/p_libvirt/files/create-ceph-secret.sh
|
UTF-8
| 403
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Define a libvirt secret for a Ceph client and set its value.
# $1: ceph client name (used in the secret's usage name)
# $2: base64-encoded key for the secret value
set -ex
# fix: $(...) instead of backticks; quote the temp file and uuid expansions
SECRET=$(mktemp)
cat > "$SECRET" <<EOF
<secret ephemeral='no' private='no'>
<usage type='ceph'>
<name>client.$1 secret</name>
</usage>
</secret>
EOF
#Secret 73f9b93b-7246-4dc6-b56a-f7e2f18c3fab created
uuid=$(virsh secret-define --file "$SECRET"|sed -e 's/^Secret \([0-9a-f-]*\) created/\1/')
rm "$SECRET"
virsh secret-set-value --secret "$uuid" --base64 "$2"
| true
|
dc66b5fdc5f482a10e3a1341bbe7dfc7500bc054
|
Shell
|
ostelco/ostelco-core
|
/sample-agent/generate-test-scripts.sh
|
UTF-8
| 3,412
| 3.765625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
##
##
## This sets up a set of demo scripts that can be used
## in conjunction with the script "apply-yaml.sh" to
## apply changes to the product/segment/offer configuration
## in Prime.  It is intended as a vehicle for testing
## the interaction in basic ways, and will most likely
## be removed or replaced when we are more confident that
## the design of the import mechanism is fit for purpose.
## In the mean time, we'll use this mechanism as it provides
## great flexibility and transparency in to what is actually
## applied.
##

if [[ $# -ne 3 ]] ; then
    echo "$0 ERROR:  requires exactly three parameters"
    echo "$0 ERROR:   $0 target-dir userid1 userid2"
    exit 1
fi

TARGET_DIR=$1
USER_1=$2
USER_2=$3

SEGMENT_1="demoSegment1"
SEGMENT_2="demoSegment2"
SEGMENT_3="demoSegment3"

if [[ ! -d "$TARGET_DIR" ]] ; then
    echo "$0 ERROR: Target directory '$TARGET_DIR' does not exist or is not a directory"
    exit 1
fi

# "$TARGET_DIR" is quoted in every redirection below so paths containing
# spaces work.  The unquoted EOF delimiters are deliberate: $SEGMENT_* and
# $USER_* are expanded now, while the files are generated.
# NOTE(review): YAML indentation was reconstructed for this copy — verify it
# against whatever consumes these files before shipping.
cat > "$TARGET_DIR/init1.yml" <<EOF
createOffer:
  id: demoOffer1
  createProducts:
    - sku: 1GB_200NOK
      price:
        amount: 20000
        currency: NOK
      properties:
        noOfBytes: 1_000_000_000
      presentation:
        isDefault: true
        offerLabel: Top Up
        priceLabel: 200 NOK
        productLabel: +1GB
  createSegments:
    - id: $SEGMENT_1
EOF

cat > "$TARGET_DIR/init2.yml" <<EOF
createOffer:
  id: demoOffer2
  createProducts:
    - sku: 2GB_200NOK
      price:
        amount: 20000
        currency: NOK
      properties:
        noOfBytes: 2_000_000_000
      presentation:
        isDefault: true
        offerLabel: Top Up
        priceLabel: 200 NOK
        productLabel: +2GB
  createSegments:
    - id: $SEGMENT_2
EOF

cat > "$TARGET_DIR/init3.yml" <<EOF
createOffer:
  id: demoOffer3
  createProducts:
    - sku: 1GB_50NOK
      price:
        amount: 5000
        currency: NOK
      properties:
        noOfBytes: 1_000_000_000
      presentation:
        offerDescription: Need more data? Get 1GB for the special price of 50 NOK
        isDefault: true
        offerLabel: Special offer
        priceLabel: 50 NOK
        productLabel: +1GB
  createSegments:
    - id: $SEGMENT_3
EOF

cat > "$TARGET_DIR/step1.yml" <<EOF
updateSegments:
  - id: $SEGMENT_1
    subscribers:
      - $USER_2
  - id: $SEGMENT_2
    subscribers:
      - $USER_1
  - id: $SEGMENT_3
EOF

cat > "$TARGET_DIR/step2.yml" <<EOF
updateSegments:
  - id: $SEGMENT_1
    subscribers:
      - $USER_2
  - id: $SEGMENT_2
    subscribers:
      - $USER_1
  - id: $SEGMENT_3
    subscribers:
      - $USER_1
EOF

cat > "$TARGET_DIR/reset.yml" <<EOF
updateSegments:
  - id: $SEGMENT_1
    subscribers:
      - $USER_1
      - $USER_2
  - id: $SEGMENT_2
  - id: $SEGMENT_3
EOF

# "directyory" typo in the success message fixed.
echo "$0: INFO Successfully created demo scripts in directory $TARGET_DIR"
echo "$0: INFO To initialize run initialization scripts:"
echo "$0: INFO"
echo "$0: INFO       ./apply_yaml.sh offer $TARGET_DIR/init1.yml"
echo "$0: INFO       ./apply_yaml.sh offer $TARGET_DIR/init2.yml"
echo "$0: INFO       ./apply_yaml.sh offer $TARGET_DIR/init3.yml"
echo "$0: INFO"
echo "$0: INFO During the test, run the test steps:"
echo "$0: INFO"
echo "$0: INFO       ./apply_yaml.sh segments $TARGET_DIR/step1.yml"
echo "$0: INFO       ./apply_yaml.sh segments $TARGET_DIR/step2.yml"
echo "$0: INFO"
echo "$0: INFO To reset to initial state (e.g. before running a demo/test again):"
echo "$0: INFO"
echo "$0: INFO       ./apply_yaml.sh segments $TARGET_DIR/reset.yml"
| true
|
a0a6f086bfc313bb59d39509b198f5746f0903fc
|
Shell
|
coms-team/lineeye_lanio
|
/libLANIO/libLANIO_uninstall.sh
|
UTF-8
| 370
| 3.0625
| 3
|
[] |
no_license
|
#! /bin/sh
# Remove the libLANIO shared library (real library, soname and linker name)
# from /usr/local/lib and refresh the dynamic linker cache.
libdir=/usr/local/lib
# (the original assigned libdir twice; the duplicate line is removed)
realname='libLANIO.so.1.0.?'
soname=libLANIO.so.1
linkername=libLANIO.so
ldcommand=ldconfig

# $realname is intentionally unquoted so its '?' glob matches any patch level
rm -f "$libdir"/$realname
rm -f "$libdir/$soname"
rm -f "$libdir/$linkername"

# Prefer ldconfig from PATH; fall back to its usual absolute location.
# (command -v replaces the fragile `[ \`which ...\` ]` construct.)
if command -v "$ldcommand" >/dev/null 2>&1; then
    ldcommand=ldconfig
elif [ -e /sbin/ldconfig ]; then
    ldcommand=/sbin/ldconfig
fi
"$ldcommand"
echo "Uninstalled."
| true
|
c2f42e10d8918e4cba8365b8b3bb2ee8dc37a774
|
Shell
|
cagdass/p4
|
/test.sh
|
UTF-8
| 479
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Sweep chunk sizes (32..32768 KB, doubling) and block sizes (512..chunksize
# bytes, doubling), running ./app 5 times per combination and appending its
# output to a single log file.
cursize=32
blocksize=512
# -f: do not fail on the very first run, when the log does not exist yet
# (the original bare `rm` printed an error in that case)
rm -f block_size_512b_varying_chunksize.txt
touch block_size_512b_varying_chunksize.txt
while [ "${cursize}" -lt 65536 ];
do
    while [ "${blocksize}" -lt $((cursize*1024)) ];
    do
        for i in $(seq 1 5);
        do
            echo "Iteration ${i} for chunksize ${cursize} and blocksize ${blocksize}";
            # -10: leave room for the app's per-block header (see ./app)
            ./app "${cursize}" $((blocksize-10)) >> block_size_512b_varying_chunksize.txt
        done
        blocksize=$((blocksize*2))
    done
    blocksize=512
    cursize=$((cursize*2))
done
| true
|
9eab1e864291c6a5bb015dff442e62982670b387
|
Shell
|
fa35/pinguinsoft
|
/woche1/installenv.sh
|
UTF-8
| 1,350
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision demo groups and users, and build a shared project tree with
# per-subdirectory group ownership and permissions.

# groups first, so the users below can reference them
for group in pa pl ewa sb; do
    groupadd "$group"
done

# options used:
# -M: do not create home directory
# -N: Do not create a group with the same name as the user
useradd -c "Müller Christina" -M -N --groups pl,pa pla
useradd -c "Perterson Peter" -M -N --groups pl plx
useradd -c "Ratlos Rudi" -M -N --groups ewa,pa ewa
useradd -c "Duck Duffy" -M -N ewx
useradd -c "Chastur Toni" -M -N --groups sb,pa sb

# every demo account starts with the password "osz"
for account in pla plx ewa ewx sb; do
    echo "$account:osz" | chpasswd
done

# project tree
projekt=/home/michael/temp/projekte/projekt_a
for sub in docs org src temp; do
    mkdir -p "$projekt/$sub"
done

# ownership: project lead (pla) owns everything; groups vary per directory
chown root:pl /home/michael/temp/projekte/
chown pla:pa  "$projekt"
chown pla:pa  "$projekt/docs"
chown pla:sb  "$projekt/org"
chown pla:ewa "$projekt/src"
chown pla:ewa "$projekt/temp"

# permissions
chmod 751 /home/michael/temp/projekte/
chmod 710 "$projekt"
chmod 750 "$projekt/docs"
chmod 770 "$projekt/org"
chmod 775 "$projekt/src"
chmod 770 "$projekt/temp"
| true
|
c784832918a451777810a50e72d0f9afcbab2360
|
Shell
|
mmmmmrob/Vertere-RDF
|
/Examples/ourairports.sh
|
UTF-8
| 3,300
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Convert the OurAirports CSV dumps plus hand-written turtle into one sorted,
# de-duplicated ntriples file, then derive property/class listings and
# turtle/rdfxml renderings of it.

cat /dev/null > ourairports.com/output_data/full.rdf.nt

echo Starting with turtle files in hand-written rdf
# Iterate the glob directly instead of parsing `ls` output, which breaks on
# unusual file names (ShellCheck SC2045).
for file in ourairports.com/handwritten/*.rdf.ttl
do
    [ -e "$file" ] || continue  # no matches: skip the literal pattern
    rapper -i turtle -o ntriples "$file" >> ourairports.com/output_data/full.rdf.nt
done

# Feed each CSV to the mapper by redirection instead of `cat | ...`.
echo Describing airports
../vertere_mapper.php ourairports.com/airports.csv.spec.ttl < ourairports.com/2011-11-09/airports.csv >> ourairports.com/output_data/full.rdf.nt
echo Describing countries
../vertere_mapper.php ourairports.com/countries.csv.spec.ttl < ourairports.com/2011-11-09/countries.csv >> ourairports.com/output_data/full.rdf.nt
echo Describing regions
../vertere_mapper.php ourairports.com/regions.csv.spec.ttl < ourairports.com/2011-11-09/regions.csv >> ourairports.com/output_data/full.rdf.nt
echo Describing runways
../vertere_mapper.php ourairports.com/runways.csv.spec.ttl < ourairports.com/2011-11-09/runways.csv >> ourairports.com/output_data/full.rdf.nt

echo Sorting and de-duping descriptions
sort -u ourairports.com/output_data/full.rdf.nt > ourairports.com/output_data/ourairports.rdf.nt
rm ourairports.com/output_data/full.rdf.nt

#echo De-duping and extending descriptions
#cat ourairports.com/output_data/sorted.rdf.nt | ../vertere_reducer.php > ourairports.com/output_data/ourairports.rdf.nt

echo Listing properties used
awk '{ print $2 }' ourairports.com/output_data/ourairports.rdf.nt | sort -u > ourairports.com/output_data/ourairports.properties_used.txt
echo Listing classes used
awk '{ print $2 " " $3 }' ourairports.com/output_data/ourairports.rdf.nt | grep "^<http://www.w3.org/1999/02/22-rdf-syntax-ns#type> " | awk '{ print $2 }' | sort -u > ourairports.com/output_data/ourairports.classes_used.txt

echo Converting descriptions to turtle
rapper -i ntriples -o turtle -f'xmlns:conv="http://example.com/schema/data_conversion#"' -f'xmlns:bibo="http://example.com/bibo#"' -f'xmlns:fly="http://data.kasabi.com/dataset/airports/schema/"' -f'xmlns:foaf="http://xmlns.com/foaf/0.1/"' -f'xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#"' -f'xmlns:georss="http://www.georss.org/georss/"' -f'xmlns:owl="http://www.w3.org/2002/07/owl#"' -f'xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"' -f'xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"' -f'xmlns:spacerel="http://data.ordnancesurvey.co.uk/ontology/spatialrelations/"' -f'xmlns:xsd="http://www.w3.org/2001/XMLSchema#"' ourairports.com/output_data/ourairports.rdf.nt > ourairports.com/output_data/ourairports.rdf.ttl
echo Converting descriptions to rdfxml
rapper -i ntriples -o rdfxml-abbrev -f'xmlns:conv="http://example.com/schema/data_conversion#"' -f'xmlns:bibo="http://example.com/bibo#"' -f'xmlns:fly="http://data.kasabi.com/dataset/airports/schema/"' -f'xmlns:foaf="http://xmlns.com/foaf/0.1/"' -f'xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#"' -f'xmlns:georss="http://www.georss.org/georss/"' -f'xmlns:owl="http://www.w3.org/2002/07/owl#"' -f'xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"' -f'xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"' -f'xmlns:spacerel="http://data.ordnancesurvey.co.uk/ontology/spatialrelations/"' -f'xmlns:xsd="http://www.w3.org/2001/XMLSchema#"' ourairports.com/output_data/ourairports.rdf.nt > ourairports.com/output_data/ourairports.rdf.xml
| true
|
2b4354f381769cde6455f4c2c7a25ae0f25d74c3
|
Shell
|
instedd/cdx-sync-sshd
|
/files/sshd.sh
|
UTF-8
| 567
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: ensure a persistent host key and the cdx-sync user
# exist, then run sshd in the foreground.  Requires $SYNC_UID in the env.

# generate the host key only on first boot
if [ ! -f /etc/ssh/keys/ssh_host_rsa_key ]; then
    ssh-keygen -t rsa -N "" -f /etc/ssh/keys/ssh_host_rsa_key
fi

# `id -u` exits non-zero when the user does not exist yet; folding the
# assignment into the `if` replaces the original $? check
if EXISTING_SYNC_UID=$(id -u cdx-sync); then
    # user already exists: its UID must match the requested one
    if [ "$EXISTING_SYNC_UID" -ne "$SYNC_UID" ]; then
        echo "The cdx-sync user is already created but the UID is different from the one specified: $EXISTING_SYNC_UID"
        exit 1
    fi
else
    adduser --uid "$SYNC_UID" --disabled-password --gecos "" cdx-sync
    mkdir -p /home/cdx-sync/tmp/sync /home/cdx-sync/.ssh
    chown cdx-sync:cdx-sync -R /home/cdx-sync
fi

# -D: stay in foreground, -e: log to stderr (captured by the container runtime)
exec /usr/sbin/sshd -D -e 2>&1
| true
|
85b81cb989e900fcaec2df95cd849caef05bee9d
|
Shell
|
5l1v3r1/Blackbox-3
|
/Desktop/Linux Mint Custom/Caja Plugins /70 Nautilus scripts/Internet/YouTube-DL(cc)
|
UTF-8
| 1,170
| 3.609375
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/sh
video()
{
    # Ask for a YouTube URL via zenity and download it, showing a
    # pulsating progress dialog while youtube-dl runs.
    echo
    var=$(zenity --entry --title="YouTubeDL" --text="Ingrese el link del video youtube que va a descargar:")
    if [ $? = 0 ] ; then
        youtube-dl -t "$var" | zenity --progress --pulsate --auto-close --auto-kill --title="YouTubeDL" --text="Descargando el archivo $var !"
    fi
}
subtitulo()
{
    # Same as video(), but also fetch closed captions (--write-srt).
    echo
    var=$(zenity --entry --title="YouTubeDL(cc)" --text="Ingrese el link del video youtube que va a descargar:")
    if [ $? = 0 ] ; then
        youtube-dl -t --write-srt "$var" | zenity --progress --pulsate --auto-close --auto-kill --title="YouTubeDL(cc)" --text="Descargando el archivo $var subtitulado !"
    fi
}
dialogo_principal()
{
    # Radio-list chooser: plain video vs. video with subtitles.
    # Stores the selection ("video" or "con_subtitulo") in the global $opcion;
    # on cancel, $opcion is left empty.
    opcion=$( zenity --list --radiolist --title="Video solo o con subtitulo" \
        --column='Pick' --column='Opción' \
        'FALSE' "con_subtitulo" \
        'FALSE' "video" )
}
inicio()
{
    # Main flow: show the chooser, then dispatch to the selected action.
    dialogo_principal
    # Quote $opcion: when the dialog is cancelled it is empty, and the
    # original unquoted test raised a `[: =: unary operator` error instead
    # of cleanly reaching the else branch.
    if [ "${opcion}" = 'video' ]; then
        video
    elif [ "${opcion}" = 'con_subtitulo' ]; then
        subtitulo
    else
        echo "Error en selección."
        exit 1
    fi
}
# Entry point: run the dialog-driven flow, then report completion.
# (Original Spanish comment: "Llamado a función principal" — call the main function.)
inicio
zenity --info --title="YouTubeDL" --text="Descarga terminada! $var  Script YouTubeDL"
exit 0
| true
|
7f4567758710e8019e7cb371bfd173af7bdc11c6
|
Shell
|
sjfloat/stack-container
|
/genDev
|
UTF-8
| 435
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/sh
# Generate gen-dev/Dockerfile from its template by substituting {{user}} and
# {{shell}}.  Usage: genDev [user] [shell].  Fallback order:
#   user : $1 > $DOCKER_USER > `whoami`
#   shell: $2 > $DOCKER_SHELL > $SHELL > /bin/sh
user=$1
shell=$2

# All tests are quoted: the original unquoted `[ -z $user ]` / `[ ! -z ... ]`
# forms misbehave on empty or space-containing values.
if [ -z "$user" ]; then
    if [ -n "$DOCKER_USER" ]; then
        user=$DOCKER_USER
    else
        user=$(whoami)
    fi
fi

if [ -z "$shell" ]; then
    if [ -n "$DOCKER_SHELL" ]; then
        shell=$DOCKER_SHELL
    elif [ -n "$SHELL" ]; then
        shell=$SHELL
    else
        shell=/bin/sh
    fi
fi

# '#' delimiter for {{shell}} because shell paths contain '/'
perl -pe "s/{{user}}/$user/g; s#{{shell}}#$shell#g" < gen-dev/Dockerfile.tmpl > gen-dev/Dockerfile
| true
|
1df785228f18a713bb3c8400b1bd163851f6afd8
|
Shell
|
Geovation/plastic-patrol
|
/scripts/backup.sh
|
UTF-8
| 745
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Back up Firestore, auth users and storage into a dated folder under
# gs://plastic-patrol-bks, then prune all but the 6 newest backups.
# TODO: do it cron
# ..
export PROJECT=plastic-patrol-fd3b3
export FOLDER=$(date +%Y-%m-%d)

# save firebase
gcloud beta firestore export gs://plastic-patrol-bks/$FOLDER/firestore --async --project $PROJECT

# save users
mkdir -p bks/users
firebase auth:export bks/users/users.json --project $PROJECT
gsutil -m rsync -r bks/users gs://plastic-patrol-bks/$FOLDER/users

# save storage
gsutil -m rsync -r gs://plastic-patrol-fd3b3.appspot.com gs://plastic-patrol-bks/$FOLDER/storage

# delete old backups; the awk sliding window prints everything except the
# newest 6 listing entries
TO_DELETE=$(gsutil ls gs://plastic-patrol-bks/ | awk 'n>=6 { print a[n%6] } { a[n%6]=$0; n=n+1 }')
# guard: with 6 or fewer backups TO_DELETE is empty, and the original
# unconditionally ran `gsutil rm` with no arguments, which fails
if [ -n "$TO_DELETE" ]; then
    echo "$TO_DELETE" | xargs gsutil -m rm -r
fi
# echo $TO_DELETE | gsutil -m rm -r -I
gsutil ls gs://plastic-patrol-bks/
| true
|
d3e3aae7e46409c1a0a79b34e4732685b804afed
|
Shell
|
random220/gcode
|
/howto/pdf-bookmark/indexmaker-shell.txt
|
UTF-8
| 1,283
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Build PDF bookmarks into ~/in.pdf via ghostscript ("idx" mode), or repack
# this very script into a self-extracting base64 text blob ("repack" mode).
# The empty quoted here-doc below prints nothing; presumably a placeholder
# for usage text — confirm before removing.
cat <<'EOF'
EOF
function make_index() {
    # Write the pdfmark index (placeholder --COOKED-INDEX-HERE-- is replaced
    # by a cooked index elsewhere) and merge it into ~/in.pdf, producing out.pdf.
    cat <<'EOF' >index.info
--COOKED-INDEX-HERE--
EOF
    /usr/bin/gs -sDEVICE=pdfwrite -q -dBATCH -dNOPAUSE -sOutputFile=out.pdf -dPDFSETTINGS=/prepress index.info -f ~/in.pdf
}
function repackit() {
    # -----------------------------------------------------------------------------
    # How to repack
    # -----------------------------------------------------------------------------
    # Emits a.txt: a text file whose header explains how to regenerate a.sh,
    # followed by this script gzipped+base64'd, followed by the extractor
    # footer.  Note: the 'EOF' and 'chmod a+x a.sh' lines inside the second
    # here-doc are *content* written to a.txt (they terminate the generated
    # self-extractor), not part of this script's control flow.
    cat >a.txt <<'_EOF'
#
# -----------------------------------------------------------------------------
# How to generate bookmarks for this pdf book
# -----------------------------------------------------------------------------
# cat a.sh|gzip - -9|openssl enc - base64 >a.txt
cat <<EOF | base64 -d|gunzip - >a.sh
_EOF
    uname=$(uname)
    # base64 line-wrapping flag differs: -b on macOS, -w on GNU coreutils
    if [[ $uname == 'Darwin' ]]; then
        cat $0|gzip -9 -|base64 -b 120 >>a.txt
    elif [[ $uname == 'Linux' ]]; then
        cat $0|gzip -9 -|base64 -w 120 >>a.txt
    else
        echo 'ERROR: Not Linux or Darwin'
        exit 1
    fi
    cat >>a.txt <<'_EOF'
EOF
chmod a+x a.sh
# -----------------------------------------------------------------------------
#
_EOF
}
# mode dispatch
if [[ $1 == idx ]]; then
    make_index
elif [[ $1 == repack ]]; then
    repackit
else
    echo "$0 idx|repack"
fi
| true
|
16072da7b271a85f89a327a3c8f800c37dfc0901
|
Shell
|
chrichards/Learning-Purposes
|
/macOS/Update OS (Old)/updateOS.sh
|
UTF-8
| 13,103
| 3.296875
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
##############################################
# VARIABLES
##############################################

# If using Jamf, start on $4
if [[ "$1" =~ "/" ]]; then
    for (( i=1; i<4; i++ )); do
        shift 1
    done
fi

Parameter=$@

# Quote the test: with no arguments the original `[ -n $Parameter ]`
# collapsed to the always-true one-argument form `[ -n ]`, so the
# "stevecorp" fallback below was unreachable.
if [ -n "$Parameter" ]; then
    Organization=$(echo "$Parameter" | awk '{print tolower($0)}')
else
    Organization="stevecorp"
fi

# Where all scripts will be stored
Store="/Library/Application Support/$Organization"

# The first alert that lets the user know
# updates are going to be installed; asks when to install/reboot
LaunchAgentName1="com.$Organization.alert"
LaunchAgentPath1="/Library/LaunchAgents/$LaunchAgentName1.plist"
LaunchAgentScriptPath1="$Store/alertUser.sh"

# Launch agent that is activated if updates
# did not install after reboot
LaunchAgentName2="com.$Organization.remediation"
LaunchAgentPath2="/Library/LaunchAgents/$LaunchAgentName2.plist"
LaunchAgentScriptPath2="$Store/remediation.sh"

# Launch Agent that makes sure the user presses the update button
LaunchAgentName3="com.$Organization.watcher"
LaunchAgentPath3="/Library/LaunchAgents/$LaunchAgentName3.plist"
LaunchAgentScriptPath3="$Store/watcher.sh"

# Verify updates were installed on reboot
LaunchDaemonName="com.$Organization.removealert"
LaunchDaemonPath="/Library/LaunchDaemons/$LaunchDaemonName.plist"
LaunchDaemonScriptPath="$Store/removeAlert.sh"

# Used in conjunction with LaunchAgent1
PythonTimerScriptPath="$Store/timer.py"
PythonChoiceScriptPath="$Store/choice.py"

# Update settings
SoftwareUpdatePlist="/Library/Preferences/com.apple.SoftwareUpdate.plist"
PreferredConfigs=(
    "AutomaticCheckEnabled"
    "AutomaticDownload"
    "ConfigDataInstall"
    "CriticalUpdateInstall"
    "AutomaticallyInstallMacOSUpdates"
)
UpdateLog=/tmp/update$(date +%F).log

# Finally, how long the timer should run for (in seconds)
TimerCount=300
##############################################
# SCRIPTS AND PLISTS
##############################################
# Each `IFS='' read -r -d '' VAR <<EOF` slurps a multi-line here-doc into a
# variable (read returns non-zero at EOF; that is expected).  Unquoted EOF
# delimiters mean $variables are expanded NOW, at definition time; the
# \$-escaped variables inside the generated scripts are expanded later, when
# those scripts run on the target machine.
# NOTE(review): leading indentation inside these here-docs appears to have
# been lost in this copy and has been reconstructed conventionally — verify
# against the original before shipping, especially for the Python payloads.

# Tk countdown window shown for $TimerCount seconds before the forced restart
# (driven by LaunchAgentScript1 below).
IFS='' read -r -d '' PythonTimerScript <<EOF
#!/usr/bin/python
import time
import tkinter as tk

def countdown(t):
    mins, secs = divmod(t, 60)
    timer = '{:02d}:{:02d}'.format(mins, secs)
    label1['text'] = timer
    if t > 0:
        parent.after(1000, countdown, t-1)
    else:
        parent.quit()

t = $TimerCount
parent = tk.Tk()
parent.geometry("250x100")
parent.title("Restart Required")
label1 = tk.Label(parent, font = "Impact 48 bold")
label2 = tk.Label(parent, text = "Your machine is about to restart.")
label2.pack()
label1.pack()
countdown(int(t))
parent.eval('tk::PlaceWindow . center')
parent.attributes("-topmost", True)
parent.resizable(False, False)
parent.overrideredirect(1)
parent.mainloop()
EOF

# Option-menu dialog asking when to install; prints the chosen option.
# NOTE(review): mixes Python-3 style `from tkinter import *` with the
# Python-2 statement `print choice.get()` — confirm which interpreter
# /usr/bin/python resolves to on the targets.
IFS='' read -r -d '' PythonChoiceScript <<EOF
#!/usr/bin/python
from tkinter import *

options = [
    "Install updates and restart now",
    "Snooze for 1 hour",
    "Snooze for 3 hours",
    "Snooze for 8 hours"
]

def select():
    parent.quit()

parent = Tk()
parent.geometry("400x100")
parent.title("macOS Updates")
choice = StringVar(parent)
choice.set(options[0])
labelText = "Your system needs to install important updates which will require a restart. Please select from the following options:"
label1 = Label(parent, text=labelText, font="Vedana 12", wraplength=390, justify=CENTER)
label1.pack()
menu = OptionMenu(parent, choice, *options)
menu.pack()
button = Button(parent, text="Select", command=select)
button.pack()
parent.eval('tk::PlaceWindow . center')
parent.attributes("-topmost", True)
parent.resizable(False, False)
parent.overrideredirect(1)
parent.mainloop()
print choice.get()
EOF

# LaunchDaemon plist: runs removeAlert.sh at load (i.e. after each reboot).
IFS='' read -r -d '' LaunchDaemon <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>$LaunchDaemonName</string>
    <key>ProgramArguments</key>
    <array>
        <string>/bin/sh</string>
        <string>$LaunchDaemonScriptPath</string>
    </array>
    <key>RunAtLoad</key>
    <true/>
</dict>
</plist>
EOF

# Post-reboot verification script: decides whether the machine rebooted
# recently and whether updates actually got installed; if not, it arms the
# remediation/watcher LaunchAgents.  All \$ are escaped so these expand when
# the generated script runs, not now.
IFS='' read -r -d '' LaunchDaemonScript <<EOF
#!/bin/bash
Month_to_Number () {
    case \$1 in
        'Jan') Month=1 ;;
        'Feb') Month=2 ;;
        'Mar') Month=3 ;;
        'Apr') Month=4 ;;
        'May') Month=5 ;;
        'Jun') Month=6 ;;
        'Jul') Month=7 ;;
        'Aug') Month=8 ;;
        'Sep') Month=9 ;;
        'Oct') Month=10 ;;
        'Nov') Month=11 ;;
        'Dec') Month=12 ;;
        *) Month=0 ;;
    esac
    echo \$Month
}
Date_to_Number () {
    Month=\$(Month_to_Number \$(echo \$1 | awk '{print \$1}'))
    Day=\$(echo \$1 | awk '{print \$2}')
    Time=\$(echo \$1 | awk '{print \$3}' | sed 's/://g')
    if [[ \$(echo \$1 | awk -F: '{print NF-1}') == 1 ]]; then
        Time="\${Time}00"
    fi
    echo "\$Month\$Day\$Time"
}
LastReboot=\$(last reboot | head -1 | grep -oE "[aA-zZ]{3} [0-9]{2} [0-9]{2}:[0-9]{2}")
CurrentTime=\$(date | awk '{print \$2" "\$3" "\$4}')
CompareA=\$((\$(Date_to_Number "\$LastReboot")+1000000)) # Add a day
CompareB=\$(Date_to_Number "\$CurrentTime")
if (( CompareA > CompareB )); then
    Rebooted='true'
else
    Rebooted='false'
fi
if [[ "\$Rebooted" == true ]]; then
    CurrentDate=\$(date +%Y%m%d)
    LastUpdate=\$(softwareupdate --history | grep -oE "[0-9]{2}/[0-9]{2}/[0-9]{4}" | tail -1)
    LastUpdateFormatted=\$(date -j -f %m/%d/%Y -v+3d \$LastUpdate +%Y%m%d)
    if (( LastUpdateFormatted <= CurrentDate )); then
        RemediationNeeded="true"
    fi
    /bin/launchctl unload "$LaunchAgentPath1"
    /bin/rm -f "$LaunchAgentPath1"
    /bin/rm -f "$LaunchDaemonPath"
elif [[ "\$Rebooted" == false ]]; then
    exit 0
else
    exit 1
fi
if [[ "\$RemediationNeeded" != true ]]; then
    /bin/rm -f "$LaunchAgentPath2"
    /bin/rm -f "$LaunchAgentPath3"
    /bin/rm -rf "$Store"
    exit 0
fi
/usr/bin/defaults write "$LaunchAgentPath2" "StartInterval" 1800
/usr/bin/defaults write "$LaunchAgentPath2" "RunAtLoad" -bool true
/usr/bin/defaults write "$LaunchAgentPath3" "RunAtLoad" -bool true
/bin/chmod 644 "$LaunchAgentPath2"
/bin/chmod 644 "$LaunchAgentPath3"
/bin/launchctl load "$LaunchAgentPath2"
/bin/launchctl load "$LaunchAgentPath3"
EOF

# LaunchAgent 1: the scheduled "install & restart" alert (loaded on demand;
# a StartCalendarInterval is injected later by PlistBuddy).
IFS='' read -r -d '' LaunchAgent1 <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>$LaunchAgentName1</string>
    <key>ProgramArguments</key>
    <array>
        <string>/bin/sh</string>
        <string>$LaunchAgentScriptPath1</string>
    </array>
    <key>RunAtLoad</key>
    <false/>
</dict>
</plist>
EOF

# LaunchAgent 2: periodic remediation nag (interval is set by the daemon
# script above when remediation is needed).
IFS='' read -r -d '' LaunchAgent2 <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>$LaunchAgentName2</string>
    <key>ProgramArguments</key>
    <array>
        <string>/bin/sh</string>
        <string>$LaunchAgentScriptPath2</string>
    </array>
    <key>RunAtLoad</key>
    <false/>
</dict>
</plist>
EOF

# LaunchAgent 3: the install.log watcher.
IFS='' read -r -d '' LaunchAgent3 <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>$LaunchAgentName3</string>
    <key>ProgramArguments</key>
    <array>
        <string>/bin/sh</string>
        <string>$LaunchAgentScriptPath3</string>
    </array>
    <key>RunAtLoad</key>
    <false/>
</dict>
</plist>
EOF

# Agent script 1: show the countdown window, then force install + restart.
IFS='' read -r -d '' LaunchAgentScript1 <<EOF
#!/bin/bash
"$PythonTimerScriptPath" &> /dev/null &
for (( i=0; i<$TimerCount; i++ )); do
    sleep 1
done
/usr/sbin/softwareupdate --install --all --restart
EOF

# Agent script 2: osascript nag dialog; "Now" opens the Software Update pane.
IFS='' read -r -d '' LaunchAgentScript2 <<EOF
#!/bin/bash
notification="Your system was unable to update on its own and requires your attention. If you choose not to update now, you will be reminded every 30 minutes."
title="System Updates Required"
icon="/System/Library/PreferencePanes/SoftwareUpdate.prefPane/Contents/Resources/SoftwareUpdate.icns"
choice=\$(/usr/bin/osascript -e 'display dialog "'"\$notification"'" with title "'"\$title"'" with icon {"'"\$icon"'"} with text buttons {"Now","Later"}')
if [[ "\$choice" =~ "Now" ]]; then
    open -b com.apple.systempreferences /System/Library/PreferencePanes/SoftwareUpdate.prefPane
fi
exit 0
EOF

# Agent script 3: poll install.log until the user authorizes today's update,
# then tear down the remediation agents and the payload store.
IFS='' read -r -d '' LaunchAgentScript3 <<EOF
#!/bin/bash
check=\$(date +%F)
while :
do
    if [[ \$(cat "/var/log/install.log" | grep \$check".*SUAppStoreUpdateController: authorize") ]]; then
        break
    fi
    sleep 1
done
/bin/launchctl unload "$LaunchAgentPath2"
/bin/launchctl unload "$LaunchAgentPath3"
/bin/rm -f "$LaunchAgentPath2"
/bin/rm -f "$LaunchAgentPath3"
/bin/rm -rf "$Store"
EOF
##############################################
# MAIN AREA
##############################################

# Make sure the scripts have somewhere to live
if [ ! -d "$Store" ]; then
    mkdir "$Store"
fi

# install_payload <contents> <destination> <mode>
# Writes <contents> to <destination> — only if it does not exist yet, which
# matches the original behavior — owned root:wheel with the given octal mode.
# Replaces eleven copies of the identical if/echo/chown/chmod pattern.
install_payload() {
    if [ ! -f "$2" ]; then
        echo "$1" > "$2"
        /usr/sbin/chown root:wheel "$2"
        /bin/chmod "$3" "$2"
    fi
}

# Helper python dialogs (executable)
install_payload "$PythonChoiceScript" "$PythonChoiceScriptPath" 755
install_payload "$PythonTimerScript"  "$PythonTimerScriptPath"  755

# Drop the LaunchAgents in (plists are 644)
install_payload "$LaunchAgent1" "$LaunchAgentPath1" 644
install_payload "$LaunchAgent2" "$LaunchAgentPath2" 644
install_payload "$LaunchAgent3" "$LaunchAgentPath3" 644

# Now add all the LaunchAgent Scripts to the mix (executable)
install_payload "$LaunchAgentScript1" "$LaunchAgentScriptPath1" 755
install_payload "$LaunchAgentScript2" "$LaunchAgentScriptPath2" 755
install_payload "$LaunchAgentScript3" "$LaunchAgentScriptPath3" 755

# Drop the update checking daemon in place, and make sure it has
# something to run
install_payload "$LaunchDaemon"       "$LaunchDaemonPath"       644
install_payload "$LaunchDaemonScript" "$LaunchDaemonScriptPath" 755
# Check to make sure automatic updating is setup
for Config in "${PreferredConfigs[@]}"; do
    Check=$(/usr/libexec/PlistBuddy -c "Print :$Config" "$SoftwareUpdatePlist" 2>&1)
    if [[ $Check =~ "Does Not Exist" || $Check != 'true' ]]; then
        echo "Remediating '$Config : $Check'"
        /usr/bin/defaults write "$SoftwareUpdatePlist" $Config -bool true
    fi
done

# Remove every artifact this workflow installed; used on the two exit paths
# below where no further action (agents/daemon) is needed.  Replaces two
# identical five-line cleanup copies.
remove_install_artifacts() {
    /bin/rm -f "$LaunchDaemonPath"
    /bin/rm -f "$LaunchAgentPath1"
    /bin/rm -f "$LaunchAgentPath2"
    /bin/rm -f "$LaunchAgentPath3"
    /bin/rm -rf "$Store"
}

# Run the updater
/usr/sbin/softwareupdate --install --all &> "$UpdateLog"

# Check the output (grep -q on the log instead of `[[ $(cat | grep ...) ]]`)
if grep -q "No updates are available." "$UpdateLog"; then
    echo "System is up-to-date."
    remove_install_artifacts
    exit 0
elif grep -q "Please restart immediately." "$UpdateLog"; then
    echo "System needs to update."
elif grep -q "Done." "$UpdateLog"; then
    echo "System updated - no need for restart."
    remove_install_artifacts
    exit 0
else
    echo "Unable to determine system state."
    exit 1
fi
# Is there a user logged in?  (ignore the loginwindow pseudo-user)
Username=$(scutil <<< "show State:/Users/ConsoleUser" | awk '/Name :/ && ! /loginwindow/ {print $3}')
if [ -z "$Username" ]; then
    echo "No users logged in. Running updates."
    /usr/sbin/softwareupdate --install --all --restart &
    exit 0
fi

# Prompt user and adjust from there
# Times will have -10 minutes to account for timer
UserChoice=$(/usr/bin/python "$PythonChoiceScriptPath")
case "$UserChoice" in
    'Install updates and restart now')
        StartTime=0 ;;
    'Snooze for 1 hour')
        StartTime=1 ;;
    'Snooze for 3 hours')
        StartTime=3 ;;
    'Snooze for 8 hours')
        StartTime=8 ;;
esac
echo "User has chosen '$UserChoice'"

if [ "$StartTime" == 0 ]; then
    /usr/sbin/softwareupdate --install --all --restart &
    exit 0
else
    # BUG FIX: `date +%H` is zero-padded ("08", "09"); force base 10 so the
    # arithmetic below does not abort on what bash parses as a bad octal
    # literal between 08:00 and 09:59.
    CurrentHour=$((10#$(date +%H)))
    StartHour=$((CurrentHour+StartTime))
    StartMinute=$(date +%M)
    # wrap past midnight
    if (( StartHour >= 24 )); then
        StartHour=$((StartHour-24))
    fi
    # schedule LaunchAgent1 (the countdown + forced restart) for the snooze time
    /usr/libexec/PlistBuddy -c "add :StartCalendarInterval dict" -c "add :StartCalendarInterval:Hour integer $StartHour" -c "add :StartCalendarInterval:Minute integer $StartMinute" "$LaunchAgentPath1"
    /bin/chmod 644 "$LaunchAgentPath1"
    /bin/launchctl load "$LaunchAgentPath1"
    exit 0
fi
| true
|
8faa7b50adfd8d862f9eea5785b5da40b67f319a
|
Shell
|
lijaesela/lush
|
/lc
|
UTF-8
| 7,942
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# lush shell compiler -- add local variables to POSIX shell!
#
# KNOWN LIMITATIONS/TODOS:
#  - invocations of local variables cannot be nested, e.g. '@{var%@{chop}}'.
#    \-> The function has been made, now I must figure out the recursion.
#    \-> FUCK imagine making recursion in a language that doesn't have locals oh my fucking god.
#  - functions cannot be cleanly declared inside of other functions due to the linear, stack-like storage of the scope.
#    \-> I am planning so simply remove this problem by adding local functions.
#  - no error is reported when using 'let' without a proper initialization, e.g. 'let var'.
#    \-> There should be a simple regex that defines a POSIX variable initialization that I could check against.
#  - only the (granted, more essential) "verbose" usage of variables is supported (@{var}), not the bracketless usage (@var).
#  - the "compile_line" function could do with a lot less global variables and some general refactoring.
#
set -e
LOCAL_IMPL="true"  # selects the `local`-based mangle_vars implementation below
# NOTE(review): this installs the bare `set` builtin (dumps all shell
# variables) as the SIGINT handler — presumably a leftover debugging aid;
# confirm intent before removing.
trap set INT
lush_basename () {
    # Pure-shell basename (no fork): strip the run of trailing slashes,
    # then keep everything after the last remaining '/'.
    # An all-slash (or empty-after-trim) input collapses to "/".
    _lb_slashes=${1##*[!/]}          # the trailing-slash run (may be empty)
    _lb_path=${1%"$_lb_slashes"}     # input minus trailing slashes
    _lb_path=${_lb_path##*/}         # drop everything up to the last '/'
    printf '%s\n' "${_lb_path:-/}"
    unset _lb_slashes _lb_path
}
error () {
    # Print a colored diagnostic to stderr — the message, then the
    # offending line with its number — and abort the compiler.
    #   $1: message, $2: line number, $3: source line text
    _err_fmt="\033[31merror:\033[m %s\n\033[32min line %s:\033[m %s\n"
    printf "$_err_fmt" "$1" "$2" "$3" >&2
    exit 1
}
tab () {
    # Emit $1 levels of indentation (4 spaces per level) on stdout.
    # Builds the whole string first so printf runs once, not per level.
    _tab_left=$1
    _tab_out=""
    while [ "$_tab_left" -gt 0 ]; do
        _tab_out="${_tab_out}    "
        _tab_left=$((_tab_left-1))
    done
    printf '%s' "$_tab_out"
    unset _tab_left _tab_out
}
is_in () {
    # Membership test: succeed iff $1 equals one of the $3-delimited
    # fields of $2.
    # BUG FIX: the original `return`ed (on both the hit and miss paths)
    # *before* restoring IFS, so every call permanently left IFS set to
    # the delimiter — clobbering the compiler's global `IFS=` setting.
    # IFS is now restored on every exit path.
    _is_in_oldifs="$IFS"
    IFS="$3"
    for _is_in_field in $2; do
        if [ "$1" = "$_is_in_field" ]; then
            IFS="$_is_in_oldifs"
            unset _is_in_oldifs _is_in_field
            return 0
        fi
    done
    IFS="$_is_in_oldifs"
    unset _is_in_oldifs _is_in_field
    return 1
}
trim_str () {
    # Print $1 with leading and trailing [:space:] characters removed.
    _ts_val=$1
    # leading: the prefix up to the first non-space char is stripped
    _ts_val=${_ts_val#${_ts_val%%[![:space:]]*}}
    # trailing: the suffix after the last non-space char is stripped
    _ts_val=${_ts_val%${_ts_val##*[![:space:]]}}
    printf '%s\n' "$_ts_val"
    unset _ts_val
}
# mangle_vars SRC CONTEXT — rewrite every '@{name}' in SRC into the plain-sh
# form '${CONTEXT_name}'.  Two implementations are kept behind LOCAL_IMPL;
# only the `local`-based one is selected (LOCAL_IMPL is hard-coded "true"
# above).  NOTE(review): `local` is not POSIX, although most /bin/sh
# implementations (dash, ash, bash) support it — confirm for the targets.
if [ "$LOCAL_IMPL" ]; then
    mangle_vars () {
        # $1: src, $2: mangle/context
        local out=""
        local tmp="$1"
        local before
        local middle
        local after
        local inside
        while # do-while loop
            case "$tmp" in *"@{"*"}"*) true;; *) false;; esac
        do
            # split tmp into before / '@{inside}' / after around the first match
            before="${tmp%%@\{*\}*}"
            after="${tmp#*@\{*\}}"
            middle="${tmp#${before}}"
            middle="${middle%${after}}"
            tmp="$after"
            inside="${middle#@\{}"
            inside="${inside%\}}"
            # TODO: making this entire goddamn thing local hasn't fixed the recursion not working
            #inside="$(mangle_vars "$inside" "$2")"
            out="${out}${before}\${${2}_${inside}}"
            tmp="$after"
        done
        # `before` is only set when at least one substitution happened
        if [ "$before" ]; then
            printf '%s%s\n' "$out" "$after"
        else
            # just print the original source if no match was found
            printf '%s\n' "$1"
        fi
    }
else
    #mangle_vars () {
    #   # $1: src, $2: mangle/context
    #   # OG implementation that does not support nesting of parameter expansions
    #   _mangle_vars_out=""
    #   _mangle_vars_tmp="$1"
    #   while # do-while loop
    #       case "$_mangle_vars_tmp" in *"@{"*"}"*) true;; *) false;; esac
    #   do
    #       _mangle_vars_before="${_mangle_vars_tmp%%@\{*\}*}"
    #       _mangle_vars_after="${_mangle_vars_tmp#*@\{*\}}"
    #       _mangle_vars_middle="${_mangle_vars_tmp#${_mangle_vars_before}}"
    #       _mangle_vars_middle="${_mangle_vars_middle%${_mangle_vars_after}}"
    #       _mangle_vars_tmp="$_mangle_vars_after"
    #       _mangle_vars_inside="${_mangle_vars_middle#@\{}"
    #       _mangle_vars_inside="${_mangle_vars_inside%\}}"
    #       _mangle_vars_out="${_mangle_vars_out}${_mangle_vars_before}\${${2}_${_mangle_vars_inside}}"
    #       _mangle_vars_tmp="$_mangle_vars_after"
    #   done
    #   if [ "$_mangle_vars_before" ]; then
    #       printf '%s%s\n' "$_mangle_vars_out" "$_mangle_vars_after"
    #   else
    #       # just print the original source if no match was found
    #       printf '%s\n' "$1"
    #   fi
    #   unset _mangle_vars_out _mangle_vars_tmp _mangle_vars_before \
    #       _mangle_vars_after _mangle_vars_middle _mangle_vars_inside
    #}
    mangle_vars () {
        # $1: src, $2: mangle/context
        # NOTE(review): unfinished WIP — the while loop below has an empty
        # body (`do` immediately followed by `done`), which is presumably a
        # shell syntax error whenever this branch gets parsed; confirm the
        # script actually loads before relying on it.
        _mv_out=""
        _mv_tmp="$1"
        # URGENT TODO: do this complicated iterative solution
        while
            case "$_mv_tmp" in *"@{"*"}"*) true;; *) false;; esac
        do
        done
    }
fi
compile_line () {
    # Translate one line of lush source to plain sh and print it, indented
    # to the current nesting depth.  Mutates the global compiler state:
    #   nest         - current brace nesting depth
    #   scope_mangle - accumulated scope prefix for variable mangling
    #   fnamelen     - how many '_'-separated words the current scope name
    #                  contributed to scope_mangle (popped on '}')
    #   bscopes      - counter for anonymous '{' scopes
    #   localsN / manglesN - per-depth lists of declared locals (via eval)
    #   lineno       - 1-based source line counter, for error messages
    trim="$(trim_str "$1")"
    case "$trim" in
        [A-Za-z]*"("*")"*"{") # function declaration
            tab $nest; printf '%s\n' "$trim"
            # push the function's name onto the scope prefix
            scope_mangle="${scope_mangle}_${trim%% *}"
            nest=$((nest+1))
            # cute little idea for named parameters in functions
            # might add it in later
            #params="${line#*(}"
            #params="${params%)*}"
            # compute name length by counting underscores
            _cl_fname="${trim%% *}"
            fnamelen=1 # deliberately starts at 1
            while [ "$_cl_fname" ]; do
                _cl_tail="${_cl_fname#?}"
                _cl_head="${_cl_fname%${_cl_tail}}"
                [ "$_cl_head" = "_" ] && fnamelen=$((fnamelen+1))
                _cl_fname="${_cl_tail}"
            done
            ;;
        "{") # simple scope
            tab $nest; printf '%s\n' "$trim"
            # anonymous scopes get a synthetic name "b<counter>"
            scope_mangle="${scope_mangle}_b${bscopes}"
            bscopes=$((bscopes+1))
            nest=$((nest+1))
            fnamelen=1
            ;;
        "let "*) # local variable
            if [ $nest = 0 ]; then
                error "use of 'let' in global scope" \
                    "$lineno" "$trim"
            fi
            def="${trim#'let '}"
            # emit the mangled assignment for this scope
            tab $nest; printf '%s_%s\n' "$scope_mangle" "$(mangle_vars "$def" "$scope_mangle")"
            # record the variable (once) in the per-depth locals/mangles
            # lists; eval is needed because the list names are computed
            if eval "is_in \"\${def%=*}\" \"\$locals${nest}\" \" \""; then
                true
            else
                eval "locals${nest}=\"\${locals${nest}} \${def%=*}\""
                eval "mangles${nest}=\"\${mangles${nest}} \${scope_mangle}_\${def%%=*}\""
            fi
            ;;
        "}") # function/scope end
            if [ $nest -le 0 ]; then
                error "mismatched '}', nest level has gone negative" \
                    "$lineno" "$trim"
            fi
            # emit an `unset` for every local declared at this depth
            if eval "[ \"\$mangles${nest}\" ]"; then
                tab $nest; eval "printf '%s\\n' \"unset\${mangles${nest}}\""
                eval "unset mangles${nest}"
            fi
            nest=$((nest-1))
            # pop fnamelen '_'-separated components off the scope prefix
            _cpl_i=0
            while [ $_cpl_i -lt $fnamelen ]; do
                scope_mangle="${scope_mangle%_*}"
                _cpl_i=$((_cpl_i+1))
            done
            tab $nest; printf '%s\n' "$trim"
            ;;
        *)
            # ordinary line: just mangle any @{var} references and emit it
            tab $nest; printf '%s\n' "$(mangle_vars "$trim" "$scope_mangle")"
            ;;
    esac
    lineno=$((lineno+1))
}
# --- Compiler state and entry point ---------------------------------------
lineno=1
nest=0
# what this variable starts as serves as the global prefix for all lush variables
scope_mangle="_"
fnamelen=0 # how many underscore-separated words are in the current function name
bscopes=0 # keeps track of which non-function brackets we're in
# Disable word splitting so "read -r line" keeps each source line intact.
IFS=
if [ "$(lush_basename "$0")" = "lush" ]; then
# act like an interpreter if called as "lush"
# NOTE(review): $@ is unquoted; with IFS cleared above this still expands
# one word per argument, but "$@" would be the conventional spelling.
for src in $@; do
while read -r line; do
compile_line "$line"
done < "$src" | sh -s
done
else
# act like a compiler if called as anything else
while read -r line; do
compile_line "$line"
done
fi
# set
# The ': <quoted string>' below is a no-op used to comment out dead code.
: '
# random fucking token parser I am not using
IFS=
while read line; do
while [ "$line" ]; do
token="${line%% *}"
echo "token: ${token}"
line="${line#$token}"
line=${line#${line%%[![:space:]]*}}
done
done
'
| true
|
8dd2c0e11f6a3d939ee9c4248b488fee16ff29e8
|
Shell
|
devinlimit/Clarusway_aws_devops_workshop
|
/aws/class-notes/for_loop.sh
|
UTF-8
| 79
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print "Number: <n>" for every n from 1 through 20.
for num in {1..20}; do
  echo "Number: $num"
done
| true
|
2ea17d171cb357d31bc09447250cd57e5226ea3b
|
Shell
|
idf/haystack
|
/alias.sh
|
UTF-8
| 1,669
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Container / network configuration for the haystack dev environment.
# Container names:
WEBFRONT_CONTAINER=h_webfront
CACHE_CONTAINER=h_cache
STORAGE_CONTAINER1=h_storage1
STORAGE_CONTAINER2=h_storage2
# Static IPs on the user-defined docker network:
STORAGE_IP1=172.20.0.6
STORAGE_IP2=172.20.0.7
# Host ports mapped onto each storage container's internal port:
STORAGE_SERVER_PORT1=8081
STORAGE_SERVER_PORT2=8082
STORAGE_IMAGE=hyoung/haystack_storage
STORAGE_INTERNAL_PORT=8080
LOCAL_STORAGE_DIR=$(pwd)'/storage'
NETWORK=haystack_network
WEBFRONT_IP=172.20.0.3
WEBFRONT_IMAGE=hyoung/haystack_webfront
LOCAL_WEBFRONT_DIR=$(pwd)'/webfront'
# Push the latest webfront server.js into the already-created container
# and restart it (no image rebuild).
function update_webfront() {
  docker stop "$WEBFRONT_CONTAINER"
  docker cp webfront/server.js "$WEBFRONT_CONTAINER":/root/app/
  docker start "$WEBFRONT_CONTAINER"
}
# Push the latest cache server.js into the already-created container
# and restart it (no image rebuild).
function update_cache() {
  docker stop "$CACHE_CONTAINER"
  docker cp cache/server.js "$CACHE_CONTAINER":/root/app
  docker start "$CACHE_CONTAINER"
}
# Full rebuild cycle for the webfront (use after editing its Dockerfile):
# tear down the container, rebuild the image, recreate with a fixed IP.
function restart_webfront() {
  docker stop "$WEBFRONT_CONTAINER"
  docker rm "$WEBFRONT_CONTAINER"
  docker build -t "$WEBFRONT_IMAGE" "$LOCAL_WEBFRONT_DIR"
  docker run -itd \
    --name "$WEBFRONT_CONTAINER" \
    --network "$NETWORK" \
    --ip "$WEBFRONT_IP" \
    "$WEBFRONT_IMAGE"
}
# Full rebuild cycle for both storage containers: tear down, rebuild the
# shared image once, then recreate each container with its own IP and
# host-port mapping onto the internal storage port.
function restart_storage() {
  docker stop "$STORAGE_CONTAINER1" "$STORAGE_CONTAINER2"
  docker rm "$STORAGE_CONTAINER1" "$STORAGE_CONTAINER2"
  docker build -t "$STORAGE_IMAGE" "$LOCAL_STORAGE_DIR"
  docker run -itd \
    --name "$STORAGE_CONTAINER1" \
    --network "$NETWORK" \
    --ip "$STORAGE_IP1" \
    -p "$STORAGE_SERVER_PORT1":"$STORAGE_INTERNAL_PORT" \
    "$STORAGE_IMAGE"
  docker run -itd \
    --name "$STORAGE_CONTAINER2" \
    --network "$NETWORK" \
    --ip "$STORAGE_IP2" \
    -p "$STORAGE_SERVER_PORT2":"$STORAGE_INTERNAL_PORT" \
    "$STORAGE_IMAGE"
}
# docker logs h_webfront # check container log
| true
|
fc3e2d234b6e00d8703ed2e2d74a920f681a23ab
|
Shell
|
Woona/src
|
/school/big3
|
UTF-8
| 1,950
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Scan nearby "BCBOE1" Wi-Fi access points (macOS `airport` tool) and report
# the three strongest signals plus the currently associated AP's signal.
# Results are passed out of the scan loop via temp files on the Desktop,
# because the while loop below runs in a pipeline subshell and its variables
# would otherwise be lost.
# cap = BSSID of the currently associated AP.
bnum1=0; bnum2=0; bnum3=0; cap=$(/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport /usr/sbin/airport -I | grep "BSSID*" | awk '{print $2}')
/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport /usr/sbin/airport -s | grep "BCBOE1" | while read line; do
# Field $3 is the RSSI (negative dBm); stripping '-' turns it into a
# magnitude, so "greater" here means a *stronger* absolute value.
# NOTE(review): comparing stripped magnitudes ranks -30 *below* -90;
# verify this ordering is the intended one.
if [[ $(echo $line | awk '{print $3}' | sed 's/-//g') -gt $bnum1 ]]; then
bnum1=$(echo $line | awk '{print $3}' | sed 's/-//g')
bline1=$line
elif [[ $(echo $line | awk '{print $3}' | sed 's/-//g') -gt $bnum2 ]]; then
bnum2=$(echo $line | awk '{print $3}' | sed 's/-//g')
bline2=$line
elif [[ $(echo $line | awk '{print $3}' | sed 's/-//g') -gt $bnum3 ]]; then
bnum3=$(echo $line | awk '{print $3}' | sed 's/-//g')
bline3=$line
fi
# If this row is the AP we are associated with, record its raw RSSI.
if [[ $(echo $line | awk '{print $2}') = "$cap" ]]; then
echo $(echo $line | awk '{print $3}') > ~/Desktop/.b30
fi
# Rewrite the top-3 snapshot files on every iteration (subshell-safe).
echo $(echo "$bline1" | awk '{print $2 _ $3}') > ~/Desktop/.b31
echo $(echo "$bline2" | awk '{print $2 _ $3}') > ~/Desktop/.b32
echo $(echo "$bline3" | awk '{print $2 _ $3}') > ~/Desktop/.b33
done
# "-s": short machine-readable output; otherwise print a formatted table.
if [[ $1 = s ]]; then
echo "$(cat ~/Desktop/.b31 | sed 's/-/ /g' | awk '{print $1}') $(cat ~/Desktop/.b32 | sed 's/-/ /g' | awk '{print $1}') $(cat ~/Desktop/.b33 | sed 's/-/ /g' | awk '{print $1}')"
rm ~/Desktop/.b31; rm ~/Desktop/.b32; rm ~/Desktop/.b33
else
echo "+-----------------------------+"
echo "| 3 Closest APs RSI |"
echo "| $(cat ~/Desktop/.b31 | sed 's/-/ -/g') |"; rm ~/Desktop/.b31
echo "| $(cat ~/Desktop/.b32 | sed 's/-/ -/g') |"; rm ~/Desktop/.b32
echo "| $(cat ~/Desktop/.b33 | sed 's/-/ -/g') |"; rm ~/Desktop/.b33
echo "+-----------------------------+"
echo "| Current AP RSI |"
echo "| $cap $(cat ~/Desktop/.b30) |"; rm ~/Desktop/.b30
echo "+-----------------------------+"
echo "| Time Taken = $SECONDS sec |"
echo "+-----------------------------+"
fi
| true
|
25a0a3494f3d388097d1858047998df675519620
|
Shell
|
luis10814/Unix-Shell-scripting-and-C-projects
|
/assignment1/t/leaves.t
|
UTF-8
| 1,272
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
# Test suite for pc-leaves, driven by the shared harness (run_tests below).
. ./t/harness.sh
# Stage the pc-* scripts and the *.mf package manifests into the sandbox.
use_files pc-*
use_files *.mf
# Export each named package set by invoking ./pc-export-packages with the
# PKGMF environment variable pointing at "<name>.mf". Command tracing
# (set -x) is silenced for the duration of the loop and re-enabled after,
# matching what the harness expects.
export_set()
{
  set +x
  for pkgset in "$@"; do
    PKGMF="$pkgset.mf" './pc-export-packages' "$pkgset"
  done
  set -x
}
##test base set has no leaves
print_no_leaves()
{
export_set base
# A "leaf" is a package nothing else depends on; the base set has none,
# so pc-leaves must print zero lines.
test "$(./pc-leaves base |wc -l)" -eq 0
}
##test vim set has one leaf
print_single_leaf()
{
export_set vim
# Exactly one leaf is expected, so compare the whole output directly.
test "$(./pc-leaves vim)" = vim-minimal
}
##test www set has many leaves
big_set_leaves()
{
use_files t/expected.www.leaves
export_set www
./pc-leaves www |sort -u >www.leaves
test -f www.leaves
test "$(wc -l <www.leaves)" -gt 5
# Sanity checks: core libraries must never be leaves; mercurial must be one.
grep -v '^glibc$' www.leaves
grep -v '^bash$' www.leaves
grep '^mercurial' www.leaves
# Both inputs are sorted, so join can diff them field-by-field:
# -v1 = lines only in the expected file (missing), -v2 = only in ours (extra).
join -v1 expected.www.leaves www.leaves >missing.leaves
join -v2 expected.www.leaves www.leaves >extra.leaves
result=true
set +x
if [ -s extra.leaves ]; then
result=false
while read leaf; do
echo "produced unexpected leaf $leaf" >&2
done <extra.leaves
fi
if [ -s missing.leaves ]; then
result=false
while read leaf; do
echo "did not produce expected leaf $leaf" >&2
done <missing.leaves
fi
set -x
# Final verdict: run "true" or "false" so the test's exit status reflects it.
$result
}
run_tests
# vim:ft=sh:
| true
|
2a5f0079233bbcb094c5d0d29202d623bdc41e71
|
Shell
|
rleschuk/ebot
|
/deploy/ebot.sh
|
UTF-8
| 1,232
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh
# SysV-style control script for the ebot service: start | stop | restart.
# Runs ebot.py from its virtualenv, records the PID in ebot.pid, and
# appends stdout/stderr to ebot.log.
ROOT=/opt
PROG_NAME=ebot
PROG_EXEC=ebot.py
PROG_PATH=$ROOT/$PROG_NAME
# PROG_EXEC is deliberately re-assigned: from here on it is the full
# "interpreter + script" command line, not just the script name.
PROG_EXEC="$PROG_PATH/venv/bin/python $PROG_PATH/$PROG_EXEC"
PROG_PID=$PROG_PATH/$PROG_NAME.pid
PROG_LOG=$PROG_PATH/$PROG_NAME.log
PROG_ARGS="run --config production"
# Oracle client libraries needed by the application at runtime.
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/oracle/12.1/client64/lib
case $1 in
start)
echo "Starting $PROG_NAME ..."
# NOTE(review): only the pid file's existence is checked; a stale file
# left by a crash will block startup until removed by hand.
if [ ! -f $PROG_PID ]; then
nohup $PROG_EXEC $PROG_ARGS >>$PROG_LOG 2>&1&
echo $! > $PROG_PID
echo "$PROG_NAME started ..."
else
echo "$PROG_NAME is already running ..."
fi
;;
stop)
if [ -f $PROG_PID ]; then
PID=$(cat $PROG_PID);
echo "$PROG_NAME stoping ..."
kill $PID;
echo "$PROG_NAME stopped ..."
rm $PROG_PID
else
echo "$PROG_NAME is not running ..."
fi
;;
restart)
# NOTE(review): duplicates the stop+start bodies; kill is not waited on,
# so the new process may start before the old one has fully exited.
if [ -f $PROG_PID ]; then
PID=$(cat $PROG_PID);
echo "$PROG_NAME stopping ...";
kill $PID;
echo "$PROG_NAME stopped ...";
rm $PROG_PID
echo "$PROG_NAME starting ..."
nohup $PROG_EXEC $PROG_ARGS >>$PROG_LOG 2>&1&
echo $! > $PROG_PID
echo "$PROG_NAME started ..."
else
echo "$PROG_NAME is not running ..."
fi
;; esac
| true
|
42335a374b7a7712c6b27887103b93bfc9107e33
|
Shell
|
alexey-larionov/rms_aws_2021
|
/s03_annotate/s05_update_chromosomes_in_header.sh
|
UTF-8
| 2,103
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# s05_update_chromosomes_in_header.sh
# Alexey Larionov 02Apr2021
# Use:
# ./s05_update_chromosomes_in_header.sh &> s05_update_chromosomes_in_header.log
# Notes:
# This step is optional: just to make some plots look nicier in VEP-html report.
# Of course, its only possible after we filtered al variants outside of
# standard chromosomes during the previous step.
# Stop at runtime errors
set -e
# Start message
echo $0
date
echo ""
# Files and folders
base_folder="/home/share"
base_name="zhang_hg38.bwa.QC.MA-flag.MA-split.ID.std-Chr"
scripts_folder="${base_folder}/scripts/s03_annotate"
cd "${scripts_folder}"
data_folder="${base_folder}/data/s03_annotate"
source_vcf="${data_folder}/${base_name}.vcf.gz"
output_vcf="${data_folder}/${base_name}.Reheaded.vcf.gz"
header_old="${data_folder}/header.old"
header_new="${data_folder}/header.new"
# Progress report
bcftools --version
echo ""
echo "source_vcf: ${source_vcf}"
echo "output_vcf: ${output_vcf}"
echo ""
# Extract old header
bcftools view -h "${source_vcf}" > "${header_old}"
# Modify old header: exclude the alt etc
# Drop ##contig lines for HLA, unplaced (chrUn), EBV, _alt and _random
# contigs so only the standard chromosomes remain in the new header.
# (The grep patterns are unquoted; the escaped \< keeps '<' literal.)
cat "${header_old}" | \
grep -v ^##contig=\<ID=HLA | \
grep -v ^##contig=\<ID=chrUn | \
grep -v ^##contig=\<ID=chrEBV | \
grep -v ^##contig=\<ID=chr.*_alt | \
grep -v ^##contig=\<ID=chr.*_random \
> "${header_new}"
# Reheader VCF using bcftools
echo "Reheading ..."
bcftools reheader "${source_vcf}" \
--header "${header_new}" \
--output "${output_vcf}" \
--threads 4
echo ""
# Note that bcftools reheader does not accept '--output-type' option
# Index output vcf
echo "Indexing ..."
bcftools index "${output_vcf}"
echo ""
# Explore result
echo "Number of contigs in source vcf header:"
bcftools view -h "${source_vcf}" | grep ^##contig | wc -l
echo ""
echo "Number of contigs in output vcf header:"
bcftools view -h "${output_vcf}" | grep ^##contig | wc -l
echo ""
echo "Contigs in source vcf:"
# Only the first 30 contigs are shown for the (long) source header.
bcftools view -h "${source_vcf}" | grep ^##contig | head -n 30
echo "..."
echo ""
echo "Contigs in output vcf:"
bcftools view -h "${output_vcf}" | grep ^##contig
echo ""
# Completion message
echo "Done"
date
| true
|
3dcadf69ac2160951676ba3ecb768ad154e9bcc0
|
Shell
|
kpmmmurphy/FinalYearProject
|
/wifi_direct/setup_ap.sh
|
UTF-8
| 807
| 2.6875
| 3
|
[] |
no_license
|
#Author: Kevin Murphy
#Date: 6 - Jan - 15
#Sets up raspberry pi as an access point:
# installs hostapd + udhcpd, drops in the prepared config files, enables
# IPv4 forwarding and NATs wlan0 clients out through eth0.
sudo apt-get install hostapd udhcpd
sudo cp ./ap_config_files/udhcpd.conf /etc/udhcpd.conf
sudo cp ./ap_config_files/udhcpd /etc/default/udhcpd
sudo cp ./ap_config_files/interfaces /etc/network/interfaces
sudo cp ./ap_config_files/hostapd.conf /etc/hostapd/hostapd.conf
sudo cp ./ap_config_files/hostapd /etc/default/hostapd
# FIX: the original `sudo echo "..." >> /etc/sysctl.conf` fails for non-root
# users: the >> redirection is performed by the unprivileged shell, not by
# sudo. Pipe through `sudo tee -a` so the append itself runs as root.
echo "net.ipv4.ip_forward=1" | sudo tee -a /etc/sysctl.conf > /dev/null
#Setting up IP Tables
sudo iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
sudo iptables -A FORWARD -i eth0 -o wlan0 -m state --state RELATED,ESTABLISHED -j ACCEPT
sudo iptables -A FORWARD -i wlan0 -o eth0 -j ACCEPT
# Persist the rules; the whole pipeline must run as root, hence `sh -c`.
sudo sh -c "iptables-save > /etc/iptables.ipv4.nat"
sudo update-rc.d hostapd enable
sudo update-rc.d udhcpd enable
| true
|
b4f2258d936424e1004e50c7cddbc58c26be4433
|
Shell
|
drozdowsky/rc-files
|
/.local/bin/vol
|
UTF-8
| 3,982
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/sh
#
# https://github.com/mitchweaver/bin
# https://github.com/drozdowsky
#
# Simple volume control
#
# Print the help text and exit successfully.
usage() {
    printf '%s\n' \
        "vol: change volume" \
        "Usage: vol [-+=value]" \
        " vol -mute" \
        " vol -unmute" \
        " vol -ismute"
    exit 0
}
# Clear the mute flag on the master output, dispatching on the host OS.
# (There is no FreeBSD branch here, matching the original.)
unmute() {
    case "$(uname)" in
        Linux)   amixer sset Master unmute ;;
        OpenBSD) mixerctl -q outputs.master.mute=off ;;
        Darwin)  osascript -e 'set volume output muted false' ;;
    esac
}
# Mute the master output. Note: on Linux and OpenBSD the underlying command
# is a *toggle* (same as the original), not an unconditional mute.
mute() {
    case "$(uname)" in
        Linux)   amixer sset Master toggle ;;
        OpenBSD) mixerctl -q outputs.master.mute=toggle ;;
        FreeBSD) mixer vol 0 ;;
        Darwin)  osascript -e 'set volume output muted true' ;;
    esac
}
# Set the absolute output volume to $1 (un-muting first), per OS.
setv() {
    unmute
    case "$(uname)" in
        Linux)
            if [ -x "$(which pulseaudio)" ]; then
                pactl set-sink-volume 0 "$1"%
            else
                amixer sset Master "$1"
            fi
            ;;
        OpenBSD) mixerctl -q outputs.master="$1" ;;
        FreeBSD) mixer vol "$1" ;;
        Darwin)  osascript -e "set volume output volume $1" ;;
    esac
}
# Raise the output volume by $1 (un-muting first), per OS.
incv() {
    unmute
    case "$(uname)" in
        Linux)
            if [ -x "$(which pulseaudio)" ]; then
                pactl set-sink-volume 0 +"$1"%
            else
                amixer sset Master "$1"+
            fi
            ;;
        OpenBSD) mixerctl -q outputs.master="+$1" ;;
        FreeBSD) mixer vol "+$1" ;;
        Darwin)  osascript -e "set volume output volume $1+(output volume of (get volume settings))" ;;
    esac
}
# Lower the output volume by $1 (un-muting first), per OS.
decv() {
    unmute
    case "$(uname)" in
        Linux)
            # (the original used `type` here, unlike setv/incv which use `which`)
            if type pulseaudio > /dev/null 2>&1 ; then
                pactl set-sink-volume 0 -"$1"%
            else
                amixer sset Master "$1"-
            fi
            ;;
        OpenBSD) mixerctl -q outputs.master="-$1" ;;
        FreeBSD) mixer vol "-$1" ;;
        Darwin)  osascript -e "set volume output volume (output volume of (get volume settings))-$1" ;;
    esac
}
# Print "muted" or "unmuted" for the current output device.
# FreeBSD has no mute query here and always reports "unmuted".
ismute() {
case $(uname) in
Linux)
# NOTE(review): the $( ... >/dev/null) expands to nothing, so the `if`
# takes the exit status of the command substitution itself (i.e. grep's
# status). It works, but `if amixer ... | grep -q '\[off\]'` is clearer.
if $(amixer -q sget Master | grep -o '\[off\]' > /dev/null) ; then
echo "muted"
else
echo "unmuted"
fi ;;
OpenBSD)
if [ $(mixerctl -n outputs.master.mute) = on ] ; then
echo "muted"
else
echo "unmuted"
fi ;;
FreeBSD)
echo "unmuted"
;;
Darwin)
if [ "$(osascript -e 'output muted of (get volume settings)')" = "true" ]; then
echo "muted"
else
echo "unmuted"
fi
;;
esac
}
# Print the current output volume as "<n>%", or "muted" (then exit 0).
# Fixes vs. the original:
#  * `==` inside [ ] is a bashism and breaks under a POSIX /bin/sh; use `=`.
#  * the clamp `[ $vol -lt 0 ] && vol=0 || [ $vol -gt 100 ] && vol=100`
#    parses as ((A && B) || C) && D, so a negative reading ended up clamped
#    to 100 instead of 0; rewritten as an explicit if/elif.
get_vol() {
    if [ "$(ismute)" = "muted" ]; then
        echo "muted"
        exit 0
    fi
    case $(uname) in
        Linux)
            if [ -x "$(which pulseaudio)" ]; then
                # NOTE(review): $SINK is never set in this script; the
                # arithmetic assumes it arrives via the environment — confirm.
                vol=`pactl list sinks | grep '^[[:space:]]Volume:' | \
                    head -n $(( $SINK + 1 )) | tail -n 1 | sed -e 's,.* \([0-9][0-9]*\)%.*,\1,'`
            else
                vol="$(amixer sget Master | \
                    grep -oE '\[*..?.?%\]' | \
                    sed -e 's/\[//' -e 's/\%\]//')"
            fi
            ;;
        OpenBSD)
            vol="$(mixerctl -n outputs.master)"
            vol=${vol%,*}
            # convert 0-255 to 0-100%
            vol=$(echo ${vol} \* 0.4 | bc)
            # convert back to int
            vol=${vol%.*}
            ;;
        FreeBSD)
            vol="$(mixer vol)"
            vol="$(echo ${vol} | grep -Eo '[0-9]+$')"
            ;;
        Darwin)
            vol=$(osascript -e 'set ovol to output volume of (get volume settings)')
            ;;
    esac
    # clamp to [0, 100]
    if [ "$vol" -lt 0 ]; then
        vol=0
    elif [ "$vol" -gt 100 ]; then
        vol=100
    fi
    echo "${vol}%"
}
# Entry point: dispatch on the single optional argument
# (-N lowers, +N raises, =N or a bare number sets an absolute level).
case "$1" in
-mute) mute ;;
-unmute) unmute ;;
-ismute) ismute && exit 0 ;;
--help|-h) usage ;;
-[0-9]*) decv "${1##-}" > /dev/null ;;
+[0-9]*) incv "${1##+}" > /dev/null ;;
=*|[0-9]*) setv "${1##=}" ;;
esac
# Always finish by reporting the resulting volume.
get_vol
| true
|
fc1c7850eb3cddcad81d9c629853290a637f46f9
|
Shell
|
chenquzhao/20T2_COMP9044
|
/ass2/demo04.sh
|
UTF-8
| 309
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/dash
# Designed for Subset 4
# Case demo: report which language is "best", then while/if demo: append
# the numbers 0..5 to tmp.txt and exit 0.
lan="Java"
case $lan in
"C")
echo "C is the best language"
;;
"Java")
echo "Java is the best language"
;;
*)
echo "Python is the best language"
;;
esac
i=0
while true
do
echo $i >>'tmp.txt'
if [ $i -eq 5 ]
then
exit 0
fi
# FIX/idiom: POSIX arithmetic expansion instead of forking `expr` each loop.
i=$((i + 1))
done
| true
|
1d07a1ae6b4a3b5cafb3ace7cfc6f71217d016cc
|
Shell
|
TMRolle/anthos-bare-metal-ref-arch
|
/scripts/gcp/instance_startup_script.sh
|
UTF-8
| 1,162
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# GCE startup script: prepare an Ubuntu instance for Anthos bare metal
# (firewall/AppArmor off, dedicated "anthos" user with passwordless sudo).
# Install packages
apt update
apt install -y jq
# Disable Uncomplicated Firewall (ufw)
ufw disable
# Disable AppArmor
systemctl stop apparmor.service
systemctl disable apparmor.service
# Add anthos group and user (fixed uid/gid 2000 so they match across nodes)
addgroup \
--gid 2000 \
anthos
adduser \
--disabled-password \
--gecos "Anthos user" \
--gid 2000 \
--uid 2000 \
anthos
# Configure anthos user for passwordless sudo
echo -e "anthos\tALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/anthos
# Create the anthos user .ssh directory
mkdir --mode=700 -p ~anthos/.ssh
chown anthos:anthos ~anthos/.ssh
exit 0
| true
|
608810ffc67e883a404b34932f7708f081a61111
|
Shell
|
Tubbz-alt/WRF-ROMS-Coupled
|
/jobs/fcstrun.sh
|
UTF-8
| 1,617
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Forecast run driver for the coupled WRF-ROMS model: stage inputs from
# $COMOUT into a scratch dir ($PTMP), launch the coupled executable under
# MPI, and move the results back on success.
set -x
ulimit -s unlimited
ulimit -c unlimited
. /usr/share/Modules/init/bash
#module purge
export I_MPI_OFI_LIBRARY_INTERNAL=1
#export WRFIO_NCD_LARGE_FILE_SUPPORT=1
#export GFORTRAN_CONVERT_UNIT='native;big_endian:10'
export GFORTRAN_CONVERT_UNIT='big_endian'
# module load wrfroms
module load gcc/6.5.0
module load mpi/intel/2020.0.166
module load netcdf/4.5
module load hdf5/1.10.5
module load libpng/1.5.30
module load esmf/8.0.0
ROTDIR=`dirname ${PWD}`
# Forecast cycle: date (YYYYMMDD) and hour, overridable from the environment.
CDATE=${CDATE:-20110827}
HH=${HH:-'06'}
COMOUT=/com/wrfroms/$CDATE$HH
PTMP=/ptmp/wrfroms/$CDATE$HH
mkdir -p $COMOUT
# Start from a clean scratch directory for this cycle.
if [ -e $PTMP ]; then rm -Rf $PTMP; fi
mkdir -p $PTMP
# Copy forcing data
#FRCDIR=$ROTDIR/forcing/$CDATE
#cp -p $FRCDIR/* $PTMP
# Copy the links needed by WRF, these were created during the build
cp -Pp $ROTDIR/SORC/data/* $PTMP
# Copy the inputs to PTMP
cd $COMOUT
cp -p * $PTMP
#cp -p coupling_esmf_atm_sbl.in $PTMP # Coupler/Mediator
#cp -p roms_doppio_coupling.in $PTMP # ROMS
#cp -p namelist.input $PTMP # WRF
export I_MPI_DEBUG=1
curdir=$PWD
export NODES=${NODES:-1}
export NPROCS=${NPROCS:-24} # Number of processors
export PPN=${PPN:-$((NPROCS/NODES))}
export HOSTFILE=${HOSTFILE:-$PWD/hosts}
# MPIOPTS stays unquoted at the call site on purpose so it word-splits
# into separate mpirun arguments.
export MPIOPTS=${MPIOPTS:-"-np $NPROCS -ppn $PPN"}
echo "NPROCS is $NPROCS"
export oceanin=coupling_esmf_atm_sbl.in
#export exec=romsG_atmsbl
export exec=romsM_atmsbl
# Copy the executable
cp -p $ROTDIR/exec/$exec $PTMP
cd $PTMP
mpirun $MPIOPTS $PTMP/$exec $oceanin
# On success, move results back to $COMOUT and remove the scratch dir;
# on failure, leave $PTMP in place for debugging.
if [ $? -ne 0 ]; then
echo "ERROR returned from mpirun"
else
mv $PTMP/* $COMOUT
cd $COMOUT
rm -Rf $PTMP
fi
| true
|
6596f39f8d0737f7c847cf65af0dc196c83ff3a9
|
Shell
|
fabsta/TreeFam
|
/tools/get_homologs/start_array_jobs.sh
|
UTF-8
| 668
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Submit LSF job arrays in batches: 11 arrays of 500 elements covering
# 5050 jobs in total; each array element processes input.$LSB_JOBINDEX.
NoOfArrayJobs=11
echo About to start $NoOfArrayJobs
NoOfJobsPerArray=500
echo Each job with have $NoOfJobsPerArray
#TotalNumberOfJobs=$(cat first_50_families.txt | wc -l)
TotalNumberOfJobs=5050
echo There are $TotalNumberOfJobs jobs in total
for (( Counter = 1; Counter <= TotalNumberOfJobs; Counter += NoOfJobsPerArray )); do
  # Last job index covered by this array submission.
  lastJob=$(( Counter + NoOfJobsPerArray - 1 ))
  echo last Job is $lastJob
  bsub -o %I.out -e %I.err -J "myArray[$Counter-$lastJob]" /nfs/users/nfs_f/fs9/bin/perl_modules/ensembl_main/treefam_tools/get_homologs/get_homologs_pairwise.pl -id input.\$LSB_JOBINDEX
  echo bsub -J "myArray[$Counter-$lastJob]" myJob
done
| true
|
2d898450090e7e3ff074f736fe45eaf3f13111c8
|
Shell
|
stvngrcia/holberton-system_engineering-devops-2018
|
/0x05-processes_and_signals/100-process_and_pid_file
|
UTF-8
| 418
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# creates a file, displays a string indefinitely, handles SIGTERM, SIGINT, SIGQUIT
# The PID file lets other processes find (and signal) this script.
touch /var/run/holbertonscript.pid
echo $$ > /var/run/holbertonscript.pid
# SIGTERM: announce, clean up the pid file, and exit.
trap "echo 'I hate the kill command'; rm /var/run/holbertonscript.pid; exit" SIGTERM
# SIGINT: complain but keep running (Ctrl-C does not stop the loop).
trap "echo 'Y U no love me?!'" SIGINT
# SIGQUIT: clean up the pid file silently and exit.
trap "rm /var/run/holbertonscript.pid; exit" SIGQUIT
while true
do
echo "To infinity and beyond"
sleep 2
done
| true
|
bf9fd332def2ed54b425b7d2517a10d3e45c2f83
|
Shell
|
LighteningIce/partition-trees
|
/example/loop_run_small.sh
|
UTF-8
| 1,864
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# ergodic: run the partition-tree heuristics over every input file in a
# directory, for a sweep of cost coefficients, appending results per run.
# $1: input tree directory, $2: result-set label, $3: processor count (-n).
function ergodic(){
#dir_path="../result_noconstraint/$2";
# test_lessprocessor
org_path="../result_outMdeg/result_p$3"
mkdir $org_path
dir_path="../result_outMdeg/result_p$3/$2";
mkdir $dir_path;
#,2.00,3.00,8.00
# Sweep of -c cost coefficients.
for i in {0.01,0.10,1.00,5.00,10.00}
do
#i=1.00
echo $i;
# NOTE(review): iterating over `ls $1` breaks on filenames with
# whitespace; the tree files here presumably have plain names — confirm.
for file in ` ls $1`
do
echo $1"/"$file
./call-heuristics -s -l -q -c $i -n $3 -d $1"/"$file >>$dir_path/$2_c$i.txt;
./call-heuristics -i -l -q -c $i -n $3 -d $1"/"$file >>$dir_path/$2_c$i.txt;
./call-heuristics -a -l -q -c $i -n $3 -d $1"/"$file >>$dir_path/$2_c$i.txt;
./select-heuristics -l -q -c $i -n $3 -d $1"/"$file >>$dir_path/$2_c$i.txt;
./call-heuristics-main -l -q -c $i -n $3 -m 0 -d $1"/"$file >>$dir_path/$2_c$i.txt
#./call-heuristics -s -l -q -c $i -n $3 -d $1"/"$file >>$dir_path/$2_c$i.txt;
#./select-heuristics -s -l -q -c $i -n $3 -d $1"/"$file >>$dir_path/$2_c$i.txt;
#./call-heuristics-main -s -l -q -c $i -n $3 -m 2 -d $1"/"$file >>$dir_path/$2_c$i.txt
# ./call-heuristics -s -l -q -c $i -n 50 -d $1"/"$file ;
# ./select-heuristics -s -l -q -c $i -n 50 -d $1"/"$file ;
# ./call-heuristics-main -s -l -q -c $i -n 50 -d $1"/"$file
done;
done
}
#
# Driver: run the sweep for each processor count and each tree-size bucket.
for p in {1,10,20,30,40,50}
do
# p=50
# "20","40","80","256","15_100","101_500","501_1000","1001_2000",
for x in {"2001_10000","10001_20000","20001_60000"}
do
#x=20001_60000
INIT_PATH="../randTrees/randTrees_"$x
RES_PATH="test_"$x
NPR=$p
ergodic $INIT_PATH $RES_PATH $NPR
# # echo $RES_PATH
# # echo $INIT_PATH
done
done
#INIT_PATH="../randTrees/randTrees_15_100"
#RES_PATH="testtest_15_100"
#NPR=1
#ergodic $INIT_PATH $RES_PATH $NPR
| true
|
997c0848f5d88e4014947d127640c3dfd083a236
|
Shell
|
osy/HaC-Mini
|
/Installer/scripts/install_kexts.sh
|
UTF-8
| 1,998
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# FIX: shebang changed from #!/bin/sh — this script uses bash-only
# ${var/pattern/replacement} substitutions (e.g. "${file/$KEXTDIR\//}"),
# which fail under a strictly POSIX /bin/sh.
#
# Rebuild the OpenCore config's Kernel:Add array from the kexts found under
# the kext directory, ordered by optional "<kext>.Priority.txt" sidecar
# files (lower number = added earlier; default 9999).
# $1: path to config.plist, $2: kext directory.
set -e
CONFIG="$1"
KEXTDIR="$2"
PLIST_BUDDY="/usr/libexec/PlistBuddy"
# Start from a clean array; ignore failure if it does not exist yet.
$PLIST_BUDDY -c "Delete :Kernel:Add" "$CONFIG" || true
$PLIST_BUDDY -c "Add :Kernel:Add array" "$CONFIG"
i=0
# Pass 1 prefixes each kext path with its priority and consumes the sidecar
# file; `sort -n` then orders them; pass 2 writes the plist entries.
# Both while loops run in pipeline subshells, but `i` only needs to persist
# across iterations of the second loop, which it does.
find "$KEXTDIR" -name '*.kext' -type d | \
while read line
do
_pf="$line.Priority.txt"
_priority=9999
if [ -f "$_pf" ]; then
_priority=`cat "$_pf"`
rm "$_pf"
fi
echo "$_priority $line"
done | sort -n | \
while read line
do
priority=`echo $line | awk '{ print $1 }'`
file=`echo $line | awk '{ $1=""; print substr($0,2) }'`
echo "Found $file (priority $priority)"
kext="${file/$KEXTDIR\//}"
$PLIST_BUDDY -c "Add :Kernel:Add:$i dict" "$CONFIG"
echo "BundlePath: $kext"
$PLIST_BUDDY -c "Add :Kernel:Add:$i:BundlePath string $kext" "$CONFIG"
info=`find "$file" -name 'Info.plist' -type f -maxdepth 2 | head -1`
info="${info/$file\//}"
echo "PlistPath: $info"
$PLIST_BUDDY -c "Add :Kernel:Add:$i:PlistPath string $info" "$CONFIG"
base=`basename $kext`
# Find MaxKernel config
maxKernel=`cat "$KEXTDIR/$base.MaxKernel.txt" 2> /dev/null || true`
if [ ! -z "$maxKernel" ]; then
echo "MaxKernel: $maxKernel"
$PLIST_BUDDY -c "Add :Kernel:Add:$i:MaxKernel string $maxKernel" "$CONFIG"
rm "$KEXTDIR/$base.MaxKernel.txt" # no longer needed
fi
# Find MinKernel config
minKernel=`cat "$KEXTDIR/$base.MinKernel.txt" 2> /dev/null || true`
if [ ! -z "$minKernel" ]; then
echo "MinKernel: $minKernel"
$PLIST_BUDDY -c "Add :Kernel:Add:$i:MinKernel string $minKernel" "$CONFIG"
rm "$KEXTDIR/$base.MinKernel.txt" # no longer needed
fi
exe=`find "$file" -path "*/Contents/MacOS/*" -type f -maxdepth 3 | head -1`
exe="${exe/$file\//}"
echo "ExecutablePath: $exe"
$PLIST_BUDDY -c "Add :Kernel:Add:$i:ExecutablePath string $exe" "$CONFIG"
$PLIST_BUDDY -c "Add :Kernel:Add:$i:Enabled bool true" "$CONFIG"
i=`expr $i + 1`
done
| true
|
676b9176cfe718829df07ed431f02c20fac35d79
|
Shell
|
robindanzinger/bash_playground
|
/lib/assert.sh
|
UTF-8
| 1,130
| 3.796875
| 4
|
[] |
no_license
|
# Fail the run (via throw_error) unless the two arguments are identical.
# $1: expected value, $2: actual value.
function assert_equal {
  expected=$1
  actual=$2
  [ "$expected" = "$actual" ] || throw_error "$expected is not equal to $actual"
}
# Assert that folder $1 exists; with $2 = "not", assert that it does not.
function assert_folder_exists {
  if [[ "${2:-}" == "not" ]]; then
    assert "! -d $1" "folder $1 exists"
  else
    assert "-d $1" "folder $1 doesn't exist"
  fi
}
# Assert that a file (or any path) named $1 exists.
function assert_file_exists {
  assert "-e $1" "file $1 doesn't exist"
}
# Evaluate test-expression string $1 (deliberately unquoted so it word-splits
# into `test` operands); on failure report message $2 and abort.
function assert {
  test $1 || throw_error "$2"
}
# Run command string $1 in a subshell; on non-zero exit, fail with message $2.
function assert_cmd {
  (eval $1) || throw_error $2
}
# Run command string $1 in a subshell; fail with message $2 if it SUCCEEDS.
function assert_cmd_not {
  if (eval $1); then
    throw_error $2
  fi
}
# Report an assertion failure and abort the whole run with the harness's
# sentinel exit code 42.
function throw_error {
  printf '%s\n' "assertion failed" "$1"
  exit 42
}
# Assert that the log file ($cha_logfile) matches grep pattern $1;
# with $2 = "not", assert that it does not match.
function assert_log_contains {
  cmd="grep $1 $cha_logfile"
  if [[ "${2:-}" == "not" ]]; then
    assert_cmd_not "$cmd" "log contains $1, but wasn't expected"
  else
    assert_cmd "$cmd" "log doesn't contain $1, but was expected"
  fi
}
# Run $1 (word-split) in a subshell and fail if it unexpectedly succeeds.
function fails {
  echo "fails cmd" $1
  if ($1); then
    throw_error "should fail, but passed"
  fi
}
| true
|
28154c18ab2c3438bb8eae1c59b59ececb96e744
|
Shell
|
kwegner7/publish
|
/tools/tools-ubu/tools/bsh/backup-ubu.bsh
|
UTF-8
| 925
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash -norc
##############################################################################
# copy ubuntu files to the windows side in order to use SyncToy
##############################################################################
declare -r SOURCE_FOLDER="/home/kurt/ubu"
declare -r DESTINATION_FOLDER="/media/File System - Windows/Users/Kurt/Documents"
echo ''
# Split words on newlines only so the space-containing mount path survives
# the unquoted expansions below.
IFS=$'\n'
mkdir --parents "$DESTINATION_FOLDER"
cp --recursive --update --preserve=timestamps --backup=numbered --interactive \
"${SOURCE_FOLDER}" "${DESTINATION_FOLDER}"
declare -r SIZE=$(du -h --summarize "$DESTINATION_FOLDER/ubu" | cut -f1)
echo -en "\nFiles have been copied to $DESTINATION_FOLDER/ubu "
echo -e "size is $SIZE\n"
# FIX: the glob was inside double quotes (`rm ".../*.pdf"`), so the shell
# looked for a file literally named '*.pdf' and never deleted anything.
# Quote only the directory part and leave *.pdf unquoted so it expands;
# -f keeps the script quiet when there is nothing to remove.
rm -f "/media/File System - Windows/Users/Kurt/ToBePrinted/final/"*.pdf
cp --dereference /home/kurt/ubu/pdf/fiance-visa/final/*.pdf "/media/File System - Windows/Users/Kurt/ToBePrinted/final"
IFS=$'\x20\t\n'
exit
| true
|
610e40e7c8d807d38238753f632ba6dca4410751
|
Shell
|
MountainField/bash-timeout
|
/tests/test_bash-timeout.bats
|
UTF-8
| 2,218
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bats
# =================================================================
# bash-timeout
#
# Copyright (c) 2018 Takahide Nogayama
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
# =================================================================
# Each @test runs the timeout-under-test ($TIMEOUT, supplied by the
# environment) through `bats`; $status/$output are set by bats' `run`.
@test "${CONTEXT} / Status / timeout 2 sleep 1 #=> success" {
    run bash -c "\
${TIMEOUT} 2 sleep 1 \
2> ${BATS_TMPDIR}/stderr"
    (( ${status} == 0 ))
}
@test "${CONTEXT} / Status / timeout 1 sleep 2 #=> error" {
    run bash -c "\
${TIMEOUT} 1 sleep 2 \
2> ${BATS_TMPDIR}/stderr"
    [[ "${status}" > 0 ]]
}
@test "${CONTEXT} / Status / timeout 1 ls /FOOBAAR #=> error and status was retained" {
    # First capture the status `ls` itself produces, then check that the
    # timeout wrapper forwards exactly that status.
    run bash -c "ls /FOOBAAR &> /dev/null"
    local expected_status=$status
    [[ "${expected_status}" > 0 ]]
    run bash -c "\
${TIMEOUT} 1 ls /FOOBAAR \
2> ${BATS_TMPDIR}/stderr"
    [[ "${status}" == "${expected_status}" ]]
}
@test "${CONTEXT} / Output / timeout 1 echo abc #=> abc" {
    run bash -c "\
${TIMEOUT} 1 echo abc \
1> ${BATS_TMPDIR}/output \
2> ${BATS_TMPDIR}/stderr"
    (( ${status} == 0 ))
    echo abc > ${BATS_TMPDIR}/expected
    diff ${BATS_TMPDIR}/output ${BATS_TMPDIR}/expected
}
@test "${CONTEXT} / Input pipe / echo abc | timeout 1 cat #=> abc" {
    run bash -c "\
echo abc | ${TIMEOUT} 1 cat \
1> ${BATS_TMPDIR}/output \
2> ${BATS_TMPDIR}/stderr"
    (( ${status} == 0 ))
    echo abc > ${BATS_TMPDIR}/expected
    diff ${BATS_TMPDIR}/output ${BATS_TMPDIR}/expected
}
@test "${CONTEXT} / Input redirect / < abc.txt timeout 1 cat #=> abc" {
    echo abc > ${BATS_TMPDIR}/abc.txt
    run bash -c "\
< ${BATS_TMPDIR}/abc.txt ${TIMEOUT} 1 cat \
1> ${BATS_TMPDIR}/output \
2> ${BATS_TMPDIR}/stderr"
    (( ${status} == 0 ))
    echo abc > ${BATS_TMPDIR}/expected
    diff ${BATS_TMPDIR}/output ${BATS_TMPDIR}/expected
}
@test "${CONTEXT} / In script / timeout 1 myfunc #=> success" {
    # Only applicable when $TIMEOUT is a sourceable script providing a
    # shell function `timeout`; otherwise the test is skipped.
    if source ${TIMEOUT} ; then
        run bash -c "\
function myfunc0() { echo a; } ; \
function myfunc() { myfunc0; } ; \
source ${TIMEOUT} ; \
timeout 2 myfunc \
2> ${BATS_TMPDIR}/stderr"
        (( ${status} == 0 ))
        [[ "${output}" == "a" ]]
    else
        skip
    fi
}
| true
|
0d636bf2431ec3d5417c24defe073d90f2aa752e
|
Shell
|
aadee92/BeeManager
|
/README/Provisioning.sh
|
UTF-8
| 1,405
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Provisioning File for Vagrant
# Installs everything needed for the HiveManagement App
#
#Start with everything updated
sudo apt-get update
#Install Python3 with Pip
sudo apt-get -y install python3-pip
#Setup a virtual environment
# - Make Folder
cd /vagrant/
mkdir /vagrant/venv
# - Install Virtual Environment
sudo -H pip3 install virtualenv
# - Configure Virtual Environment to use Python3
sudo -H virtualenv /vagrant/venv -p python3
# Begin using the virtual environment
source /vagrant/venv/bin/activate
#Install django
pip install django
#Psycopg2 is a tool that lets Python talk to PostgreSQL
# - First, install its dependencies
sudo apt-get -y install libpq-dev python-dev
# - Install Psycopg2
pip install psycopg2
#Install REST API
pip install djangorestframework
#Install PostgreSQL
sudo apt-get install -y postgresql postgresql-contrib postgresql-9.3-postgis-scripts
# FIX: the original ran "sudo -u postgres psql" followed by the SQL on the
# next *script* lines; psql started interactively and the SQL was executed
# by the shell after psql exited, so the password was never set. Run the
# statement non-interactively with -c instead.
sudo -u postgres psql -c "ALTER USER postgres PASSWORD 'password';"
sudo -u postgres createdb BeeManagement
#Install its GEO-Library
# FIX: apt package names are lowercase; "PostGIS" is not a valid name.
sudo apt-get install -y postgis
#Configure this Django superuser
cd /vagrant/
# Install GIT
sudo apt-get -y install git
#Pull the latest project
git clone https://github.com/aadee92/HiveManagement.git
cd ./HiveManagement
python manage.py migrate
# NOTE(review): createsuperuser is interactive; for unattended provisioning
# consider --noinput plus the DJANGO_SUPERUSER_* environment variables.
python manage.py createsuperuser
#admin
#password
python manage.py runserver 0.0.0.0:8000 --noreload
| true
|
869527080abf10cb4d36645ba1d1e32ddfd93e6d
|
Shell
|
haderman/woki-extension
|
/dev.sh
|
UTF-8
| 856
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# how to use: sh dev.sh popup|newtab|background
# This script is to watch changes in files and creat bundles from it
trap before_exit EXIT
# Exit hook: announce that the watcher is shutting down.
before_exit() {
  printf '%s\n' "End"
}
# Watch the Elm sources for the new-tab page and rebuild its bundle on change.
watch_newtab() {
  cd src/elm
  elm-live src/NewTab.elm -- --debug --output=../newtab/newtab.bundle.js
}
# Watch the Elm sources for the popup and rebuild its bundle on change.
watch_popup() {
  cd src/elm
  elm-live src/Popup.elm -- --debug --output=../popup/popup.bundle.js
}
watch_background() {
# Run the background script under denon (a deno file-watcher).
# NOTE(review): PATH is extended with a hard-coded, user-specific deno bin
# directory so "denon" resolves; this belongs in the developer's own shell
# profile rather than in the repository. Template for other machines:
#
# export PATH="/Users/<your account>/.deno/bin:$PATH"
#
export PATH="/Users/hadercardona/.deno/bin:$PATH"
denon start
}
# Dispatch on the single argument: popup | newtab | background.
case $1 in
"newtab")
watch_newtab
;;
"popup")
watch_popup
;;
"background")
watch_background
;;
*)
echo "command not found"
;;
esac
| true
|
857c2e243f9ed73075dc036e2fb5d8d8b05e5b80
|
Shell
|
apigee/consumer-data-standards-au
|
/setup/undeployOpenBankingAU.sh
|
UTF-8
| 5,434
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#/bin/bash
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#### Utility functions
# Undeploy and delete an apiproxy or sharedflow
# Undeploy and then delete a single Apigee artefact.
# Globals (read): APIGEE_ORG, APIGEE_ENV, APIGEE_USER, APIGEE_PASSWORD
# Arguments: $1 - artefact type: "Apiproxy" uses the plain apigeetool
#                 commands; anything else uses the *Sharedflow variants
#            $2 - artefact name
function undeploy_and_delete {
  local artefact_type="$1"
  local artefact_name="$2"
  local tool_cmd_suffix
  if [ "$artefact_type" = "Apiproxy" ]; then
    tool_cmd_suffix=""
  else
    tool_cmd_suffix="Sharedflow"
  fi
  echo "Suffix = $tool_cmd_suffix"
  echo "--->" Undeploying "$artefact_name" "$artefact_type"
  apigeetool "undeploy$tool_cmd_suffix" -o "$APIGEE_ORG" -e "$APIGEE_ENV" -u "$APIGEE_USER" -p "$APIGEE_PASSWORD" -n "$artefact_name"
  echo "--->" Deleting "$artefact_name" "$artefact_type"
  apigeetool "delete$tool_cmd_suffix" -o "$APIGEE_ORG" -u "$APIGEE_USER" -p "$APIGEE_PASSWORD" -n "$artefact_name"
}
#### End utility functions
# If no developer name has been set use a default
if [ -z "$CDS_TEST_DEVELOPER_EMAIL" ]; then CDS_TEST_DEVELOPER_EMAIL=CDS-Test-Developer@somefictitioustestcompany.com; fi;
CDS_REGISTER_TEST_DEVELOPER_EMAIL=CDR-Register-Test-Developer@somefictitioustestcompany.com
# Remove test app
# NOTE(review): the message below says "CDRTestApp" but the app actually
# deleted is CDSTestApp - confirm which name is correct.
echo "--->" Removing Test App: CDRTestApp...
apigeetool deleteApp -o $APIGEE_ORG -u $APIGEE_USER -p $APIGEE_PASSWORD --email $CDS_TEST_DEVELOPER_EMAIL --name CDSTestApp
# Remove CDR Register test app
echo "--->" Removing CDR Register Test App: CDRRegisterTestApp...
apigeetool deleteApp -o $APIGEE_ORG -u $APIGEE_USER -p $APIGEE_PASSWORD --email $CDS_REGISTER_TEST_DEVELOPER_EMAIL --name CDRRegisterTestApp
# Remove test developer
echo "--->" Removing Test Developer: $CDS_TEST_DEVELOPER_EMAIL
# NOTE(review): "-username" (single dash, long word) differs from the "-u"
# used elsewhere - verify apigeetool accepts it.
apigeetool deleteDeveloper -o $APIGEE_ORG -username $APIGEE_USER -p $APIGEE_PASSWORD --email $CDS_TEST_DEVELOPER_EMAIL
# Remove CDR Register test developer
echo "--->" Removing CDR Register Test Developer: $CDS_REGISTER_TEST_DEVELOPER_EMAIL
apigeetool deleteDeveloper -o $APIGEE_ORG -username $APIGEE_USER -p $APIGEE_PASSWORD --email $CDS_REGISTER_TEST_DEVELOPER_EMAIL
# Remove products
echo "--->" Removing API Product "Accounts"
apigeetool deleteProduct -o $APIGEE_ORG -u $APIGEE_USER -p $APIGEE_PASSWORD --productName "CDSAccounts"
echo "--->" Removing API Product "Transactions"
apigeetool deleteProduct -o $APIGEE_ORG -u $APIGEE_USER -p $APIGEE_PASSWORD --productName "CDSTransactions"
echo "--->" Removing API Product "OIDC"
apigeetool deleteProduct -o $APIGEE_ORG -u $APIGEE_USER -p $APIGEE_PASSWORD --productName "CDSOIDC"
echo "--->" Removing API Product "DynamicClientRegistration"
apigeetool deleteProduct -o $APIGEE_ORG -u $APIGEE_USER -p $APIGEE_PASSWORD --productName "CDSDynamicClientRegistration"
echo "--->" Removing API Product "Admin"
apigeetool deleteProduct -o $APIGEE_ORG -u $APIGEE_USER -p $APIGEE_PASSWORD --productName "CDSAdmin"
# Remove KVMs (key-value maps) in the target environment
echo "--->" Removing KVM CDSConfig
apigeetool deleteKVMmap -o $APIGEE_ORG -e $APIGEE_ENV -u $APIGEE_USER -p $APIGEE_PASSWORD --mapName CDSConfig
echo "--->" Removing KVM mockCDRRegister
apigeetool deleteKVMmap -o $APIGEE_ORG -e $APIGEE_ENV -u $APIGEE_USER -p $APIGEE_PASSWORD --mapName mockCDRRegister
echo "--->" Removing KVM mockADRClient
apigeetool deleteKVMmap -o $APIGEE_ORG -e $APIGEE_ENV -u $APIGEE_USER -p $APIGEE_PASSWORD --mapName mockADRClient
echo "--->" Deleting dynamic KVM CDSConfig...
# NOTE(review): message says "CDSConfig" but the map deleted is "Consents".
apigeetool deleteKVMmap -u $APIGEE_USER -p $APIGEE_PASSWORD -o $APIGEE_ORG -e $APIGEE_ENV --mapName Consents
# Undeploy each apiproxy / sharedflow directory under src/.
# Fix: iterate with shell globs instead of parsing `ls` output (which breaks
# on whitespace), and fail fast if a cd target is missing so the loops never
# run against the wrong directory.
# Undeploy banking apiproxies
cd src/apiproxies/banking || exit 1
for ap in *; do
  [ -e "$ap" ] || continue   # empty directory: glob stays literal
  undeploy_and_delete "Apiproxy" "$ap"
done
# Undeploy common apiproxies
cd ../common || exit 1
for ap in *; do
  [ -e "$ap" ] || continue
  undeploy_and_delete "Apiproxy" "$ap"
done
# Undeploy authn/authz related proxies
cd ../authnz || exit 1
for ap in *; do
  [ -e "$ap" ] || continue
  undeploy_and_delete "Apiproxy" "$ap"
done
# Undeploy CDS-Admin proxy
cd ../admin/CDS-Admin || exit 1
undeploy_and_delete "Apiproxy" CDS-Admin
# Undeploy Client Dynamic Registration proxy and the accompanying mock-register and mock-adr-client proxies
cd ../../dynamic-client-registration || exit 1
for ap in *; do
  [ -e "$ap" ] || continue
  undeploy_and_delete "Apiproxy" "$ap"
done
# Undeploy Shared flows
cd ../../shared-flows || exit 1
for sf in *; do
  [ -e "$sf" ] || continue
  undeploy_and_delete "Sharedflow" "$sf"
done
# Revert to original directory
cd ../../..
# Delete caches and dynamic KVMs used by the oidc proxy
echo "--->" Deleting cache OIDCState...
apigeetool deletecache -u $APIGEE_USER -p $APIGEE_PASSWORD -o $APIGEE_ORG -e $APIGEE_ENV -z OIDCState
echo "--->" Deleting cache PushedAuthReqs...
apigeetool deletecache -u $APIGEE_USER -p $APIGEE_PASSWORD -o $APIGEE_ORG -e $APIGEE_ENV -z PushedAuthReqs
echo "--->" Deleting dynamic KVM PPIDs...
apigeetool deleteKVMmap -u $APIGEE_USER -p $APIGEE_PASSWORD -o $APIGEE_ORG -e $APIGEE_ENV --mapName PPIDs
echo "--->" Deleting dynamic KVM TokensIssuedForConsent...
apigeetool deleteKVMmap -u $APIGEE_USER -p $APIGEE_PASSWORD -o $APIGEE_ORG -e $APIGEE_ENV --mapName TokensIssuedForConsent
# Delete cache used by basic consent management proxy
echo "--->" Deleting cache ConsentState...
apigeetool deletecache -u $APIGEE_USER -p $APIGEE_PASSWORD -o $APIGEE_ORG -e $APIGEE_ENV -z ConsentState
| true
|
ddfbc9be6209946d6490043f39f5416baf13d846
|
Shell
|
linuxandroid/syno5builder
|
/rmdisk_32/usr/syno/etc/rc.d/J30DisableNCQ.sh
|
UTF-8
| 441
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# Disable NCQ (set queue_depth to 1) on every /sys/block/sd* disk whose
# model string matches an entry in the blacklist file.
# Exit status: 0 if at least one disk was changed, 1 otherwise.
# Fixes vs. original: shebang was "#/bin/sh" (a plain comment, not a
# shebang); command substitutions modernized; expansions quoted where the
# value is a single path (the two for-loops deliberately rely on unquoted
# word-splitting / globbing).
DISK_PATH=/sys/block/sd*/device/
NCQ_OFF_LIST=/usr/syno/etc/ncq_off_list
NCQOFF_DISK=$(cat "$NCQ_OFF_LIST")
NCQ_RET=1
for i in $DISK_PATH
do
	Disk=$(cat "$i/model")
	for j in $NCQOFF_DISK
	do
		# Model match is a substring grep, same as the original.
		echo "$Disk" | grep "$j" >/dev/null 2>&1
		ret=$?
		if [ 0 -eq $ret ]; then
			NCQ_Value=$(cat "$i/queue_depth")
			# Only touch disks that are not already at depth 1.
			if [ ! 1 -eq "$NCQ_Value" ]; then
				NCQ_RET=0
				echo Disable "$j" NCQ
				echo 1 > "$i/queue_depth"
			fi
		fi
	done
done
exit $NCQ_RET
| true
|
0ce2403a58b469c781a351937b057f2990ac9f0d
|
Shell
|
deanberris/vim-foo
|
/setup.sh
|
UTF-8
| 682
| 2.6875
| 3
|
[
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# Copyright 2009 (c) Dean Michael Berris
# Distributed under the Boost Software License 1.0
# See LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt
#
# Symlink this repository's vim configuration into $HOME (backing up any
# existing ~/.vim and ~/.vimrc), build YouCompleteMe, then install the
# Vundle and pathogen bundles.
# Fixes vs. original: $(...) instead of backticks; all path expansions
# quoted so directories with spaces work.
CWD=$(pwd)
if [ -d "$HOME/.vim" ]; then
    echo "Moving existing ~/.vim directory to ~/.vim-old"
    mv "$HOME/.vim" "$HOME/.vim-old"
fi
ln -s "$CWD/dot-vim" "$HOME/.vim"
# Install YCM -- this may take a while.
cd "$CWD/dot-vim/bundle/YouCompleteMe" && ./install.sh --clang-completer && cd "$CWD"
if [ -e "$HOME/.vimrc" ]; then
    echo "Moving existing ~/.vimrc to ~/.vimrc-old"
    mv "$HOME/.vimrc" "$HOME/.vimrc-old"
fi
ln -s "$CWD/vimrc" "$HOME/.vimrc"
# Set up the Vundle bundles.
vim +BundleInstall +qall
# Set up the pathogen bundles.
vim +Helptags +qall
| true
|
21a515975e538212b8c2971235d1d19ff5ad5831
|
Shell
|
akhayyat/dotfiles
|
/hg/cwdiff
|
UTF-8
| 8,288
| 3.359375
| 3
|
[] |
no_license
|
#! /bin/bash
#(C) 2009-2012 C. Junghans
# junghans@votca.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#version 0.1 04.08.10 -- initial version
#version 0.1.1, 05.08.10 -- added -D and -o
#version 0.1.2, 04.10.10 -- make -D work again and better help
#version 0.1.3, 10.05.11 -- added --text to diff_opts to allow diff of binary files (issue #3)
#version 0.1.4, 10.05.11 -- removed --text again and handle case of binary files
#version 0.2.0, 15.04.12 -- clean up + bugfix thanks to Arne
#version 0.2.1, 15.04.12 -- clean up + new bug report address
#version 0.2.2, 20.04.12 -- fixed a bug if argument were dirs
#version 0.2.3, 18.12.12 -- replace usage of mktemp with bash built-ins
#version 0.2.4, 31.08.13 -- fixed deleted leading spaces
#version 0.2.5, 06.09.13 -- do not expand escape sequences in diff
#version 0.2.6, 18.09.13 -- allow 1st argument begin a file with --filter
FAT_GREEN="[32;01m"
GREEN="[32m"
FAT_RED="[31;01m"
RED="[31m"
MAGENTA="[35m"
FAT_BLACK="[1m"
OFF="[0m"
NL="
"
usage="Usage: ${0##*/} [OPTIONS] FILE1 FILE2"
quiet="no"
diff="no"
filter="no"
diff_opts='--new-file --unified --show-c-function --recursive'
ab="no"
out="-"
style="wdiff"
a2ps=""
a2ps_opts="--prologue=color"
color="yes"
# Colorize wdiff markers ([-removed-] / {+added+}) from file $1, or from
# stdin when called without an argument.
# Globals (read): a2ps (non-empty => emit a2ps wd tags), color (yes/no),
#                 RED, GREEN, OFF (ANSI sequences; may be empty).
# Fix: ${1:+"$1"} quotes the filename (spaces-safe) while still expanding
# to nothing when no argument is given, preserving the read-stdin behavior.
color_filter() {
  if [[ $a2ps ]]; then
    #seems like a bug in the a2ps style file: it wants wd- / wd+ tags
    sed -e "s/\[-/[wd-/g" -e "s/-\]/-wd]/g" -e "s/{+/{wd+/g" -e "s/+}/+wd}/g" ${1:+"$1"}
  elif [[ $color = yes ]]; then
    sed -e "s/\[-/$RED/g" -e "s/-\]/$OFF/g" -e "s/{+/$GREEN/g" -e "s/+}/$OFF/g" ${1:+"$1"}
  else
    cat ${1:+"$1"}
  fi
}
# Print an error message (if any) to stderr and abort with status 1.
# Fix: the original wrote the message to stdout, polluting the diff output.
die() {
  [[ $* ]] && echo "$*" >&2
  exit 1
}
# Echo the arguments unless quiet mode is active (global $quiet == "yes").
qecho() {
  if [[ $quiet != yes ]]; then
    echo "$*"
  fi
}
show_help () {
# Print usage text to stdout. The heredoc delimiter is unquoted, so
# $usage and ${0##*/} are expanded at print time.
cat << eof
A colorized version of wdiff
$usage
OPTIONS:
-f, --filter Act as a wdiff color filter only and don't excute diff/wdiff
internally, just colorize input (no ARGS = read from stdin)
-d, --diff Preprocess input with diff and before giving it to wdiff
(very useful for dirs). Option can be used in combination
with --filter option meaning input is a patch.
-D, --diffonly Process input with diff, but NOT with wdiff, so ${0##*/}
basically acts like a colorized version of diff. Option
can be used in combination with --filter option meaning
input is a patch.
--diffopts XXX Change opts of diff
Default: '$diff_opts'
--ab replace trunc of dirname by 'a' and 'b'
--no-color Disable color
(implies --a2psopts '--prologue=ul')
-a, --a2ps Pipe the output to a2ps, which will produce ps code
(also work with --filter)
--a2psopts XXX Change opts of a2ps
Default: '$a2ps_opts'
-o, --out FILE Change output file
Default: stdout
-- Stop parsing options
-h, --help Show this help
-v, --version Show version
--hg Show last log message for hg (or cvs)
Examples: ${0##*/} -d dir1 dir2
${0##*/} file1 file2
${0##*/} --ab -D dir1 dir2
${0##*/} -a --ab -D dir1 dir2 > file.ps
wdiff file1 file2 | ${0##*/} -f
diff -u file1 file2 | ${0##*/} -D -f
Report bugs and comments at https://code.google.com/p/cj-overlay/issues/list
or junghans@votca.org
eof
}
shopt -s extglob
# Option parsing. Before the case statement each argument is normalized:
#   --xx=yy  is split into  --xx yy
#   -xy      is split into  -x y   (for short options taking an argument)
#            or             -x -y  (for bundled flag options)
while [[ ${1} = -?* ]]; do
if [[ ${1} = --??*=* ]]; then # case --xx=yy
set -- "${1%%=*}" "${1#*=}" "${@:2}" # --xx=yy to --xx yy
elif [[ ${1} = -[^-]?* ]]; then # case -xy split
if [[ ${1} = -[o]* ]]; then #short opts with arguments
set -- "${1:0:2}" "${1:2}" "${@:2}" # -xy to -x y
else #short opts without arguments
set -- "${1:0:2}" "-${1:2}" "${@:2}" # -xy to -x -y
fi
fi
case $1 in
--ab)
ab="yes"
shift;;
--no-color)
# Drop the ANSI escape variables entirely so color_filter emits nothing.
unset FAT_GREEN GREEN FAT_RED RED MAGENTA OFF FAT_BLACK
a2ps_opts="--prologue=ul"
color="no"
shift;;
-f | --filter)
filter="yes"
shift ;;
-d | --diff)
diff="yes"
shift ;;
-D | --diffonly)
diff="only"
shift ;;
-a | --a2ps)
# a2ps mode also disables ANSI colors; tags are inserted instead.
a2ps="a2ps"
unset FAT_GREEN GREEN FAT_RED RED MAGENTA OFF FAT_BLACK
shift ;;
--a2psopts)
a2ps_opts="$2"
shift 2;;
-o | --out)
out="$2"
[[ ! $out ]] && die "Missing filename after --out option"
shift 2;;
--diffopts)
# NOTE(review): only shifts once, so the option value stays in $1 -
# compare with --a2psopts/--out which shift 2; verify intended.
diff_opts="$2"
shift ;;
-q | --quiet)
quiet="yes"
shift ;;
-h | --help)
show_help
exit 0;;
--hg)
# Extract the last "#version ... -- message" comment from this script.
echo "${0##*/}: $(sed -ne 's/^#version.* -- \(.*$\)/\1/p' $0 | sed -n '$p')"
exit 0;;
-v | --version)
echo "${0##*/}, $(sed -ne 's/^#\(version.*\) -- .*$/\1/p' $0 | sed -n '$p') by C. Junghans"
exit 0;;
--)
shift
break;;
*)
die "Unknown option '$1'";;
esac
done
# A lone "-" argument means: act as a filter (read from stdin).
[[ $1 = - ]] && filter="yes" && shift
# Validate positional arguments for the chosen mode.
if [[ $filter = no ]]; then
[[ ! $1 || ! $2 ]] && die "Please specify two files/dirs or add --filter option"
#use -e as it could be file or dir
[[ -e $1 ]] || die "Could not read file/dir '$1'"
[[ -e $2 ]] || die "Could not read file/dir '$2'"
[[ $3 ]] && die "I don't know what to do with arguments '${@:3}'"
else
# Filter mode takes at most one input file (otherwise stdin).
[[ -n $1 && ! -f $1 ]] && die "Could not read file '$1'"
[[ $2 ]] && die "I don't know what to do with arguments '$*' together --filter option"
fi
# Main pipeline. In diff mode, parse a unified diff line by line on fd 3:
# context lines flush accumulated -/+ hunks (t1/t2) through wdiff, header
# lines are recolored, and in --diffonly mode -/+ lines are colored
# directly. The whole construct is finally piped to a2ps or to $out.
if [[ $diff != no ]]; then
if [[ $filter = no ]]; then
exec 3< <(diff $diff_opts "$1" "$2")
#don't die here, because diff of binary files give exit code = 2
else
[[ $1 ]] && exec 3<$1 || exec 3<&0
fi
# don't do this if we have not files ;-)
if [[ $ab = yes && $1 && $2 ]]; then
#find the longest equal part in $1 and $2 from the end
for ((i=1;i<=(${#1}<${#2}?${#1}:${#2});i++)); do
[[ ${1:0-$i} != ${2:0-$i} ]] && break
done
((i--))
a="${1:0:${#1}-$i}"
b="${2:0:${#2}-$i}"
else
a=a; b=b
fi
# -r to not expand escape sequences in diff
while read -r -u 3; do
#small trick, because "read -u 3 i" would split the line and
#leading space would be lost.
i="${REPLY}"
if [[ $i = "Files "*" and "*" differ" ]]; then # binary case
i="${i/$a/a}"
i="${i/$b/b}"
echo "$i"
elif [[ $i = diff* ]]; then # diff header line
i="${i/ $diff_opts}"
i="${i/$a/a}"
echo "$FAT_BLACK${i/$b/b}$OFF"
elif [[ $i = ---* ]]; then
echo "${FAT_RED}${i/$a/a}${OFF}"
elif [[ $i = +++* ]]; then
echo "${FAT_GREEN}${i/$b/b}${OFF}"
elif [[ $i = @@* ]]; then
echo "${MAGENTA}${i}${OFF}"
elif [[ $i = -* ]]; then
[[ $diff = only ]] && echo "${RED}${i}${OFF}" || t1+="${i#-}$NL"
elif [[ $i = +* ]]; then
[[ $diff = only ]] && echo "${GREEN}${i}${OFF}" || t2+="${i#+}$NL"
else
# only true for diff != only
# cut the last newline do avoid an empty line at the end (echo append newline)
# echo -n would also work, but wdiff has strange behaviour if the 2nd file is
# empty, it will not append newline, which make the output look strange
[[ $t1 || $t2 ]] && { wdiff <(echo "${t1%$NL}") <(echo "${t2%$NL}") | color_filter; }
t1=
t2=
[[ $diff = only ]] && echo "${i}" || echo "${i## }"
fi
done
# thanks to Arne Babenhauserheide for pointing out this case is missing
# if there was + or - lines at the end, which has not been printed yet
[[ $t1 || $t2 ]] && { wdiff <(echo "${t1%$NL}") <(echo "${t2%$NL}") | color_filter; }
elif [[ $filter = yes ]]; then
color_filter $1
else
wdiff "$1" "$2" | color_filter
fi | if [[ $a2ps ]]; then
a2ps $a2ps_opts "--pretty-print=$style" -o "$out"
else
[[ $out = - ]] && cat || cat > "$out"
fi
| true
|
6e14f59e011ff93c719beaa668e6e1a91d1176b7
|
Shell
|
linorobot/lino_install
|
/install
|
UTF-8
| 6,132
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Linorobot installer: abort on any command failure.
set -e
# Source whichever single ROS distro is installed under /opt/ros.
source /opt/ros/$(dir /opt/ros)/setup.bash
# Install the Teensy udev rules needed to flash the firmware.
wget https://www.pjrc.com/teensy/00-teensy.rules
sudo cp 00-teensy.rules /etc/udev/rules.d/
ROSDISTRO="$(rosversion -d)"
BASE=$1      # robot base type (2wd/4wd/ackermann/mecanum)
SENSOR=$2    # laser/depth sensor type
ARCH="$(uname -m)"
echo $ARCH
echo "
______ _____________ _________ ________ _______ ________ _______ ________
___ / ____ _/___ | / /__ __ \___ __ \__ __ \___ __ )__ __ \___ __/
__ / __ / __ |/ / _ / / /__ /_/ /_ / / /__ __ |_ / / /__ /
_ /_____/ / _ /| / / /_/ / _ _, _/ / /_/ / _ /_/ / / /_/ / _ /
/_____//___/ /_/ |_/ \____/ /_/ |_| \____/ /_____/ \____/ /_/
http://linorobot.org
"
# Validate the base/sensor arguments and confirm with the user
# (skipped entirely when $3 == "test", e.g. in CI).
# Fix: the sensor check now accepts "lms1xx", which the help text below
# lists as valid and which the install steps further down support.
if [ "$3" != "test" ]
then
    if [ "$*" == "" ]
    then
        echo "No arguments provided"
        echo
        echo "Example: $ ./install.sh 2wd xv11"
        echo
        exit 1
    elif [[ "$1" != "2wd" && "$1" != "4wd" && "$1" != "mecanum" && "$1" != "ackermann" ]]
    then
        echo "Invalid linorobot base: $1"
        echo
        echo "Valid Options:"
        echo "2wd"
        echo "4wd"
        echo "ackermann"
        echo "mecanum"
        echo
        exit 1
    elif [[ "$2" != "xv11" && "$2" != "rplidar" && "$2" != "ydlidar" && "$2" != "hokuyo" && "$2" != "kinect" && "$2" != "realsense" && "$2" != "lms1xx" ]]
    then
        echo "Invalid linorobot sensor: $2"
        echo
        echo "Valid Options:"
        echo "hokuyo"
        echo "kinect"
        echo "lms1xx"
        echo "realsense"
        echo "rplidar"
        echo "xv11"
        echo "ydlidar"
        echo
        exit 1
    elif [[ "$ARCH" != "x86_64" && "$2" == "realsense" ]]
    then
        echo "Intel Realsense R200 is not supported in $ARCH architecture."
        exit 1
    fi
    echo
    echo -n "You are installing ROS-$ROSDISTRO Linorobot for $BASE base with a $SENSOR sensor. Enter [y] to continue. "
    read reply
    if [[ "$reply" != "y" && "$reply" != "Y" ]]
    then
        echo "Wrong input. Exiting now"
        exit 1
    fi
fi
echo
echo "INSTALLING NOW...."
echo
# Base system packages, platformio toolchain, and core ROS packages.
sudo apt-get update
sudo apt-get install -y \
avahi-daemon \
openssh-server \
python-setuptools \
python-dev \
build-essential \
python-gudev
sudo easy_install pip
sudo python2.7 -m pip install -U platformio
# Remove any stale platformio state from a previous install.
sudo rm -rf $HOME/.platformio/
source /opt/ros/$ROSDISTRO/setup.bash
# Create the catkin workspace used for all linorobot packages.
cd $HOME
mkdir -p linorobot_ws/src
cd $HOME/linorobot_ws/src
catkin_init_workspace
sudo apt-get install -y \
ros-$ROSDISTRO-roslint \
ros-$ROSDISTRO-rosserial \
ros-$ROSDISTRO-rosserial-arduino \
ros-$ROSDISTRO-imu-filter-madgwick \
ros-$ROSDISTRO-gmapping \
ros-$ROSDISTRO-map-server \
ros-$ROSDISTRO-navigation \
ros-$ROSDISTRO-robot-localization \
ros-$ROSDISTRO-tf2 \
ros-$ROSDISTRO-tf2-ros
# In test mode install every supported sensor driver; otherwise install
# only what the chosen $SENSOR (and $BASE) needs.
if [[ "$3" == "test" ]]
then
sudo apt-get install -y \
ros-$ROSDISTRO-xv-11-laser-driver \
ros-$ROSDISTRO-rplidar-ros \
ros-$ROSDISTRO-urg-node \
ros-$ROSDISTRO-lms1xx \
ros-$ROSDISTRO-freenect-launch \
ros-$ROSDISTRO-depthimage-to-laserscan \
ros-$ROSDISTRO-teb-local-planner
cd $HOME/linorobot_ws/src
git clone https://github.com/EAIBOT/ydlidar.git
else
if [[ "$SENSOR" == "hokuyo" ]]
then
sudo apt-get install -y ros-$ROSDISTRO-urg-node
hokuyoip=
echo ""
# Ethernet hokuyos need an IP; serial ones leave LIDARIP empty.
echo -n "Input your hokuyo IP. Press Enter to skip (Serial Based LIDAR): "
read hokuyoip
echo "export LIDARIP=$hokuyoip" >> $HOME/.bashrc
elif [[ "$SENSOR" == "kinect" ]]
then
sudo apt-get install -y ros-$ROSDISTRO-freenect-launch
sudo apt-get install -y ros-$ROSDISTRO-depthimage-to-laserscan
elif [[ "$SENSOR" == "lms1xx" ]]
then
sudo apt-get install -y ros-$ROSDISTRO-lms1xx
echo ""
echo -n "Input your LMS1xx IP: "
read lms1xxip
echo "export LIDARIP=$lms1xxip" >> $HOME/.bashrc
elif [[ "$SENSOR" == "realsense" ]]
then
sudo apt-get install -y ros-$ROSDISTRO-realsense-camera
sudo apt-get install -y ros-$ROSDISTRO-depthimage-to-laserscan
elif [[ "$SENSOR" == "rplidar" ]]
then
sudo apt-get install -y ros-$ROSDISTRO-rplidar-ros
elif [[ "$SENSOR" == "xv11" ]]
then
sudo apt-get install -y ros-$ROSDISTRO-xv-11-laser-driver
elif [[ "$SENSOR" == "ydlidar" ]]
then
# ydlidar has no apt package; build it from source in the workspace.
cd $HOME/linorobot_ws/src
git clone https://github.com/EAIBOT/ydlidar.git
fi
# Ackermann steering uses the TEB local planner.
if [[ "$BASE" == "ackermann" ]]
then
sudo apt-get install -y ros-$ROSDISTRO-teb-local-planner
fi
fi
# Clone the linorobot packages, build the Teensy firmware with platformio,
# then build the catkin workspace.
cd "$HOME/linorobot_ws/src"
git clone https://github.com/linorobot/linorobot.git
git clone https://github.com/linorobot/imu_calib.git
git clone https://github.com/linorobot/lino_pid.git
git clone https://github.com/linorobot/lino_udev.git
git clone https://github.com/linorobot/lino_msgs.git
cd "$HOME/linorobot_ws/src/linorobot"
# BUG FIX: the original line was  TRAVIS_BRANCH="echo $TRAVIS_BRANCH"
# which stored the literal text "echo ..." in the variable, so the devel
# checkout below could never trigger. Use the env var directly instead.
if [ "${TRAVIS_BRANCH:-}" = "devel" ]; then git checkout devel; fi
cd "$HOME/linorobot_ws/src/linorobot/teensy/firmware"
export PLATFORMIO_CI_SRC=$PWD/src/firmware.ino
platformio ci --project-conf=./platformio.ini --lib="./lib/ros_lib" --lib="./lib/config" --lib="./lib/motor" --lib="./lib/kinematics" --lib="./lib/pid" --lib="./lib/imu" --lib="./lib/encoder"
# Persist workspace setup and chosen hardware in the user's shell profile.
echo "source $HOME/linorobot_ws/devel/setup.bash" >> "$HOME/.bashrc"
echo "export LINOLIDAR=$SENSOR" >> "$HOME/.bashrc"
echo "export LINOBASE=$BASE" >> "$HOME/.bashrc"
source "$HOME/.bashrc"
cd "$HOME/linorobot_ws"
# Build messages first so generated headers exist for the full build.
catkin_make --pkg lino_msgs
catkin_make
echo
echo "INSTALLATION DONE!"
echo
| true
|
6166fc71c98f1b129d683fc1ed7325ab38c773d6
|
Shell
|
patinnc/60secs
|
/gen_report.sh
|
UTF-8
| 48,325
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# expected to be run from /root folder
# gets the results from the most recent runs of specint, specjbb, stream, geekbench, fio, vdbench, sysinfo
export LANGUAGE=en_US.UTF-8
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export LC_CTYPE=en_US.UTF-8
# NOTE(review): this overrides the LC_ALL=en_US.UTF-8 exported above, so
# the effective locale for child tools is "C" - confirm intended.
export LC_ALL=C
# Directory containing this script (computed again further below).
SCR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Prefer a bundled gawk on Linux; use system gawk on macOS.
AWK_BIN=awk
if [ -e $SCR_DIR/bin/gawk ]; then
if [[ "$OSTYPE" == "linux-gnu"* ]]; then
AWK_BIN=$SCR_DIR/bin/gawk
fi
fi
if [[ "$OSTYPE" == "darwin"* ]]; then
# Mac OSX
AWK_BIN=gawk
fi
echo "$0.$LINENO awk_bin= $AWK_BIN" > /dev/stderr
# Defaults; may be overridden by command-line options below.
PROJ_DIR=/root/output
VERBOSE=0
SKU="n/a"
SKU_MAKE="n/a"
SKU_MODEL="n/a"
# Per-benchmark accumulator state. *_key holds tab-separated column
# headers, *_val the matching tab-separated values.
got_fio=0
fio_key="fio_4K_randomread\tfio_1M_seq_read\tfio_4K_randomwrite\tfio_1M_seq_write"
fio_val="\t\t\t"
fiodisk_key=
fiodisk_val=
ALL_DIRS=1
CMB_FILE=
HOST=
NUM_HOST=
HOST_ARR_I=-1
IFS_SV=$IFS
#declare -A gb_arr
#declare -A cm_arr
declare -A HOST_ARR
#HOST_ARR=()
gb_arr1=()
gb_arr2=()
gb_arr3=()
cm_arr=()
cm_lines=-1
#SCR_DIR=`dirname "$(readlink -f "$0")"`
# NOTE(review): SCR_DIR was already set near the top of the file.
SCR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Benchmarks recognized in result-directory names.
BM_LIST="specint specjbb coremark stream vdbench geekbench sysinfo fio"
echo "$0: +++++cmdline= ${@}"
export AWKPATH=$SCR_DIR
# Command-line options.
# NOTE(review): the optstring declares "r:" but no case arm handles 'r',
# so -r ARG is silently ignored.
while getopts "hvaH:m:N:p:r:s:t:z:" opt; do
case ${opt} in
a )
ALL_DIRS=1
;;
H )
HOST=$OPTARG
;;
m )
SKU_MAKE=$OPTARG
;;
N )
NUM_HOST=$OPTARG
;;
p )
PROJ_DIR=$OPTARG
;;
s )
SKU=$OPTARG
;;
t )
SKU_MODEL=$OPTARG
;;
z )
CMB_FILE=$OPTARG
;;
v )
VERBOSE=$((VERBOSE+1))
;;
h )
echo "$0 get results on a host in /root/output/* or (if -p proj_dir) /proj_dir/"
echo "Usage: $0 [-h] [ -p proj_dir] [-v]"
echo " -p project_dir"
echo " by default the host results dir name /root/output"
echo " If you specify a project dir the benchmark results are looked for"
echo " under /project_dir/"
echo " -s sku for host (from clusto info host 'Sku:' field)"
echo " -m sku make (like 'Dell') for host (from clusto info host 'Make:' field)"
echo " -t sku model (like 'R630') for host (from clusto info host 'Model:' field)"
echo " -v verbose mode"
exit
;;
: )
echo "Invalid option: $OPTARG requires an argument" 1>&2
;;
\? )
echo "Invalid option: $OPTARG" 1>&2
;;
esac
done
shift $((OPTIND -1))
ORIG_DIR=`pwd`
echo "$0.$LINENO ORIG_DIR= $ORIG_DIR" > /dev/stderr
echo "$0.$LINENO PROJ_DIR= $PROJ_DIR" > /dev/stderr
pushd $PROJ_DIR
# NOTE(review): this array assignment is immediately overwritten by the
# `ls -1` string on the next line.
LIST=(*)
LIST=`ls -1`
# Collect candidate result directories under PROJ_DIR.
dirs=()
for f in $LIST; do
echo $f
dirs+=( "$f" )
done
DEF_BM=
got_any_bm=0
# If PROJ_DIR's own path names a benchmark, remember it as the default.
d="$PROJ_DIR"
for get_bm in $BM_LIST; do
#if [ $VERBOSE -gt 0 ]; then
# echo "ck bm= $get_bm in dir $d" > /dev/stderr
#fi
RC=`awk -v bm="$get_bm" -v path="$d" 'BEGIN{if (index(path, bm) > 0) { rc=1; } else { rc=0; };printf("%d\n", rc);exit}'`
if [ "$RC" == "1" ]; then
if [ $VERBOSE -gt 0 ]; then
echo "got bm= $get_bm in dir $d" > /dev/stderr
fi
let got_any_bm=($got_any_bm+1)
DEF_BM=$get_bm
#break
fi
done
echo "___default bm= $DEF_BM, found $got_any_bm benchmark names, from proj_dir= $d" > /dev/stderr
# Fallback: search for timestamped or plainly-named benchmark directories.
if [ "$got_any_bm" == "0" ]; then
dirs=()
for i in $BM_LIST; do
FND=`find . -type d -name "??-??-??_*${i}" | sort`
if [ "$FND" != "" ]; then
echo "FND_dir=$FND" > /dev/stderr
dirs+=($FND)
fi
FND=`find . -type d -name "${i}" | sort`
if [ "$FND" != "" ]; then
echo "FND_dir=$FND" > /dev/stderr
dirs+=($FND)
fi
done
echo "dirs= ${dirs[@]}" > /dev/stderr
fi
echo "$0.$LINENO DIRS= ${dirs[@]}" > /dev/stderr
# For every result directory, work out which benchmark it belongs to
# (BM[i]) and its start time as an epoch (TS[i]); TSE[j] is the end time
# of the previous directory (one second before the next one starts).
# BUG FIX: the awk dt_str used "hh" twice so the minutes field (mm) was
# never used and every computed epoch had minutes==hours; mm is now used.
for ((i=0; i<${#dirs[@]}; i++)); do
#20-03-31_182513_sysinfo
d=${dirs[$i]}
bm=${d##*_}
v=`basename ${dirs[$i]}`
# above assumes a certain naming convention for results directories
got_it=0
for get_bm in $BM_LIST; do
if [ "$get_bm" == "$v" ]; then
echo "$0.$LINENO got bm $get_bm in path ${dirs[$i]}" > /dev/stderr
bm=$get_bm
got_it=1
break
fi
if [ "$get_bm" == "$bm" ]; then
echo "$0.$LINENO got bm $get_bm in path ${dirs[$i]}" > /dev/stderr
got_it=1
break
fi
done
if [ $got_it -eq 0 ]; then
bm=
fi
# Second chance: substring match anywhere in the path.
for get_bm in $BM_LIST; do
if [ $VERBOSE -gt 0 ]; then
echo "ck bm= $get_bm in dir $d, def_bm= $DEF_BM"
fi
RC=`awk -v bm="$get_bm" -v path="$d" 'BEGIN{if (index(path, bm) > 0) { rc=1; } else { rc=0; };printf("%d\n", rc);exit}'`
if [ "$RC" == "1" ]; then
if [ $VERBOSE -gt 0 ]; then
echo "$0.$LINENO got bm= $get_bm in dir $d" > /dev/stderr
fi
if [ "$bm" == "" ]; then
bm=$get_bm
fi
break
fi
done
if [ "$bm" == "" -a "$DEF_BM" != "" ]; then
bm=$DEF_BM
if [ $VERBOSE -gt 0 ]; then
echo "$0.$LINENO use def_bm2= $DEF_BM ================" > /dev/stderr
fi
fi
# Parse yy-mm-dd_hhmmss from the directory name into an epoch.
EPOCH=`echo $d | awk -v bm="$bm" '
/20-/{
yy="20" substr($0, 1, 2);
mn=substr($0, 4, 2);
dd=substr($0, 7, 2);
hh=substr($0, 10, 2);
mm=substr($0, 12, 2);
ss=substr($0, 14, 2);
dt_str = yy " " mn " " dd " " hh " " mm " " ss;
#printf("inp= %s dt_str= %s\n", $0, dt_str) > "/dev/stderr";
epoch = mktime(dt_str);
printf("%s\n", epoch);
}'`
TS[$i]=$EPOCH
BM[$i]=$bm
if [ $i -gt 0 ]; then
j=$((i-1))
TSE[$j]=$((EPOCH-1))
fi
done
# When combining results (-z), write one "benchmark begin end" line per
# result directory to a per-host phase file, preferring each directory's
# own phase.txt when present.
if [ "$CMB_FILE" != "" ]; then
PH_FILE=$ORIG_DIR/$CMB_FILE.phase.$HOST
if [ -e $PH_FILE ]; then
rm $PH_FILE
fi
echo "PH_FILE= $PH_FILE" > /dev/stderr
for ((i=0; i<${#dirs[@]}; i++)); do
#echo "dir[$i]= ${dirs[$i]} ${BM[$i]} ${TS[$i]} ${TSE[$i]}" >> /dev/stderr
tfl="${dirs[$i]}/phase.txt"
echo "$tfl " > /dev/stderr
if [ -e $tfl ]; then
# phase.txt line 1 gives benchmark+begin; line 2 gives the end time.
RESP=`awk '{if (NR == 1){bm= $2; tb= $3;} else { printf("%s %s %s\n", bm, tb, $3);exit}}' $tfl`
echo "$RESP" >> $PH_FILE
echo "got RESP= $RESP" > /dev/stderr
else
echo "${BM[$i]} ${TS[$i]} ${TSE[$i]}" >> $PH_FILE
fi
done
fi
#declare -A did_bmarks
did_bmarks=()
echo "dirs_i= ${#dirs[@]}"
for ((dirs_i=${#dirs[@]}-1; dirs_i>=0; dirs_i--)); do
echo -e "now get bm perf in dirs i= $dirs_i ${dirs[$dirs_i]}" > /dev/stderr
#if [ $VERBOSE -gt 0 ]; then
# echo -e "i= $dirs_i ${dirs[$dirs_i]}" > /dev/stderr
#fi
d=${dirs[$dirs_i]}
bm=${d##*_}
lbm=`basename $d`
got_it=0
for get_bm in $BM_LIST; do
if [ "$get_bm" == "$lbm" ]; then
echo "$0.$LINENO got bm $get_bm in dir $d"
bm=$get_bm
got_it=1
break
fi
if [ "$get_bm" == "$bm" ]; then
got_it=1
break
fi
done
if [ $got_it -eq 0 ]; then
bm=
fi
for get_bm in $BM_LIST; do
if [ $VERBOSE -gt 0 ]; then
echo "$0.$LINENO ck bm= $get_bm in dir $d" > /dev/stderr
fi
RC=`awk -v bm="$get_bm" -v path="$d" 'BEGIN{if (index(path, bm) > 0) { rc=1; } else { rc=0; };printf("%d\n", rc);exit}'`
if [ "$RC" == "1" ]; then
if [ $VERBOSE -gt 0 ]; then
echo "$0.$LINENO got bm= $get_bm in dir $d" > /dev/stderr
fi
#bm=$get_bm
break
fi
done
echo "ck def_bm3= $bm, def_bm= $DEF_BM ================"
if [ "$bm" == "" -a "$DEF_BM" != "" ]; then
bm=$DEF_BM
if [ $VERBOSE -gt 0 ]; then
echo "$0.$LINENO use def_bm3= $DEF_BM ================" > /dev/stderr
fi
fi
if [ $VERBOSE -gt 0 ]; then
echo -e "$0.$LINENO bmark4= $bm dir= $d" > /dev/stderr
fi
valid=0
arg1=""
arg2=""
if [ "$bm" == "specint" ]; then
file1=$d/result/CPU2017.001.log
file2=$d/result/CPU2017.001.intrate.refrate.txt
lkfor="Est. SPECrate2017_int_base"
if [ ! -e $file2 ]; then
file2=$d/result/CPU2017.001.intrate.txt
lkfor="SPECrate2017_int_base"
fi
if [ -e $file1 -a -e $file2 ]; then
arg1=`grep ' --copies ' $file1 | head -1 | sed 's/.*--copies //;' | awk '{print $1}'`
if [ "$arg1" == "" ]; then
arg1=`grep 'copies.*=' $file1 | head -1 | sed 's/.*=//;s/ //g'`
fi
arg2=`grep "$lkfor" $file2 | sed 's/.*int_base//; s/ //g'`
#/ Success 557.xz_r base refrate ratio=136.27, runtime=507.220031, copies=64, threads=1, power=0.00W, temp=0.00 degC, humidity=0.00%
#/ Success 557.xz_r base refrate ratio=/ {
arg3=`awk '/ Success 557.xz_r base refrate ratio=/{gsub(",","",$0);for(i=5;i<NF;i++){n=split($i,arr,"=");if (arr[1]=="copies"){printf("%s\n", arr[2]);exit;}}}' $file2`
if [ "$arg3" != "" -a "$arg1" != "" ]; then
echo "$0.$LINENO arg3= $arg3 arg1= $arg1" > /dev/stderr
if [ $arg3 -gt $arg1 ]; then
arg1=$arg3
fi
fi
#echo " got specint files, arg1= $arg1 arg2= $arg2"
if [ "$arg1" != "" -a "$arg2" != "" ]; then
valid=1
echo "=== specint_rate === $d"
key="++\tspecint_rate\tspecint_threads"
val="++\t$arg2\t$arg1"
if [ "$specint_key" == "" ]; then
specint_key=$key
specint_val=$val;
fi
echo -e "$key"
echo -e "$val"
echo "$0.$LINENO specint val= $specint_val vals= $val $d" > /dev/stderr
fi
fi
fi
if [ "$bm" == "sysinfo" ]; then
pushd $d
file1=sysinfo.txt
file2=spin_freq.txt
file3=spin_bw.txt
file4=spin_bw_remote.txt
if [ ! -e $file2 ]; then
file2=
fi
if [ ! -e $file3 ]; then
file3=
fi
if [ ! -e $file4 ]; then
file4=
fi
if [ ! -e $file2 ]; then
file2=
fi
if [ -e $file1 ]; then
barg=`$AWK_BIN -v scr_dir="$SCR_DIR" -v host_in="$HOST" '
BEGIN{i=0;dsks=0;got_os=0;disk_keys=0;disk_val="";}
function ltrim(s) { sub(/^[ \t\r\n]+/, "", s); return s }
function rtrim(s) { sub(/[ \t\r\n,]+$/, "", s); return s }
function trim(s) { return rtrim(ltrim(s)); }
function get_cache_sizes(str) {
mbytes = 0.0;
if (index(str, " L3 ") > 0 || index(str, " L2 ") > 0 || index(str, " L1i ") > 0 || index(str, " L1d ") > 0) {
for(k = 1; k <= NF; k++) {
if ( $k == "L3" || $k == "L2" || $k == "L1i" || $k == "L1d" ) {
j = k+2;
sz = substr($j, 2, length($j)-2);
unit= substr(sz, length(sz)-1, 2);
sz = substr(sz, 1, length(sz)-2);
if ($VERBOSE > 0) { printf("typ= %s sz= %s unit= %s str= %s\n", $k, sz, unit, str); }
if (unit == "KB") { sz /= 1024.0; }
if (unit == "MB") { sz *= 1.0; }
if (unit == "GB") { sz *= 1024.0; }
mbytes += sz;
if ($VERBOSE > 0) { printf("MBs= %.3f str= %s\n", mbytes, str);}
}
}
}
return mbytes;
}
#work= freq_sml, threads= 96, total perf= 293.273 Gops/sec
/^work= freq.*, threads=/ {
if (FILENAME == "spin_freq.txt") { thrds= rtrim($4);cycles=$7/thrds;cycles=sprintf("%.3f",cycles);totcyc=sprintf("%.3f",$7);;
i++;key[i]="cycles/cpu";val[i]=cycles;
i++;key[i]="tot_cycles";val[i]=totcyc;
}
}
/^work= mem_bw_remote, threads=/ {
if (FILENAME == "spin_bw_remote.txt") {
# old format
thrds= rtrim($4);bw_rem=$7;bw_rem=sprintf("%.3f",bw_rem);
i++;key[i]="spin_bw_remote";val[i]=bw_rem;
}
}
#numa_nodes= 2
#cpus/node= 48
#spin numa memory bandwidth matrix GB/s
#Numa node 0 1
#0 86.593 79.660
#1 81.503 84.481
/^numa_nodes= / {
if (FILENAME == "spin_bw_remote.txt") {
# new format
numa_nodes_rmt = $2;
numa_nodes_beg = 0;
while(getline > 0) {
#printf("got spin bw remote line= %s\n", $0) > "/dev/stderr";
if ($1 == "Numa" && $2 == "node") { numa_nodes_beg = 1; continue; }
if ( numa_nodes_beg == 1 ) {
n = split($0, arr, "\t");
rmt_rw = arr[1]+1;
if (arr[n] == "" ) { n--;} # there can be a trailing tab char
# use < n since there is a trailing tab char
for (rmt_i=2; rmt_i <= n; rmt_i++) {
rmt_col=rmt_i -1;
v = arr[rmt_i]+0.0;
if (rmt_min == "") {
rmt_min = v;
#printf("got spin bw remote initial v= %f\n", v) > "/dev/stderr";
}
if (rmt_col != rmt_rw && rmt_min > v) {
rmt_min = v;
#printf("got spin bw remote new v= %f\n", v) > "/dev/stderr";
}
}
if (rmt_rw == numa_nodes_rmt) {break;}
}
}
if (rmt_min == "") { bw_rem=0.0; } else { bw_rem = rmt_min; }
i++;key[i]="spin_bw_remote";val[i]=bw_rem;
}
}
/^work= mem_bw, threads=/ {
if (FILENAME == "spin_bw.txt") {
thrds= rtrim($4);bw=$7;bw=sprintf("%.3f",bw);
i++;key[i]="spin_bw_local";val[i]=bw;
}
}
{
if (FILENAME == "spin_freq.txt" || FILENAME == "spin_bw.txt" || FILENAME == "spin_bw_remote.txt") { next; }
}
/====start cat .*scaling_governor=====/ {
getline;
++i;
printf("============== %s\n", $0) > "/dev/stderr";
key[i]="governor";
val[i]=$1;
next;
}
/====start uname -a/ {
getline;
++i;
key[i]="host";
hst=$2;
if (host_in != "" && host_in != hst) { hst = hst " " host_in;}
val[i]=hst;
++i;
key[i]="kernel";
val[i]=$3;
next;
}
/====start numactl --hardware/ {
getline;
++i;
key[i]="numa nodes";
val[i]=$2;
next;
}
/====start cat \/proc\/meminfo/ {
getline;
++i;
key[i]="MemTotal";
mem = $2;
unit = $3;
if (unit == "kB") { mem = mem/(1024.0*1024.0); unit = "GB"};
if (unit == "mB") { mem = mem/(1024.0); unit = "GB"};
mem = sprintf("%.3f", mem);
val[i]=mem " " unit;
next;
}
/====start lsb_release/ {
while(1) {
rc=getline; if (rc==0 || substr($0, 1, 4) == "====") {break;}
if ($1=="Description:") {$1=""; i++;key[i]="OS"; val[i]=$0;got_os=1;break;}
}
next;
}
/====start cat \/etc\/os-release/ {
if (got_os == 1) { next; }
while(1) {
rc=getline; if (rc==0 || substr($0, 1, 4) == "====") {break;}
n=split($0, arr, "=");
if (arr[1]=="PRETTY_NAME") {i++;key[i]="OS"; got_os=1; val[i]=arr[2];break;}
}
next;
}
/====start lscpu/{
while(1) {
rc=getline; if (rc==0 || substr($0, 1, 4) == "====") {break;}
n=split($0, arr, ":");
arr[2]=trim(arr[2]);
#printf("1=_%s_, a1=_%s_\n", $1, arr[1]);
#Thread(s) per core: 2
#Core(s) per socket: 16
#Socket(s): 1
#NUMA node(s): 1
#Vendor ID: GenuineIntel
#CPU family: 6
#Model: 85
#Model name: Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz
#Stepping: 4
#CPU MHz: 2500.000
#BogoMIPS: 5000.00
#Hypervisor vendor: KVM
#Virtualization type: full
#L1d cache: 32K
#L1i cache: 32K
#L2 cache: 1024K
#L3 cache: 33792K
#NUMA node0 CPU(s): 0-31
if ($1=="CPU(s):") {i++;key[i]="num_cpus"; val[i]=$2;continue;}
if (arr[1]=="Thread(s) per core") {i++;key[i]="thr/core"; val[i]=arr[2];continue;}
if (arr[1]=="Core(s) per socket") {i++;key[i]="cores/skt"; val[i]=arr[2];continue;}
if (arr[1]=="Socket(s)") {i++;key[i]="skts"; val[i]=arr[2];continue;}
if (arr[1]=="CPU family") {cpu_fam=arr[2];i++;key[i]="cpu family"; val[i]=arr[2];continue;}
if (arr[1]=="Architecture") {cpu_arch=arr[2];}
if (arr[1]=="Vendor ID") {cpu_vnd=arr[2];continue;}
if (arr[1]=="Model") {
cpu_mod=arr[2];
i++;key[i]="model"; val[i]=arr[2];
continue;}
if (arr[1]=="Model name") {
cpu_model_name = arr[2];
i++;key[i]="model name"; val[i]=arr[2];
cmd_decode = scr_dir "/decode_cpu_fam_mod.sh -f " cpu_fam " -m " cpu_mod " -n \""cpu_model_name "\" -v GenuineIntel -V 0" ;
cmd_decode | getline cpu_codename;
close(cmd_decode);
res = cpu_codename;
#res=decode_fam_mod(cpu_vnd, cpu_fam, cpu_mod, cpu_model_name);
++i;key[i]="cpu_decoder"; val[i]=res;
continue;}
if (arr[1]=="CPU MHz") {i++;key[i]="CPU MHz"; val[i]=arr[2];continue;}
if (arr[1]=="BogoMIPS") {i++;key[i]="BogoMIPS"; val[i]=arr[2];continue;}
if (arr[1]=="Hypervisor vendor") {i++;key[i]="Hypervisor vendor"; val[i]=arr[2];continue;}
if (arr[1]=="Virtualization type") {i++;key[i]="Virt. Typ"; val[i]=arr[2];continue;}
if (arr[1]=="L1d cache") {i++;key[i]=arr[1]; val[i]=arr[2];continue;}
if (arr[1]=="L1i cache") {i++;key[i]=arr[1]; val[i]=arr[2];continue;}
if (arr[1]=="L2 cache") {i++;key[i]=arr[1]; val[i]=arr[2];continue;}
if (arr[1]=="L3 cache") {i++;key[i]=arr[1]; val[i]=arr[2];continue;}
}
next;
}
#====start lstopo --no-io=
#Machine (480GB total)
# NUMANode L#0 (P#0 240GB) + Socket L#0 + L3 L#0 (45MB)
# L2 L#0 (256KB) + L1d L#0 (32KB) + L1i L#0 (32KB) + Core L#0
# PU L#0 (P#0)
# PU L#1 (P#32)
# L2 L#1 (256KB) + L1d L#1 (32KB) + L1i L#1 (32KB) + Core L#1
# PU L#2 (P#1)
# PU L#3 (P#33)
#====start lstopo --no-io=====
#Machine (124GB) + Socket L#0 + L3 L#0 (33MB)
# L2 L#0 (1024KB) + L1d L#0 (32KB) + L1i L#0 (32KB) + Core L#0
# PU L#0 (P#0)
# PU L#1 (P#8)
# L2 L#1 (1024KB) + L1d L#1 (32KB) + L1i L#1 (32KB) + Core L#1
# PU L#2 (P#1)
# PU L#3 (P#9)
#Machine (250GB total) + Socket L#0
# NUMANode L#0 (P#0 125GB)
# L3 L#0 (8192KB)
# L2 L#0 (512KB) + L1d L#0 (32KB) + L1i L#0 (64KB)
# Core L#0 + PU L#0 (P#0)
# Core L#1 + PU L#1 (P#16)
# L2 L#1 (512KB) + L1d L#1 (32KB) + L1i L#1 (64KB)
# Core L#2 + PU L#2 (P#1)
# Core L#3 + PU L#3 (P#17)
/====start lstopo --no-io=/{
tot_cache=0;
while(1) {
rc=getline; if (rc==0 || substr($0, 1, 4) == "====") {printf("tot_cache MBs= %.3f\n", tot_cache);++i;key[i]="total_cache MBs";val[i]=tot_cache;break;}
tot_cache += get_cache_sizes($0);
continue;
}
next;
}
/====start dmidecode/ {
while(1) {
rc=getline; if (rc==0 || substr($0, 1, 4) == "====") {break;}
n=split($0, arr, /[ \t]/);
if ($0=="BIOS Information") {area="bios";continue;}
if (area == "bios" && arr[2] == "Vendor:") {$1="";str=$0;continue;}
if (area == "bios" && arr[2] == "Version:") {$1="";str = str " " $0;continue;}
if (area == "bios" && arr[2] == "Release") {$1=$2="";str = str " " $0;++i;key[i]="bios";val[i]=str;area="";continue;}
if ($0=="System Information") { area="sys";continue;}
if (area == "sys" && arr[2] == "Manufacturer:") {$1="";str=$0;continue;}
if (area == "sys" && arr[2] == "Product") {$1=$2="";str = str " " $0;++i;key[i]="system";val[i]=str;area="";break;}
}
next;
}
# *-disk:1
# description: ATA Disk
# product: MTFDDAK1T9TDD
# physical id: 0.1.0
# bus info: scsi@0:0.1.0
# logical name: /dev/sdb
# version: U004
# serial: 18351F1367F6
# size: 1788GiB (1920GB)
# capacity: 1788GiB (1920GB)
# configuration: ansiversion=6 logicalsectorsize=512 sectorsize=4096
/====start lshw==/{
printf("got lshw= %s\n", $0);
while(1) {
rc=getline; if (rc==0 || substr($0, 1, 4) == "====") {break;}
pos=index($0, "*-disk");
#printf("got lshw: pos= %d %s\n", pos, $0);
if (pos > 1) { # start of a disk section
printf("got disk= %s\n", $0);
dck_str[1]="product: ";
dck_str[2]="logical name: ";
dck_str[3]="size: ";
if (disk_keys > 0) { dcomma=", ";}
disk_keys++;
disk_str=dcomma "{";
while(1) {
rc=getline; if (rc==0 || substr($0, 1, 4) == "====") {disk_str=disk_str"}";disk_val=disk_val""disk_str;break;}
match($0, /^ */); # find leading spaces
#printf("There are %d spaces leading up to %s\n", RLENGTH, substr($0,RLENGTH+1))
if (RLENGTH > pos) { # this is the details of the above disk
$0=substr($0, RLENGTH+1); #drop leading spaces
#printf("nstr= %s, disk_str= %s\n", $0, disk_str);
for (kk=1; kk <= 3; kk++) {
if (index($0, dck_str[kk]) == 1) {
disk_str=disk_str " " substr($0, length(dck_str[kk])+1);
if (kk==3) { disk_str = disk_str ", ";}
#printf("disk_str[%d]= %s\n", kk, disk_str);
break;
}
}
} else {
printf("disk_str end = %s, cur_line= %s\n", disk_str, $0);
disk_str = (disk_str "}");
printf("disk_str end = %s, cur_line= %s\n", disk_str, $0);
disk_val = disk_val "" disk_str;
printf("disk_val= %s\n", disk_val);
# check if the current line is for a new disk
pos=index($0, "*-disk");
if (pos > 1) {
printf("new disk area\n");
disk_keys++;
disk_str=dcomma "{";
} else {
# not a new disk record
printf("not disk area\n");
break;
}
}
}
}
n=split($0, arr, " ");
#printf("1=_%s_, 2=_%s_\n", arr[1], arr[2]);
#if (arr[1]=="*-cpu") {getline; $1="";str=$0;++i;key[i]="cpu";val[i]=str;area="";break;}
}
next;
}
/====start lsblk -P -o NAME,SIZE,MODEL=/{
printf("got lsblk -P -o NAME,SIZE,MODEL\n")
lsdsk_str="";
lsdsk_val="";
while(1) {
#rc=getline; if (rc==0 || substr($0, 1, 4) == "====") {printf("lsdsk_str= %s\n", lsdsk_str);break;}
rc=getline; if (rc==0 || substr($0, 1, 4) == "====") {printf("lsdsk_str= %s\n", lsdsk_str); ++i;key[i]="lsblk_disks";val[i]=lsdsk_str;break;}
#n=split($0, arr, /[ \"]+/);
n=split($0, arr, /[ "]+/);
#for (ni=1; ni <= n; ni++) {printf("fld[%d]= %s\n", ni, arr[ni])};
if (n >= 7) {
lsdsks++;
nm=arr[2];
sz=arr[4];
model=arr[6];
if (n >= 8) {
model = model " " arr[7];
}
printf("disk[%d]= %s, %s, %s\n", lsdsks, nm, sz, model);
lsdsk_str = lsdsk_str "" nm ", " sz ", " model "/";
}
continue;
}
next;
}
END {
if (disk_val != "") {
key[++i]="disks";
val[ i]=disk_val;
}
hdr="++\\t";for (j=1; j <= i; j++) { printf("%s%s\t", hdr, key[j]);hdr="";} printf("\n");
hdr="++\\t";for (j=1; j <= i; j++) { printf("%s%s\t", hdr, val[j]);hdr="";} printf("\n");
for (j=1; j <= i; j++) { printf("%s\t%s\n", key[j], val[j]);}
}
' $file1 $file2 $file3 $file4`
if [ "$barg" != "" ]; then
valid=1
echo "=== sysinfo === $d"
if [ "$sysinfo_kv" == "" ]; then
sysinfo_kv=`echo -e "$barg" | grep "++"`
fi
echo -e "$barg"
tmp_str=`echo -e "$barg" | grep "++" | awk -v lkfor="host" '
BEGIN{
hdr=0;
sv_col=-1;
}
{
if (hdr==0){
hdr=1;
for (i=1; i <= NF; i++) {
if ($i == lkfor) {
sv_col = i;
break;
}
}
} else {
if (sv_col != -1) {
str = $sv_col;
gsub(/\./, "_", str); # going to use hostname as key for bash array key. bash doesnt like . in key
printf("%s\n", str);
} else {
printf("%s\n", "not_found");
}
}
}
'`
HOST_SYSINFO=$tmp_str
STR2=${HOST_ARR[$HOST_SYSINFO]}
echo "$0.$LINENO HOST_SYSINFO= $HOST_SYSINFO" > /dev/stderr
if [ "$STR2" == "" ]; then
HOST_ARR_I=$((HOST_ARR_I+1))
#HOST_ARR[$HOST_SYSINFO]=$HOST_ARR_I
HOST_ARR[${HOST_SYSINFO}]=$HOST_ARR_I
HOST_ARR_LKUP[$HOST_ARR_I]=$HOST_SYSINFO
fi
echo -e "-------sysinfo str= $tmp_str" > /dev/stderr
fi
popd
fi
fi
#====start cat /proc/meminfo=====
#MemTotal: 195898172 kB
#15:39:40.000 Starting RD=run-1-seqWrite; I/O rate: Uncontrolled MAX; elapsed=200; For loops: threads=2 xfersize=1m
if [ "$bm" == "fio" ]; then
#RESP=`ls -1 -1 $d/FIO_*|wc -l`
# nvme2n1: (g=0): rw=read, bs=1M-1M/1M-1M/1M-1M, ioengine=libaio, iodepth=8
# READ: io=1126.7GB, aggrb=3843.6MB/s, minb=1921.8MB/s, maxb=1921.9MB/s, mint=300004msec, maxt=300004msec
# nvme2n1: ios=4608828/0, merge=0/0, ticks=17899976/0, in_queue=17606684, util=100.00%
arg1="";
shopt -s nullglob
FILES=($d/FIO_*)
if [ ${#FILES[@]} -gt 0 ]; then
barg=`awk 'BEGIN{i=0;dsks=0;}
/ rw=/{sv=0;k1=index($0," rw="); str=substr($0,k1+4); k2=index(str,","); typ=substr(str,1,k2-1); k3=index(str," bs="); bs=substr(str,k3+4); k4=index(bs,"-"); bs=substr(bs,1,k4-1); }
/ in_queue=/{if (dsk_nm[$1] != $1) {dsks++;dsk_nm[$1]=$1;dsk_arr[dsks]=$1;}}
/ aggrb=/{
k1=index($0," aggrb=");
str=substr($0,k1+7); k2=index(str,",");
bw=substr(str,1,k2-1);
pos = index(bw, "KB");
if (pos > 0) { num = substr(bw,1,pos-1); num = num / 1024.0; bw=sprintf("%.3f", num);}
pos = index(bw, "MB");
if (pos > 0) { bw = substr(bw,1,pos-1);};
#printf("{typ= %s, bs= %s, bw= %s}\n", typ, bs, bw);
i++; arr[i,1]=bw;arr[i,2]=bs;arr[i,3]=typ;}
END{
printf("fio disks used:\n");
for(j=1; j<= dsks; j++){
printf("%d\t%s\n",j,dsk_arr[j]);
}
printf("fio_disks:\n");
str="DISKS: " dsks " x";
for(j=1; j<= dsks; j++){
str = str " " dsk_arr[j];
}
printf("%s\n", str);
printf("aggMB/s\tSize\tRd/Wr\n");
for(j=1; j<= i; j++){
printf("%s\t%s\t%s\n",arr[j,1],arr[j,2],arr[j,3]);
}
}
' ${FILES[@]}`
#' $j`
arg1="$arg1 $barg";
fi
if [ "$arg1" != "" ]; then
valid=1
echo "=== fio === $d"
echo "$barg"
# fio_key="fio_4K_randomread\tfio_1M_seq_read\tfio_4K_randomwrite\tfio_1M_seq_write"
if [ $got_fio -eq 0 ]; then
fio_key=
fio_val=
fio_count=0
RESP=`echo "$barg" | egrep "4K.randre.d"`
for g in $RESP; do
fio_key="fio_4K_randomread";
fio_val="$g"
fio_count=$((fio_count+1))
break
done
RESP=`echo "$barg" | egrep "16M.re.d"`
for g in $RESP; do
fio_key="$fio_key\tfio_16M_seq_read";
fio_val="$fio_val\t$g"
fio_count=$((fio_count+1))
break
done
RESP=`echo "$barg" | egrep "1M.re.d"`
for g in $RESP; do
fio_key="$fio_key\tfio_1M_seq_read";
fio_val="$fio_val\t$g"
fio_count=$((fio_count+1))
break
done
RESP=`echo "$barg" | egrep "4K.randwrite"`
for g in $RESP; do
fio_key="$fio_key\tfio_4K_randomwrite";
fio_val="$fio_val\t$g"
fio_count=$((fio_count+1))
break
done
RESP=`echo "$barg" | egrep "1M.write"`
for g in $RESP; do
fio_key="$fio_key\tfio_1M_seq_write";
fio_val="$fio_val\t$g"
fio_count=$((fio_count+1))
break
done
RESP=`echo "$barg" | egrep "16M.write"`
for g in $RESP; do
fio_key="$fio_key\tfio_16M_seq_write";
fio_val="$fio_val\t$g"
fio_count=$((fio_count+1))
break
done
RESP=`echo "$barg" | egrep "DISKS: "`
echo "fiodisks RESP= $RESP"
for g in "$RESP"; do
gg=`echo $g | sed 's/DISKS: //'`
fiodisk_key="fio_disks";
fiodisk_val="$gg"
echo "fiodisk_key= $fiodisk_key"
echo "fiodisk_val= $fiodisk_val"
break
done
got_fio=1
fi
fi
fi
if [ "$bm" == "vdbench" ]; then
# Parse vdbench output: collect the per-run "avg_*" summary rows from
# vdbench_stdout.txt and print an aggMB/s / Size / Rd-Wr / Seq-Rand table.
#Dec 02, 2019 interval i/o MB/sec bytes read resp read write read write resp queue cpu% cpu%
# rate 1024**2 i/o pct time resp resp max max stddev depth sys+u sys
#09:50:56.010 avg_2-8 3832.1 3832.07 1048576 100.00 1.043 1.043 0.000 2.47 0.00 0.114 4.0 3.1 1.9
file1=$d/vdbench_stdout.txt
# /Starting /{sv=0;if (index($0, "seekpct=0") > 0 && (index($0, "xfersize=4k") > 0 || index($0, "xfersize=1m") > 0)){sv=1;printf("%s\n", $0);}}
if [ -e $file1 ]; then
arg1=`awk -v vrb=$VERBOSE 'BEGIN{hdr1="";hdr2="";i=0;}
/Starting /{sv=1;if (index($0, "seekpct=0") > 0){typ="seq";}else{typ="rand";}if (vrb > 0){printf("%s\n", $0);}}
/bytes +read +resp +read/{if(hdr1==""){hdr1=$0;}}
/pct +time +resp +resp/{if(hdr2==""){hdr2=$0;}}
/ avg_/{if(sv==1){i++; arr[i,1]=$4;arr[i,2]=$5;if($6=="100.00"){arr[i,3]="read";}else{arr[i,3]="write";};arr[i,4]=typ;if(vrb>0){printf("%s\n", $0);}}}
END{
if(vrb>0){printf("%s\n%s\n", hdr1, hdr2);}
printf("aggMB/s\tSize\tRd/Wr\tSeq/Rand\n");
for(j=1; j<= i; j++){
printf("%s\t%s\t%s\t%s\n",arr[j,1],arr[j,2],arr[j,3],arr[j,4]);
}
}' $file1`
#echo " got vdbench file, arg1= $arg1"
# Any parsed rows mark this result dir as containing a valid vdbench run.
if [ "$arg1" != "" ]; then
valid=1
echo "=== vdbench === $d"
echo "$arg1"
fi
#logfile.html:09:30:49.903 sd=sd1,lun=/dev/nvme2n1 lun size: 7,500,000,000,000 bytes; 6,984.9194 GB (1024**3); 7,500.0001 GB (1000**3)
# Also capture the LUN inventory lines from logfile.html.
# NOTE(review): arg2 does not appear to be consumed later in this chunk.
file2=$d/logfile.html
if [ -e $file2 ]; then
arg2=`awk '/,lun=/{printf("%s\n", $0);}' $file2`
fi
fi
fi
if [ "$bm" == "stream" ]; then
file1=$d/stream_full-stream-out.out
if [ -e $file1 ]; then
arg1=`grep -E 'Number of Threads counted|Copy|Scale|Add|Triad|Read|Function' $file1`
#echo " got stream file, arg1= $arg1"
if [ "$arg1" != "" ]; then
#Number of Threads counted = 32
#Function Best Rate MB/s Avg time Min time Max time
#Copy: 71321.0 0.030381 0.030110 0.043833
#Scale: 71709.4 0.030164 0.029947 0.035470
#Add: 81041.3 0.040092 0.039748 0.058096
#Triad: 80980.1 0.040136 0.039778 0.055336
arg1=`echo "$arg1" | awk '
BEGIN {i=0;}
function add_key_val(str, res, div) {
if (div==1){res=res/1024.0};i++;key[i]=str;val[i]=sprintf("%.3f", res);
}
/^Number of/{ add_key_val("stream_threads", $6, 0); next;}
/^Copy:/ { add_key_val("stream copy GB/s",$2, 1); next;}
/^Scale:/ { add_key_val("stream scale GB/s",$2, 1); next;}
/^Add:/ { add_key_val("stream add GB/s",$2, 1); next;}
/^Triad:/ { add_key_val("stream triad GB/s",$2, 1); next;}
/^Read:/ { add_key_val("stream read GB/s",$2, 1); next;}
END {
hdr="++\\t";for (j=1; j <= i; j++) { printf("%s%s\t", hdr, key[j]);hdr="";} printf("\n");
hdr="++\\t";for (j=1; j <= i; j++) { printf("%s%s\t", hdr, val[j]);hdr="";} printf("\n");
for (j=1; j <= i; j++) { printf("%s\t%s\n", key[j], val[j]);}
}
'`
echo "=== stream === $d"
echo "$arg1"
if [ "$stream_kv" == "" ]; then
stream_kv=`echo -e "$arg1" |grep "++"`
echo "stream_kv= $stream_kv"
else
if [ "$stream_kv" != "" -a "$ALL_DIRS" == "1" ]; then
tmp=`echo -e "$arg1" |grep "++"`
ln_1a=`echo -e "$stream_kv" | awk '{if (NR==1){printf("%s\n", $0);}}'|sed 's/\t$//'`
ln_2a=`echo -e "$stream_kv" | awk '{if (NR==2){printf("%s\n", $0);}}'|sed 's/\t$//'`
ln_1b=`echo -e "$tmp" | awk '{if (NR==1){printf("%s\n", $0);}}'|sed 's/++\t//'`
ln_2b=`echo -e "$tmp" | awk '{if (NR==2){printf("%s\n", $0);}}'|sed 's/++\t//'`
echo -e "ln_1a= $ln_1a"
echo -e "ln_2a= $ln_2a"
echo -e "ln_1b= $ln_1b"
echo -e "ln_2b= $ln_2b"
stream_kv=$(printf "$ln_1a\t$ln_1b\n$ln_2a\t$ln_2b")
#farr=$(tr -d \\n < $tmp)
#stream_kv="$stream_kv;$tmp"
echo -e "stream_kv= $stream_kv"
fi
fi
valid=1
fi
fi
fi
if [ "$bm" == "geekbench" ]; then
file1=`find $d -name "gb_scores.tsv"`
if [ $VERBOSE -gt 0 ]; then
echo "geekbench file1= $file1"
fi
gb_k="\t"
gb_v="\t"
if [ "$file1" != "" ]; then
arg1=`grep 'score_single' $file1 | sed 's/.*\t//g; s/ //g;'`
arg2=`grep 'score_multi' $file1 | sed 's/.*\t//g; s/ //g;'`
echo "=== geekbench === $d"
key="++\tgb_single\tgb_multi"
val="++\t$arg1\t${arg2}"
echo -e "$key"
echo -e "$val"
if [ "$arg1" != "" -a "$arg2" != "" ]; then
gb_i=${gb_arr1[${dirs_i}]}
if [ "$gb_i" == "" ]; then
gb_i=0
fi
gb_i=$((gb_i+1))
gb_arr1[${dirs_i}]=$gb_i
gb_arr2[${dirs_i},$gb_i]=$arg1
gb_arr3[${dirs_i},$gb_i]=$arg2
if [ "$gb_key" == "" ]; then
gb_key="$key";
gb_val="$val";
else
if [ "$gb_key" != "" -a "$ALL_DIRS" == "1" ]; then
key="\tgb_single\tgb_multi"
val="\t$arg1\t${arg2}"
gb_key="$gb_key$key";
gb_val="$gb_val$val";
echo -e "gb_key= $gb_key"
echo -e "gb_val= $gb_val"
fi
fi
str2=${gb_arr1[${dirs_i}]}
echo "gb_arr1[${dirs_i}]= ${gb_arr1[${dirs_i}]}, gb_i= $gb_i, ${gb_arr2[${dirs_i},$gb_i]}, ${gb_arr3[${dirs_i},$gb_i]}"
#echo "$0.$LINENO: bye"
#exit 1
fi
if [ "$arg1" != "" -a "$arg2" != "" ]; then
valid=1
fi
fi
fi
if [ "$bm" == "coremark" ]; then
file1=`find $d -name "run_*_*.log*" | sort`
if [ $VERBOSE -gt 0 ]; then
echo "coremark file1= $file1"
fi
if [ "$file1" != "" ]; then
MyD=`pwd`
RES=`$SCR_DIR/coremark/get_coremark_results.sh $file1`
LNS=`echo -e "$RES" | wc -l`
echo "LNS= $LNS"
cm_lines=$((cm_lines+1))
for (( t_i=1; t_i < $LNS; t_i++ )); do
LN=(`echo "$RES" | awk -v want="$t_i" 'BEGIN{want+=0;i=-1;}{i++;if (i==want){print $0;exit}}'`)
echo "LN[$t_i]= ${LN[@]}"
cmln_i=$((cmln_i+1))
cm_i=${cm_arr[${dirs_i},0]}
if [ "$cm_i" == "" ]; then
cm_i=0
fi
cm_i=$((cm_i+1))
cm_arr[${dirs_i},0]=$cm_i
cm_arr[${dirs_i},$cm_i,0]=${LN[0]}
cm_arr[${dirs_i},$cm_i,1]=${LN[1]}
cm_arr[${dirs_i},$cm_i,2]=${LN[4]}
cm_dir=$d
key="++\tcm_score\tcm_thrds\tcm_pct_stdev"
val="++\t${LN[0]}\t${LN[1]}\t${LN[4]}"
echo -e "$key"
echo -e "$val"
if [ "$cm_key" == "" ]; then
cm_key="$key";
cm_val="$val";
else
key="cm_score\tcm_thrds\tcm_pct_stdev"
val="${LN[0]}\t${LN[1]}\t${LN[4]}"
cm_key="$cm_key$key";
cm_val="$cm_val$val";
fi
done
#echo -e "myd= $MyD cm_arr= ${RES[0]}" > /dev/stderr
#echo -e "myd= $MyD cm_arr= ${cm_arr[${dirs_i},0]}" > /dev/stderr
#exit
fi
fi
if [ "$bm" == "specjbb" ]; then
echo "============= specjbb ================"
file2=`find $d -name "specjbb.log"`
echo "$0.$LINENO specjbb.log= $file2" > /dev/stderr
if [ "$file2" == "" ]; then
file2=`find $d/.. -name "specjbb.log"`
echo "$0.$LINENO specjbb.log= $file2" > /dev/stderr
fi
if [ $VERBOSE -gt 0 ]; then
echo "specjbb1 file2= $file2"
fi
java_k="\t"
java_v="\t"
if [ "$file2" != "" ]; then
arg3=`grep 'version' $file2 | sed 's/.*version //g;'`
if [ "$arg3" != "" -a "$ALL_DIRS" != "1" ]; then
java_k="\tjava_ver"
java_v="\t$arg3"
fi
java_str=`awk '
/version/{ if ($2 == "version" && ($1 == "java" || $1 == "openjdk")) {jdkver= $3;}}
/^arg1=/{dir=substr($0, 6, length($0));}
/^arg2=/{n=split($0,arr, "=");gsub(/ /,"",arr[3]);numa=arr[3]; if (arr[3] == "") { numa="unbnd";printf("__line= %s\n",$0) > "/dev/stderr";} gsub(/^[ \t]+/,"",numa);}
/^arg3=/{n=split($0,arr, "=");grps=arr[3]+0; if (n < 3 || grps < 1) {def_grp=1; grps=1;}}
/^arg4=/{
n=split($0,arr, "=");
tpg=arr[3]+0;
p1=index(arr[2],"NUM_CPUS(");
sb=substr(arr[2],p1+9,length(arr[2]));
p1=index(sb,")");
num_cpus=substr(sb,1,p1-1)+0;
if ((numa=="local" || numa=="remote") && tpg == 0 && num_cpus > 0 && grps > 0) { tpg = num_cpus/grps; }
if (numa=="unbnd") { tpg = num_cpus/grps; }
}
/^NUMACTL_NODES=/{numa_nodes= $2;}
/\/bin\/java/{ java= $0;}
END{
if (def_grp == 1 && (numa == "local" || numa == "remote")) {
grps = numa_nodes;
if (num_cpus > 0) { tpg = num_cpus/grps; }
}
str=sprintf("specjbb numa_nodes= %s, numa_strat= %s, grps= %s, tpg= %s java= %s, java_ver= %s", numa_nodes, numa, grps, tpg, java, jdkver);
printf("%s\n", str);printf("%s\n", str) > "/dev/stderr";
}' $file2`
IFS=$'\n' NUMA_STRS=(`egrep "^CMD_C|^CMD_BE|^CMD_TI" $file2`)
IFS=$IFS_SV
NUMA_STR=
for ((jj=0; jj < ${#NUMA_STRS[@]}; jj++)); do
NUMA_STR="${NUMA_STR};\"${NUMA_STRS[$jj]}\""
done
if [ "$java_str" != "" ]; then
java_k="\tjava_ver"
java_v="\t${java_str}"
echo "java_str ${java_str} $java_k $java_v" > /dev/stderr
fi
fi
file1=`find $d -name "specjbb2015-M-*-00001.raw"`
if [ $VERBOSE -gt 0 ]; then
echo "specjbb2 file1= $file1"
fi
if [ "$file1" != "" ]; then
for f in $file1; do
arg1=`grep 'jbb2015.result.metric.max-jOPS =' $f | sed 's/.*=//g; s/ //g;'`
arg2=`grep 'jbb2015.result.metric.critical-jOPS =' $f | sed 's/.*=//g; s/ //g;'`
sj_i=${sj_arr[${dirs_i},0]}
if [ "$sj_i" == "" ]; then
sj_i=0
fi
sj_i=$((sj_i+1))
sj_arr[${dirs_i},0]=$sj_i
sj_arr[${dirs_i},$sj_i,0]=$arg1
sj_arr[${dirs_i},$sj_i,1]=$arg2
sj_dir=$(dirname $file1)
echo "__val2__;sj_max_crit;$arg1;$arg2;${PROJ_DIR};${dirs[$dirs_i]};$sj_dir${NUMA_STR}"
echo "=== specjbb === $d"
key="++\tmax-jOPS\tcrit-jOPS${java_k}"
val="++\t$arg1\t${arg2}${java_v}"
#if [ "$specjbb_key" == "" ]; then
# specjbb_key="$key";
# specjbb_val="$val";
#fi
echo -e "$key"
echo -e "$val"
if [ "$arg1" != "" -a "$arg2" != "" ]; then
if [ "$specjbb_key" == "" ]; then
specjbb_key="$key";
specjbb_val="$val";
else
if [ "$specjbb_key" != "" -a "$ALL_DIRS" == "1" ]; then
key="max-jOPS\tcrit-jOPS${java_k}"
val="$arg1\t${arg2}${java_v}"
specjbb_key="$specjbb_key$key";
specjbb_val="$specjbb_val$val";
echo -e "specjbb_key= $specjbb_key"
echo -e "specjbb_val= $specjbb_val"
fi
fi
fi
if [ "$arg1" != "" -a "$arg2" != "" ]; then
valid=1
fi
done
fi
fi
#if [ ${did_bmarks[$bm]+_} ]; then
# valid=0
#fi
# Verbose trace of which benchmark dir was recognized and what was parsed.
if [ $VERBOSE -gt 0 ]; then
if [ $valid -eq 1 ]; then
echo " ckt $bm, dir $d"
echo " arg1= $arg1"
#if [ "$arg2" != "" ]; then
#echo " arg2= $arg2"
#fi
fi
fi
# Remember the last dir processed for this benchmark type.
did_bmarks[$bm]=$d
done
popd
# Split a two-line "key<TAB>... / val<TAB>..." summary string into the global
# array 'arr', one element per line.  read(1) trims leading/trailing IFS
# whitespace from each line, which the key/val consumers below rely on.
split_summary_lines() {
  arr=()
  local entry
  while read -r entry; do
    arr+=("$entry")
  done <<< "$1"
}
split_summary_lines "$stream_kv"
echo -e "arr= ${arr}"
stream_key="${arr[0]}"
stream_val="${arr[1]}"
echo -e "st-key= $stream_key"
echo -e "st-val= $stream_val"
split_summary_lines "$sysinfo_kv"
sysinfo_key="${arr[0]}"
sysinfo_val="${arr[1]}"
# Dump every per-benchmark key/value summary pair (tab-separated, still
# carrying the "++" row marker) before the combined "all" output below.
echo -e "$specint_key"
echo -e "$specint_val"
echo -e "$stream_key"
echo -e "$stream_val"
echo -e "$fio_key"
echo -e "$fio_val"
echo -e "$fiodisk_key"
echo -e "$fiodisk_val"
echo -e "$specjbb_key"
echo -e "$specjbb_val"
echo -e "$gb_key"
echo -e "$gb_val"
echo -e "$sysinfo_key"
echo -e "$sysinfo_val"
echo "========== all ========="
# Strip the leading "++ " / "++<TAB>" row marker from a summary string.
# echo -e first turns the stored "\t" escapes into real tabs, then the same
# two sed passes the original code used remove the marker (GNU sed interprets
# \t in the pattern).  Factored into one helper to replace 16 copy-pasted
# pipelines; behavior is unchanged.
strip_marker() {
  echo -e "$1" | sed 's/++ //;' | sed 's/++\t//;'
}
specint_key=$(strip_marker "$specint_key")
specint_val=$(strip_marker "$specint_val")
stream_key=$(strip_marker "$stream_key")
stream_val=$(strip_marker "$stream_val")
fio_key=$(strip_marker "$fio_key")
fio_val=$(strip_marker "$fio_val")
# fio disk list gets a fixed key unless the parser found one.
fiod_key="fio_disks"
fiod_val=""
if [ "$fiodisk_key" != "" ]; then
fiod_key="$fiodisk_key"
fiod_val="$fiodisk_val"
fi
specjbb_key=$(strip_marker "$specjbb_key")
specjbb_val=$(strip_marker "$specjbb_val")
gb_key=$(strip_marker "$gb_key")
gb_val=$(strip_marker "$gb_val")
sysinfo_key=$(strip_marker "$sysinfo_key")
sysinfo_val=$(strip_marker "$sysinfo_val")
# Combine all sections: tab-joined (k1/v1) and semicolon-joined (k2/v2).
k1="$specint_key\t$stream_key\t$fio_key\t$specjbb_key\t$gb_key\t$sysinfo_key\t$fiod_key"
v1="$specint_val\t$stream_val\t$fio_val\t$specjbb_val\t$gb_val\t$sysinfo_val\t$fiod_val"
k2=`echo -e "$k1" | sed 's/\t/;/g;'`
v2=`echo -e "$v1" | sed 's/\t/;/g;'`
echo -e "$specint_key\t$stream_key\t$fio_key\t$specjbb_key\t$gb_key\t$sysinfo_key"
echo -e "$specint_val\t$stream_val\t$fio_val\t$specjbb_val\t$gb_val\t$sysinfo_val"
# If the SKU is the literal placeholder "N/A" (including the embedded quotes)
# and we know the host, use the hostname as the SKU instead.
# NOTE(review): '-a' inside [ ] is deprecated/ambiguous per POSIX; works here.
if [ "$SKU" == "\"N/A\"" -a "$HOST" != "" ]; then
SKU=$HOST
fi
# Optional NUM_HOST column, appended to both header and value rows.
if [ "$NUM_HOST" != "" ]; then
kk2=";NUM_HOST"
vv2=";$NUM_HOST"
fi
# Machine-readable one-line summary rows.
# NOTE(review): header says SKU_MAKER but the value row uses $SKU_MAKE —
# presumably the same field; confirm the variable name upstream.
echo "__key__;SKU;SKU_MAKER;SKU_MODEL${kk2};$k2"
echo "__val__;$SKU;$SKU_MAKE;$SKU_MODEL${vv2};$v2"
# Walk the result dirs newest-first and emit one machine-readable
# "__val2__" geekbench row (single-core;multi-core) per recorded run.
for (( idx = ${#dirs[@]} - 1; idx >= 0; idx-- )); do
  gb_i=${gb_arr1[$idx]}
  echo "gb_i= $gb_i, dirs_i= $idx"
  [ -z "$gb_i" ] && continue
  for (( run = 1; run <= gb_i; run++ )); do
    echo "__val2__;gb_single_multi;${gb_arr2[$idx,$run]};${gb_arr3[$idx,$run]};${PROJ_DIR};${dirs[$idx]}"
  done
done
# specjbb per-run emission loop.  Effectively a no-op: the "__val2__" row is
# already printed inline where results are parsed, so the emission here is
# commented out and only j is assigned.
# NOTE(review): the commented echo carries the same "];}" bad-substitution
# typo fixed in the coremark loop below — repair it before re-enabling.
for ((dirs_i=${#dirs[@]}-1; dirs_i>=0; dirs_i--)); do
sj_i=${sj_arr[${dirs_i},0]}
#echo "sj_i= $sj_i"
if [ "$sj_i" != "" ]; then
j=$sj_i;
#for ((j=1; j<=$sj_i; j++)); do
#echo "__val2__;sj_max_crit;${sj_arr[${dirs_i},${j},0]};${sj_arr[${dirs_i},${j},1];};${PROJ_DIR};${dirs[$dirs_i]}"
#done
fi
done
# Emit one machine-readable coremark row per recorded run, newest dir first.
# cm_arr layout (filled by the coremark parser above):
#   [dir,0]      = number of runs recorded for that dir
#   [dir,run,0]  = score, [dir,run,1] = threads, [dir,run,2] = %stdev
echo "++++++++++++++++++++++at end: cm_arr= ${cm_arr[@]}" > /dev/stderr
for ((dirs_i=${#dirs[@]}-1; dirs_i>=0; dirs_i--)); do
cm_i=${cm_arr[${dirs_i},0]}
echo "cm_i= $cm_i" > /dev/stderr
if [ "$cm_i" != "" ]; then
echo "__val2__;"
for ((j=1; j<=$cm_i; j++)); do
# BUGFIX: the original expansions were written as ${cm_arr[...];} — the stray
# ';' before '}' is a runtime "bad substitution" error that aborted this echo.
# It also referenced a never-populated [dir,run,3] element, producing a fourth
# (always empty) field for the three-field label.  Emit exactly
# score;thrds;%stdev to match the label.
echo "__val2__;coremark,score,thrds,%stdev;${cm_arr[${dirs_i},${j},0]};${cm_arr[${dirs_i},${j},1]};${cm_arr[${dirs_i},${j},2]};${PROJ_DIR};${dirs[$dirs_i]}"
done
fi
done
# Hard stop: everything below this point is unreachable scratch kept by the
# author.  It is still parsed by bash, so it must stay syntactically valid.
exit
# Unreachable example: read $CMD output line-by-line into an array.
while IFS= read -r line; do
my_array+=( "$line" )
echo $line
done < <( $CMD )
IFS=$IFS_SV
LIST=`ls -1 |grep -E "[0-9]+-[0-9]+-[0-9]+_[0-9]+_"`
# NOTE(review): the next four lines are not commands — they look like pasted
# notes about SPEC CPU2017 result files.  They never run (we exited above),
# but they would fail as commands if the 'exit' were ever removed.
result/CPU2017.001.log
copies = 32
result/CPU2017.001.intrate.refrate.txt
Est. SPECrate2017_int_base
for f in $LIST; do
echo $f
done
popd
| true
|
9a77d9d8a12f9e03e8639a8fa2df0b00e2679526
|
Shell
|
tincanbox/caravan
|
/burden/.mysh/bootstrap.sh
|
UTF-8
| 1,669
| 3.21875
| 3
|
[] |
no_license
|
# Add...
# source ~/.mysh/bootstrap.sh
# in your *rc file.
#
# Identify which shell is sourcing this file by probing shell-specific
# variables.  Kept POSIX-portable: this file is sourced by zsh/bash/ksh/sh.
if [ -n "$ZSH_VERSION" ]; then
  current_shell=zsh
elif [ -n "$BASH_VERSION" ]; then
  current_shell=bash
elif [ -n "$KSH_VERSION" ] || [ -n "$FCEDIT" ]; then
  # either variable indicates some ksh variant
  current_shell=ksh
elif [ -n "$PS3" ]; then
  current_shell=unknown
else
  current_shell=sh
fi

# Banner.
echo "Home: $HOME"
echo "Using Shell: $current_shell"
echo "- - - - - - - - - - - - - - - - - - - -"

# Prepend the local tool dirs and our own bin to PATH; prefer vim for editing.
PATH=/usr/local/bin:/usr/local/sbin:$PATH
export PATH
PATH="$HOME/.mysh/bin":$PATH
export PATH
VISUAL=vim
export VISUAL
# Load the per-shell setup file, then source every *.sh under the common,
# vendor, and user-extension directories, and finally make sure vim plugins
# and ~/.shrc.local exist.  Intended to be called from a sourced rc file.
mysh_bootstrap(){
if [ -f "$HOME/.mysh/setup/shell/$current_shell.sh" ]; then
echo "Loading current shell setup file."
source "$HOME/.mysh/setup/shell/$current_shell.sh"
else
echo ".mysh cant handle shell:""$current_shell"
# NOTE(review): this file is sourced, so 'exit 1' terminates the *user's*
# shell session, not just the bootstrap — 'return 1' would be safer.
exit 1
fi
#
# User Custom files....
#
# Unmatched globs below expand to the literal pattern, but the case filter
# only sources names ending in ".sh", so an empty directory is harmless.
echo "Loading .mysh/setup/common files."
for f in ~/.mysh/setup/common/*;
do
case "${f}" in
*".sh") source "${f}";;
esac
done
echo "Loading .mysh/setup/vendor files."
for f in ~/.mysh/setup/vendor/*;
do
case "${f}" in
*".sh") source "${f}";;
esac
done
echo "Loading User extension files."
if [ ! -e "$HOME/.mysh/extension" ]; then
mkdir "$HOME/.mysh/extension"
fi
for f in ~/.mysh/extension/*;
do
case "${f}" in
*".sh") source "${f}";;
esac
done
# Checks vim plugins
# NOTE(review): 'eval' is unnecessary for these fixed commands; kept as-is.
if [ ! -d ~/.vim/plugged ]; then
echo "Installing VIM plugins..."
eval "vim +PlugUpdate +qall"
eval "python3 ~/.vim/plugged/youcompleteme/install.py --all"
fi
# Ensure the local overrides file exists so later sourcing never fails.
touch ~/.shrc.local
}
mysh_bootstrap
export XDG_CONFIG_HOME=~/.config
| true
|
3daff5e52f4f10f401872b855acb4a1a01b624bb
|
Shell
|
marcosbaroni/mkp-thesis
|
/exp/kpbt/plot-proof-steps.sh
|
UTF-8
| 1,105
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Plot KP proof/find step counts with gnuplot.
#   $1 - ';'-separated data file for the "proof" curve
#   $2 - ';'-separated data file for the "find" curve
# Produces proof.svg and find.svg in the current directory.
# (Assignments below do not word-split, so the unquoted $1/$2 are safe here;
# the values are interpolated into the gnuplot heredoc as-is.)
data1=$1
data2=$2
term=svg
gnuplot <<!
# PLOTING CURVE OF PROOF
set term $term
set datafile separator ";"
set output "proof.$term"
set style data linespoints
set grid
set logscale y
set title "KP Proof Steps"
set xlabel "N. of items"
set ylabel "Steps"
set yrange [1:1e+16]
plot "$data1" u 1:2 title "Avg. of 1000", 2**x t "2^x", 1.76**(x+3) t "1.76^(x+3)"
# PLOTING CURVE OF FINDING
# NOTE(review): this chart reuses the title "KP Proof Steps" — possibly
# intended to say "KP Find Steps".
set term $term
set datafile separator ";"
set output "find.$term"
set style data linespoints
set grid
set logscale y
set title "KP Proof Steps"
set xlabel "N. of items"
set ylabel "Steps"
set yrange [1:1e+16]
plot "$data2" u 1:2 title "Avg. of 1000", 2**x t "2^x", 1.55**(x-4) t "1.55^(x-4)"
!
# gnuplot <<!
# set terminal pngcairo transparent font "arial,10" size 600, 400
# set datafile separator ";"
# set output '$output'
# set key inside right top vertical Right noreverse enhanced autotitle box linecolor -1 linewidth 1.000
# set style data lines
# set title "KP Proof Steps"
# set xlabel "N. of items"
# set ylabel "Steps"
# set yrange [0:*]
# plot "$data" u 1:2:3 t "Power" w yerrorbars
# !
| true
|
5508f467409bb91c9cff8354358e39e71fbe02dc
|
Shell
|
mozhuli/sonobuoy-plugin-ab
|
/run_ab.sh
|
UTF-8
| 344
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Sonobuoy plugin driver: run ApacheBench against $NGINX_ENDPOINT, archive
# the results, and write the "done" marker file that sonobuoy polls for.
set -o errexit
set -o pipefail
set -o nounset

RESULTS_DIR="${RESULTS_DIR:-/tmp/results}"
# Be robust if the results dir was not pre-created by the framework.
mkdir -p "${RESULTS_DIR}"

# Give the nginx service a moment to become reachable.
sleep 10

# NGINX_ENDPOINT is required; nounset aborts with a clear error if unset.
# Quoting fixes breakage on paths/URLs containing whitespace.
/usr/bin/ab -n 90000 -c 50 "${NGINX_ENDPOINT}" >"${RESULTS_DIR}/ab"
echo "ab done"
cd "${RESULTS_DIR}"
# Glob is intentional: archive everything written into the results dir.
tar -czf e2e-ab.tar.gz *
# mark the done file as a termination notice.
echo -n "${RESULTS_DIR}/e2e-ab.tar.gz" > "${RESULTS_DIR}/done"
| true
|
5357e0ce908b597e1087e8c3f4ae788f73745def
|
Shell
|
pegasus-isi/pegasus
|
/share/pegasus/examples/bosco-shared-fs/hello.sh
|
UTF-8
| 531
| 3.625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Pegasus example job: verify the staged input exists, show it, emit a
# greeting on stdout, and produce the f.b output file declared in the DAX.
set -e

input=f.a

# fail fast if the workflow did not stage the input file
if [ ! -e "$input" ]; then
    echo "ERROR: input file $input does not exist" 1>&2
    exit 1
fi

# prove that we really received the staged input
cat "$input"

# a line on stdout for the job log
echo "Hello!"

# the DAX declares f.b as this job's output file
echo "Hello!" >f.b
| true
|
69f8a58911dc0324d6463192e36f3164bb60f2ef
|
Shell
|
schutm/wakala
|
/environments/vagrant/setup.sh
|
UTF-8
| 198
| 3.1875
| 3
|
[
"ISC"
] |
permissive
|
#!/bin/sh
# Provision the local Vagrant VM: reprovision in place if it is already
# running, otherwise boot it with provisioning enabled.
VAGRANTSTATUS=$(vagrant status)
# If vagrant is running already, reprovision it
# NOTE(review): 'egrep' is a deprecated alias of 'grep -E'; also this assumes
# the word "running" only appears in the status output when the VM is up —
# confirm across vagrant versions (e.g. states like "not created").
if echo "$VAGRANTSTATUS" | egrep -q "running" ; then
vagrant provision
else
vagrant up --provision
fi
| true
|
6d05f3ef9d02bf002b83b72661076c18e949699c
|
Shell
|
briannrmit/cpt264-assignment2
|
/menu_system
|
UTF-8
| 3,461
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# This is the Menu System for Part C of the assignment written by Brian Nguyen s3689020

# Absolute paths to the external tools used below (assignment style:
# do not rely on $PATH lookups).
PRINTF=/usr/bin/printf
LS=/bin/ls
# NOTE(review): assigning PWD shadows the shell's own $PWD (current working
# directory) variable; nothing here reads $PWD as a directory, but rename
# this if that ever changes.
PWD=/bin/pwd
SLEEP=/bin/sleep
# BUGFIX: menu() invokes $CLEAR but it was never defined, so the screen was
# never cleared; define it alongside the other tool variables.
CLEAR=/usr/bin/clear
# Main-loop sentinel: set to 1 to leave the menu.
QUIT=0
# Paint the top-level menu and leave the cursor after the prompt.
# Uses $CLEAR/$PRINTF (resolved at call time) like the rest of the script.
menu(){
  $CLEAR
  local bar='~~~~~~~~~~~~~~~~~~~~'
  $PRINTF '%s\n' \
    '' \
    "$bar" \
    ' User Menu System' \
    "$bar" \
    ' 1) Display basic information ' \
    '' \
    ' 2) Display basic information (Items that involve post-processing) ' \
    '' \
    ' 3) Find an item within system and apply an action (delete/print/print0/custom) ' \
    '' \
    ' 4) Basic profiler of the program you want ' \
    ''
  # prompt stays on the same line as the user's input
  $PRINTF '%s' 'Please enter [1-4] for the any of the options above, or Q to quit the menu:'
  return
}
# Prompt-and-dispatch loop for the basic information display (Part B Req1).
# Valid answers mem/disk/con/time are forwarded to ./display1; Q/q quits;
# anything else re-prompts.  CHOICE is intentionally left global, as before.
display1_cases(){
  local stop=0
  while [ "$stop" -eq 0 ]; do
    $PRINTF "What basic information would you like to display? Enter one of (mem/disk/con/time), or enter Q to quit \n"
    read -r CHOICE
    case "$CHOICE" in
      mem|disk|con|time)
        ./display1 "$CHOICE"
        $PRINTF "\n"
        ;;
      Q|q)
        stop=1
        ;;
      *)
        # original message ends with a literal backslash-n (plain echo)
        echo "Invalid option, please enter either mem/disk/con/time for display info or Q to quit display function\n"
        ;;
    esac
  done
}
# Prompt-and-dispatch loop for basic information with post-processing
# (Part B Req2).  Any combination of the letters n,c,p,f,o is passed to
# ./display2 as a single "-<letters>" flag; Q/q quits; anything else
# re-prompts.  CHOICE is intentionally left global, as before.
display2_cases(){
  local stop=0
  local rule='~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
  while [ "$stop" -eq 0 ]; do
    $PRINTF '%s\n' \
      "What basic information with post processing would you like to display?" \
      "$rule" \
      "Please enter options as a combination of [ncpfo] where:" \
      "$rule" \
      "n gives the number of CPU core on the system" \
      "c gives currenct process' priority" \
      "p gives the total number of processes running under current user" \
      "f gives the number of open file descriptors owned by the current user" \
      "o gives the max number of file descriptors that can be opened by a process" \
      "or enter Q to quit " \
      "$rule"
    read -r CHOICE
    # Q/q cannot match the [ncpfo]-only test below, so checking it first is
    # equivalent to the original branch order.
    if [ "$CHOICE" == "Q" ] || [ "$CHOICE" == "q" ]; then
      stop=1
    elif ! [[ "$CHOICE" =~ [^ncpfo] ]]; then
      # only strings made solely of {n,c,p,f,o} (or an empty line) get here
      $PRINTF "\n"
      ./display2 "-$CHOICE"
      $PRINTF "\n"
    else
      $PRINTF "\n"
      echo "Invalid option, please enter either options as a combination of [ncpfo] for display info or Q to quit display function"
      $PRINTF "\n"
    fi
  done
}
# Main interactive loop: paint the menu, read one answer, dispatch.
# Options 3 and 4 are placeholders (directory listing / joke message).
while [ $QUIT -eq 0 ] ; do
menu
read -r CHOICE
case $CHOICE in
1)
$PRINTF "\n"
$PRINTF "\n"
display1_cases
$PRINTF "\n"
$SLEEP 2
;;
2)
$PRINTF "\n"
$PRINTF "\n"
display2_cases
$PRINTF "\n"
$SLEEP 2
;;
3)
$PRINTF "\n"
$PRINTF "\n"
$LS
$SLEEP 2
;;
4)
$PRINTF "\n"
$PRINTF "\n"
$PRINTF "And that's goodnight from me\n"
$SLEEP 2
;;
[Qq])
QUIT=1;;
# NOTE(review): '?' matches any SINGLE character, so multi-character garbage
# (e.g. "12") matches no arm at all and silently redraws the menu.
?)
$PRINTF "\n"
$PRINTF "\n"
$PRINTF "Invalid option entered\n"
$PRINTF "Valid options are 1, 2, 3, 4, and Q to quit\n"
$SLEEP 2;;
esac
done
| true
|
c60cb81d6466b32c31f33994ebe70d8aefb54a89
|
Shell
|
fbricker/phpwiki-to-github
|
/phpwikiSctipts/wiki2github
|
UTF-8
| 3,079
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# mime2mdwn - Convert MIME format wiki export (developed for PhpWiki exports) to Markdown
#
# globals
debug=true
# the actual heavy lifting
# Convert one PhpWiki MIME-export file (path in $1) to Markdown, IN PLACE:
# the file is slurped into memory (CRs stripped via IFS), run through a
# series of perl substitutions, and written back over the original.
# NOTE(review): in_header, boundary, version and data_version are assigned
# but never used below — presumably leftovers from earlier header parsing.
function convert_mime_to_markdown() {
local in_header=false
local boundary='idsajhfkjafhskdhasjdf'
local version=0
local data=''
local data_version=0
# slurp the file; adding \r to IFS drops DOS line endings as we read
while IFS=$IFS$'\r' read -r line; do
data="${data}${line}"$'\n'
done < "$1"
#printf "DATA (VERSION %s; pre-markup conversion):\n%s" "$data_version" "$data"
# convert CamelCase (starting w/capital; e.g. "NewtonNewbieGuide") to wiki links (e.g. "[[NewtonNewbieGuide]]")
data=$(echo -n "$data" | perl -pe "s/(^|\b|_)((?<![\[|])[A-Z][a-z]+[A-Z][A-Za-z]+(?![\]|]))($|\b|_)/\1\[\[\2\]\]\3/g")
# convert non-URI links in square brackets (e.g. "[NewtonConnectivityCD]") to wiki links (e.g. "[[NewtonConnectivityCD]]"
#data=$(echo -n "$data" | sed -E "s/([^[])\[([^\s]+)\]([^]])/\1\[\[\2\]\]\3/g")
data=$(echo -n "$data" | perl -pe "s/((?<!\[)\[[ \/:_\-\p{L}0-9]+\](?!\]))/\[\1\]/g")
# convert non-URI, named links in square brackets (e.g. "[BluetoothConnection|UsingBluetoothIndex]") to Markdown link format (e.g. "[BluetoothConnection](/UsingBluetoothIndex)")
data=$(echo -n "$data" | perl -pe "s/(?<!\[)\[(.+)\s?\|\s?([ #\/:_\-\p{L}0-9]+)\](?!\])/[\1](\2)/g")
# convert URI, named links in square brackets (e.g. "[Newtontalk.net|http://www.newtontalk.net/]") to Markdown link format (e.g. "[Newtontalk.net](http://www.newtontalk.net/)")
data=$(echo -n "$data" | perl -pe "s/(?<!\[)\[(.+)\s?\|\s?([A-Za-z]+:(\/\/)?.+)\](?!\])/[\1](\2)/g")
# convert URI-only links in square brackets (e.g. "[http://tools.unna.org/glossary/]") to angle bracket format (e.g. "<http://tools.unna.org/glossary/>")
data=$(echo -n "$data" | perl -pe "s/(?<!\[)\[([A-Z-a-z]+:(\/\/)?.+)\](?!\])/<\1>/g")
# convert triple prime bold (e.g. "'''bold'''") to Markdown format (e.g. "__bold__")
data=$(echo -n "$data" | perl -pe "s/(?<!')'''(.+)'''(?!')/__\1__/g")
# convert double prime emphasis (e.g. "''emphasis''") to Markdown format (e.g. "_emphasis_")
data=$(echo -n "$data" | perl -pe "s/(?<!')''(.+)''(?!')/_\1_/g")
# convert headings (e.g. "!!Heading") to Markdown atk-style format (e.g. "## Heading")
data=$(echo -n "$data" | perl -pe "s/^!{1}([^!]+)$/# \1/g")
data=$(echo -n "$data" | perl -pe "s/^!{2}([^!]+)$/## \1/g")
data=$(echo -n "$data" | perl -pe "s/^!{3}([^!]+)$/### \1/g")
data=$(echo -n "$data" | perl -pe "s/^!{4}([^!]+)$/#### \1/g")
data=$(echo -n "$data" | perl -pe "s/^!{5}([^!]+)$/##### \1/g")
data=$(echo -n "$data" | perl -pe "s/^!{6}([^!]+)$/###### \1/g")
# printf "DATA (VERSION %s; post-markup conversion):\n%s" "$data_version" "$data"
# write the data back out to the file
echo -n "$data" > "$1"
}
# batch process a directory of files?
if [ -d "$1" ]; then
    # BUGFIX: quote the glob base so directories with spaces work.
    for file in "$1"/*; do
        # Only convert regular files; skip sub-directories.
        if [ ! -d "$file" ]; then
            convert_mime_to_markdown "$file"
        fi
    done
# or just a single file?
elif [ -e "$1" ]; then
    convert_mime_to_markdown "$1"
# if no file/directory, report on stderr and fail (the original printed the
# error to stdout and still exited 0)
else
    printf "ERROR! %s not found!\n" "$1" >&2
    exit 1
fi
| true
|
fd89f710de5ef03f6ffc1702df1b5f27e6ef9278
|
Shell
|
eklitzke/c.sh
|
/hello.sh
|
UTF-8
| 852
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Demo for ctypes.sh: compile C code from a heredoc into a shared object and
# call it from bash via dlopen/dlcall.
# where ctypes.sh is installed; you will likely have to change this
LD_LIBRARY_PATH=$HOME/local/lib
. ~/code/ctypes.sh/ctypes.sh
# compile stdin to a DSO
#
# build < c-source
# Compiles the C source read from stdin into a temporary shared object and
# prints the .so path on stdout; the intermediate .c file is removed.
function build {
    local cfile sofile
    cfile=$(mktemp /tmp/XXXXXX.c)
    sofile=$(mktemp /tmp/XXXXXX.so)
    # BUGFIX: copy stdin verbatim -- the original `while read; echo $line`
    # loop mangled leading whitespace, backslashes and glob characters in
    # the C source.
    cat >> "$cfile"
    cc -fPIC -shared "$cfile" -o "$sofile"
    rm -f "$cfile"
    echo "$sofile"
}
# our code: a hello-world function plus an inline-asm popcnt
sofile=$(build <<EOF
#include <stdio.h>
void hello_world(void) {
puts("hello world");
}
int popcnt(int num) {
int out;
__asm__("popcnt %1, %0"
:"=r"(out)
:"r"(num)
:"0"
);
return out;
}
EOF
)
# clean up when we're done
trap "rm -f $sofile" EXIT
# load the code
# NOTE(review): dlopen/dlcall are provided by the sourced ctypes.sh above.
dlopen $sofile
# print hello world
dlcall hello_world
# get the popcnt of 5
# -r int declares the return type, -n out stores the result in $out;
# the grep extracts the numeric part of ctypes.sh's "int:3"-style output.
dlcall -r int -n out popcnt 5
echo $out | egrep -o '[0-9]+'
| true
|
2771d8f85cdab8ccbdffbbad9a4790c138298d58
|
Shell
|
mayongji12/health_examination
|
/script/old/��Ӳ����Ϣ.txt
|
GB18030
| 2,497
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/ksh -eu
#-- Collect server hardware information (CPU, memory, NIC, disk I/O) into a
#-- combined report file. Originally commented in GB18030; the report labels
#-- below are left byte-for-byte as found.
# NOTE(review): HOME_OUT, HOME_LOG and SERIAL_NUM must be set in the
# environment -- with -eu an unset one aborts the script. Confirm the caller
# exports them.
THIS=`basename $0 .ksh`
RUN_DATE=`date '+%y%m%d'`
OUT_FILE=$HOME_OUT/${SERIAL_NUM}_${THIS}_${RUN_DATE}
LOG_FILE=$HOME_LOG/${SERIAL_NUM}_${THIS}_${RUN_DATE}.log
# Remove any earlier report/log for this run date so output starts clean.
rm -f ${OUT_FILE}*
rm -f ${LOG_FILE}*
#echo $THIS
#echo $RUN_TIME
#echo $OUT_FILE
#echo $LOG_FILE
echo "Now start to collect the hardware information of the server!"
#--CPU info: count of logical cores, model string, and word size
CORE_CNT=`cat /proc/cpuinfo | grep "model name" | wc -l`
CORE_VSN=`cat /proc/cpuinfo | grep "model name" | awk -F: '{print $2}' | sed 's/^ //' | tr -s ' ' | uniq`
LONG_BIT=`getconf LONG_BIT`
printf "#--CPU info\n" >> ${OUT_FILE}_cpuinfo.rpt
printf "CPUͺ : $CORE_VSN\n" >> ${OUT_FILE}_cpuinfo.rpt
printf "CPU : $CORE_CNT\n" >> ${OUT_FILE}_cpuinfo.rpt
printf "CPUλ : $LONG_BIT\n" >> ${OUT_FILE}_cpuinfo.rpt
#--Memory info: DIMM count, sizes and speeds via dmidecode (needs root)
MEM_CNT=`dmidecode | grep -P -A16 "Memory\s+Device" | grep Size | grep -v Range | wc -l`
#MEM_TYPE=`dmidecode | grep -P -A16 "Memory\s+Device" | grep "Type:" | awk -F: '{print $2}' | sed 's/^ //' | tr -s ' '|uniq`
MEM_SZE=`dmidecode | grep -P -A16 "Memory\s+Device" | grep Size | grep -v Range |awk -F: '{print $2}' | sed 's/^ //'`
MEM_SPD=`dmidecode | grep -P -A16 "Memory\s+Device" | grep Speed | awk -F: '{print $2}' | sed 's/^ //'`
printf "#--Memory info\n" >> ${OUT_FILE}_meminfo.rpt
printf "ڴ : $MEM_CNT\n" >> ${OUT_FILE}_meminfo.rpt
printf "ڴС : \n$MEM_SZE\n" >> ${OUT_FILE}_meminfo.rpt
printf "ڴƵ : \n$MEM_SPD\n" >> ${OUT_FILE}_meminfo.rpt
#--Network card info via kudzu (RHEL-era tool; may be absent on new distros)
NET_CNT=`kudzu --probe --class=network | grep desc | awk -F: '{print $2}' | sed 's/^ //' | sed 's/"//' | wc -l`
NET_VSN=`kudzu --probe --class=network | grep desc | awk -F: '{print $2}' | sed 's/^ //' | sed 's/"//g'`
printf "#--Network card info\n" >> ${OUT_FILE}_netinfo.rpt
printf " : $NET_CNT\n" >> ${OUT_FILE}_netinfo.rpt
printf "Ϣ :\n$NET_VSN\n" >> ${OUT_FILE}_netinfo.rpt
#--I/O info: cached/buffered read benchmark of the first disk
printf "#--I/O info\n" >> ${OUT_FILE}_hdiskinfo.rpt
#time dd if=/dev/zero of=/u3/wtest.out bs=8192 count=200000 >> ${OUT_FILE}_hdiskinfo.rpt 2>>$LOG_FILE
hdparm -Tt /dev/sda >> ${OUT_FILE}_hdiskinfo.rpt 2>>$LOG_FILE
#--Gather all the hardware info to generate the final report
for file in $(ls -t ${OUT_FILE}*)
do
cat $file >> ${OUT_FILE}.rpt
echo " " >> ${OUT_FILE}.rpt
done
printf "\n\nӲ :\t ${OUT_FILE}.rpt\n"
exit 0
| true
|
0c1f2cf268e4bf6d3d9460ba2f5b5669b1271d84
|
Shell
|
goldobin/trials
|
/nodes/redis-server.sh
|
UTF-8
| 1,589
| 3.84375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Install Redis from the chris-lea PPA and prepare a small multi-instance
# cluster: INSTANCES nodes on consecutive ports starting at START_PORT.

INSTANCES=3
START_PORT=7001
CONF_DIR=/etc/redis
RUN_DIR=/var/run
DB_DIR=/var/lib
LOG_DIR=/var/log/redis
INIT_SCRIPT=/etc/init.d/redis-server

# Redis Installation
# BUGFIX: this heading was a bare "Redis Installation" line without the '#',
# so the shell tried to execute it as a command.
add-apt-repository -y ppa:chris-lea/redis-server
apt-get -y update
# apt-get -y upgrade
apt-get -y install redis-server redis-tools ruby
gem install redis
# NOTE(review): redis-trib.rb is fetched over plain http -- consider https.
curl http://download.redis.io/redis-stable/src/redis-trib.rb > /usr/bin/redis-trib.rb
chmod 755 /usr/bin/redis-trib.rb

# Raise open-file limits for the redis user.
/bin/cat <<EOM >/etc/security/limits.d/redis.conf
redis soft nofile 1024
redis hard nofile 102400
EOM
# Nodes configuration
#
# generate_node_files PORT
# Writes the config file, data directory and SysV init script for the Redis
# node listening on PORT. Relies on the CONF_DIR/RUN_DIR/DB_DIR/LOG_DIR/
# INIT_SCRIPT globals defined at the top of the script.
function generate_node_files() {
    local PORT="$1"
    # BUGFIX: the original derived every path from the caller's global
    # NODE_PORT instead of the PORT argument, so the function only worked
    # when the caller happened to set NODE_PORT to the same value.
    local NODE_CONF=${CONF_DIR}/redis-${PORT}.conf
    local NODE_PID=${RUN_DIR}/redis-server-${PORT}.pid
    local NODE_DB_DIR=${DB_DIR}/redis-${PORT}
    local NODE_LOG=${LOG_DIR}/redis-server-${PORT}.log

    mkdir -p "${NODE_DB_DIR}"
    chown redis:redis "${NODE_DB_DIR}"

    # Per-node Redis configuration.
    /bin/cat <<EOM >"${NODE_CONF}"
daemonize yes
cluster-enabled yes
bind 0.0.0.0
port ${PORT}
pidfile ${NODE_PID}
dir ${NODE_DB_DIR}
loglevel notice
logfile ${NODE_LOG}
EOM

    # Escape '/' so the paths can be substituted into the sed script below.
    local NODE_CONF_ESCAPED=${NODE_CONF//\//\\/}
    local NODE_PID_ESCAPED=${NODE_PID//\//\\/}
    local NODE_INIT_SCRIPT=${INIT_SCRIPT}-${PORT}
    # Derive a per-node init script from the stock one by rewriting its
    # DAEMON_ARGS and PIDFILE lines.
    sed -e "s/DAEMON_ARGS=\/etc\/redis\/redis.conf/DAEMON_ARGS=${NODE_CONF_ESCAPED}/g;s/PIDFILE=\$RUNDIR\/redis-server.pid/PIDFILE=${NODE_PID_ESCAPED}/g" "${INIT_SCRIPT}" > "${NODE_INIT_SCRIPT}"
    chmod 755 "${NODE_INIT_SCRIPT}"
}
# Generate files for every node; ports are consecutive from START_PORT.
for (( i = 1; i <= INSTANCES; i++ )); do
  NODE_PORT="$((START_PORT + i - 1))"
  generate_node_files ${NODE_PORT}
done
| true
|
0ee324f894dfb8c39135941d0de098b04b66e630
|
Shell
|
sproul/multivcs_query
|
/src/vcs_scripts/cache.ls
|
UTF-8
| 662
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# List cached command results under $TMP whose recorded command line matches
# the given search pattern(s); with no arguments, match everything.

search_args=$*
if [ -z "$search_args" ]; then
    search_args=.
fi

# BUGFIX: iterate with a glob instead of parsing `ls | sed` output, which
# broke on unusual file names; also quote all path expansions.
for cmd_file in "$TMP"/cache.*.cmd; do
    [ -e "$cmd_file" ] || continue   # no cache entries at all
    cf=${cmd_file%.cmd}
    # $search_args is deliberately unquoted: each word is a separate
    # argument to grepm.
    if grepm $search_args < "$cf.cmd"; then
        cat "$cf"
        echo EOD
        echo "cache.get $cf"
        echo '----------------------------------------------------------------------------------------------'
        echo checking "$cf.exit_code"..................
        if [ -f "$cf.exit_code" ]; then
            honkat "$cf.exit_code"
        fi
    fi
done
exit

# Notes kept from the original script (never executed -- after `exit`):
cd $dp/git/change_tracker/src/test/cache_seed/
$dp/bin/cache.ls clone
| true
|
1b170489f1973b0d388fcea53f09988b4842f504
|
Shell
|
xiaodongzhang1025/faceswap-simple
|
/SimpleSteps/extract.sh
|
UTF-8
| 223
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
# Run faceswap face extraction on a directory of photos; output goes to
# "<photo_dir>_extract" and any alignment (.fsa) files are copied along.
# BUGFIX: the shebang was "#/bin/sh" (missing '!'), "==" is a bashism under
# /bin/sh, and "exit -1" is not a valid POSIX exit status.
if [ "$1" = "" ]; then
    echo "Usage: $0 photo_dir"
    exit 1
fi
echo extract photos from "$1" to "$1_extract"
/D/Python38/python ../faceswap.py extract -i "$1" -o "$1_extract"
cp "$1"/*.fsa "$1_extract"/
| true
|
de68f95f339cb9c042a508d32b770254fa8d0074
|
Shell
|
caudebert/seqomm
|
/clean.sh
|
UTF-8
| 580
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
if (( $# != 1 )); then
echo "Illegal number of arguments. There should be 1 argument: case name."
exit 1
fi
caseName=$1
rm -f ./pdf.txt *.out
rm -fr ./outputs/$1/cache ./outputs/$1/DOFSelection ./outputs/$1/PDFs
rm -f ./data/$1/OMM.in ./data/$1/selectedDOFs.txt
read -p "Delete also derivatives? (y/n)" reply
echo
if [[ $reply =~ ^[Yy]$ ]]
then
rm -fr ./outputs/$1/derivatives
fi
read -p "Delete also pre-processing? (y/n)" reply
echo
if [[ $reply =~ ^[Yy]$ ]]
then
rm -fr ./data/$1/measurements/noise* ./data/$1/simulations/normalized
fi
| true
|
71bbcf9276e730eba022fb9016699fb9f05622dc
|
Shell
|
pratikkshaa/pratiksha
|
/Desktop/ao.sh
|
UTF-8
| 228
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
#ip address >>ip.txt
#grep "inet addr">>ip.txt
#File="/Desktop/myfile.txt"
#echo "--file contents $File--"
#cat $File
#ip route get 8.8.8.8 | tr -s ' '|cut -d ' ' -f7
cat >>myfile.txt <<EOF
this is text file
...
EOF
| true
|
1c1e01b2bff20232dbf8093b811bc9c48acfe7d3
|
Shell
|
vrangasam/cloud-terasort
|
/scripts/spark/5.push-files-to-hosts-and-update-them.sh
|
UTF-8
| 1,534
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Push Hadoop/Spark configuration to every host listed in ./dns, configure
# each node, then bootstrap the Spark master (the first host in ./dns).
# $1 is the SSH identity (private key) file used for scp/ssh.
# BUGFIX throughout: use `read -r` and quote "$1"/"$line" so key paths and
# host names survive word splitting.

# Copy the common config files and helper scripts to every host.
while read -r line; do
    echo "Sending file to $line"
    scp -i "$1" ./hosts "ubuntu@$line:/home/ubuntu/"
    scp -i "$1" ./slaves "ubuntu@$line:/home/ubuntu/"
    scp -i "$1" ./masters "ubuntu@$line:/home/ubuntu/"
    scp -i "$1" ./xmls/core-site.xml "ubuntu@$line:/home/ubuntu/"
    scp -i "$1" ./xmls/hdfs-site.xml "ubuntu@$line:/home/ubuntu/"
    scp -i "$1" ./xmls/mapred-site.xml "ubuntu@$line:/home/ubuntu/"
    scp -i "$1" ./xmls/yarn-site.xml "ubuntu@$line:/home/ubuntu/"
    scp -i "$1" ./change-hostfiles.sh "ubuntu@$line:/home/ubuntu/"
    scp -i "$1" ./generate-data-and-put-in-hdfs.sh "ubuntu@$line:/home/ubuntu/"
    scp -i "$1" ./gensort "ubuntu@$line:/home/ubuntu/"
done < dns

echo "Updating host files for spark multinode setup"
while read -r line; do
    ssh -i "$1" -n "ubuntu@$line" "/home/ubuntu/change-hostfiles.sh"
done < dns

# Generate a per-host Spark environment file and install it on each node.
while read -r line; do
    # Write environ.sh directly with a heredoc; the original truncated the
    # file and appended via `ed`, which is fragile and not always installed.
    cat > ./environ.sh <<TEXT
export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
export SPARK_PUBLIC_DNS=$line
export SPARK_WORKER_CORES=32
TEXT
    cat ./environ.sh
    scp -i "$1" "environ.sh" "ubuntu@$line:/home/ubuntu/"
    scp -i "$1" ./"update-spark-slave.sh" "ubuntu@$line:/home/ubuntu/"
    ssh -i "$1" -n "ubuntu@$line" "sudo /home/ubuntu/update-spark-slave.sh"
done < dns

# Bootstrap the master: only the first host in ./dns is used (break).
while read -r line; do
    scp -i "$1" ./slaves-spark "ubuntu@$line:/home/ubuntu/"
    scp -i "$1" ./"update-for-spark-master.sh" "ubuntu@$line:/home/ubuntu/"
    ssh -i "$1" -n "ubuntu@$line" "/home/ubuntu/generate-data-and-put-in-hdfs.sh"
    ssh -i "$1" -n "ubuntu@$line" "sudo /home/ubuntu/update-for-spark-master.sh"
    break
done < dns
| true
|
ef59cbad9caab2432a499277f96b6752ccb74e3d
|
Shell
|
theathos/synity-Bash-Aliases
|
/functions/editMySQLConfig.sh
|
UTF-8
| 692
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# ~/.bashrc
# Open the first my.cnf found in the usual locations with sedit; if none is
# found, fall back to searching the locate database.
mysqlconfig() {
  local candidate
  for candidate in /etc/my.cnf /etc/mysql/my.cnf /usr/local/etc/my.cnf /usr/bin/mysql/my.cnf ~/my.cnf ~/.my.cnf; do
    if [ -f "$candidate" ]; then
      sedit "$candidate"
      return
    fi
  done
  echo "Error: my.cnf file could not be found."
  echo "Searching for possible locations:"
  sudo updatedb && locate my.cnf
}
| true
|
65aada965a0ee6b4f7894076671dd52e31242211
|
Shell
|
kamynina/pgday-example
|
/run-coverage.sh
|
UTF-8
| 714
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# End-to-end coverage run: start Postgres, wait for it, deploy the schema,
# trace stored procedures, run the test suite, then produce the report.
docker-compose up -d postgres
# wait until postgres is up and accepting connections (poll with SELECT 1)
until
docker-compose run --rm -e 'PGPASSWORD=example' deploy_api psql -U example -h postgres -d example -c "select 1";
do
sleep 1;
done
# deploy schema version 1.1 into the example database
docker-compose run --rm deploy_api bash /deploy.sh -h postgres -d example -u example -w example -v 1.1 -i _init.sql
# trace stored procedures
docker-compose run coverage -c trace -h postgres -u example -d example -w example
# run tests; trace output is captured for the report step
# @TODO filter stdin
docker-compose run --rm test -h postgres -u example -d example -w example -i /t/api/_init.sql > result/trace.txt
# generate report
docker-compose run coverage -c report -h postgres -u example -d example -w example
|
873c49470e8a7ca6c7febaac5edf891925faa078
|
Shell
|
spicy202110/spicy_202006
|
/docs/blog/2021/10/14/20200613_8p6sv6vxlgk/20200613_8P6Sv6VXLGk.info.json.sh2
|
UTF-8
| 4,056
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Rebuild the Hugo page bundle for video 20200613_8P6Sv6VXLGk: remove
# leftover intermediate audio/video files, move the downloaded assets into a
# fresh *.info_dir bundle, then regenerate its _index.md front matter.
# NOTE(review): the heredoc below intentionally expands $(cat ...|wc -c), so
# the mp4 files must already be inside the bundle when it runs.
rm -f \
20200613_8P6Sv6VXLGk.info.json.vo.* \
20200613_8P6Sv6VXLGk.info.json.ao.* \
20200613_8P6Sv6VXLGk.info.json.bo.* \
20200613_8P6Sv6VXLGk.info.json.wav
# Recreate the bundle directory from scratch and move all assets into it.
rm -fr 20200613_8P6Sv6VXLGk.info_dir
mkdir -p 20200613_8P6Sv6VXLGk.info_dir
mv 20200613_8P6Sv6VXLGk.info.json 20200613_8P6Sv6VXLGk.info.json.* 20200613_8P6Sv6VXLGk.info_dir/
# Rename the thumbnail so it pairs with the .mp4 file name.
mv 20200613_8P6Sv6VXLGk.info_dir/20200613_8P6Sv6VXLGk.info.json.jpg 20200613_8P6Sv6VXLGk.info_dir/20200613_8P6Sv6VXLGk.info.json.mp4.jpg
# Emit the Hugo front matter + shortcodes for this bundle.
cat > 20200613_8P6Sv6VXLGk.info_dir/_index.md << EOF3
+++
title = " 20200613_8P6Sv6VXLGk 200:(有字幕)美国的自由灯塔对战美国的死板的民主,哪个会胜出? "
description = " #自由#民主#美国_本期节目是订阅朋友们要求许久的,加上这两天一些海外民运人对我不高兴,说我没逻辑,说我人在中国墙内不懂美国之类的。逼我不如做一期先,如果还有问题,我就在下周再做。没啥问题的话,就不做后续了。_自我介绍_姓名:看左上方_性别:当然不是女的_生日:365天里的一天_属相:非常荣幸属凤凰_学历 :反正不是博士_职业 :元朝不如妓女_喜欢滴名人:老子!_最喜欢滴字:拆_最喜欢滴事:故居上,自焚!_三观加一观_人生观:一直以为,吹牛是一门很有前途的职业!_价值观:喜欢看脑子被门夹着的人,喜欢看嘴巴比脸大的人!_世界观:老子是思想家,不信你问老子!_悲观:人这一生能搞出些什么?不外乎有二:男人扛着女人、钱和名被一根稻草压死,而女人,往风景处一站,即可笑傲江湖!_联系方式_我的邮箱是wto5185188@gmail(建议墙内朋友不要评论过火的话,给我写邮件才安全)_我的腿腿儿账号是:wto518_脸书ID叫:Diamond John "
weight = 20
+++
{{< mymp4 mp4="20200613_8P6Sv6VXLGk.info.json.mp4"
text="len $(cat 20200613_8P6Sv6VXLGk.info_dir/20200613_8P6Sv6VXLGk.info.json.mp4|wc -c)"
>}}
{{< mymp4x mp4x="20200613_8P6Sv6VXLGk.info.json.25k.mp4"
text="len $(cat 20200613_8P6Sv6VXLGk.info_dir/20200613_8P6Sv6VXLGk.info.json.25k.mp4|wc -c)"
>}}
{{< mymp4x mp4x="20200613_8P6Sv6VXLGk.info.json.48k.mp4"
text="len $(cat 20200613_8P6Sv6VXLGk.info_dir/20200613_8P6Sv6VXLGk.info.json.48k.mp4|wc -c)"
>}}
{{< mydiv text="#自由#民主#美国_本期节目是订阅朋友们要求许久的,加上这两天一些海外民运人对我不高兴,说我没逻辑,说我人在中国墙内不懂美国之类的。逼我不如做一期先,如果还有问题,我就在下周再做。没啥问题的话,就不做后续了。_自我介绍_姓名:看左上方_性别:当然不是女的_生日:365天里的一天_属相:非常荣幸属凤凰_学历 :反正不是博士_职业 :元朝不如妓女_喜欢滴名人:老子!_最喜欢滴字:拆_最喜欢滴事:故居上,自焚!_三观加一观_人生观:一直以为,吹牛是一门很有前途的职业!_价值观:喜欢看脑子被门夹着的人,喜欢看嘴巴比脸大的人!_世界观:老子是思想家,不信你问老子!_悲观:人这一生能搞出些什么?不外乎有二:男人扛着女人、钱和名被一根稻草压死,而女人,往风景处一站,即可笑傲江湖!_联系方式_我的邮箱是wto5185188@gmail(建议墙内朋友不要评论过火的话,给我写邮件才安全)_我的腿腿儿账号是:wto518_脸书ID叫:Diamond John" >}}
<br>
{{< mydiv link="https://www.youtube.com/watch?v=8P6Sv6VXLGk" >}}
<br>
请大家传播时,不需要传播文件本身,<br>
原因是:一旦传播过大东西(例如,图片,文件),<br>
就会触发检查机制。<br>
我不知道检查机制的触发条件。<br>
但是我知道,不会说你传一个没有敏感词的网络地址都检查,<br>
否则,检查员得累死。<br><br>
直接转发网址就可以了:<br>
原因是,这是程序员网站,<br>
共匪不敢封锁,墙内可以直接下载。
EOF3
| true
|
f3862a276919e08b73d22fd2ac0b43993f6c206e
|
Shell
|
govCMS/ipfa-theme
|
/build
|
UTF-8
| 426
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Rebuild the IPFA style guide: wipe the previous output, recompile the CSS
# with Compass, then regenerate the living style guide with kss-node.

# Clear output from earlier builds.
printf '%s' "Deleting previous styleguide..."
rm -rf public *.html
printf '%s\n' " done."

# Compile Sass to CSS via Compass.
compass compile

# Generate the styleguide:
# kss-node [source files to parse] [destination folder] --template [location of template files]
kss-node ./sass styleguide --template ./custom-template --css ./../css/ipfa_styles.css
| true
|
5898a0b7675fb79b8bfbe51af7739aab0e8f9933
|
Shell
|
rferri-gr8/test-versioning
|
/.scripts/tag_release.sh
|
UTF-8
| 2,757
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Tag a release: check out the remote release/<version> branch, commit a
# version bump matching the release tag, push the tag, then bump the branch
# to the next "-rc" pre-release version.
# Usage: tag_release.sh -v <version> [-r <remote>] [-i]
# abort as soon as any command fails (the original comment here had the
# meaning of `set -e` backwards)
set -e
remote=origin
release_branch_prefix=release
status_check=1
version=""
# NOTE(review): the optstring has no leading ':' so getopts prints its own
# errors and the ':' case below is unreachable; '-d' is accepted by the
# optstring but never handled -- confirm both are intentional.
while getopts "v:r:di" opt; do
case $opt in
v)
version="$OPTARG"
;;
r)
remote="$OPTARG"
;;
i)
status_check=0
;;
\?)
>&2 echo "Invalid option: -$OPTARG"
exit 1
;;
:)
>&2 echo "Option -$OPTARG requires an argument."
exit 1
;;
esac
done
# Validate required options, reporting all problems before exiting.
options_are_valid=1
if [ "$version" = "" ]; then
options_are_valid=0
>&2 echo "Missing version number (-v)"
fi
if [ "$remote" = "" ]; then
options_are_valid=0
>&2 echo "Missing remote name (-r)"
fi
if [ $options_are_valid -eq 0 ]; then
>&2 echo "Please try command again with the correct option(s)"
exit 1
fi
# Refuse to run with a dirty working tree unless -i was passed.
if [ $status_check -gt 0 ]; then
git_status=$(git status --porcelain)
if [ -n "$git_status" ]; then
>&2 echo "Please clean your working directory and try command again."
>&2 echo "To skip this check, pass the -i option."
>&2 echo ""
>&2 echo "The following files have been changed or added to your working directory:"
>&2 echo "$git_status"
exit 1
fi
fi
release_branch_name="$release_branch_prefix/$version"
# fetch latest from remote
git fetch $remote
# checkout the latest on trunk (detached HEAD at the remote release branch)
git checkout $remote/$release_branch_name
# Extract the current version string from package.json.
# NOTE(review): this grabs the first line containing "version" -- it will
# misfire if another key matches first; consider jq.
release_branch_current_version=$(cat package.json \
| grep version \
| head -1 \
| awk -F: '{ print $2 }' \
| sed 's/[",]//g' \
| tr -d '[[:space:]]')
echo "Current release branch version: $release_branch_current_version"
# Strip any pre-release suffix ("-rc", etc.) to get the tag version.
tag_version=$(echo $release_branch_current_version | sed 's/-.*//')
tag_version="v$tag_version"
echo "Release tag version: $tag_version"
# bump to the release tag version
npm --no-git-tag-version version ${tag_version#v}
# commit the version bump
git add package.json
git add npm-shrinkwrap.json
git commit -m "Bump version number to $tag_version"
git push $remote HEAD:refs/heads/$release_branch_name
git tag $tag_version $remote/$release_branch_name > /dev/null
echo "Pushing tag $tag_version to $remote"
git push $remote $tag_version
# have npm bump the minor version number w/out creating a git tag
release_branch_new_version=$(npm --no-git-tag-version version prepatch)
# hack: old version of npm version doesn't allow --preid=snapshot arg?
# NOTE(review): "::-2" drops the last two characters of npm's output
# (presumably the ".0"-style prerelease suffix) -- confirm against the npm
# version in use; a different output format would corrupt the version.
release_branch_new_version=${release_branch_new_version::-2}
release_branch_new_version="$release_branch_new_version-rc"
echo "New release branch version: $release_branch_new_version"
npm --no-git-tag-version version ${release_branch_new_version#v}
# commit the version bump
git add package.json
git add npm-shrinkwrap.json
git commit -m "Bump version number to $release_branch_new_version"
git push $remote HEAD:refs/heads/$release_branch_name
| true
|
2306123c4443539ba2ecd710c55ad1dd06b675aa
|
Shell
|
smelnik84/temp
|
/repo-full-copy.sh
|
UTF-8
| 490
| 2.515625
| 3
|
[] |
no_license
|
# Clone the source repository without a working tree (--bare)
git clone --bare https://github.com/exampleuser/old-repository.git
cd old-repository.git
# Mirror-push (copies every branch and tag) into the new repository
git push --mirror https://github.com/exampleuser/new-repository.git
cd ..
# Remove the local bare clone
rm -rf old-repository.git
| true
|
4e63d8682e5ffbb2ab90aefbf2b4552e385a170e
|
Shell
|
manasij7479/z3-partial-order
|
/utils/run-clause-tests.sh
|
UTF-8
| 1,132
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Reads configs from a file ($1); each non-comment line describes one test
# case and its 5th field is the input file name. Runs the new and old z3
# builds on each case and appends wall-clock timings to tests/result.
# num_vars num_clauses neg_prob clause_size name
make
# make -C ../ release
mkdir -p tests
# rm -rf tests/*
cp $1 tests/
cd tests
touch result
# Cap each solver run at 60s of CPU time.
ulimit -t 60
# NOTE(review): with TIMEFORMAT='%3R' the bash `time` keyword prints only
# the elapsed seconds, so `grep 'real'` below likely matches nothing and
# $t stays empty -- confirm the timing lines in `result` are populated.
TIMEFORMAT='%3R'
while read p; do
filename=`echo $p | awk '{print $5}'`
# Skip comment lines in the config file.
if [[ "${p:0:1}" == "#" ]]; then
continue
fi
# echo $p
echo $filename
# ../gen-targetted-clauses $p
t=`(time ../../z3.out.release.new $filename) 2>&1 >/dev/null | grep 'real' | awk '{print $2}'`
echo "NEW " $t $filename >> result
echo $filename
# ../gen-targetted-clauses $p
t=`(time ../../z3.out.release.old $filename) 2>&1 >/dev/null | grep 'real' | awk '{print $2}'`
echo "OLD " $t $filename >> result
# cp $filename po-$filename
# ../gen-targetted-clauses $p "lo"
# t=`(time ../../z3.out.release $filename) 2>&1 >/dev/null | grep 'real' | awk '{print $2}'`
# echo "LO: " $t $filename >> result
# cp $filename lo-$filename
# ../gen-targetted-clauses $p "idl"
# t=`(time ../../z3.out.release $filename) 2>&1 >/dev/null | grep 'real' | awk '{print $2}'`
# echo "IDL: " $t $filename >> result
done < `basename $1`
| true
|
766e3ce40ad28bae918a5890204fcfd9c3571040
|
Shell
|
samsarahq/git-delete-lock-buildkite-plugin
|
/hooks/post-checkout
|
UTF-8
| 224
| 3.453125
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Buildkite post-checkout hook: remove a stale .git/index.lock left behind
# by an interrupted checkout so subsequent git commands do not fail.
set -eu -o pipefail

echo "Current directory:"
pwd

lock_file=.git/index.lock
if [ -f "$lock_file" ]; then
  echo "$lock_file exists - deleting"
  rm -f "$lock_file"
else
  echo "$lock_file does not exist - not deleting"
fi
| true
|
0b2fd0b71ba6f591960fc85a55a323d58351d527
|
Shell
|
just-ai/jaicf-jaicp-spring-template
|
/docker/app/run.sh
|
UTF-8
| 2,583
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entry point: assemble JVM/JMX/debug/application options and
# launch the JAICF Spring Boot jar. Defaults can be overridden via the
# optional /etc/jaicf/env.cfg sourced below.
SERVICE_HOME=/opt/jaicf
SERVICE_CONF_DIR=$SERVICE_HOME/conf
SERVICE_OVERRIDING_DIR=/etc/jaicf
SERVICE_ENV_CFG=/etc/jaicf/env.cfg
SERVICE_LOG_DIR=/var/log/jaicf
SERVICE_TMP_DIR=/tmp
JAR_FILE=$SERVICE_HOME/app.jar
#set default service parameters
SERVICE_JAVA_XMX=512m
SERVICE_JAVA_MAX_PERM_SIZE=256m
# ------ customServiceInitScript starts
# ------ customServiceInitScript ends
# if you want to access jmx interface outside the host then put next line into .cfg file
#PUBLIC_IP=`wget http://ipinfo.io/ip -qO -`
PUBLIC_IP=localhost
PORT=8080
JMX_PORT=10011
RMI_PORT=10012
JDWP_PORT=10013
LOG_CONFIG=
# Prefer an overriding logback.xml from /etc/jaicf when present.
LOGGING_CFG=$SERVICE_CONF_DIR/logback.xml
if [ -f $SERVICE_OVERRIDING_DIR/logback.xml ]; then
LOGGING_CFG=$SERVICE_OVERRIDING_DIR/logback.xml
fi
# NOTE(review): -Dlogging.config is only set when the *default* conf-dir
# logback.xml exists, even if the overriding one was chosen above --
# confirm that is intentional.
if [ -f $SERVICE_CONF_DIR/logback.xml ]; then
LOG_CONFIG="-Dlogging.config=$LOGGING_CFG"
fi
# import overridden definitions from SERVICE_ENV_CFG
if [ -f $SERVICE_ENV_CFG ]; then
. $SERVICE_ENV_CFG
fi
# Remote JMX without authentication/SSL -- intended for localhost only
# (PUBLIC_IP defaults to localhost above).
JMX_OPTS="-Dcom.sun.management.jmxremote=true \
-Djava.rmi.server.hostname=$PUBLIC_IP \
-Dcom.sun.management.jmxremote.port=$JMX_PORT \
-Dcom.sun.management.jmxremote.rmi.port=$RMI_PORT \
-Dcom.sun.management.jmxremote.authenticate=false \
-Dcom.sun.management.jmxremote.ssl=false"
# Remote debugger (JDWP) listening on JDWP_PORT, non-suspending.
JDWP_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=$JDWP_PORT"
MEM_OPTS="-Xms$SERVICE_JAVA_XMX -Xmx$SERVICE_JAVA_XMX -XX:MaxMetaspaceSize=$SERVICE_JAVA_MAX_PERM_SIZE"
ADDITIONAL_JVM_OPTS=""
DEFAULT_JVM_OPTS="-XX:+UseCompressedOops\
 -XX:-OmitStackTraceInFastThrow\
 -XX:+ExitOnOutOfMemoryError\
 -XX:+DoEscapeAnalysis\
 -XX:AutoBoxCacheMax=10000\
 -XX:+AlwaysPreTouch\
 -XX:+UseG1GC\
 -XX:+DisableExplicitGC\
 -XX:+HeapDumpOnOutOfMemoryError"
JVM_OPTS="$ADDITIONAL_JVM_OPTS $DEFAULT_JVM_OPTS $JMX_OPTS $JDWP_OPTS $MEM_OPTS"
JAVA_ENV_OPTS="-Djava.io.tmpdir=$SERVICE_TMP_DIR\
 -Dfile.encoding=UTF-8\
 -Duser.timezone=UTC"
# Spring config search order: bundled -> conf dir -> zone override -> /etc override.
APP_OPTS="-Dlog.dir=$SERVICE_LOG_DIR\
 $LOG_CONFIG\
 -Dspring.config.location=classpath:/application.yml,optional:$SERVICE_CONF_DIR/application.yml,optional:$SERVICE_OVERRIDING_DIR/zone.application.yml,optional:$SERVICE_OVERRIDING_DIR/application.yml\
 -Djustai.config.location=$SERVICE_CONF_DIR\
 -DPORT=$PORT"
ADDITIONAL_APP_OPTS=""
JAVA_ARGS="$JVM_OPTS $JAVA_ENV_OPTS $APP_OPTS $ADDITIONAL_APP_OPTS"
java -jar $JAVA_ARGS $JAR_FILE
| true
|
46e9887bb27f41ff1f7de87cc31be76de8dbc46e
|
Shell
|
faiden/dotfiles
|
/.bin/mpdSinkToggle.sh
|
UTF-8
| 550
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Toggle the "Pulseaudio sink" output of a remote MPD server and show a
# desktop notification (dunstify) with the result.

IP="192.168.0.3"
PORT=6600

SINK=$(mpc -h $IP outputs)
# Trim the mpc output down to just "enabled"/"disabled". The parentheses in
# the pattern are matched literally (extglob is not enabled).
SINK=${SINK##*(Pulseaudio sink) is }
SINK=${SINK%%Output*}
SINK=${SINK/$'\n'/}

case $SINK in
enabled)
	# Sink currently on -> switch it off.
	# BUGFIX: reuse $IP/$PORT instead of repeating the hard-coded address
	# (the original also mislabelled this branch "Enable the raspi sink").
	mpc -p $PORT -h $IP disable "Pulseaudio sink"
	dunstify --replace=1338 -a notify-send "mpd sink:" "Off"
	;;
disabled)
	# Sink currently off -> switch it on.
	mpc -p $PORT -h $IP enable "Pulseaudio sink"
	dunstify --replace=1338 -a notify-send "mpd sink:" "On"
	;;
*)
	# mpc failed or returned something unexpected (server unreachable).
	dunstify --replace=1338 -a notify-send "mpd error:" "Connection refused"
	;;
esac
| true
|
a57d78c3d3c145a876b95f7e2e97f8dfa95d8d97
|
Shell
|
isabella232/earthquake-design-ws
|
/z_update_postgis.sh
|
UTF-8
| 133
| 2.90625
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
#!/bin/bash -e
# Run update-postgis.sh if installed: prefer the one on PATH, falling back
# to /usr/local/bin. A missing script is not an error.
# BUGFIX: quote "$update" so the -x test and the invocation cannot word-split;
# use the `command -v` builtin instead of the external `which`.
update=$(command -v update-postgis.sh || echo '/usr/local/bin/update-postgis.sh');
if [ -x "$update" ]; then
  "$update";
fi
| true
|
8c7e6b18497a34705f6a5654faa51a643bcfe0f9
|
Shell
|
wpcampus/wpcampus-pantheon
|
/scripts/code-deploy.sh
|
UTF-8
| 1,083
| 4.15625
| 4
|
[] |
no_license
|
###
# Deploy code to a Pantheon environment via Terminus, then clear its cache.
#
# This script requires:
# - Access to the site's Pantheon Dashboard
# - Terminus, Pantheon's CLI (https://pantheon.io/docs/terminus)
# - An .env file which has your Pantheon email.
#
# See the README for more information.
###
# Make sure we have all the environment information we need.
# Defines the following variables that we need:
# - TERMINUS_BINARY
# - SITE_PATH
source ./env-setup.sh
# Note is the second argument received from the command.
# NOTE(review): $1 (the environment) is presumably consumed by env-setup.sh
# (which defines ENV_NAME/ORG_LABEL) -- confirm; only one `shift` follows.
NOTE=$2
shift
# Confirm we have a note.
if [[ -z "${NOTE}" ]]; then
printf "\nA note describing the deploy is required to run this script.\n\nCommand: %s [environment] \"I'm a note about the deploy\"" "$0"
printf "\n\nExample: %s %s \"Updating plugins\"\n\n" "$0" "${ENV_NAME}"
exit 1
fi
# Pull in the functions we need (confirm_message, deploy, clear_cache).
source ./bash-functions.sh
source ./terminus-functions.sh
printf "\n"
# Ask for confirmation before touching the environment.
confirm_message "Are you sure you want to deploy to the ${ORG_LABEL} ${ENV_NAME} environment?"
printf "\n"
deploy "${SITE_PATH}" "${NOTE}"
printf "\n"
clear_cache "${SITE_PATH}"
printf "\nDone!\n\n"
|
bc9d855a8ca1fa07280564cd5ca8c022749ef7d1
|
Shell
|
blowyourheart/myconfig
|
/bin/free.sh
|
UTF-8
| 2,144
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/sh
# Print Linux `free`-style memory statistics on macOS (Darwin) and FreeBSD,
# assembling the numbers from sysctl/vm_stat.
# Determine OS
uname=$(uname)
# Build stats
if [ $uname = 'Darwin' ]; then
escecho='echo'
cachename='speculative'
# Get total/swap stats from sysctl
totalMem=$(sysctl -n hw.memsize)
# vm.swapusage reports "total = XM used = YM free = ZM"; strip the M units.
allSwap=$(sysctl -n vm.swapusage | sed 's/M//')
# Convert "<int>.<frac>" megabytes to bytes.
# NOTE(review): the fractional multiplier 10486 (~1048576/100) only works
# for two fractional digits -- confirm vm.swapusage always prints two.
swapTotal=$(echo $allSwap | awk '{ print $3 }' | awk -F . '{ print $1*1048576 + $2*10486}')
swapUsed=$(echo $allSwap | awk '{ print $6 }' | awk -F . '{ print $1*1048576 + $2*10486}')
swapFree=$(echo $allSwap | awk '{ print $9 }' | awk -F . '{ print $1*1048576 + $2*10486}')
# Get the rest from vm_stat (page counts; pages assumed 4096 bytes)
vmStats=$(vm_stat | sed 's/\.//')
pagesFree=$(echo "$vmStats" | awk '/Pages free:/ { print $3*4096 }')
pagesActive=$(echo "$vmStats" | awk '/Pages active:/ { print $3*4096 }')
pagesInactive=$(echo "$vmStats" | awk '/Pages inactive:/ { print $3*4096 }')
pagesSpeculative=$(echo "$vmStats" | awk '/Pages speculative:/ { print $3*4096 }')
pagesWired=$(echo "$vmStats" | awk '/Pages wired down:/ { print $4*4096 }')
elif [ $uname = 'FreeBSD' ]; then
escecho='echo -e'
cachename='cache'
pageSize=$(sysctl -n hw.pagesize)
# Get all stats from sysctl
totalMem=$(sysctl -n hw.realmem)
swapTotal=$(sysctl -n vm.swap_total)
# FIXME: Is this right? No swapping systems to test
swapUsed=$(($(sysctl -n vm.stats.vm.v_swappgsout) * $pageSize))
swapFree=$(($swapTotal - $swapUsed))
# Top's UFS buffers are not used separate in calculation
pagesFree=$(($(sysctl -n vm.stats.vm.v_free_count) * $pageSize))
pagesActive=$(($(sysctl -n vm.stats.vm.v_active_count) * $pageSize))
pagesInactive=$(($(sysctl -n vm.stats.vm.v_inactive_count) * $pageSize))
pagesSpeculative=$(($(sysctl -n vm.stats.vm.v_cache_count) * $pageSize))
pagesWired=$(($(sysctl -n vm.stats.vm.v_wire_count) * $pageSize))
else
echo "Unsupported uname: $(uname)"
exit 1
fi
# Print out Linux-style memory stats
# ($escecho is 'echo' on Darwin, 'echo -e' on FreeBSD, to expand the \t's)
$escecho "\ttotal\t\tused\t\tfree\t\tinactive\t$cachename"
$escecho "Mem:\t$totalMem\t$(($totalMem - $pagesFree))\t$pagesFree\t$pagesInactive\t$pagesSpeculative"
$escecho "-/+ inact/$cachename:\t$(($pagesActive + $pagesWired))\t$(($pagesFree + $pagesInactive + $pagesSpeculative))"
$escecho "Swap:\t$swapTotal\t$swapUsed\t$swapFree"
|
4be4b61083885aeb99f5ce1ce06934816f05077f
|
Shell
|
JoshuaSkootsky/chuyu-tian
|
/deploy.sh
|
UTF-8
| 1,021
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# Build the Hugo site into public/ (a worktree of the gh-pages branch),
# commit the result, and push both the built site and the source branch.
# stop the whole script as soon as any command fails
set -e
# function cleanup_at_exit {
# # return to main branch
# }
# trap cleanup_at_exit EXIT
# Subtree method here: https://gohugo.io/hosting-and-deplooyment/hosting-on-github/
echo "Deleting old publication"
rm -rf public
mkdir public
# Drop any stale worktree bookkeeping before re-adding public/.
git worktree prune
rm -rf .git/worktrees/public/
echo "Checking out gh-pages branch into public"
git worktree add -B gh-pages public github/gh-pages
echo "Removing existing files"
rm -rf public/*
echo "Building site with Hugo..."
hugo # if using a theme, replace with `hugo -t <YOURTHEME>`
# Build the project with SEO
# NOTE(review): the site is built twice (plain `hugo` above, then with
# HUGO_ENV=production) -- the first build's output appears redundant; confirm.
HUGO_ENV=production hugo
# Commit changes. Any script arguments become the commit message.
msg="rebuilding site $(date)"
if [ -n "$*" ]; then
msg="$*"
fi
echo "Updating gh-pages branch with message {$msg}"
cd public && git add --all && git commit -m "$msg" && cd ..
printf "\033[0;32mDeploying updates to GitHub...\033[0m\n"
# Push source and build repos.
git push -f github gh-pages
# Push source to main branch on github
git push github main
| true
|
066b6af50d31fef78b25218fff0da8cf0b039c9c
|
Shell
|
vincentdm05/raytracer
|
/tools/clean.sh
|
UTF-8
| 250
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Remove build artifacts (bin/ and build/) from the raytracer project root.
# BUGFIX: quote "$0" so paths with spaces resolve, and use `rm -rf` so a
# missing bin/ or build/ directory no longer produces an error.
rootDir=$(cd "$(dirname "$0")/../" ; pwd)

# Safety check: refuse to delete anything unless we really are inside the
# raytracer tree.
if [[ ! -d "${rootDir}" || "$(basename "${rootDir}")" != "raytracer" ]]; then
	echo "Incorrect root directory '${rootDir}'." 1>&2
	exit 1
fi
rm -rf -- "${rootDir}"/bin
rm -rf -- "${rootDir}"/build
|
165d2c1b6c5312a2d7e72e81ccc86e7c5e790d6e
|
Shell
|
dallakyan/imod_singularity
|
/imod
|
UTF-8
| 224
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Wrapper that runs an IMOD command inside the imod.img Singularity
# container, after sourcing IMOD's environment inside the container.
cmd="$*"
if (( $# == 0 )); then
	echo "Enter an IMOD command."
else
	echo -e "\nExecuting \`$cmd\`\n"
	singularity exec -B /home/:/home/ imod.img bash -c "source /etc/profile.d/IMOD-linux.sh && $cmd"
fi
| true
|
d151f8d11b7d9f674f26f338c6bf024978b82334
|
Shell
|
satoshi-hirayama/techtalk4
|
/provisioning/bin/site
|
UTF-8
| 1,390
| 4.28125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Directory containing this script (the provisioning bin/ directory).
CWD=`dirname "${0}"`
# Provisioning root, derived by stripping "bin" from CWD.
# NOTE(review): ${CWD/bin/} removes the first occurrence of "bin" anywhere
# in the path, not only a trailing "/bin" component -- confirm parent paths
# never contain "bin" elsewhere.
ROOT_DIR=${CWD/bin/}
# Print usage information (in cyan) and terminate successfully.
help() {
  cecho 36 "This command is generate default playbooks and inventry."
  echo ""
  cecho 36 "- generate usage: ${CWD}/site -n [site_name]"
  cecho 36 "- delete usage: ${CWD}/site -n [site_name] -d"
  exit 0
}
# cecho COLOR_CODE TEXT -- print TEXT wrapped in the given ANSI color code,
# resetting to the default foreground (39) afterwards.
cecho() {
  local color_code=$1
  local message=$2
  echo -e "\033[0;${color_code}m${message}\033[0;39m"
}
# Execute the command string in $1 via eval, then log it in magenta.
# NOTE(review): eval runs arbitrary text; callers must only pass trusted input.
function run_and_print {
eval "$1"
cecho 35 "ran command: $1"
}
# Parse options: -h help, -n site name (required), -d delete mode.
while getopts hn:d OPT
do
case $OPT in
'h' ) help ;;
'n' ) SITE_NAME=$OPTARG ;;
'd' ) DELETE_OPT=1 ;;
esac
done
# Check site name. (${SITE_NAME+x} is empty only when SITE_NAME is unset.)
if [ -z ${SITE_NAME+x} ]; then
help
fi
if [ -z ${DELETE_OPT+x} ]; then
# Check site playbook exists.
if [ -e "${ROOT_DIR}${SITE_NAME}.yml" ]; then
cecho 31 "Error: ${ROOT_DIR}${SITE_NAME}.yml already exists."
exit 1
fi
cecho 32 "Making default playbooks and inventry..."
# Make playbooks and inventory.
run_and_print "touch ${ROOT_DIR}${SITE_NAME}.yml"
for ENV in 'integration' 'production' ; do
run_and_print "touch ${ROOT_DIR}${ENV}.ini"
run_and_print "touch ${ROOT_DIR}group_vars/${SITE_NAME}-${ENV}.yml"
done
else
cecho 32 "Removing default playbooks and inventry..."
# Remove playbooks and inventory.
# NOTE(review): the *.ini removal deletes ALL .ini files under ROOT_DIR, not
# only this site's inventories — confirm that is intended.
run_and_print "rm -f ${ROOT_DIR}${SITE_NAME}.yml"
run_and_print "rm -rf ${ROOT_DIR}*.ini"
run_and_print "rm -f ${ROOT_DIR}group_vars/${SITE_NAME}-*"
fi
cecho 32 "Finish!!"
| true
|
7795077d867e3ea849f5bc1b9c9def1820e6cfb7
|
Shell
|
sentabi/AutoInstaller
|
/fedora.sh
|
UTF-8
| 10,396
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
## Do not run this blindly; read through the script first to avoid
## unwanted side effects.
# Set timezone to WIB (Asia/Jakarta)
rm -f /etc/localtime
cp /usr/share/zoneinfo/Asia/Jakarta /etc/localtime
waktuMulai=$(date)
# Set hostname: use $1 if given, otherwise default to "fedora"
if [[ -n "$1" ]]; then
hostnamectl set-hostname --static "$1"
else
hostnamectl set-hostname --static fedora
fi
# Generate an SSH key without a passphrase.
# NOTE(review): this runs before the sudo-user check below, so under sudo it
# writes root's ~/.ssh, not the invoking user's — confirm that is intended.
ssh-keygen -t rsa -b 4096 -N "" -f ~/.ssh/id_rsa -q
USERSUDO=$SUDO_USER
# Abort unless run via sudo from a regular (non-root) user.
if [[ $USERSUDO == 'root' || -z $USERSUDO ]]; then
echo "--------------------------------------------"
echo "Script ini harus dijalankan menggunakan sudo dan user biasa" 1>&2
echo "sudo -E bash ./fedora.sh hostname"
echo "Contoh : sudo -E bash ./fedora.sh fedoraku" 1>&2
echo "--------------------------------------------"
exit 1
fi
# Remove unneeded applications
dnf remove transmission* claws-mail* abrt-* midori pidgin -y
# 3rd-party repos: RPM Fusion (free + nonfree) and Remi, matched to the
# running Fedora release via `rpm -E %fedora`
dnf install http://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-"$(rpm -E %fedora)".noarch.rpm -y
dnf install http://download1.rpmfusion.org/nonfree/fedora/rpmfusion-nonfree-release-"$(rpm -E %fedora)".noarch.rpm -y
dnf install https://rpms.remirepo.net/fedora/remi-release-"$(rpm -E %fedora)".rpm -y
# Refresh repositories and upgrade all packages
dnf upgrade -y
# Install kernel-devel and friends needed to compile kernel modules
dnf install kernel-devel kernel-headers gcc make dkms acpid libglvnd-glx libglvnd-opengl libglvnd-devel pkgconfig -y
# General applications
dnf install sshpass pavucontrol nano wget curl lshw rfkill mediawriter puddletag sshfs -y
# GIT
dnf install git -y
# OpenVPN
dnf install openvpn -y
# VNC tools
dnf install tigervnc remmina remmina-plugins* -y
# Design
dnf install shotwell gimp inkscape -y
# Debugging tools
dnf install wireshark nmap strace sysstat ltrace -y
# Utility
dnf install rsnapshot wavemon -y
# CLI TOOLS
dnf install mtr rsync htop whois iperf3 traceroute bind-utils -y
# git prompt
wget https://raw.github.com/git/git/master/contrib/completion/git-prompt.sh -O ~/.git-prompt.sh
# Replace the user's .bashrc with the one from this repo
sudo -u "$USERSUDO" bash -c "rm -f /home/$USERSUDO/.bashrc"
sudo -u "$USERSUDO" bash -c "wget https://raw.githubusercontent.com/sentabi/AutoInstaller/master/bashrc -O /home/$USERSUDO/.bashrc"
# nano syntax highlighting
# NOTE(review): the inner quotes around *.nanorc close the outer double-quoted
# string, and ~/.nanorc is expanded by the outer shell (root's home) despite
# sudo -u — verify this writes where intended.
sudo -u "$USERSUDO" bash -c "find /usr/share/nano/ -iname "*.nanorc" -exec echo include {} \; >> ~/.nanorc"
# Download managers
dnf install uget aria2 qbittorrent -y
# Password managers
dnf install keepassxc pwgen -y
# Nextcloud client
dnf install nextcloud-client -y
# Screenshot tool
dnf install shutter -y
# XFCE
dnf install xfce4-pulseaudio-plugin bluebird-gtk3-theme bluebird-gtk2-theme bluebird-xfwm4-theme -y
# Multimedia codecs
dnf install ffmpeg gstreamer1-plugins-base gstreamer1-plugins-good-extras gstreamer1-vaapi \
gstreamer1-plugins-good gstreamer1-plugins-ugly gstreamer1-plugins-bad-free gstreamer1-plugins-bad-free \
gstreamer1-plugins-bad-freeworld gstreamer1-plugins-bad-free-extras -y
# HTML5 / h264 support for Firefox
dnf config-manager --set-enabled fedora-cisco-openh264
dnf install gstreamer1-plugin-openh264 mozilla-openh264 compat-ffmpeg28 -y
# YouTube downloader
wget https://yt-dl.org/downloads/latest/youtube-dl -O /usr/local/bin/youtube-dl
chmod a+rx /usr/local/bin/youtube-dl
# Multimedia players
dnf install vlc smplayer mplayer mpv clementine -y
# Install VirtualBox from the upstream Oracle repo, pinned to the latest
# stable major.minor reported by LATEST-STABLE.TXT
FILEREPOVIRTUALBOX=/etc/yum.repos.d/virtualbox.repo
VIRTUALBOX_LATEST_VERSION=$(wget -qO- https://download.virtualbox.org/virtualbox/LATEST-STABLE.TXT | grep -oE '^[0-9]{1}.[0-9]{1}')
if [ ! -f "$FILEREPOVIRTUALBOX" ]
then
wget http://download.virtualbox.org/virtualbox/rpm/fedora/virtualbox.repo -O /etc/yum.repos.d/virtualbox.repo
rpm --import https://www.virtualbox.org/download/oracle_vbox.asc
fi
dnf install VirtualBox-"${VIRTUALBOX_LATEST_VERSION}" -y
usermod -a -G vboxusers "$USERSUDO"
# Install Sublime Text (latest stable build) into /opt
FOLDERSUBLIME=/opt/sublime_text
SUBLIME_LATEST_VERSION=$(curl -s https://www.sublimetext.com/updates/3/stable/updatecheck | grep latest_version | cut -d ':' -f2 | sed 's/[^0-9]*//g')
if [ ! -d "$FOLDERSUBLIME" ]; then
echo "Installing Sublime Text ..."
wget "https://download.sublimetext.com/sublime_text_build_${SUBLIME_LATEST_VERSION}_x64.tar.xz"
tar Jxvf sublime_text_build_"${SUBLIME_LATEST_VERSION}"_x64.tar.xz -C /opt
ln -s /opt/sublime_text/sublime_text /usr/bin/sublime
rm -f "sublime_text_build_${SUBLIME_LATEST_VERSION}_x64.tar.xz"
fi
# Archive extractors
dnf install file-roller zip unzip p7zip unrar -y
# Mount Android devices (MTP)
dnf install libmtp-devel libmtp gvfs-mtp simple-mtpfs libusb gvfs-client gvfs-smb gvfs-fuse gigolo -y
# Install SAMBA
dnf install samba samba-common samba-client -y
# Install Google Chrome
dnf install https://dl.google.com/linux/direct/google-chrome-stable_current_x86_64.rpm -y
# Install Thunderbird
dnf install thunderbird -y
# LibreOffice
dnf install libreoffice -y
# Misc. (clipboard tool + GnuPG)
dnf install xclip gpg -y
## Font rendering: force the autohinter system-wide via fontconfig
cat >/etc/fonts/conf.d/99-autohinter-only.conf <<'EOL'
<?xml version="1.0"?>
<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
<fontconfig>
<match target="font">
<edit name="autohint" mode="assign">
<bool>true</bool>
</edit>
</match>
</fontconfig>
EOL
# Enable the stock autohint and sub-pixel RGB fontconfig snippets
ln -s /etc/fonts/conf.avail/10-autohint.conf /etc/fonts/conf.d/
ln -s /etc/fonts/conf.avail/10-sub-pixel-rgb.conf /etc/fonts/conf.d/
# Per-user fontconfig overrides for the invoking user
cat >/home/"$USERSUDO"/.fonts.conf <<'EOL'
<?xml version="1.0"?>
<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
<fontconfig>
<match target="font" >
<edit mode="assign" name="autohint" >
<bool>true</bool>
</edit>
</match>
<match target="font" >
<edit mode="assign" name="rgba" >
<const>none</const>
</edit>
</match>
<match target="font" >
<edit mode="assign" name="hinting" >
<bool>false</bool>
</edit>
</match>
<match target="font" >
<edit mode="assign" name="hintstyle" >
<const>hintnone</const>
</edit>
</match>
<match target="font" >
<edit mode="assign" name="antialias" >
<bool>true</bool>
</edit>
</match>
</fontconfig>
EOL
# Default LCD filter for X applications
echo "Xft.lcdfilter: lcddefault" | sudo -u "$USERSUDO" tee /home/"$USERSUDO"/.Xresources
# Fonts: freetype with subpixel rendering, plus Ubuntu/Overpass/Source Code Pro
dnf install freetype-freeworld -y
# Work in a throwaway directory for the font archives.
# BUG FIX: the original used plain `mktemp`, which creates a regular FILE, so
# the subsequent `cd` failed and its `|| exit` aborted the whole script here.
# `mktemp -d` creates a directory as intended.
TMP_FONT_FOLDER=$(mktemp -d)
cd "$TMP_FONT_FOLDER" || exit
wget https://assets.ubuntu.com/v1/0cef8205-ubuntu-font-family-0.83.zip -O ubuntu.zip
unzip ubuntu.zip
mv ubuntu-font-family-* /usr/share/fonts/
wget https://github.com/RedHatOfficial/Overpass/releases/download/v3.0.5/overpass-3.0.5.zip -O overpass.zip
unzip overpass.zip
mv Overpass-* /usr/share/fonts/
wget https://github.com/downloads/adobe-fonts/source-code-pro/SourceCodePro_FontsOnly-1.013.zip -O sourcecodepro.zip
unzip sourcecodepro.zip
mv SourceCodePro_FontsOnly-* /usr/share/fonts/
rm -fr "$TMP_FONT_FOLDER"
# Font tweaks from the better_fonts COPR
dnf copr enable dawid/better_fonts -y
dnf install fontconfig-enhanced-defaults fontconfig-font-replacements -y
# Tweak XFCE panel/theme settings as the sudo user
su "$USERSUDO" -m -c 'xfconf-query -c xfce4-panel -p /plugins/plugin-1/show-button-title -n -t bool -s false'
su "$USERSUDO" -m -c 'xfconf-query -c xfce4-panel -p /plugins/plugin-1/button-icon -n -t string -s "ibus-hangul"'
su "$USERSUDO" -m -c 'xfconf-query -c xfwm4 -p /general/theme -s "Bluebird"'
su "$USERSUDO" -m -c 'xfconf-query -c xsettings -p /Net/ThemeName -s "Glossy"'
# Disable SELinux. Re-enable once everything has been tested ;)
sed -i s/SELINUX=enforcing/SELINUX=disabled/g /etc/selinux/config
# Harden /tmp: re-mount as tmpfs with noexec,nosuid and persist it in fstab
cd "$HOME" || exit
rm -rf /tmp
mkdir /tmp
mount -t tmpfs -o rw,noexec,nosuid tmpfs /tmp
chmod 1777 /tmp
echo "tmpfs /tmp tmpfs rw,noexec,nosuid 0 0" >> /etc/fstab
rm -rf /var/tmp
ln -s /tmp /var/tmp
# Cap systemd journal size
echo '
Storage=persistent
SystemMaxUse=400M
SystemMaxFileSize=30M
RuntimeMaxUse=250M
RuntimeMaxFileSize=30M' >> /etc/systemd/journald.conf
# Restart journald so the new limits take effect
systemctl restart systemd-journald
# SSH: skip reverse-DNS lookups on login
echo "UseDNS no" >> /etc/ssh/sshd_config
## LAMP stack for web development
dnf install httpd mariadb mariadb-server phpMyAdmin php php-pdo php-cli php-mysqlnd php-mcrypt php-xml -y
# Recreate /var/tmp as a real directory (it was symlinked to /tmp above).
# Avoids the MariaDB "Failed at step NAMESPACE spawning" error:
# https://jaranguda.com/solusi-mariadb-failed-at-step-namespace-spawning/
rm -fr /var/tmp
mkdir /var/tmp
chmod 1777 /var/tmp
# Configure MariaDB with a random 15-char root password
systemctl start mariadb
MYSQL_ROOT_PASSWORD=$(pwgen 15 1)
# MariaDB: disable unix_socket authentication and lock down default accounts
# https://mariadb.com/kb/en/library/authentication-plugin-unix-socket/
mysql -e "UPDATE mysql.user SET Password=PASSWORD('$MYSQL_ROOT_PASSWORD') WHERE User='root';"
mysql -e "DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1');"
mysql -e "DELETE FROM mysql.user WHERE User='';"
mysql -e "DROP DATABASE test;"
mysql -e "FLUSH PRIVILEGES;"
# mysql -e "UPDATE mysql.user set plugin='' where user='root';"
# Store root credentials in the user's ~/.my.cnf for passwordless CLI access
echo "[client]
user = root
password = $MYSQL_ROOT_PASSWORD" | sudo -u "$USERSUDO" tee /home/"$USERSUDO"/.my.cnf > /dev/null
systemctl restart mariadb
## Permanent phpMyAdmin login from localhost
# TODO
# Replace these lines in place rather than deleting and re-appending.
sed -i "/'cookie'/d" /etc/phpMyAdmin/config.inc.php
sed -i "/'user'/d" /etc/phpMyAdmin/config.inc.php
sed -i "/'password'/d" /etc/phpMyAdmin/config.inc.php
sed -i "/?>/d" /etc/phpMyAdmin/config.inc.php
echo "
\$cfg['Servers'][\$i]['auth_type'] = 'config'; // Authentication method (config, http or cookie based)?
\$cfg['Servers'][\$i]['user'] = 'root'; // MySQL user
\$cfg['Servers'][\$i]['password'] = '$MYSQL_ROOT_PASSWORD'; // MySQL password (only needed
" >> /etc/phpMyAdmin/config.inc.php
chown "$USERSUDO":"$USERSUDO" -R /var/www
# Install Composer
curl -sS https://getcomposer.org/installer | php
mv composer.phar /usr/bin/composer
# Allow .htaccess overrides in Apache
sed -i 's/AllowOverride None/AllowOverride All/g' /etc/httpd/conf/httpd.conf
# WP CLI
WPCLI='/usr/local/bin/wp'
if [ ! -f $WPCLI ]; then
echo "---------------------------"
echo "Download & Install WPCLI ... "
wget -q https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar -O /usr/local/bin/wp
chmod +x /usr/local/bin/wp
echo "Install WPCLI selesai!"
echo "---------------------------"
fi
# Speedtest CLI
wget https://raw.githubusercontent.com/sivel/speedtest-cli/master/speedtest.py -O /usr/bin/speedtest
chmod +x /usr/bin/speedtest
# Telegram desktop into /opt
wget --content-disposition -q https://telegram.org/dl/desktop/linux -O tsetup.tar.xz
tar xJvf tsetup.tar.xz -C /opt
rm -f tsetup.tar.xz
echo "Instalasi selesai!"
echo "Mulai dijalankan $waktuMulai"
echo "Selesai $(date)"
| true
|
91fcc5d41d98b7b4bb3498a9345ec7ecbbe035e7
|
Shell
|
iptux/AndroidDev
|
/bin/signapk
|
UTF-8
| 5,095
| 3.5625
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
#
# signapk
# signing a apk with keystore
# see https://developer.android.com/tools/publishing/app-signing.html
#
# Create: 2016-04-24 17:10
# Author: Alex.wang
# Require: keytool, jarsigner, zipalign
# Keystores live under this directory, one <name>.keystore file per key name.
readonly KEYSTOREDIR=~/.android/keystore
readonly DEFAULTKEYSTORE=cert
# Timestamping Authority
# some url from https://stackoverflow.com/a/25053511
readonly -A TSAURLS=(
[certum]=http://time.certum.pl
[ncipher]=http://dse200.ncipher.com/TSS/HttpTspServer
[safecreative]=https://tsa.safecreative.org/
[dfn]=http://zeitstempel.dfn.de
[comodo]=http://timestamp.comodoca.com/rfc3161
[symantec]=http://sha256timestamp.ws.symantec.com/sha256/timestamp
[geotrust]=https://timestamp.geotrust.com/tsa
[globalsign]=http://timestamp.globalsign.com/scripts/timstamp.dll
[signfiles]=https://ca.signfiles.com/tsa/get.aspx
[globaltrustfinder]=http://services.globaltrustfinder.com/adss/tsa
[tugraz]=https://tsp.iaik.tugraz.at/tsp/TspRequest
[digicert]=http://timestamp.digicert.com
[verisign]=http://timestamp.verisign.com
)
# Mutable script state; adjusted by command-line options in main().
TSAURL=
ZIPALIGN="`which zipalign`"
ALIGN="4"
VERBOSE=
# Print an error message to stderr and abort the script with status 1.
showerror() {
  printf '%s\n' "$*" >&2
  exit 1
}
# Print a table of the built-in Timestamping Authority names and their URLs.
listtsa() {
  local name
  printf '%20s %s\n' 'TSANAME ' 'Timestamping Authority URL'
  for name in "${!TSAURLS[@]}" ; do
    printf '%20s %s\n' "${name}:" "${TSAURLS[$name]}"
  done
}
# Look up TSA name $1 in TSAURLS; on a hit, store its URL in TSAURL.
# Unknown names leave TSAURL untouched. Always returns 0.
settsa() {
  local url="${TSAURLS[$1]}"
  if [[ -n "$url" ]] ; then
    TSAURL="$url"
  fi
}
# Create an RSA keypair in keystore file $1 unless the file already exists.
# Reads KEYALIAS, CERTDNAME, STOREPASS and KEYPASS set by setkeystore().
# Passwords are passed via the environment (-storepass:env / -keypass:env)
# so they do not appear in the process's argv.
genkeypair() {
local keystore="$1"
[ -e "${keystore}" ] && return
[ -d "`dirname ${keystore}`" ] || mkdir -p "`dirname ${keystore}`"
export STOREPASS KEYPASS
keytool -genkeypair ${VERBOSE:+-v} \
-storepass:env STOREPASS \
-keystore "${keystore}" \
-keypass:env KEYPASS \
-alias "${KEYALIAS}" \
-dname "${CERTDNAME}" \
-validity 10000 \
-keysize 2048 \
-keyalg RSA
export -n STOREPASS KEYPASS
}
# Derive keystore path, alias and passwords from a key name ($1, default
# DEFAULTKEYSTORE), creating the keystore on first use. All results are
# readonly globals consumed by genkeypair() and signapk().
# NOTE(review): passwords are deterministic ("android" + name) — suitable for
# debug signing only.
setkeystore() {
local name="$1"
[ -n "${name}" ] || name="${DEFAULTKEYSTORE}"
# CN=Common Name, OU=Organizational Unit, O=Organization, L=Locality, ST=State or Province, C=two-letter country code
readonly CERTDNAME="CN=${name} key"
readonly KEYSTORE="${KEYSTOREDIR}/${name}.keystore"
readonly STOREPASS="android${name}"
readonly KEYALIAS="${name}"
readonly KEYPASS="android${name}"
[ -e "${KEYSTORE}" ] || genkeypair "${KEYSTORE}"
}
# Load keystore settings from a whitespace-separated "key value" config file
# ($1; see showhelp for the expected keys) into the readonly globals
# KEYSTORE / STOREPASS / KEYALIAS / KEYPASS. Missing file is a silent no-op.
loadkeyconf() {
local conf="$1"
[ -f "${conf}" ] || return
local -A MAP
local key value
# NOTE(review): plain `read` (no -r) mangles backslashes in values — confirm
# config values never contain backslashes.
while read key value ; do
if [ -n "${key}" -a -n "${value}" ] ; then
MAP["${key}"]="${value}"
fi
done < "${conf}"
readonly KEYSTORE="${MAP[storeFile]}"
readonly STOREPASS="${MAP[storePassword]}"
readonly KEYALIAS="${MAP[keyAlias]}"
readonly KEYPASS="${MAP[keyPassword]}"
}
# Sign one apk ($1): copy to <name>-unaligned.apk, strip old META-INF
# signatures, jarsign with the configured keystore (optionally timestamped
# via TSAURL), then zipalign into <name>-signed.apk (or just rename when
# zipalign is unavailable). `set -e` makes any step abort the script.
signapk() {
local in="$1"
[ -e "${in}" ] || showerror "${in}: file not exist"
local bn="${in%\.*}"
local unaligned="${bn}-unaligned.apk"
local signed="${bn}-signed.apk"
set -e
cp -f "${in}" "${unaligned}"
# remove signing info
zip -q -d "${unaligned}" "META-INF/*"
export STOREPASS KEYPASS
jarsigner ${VERBOSE:+-verbose} \
-storepass:env STOREPASS \
-keystore "${KEYSTORE}" \
-keypass:env KEYPASS \
-sigfile CERT \
-digestalg SHA1 \
-sigalg SHA1withRSA \
${TSAURL:+-tsa ${TSAURL}} \
"${unaligned}" \
"${KEYALIAS}"
export -n STOREPASS KEYPASS
if [ -x "${ZIPALIGN}" ] ; then
"${ZIPALIGN}" -f ${VERBOSE:+-v} -z "${ALIGN}" "${unaligned}" "${signed}"
rm -f "${unaligned}"
else
mv -f "${unaligned}" "${signed}"
fi
echo "signed apk: ${signed}"
set +e
}
# Print the file name and verify its signature with jarsigner ($1 = apk path).
# Returns 1 when the file does not exist.
showcert() {
local in="$1"
[ -e "${in}" ] || return 1
echo "${in}"
jarsigner -verify ${VERBOSE:+-verbose:summary -certs} "${in}"
}
# Print usage (the heredoc delimiter is unquoted so `basename $0` expands)
# and exit 0.
showhelp() {
cat <<EOF
`basename $0`: signing a apk
Usage: `basename $0` [options...] apk
	-h	show this help
	-V	be verbose
	-v	show certificate details of a signed apk
	-k KEYNAME	use keystore KEYNAME to sign apk
		keystore files is stored in ${KEYSTOREDIR} directory
	-K KEYCONF	load keystore configure from KEYCONF file
	-L	list internal Timestamping Authority urls
	-T TSANAME	use Timestamping Authority TSANAME (-L list valid TSANAME)
	-t TSAURL	location of the Timestamping Authority (for jarsigner)
	-n	DO NOT use zipalign
	-z ZIPALIGN	path to zipalign (auto detected in \$PATH)
	-a ALIGN	alignment in bytes (for zipalign, default 4)
sample KEYCONF file:
	storeFile KEYSTOREPATH
	storePassword STOREPASSWORD
	keyAlias KEYALIAS
	keyPassword KEYPASSWORD
EOF
exit
}
# Parse command-line options, then either show certificates (-v) or sign each
# apk argument. -k selects a named keystore; -K loads a keystore config file
# (both store into `key`; an existing file is treated as a config).
# FIX: replaced the deprecated `$[ ... ]` arithmetic syntax in the shift with
# the standard `$(( ... ))` form; behavior is identical.
main() {
  local opt
  local key
  local cert
  while getopts "a:hk:K:Lnt:T:vVz:" opt ; do
    case "${opt}" in
      a)
        ALIGN="${OPTARG}"
        ;;
      k)
        key="${OPTARG}"
        ;;
      K)
        key="${OPTARG}"
        ;;
      L)
        listtsa
        exit 0
        ;;
      n)
        ZIPALIGN=
        ;;
      t)
        TSAURL="${OPTARG}"
        ;;
      T)
        settsa "${OPTARG}"
        ;;
      v)
        cert=yes
        ;;
      V)
        VERBOSE=yes
        ;;
      z)
        ZIPALIGN="${OPTARG}"
        ;;
      ?)
        showhelp
        ;;
    esac
  done
  # remove parsed options from the positional parameters
  shift "$((OPTIND - 1))"
  [ "$#" -eq 0 ] && showhelp
  # -v mode: only verify/show certificates, no signing
  if [ -n "${cert}" ] ; then
    for f in "$@" ; do
      showcert "${f}"
    done
    exit 0
  fi
  # An existing file means a KEYCONF (-K); otherwise treat as a key name (-k).
  if [ -f "${key}" ] ; then
    loadkeyconf "${key}"
  else
    setkeystore "${key}"
  fi
  for f in "$@" ; do
    signapk "${f}"
  done
}
main "$@"
| true
|
683f697494066479b351af8f36b938aa2a09a591
|
Shell
|
kzsh/scripts
|
/bash/direnv/pyenv
|
UTF-8
| 1,129
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# direnv helper: activate (creating if needed) the pyenv virtualenv named in
# .virtual-environment, built on the interpreter named in .python-version,
# both read from this script's own directory.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
DIRENV_DIR=$SCRIPT_DIR
PYTHON_VERSION="$(cat "$DIRENV_DIR/.python-version")"
VIRTUAL_ENVIRONMENT="$(cat "$DIRENV_DIR/.virtual-environment")"
# Initialise pyenv + pyenv-virtualenv, create the virtualenv if missing,
# then activate it.
function main() {
export PYENV_ROOT=$HOME/.pyenv
export PATH="$PYENV_ROOT/bin:$PATH"
eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
if ! virtual_env_exists; then
echo "RUN: Install $PYTHON_VERSION"
create_missing_virtual_environment
echo "RUN COMPLETE: Install $PYTHON_VERSION"
else
echo "SKIP: install. pyenv virtualenv $VIRTUAL_ENVIRONMENT is already installed"
fi
pyenv activate "$VIRTUAL_ENVIRONMENT"
}
# Check whether the target virtualenv already exists in pyenv.
# BUG FIX: the original grepped for the literal string "full_virtualenv_path"
# instead of the helper's output, so it could never match a real virtualenv
# and the environment was re-created on every run. Call the helper via
# command substitution instead.
function virtual_env_exists() {
  pyenv versions | grep -q "$(full_virtualenv_path)"
}
# Create the virtualenv $VIRTUAL_ENVIRONMENT on interpreter $PYTHON_VERSION.
# FIX: quote both expansions so names containing spaces are passed intact.
function create_missing_virtual_environment() {
  pyenv virtualenv "$PYTHON_VERSION" "$VIRTUAL_ENVIRONMENT"
}
# Print pyenv's relative path for the virtualenv: <version>/env/<name>.
function full_virtualenv_path() {
  printf '%s/env/%s\n' "$PYTHON_VERSION" "$VIRTUAL_ENVIRONMENT"
}
# True if $PYTHON_VERSION appears as a whole line in `pyenv versions` output.
# NOTE(review): `pyenv versions` indents entries (and marks the active one
# with "*"), so the ^...$ anchors may never match; `pyenv versions --bare`
# would be exact — confirm.
function is_python_version_installed() {
pyenv versions | grep -q "^$PYTHON_VERSION\$"
}
# Install the required CPython version via pyenv.
function install_missing_python_version() {
pyenv install "$PYTHON_VERSION"
}
# NOTE(review): is_python_version_installed / install_missing_python_version
# are defined but never called from main — dead code or future use; confirm.
main "$@"
| true
|
51db5bd3b9be70755a8218c737f80351d979f311
|
Shell
|
MagHub/aoide-dac-drivers
|
/aoide_dac_drivers_install.sh
|
UTF-8
| 1,980
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Install AOIDE DAC kernel drivers, upgrading the Raspberry Pi kernel first
# when no pre-built driver tarball matches the running kernel.
newest_driver_version="4.19.86"
newest_firmware_hash="b9ecbe8d0e3177afed08c54fc938938100a0b73f"
current_kernel_version=$(uname -r)
# Drop the final character of `uname -r` (the arch suffix such as "+").
# NOTE(review): for "-v7+"-style kernels this removes only the "+" — confirm
# that matches the tarball naming scheme.
proper_driver_version=${current_kernel_version:0:(${#current_kernel_version}-1)}
driver_test_path="/lib/modules/"$proper_driver_version+"/kernel/sound/soc/codecs/sabre9018k2m.ko"
# Print the installer banner.
function welcome(){
  printf '%s\n' ">AOIDE DAC Drivers Installer.<"
}
# Report whether a pre-built driver tarball exists for the running kernel.
# Returns 1 when drivers/aoide_dac_<version>.tar.gz exists, 0 otherwise
# (note the inverted convention; callers test for status 1).
function check_current_kernel_driver(){
  echo ">Check the driver of current kernel."
  if [ ! -e "drivers/aoide_dac_$proper_driver_version.tar.gz" ]; then
    return 0
  fi
  return 1
}
# Upgrade the Raspberry Pi firmware/kernel to $newest_driver_version by
# pinning rpi-update to the known-good commit $newest_firmware_hash.
function kernel_install(){
echo ">Install Raspberry PI Kernel "$newest_driver_version
# NOTE(review): SOFTWARE_LIST is never defined in this script, so this dpkg
# check is effectively a no-op — confirm whether a package list was meant
# to be set here.
SOFT=$(dpkg -l $SOFTWARE_LIST | grep "<none>")
if [ -n "$SOFT" ]; then
apt update
apt -y install $SOFTWARE_LIST
fi
# Bootstrap rpi-update if it is not installed yet.
if [ ! -f "/usr/bin/rpi-update" ]; then
curl -L --output /usr/bin/rpi-update https://raw.githubusercontent.com/Hexxeh/rpi-update/master/rpi-update && sudo chmod +x /usr/bin/rpi-update
fi
# Skip rpi-update's self-update and backups; pin to the fixed firmware hash.
UPDATE_SELF=0 SKIP_BACKUP=1 rpi-update $newest_firmware_hash
echo " Kernel install complete!"
}
# Install the DAC driver: bail out if it is already present; otherwise unpack
# the tarball matching the running kernel, or upgrade the kernel first and
# unpack the newest tarball. depmod is refreshed for every Pi kernel flavour.
function driver_install(){
if [ -f "$driver_test_path" ]; then
echo ">Drivers has been installed,exit."
exit
fi
check_current_kernel_driver
# check_current_kernel_driver returns 1 when a matching tarball exists.
if [ $? -eq 1 ]; then
echo " Driver exists,begin to install..."
tar zxvf drivers/aoide_dac_$proper_driver_version.tar.gz -C /
depmod -b / -a $proper_driver_version+
depmod -b / -a $proper_driver_version-v7+
depmod -b / -a $proper_driver_version-v7l+
depmod -b / -a $proper_driver_version-v8+
else
kernel_install
tar zxvf drivers/aoide_dac_$newest_driver_version.tar.gz -C /
depmod -b / -a $newest_driver_version+
depmod -b / -a $newest_driver_version-v7+
depmod -b / -a $newest_driver_version-v7l+
depmod -b / -a $newest_driver_version-v8+
fi
echo ">Drivers install complete!"
}
# Require root: installing kernels and drivers needs superuser privileges.
if [ $UID -ne 0 ]; then
echo "Superuser privileges are required to run this script."
echo "e.g. \"sudo $0\""
exit 1
fi
welcome
driver_install
| true
|
9275bd2f4e817a7664496553737a8e88186a805b
|
Shell
|
rmkn/pdns4api
|
/entrypoint.sh
|
UTF-8
| 502
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: bootstrap a PowerDNS SQLite backend on first run,
# then exec the container command. APIKEY and DBFILE env vars override the
# defaults below.
KEY=${APIKEY:-pdns}
DBF=${DBFILE:-/tmp/pdns.db}
# Probe the database; a failure means the schema has not been loaded yet.
echo "select * from domains limit 1;" | sqlite3 $DBF
if [ $? -ne 0 ]; then
curl -o /tmp/schema.sqlite3.sql -SL "https://raw.githubusercontent.com/PowerDNS/pdns/auth-4.1.1/modules/gsqlite3backend/schema.sqlite3.sql"
sqlite3 $DBF < /tmp/schema.sqlite3.sql
chmod 666 $DBF
sed -i -e "s/^api-key=.*$/api-key=${KEY}/" /etc/pdns/pdns.conf
# Escape the slashes in $DBF so it is safe inside the sed replacement.
# NOTE(review): ${DBF//\//\\/} is a bash-ism; a strictly POSIX /bin/sh
# (e.g. dash) rejects it — confirm the image's sh is busybox/bash.
sed -i -e "s/^gsqlite3-database=.*$/gsqlite3-database=${DBF//\//\\/}/" /etc/pdns/pdns.conf
fi
exec "$@"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.