blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
88e7c893d8b806fd265d7dee0e8dd37df2b6b148
|
Shell
|
purpleposeidon/FzDocs
|
/update.sh
|
UTF-8
| 280
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Regenerate the gh-pages branch content from a locally served docs build.
# Assumes the docs are being served at http://localhost:8000/FzDocs/ and that
# the current directory is the git checkout to update.
set -e
# Refuse to run unless the export target exists.
if [ ! -e /var/www/FzDocs ]; then
echo "not exported"
exit
fi
git checkout gh-pages
rm -rf * #Deletes this very script, but that's fine.
# Restore the working-tree files from master on top of the emptied gh-pages.
git checkout master *
# Mirror the locally served docs; '|| true' tolerates wget's non-zero exit
# (broken links etc.) because 'set -e' is active.
wget -r http://localhost:8000/FzDocs/ || true
# wget saves under a host-named directory (matched by localhost*); move the
# docs to the repo root and discard the wrapper directory.
mv localhost*/FzDocs/* ./
rm -rf localhost*
| true
|
c38dc037a135ab54c57860d954158953b5870b78
|
Shell
|
fyfh/aria2-config
|
/autoupload1.sh
|
UTF-8
| 1,075
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# aria2 on-download-complete hook: upload a finished download to two rclone
# remotes, then remove the local copy.
# aria2 passes: $1 = GID, $2 = number of files, $3 = path of the first file
# (for a directory download this is some file inside it, e.g. /data/a/b/c/d.jpg).
path="$3"
downloadpath='/usr/local/caddy/www/aria2/Download' # aria2 download directory
rclone1='m:skr' # first rclone remote
rclone2='n:'    # second rclone remote
if [ "$2" -eq 0 ] # zero files downloaded -> nothing to upload
then
    exit 0
fi
# Walk up from the file path until its parent is the download directory,
# i.e. turn /data/a/b/c/d.jpg into the top-level entry /data/a.
while true; do
    filepath=$path
    path=${path%/*}
    if [ "$path" = "$filepath" ]; then
        # fix: safety net — the path stopped shrinking without ever matching
        # $downloadpath; the original looped forever in this case.
        exit 1
    fi
    if [ "$path" = "$downloadpath" ] && [ "$2" -eq 1 ] # single-file download
    then
        # fix: was rm '${filepath}.aria2' — single quotes prevented expansion,
        # so a literal file named ${filepath}.aria2 was targeted instead.
        rm -f "${filepath}.aria2"
        rclone copy -v "${filepath}" "${rclone1}"
        rclone copy -v "${filepath}" "${rclone2}"
        rm -rf "${filepath}"
        exit 0
    elif [ "$path" = "$downloadpath" ] # directory download
    then
        # fix: same single-quote expansion bug as above.
        rm -f "${filepath}.aria2"
        # Preserve the path relative to the download dir on the remotes.
        rclone copy -v "${filepath}" "${rclone1}/${filepath#${downloadpath}/}"
        rclone copy -v "${filepath}" "${rclone2}/${filepath#${downloadpath}/}"
        rm -rf "${filepath}"
        exit 0
    fi
done
| true
|
21a9a08161142578f0dcf7c220840f3e41782b63
|
Shell
|
otaviocarvalho/energy-project
|
/spark/energy_project/launch_local.sh
|
UTF-8
| 1,355
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the local streaming stack for the energy project:
# ZooKeeper -> Kafka (with a fresh 'streaming-topic') -> Redis.
# The Spark producer/consumer invocations are kept commented out at the bottom.
# Requires ZOOKEEPER_HOME and KAFKA_HOME (sourced from ~/.bashrc).
source ~/.bashrc
# NOTE(review): PROJECT_HOME is defined but not referenced anywhere in this
# script — possibly used by the commented-out spark-submit lines' context.
PROJECT_HOME="/home/omcarvalho/tcc/project/spark/energy_project/"
# zookeeper
nohup $ZOOKEEPER_HOME/bin/zkServer.sh start $ZOOKEEPER_HOME/conf/zoo.cfg &
sleep 1
# kafka
# Wipe on-disk Kafka data for the topic and its ZooKeeper registration so the
# topic can be recreated from scratch below.
rm -rf /tmp/kafka-logs/streaming-topic*
echo "rmr /brokers/streaming-topic" | $ZOOKEEPER_HOME/bin/zkCli.sh
sleep 1
nohup $KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties &
sleep 1
$KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic streaming-topic
## kafka-test
#echo "bla ble bli blo blu" | $KAFKA_HOME/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic streaming-topic
#$KAFKA_HOME/bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic streaming-topic --from-beginning
#PID=$!
## Wait for 2 seconds
#sleep 2
## Kill it
#kill $PID
# redis
#sudo add-apt-repository ppa:chris-lea/redis-server
#sudo apt-get update
#sudo apt-get install redis-server
#redis-benchmark -q -n 1000 -c 10 -P 5
# Restart Redis and wipe all keys so each run starts from a clean store.
sudo service redis-server restart
redis-cli flushall
# run kafka producer
#nohup $SPARK_HOME/bin/spark-submit --class "KafkaInputProducer" target/scala-2.10/energy-projt-assembly-1.0.jar localhost:9092 streaming-topic 3 5 &
# run spark consumer
#$SPARK_HOME/bin/spark-submit --class "KafkaReceiver" target/scala-2.10/energy-project-assembly-1.0.jar
| true
|
4e0bea51c2889b034f8c8735a916f6b587d18d69
|
Shell
|
bryancarl7/gpu-bot
|
/start.sh
|
UTF-8
| 1,946
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Bootstrap and launch the gpu-bot Flask scraper.
# Usage: ./start.sh [-skip]
#   -skip  skip setup steps 1-3 (tmp dir, python check, pip installs).
if [ "$1" == "-skip" ] ; then
  # This skip is to set the flags to a switch
  python=1
  req=1
  logging=1
else
  # To ensure that Mac OS doesnt have trouble multi threading
  export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
  echo "------------------------- Attention -------------------------------"
  echo " If this is your first installation, you may have to restart "
  echo " your terminal, after this preliminary installation"
  echo "-------------------------------------------------------------------"
  # Setup files and logs that we will need
  # fix: use -p so a re-run does not abort when tmp/ already exists
  mkdir -p tmp/
  touch tmp/bot.log
  echo "------------------------- Step 1/4 --------------------------------"
  echo " Succesfully setup 'tmp' directory and logging files"
  echo "-------------------------------------------------------------------"
  logging=1
  python=0
  req=0
fi
# Checks for correct python version
if [[ "$python" == 0 ]] ; then
  # fix: the original wrote version=(python3 -c ...), which creates a literal
  # three-element array rather than running python3, so the -z check below
  # could never detect a missing interpreter. Use $( ) command substitution.
  version=$(python3 -c 'import sys; print(sys.version_info[:])' 2>/dev/null)
  if [[ -z "$version" ]] ; then
    echo "Requires Python 3.7.5 or higher to run"
    exit 1;
  else
    echo "------------------------- Step 2/4 --------------------------------"
    echo " Succesfully Verified Python3 Installation"
    echo "-------------------------------------------------------------------"
    python=1
  fi
fi
# Pip installs any python package required from "requirements.txt"
if [ "$req" != 1 ] ; then
  while read -r p; do
    pip3 install "$p"
  done < requirements.txt
  req=1
  echo "--------------------------- Step 3/4 ------------------------------"
  echo " Succesfully Installed Python3 Pip Requirements"
  echo "-------------------------------------------------------------------"
fi
# Try to kickstart Python Server:
echo "--------------------------- Step 4/4 ------------------------------"
echo " ...Launching Flask Server..."
echo "-------------------------------------------------------------------"
python3 scraper.py
| true
|
2c5f1910374469e3a1084788774e37b60209d08f
|
Shell
|
yihanzhen/google-photos-uploader
|
/scripts/workflow.sh
|
UTF-8
| 1,106
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Build the uploader jar, sanitize the photo directories, then upload the
# original, processed and raw batches to Google Photos.

# handle arguments
if [ "${#}" -ne 5 ]; then
  echo "Usage: workflow fromdir processeddir destdir time location"
  exit 1
fi
fromdir=$1
processeddir=$2
destdir=$3
time=$4
location=$5

# build
mvn install
jarname=target/google-photos-uploader-1.0-SNAPSHOT-jar-with-dependencies.jar
mainclass=com.hzyi.google.photos.uploader.UploaderMain

# Upload one album batch from a subdirectory of destdir; aborts on failure.
upload_batch() {
  local album=$1
  local subdir=$2
  if ! java -cp "${jarname}" "${mainclass}" upload -a -dir "${destdir}${subdir}" -ab "${album}" -t "${time}" -l "${location}"; then
    echo "upload ${album} failed"
    exit 1
  fi
}

# sanitize
if ! java -cp "${jarname}" "${mainclass}" sanitize -r -dir "${fromdir}" -p "${processeddir}" -dest "${destdir}"; then
  echo "sanitize failed"
  exit 1
fi

# upload
upload_batch original original/jpeg/
upload_batch processed edited/jpeg/
upload_batch raw edited/raw/
| true
|
d29d04bdbc1e55038100cf60e4cf14b47a6a6c81
|
Shell
|
woshimayi/driver
|
/tools/gdb-dof/coredumpBcmGpon
|
UTF-8
| 1,549
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
#run coredump parse command for BCM GPON product automatically
#created by chenxi 2022-01-24

# Print usage help.
usage()
{
cat << EOF
coredumpBcmGpon [your-core-dump-file]
Note:
1.[your-core-dump-file] is optional if current direcotry exists [your-core-dump-file](prefix with core-svn-*).
2.fs.install files is required in current direcotry(images/fs.install).
Try to untar SVN_*_fs_install.tgz if it doesn't exists.
EOF
}

# Unpack the single SVN_*.tgz in the current directory to create
# images/fs.install. Exits non-zero unless exactly one archive is found.
create_image_files()
{
TGZFILENUM=$(find ./ -name "SVN_*.tgz" | wc -l)
if [ "${TGZFILENUM}" != "1" ]; then
echo error! need one and only one tgz file in current directory.
# fix: was 'exit -1' — not a valid exit status (wraps to 255); use 1.
exit 1
fi
echo ${TGZFILENUM}
tar xzvf $(find ./ -name "SVN_*.tgz")
}

# Ensure ./images exists, creating it from the tgz when missing.
check_image_dir()
{
if [ ! -d ./images ]; then
echo create images/fs.install files
create_image_files
fi
}

COREDUMPFILE=""

# Locate the single core-svn* file here; derive the crashed binary name FILE
# from the last '-'-separated token of the core file name.
check_coredump_file()
{
COREDUMPFILENUM=$(find ./ -name "core-svn*" | wc -l)
# fix: originally echoed ${COREDUMPFILE}, which is still empty at this point;
# the debug output clearly meant the count just computed.
echo ${COREDUMPFILENUM}
if [ "${COREDUMPFILENUM}" != "1" ]; then
echo error! need one and only one core-svn file in current directory.
# fix: was 'exit -1'; see above.
exit 1
fi
COREDUMPFILE=$(find ./ -name "core-svn*")
FILE=$(echo ${COREDUMPFILE} | awk -F '-' '{print $NF}')
}

# Accept help-style single arguments.
if [ "$#" == "1" ]; then
case $1 in
h|help|-h|-help)
usage
exit 0
esac
fi

# No argument: auto-detect the core file. One argument: use it as the core
# file. Anything else: show usage.
case $# in
0)
check_image_dir
check_coredump_file
;;
1)
check_image_dir
COREDUMPFILE=$1
FILE=$(echo ${COREDUMPFILE} | awk -F '-' '{print $NF}')
;;
*)
usage
exit 0
;;
esac

# Echo the command for the log, then run the actual parser.
echo hgcoredump images/fs.install/bin/${FILE} ${COREDUMPFILE} images/fs.install/
hgcoredump images/fs.install/bin/${FILE} ${COREDUMPFILE} images/fs.install/
| true
|
2f8bbd07df974b6e372782ee06887746aa58b61b
|
Shell
|
ngtankhoa/doan-power-frontend
|
/_docker/run-container.sh
|
UTF-8
| 829
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
# Print out current command and exit as soon as any line in the script fails
set -ex
source ./constants.sh .
NODE_ENV="${1:-production}"
if [ "$NODE_ENV" = "production" ]; then
docker run \
-d \
--name=$IMAGE_NAME \
--hostname=${WEB_NAME} \
--network=${NETWORK} \
--env-file=${ENV_FILE} \
-e VIRTUAL_HOST=${WEB_VIRTUAL_HOST} \
-p 3001:80 \
$IMAGE_NAME:$VERSION
elif [ "$NODE_ENV" = "development" ]; then
# VERSION="$VERSION-dev"
# docker run \
# --rm -it \
# --name web \
# --env-file=${ENV_FILE} \
# --volume ${PWD}:/usr/src/app \
# --volume /usr/src/app/node_modules \
# -p 3000:3000 \
# $IMAGE_NAME:$VERSION
echo "WIP Not support yet, please run production"
fi
# Debug if something wrong - exit code 1 @@!
# docker run -it --entrypoint /bin/bash $IMAGE_NAME:$VERSION -s
| true
|
0c800a4ad71ba8895984e150c143d7e61418c814
|
Shell
|
cryptooman/dcos-php-api
|
/scripts/docker_gc.sh
|
UTF-8
| 4,973
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# Docker garbage collection
# Parse command-line flags; only --dry-run=<0|1> is recognized.
DRY_RUN=0
while [ "$#" -gt 0 ]; do
    case "$1" in
        --dry-run=*)
            DRY_RUN="${1#*=}"
            ;;
        *)
            echo "Unknown option [$1]. Usage /bin/bash $0 [--dry-run=1]"
            exit 1
            ;;
    esac
    shift
done
echo 'Started at '$(date '+%Y-%m-%d %H:%M:%S')
# Unmount unused (stuck) overlays
echo 'Unmount unused overlays'
# Get all mounted overlays (in case of new overlays will be mounted during the script processing these overlays will not be affected)
i=0
for overlay in $(df -h | grep overlay | sed -r 's/^.+(\/var\/lib\/docker\/overlay\/.+)$/\1/')
do
mounted_overlays_path[$i]=$overlay
mounted_overlays_hash[$i]=$(echo $overlay | sed -r 's/\/var\/lib\/docker\/overlay\/([a-z0-9]+).+/\1/')
i=$((i+1))
done
echo 'Mounted overlays: '${#mounted_overlays_path[@]}
# Get overlays for active containers
active_containers=$(docker ps -a -q -f status=running -f status=created -f status=restarting -f status=removing -f status=paused)
i=0
for container_id in $active_containers
do
active_overlays_path[$i]=$(docker inspect $container_id | grep 'MergedDir' | sed -r 's/.+(\/var\/lib\/docker\/overlay\/[^\/]+\/merged).+/\1/')
active_overlays_hash[$i]=$(echo ${active_overlays_path[$i]} | sed -r 's/\/var\/lib\/docker\/overlay\/([a-z0-9]+).+/\1/')
i=$((i+1))
done
echo 'Active overlays: '${#active_overlays_path[@]}
# Get unused overlays
# fix: the original indexed mounted_overlays_* with the running count of
# UNUSED overlays, so whenever an active overlay preceded an unused one the
# wrong (possibly ACTIVE) overlay path was recorded and later unmounted.
# Iterate over the real indices of the mounted arrays instead.
u=0
for idx in "${!mounted_overlays_hash[@]}"
do
m_hash=${mounted_overlays_hash[$idx]}
active=0
for a_hash in ${active_overlays_hash[@]}
do
if [ "$a_hash" == "$m_hash" ]
then
active=1
fi
done
if [ $active -eq 0 ]
then
unused_overlays_path[$u]=${mounted_overlays_path[$idx]}
unused_overlays_hash[$u]=${mounted_overlays_hash[$idx]}
u=$((u+1))
fi
done
echo 'Unused overlays: '${#unused_overlays_path[@]}
if [ ${#unused_overlays_path[@]} -ge 1 ]
then
# Unmount unused overlays (or just print the command when DRY_RUN=1)
for i in "${!unused_overlays_path[@]}"
do
echo 'Unmounting overlay: '${unused_overlays_hash[$i]}
if [ $DRY_RUN -eq 0 ]
then
echo "exec: umount ${unused_overlays_path[$i]}"
umount ${unused_overlays_path[$i]}
else
echo "dry-run: umount ${unused_overlays_path[$i]}"
fi
done
echo 'Done'
fi
# Remove containers in a terminal state (exited/dead); honor DRY_RUN.
echo 'Removing unused containers'
docker ps -a -q -f status=exited -f status=dead | while read -r container_id
do
if [ $DRY_RUN -eq 0 ]
then
echo "exec: docker rm $container_id"
docker rm $container_id
else
echo "dry-run: docker rm $container_id"
fi
done
# Remove dangling (untagged) images
echo 'Removing dangling images'
images=$(docker images -qa -f dangling=true)
if [ -n "$images" ]
then
if [ $DRY_RUN -eq 0 ]
then
echo "exec: docker rmi $images"
docker rmi $images
else
echo "dry-run: docker rmi $images"
fi
fi
# Remove unused layers
# NOTE: Be carefull when doing manual manipulation with /var/lib/docker/overlay/*, as it can break docker and require reinstall
echo 'Removing unused layers'
images=`docker images -qa`
if [ "$images" ]
then
# Walk every layer known to the layer database and keep only those still
# referenced by some image's RootFS.Layers list.
for layer in `ls /var/lib/docker/image/overlay/layerdb/sha256/`
do
# 'diff' holds the content hash that images reference in RootFS.Layers.
image_layer=`cat /var/lib/docker/image/overlay/layerdb/sha256/$layer/diff`
active=0
for image_id in $images
do
docker inspect --format "{{ .RootFS.Layers }}" $image_id | tr ' ' "\n" | grep $image_layer 1>/dev/null
if [ $? -eq 0 ]
then
active=1
break
fi
done
if [ $active -eq 0 ]
then
layer_path="/var/lib/docker/image/overlay/layerdb/sha256/$layer"
# 'cache-id' maps the layer to its on-disk overlay directory.
root_overlay_hash=`cat $layer_path/cache-id`
root_overlay_path="/var/lib/docker/overlay/$root_overlay_hash/root"
echo "Removing layer: $image_layer -> $root_overlay_path"
if [ $DRY_RUN -eq 0 ]
then
echo "exec: rm -rf $root_overlay_path && rm -rf $layer_path"
rm -rf $root_overlay_path && rm -rf $layer_path
else
echo "dry-run: rm -rf $root_overlay_path && rm -rf $layer_path"
fi
fi
done
fi
# Remove container's old logs
LOGS_LIFETIME_MINUTES=$((1440*3)) # 3 days
echo "Removing container's old logs"
# Only attempt removal when at least one json log file exists, otherwise
# find would error on an unmatched glob.
if [ "$(ls /var/lib/docker/containers/*/*-json.log 2>/dev/null)" ]
then
if [ $DRY_RUN -eq 0 ]
then
# -cmin +N selects logs whose status changed more than N minutes ago.
echo "exec: find /var/lib/docker/containers/*/*-json.log -cmin +$LOGS_LIFETIME_MINUTES -exec sh -c \"rm -rf {} && echo 'Removed '{}\" \;"
find /var/lib/docker/containers/*/*-json.log -cmin +$LOGS_LIFETIME_MINUTES -exec sh -c "rm -rf {} && echo 'Removed '{}" \;
else
echo "dry-run: find /var/lib/docker/containers/*/*-json.log -cmin +$LOGS_LIFETIME_MINUTES -exec sh -c \"rm -rf {} && echo 'Removed '{}\" \;"
fi
fi
echo 'Completed at '`date '+%Y-%m-%d %H:%M:%S'`
echo ''
| true
|
c7658efdabebcdbce946384f16910f3eb175264c
|
Shell
|
ncode3/content-google-cloud-engineer
|
/products/deploy/deploy.sh
|
UTF-8
| 967
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the products image, push it to Google Container Registry, and roll it
# out to the GKE cluster together with the service-account secret.
# Requires PROJECT_NAME, PROJECT_ZONE and PRODUCT_CLUSTER_NAME from the
# common settings file sourced below.
set -e
# Import the settings from the common settings file
source ../../common/project_settings.sh
bash ../build/build.sh
# The tag la-ace-products:0.1 is set in the build file.
# If you change it here, change it there to match.
docker tag la-ace-products:0.1 "gcr.io/$PROJECT_NAME/products"
docker push "gcr.io/$PROJECT_NAME/products"
# Authenticate kubectl
gcloud container clusters get-credentials $PRODUCT_CLUSTER_NAME --zone $PROJECT_ZONE --project $PROJECT_NAME
# Create a secret from the service account JSON file.
# This is an easy way to create or update a secret.
# From some awesome internet person
# https://stackoverflow.com/questions/45879498/how-can-i-update-a-secret-on-kuberenetes-when-it-is-generated-from-a-file
# NOTE(review): recent kubectl spells this '--dry-run=client' — confirm the
# kubectl version in use still accepts the bare '--dry-run' flag.
kubectl create secret generic service-account-file \
--from-file=../app/secrets/service_account.json \
--dry-run -o yaml | kubectl apply -f -
kubectl apply -f workload.yaml
kubectl apply -f service.yaml
| true
|
085fc5f32cb52d2f58887106782aa11e80612c52
|
Shell
|
mangalbhaskar/maskrcnn_sophisticate-
|
/scripts/lscripts/cuda-10.0-with-tensorrt.aptget-install.sh
|
UTF-8
| 4,042
| 3.078125
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Install CUDA 10.0 + cuDNN 7 + NCCL + TensorRT 5 through apt on Ubuntu 18.04,
# mirroring NVIDIA's official Dockerfile steps, and export the environment
# variables needed to build TensorFlow/PyTorch against this toolchain.
function cuda_install_dockerfile() {
  # NOTE(review): 'apt -s' only SIMULATES the purge; drop -s if the old
  # cuda/cudnn packages are actually meant to be removed here.
  sudo apt -s purge 'cuda*'
  sudo apt -s purge 'cudnn*'
  # Register NVIDIA's apt repositories and their signing key.
  sudo apt-get update && sudo apt-get install -y --no-install-recommends \
    gnupg2 curl ca-certificates && \
  curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub | sudo apt-key add - && \
  echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" | sudo tee /etc/apt/sources.list.d/cuda.list && \
  echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /" | sudo tee /etc/apt/sources.list.d/nvidia-ml.list
  # Version pins for the whole stack.
  local CUDA_VER="10.0"
  local CUDA_PKG="${CUDA_VER}.130-1"
  local CUDA_REL=$(echo ${CUDA_VER} | tr . -) ## 10-0
  local CUDA_VERSION=${CUDA_VER}
  local CUDA_PKG_VERSION="${CUDA_REL}=${CUDA_PKG}"
  local cuDNN_VER=7
  local CUDNN_MAJOR_VERSION=${cuDNN_VER}
  local CUDNN_VERSION=7.6.4.38
  local NCCL_VERSION=2.4.8
  local TENSORRT_VER=5
  local LIBNVINFER_VER=5.1.5-1+cuda${CUDA_VER}
  # For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a
  sudo apt-get update && sudo apt-get install -y --no-install-recommends \
    cuda-cudart-${CUDA_PKG_VERSION} \
    cuda-compat-${CUDA_VER}
  sudo ln -s cuda-${CUDA_VER} /usr/local/cuda
  # Required for nvidia-docker v1
  # fix: 'sudo echo ... >> file' performs the redirection as the UNPRIVILEGED
  # user (sudo only elevates echo), so appending to the root-owned file
  # fails; route the write through 'sudo tee -a' instead.
  echo "/usr/local/nvidia/lib" | sudo tee -a /etc/ld.so.conf.d/nvidia.conf >/dev/null && \
  echo "/usr/local/nvidia/lib64" | sudo tee -a /etc/ld.so.conf.d/nvidia.conf >/dev/null
  export PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH
  export LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:$LD_LIBRARY_PATH
  # nvidia-container-runtime
  export NVIDIA_VISIBLE_DEVICES=all
  export NVIDIA_DRIVER_CAPABILITIES=compute,utility
  export NVIDIA_REQUIRE_CUDA="cuda>=${CUDA_VER} brand=tesla,driver>=384,driver<385 brand=tesla,driver>=410,driver<411"
  # Runtime libraries.
  sudo apt-get update && sudo apt-get install -y --no-install-recommends \
    cuda-libraries-${CUDA_PKG_VERSION} \
    cuda-nvtx-${CUDA_PKG_VERSION} \
    libnccl2=$NCCL_VERSION-1+cuda${CUDA_VER} && \
  sudo apt-mark hold libnccl2
  sudo apt-get update && sudo apt-get install -y --no-install-recommends \
    libcudnn7=$CUDNN_VERSION-1+cuda${CUDA_VER}
  # Development packages (headers, nvml, build tools).
  sudo apt-get update && sudo apt-get install -y --no-install-recommends \
    cuda-nvml-dev-${CUDA_PKG_VERSION} \
    cuda-command-line-tools-${CUDA_PKG_VERSION} \
    cuda-libraries-dev-${CUDA_PKG_VERSION} \
    cuda-minimal-build-${CUDA_PKG_VERSION} \
    libnccl-dev=$NCCL_VERSION-1+cuda${CUDA_VER}
  export LIBRARY_PATH=/usr/local/cuda/lib64/stubs:$LIBRARY_PATH
  sudo apt-get update && sudo apt-get install -y --no-install-recommends \
    libcudnn7=$CUDNN_VERSION-1+cuda${CUDA_VER} \
    libcudnn7-dev=$CUDNN_VERSION-1+cuda${CUDA_VER}
  ## Link the libcuda stub to the location where tensorflow is searching for it and reconfigure
  ## dynamic linker run-time bindings
  sudo ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1
  # fix: same sudo-redirect bug as above — the '>' truncation ran without
  # root privileges; use 'sudo tee' for the write.
  echo "/usr/local/cuda/lib64/stubs" | sudo tee /etc/ld.so.conf.d/z-cuda-stubs.conf >/dev/null
  sudo ldconfig
  sudo apt-get install -y --no-install-recommends \
    libnvinfer${TENSORRT_VER}=${LIBNVINFER_VER} \
    libnvinfer-dev=${LIBNVINFER_VER}
  ## Tensorflow specific configuration
  ## https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/dockerfiles/dockerfiles/devel-gpu-jupyter.Dockerfile
  # Configure the build for our CUDA configuration.
  export CI_BUILD_PYTHON=3
  export LD_LIBRARY_PATH=/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/lib64:$LD_LIBRARY_PATH
  export TF_NEED_CUDA=1
  export TF_NEED_TENSORRT=1
  export TF_CUDA_COMPUTE_CAPABILITIES=3.5,5.2,6.0,6.1,7.0
  export TF_CUDA_VERSION=${CUDA_VERSION}
  export TF_CUDNN_VERSION=${CUDNN_MAJOR_VERSION}
  export DEBIAN_FRONTEND=noninteractive
  export FORCE_CUDA="1"
  export TORCH_CUDA_ARCH_LIST="Kepler;Kepler+Tesla;Maxwell;Maxwell+Tegra;Pascal;Volta;Turing"
}
cuda_install_dockerfile
| true
|
36ab3a12a9a4821a874af001b0cdc95c2488d6d0
|
Shell
|
steerforth/docker-redis-cluster
|
/redis_cluster_failover.sh
|
UTF-8
| 850
| 3.046875
| 3
|
[] |
no_license
|
# Trigger a Redis Cluster manual failover against the node on $port, then
# poll until this node reports itself as "myself,master".
# NOTE(review): no shebang — relies on the invoking shell being bash ([[ ]]).
n=5
port=7001
# Third column of CLUSTER NODES holds the flags (master/slave/fail/pfail/...).
query=`redis-cli -c -p $port cluster nodes |awk '{print $3}'`
########
#echo ${query}
# NOTE(review): $query is computed once before the loop, so each retry
# re-tests the same snapshot; only 'sleep 1' separates iterations.
for ((i=0;i<$n;i++));
do
if [[ $query == *fail* ]]
then
echo "[exist cluster node fail or pfail!!!]"
sleep 1
else
# Ask this node to take over from its master.
result=`redis-cli -c -p $port cluster failover`
######
#echo $result
if [[ $result == "ERR You should send CLUSTER FAILOVER to a slave" ]]
then
echo "no need to redis cluster failover"
else
echo "redis cluster failover:$result"
fi
# Poll up to $n times (2s apart) until CLUSTER NODES shows a line flagged
# "myself,master"; the awk program buffers ip/flag columns and prints the
# first matching pair.
for ((j=0;j<$n;j++));
do
endQuery=`redis-cli -c -p $port cluster nodes|awk 'BEGIN {count=0;} {ip[count] = $2;name[count] = $3;count++;}; END{for (i = 0; i < NR; i++) if (match(name[i],"myself,master")){print ip[i],name[i];break}}'`
if [[ $endQuery != "" ]]
then
echo "result====>>>>"$endQuery
break;
fi
echo "sleep 2..."
sleep 2
done
break;
fi
done
| true
|
7efb7104d9e633689194f6efdd1d4e19bce6db09
|
Shell
|
xhkyyy/mongosh
|
/.evergreen/setup-env.sh
|
UTF-8
| 1,724
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Evergreen CI environment setup for mongosh builds.
# Expects NODE_JS_VERSION (and on Windows, OS=Windows_NT) to be preset by the
# CI configuration; prints toolchain versions at the end for the build log.
set -e
set -x
export BASEDIR="$PWD/.evergreen"
# Prepend the vendored git/node/python toolchains; Windows-layout entries are
# harmless on other platforms.
export PATH="$BASEDIR/mingit/cmd:$BASEDIR/mingit/mingw64/libexec/git-core:$BASEDIR/git-2:$BASEDIR/node-v$NODE_JS_VERSION-win-x64:/opt/python/3.6/bin:/opt/chefdk/gitbin:/cygdrive/c/Python39/Scripts:/cygdrive/c/Python39:/cygdrive/c/cmake/bin:/opt/mongodbtoolchain/v3/bin:$PATH"
export IS_MONGOSH_EVERGREEN_CI=1
# On non-Windows hosts, activate the requested Node.js via nvm and pin the
# C/C++ compilers.
if [ "$OS" != "Windows_NT" ]; then
if which realpath; then # No realpath on macOS, but also not needed there
export HOME="$(realpath "$HOME")" # Needed to de-confuse nvm when /home is a symlink
fi
export NVM_DIR="$HOME/.nvm"
echo "Setting NVM environment home: $NVM_DIR"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
nvm use $NODE_JS_VERSION
export PATH="$NVM_BIN:$PATH"
export CC=gcc
export CXX=c++
echo "Using gcc version:"
gcc --version
echo "Using g++ version:"
g++ --version
# Prefer the vendored git if present.
if [ -x "$BASEDIR/git-2/git" ]; then
export GIT_EXEC_PATH="$BASEDIR/git-2"
fi
fi
export EVERGREEN_EXPANSIONS_PATH="$BASEDIR/../../tmp/expansions.yaml"
# Windows tools want a native (backslash) path.
if [ "$OS" == "Windows_NT" ]; then
export EVERGREEN_EXPANSIONS_PATH="$(cygpath -w "$EVERGREEN_EXPANSIONS_PATH")"
fi
# On RHEL hosts, we run as root for some reason
if [ `uname` = Linux ]; then
export npm_config_unsafe_perm=true
fi
export npm_config_registry=https://registry.npmjs.org/
# Diagnostic output for the CI log.
echo "Running on:"
uname -a
echo "Full path:"
echo $PATH
echo "Using node version:"
node --version
echo "Using npm version:"
npm --version
echo "Using git version:"
git --version
echo "Using python version:"
python --version
echo "Node.js OS info:"
node -p '[os.arch(), os.platform(), os.endianness(), os.type(), os.release()]'
echo "/etc/os-release contents:"
cat /etc/os-release || true
| true
|
a9b685d2230d75180b3dd41641d80096ff03d002
|
Shell
|
latifkabir/Computation_using_Fortran90
|
/mxm/mxm.sh
|
UTF-8
| 308
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Compile, link and install the mxm Fortran program into ~/bin/$ARCH.
#
# Compile; capture all compiler output in compiler.txt.
if ! gfortran -c mxm.f90 >& compiler.txt; then
  echo "Errors compiling mxm.f90"
  exit
fi
rm compiler.txt
#
# Link the object file (produces a.out).
if ! gfortran mxm.o; then
  echo "Errors linking and loading mxm.o"
  exit
fi
rm mxm.o
#
mv a.out ~/bin/$ARCH/mxm
#
echo "Executable installed as ~/bin/$ARCH/mxm"
| true
|
08d71b3c1cb9b8b1dddcdb128455f6cb98d40958
|
Shell
|
obana2010/snorttest
|
/testinclude.sh
|
UTF-8
| 716
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Test parameters for the snort include experiment: sources the shared
# environment, then exports the knobs consumed by downstream tooling.
. envinclude.sh
export nodecount=200
export nodeinit=10000
# Highest node id used in this run.
export NODEMAX=$(($nodeinit+$nodecount))
# test timeslot size
export TIMESLOTSIZE=10
export timeslot_test_start=0 # always 0
export timeslot_test_end=30
export clients_count=200; # number of clients
export timeslots_client_attack_continue=30; # timeslot count a client continue to attack
export attacks_per_timeslot=1;
export attack_timeslots=1; # a client attack every $attack_timeslots timeslots
export client_random_ratio=50; # randam client percentage
export avg_domainlist_ratio=0.4;
# NOTE(review): kept disabled — bash integer arithmetic cannot multiply by the
# fractional ratio above.
#export avg_domainlist_count=$(($nodecount*$avg_domainlist_ratio)); # average node count per domain
export domains_count=20; # number of domains
| true
|
bb00cb1fe7704c1b7207503c0731dba433f9b2d3
|
Shell
|
joefutrelle/pocean-core
|
/docs/deploy.sh
|
UTF-8
| 265
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build and publish the Sphinx docs, but only on the Python 3.5 CI job.
set -ev
if [[ "$TRAVIS_PYTHON_VERSION" = "3.5" ]]; then
  (
    # Subshell keeps the directory change local, replacing the cd/cd .. pair.
    cd docs
    conda install --file requirements.txt
    sphinx-apidoc -M -f -o api ../pocean ../pocean/tests
    make html
    doctr deploy --built-docs=_site/html --gh-pages-docs .
  )
fi
| true
|
7e0ff4acc9ef718fa95f5eae2b16eafb02b5d02c
|
Shell
|
climbcomp/climbcomp-api
|
/scripts/autocomplete
|
UTF-8
| 614
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Generic bash completion hook for CLIs that implement a
# --generate-bash-completion flag (urfave/cli style).
# PROG defaults to this script's basename when not preset by the sourcing shell.
: "${PROG:=$(basename "${BASH_SOURCE[0]}")}"
# Completion function: re-runs the partial command line with
# --generate-bash-completion and offers its stdout as completion candidates.
_cli_bash_autocomplete() {
if [[ "${COMP_WORDS[0]}" != "source" ]]; then
local cur opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
# When completing a flag, pass the current word through so the program can
# narrow the suggested options.
if [[ "$cur" == "-"* ]]; then
opts=$( "${COMP_WORDS[@]:0:$COMP_CWORD}" "${cur}" --generate-bash-completion )
else
opts=$( "${COMP_WORDS[@]:0:$COMP_CWORD}" --generate-bash-completion )
fi
# shellcheck disable=2207
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
return 0
fi
}
complete -o bashdefault -o default -o nospace -F _cli_bash_autocomplete "$PROG"
unset PROG
| true
|
9dfb3773f8ac50fd463bdcccf35b50204b5aa039
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/kcm-imchooser-frameworks/PKGBUILD
|
UTF-8
| 1,446
| 2.578125
| 3
|
[] |
no_license
|
# Mantainer maz-1 < ohmygod19993 at gmail dot com >
# PKGBUILD for an unofficial KDE Frameworks 5 port of kcm-imchooser: the
# upstream KDE4 tarball is patched to KF5 in prepare(), built with CMake,
# and shipped with a helper script + systemd unit.
pkgname=kcm-imchooser-frameworks
pkgver=0.1
pkgrel=2
pkgdesc="A input method starter and configure tool. It can help you set annoying system environment variable. Unofficial KF5 port."
arch=('i686' 'x86_64')
url='http://kde-apps.org/content/show.php/kcm+imchooser?content=146776'
license=('GPL')
provides=kcm-imchooser
conflicts=kcm-imchooser
install=kcm-imchooser.install
depends=('qt5-base'
'kio' 'kdelibs4support')
makedepends=('extra-cmake-modules' 'git' 'kdoctools')
source=("http://kde-apps.org/CONTENT/content-files/146776-kcm_imchooser-${pkgver}.tar.bz2"
"port_to_kf5.patch"
"imchooser-helper.sh"
"imchooser-helper.service")
groups=('plasma')
# Only the upstream tarball is checksummed; local files are skipped.
md5sums=('3015a7e5fb2e28bcf9bb413d6c004ab0'
'SKIP'
'SKIP'
'SKIP')
# Apply the KF5 port patch on top of the upstream source.
prepare() {
rm -rf build
mkdir -p build
cd kcm_imchooser-$pkgver
patch -p1 -i "$srcdir/port_to_kf5.patch"
}
# Out-of-tree CMake build against the system Qt5/KF5 install layout.
build() {
cd build
cmake ../kcm_imchooser-$pkgver \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr \
-DLIB_INSTALL_DIR=lib \
-DKDE_INSTALL_USE_QT_SYS_PATHS=ON \
-DSYSCONF_INSTALL_DIR=/etc \
-DBUILD_TESTING=OFF
make
}
# Install the built module plus the helper script and its systemd unit.
package() {
cd build
make DESTDIR="$pkgdir" install
install -Dm755 "$srcdir/imchooser-helper.sh" "$pkgdir/usr/share/imchooser/imchooser-helper"
install -Dm755 "$srcdir/imchooser-helper.service" "$pkgdir/usr/lib/systemd/system/imchooser-helper.service"
}
| true
|
46494d7b6627721fa39be01ecd67018daf173751
|
Shell
|
anilsarma/misc
|
/njt/jenkins.script.sh
|
UTF-8
| 430
| 2.90625
| 3
|
[] |
no_license
|
#git clone git@github.com:anilsarma/misc.git
# Jenkins job: pull the latest misc/njt, run the change checker, and mail a
# notification when the checker updated version.txt.
cd misc/njt || exit 1
git pull
before=$(git log --pretty='%H' master | head -1)
python check_for_changes.py
after=$(git log --pretty='%H' master | head -1)
# fix: this line was 'EMAIL=<>' — an unfilled placeholder that is a shell
# syntax error and aborted the script. Take the address from the environment
# instead; an empty value disables the notification mail.
EMAIL="${EMAIL:-}"
# send out and email if the version file has been updated.
if [ "$before" != "$after" ] && [ -n "$EMAIL" ]; then
python ../google/gmail/gmail_send.py --to "$EMAIL" --from "$EMAIL" --subject "updated NJ Transit" --body ./version.txt;
fi
cat ./version.txt
| true
|
b4e4c3769d8f0627c7e51246dac77be3975e64f8
|
Shell
|
rpdroky/android_device_samsung_i9300
|
/configs/configure_zram
|
UTF-8
| 814
| 3.453125
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Configure four zram swap devices (150 MB each) on Android and raise
# vm.swappiness. Prefers a swapon that supports swap priorities.
# NOTE(review): uses [[ ]], so the Android shell must be mksh/bash-compatible.
set -e
ZRAM_DEVICES="zram0 zram1 zram2 zram3"
ZRAM_SIZE="157286400" # Per each ZRAM_DEVICE, 150 MB (150 * 1048576)
SWAPPINESS="80"
SWAP_BIN="swapon"
SWAP_CMD="-p 2"
# Probe the usage text of swapon / busybox swapon for '-p pri' support and
# fall back to no priority when neither supports it.
if ! swapon 2>&1 | grep -qi "\-p pri"; then # If swapon doesn't support priority
if ! busybox swapon 2>&1 | grep -qi "\-p pri"; then # If busybox swapon doesn't support priority
SWAP_CMD="" # Disable priority
else
SWAP_BIN="busybox swapon" # Change binary to busybox swapon, as it supports priority
fi
fi
# Size each existing zram device, format it as swap and enable it.
for ZRAM_DEVICE in $ZRAM_DEVICES; do
if [[ -e "/dev/block/$ZRAM_DEVICE" && -e "/sys/block/$ZRAM_DEVICE" ]]; then
echo "$ZRAM_SIZE" > "/sys/block/$ZRAM_DEVICE/disksize"
mkswap "/dev/block/$ZRAM_DEVICE"
$SWAP_BIN $SWAP_CMD "/dev/block/$ZRAM_DEVICE"
fi
done
echo "$SWAPPINESS" > /proc/sys/vm/swappiness
exit 0
| true
|
a8ba9f985199b9f283e3e1abc185a4fcc231e317
|
Shell
|
deapplegate/wtgpipeline
|
/non_essentials/postH_preprocess_2015-12-15_W-S-Z+-superflats-finish_FRINGE.sh
|
UTF-8
| 3,858
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
set -xv
### superscript template to do the preprocessing
# Runs the SUBARU preprocessing chain (superflat + optional fringe correction)
# for one run/filter combination, then builds binned mosaics.
. progs.ini > /tmp/progs.out 2>&1
REDDIR=`pwd`
####################################################
### the following need to be specified for each run
####################################################
export SUBARUDIR=/gpfs/slac/kipac/fs1/u/awright/SUBARU
# NOTE(review): ${run} and ${filter} are expanded here but only assigned
# further below (from ${pprun}), so BONN_TARGET/BONN_FILTER end up empty
# unless they were inherited from the environment — confirm intended order.
export BONN_TARGET=${run}
export BONN_FILTER=${filter}
export INSTRUMENT=SUBARU
SKYBACK=256 # in case of SKYFLAT: size of background mesh for superflat
# illumination construction
# use 256 if no "blobs" due to stars are visible (in BVR?)
# 16 (or 32) if lots of blobs
pprun="2015-12-15_W-S-Z+"
#in 2015-12-15_W-J-B 2015-12-15_W-S-Z+ 2015-12-15_W-C-RC 2013-06-10_W-S-Z+ 2012-07-23_W-C-RC 2010-11-04_W-J-B 2010-11-04_W-S-Z+ 2010-03-12_W-C-RC 2010-03-12_W-J-B 2010-03-12_W-S-Z+ 2009-09-19_W-J-V 2009-04-29_W-J-B 2009-04-29_W-S-Z+ 2009-03-28_W-J-V
filter=${pprun#2*_}
run=${pprun%_*}
SET=SET1 # sets time period of flat to use
#this sets: FLAT= # SKYFLAT or DOMEFLAT
if [ -d ${SUBARUDIR}/${run}_${filter}/DOMEFLAT ]; then
FLAT=DOMEFLAT
elif [ -d ${SUBARUDIR}/${run}_${filter}/SKYFLAT ]; then
FLAT=SKYFLAT
else
# fix: this branch said 'continue', which is invalid outside a loop — bash
# prints an error and keeps executing with FLAT unset. Abort instead.
echo "no DOMEFLAT or SKYFLAT directory found for ${run}_${filter}"
exit 2
fi
SCIENCEDIR=SCIENCE_${FLAT}_${SET}
./setup_SUBARU.sh ${SUBARUDIR}/${run}_${filter}/SCIENCE/ORIGINALS
. ${INSTRUMENT:?}.ini > /tmp/instrum.out 2>&1
# this sets: config="10_3"
#adam# fringing correction for Z band only
FRINGE=NOFRINGE
if [ "${filter}" == "W-S-Z+" ] ; then
FRINGE="FRINGE"
fi
# (re-assigned to the same value as above; kept for safety)
SCIENCEDIR=SCIENCE_${FLAT}_${SET}
if [ ${FRINGE} == "FRINGE" ]; then
ending="OCFSF"
elif [ ${FRINGE} == "NOFRINGE" ]; then
ending="OCFS"
else
echo "You need to specify FRINGE or NOFRINGE for the fringing correction!"
exit 2;
fi
#questions#
# * what is eclipse anyway? -> this is the software that messes with stuff (does the Overscan, Cut/Trim, Flat-fielding, Renormalizing, etc.)
# * what is RESCALE? (In process_sub_images_para.sh it determines FLAT properties) -> it's just the renormalizing thing
# * should I even run lines like "./create_binnedmosaics.sh ${SUBARUDIR}/${run}_${filter} $SCIENCEDIR $SCIENCEDIR "_fringe${SKYBACK}" 8 -32" if there is no fringing correction? -> No, you don't have to run them
# * illum isn't the IC, so what is it? -> Its the superflat
# * is the superflat correction supposed to be run only on the OC.fits, not OCF.fits? -> run it on the OCF files
#for other example see: ~/thiswork/preprocess_scripts/do_Subaru_preprocess_2007-07-18_W-J-V.sh #
if [ ${FRINGE} == "FRINGE" ]; then
./create_binnedmosaics.sh ${SUBARUDIR}/${run}_${filter} $SCIENCEDIR $SCIENCEDIR "_fringe${SKYBACK}" 8 -32
fi
exit_stat=$?
if [ "${exit_stat}" -gt "0" ]; then
exit ${exit_stat};
fi
if [ ${FRINGE} == "FRINGE" ]; then
./create_binnedmosaics.sh ${SUBARUDIR}/${run}_${filter} ${SCIENCEDIR}_norm ${SCIENCEDIR} "_fringe${SKYBACK}" 8 -32
fi
exit_stat=$?
if [ "${exit_stat}" -gt "0" ]; then
exit ${exit_stat};
fi
### Apply Corrections to Science Data
if [ ${FRINGE} == "NOFRINGE" ]; then
#adam: this only does the superflat (aka the "illum")
./parallel_manager.sh ./process_science_illum_eclipse_para.sh ${SUBARUDIR}/${run}_${filter} ${SCIENCEDIR}_norm RESCALE ILLUM ${SKYBACK} ${SCIENCEDIR}
else
./parallel_manager.sh ./process_science_illum_fringe_eclipse_para.sh ${SUBARUDIR}/${run}_${filter} ${SCIENCEDIR}_norm RESCALE ${SKYBACK} ${SCIENCEDIR}
fi
exit_stat=$?
if [ "${exit_stat}" -gt "0" ]; then
exit ${exit_stat};
fi
./create_binnedmosaics.sh ${SUBARUDIR}/${run}_${filter} ${SCIENCEDIR} SUP ${ending} 8 -32
exit_stat=$?
if [ "${exit_stat}" -gt "0" ]; then
exit ${exit_stat};
fi
exit 0;
| true
|
17987dea113e07ca33798e3f0a8b39c4598e2d5c
|
Shell
|
pchemguy/sqliteodbc
|
/mkall.sh
|
UTF-8
| 2,465
| 3.265625
| 3
|
[
"LicenseRef-scancode-newlib-historical",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/sh
#
# Build everything for Win32/Win64
# Cross-compiles the SQLite ODBC installers into dist/: MSVCRT and MSVCR100
# variants, each in 32-bit and 64-bit, each with and without bundled SQLite
# DLLs. Progress markers go to /dev/tty because stdout/stderr are redirected
# into mkall.log below.
rm -rf dist
mkdir dist
exec >mkall.log 2>&1
set -x
# Derive version strings from the VERSION file (VER32 is the dot-less form
# used in installer file names).
if test -r VERSION ; then
VER32=$(tr -d '.' <VERSION)
VER=$(cat VERSION)
else
VER32="0"
VER="0.0"
fi
# On x86_64 hosts, force a 32-bit compiler and a linux32 personality for the
# 32-bit builds.
if test $(arch) = "x86_64" ; then
CC32="gcc -m32 -march=i386 -mtune=i386"
SH32="linux32 sh"
else
CC32=gcc
SH32=sh
fi
# Release builds (REL=yes/true/1...) drop the version suffix from file names.
case "$REL" in
[yYTt1-9]*)
MVER32=""
;;
*)
MVER32="-$VER32"
;;
esac
# First pass: default MSVCRT runtime.
export MSVCRT=""
echo -n >/dev/tty "sqliteodbc_dl$MVER32.exe ... "
# 32-bit, SQLite as separate DLLs.
NO_SQLITE2=1 NO_TCCEXT=1 SQLITE_DLLS=2 CC=$CC32 $SH32 mingw-cross-build.sh
mv sqliteodbc.exe dist/sqliteodbc_dl$MVER32.exe
if test $? = 0 ; then echo >/dev/tty OK ; else echo >/dev/tty ERROR ; fi
echo -n >/dev/tty "sqliteodbc$MVER32.exe ..."
# 32-bit, statically bundled SQLite.
CC=$CC32 $SH32 mingw-cross-build.sh
mv sqliteodbc.exe dist/sqliteodbc$MVER32.exe
if test $? = 0 ; then echo >/dev/tty OK ; else echo >/dev/tty ERROR ; fi
echo -n >/dev/tty "sqliteodbc_w64_dl$MVER32.exe ..."
# 64-bit, SQLite as separate DLLs.
NO_SQLITE2=1 SQLITE_DLLS=2 sh mingw64-cross-build.sh
mv sqliteodbc_w64.exe dist/sqliteodbc_w64_dl$MVER32.exe
if test $? = 0 ; then echo >/dev/tty OK ; else echo >/dev/tty ERROR ; fi
echo -n >/dev/tty "sqliteodbc_w64$MVER32.exe ..."
# 64-bit, statically bundled SQLite.
sh mingw64-cross-build.sh
mv sqliteodbc_w64.exe dist/sqliteodbc_w64$MVER32.exe
if test $? = 0 ; then echo >/dev/tty OK ; else echo >/dev/tty ERROR ; fi
# Second pass: same four builds against the MSVCR100 runtime.
export MSVCRT="100"
MVER32="_msvcr100$MVER32"
echo -n >/dev/tty "sqliteodbc_dl$MVER32.exe ..."
NO_SQLITE2=1 NO_TCCEXT=1 SQLITE_DLLS=2 CC=$CC32 $SH32 mingw-cross-build.sh
mv sqliteodbc.exe dist/sqliteodbc_dl$MVER32.exe
if test $? = 0 ; then echo >/dev/tty OK ; else echo >/dev/tty ERROR ; fi
echo -n >/dev/tty "sqliteodbc$MVER32.exe ..."
CC=$CC32 $SH32 mingw-cross-build.sh
mv sqliteodbc.exe dist/sqliteodbc$MVER32.exe
if test $? = 0 ; then echo >/dev/tty OK ; else echo >/dev/tty ERROR ; fi
echo -n >/dev/tty "sqliteodbc_w64_dl$MVER32.exe ..."
NO_SQLITE2=1 SQLITE_DLLS=2 sh mingw64-cross-build.sh
mv sqliteodbc_w64.exe dist/sqliteodbc_w64_dl$MVER32.exe
if test $? = 0 ; then echo >/dev/tty OK ; else echo >/dev/tty ERROR ; fi
echo -n >/dev/tty "sqliteodbc_w64$MVER32.exe ..."
sh mingw64-cross-build.sh
mv sqliteodbc_w64.exe dist/sqliteodbc_w64$MVER32.exe
if test $? = 0 ; then echo >/dev/tty OK ; else echo >/dev/tty ERROR ; fi
# Also ship the source tarball when present next to this checkout.
echo -n >/dev/tty "sqliteodbc-$VER.tar.gz ..."
test -r ../sqliteodbc-$VER.tar.gz && cp -p ../sqliteodbc-$VER.tar.gz dist
if test $? = 0 ; then echo >/dev/tty OK ; else echo >/dev/tty ERROR ; fi
| true
|
ae2fd2d268eb84065acdf2560f204e90aa958f35
|
Shell
|
claudiopetrini/nginx-http2-pagespeed
|
/install.sh
|
UTF-8
| 1,648
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build and install nginx with HTTP/2 (statically built OpenSSL) and the
# ngx_pagespeed module, entirely from source.
# Fixed: the original shebang read "#bin/bash" (missing "!"), so it was just
# a comment and the script ran under whatever shell happened to invoke it.
set -e

#install dependencies
sudo apt-get install unzip zlibc zlib1g build-essential zlib1g-dev libpcre3 libpcre3-dev libssl-dev libxslt1-dev libxml2-dev libgd2-xpm-dev libgeoip-dev libgoogle-perftools-dev libperl-dev curl
#nginx version
NGINX_VERSION=1.12.1
#openssl version
OPENSSL_VERSION=1.0.2l
#ngx_pagespeed version
NPS_VERSION=1.9.32.3
#Directories
CURRENT_DIR=$(pwd)
OPENSSL_DIR=$(pwd)/openssl-${OPENSSL_VERSION}
#Save the nginx version
curl -O "http://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz"
#Extracting nginx
tar -xvf "nginx-${NGINX_VERSION}.tar.gz"
#Save the openssl version
curl -O "https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz"
#Extracting openssl
tar -xvf "openssl-${OPENSSL_VERSION}.tar.gz"
#Save ngx_pagespeed module
wget "https://github.com/pagespeed/ngx_pagespeed/archive/release-${NPS_VERSION}-beta.zip"
#Extracting ngx_pagespeed module
unzip "release-${NPS_VERSION}-beta.zip"
#Entering in ngx_pagespeed
cd "ngx_pagespeed-release-${NPS_VERSION}-beta/"
#Downloading psol (PageSpeed Optimization Libraries, required by the module)
wget "https://dl.google.com/dl/page-speed/psol/${NPS_VERSION}.tar.gz"
#Extracting psol
tar -xzvf "${NPS_VERSION}.tar.gz"
# Exiting from ngx_pagespeed
cd ..
#Entering inside the nginx folder
cd "nginx-${NGINX_VERSION}"
#Configuring
./configure \
    --prefix=/etc/nginx \
    --with-http_ssl_module \
    --with-http_v2_module \
    --with-openssl="${OPENSSL_DIR}" \
    --with-cc-opt="-Wno-deprecated-declarations" \
    --add-module="${CURRENT_DIR}/ngx_pagespeed-release-${NPS_VERSION}-beta"
# Compiling everything on 64 bits systems
KERNEL_BITS=64 make install
#Cleaning sources
cd "${CURRENT_DIR}"
rm ./*.tar.gz
rm -rf "nginx-${NGINX_VERSION}" "${OPENSSL_DIR}"
| true
|
7d9e090e21607672d3b258ca90506d97cbd5ac0b
|
Shell
|
feckert/dotfiles
|
/bin/trash/graph-iptables.sh
|
UTF-8
| 392
| 3.375
| 3
|
[
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
#!/bin/sh
# Render one PNG graph per iptables table from a previously saved dump.
# Expects the dump at /tmp/iptables.txt (created via `iptables-save`).
src="iptables"
dir="/tmp"
input="${dir}/${src}.txt"
if [ ! -e "${input}" ]; then
	echo "No input file ${input} found"
	echo "Execude: iptables-save > ${input}"
else
	for table in mangle filter nat raw; do
		dot_file="${dir}/${src}-${table}.dot"
		# Generate the .dot description for this table, then rasterize it.
		if ~/bin/graph-iptables-save.pl -tables "${table}" \
			> "${dot_file}" \
			< "${input}"; then
			dot -Tpng "${dot_file}" > "${dir}/${src}-${table}.png"
		fi
	done
fi
| true
|
11f582db41b65760e7e17859d956fef25e863a31
|
Shell
|
devilbox/devilbox-cli
|
/src/commands/config/php.sh
|
UTF-8
| 253
| 3.359375
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
# Helpers for the devilbox-cli "config php" command.  All three delegate to
# the generic *choice helpers (defined elsewhere in the CLI) using the "PHP"
# label and the $PHP_CONFIG .env key.

# Print the PHP version currently selected in the .env file.
get_current_php_version () {
    get_readable_current_choice "PHP" "$PHP_CONFIG"
}
# Print every PHP version that can be selected.
get_all_php_versions () {
    get_readable_all_choices "PHP" "$PHP_CONFIG"
}
# Select PHP version $1 in the .env file.
set_php_version () {
    local new=$1
    set_readable_choice "PHP" "$PHP_CONFIG" "$new"
}
| true
|
e5d7451f011c670d8eea6a8fb92b116e65e3287b
|
Shell
|
danchurch/MimulusLuteus_RNAseq
|
/RonPipe/Z40_SamtoolsMerge.sh
|
UTF-8
| 1,625
| 3.84375
| 4
|
[] |
no_license
|
## Merge BAM files. Meant to be used on sequencing data from
## 2 different lanes (i.e. L001 and L002 files). Files should already be sorted.
##
## Ron Smith - 2/13/2017
## rdsmith@email.wm.edu
##
## ------------------------------------------------------------------------
## ----- Settings ---------------------------------------------------------
## Path to main project directory
MAIN=/Volumes/Ron/3894
## Path to SAM files
SAM=$MAIN/SAMBAM
## Path to backup original BAM files
BAK=$MAIN/BAK
## Name of a log file
LOG=$MAIN/Log_Z40_SamtoolsMerge.txt
## ----- End of settings ---------------------------------------------------

## Ensure backup directory exists (-p: no error when it is already there)
mkdir -p "$BAK"
## Start log file (overwrite existing)
echo 'Starting : ' $(date) > "$LOG"
## Loop through all the BAM files containing L001 in the file name
for file1 in "$SAM"/*L001*.bam; do
    ## Skip the literal glob pattern when no L001 files exist
    [ -e "$file1" ] || continue
    ## The second file is like the first, with L002 instead
    file2=${file1/L001/L002}
    ## Output will have the same file name less the L00x part
    outfile=${file1/_L001_/_}
    ## Add .merged to the output file name
    outfile=${outfile/.bam/.merged.bam}
    ## Print file names to the screen
    echo -e '\n'
    echo -e '\tFile1:\t\t' "$file1"
    echo -e '\tFile2:\t\t' "$file2"
    echo -e '\tOutput:\t\t' "$outfile"
    ## Merge the lane pair; log an error line when the merge fails or the
    ## matching L002 file is missing (clearer than the old `a && b || c`).
    if [ -e "$file2" ]; then
        samtools merge "$outfile" "$file1" "$file2" || echo "Something went wrong with file " "$file1" >> "$LOG"
    else
        echo "Something went wrong with file " "$file1" >> "$LOG"
    fi
    echo "$file1" ' : ' $(date) >> "$LOG"
    ## Move original files to BAK folder
    base=${file1##*/}
    mv "$file1" "$BAK/$base"
    base=${file2##*/}
    mv "$file2" "$BAK/$base"
done
echo 'Done : ' $(date) >> "$LOG"
| true
|
f856a56d39b3bc7f8e2d000b8c5f1293d6bea83f
|
Shell
|
amananas/tools
|
/customize/customize.sh
|
UTF-8
| 1,554
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Append global variables/aliases/functions to /etc/profile, copy dotfiles
# into $HOME and apply local patches to oh-my-zsh.

PROFILE_VARIABLES=(
'EDITOR="gvim"'
)
PROFILE_ALIASES=(
'drop_caches="sudo sh -c '"'"'echo 1 > /proc/sys/vm/drop_caches; echo 2 > /proc/sys/vm/drop_caches; echo 3 > /proc/sys/vm/drop_caches'"'"'"'
'git_remove_whitespaces="find . -type f -not -iwholename '"'"'*.git*'"'"' -print0 | xargs -0 perl -pi -e '"'"'s/[\t ]+$//'"'"'"'
'whereiscommand="command_not_found_handle"'
'rgrep="grep -r -n -C 2 --color"'
'tt="tree -L 3"'
'gdif="git difftool"'
'll="ls --color=auto"'
'rr="ranger"'
)
PROFILE_FUNCTIONS=(
'mak() {
make $@; notify-send "Make done."
}'
)
PROFILE_SOURCES=(
)
FILES=(
'.zshrc'
'.zshrc.pre-oh-my-zsh'
'.gitconfig'
)

# Making /etc/profile
echo -e "\n\n#\n# Global Variables\n#" >> /etc/profile
for variable in "${PROFILE_VARIABLES[@]}"; do echo "export $variable" >> /etc/profile; done
echo -e "\n\n#\n# Global Aliases\n#" >> /etc/profile
for alias in "${PROFILE_ALIASES[@]}"; do echo "alias $alias" >> /etc/profile; done
echo -e "\n\n#\n# Global functions\n#" >> /etc/profile
for function in "${PROFILE_FUNCTIONS[@]}"; do echo "function $function" >> /etc/profile; done
echo -e "\n\n#\n# Global sources\n#" >> /etc/profile
for source in "${PROFILE_SOURCES[@]}"; do echo "source $source" >> /etc/profile; done
echo -e "source /etc/profile" > "$HOME/.bashrc"

# Copy source files
for file in "${FILES[@]}"; do cp "$file" "$HOME/$file"; done

# Set up oh-my-zsh.
# Fix: the original looped `for patch in "$patches"` over the QUOTED output
# of `ls`, which treats all patch names as a single word and breaks as soon
# as more than one patch exists.  Iterate over the glob instead.
here=$(pwd)
if cd "$HOME/.oh-my-zsh"; then
    for patch in "$here"/oh-my-zsh_patches/*; do
        [ -e "$patch" ] || continue
        cp "$patch" .
        git apply "$(basename "$patch")"
    done
    cd "$here"
fi
| true
|
aaed2945a636aa6fa156c0c27096e5f74f7366f9
|
Shell
|
skyshaw/skynet3
|
/third_party/glog/fake_compiler.sh
|
UTF-8
| 153
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Fake compiler wrapper: drop the -lgflags / -lgtest link flags from the
# command line, then execute whatever command remains.
set -e -u
filtered=()
for token in "$@"; do
  if [ "${token}" != '-lgflags' ] && [ "${token}" != '-lgtest' ]; then
    filtered+=("${token}")
  fi
done
# Run the remaining command verbatim.
"${filtered[@]}"
| true
|
6bcd00a6ab366d366b68c4d29befeff2c4eb3edb
|
Shell
|
LaurentColoma/urshi
|
/src/app/css/images/rename
|
UTF-8
| 336
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Interactively rename numbered image files (e.g. "7.png") in the current
# directory to "level<L>-<n>.<ext>".  Each round reads a level number and an
# image count from stdin and renames the next <count> images from the
# startup listing.  Loops forever; stop with Ctrl-C.
# NOTE(review): on EOF the `read`s keep failing and the loop spins —
# consider `read ... || exit` (left unchanged here).
img=0
# Snapshot of the numbered image files present when the script starts.
imgs=$(ls | grep -E '^[0-9]+\.(png|jpg|jpeg)$')
while [ 1 ]
do
    echo -n "level: "
    read level
    echo -n "images: "
    read images
    for nb in $(seq $images)
    do
        # Absolute index of this image within the startup listing.
        n=$(($img + $nb))
        file=$(echo "$imgs" | grep -E "^$n\.")
        # Keep the original extension (second dot-separated field).
        mv "$file" "level$level-$nb.$(echo "$file" | cut -d '.' -f2)"
    done
    img=$(($img + $images))
done
| true
|
eb9949a483a8abb3bd37d44064cf44366e20395a
|
Shell
|
andreyvit/env
|
/bin/autoshot
|
UTF-8
| 563
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Periodically capture the screen into ~/Workcast/Shots, skipping intervals
# in which the machine has been idle for a full period.
# macOS only: relies on ioreg (IOKit) and screencapture.
# With -d: debug mode — 5 s period, second-resolution file names.
DESTDIR=~/Workcast/Shots
PERIOD=60
DATEFMT=%Y-%m-%d_%a_%H_%M
if test "$1" = "-d"; then
    PERIOD=5
    DATEFMT=%Y-%m-%d_%a_%H_%M_%S_debug
fi
# Seconds since the last keyboard/mouse event, from the IOKit registry
# (HIDIdleTime is reported in nanoseconds, hence the /1000000000).
idle_time() {
    ioreg -c IOHIDSystem | grep HIDIdleTime | head -1 | sed 's/[^0-9]//g; s,$,/1000000000,' | bc
}
# Take one screenshot (-C: include cursor, -x: no sound) and print its path.
capture() {
    mkdir -p $DESTDIR
    file=$DESTDIR/$(date "+$DATEFMT").png
    screencapture -C -x $file 2>/dev/null
    echo $file
}
# Main loop: if no input occurred during the whole period, just log "idle";
# otherwise capture a shot.  Runs until killed.
while true; do
    if test $(idle_time) -ge $PERIOD; then
        echo "# $(date "+$DATEFMT") idle"
    else
        capture
    fi
    sleep $PERIOD
done
| true
|
f43e96b4fc4d18fe56476fe57f4195ee5eb3004b
|
Shell
|
KevinGrandon/run-docker-compose-buildkite-plugin
|
/hooks/command
|
UTF-8
| 1,549
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Buildkite plugin "command" hook: copies CI scripts and the configured
# package out of a prebuilt Docker image into the checkout, then runs the
# configured docker-compose service (optionally building it first).
set -ex

SCRIPT=$BUILDKITE_COMMAND
IMAGE=$BUILDKITE_PLUGIN_RUN_DOCKER_COMPOSE_IMAGE
COMPOSE_FILE=$BUILDKITE_PLUGIN_RUN_DOCKER_COMPOSE_COMPOSE_FILE
SERVICE=$BUILDKITE_PLUGIN_RUN_DOCKER_COMPOSE_SERVICE
SHOULD_BUILD=$BUILDKITE_PLUGIN_RUN_DOCKER_COMPOSE_BUILD
# Exported because it is expanded inside the (unquoted) heredoc below.
export PACKAGE=$BUILDKITE_PLUGIN_RUN_DOCKER_COMPOSE_PACKAGE

VERBOSE=""
if [ "$BUILDKITE_PLUGIN_RUN_DOCKER_COMPOSE_VERBOSE" == "true" ]; then
    VERBOSE="--verbose"
fi

# NOTE(review): ${CMD} is never assigned in this script (CMD is only the
# heredoc delimiter below), so this prints an empty value — confirm intent.
echo "--- Running ${CMD} for ${PACKAGE} in ${IMAGE}"

mkdir -p web-code-source && chmod a+w web-code-source

# pull out ci code and $PACKAGE code from the docker build image, and store in local /web-code-source folder
# The heredoc delimiter is unquoted, so $(dirname $PACKAGE), $(id -u) and
# $(id -g) expand on the HOST; the \$(...) forms expand inside the container.
docker run \
    -v ${PWD}/web-code-source:/web-code-source \
    -i \
    --rm \
    -u 0 \
    $IMAGE bash <<CMD
rm -rf /web-code-source/*
cp -rf /web-code/ci /web-code-source
mkdir -p /web-code-source/$(dirname $PACKAGE)
mkdir -p /web-code-source/projects
cp -rf /web-code/projects/monorepo-ci /web-code-source/projects
cp -rf /web-code/$PACKAGE /web-code-source/$PACKAGE
echo Changing owner of /web-code-source from \$(id -u):\$(id -g) to $(id -u):$(id -g)
chown -R $(id -u):$(id -g) /web-code-source
echo "chown DONE"
CMD

# copy contents of web-code-source to the working directory
cp -a web-code-source/* .

if [ -n "$SHOULD_BUILD" ]; then
    IMAGE=$IMAGE docker-compose $VERBOSE -f $COMPOSE_FILE build $SERVICE
fi

# Run the service; when a buildkite command was given, run it via bash.
# NOTE(review): `[ -z $SCRIPT ]` is unquoted — a multi-word command works by
# accident here; confirm before tightening.
if [ -z $SCRIPT ]
then
    IMAGE=$IMAGE docker-compose $VERBOSE -f $COMPOSE_FILE run $SERVICE
else
    IMAGE=$IMAGE docker-compose $VERBOSE -f $COMPOSE_FILE run $SERVICE bash $SCRIPT
fi
| true
|
c330ccef3470781b072f5e5a1c29ec393a685d01
|
Shell
|
marianoelsztain/Shcript
|
/ex7.sh
|
UTF-8
| 164
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Report how many entries a directory contains.
# Usage: ex7.sh DIR
# Fixed: the original tested `[ -d $DIR ]` unquoted, which is TRUE when DIR
# is empty (single-argument test), so a missing argument listed the cwd.

# count_entries DIR: print the number of entries directly inside DIR.
count_entries() {
    ls "$1" | wc -l
}

main() {
    local dir=$1
    if [ -d "$dir" ]; then
        local count
        count=$(count_entries "$dir")
        echo "O $dir tem $count arquivos"
    else
        echo "O argumento $dir não é um diretorio"
    fi
}

main "$@"
| true
|
c90b34f0b1c0f9f49b52ce0a5a0ec18ddf655463
|
Shell
|
fromeroj/mysql_replication_example
|
/source/provisionMaster.sh
|
UTF-8
| 433
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant provisioner for the MySQL replication MASTER box.
# Idempotent: a ~/.provision marker file prevents re-provisioning.
# update_system and setup_mysql come from the sourced provision.sh.
source /home/vagrant/source/provision.sh
if [[ ! -e /home/vagrant/.provision ]];then
    update_system
    setup_mysql
    sudo /etc/init.d/mysqld stop
    # Insert the master replication settings (bind address, server id,
    # binlog for the "example" db) just before the [mysqld_safe] section.
    sudo sed -i 's/\[mysqld_safe\]/bind-address = 192.168.20.100\nserver-id = 100\nlog_bin = \/var\/log\/mysql\/mysql-bin.log\nbinlog_do_db = example\n[mysqld_safe]/' /etc/my.cnf
    sudo /etc/init.d/mysqld start
    # Drop the marker so the next `vagrant provision` is a no-op.
    echo "done" > /home/vagrant/.provision
fi
| true
|
c266d06bdb2ff97aac4374f2ae6f92b4adcadc34
|
Shell
|
Ju-lia/ng6-golden-layout
|
/initBuildServe.sh
|
UTF-8
| 476
| 2.75
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -e
# Build the ee-golden-layout library, pack it and symlink the build output
# into node_modules, then build and serve the testbed app against it.
# (-e in the shebang: abort on the first failing command when run directly.)
npm install
ORG="@embedded-enterprises"
PKG="ng6-golden-layout"
echo "This scripts builds the projects."
echo "Building $ORG/$PKG"
ng build ee-golden-layout
echo "Copying readme"
cp README.md dist/$ORG/$PKG
echo "Repacking lib"
# Pack the built library into dist/$ORG/$PKG.tgz (subshell keeps the cwd).
(cd dist/$ORG/$PKG && tar czvf ../$PKG.tgz *)
echo "Linking $ORG/$PKG"
# Replace any previous link so node_modules resolves to the fresh build.
rm -f node_modules/$ORG/$PKG
mkdir -p node_modules/$ORG
ln -s ../../dist/$ORG/$PKG node_modules/$ORG/$PKG
echo "Building testbed"
ng build testbed
ng serve
| true
|
ad20ecfbd378d3411172e52d0e7976779dd753b5
|
Shell
|
brainvisa/casa-distro
|
/etc/bash_completion.d/casa_distro-completion.bash
|
UTF-8
| 21,448
| 3.421875
| 3
|
[
"CECILL-B"
] |
permissive
|
# Complete the VALUE of a key=value option on a casa_distro command line.
# Looks two words back (one, when the word being completed is "=") to find
# the option key, then proposes values appropriate for that key — distro
# names, system names, image files, booleans, directories, etc.
function _complete_casa_distro_option_()
{
    local word=${COMP_WORDS[COMP_CWORD]}
    local opt_n=$(( COMP_CWORD - 2 ))
    if [ "$word" = "=" ]; then
        local word=""
        local opt_n=$(( opt_n + 1 ))
    fi
    local opt=${COMP_WORDS[opt_n]}
    # get repository location
    if [ "${COMP_WORDS[1]}" = "-r" ] \
        || [ "${COMP_WORDS[1]}" = "--repository" ]; then
        local CASA_BASE_DIRECTORY="${COMP_WORDS[2]}"
        local cmd="${COMP_WORDS[3]}"
    else
        local cmd="${COMP_WORDS[1]}"
    fi
    # TODO: catch cmd option build_workflows_repository
    if [ -z "$CASA_BASE_DIRECTORY" ]; then
        local CASA_BASE_DIRECTORY="$HOME/casa_distro"
    fi
    # Share directory of the installed casa_distro (resolved via its path).
    local SHARE=$(realpath $(realpath $(dirname $(realpath $(which casa_distro))))/../share)
    if [ -d "$SHARE"/casa-distro-* ]; then
        SHARE="$SHARE"/casa-distro-*
    fi
    # All places where distro/docker definitions may live.
    local SHARE_DIRS="${SHARE} ${CASA_BASE_DIRECTORY}/share ${HOME}/.config/casa-distro ${HOME}/.casa-distro"
    case "$opt" in
    distro)
        local distro
        if [ ! -d "$SHARE" ]; then
            # no share dir (zip distrib): use builtin list
            distro="brainvisa opensource cati_platform web cea"
        fi
        for d in ${SHARE_DIRS}; do
            if [ -d "$d/distro" ]; then
                for d2 in $d/distro/*/; do
                    distro="$distro $(basename $d2)"
                done
            fi
        done
        COMPREPLY=($(compgen -W "$distro" -- "${word}"))
        ;;
    branch)
        # NOTE(review): "latest_releasse" looks like a typo for
        # "latest_release" — confirm against the casa_distro CLI.
        COMPREPLY=($(compgen -W "master integration latest_releasse release_candidate" -- "${word}"))
        ;;
    system)
        local sys
        if [ ! -d "$SHARE" ]; then
            # no share dir (zip distrib): use builtin list
            sys="centos-7.4 ubuntu-12.04 ubuntu-14.04 ubuntu-16.04 ubuntu-18.04 ubuntu-20.04 ubuntu-22.04 windows-7-32 windows-7-64"
        else
            for system in "$SHARE/docker/casa-dev"/*/; do
                sys="$sys $(basename $system)"
            done
        fi
        COMPREPLY=($(compgen -W "$sys" -- "${word}"))
        ;;
    container_type|convert_from)
        COMPREPLY=($(compgen -W "singularity docker vbox" -- "${word}"))
        ;;
    name|environment_name)
        # Outside a container, ask casa_distro for existing environment names.
        if [ -z "$CASA_ENVIRONMENT" ]; then
            local names=`casa_distro list base_directory=$CASA_BASE_DIRECTORY | grep -E -v '^( [a-z])'`
            COMPREPLY=($(compgen -W "$names" -- "${word}"))
        else
            COMPREPLY=($(compgen -W "" -- "${word}"))
        fi
        ;;
    image|base_image|base|source)
        # take existing singularity images
        local images=$CASA_BASE_DIRECTORY/*.sif
        # for f in $images
        # do
        #     local b=$(basename "$f")
        #     local nimages="$nimages ${b:0:-4}"
        # done
        COMPREPLY=($(compgen -W "$images" -- "${word}"))
        ;;
    image_names)
        if [ "$cmd" = "publish_singularity" ] || [ "$cmd" = "clean_images" ]; then
            # take existing singularity images
            local images=$CASA_BASE_DIRECTORY/*.simg
        fi
        if [ "$cmd" = "create_docker" ]; then
            local nimages
            if [ ! -d "$SHARE" ]; then
                # no share dir (zip distrib): use builtin list
                nimages="cati/casa-test: cati/casa-dev: cati/cati_platform:"
            fi
            for d in ${SHARE_DIRS}; do
                if [ -d "$d/docker" ]; then
                    for d2 in $d/docker/*/; do
                        nimages="${nimages} cati/$(basename $d2):"
                    done
                fi
            done
        fi
        # Convert image file names back to docker-like repo:tag names.
        for f in $images
        do
            local b=$(basename "$f")
            # rebuild docker-like replacing 1st _ with /
            local b="${b/_//}"
            # then replace last _ with :
            # local b="${b/_/:}"
            local b=${b%_*}:${b##*_}
            local nimages="$nimages ${b:0:-5}"
        done
        if [ "$cmd" = "publish_docker" ] || [ "$cmd" = "create_singularity" ];
        then
            # complete using existing docker images
            local docker=$(which docker)
            if [ "$?" -eq 0 ]; then
                local dimages=$(docker images --format "{{.Repository}}:{{.Tag}}")
                local nimages="$nimages $dimages"
            fi
        fi
        COMPREPLY=($(compgen -W "$nimages" -- "${word}"))
        ;;
    gui|verbose|force|root|install|install_doc|install_test|generate|zip|upload|interactive|json|update_casa_distro|update_base_images|dev_tests|update_user_images|user_tests|full|rw_install|cleanup|fakeroot)
        # Boolean-valued options.
        COMPREPLY=($(compgen -W "True False true false 1 0 yes no Yes No" -- "${word}"))
        ;;
    opengl)
        COMPREPLY=($(compgen -W "auto nv container software" -- "${word}"))
        ;;
    base_directory)
        # Complete directories.
        COMPREPLY=($(compgen -d -- "${word}"))
        ;;
    type)
        COMPREPLY=($(compgen -W "run dev system" -- "${word}"))
        ;;
    format)
        COMPREPLY=($(compgen -W "text rst" -- "${word}"))
        ;;
    action)
        COMPREPLY=($(compgen -W "next all casa_dir copy_files apt_dependencies pip_dependencies compiled_dependencies cleanup_build_dependencies cleanup_files copying_files apt_dev_dependencies fix_wsl2 pip_dev_dependencies compiled_dev_dependencies install_casa_distro casa_dev_components cleanup" -- "${word}"))
        ;;
    bv_maker_steps)
        COMPREPLY=($(compgen -W "info status sources configure build doc test pack install_pack test_pack testref testref_pack" -- "${word}"))
        ;;
    install_thirdparty)
        COMPREPLY=($(compgen -W "default all spm12-standalone freesurfer none" -- "${word}"))
        ;;
    esac
}
# Complete the TAG part of an image name after a ":" (e.g. image=repo:<TAB>).
# The repo part is found two words back (one, when the current word is ":");
# candidate tags come from existing singularity/docker images or from the
# docker build definitions in the share directories.
function _complete_casa_distro_image_names_tag_()
{
    local word=${COMP_WORDS[COMP_CWORD]}
    local image=${COMP_WORDS[$(( COMP_CWORD - 2 ))]}
    if [ "$word" = ":" ]; then
        local image=${COMP_WORDS[$(( COMP_CWORD - 1 ))]}
    fi
    if [ "$word" = ":" ]; then
        local word=""
    fi
    # get repository location
    if [ "${COMP_WORDS[1]}" = "-r" ] \
        || [ "${COMP_WORDS[1]}" = "--repository" ]; then
        local CASA_BASE_DIRECTORY="${COMP_WORDS[2]}"
        local cmd="${COMP_WORDS[3]}"
    else
        local cmd="${COMP_WORDS[1]}"
    fi
    if [ -z "$CASA_BASE_DIRECTORY" ]; then
        local CASA_BASE_DIRECTORY="$HOME/casa_distro"
    fi
    if [ "$cmd" = "publish_singularity" ]; then
        local images=$CASA_BASE_DIRECTORY/*.simg
    fi
    local nimages=""
    # Convert image file names back to docker-like repo:tag names.
    for f in $images
    do
        local b=$(basename "$f")
        # rebuild docker-like replacing 1st _ with /
        local b="${b/_//}"
        # then replace last _ with :
        # local b="${b/_/:}"
        local b=${b%_*}:${b##*_}
        local nimages="$nimages ${b:0:-5}"
    done
    if [ "$cmd" = "create_singularity" ] || [ "$cmd" = "publish_docker" ]; then
        # complete using existing docker images
        local docker=$(which docker)
        if [ "$?" -eq 0 ]; then
            local dimages=$(docker images --format "{{.Repository}}:{{.Tag}}")
            local nimages="$nimages ${dimages}"
        fi
    fi
    if [ "$cmd" = "create_docker" ]; then
        # Tags come from the docker build definitions matching this repo.
        local SHARE=$(realpath $(realpath $(dirname $(realpath $(which casa_distro))))/../share)
        if [ -d "$SHARE"/casa-distro-* ]; then
            SHARE="$SHARE"/casa-distro-*
        fi
        local SHARE_DIRS="${SHARE} ${CASA_BASE_DIRECTORY}/share ${HOME}/.config/casa-distro ${HOME}/.casa-distro"
        local nimages
        local image_dir=$(basename ${image})
        for d in ${SHARE_DIRS}; do
            if [ -d "$d/docker/${image_dir}" ]; then
                for d2 in ${d}/docker/${image_dir}/*/; do
                    nimages="${nimages} ${image}:$(basename $d2)"
                done
            fi
        done
        if [ -z "${nimages}" ]; then
            local sys
            if [ ! -d "$SHARE" ]; then
                # no share dir (zip distrib): use builtin list
                sys="centos-7.4 ubuntu-12.04 ubuntu-14.04 ubuntu-16.04 ubuntu-18.04 ubuntu-20.04 ubuntu-22.04 windows-7-32 windows-7-64"
            else
                for system in "$SHARE/docker/casa-dev"/*/; do
                    sys="$sys $(basename $system)"
                done
            fi
            local nimages
            for system in $sys; do
                nimages="${nimages} ${image}:${system}"
            done
        fi
    fi
    # Strip the "repo:" prefix from matches so only the tag is inserted.
    local matching=($(compgen -W "$nimages" -- "${image}:${word}"))
    local m
    for m in ${matching[@]}; do
        COMPREPLY+=(${m/$image:/})
    done
}
# Top-level completion for the `casa_distro` command: completes the
# subcommand name, then per-subcommand key= options, delegating to the
# option-value / image-tag helpers when the cursor is after "=" or ":".
function _complete_casa_distro_()
{
    local word=${COMP_WORDS[COMP_CWORD]}
    local line=${COMP_LINE}
    local cmd_list="help distro list list_images shell update pull_image run mrun bv_maker delete clean_images"
    local opt_list="-h --help -v --verbose --version"
    local cmd_wd_num=1
    # find if 1st option is -r
    if (( COMP_CWORD > 1 )) \
        && { [ "${COMP_WORDS[1]}" = "-r" ] \
             || [ "${COMP_WORDS[1]}" = "--repository" ]; }; then
        case "$COMP_CWORD" in
        2)
            # completing dir
            COMPREPLY=($(compgen -d -- "$word"))
            return
            ;;
        *)
            # -r arg is already passed, cmd is arg 3
            local cmd_wd_num=3
        esac
    fi
    case $(( COMP_CWORD - cmd_wd_num )) in
    0)
        # Completing the subcommand itself; append a space on a single match.
        COMPREPLY=($(compgen -W "$cmd_list $opt_list" -- "${word}"))
        if [ -n "$COMPREPLY" ]; then
            COMPREPLY="$COMPREPLY "
        fi
        ;;
    *)
        local cmd=${COMP_WORDS[cmd_wd_num]}
        if [ "$word" = "=" ] \
            || [ "${COMP_WORDS[$(( COMP_CWORD - 1 ))]}" = "=" ]; then
            # after = sign: complete an option value
            _complete_casa_distro_option_
            return
        fi
        if { [ "$word" = ":" ] \
             && [ "${COMP_WORDS[$(( COMP_CWORD - 3 ))]}" = "image_names" ]; } \
           || { [ "${COMP_WORDS[$(( COMP_CWORD - 1 ))]}" = ":" ] \
                && [ "${COMP_WORDS[$(( COMP_CWORD - 4 ))]}" = "image_names" ];}; then
            # in image_names option, after : sign
            _complete_casa_distro_image_names_tag_
            return
        fi
        if { [ "$word" = ":" ] \
             && [ "${COMP_WORDS[$(( COMP_CWORD - 3 ))]}" = "image" ]; } \
           || { [ "${COMP_WORDS[$(( COMP_CWORD - 1 ))]}" = ":" ] \
                && [ "${COMP_WORDS[$(( COMP_CWORD - 4 ))]}" = "image" ];}; then
            # in image option, after : sign
            _complete_casa_distro_image_names_tag_
            return
        fi
        # Per-subcommand key= option lists.
        case "$cmd" in
        help)
            COMPREPLY=($(compgen -W "format= full= $cmd_list" -- "${word}"))
            ;;
        bv_maker)
            # use casa-distro options first
            COMPREPLY1=($(compgen -W "type= distro= branch= system= image_version= name= base_directory= gui= opengl= root= image= cwd= env= container_options= verbose=" -- "${word}"))
            # delegate to bv_maker completion
            COMP_WORDS=("${COMP_WORDS[@]:1}")
            COMP_CWORD=$(( COMP_CWORD - 1 ))
            _complete_bv_maker_
            COMPREPLY=( "${COMPREPLY1[@]}" "${COMPREPLY[@]}" )
            ;;
        list)
            COMPREPLY=($(compgen -W "type= distro= branch= system= image_version= version= name= base_directory= verbose= json=" -- "${word}"))
            ;;
        list_images)
            COMPREPLY=($(compgen -W "type= distro= branch= system= image_version= version= name= image= base_directory= verbose=" -- "${word}"))
            ;;
        mrun)
            COMPREPLY=($(compgen -W "type= distro= branch= system= image_version= name= version= base_directory= gui= opengl= root= image= cwd= env= container_options= verbose=" -- "${word}"))
            ;;
        run)
            COMPREPLY=($(compgen -W "type= distro= branch= system= image_version= name= version= base_directory= gui= opengl= root= image= cwd= env= container_options= verbose=" -- "${word}"))
            ;;
        shell)
            COMPREPLY=($(compgen -W "type= distro= branch= system= image_version= name= version= base_directory= gui= opengl= root= image= cwd= env= container_options= verbose=" -- "${word}"))
            ;;
        update)
            COMPREPLY=($(compgen -W "type= distro= branch= system= image_version= name= base_directory= writable= verbose=" -- "${word}"))
            ;;
        pull_image)
            COMPREPLY=($(compgen -W "type= distro= branch= system= image_version= name= version= base_directory= image= url= force= verbose=" -- "${word}"))
            ;;
        delete)
            COMPREPLY=($(compgen -W "type= distro= branch= system= image_version= name= version= base_directory= interactive=" -- "${word}"))
            ;;
        clean_images)
            COMPREPLY=($(compgen -W "base_directory= image= distro= branch= system= image_version= name= version= type= verbose= interactive=" -- "${word}"))
            ;;
        esac
        ;;
    esac
}
# Completion for the `bv` launcher: completes bv's own options first, then
# delegates to the completion machinery INSIDE the container by running the
# target command's bash completion there (via a generated helper script).
function _complete_bv_()
{
    local word=${COMP_WORDS[COMP_CWORD]}
    local line=${COMP_LINE}
    local opt_list="-h --help -v --verbose"
    local kw_opt_list="gui= opengl= root= image= cwd= env= container_options= verbose="
    local cmd_wd_num=1
    # echo
    # echo "word: $word"
    # echo "line: $line"
    # echo "COMP_CWORD: $COMP_CWORD"
    # echo "COMP_WORDS: ${COMP_WORDS[@]}"
    # find if 1st option is -r
    if (( COMP_CWORD > 1 )) \
        && { [ "${COMP_WORDS[1]}" = "-h" ] \
             || [ "${COMP_WORDS[1]}" = "--help" ] \
             || [ "${COMP_WORDS[1]}" = "--verbose" ] \
             || [ "${COMP_WORDS[1]}" = "-v" ]; }; then
        local cmd_wd_num=2
    fi
    if [ "$word" = "=" ] \
        || [ "${COMP_WORDS[$(( COMP_CWORD - 1 ))]}" = "=" ]; then
        # after = sign: complete an option value
        _complete_casa_distro_option_
        return
    fi
    # If the current word matches one of bv's own options, stop here.
    COMPREPLY=($(compgen -W "$opt_list $kw_opt_list" -- "${word}"))
    if [ -n "$COMPREPLY" ]; then
        if [ ${COMPREPLY:(-1)} != "=" ]; then
            COMPREPLY="$COMPREPLY "
        fi
        return
    fi
    # use completion within bv container
    # Command line with the leading "bv" word stripped off.
    new_line=${COMP_LINE:$((${#COMP_WORDS[0]} + 1))}
    if [ $(( COMP_CWORD - cmd_wd_num )) == "0" ]; then
        # Completing the command name itself: list commands in the container.
        COMPREPLY=($(${COMP_WORDS[0]} -- bash -i -l -c ". ~/.bashrc && compgen -c $new_line"))
        return
    else
        # Completing the command's arguments: ship a helper that drives the
        # container's programmable completion and prints COMPREPLY.
        # NB: the heredoc content is DATA sourced inside the container; the
        # escaped \$ forms expand there, not here.
        tmp=$(mktemp)
        cat << EOF > $tmp
#
# Author: Brian Beffa <brbsix@gmail.com>
# Original source: https://brbsix.github.io/2015/11/29/accessing-tab-completion-programmatically-in-bash/
# License: LGPLv3 (http://www.gnu.org/licenses/lgpl-3.0.txt)
# https://brbsix.github.io/2015/11/29/accessing-tab-completion-programmatically-in-bash/
#
get_completions(){
    local completion COMP_CWORD COMP_LINE COMP_POINT COMP_WORDS COMPREPLY=()
    # load bash-completion if necessary
    declare -F _completion_loader &>/dev/null || {
        source /usr/share/bash-completion/bash_completion
    }
    COMP_LINE=\$*
    COMP_POINT=\${#COMP_LINE}
    eval set -- "\$@"
    COMP_WORDS=("\$@")
    # add '' to COMP_WORDS if the last character of the command line is a space
    [[ \${COMP_LINE[@]: -1} = ' ' ]] && COMP_WORDS+=('')
    # index of the last word
    COMP_CWORD=\$(( \${#COMP_WORDS[@]} - 1 ))
    # determine completion function
    completion=\$(complete -p "\$1" 2>/dev/null | awk '{print \$(NF-1)}')
    # run _completion_loader only if necessary
    [[ -n \$completion ]] || {
        # load completion
        _completion_loader "\$1"
        # detect completion
        completion=\$(complete -p "\$1" 2>/dev/null | awk '{print \$(NF-1)}')
    }
    # ensure completion was detected
    [[ -n \$completion ]] || return 1
    # execute completion function
    "\$completion"
    # print completions to stdout
    printf '%s\n' "\${COMPREPLY[@]}" | LC_ALL=C sort
}
EOF
        COMPREPLY=($(${COMP_WORDS[0]} -- bash -i -l -c ". ~/.bashrc && . $tmp && get_completions $new_line"))
        rm -f $tmp
    fi
}
# Top-level completion for `casa_distro_admin`: completes the subcommand,
# then per-subcommand key= options, delegating to the option-value and
# image-tag helpers when the cursor is after "=" or ":".
function _complete_casa_distro_admin_()
{
    local word=${COMP_WORDS[COMP_CWORD]}
    local line=${COMP_LINE}
    local cmd_list="help create_base_image convert_image publish_base_image publish_user_image create_user_image singularity_deb singularity_debs bbi_daily local_install"
    local opt_list="-h --help -v --verbose --version"
    local cmd_wd_num=1
    # find if 1st option is -r
    if (( COMP_CWORD > 1 )) \
        && { [ "${COMP_WORDS[1]}" = "-r" ] \
             || [ "${COMP_WORDS[1]}" = "--repository" ]; }; then
        case "$COMP_CWORD" in
        2)
            # completing dir
            COMPREPLY=($(compgen -d -- "$word"))
            return
            ;;
        *)
            # -r arg is already passed, cmd is arg 3
            local cmd_wd_num=3
        esac
    fi
    case $(( COMP_CWORD - cmd_wd_num )) in
    0)
        # Completing the subcommand itself; append a space on a single match.
        COMPREPLY=($(compgen -W "$cmd_list $opt_list" -- "${word}"))
        if [ -n "$COMPREPLY" ]; then
            COMPREPLY="$COMPREPLY "
        fi
        ;;
    *)
        local cmd=${COMP_WORDS[cmd_wd_num]}
        if [ "$word" = "=" ] \
            || [ "${COMP_WORDS[$(( COMP_CWORD - 1 ))]}" = "=" ]; then
            # after = sign: complete an option value
            _complete_casa_distro_option_
            return
        fi
        if { [ "$word" = ":" ] \
             && [ "${COMP_WORDS[$(( COMP_CWORD - 3 ))]}" = "image" ]; } \
           || { [ "${COMP_WORDS[$(( COMP_CWORD - 1 ))]}" = ":" ] \
                && [ "${COMP_WORDS[$(( COMP_CWORD - 4 ))]}" = "image" ];}; then
            # in image_names option, after : sign
            _complete_casa_distro_image_names_tag_
            return
        fi
        # Per-subcommand key= option lists.
        case "$cmd" in
        help)
            COMPREPLY=($(compgen -W "format= full= $cmd_list" -- "${word}"))
            ;;
        create_base_image)
            COMPREPLY=($(compgen -W "type= name= base= output= container_type= image_version= force= memory= video_memory= disk_size= gui= cleanup= verbose=" -- "${word}"))
            ;;
        convert_image)
            COMPREPLY=($(compgen -W "source= container_type= verbose= convert_from=" -- "${word}"))
            ;;
        publish_base_image)
            COMPREPLY=($(compgen -W "type= image= container_type= verbose=" -- "${word}"))
            ;;
        publish_user_image)
            COMPREPLY=($(compgen -W "image=" -- "${word}"))
            ;;
        create_user_image)
            COMPREPLY=($(compgen -W "version= name= base_image= distro= branch= system= image_version= environment_name= container_type= output= force= base_directory= install= install_doc= install_test= install_thirdparty= generate= cleanup= zip= fakeroot= verbose=" -- "${word}"))
            ;;
        singularity_deb)
            COMPREPLY=($(compgen -W "system= output= dockerhub= version= go_version=" -- "${word}"))
            ;;
        singularity_debs)
            COMPREPLY=($(compgen -W "directory=" -- "${word}"))
            ;;
        bbi_daily)
            COMPREPLY=($(compgen -W "distro= branch= system= image_version= name= jenkins_server= jenkins_auth= update_casa_distro= update_base_images= bv_maker_steps= dev_tests= update_user_images= user_tests= base_directory= install_thirdparty= verbose=" -- "${word}"))
            ;;
        local_install)
            COMPREPLY=($(compgen -W "type= steps= system= log_file= action= user=" -- "${word}"))
            ;;
        esac
        ;;
    esac
}
# Completion for `casa_container` (only registered inside a container, see
# the $CASA_ENVIRONMENT guard below): completes the subcommand, then each
# subcommand's key= options.
function _complete_casa_container_()
{
    local word=${COMP_WORDS[COMP_CWORD]}
    local line=${COMP_LINE}
    local cmd_list="help setup_user setup_dev config_gui"
    local opt_list="-h --help -v --verbose --version"
    local cmd_wd_num=1
    case $(( COMP_CWORD - cmd_wd_num )) in
    0)
        # Completing the subcommand itself; append a space on a single match.
        COMPREPLY=($(compgen -W "$cmd_list $opt_list" -- "${word}"))
        if [ -n "$COMPREPLY" ]; then
            COMPREPLY="$COMPREPLY "
        fi
        ;;
    *)
        local cmd=${COMP_WORDS[cmd_wd_num]}
        if [ "$word" = "=" ] \
            || [ "${COMP_WORDS[$(( COMP_CWORD - 1 ))]}" = "=" ]; then
            # after = sign: complete an option value
            _complete_casa_distro_option_
            return
        fi
        case "$cmd" in
        help)
            COMPREPLY=($(compgen -W "format= full= $cmd_list" -- "${word}"))
            ;;
        setup_user)
            COMPREPLY=($(compgen -W "dir= rw_install= distro= version= url=" -- "${word}"))
            ;;
        setup_dev)
            COMPREPLY=($(compgen -W "distro= branch= system= image_version= dir= name=" -- "${word}"))
            ;;
        config_gui)
            COMPREPLY=($(compgen -W "" -- "${word}"))
            ;;
        esac
        ;;
    esac
}
# Register the completion functions for each CLI entry point.
# (The old static word-list registrations are kept below for reference.)
# complete -W "help create list update update_image shell run mrun bv_maker create_writable_image root_shell" casa_distro
# complete -W "help package_casa_distro publish_casa_distro create_release_plan update_release_plan html_release_plan create_latest_release create_docker update_docker publish_docker create_singularity publish_singularity publish_build_workflows" casa_distro_admin
complete -F _complete_casa_distro_ -o nospace -o default casa_distro
complete -F _complete_casa_distro_admin_ -o nospace -o default casa_distro_admin
complete -F _complete_bv_ -o nospace -o default bv
# casa_container only exists inside a container environment.
if [ -n "$CASA_ENVIRONMENT" ]; then
    complete -F _complete_casa_container_ -o nospace -o default casa_container
fi
| true
|
07e6ab58fb1d1ebf202d9ca9283df6957b1720de
|
Shell
|
Dinglesworth/bin
|
/bluetooth_scripts/bc
|
UTF-8
| 630
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Connect to a Bluetooth device by MAC address via bluetoothctl and report
# the outcome in colour.
# Usage: bc <MAC_ADDRESS>
MAC_ADDRESS=$1
if [ "$MAC_ADDRESS" == "" ]; then
    echo -e "\n\033[1;33mNO MAC ADDRESS WAS ENTERED\n"
    exit
fi
bluetoothctl connect $MAC_ADDRESS > /dev/null
# "Connected: yes/no" line from the device info block.
CONNECT_STATUS=$(bluetoothctl info $MAC_ADDRESS | awk '$1 == "Connected:" {print $2}')
# Device name; up to 7 words are glued back together, so names shorter than
# that end up with trailing spaces (cosmetic only).
DEVICE_NAME=$(bluetoothctl info $MAC_ADDRESS | awk '$1 == "Name:" {print $2" "$3" "$4" "$5" "$6" "$7" "$8}')
# NOTE(review): "Succesfully" below is a typo in the user-visible message;
# left unchanged here since it is runtime output.
if [ "$CONNECT_STATUS" == "yes" ]; then
    echo -e "\n\033[1;32m[DEVICE]: \033[1;34m$DEVICE_NAME"
    echo -e "\033[1;32mSuccesfully Connected\n"
else
    echo -e "\n\033[1;32m[DEVICE]: \033[1;34m$DEVICE_NAME"
    echo -e "\033[1;31mFailed to Connect\n"
fi
| true
|
4ebd5e35609d5bc52ef10a3cf4804d8c93650dc4
|
Shell
|
strange-jiong/small-demo
|
/shell/install-dog-tunnel.sh
|
UTF-8
| 733
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Go and the build dependencies of dog-tunnel, set up a world-writable
# GOPATH under /opt/go, then fetch and build dog-tunnel (master branch).
echo "===============install go======="
apt-get install software-properties-common
apt-get install python-software-properties
add-apt-repository ppa:gophers/go
apt-get install golang-go git-core mercurial
echo "===============create GOPATH=============="
echo "export GOPATH=/opt/go" >> ~/.bashrc
source ~/.bashrc
mkdir /opt/go
chmod 777 /opt/go
cd /opt/go
mkdir bin pkg src
chmod 777 bin pkg src
echo "=========install dog tunnel dependency====="
# (the original fetched the mysql driver twice; once is enough)
go get github.com/go-sql-driver/mysql
go get github.com/klauspost/reedsolomon
go get github.com/cznic/zappy
go get -u -d github.com/vzex/dog-tunnel
cd "$GOPATH/src/github.com/vzex/dog-tunnel/"
git checkout master
make
| true
|
1ba5b2d43dbc83273a974fd4503ed2ce281d7ae4
|
Shell
|
cHolzberger/kvm-osx
|
/bin/machine-list-host
|
UTF-8
| 397
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the names of all KVM VMs under /srv/kvm/vms whose configured seat
# HOST matches the current hostname.
VERBOSE=0
# -v is accepted but currently has no effect. TODO(review): wire it up or drop it.
if [[ "$1" == "-v" ]]; then
    VERBOSE=1
fi
# NOTE(review): _sum is never updated or printed — looks like dead code.
_sum=0
for MACHINE_PATH in /srv/kvm/vms/*; do
{
    HOST="-unknown-"
    {
        vm=$(basename $MACHINE_PATH)
        if [ -f $MACHINE_PATH/config ]; then
            # The sourced files are expected to define HOST (among others)
            # for this VM; seat-$vm wins since it is sourced last.
            source $MACHINE_PATH/config
            source $MACHINE_PATH/seat
            source $MACHINE_PATH/../../seats/seat-$vm
            if [[ "$HOST" == $(hostname) ]]; then
                echo -e "$vm"
            fi
        fi
    }
}
done
| true
|
f13fc2b6c5b9aa8855e891afc272604741e0e27c
|
Shell
|
linuxmap/node.lua
|
/targets/linux/hi3516a/S88debug
|
UTF-8
| 1,082
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Initial development mode
# ======
# This script is only used during development and is not packaged; copy it
# manually to /etc/init.d/ on the development board before developing.
# IPv4 address
# Configure the default static IP address; it must be on the same subnet as
# the development server. Adjust as needed.
ifconfig eth0 192.168.77.113 netmask 255.255.255.0
# Gateway
# Set the gateway address of the local network. Adjust as needed.
route add default gw 192.168.77.1
# Set the host name
hostname hi3516a
# tmpfs
# Mount /tmp as an in-memory filesystem: files under /tmp change often and
# are all temporary, so a RAM filesystem improves system efficiency.
mount tmpfs /tmp -t tmpfs
# FTP Server
# FTP makes it easy to upload/download files on the development board.
# Anonymous access is sufficient.
tcpsvd 0 21 ftpd -w / &
# WiFi
#cd /ko
#insmod 8188eu.ko
#ifconfig wlan0 up
# telnet
# Essential for remote network debugging: the development machine connects
# to the board over the telnet protocol.
telnetd
# Automatically load all drivers at boot time
cd /ko
./load3516a -a -sensor imx178 -osmem 64
#mount -t nfs -o nolock 192.168.77.125:/system/main/node /nfsroot &
| true
|
c994875f08bea80532e9be7243732f3d9ca36021
|
Shell
|
moneytech/sturm6502
|
/build.sh
|
UTF-8
| 377
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
cc sturm6502.c -o sturm6502
./sturm6502 -d 1 -l test/smoke/sturmdos.lst -o test/smoke/sturmdos.bin test/smoke/sturmdos.asm
xxd test/smoke/sturmdos.bin > test/smoke/smoke.hex
set +e
diff test/smoke/expected.hex test/smoke/smoke.hex
exit_code=$?
if [ $exit_code != 0 ]; then
printf "SMOKE TEST FAILED: %d\n" "$exit_code"
else
printf "SMOKE TEST OK\n"
fi
| true
|
b0921a4463f7ed6e94c1b14d799ae659145dde2e
|
Shell
|
j5s/tudo
|
/solution/chain.sh
|
UTF-8
| 1,248
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# @title Chains together vulnerabilities for TUDO
# @author William Moody
# @date 10.03.2021
if [ "$#" -ne 4 ] || [ $4 -gt 6 ] || [ $4 -lt 1 ]; then
echo "usage: $0 TARGET HOST USER CHAIN"
echo
echo "valid CHAIN values:"
echo "1 :: SQLi -> XSS -> SSTI"
echo "2 :: SQLi -> XSS -> Image Upload Bypass"
echo "3 :: SQLI -> XSS -> PHP Deserialization"
echo "4 :: Token Spray -> XSS -> SSTI"
echo "5 :: Token Spray -> XSS -> Image Upload Bypass"
echo "6 :: Token Spray -> XSS -> PHP Deserialization"
echo
exit
fi
new_pass=HACKED
cookie_tmp=.TMPCOOKIEOUTPUT
echo
echo "Step 1 - Authentication Bypass"
echo "-=-=-=-=-=-=-=-=-=-=-=-=-=-=-="
if [ $4 -lt 4 ]; then
python3 dump_token.py $1 $3 $new_pass
else
python3 token_spray.py $1 $3 $new_pass
fi
echo
echo "Step 2 - Privilege Escalation"
echo "-=-=-=-=-=-=-=-=-=-=-=-=-=-=-"
echo
python3 steal_cookie.py $1 $2 $3 $new_pass | tee $cookie_tmp
phpsessid=`tail -n 1 $cookie_tmp | awk '{split($0,a,"="); print a[2]}'`
rm $cookie_tmp
echo
echo "Step 3 - RCE"
echo "-=-=-=-=-=-="
echo
if [ $(($4%4)) -eq 1 ]; then
python3 set_motd.py $1 $2 $phpsessid
elif [ $(($4%4)) -eq 2 ]; then
python3 image_upload.py $1 $2 $phpsessid
else
python3 deserialize.py $1 $2 $phpsessid
fi
| true
|
1a174a3adf051469be15105fe005bc2577300f37
|
Shell
|
wxtim/workflows
|
/gui-demo/bin/fail_and_a_half
|
UTF-8
| 581
| 3.890625
| 4
|
[] |
permissive
|
#!/bin/bash
# Task succeeds or fails depending on Cylc task submit number
main() {
case "${CYLC_TASK_SUBMIT_NUMBER}" in
"1")
echo "Never succeed on the 1st try."
exit 1
;;
"2")
echo "Randomly succeed on the 2rd try."
if [[ $((RANDOM % 2)) == 1 ]]; then
echo "OK"
exit 0
else
echo "FAIL"
exit 1
fi
;;
*)
echo "Always succeed on the 3rd try."
exit 0
;;
esac
}
main
| true
|
138771c0d96c883566bd85aefa9745b7f96f5390
|
Shell
|
UoA-eResearch/Create_Dropbox_Team_folders
|
/bin/cron.sh
|
UTF-8
| 731
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#Run from cron (crontab -l)
#1 8,12,16,20 * * * /home/figshare/dropbox_gen_groups_from_ldap/bin/cron.sh > /home/figshare/dropbox_gen_groups_from_ldap/log/last_run.log 2>&1
#
RM="/bin/rm"
LOCKFILE="/home/figshare/bin/lockfile"
TMP_DIR="/tmp"
LOCK_PID_FILE=${TMP_DIR}/dropbox_hr_feed.lock
${LOCKFILE} ${LOCK_PID_FILE} $$
if [ $? != 0 ] ; then exit 0 ; fi
log_date=`/bin/date "+%Y-%m-%d-%H"`
base_dir="/home/figshare/dropbox_gen_groups_from_ldap"
/bin/date > ${base_dir}/log/run_${log_date}.log
${base_dir}/bin/add_ldap_group_to_dropbox.rb >> ${base_dir}/log/run_${log_date}.log 2>&1
/bin/date >> ${base_dir}/log/run_${log_date}.log
#
/usr/bin/find ${base_dir}/log -mtime +30 -exec rm -f {} \;
${RM} -f ${LOCK_PID_FILE}
| true
|
b3f3b17d5c303555756d85e5a1a2bdd3c36ae9d2
|
Shell
|
LI3DS/ros-li3ds
|
/arduino/LI3DS_ARDUINO/configure.sh
|
UTF-8
| 250
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
mkdir -p build/
cd build
# chemin vers le fichier tool-chain d'arduino-cmake
TOOLCHAIN_FILE=$ARDUINOCMAKE_DIR/cmake/ArduinoToolchain.cmake
# on lance la generation du projet via CMake
cmake -DCMAKE_TOOLCHAIN_FILE=$TOOLCHAIN_FILE ..
cd -
| true
|
e798497c4a9637a9a6f6df760077db7cca6f9f6f
|
Shell
|
tdharris/myNovellApp
|
/lib/qNotify/rc/notify
|
UTF-8
| 1,734
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
#/etc/init.d/nodeup
### BEGIN INIT INFO
# Provides: notify
# Required-Start: $network
# Required-Stop: $network
# Should-Start: haveged auditd
# Default-Start: 3 5
# Default-Stop: 0 1 2 6
# Description: Start the notify daemon
### END INIT INFO
function askYesOrNo {
REPLY=""
while [ -z "$REPLY" ] ; do
read -ep "$1 $YES_NO_PROMPT" REPLY
REPLY=$(echo ${REPLY}|tr [:lower:] [:upper:])
case $REPLY in
$YES_CAPS ) printf '\n'; return 0 ;;
$NO_CAPS ) printf '\n'; return 1 ;;
* ) REPLY=""
esac
done
}
# Initialize the yes/no prompt
YES_STRING=$"y"
NO_STRING=$"n"
YES_NO_PROMPT=$"[y/n]: "
YES_CAPS=$(echo ${YES_STRING}|tr [:lower:] [:upper:])
NO_CAPS=$(echo ${NO_STRING}|tr [:lower:] [:upper:])
export PATH=$PATH:/srv/www/qNotify
export NODE_PATH=$NODE_PATH:/usr/local/lib/node_modules
case "$1" in
start)
cd /srv/www/qNotify; rm err.log 2>/dev/null; node app.js >>qNotify.log 2>err.log &
sleep 0.5
if [ -s err.log ]; then {
echo "qNotify failed to start: "; cat err.log; rm err.log
}
else {
echo -e "\nqNotify started successfully with pid: " `pgrep node`"\n"
}
fi
;;
status)
pid=`pgrep node`
if [ -n "$pid" ]; then {
echo -e "\nqNotify ("`pgrep node`")"
netstat -ltp | grep node
echo ""
}
else {
echo -e "\nqNotify is not running...\n"
}
fi
;;
log)tailf /srv/www/qNotify/qNotify.log
;;
users) echo -e "\nCurrent users: " `grep userList /srv/www/qNotify/qNotify.log | tail -1 | cut -d ':' -f5-` "\n"
;;
stop)
cd /srv/www/qNotify; kill -INT `pgrep node` 2>/dev/null
if [ $? -eq 0 ]; then echo -e "\nqNotify has been stopped.\n"
else echo -e "\nqNotify isn't running.\n"
fi
;;
*)
echo "Usage: /etc/init.d/nodeup {start|stop}"
exit 1
;;
esac
exit 0
| true
|
a34c92abbcdc3dbbdfd3e113c9a762f4dc699ce5
|
Shell
|
breunigs/bravia-auth-and-remote
|
/auth_cookie_examples/auth.sh
|
UTF-8
| 2,480
| 3.609375
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/sh
# Note: this is only useful when you want to build an APP that also supports
# authentication via cookie. The cookie method loses validity after a
# couple of weeks, so you need to run reauth.sh some time before that.
set -e
my_uuid=$(uuidgen)
cd $(dirname $0)
. ./bravia.cfg
if [ -e 'auth_cookie' ]; then
echo "There's already an auth_cookie file. Delete the file to continue."
exit 1
fi
if [ "$tv_ip" = "" ] || [ "$my_nick" = "" ] || [ "$my_device" = "" ]; then
echo "Missing configuration data, please edit the script and run it again."
exit 2
fi
data="{\"method\":\"actRegister\",\"params\":[{\"clientid\":\"$my_nick:$my_uuid\",\"nickname\":\"$my_nick ($my_device)\",\"level\":\"private\"},[{\"value\":\"yes\",\"function\":\"WOL\"}]],\"id\":8,\"version\":\"1.0\"}"
echo "-------------------------------"
echo "Trying to register on $tv_ip..."
curl --silent -XPOST http://$tv_ip/sony/accessControl -d "$data"
echo;echo
echo 'Response should contain an "Unauthorized" error and the TV should display'
echo 'a dialog with a 4-digit PIN.'
echo 'A message with "Registration has been cancelled" after this step means'
echo 'that the UUID seems to be registered already.'
echo 'Delete old registered devices in:'
echo 'Settings -> Network -> Home Network Setup -> Remote Device / Renderer'
echo ' -> Registered Remote Devices'
echo;echo
echo "Okay, now enter the 4-digit code shown on the TV:"
read tv_challenge
echo "export tv_challenge=${tv_challenge}" > 'tv_challenge'
echo;echo
echo "-------------------------------"
echo "Trying to register on $tv_ip, this time with the given code..."
echo;echo
cookie=$(curl --include --silent -XPOST http://$tv_ip/sony/accessControl --user :$tv_challenge -d "$data" | grep -o -E 'auth=([A-Za-z0-9]+)')
echo $cookie
echo $cookie > 'auth_cookie'
echo $my_uuid > 'uuid'
echo;echo
echo "If everything worked, you should see an auth=<code> line above."
echo "Your computer is now registered, use it like this:"
echo
echo " curl --cookie \"$cookie\" -XPOST http://$tv_ip/sony/system -d '<JSON STUFF>'"
# Uncomment this to capture the MAC address
#echo;echo
#echo "Saving MAC address. This useful to implement Wake-on-LAN"
#mac=$(arp -a | grep ${tv_ip} | awk '{ print $4 }')
#echo $mac > 'mac'
echo;echo
../print_ircc_codes.sh $tv_ip > ircc_command_list
echo "Available IRCC commands have been saved to 'ircc_command_list'"
echo
echo "Run a IRCC command with: ./example_curl.sh $tv_ip <IRCC-Code>"
| true
|
2717e0fe0a56d2be36377cac6be909ca6ef9d2f8
|
Shell
|
janarkopunk/a
|
/misc/bsa.sh
|
UTF-8
| 437
| 3.5625
| 4
|
[] |
no_license
|
# Binary search algorithm
function warn {
printf '\e[36m%s\e[m\n' "$*"
}
if [ $# != 2 ]
then
echo bsa.sh GOOD BAD
exit
fi
gb=$1
bb=$2
echo each iteration will be saved to clipboard
while :
do
(( ty = (gb + bb) / 2 ))
if (( sg[ty]++ ))
then
break
fi
warn $ty
printf $ty > /dev/clipboard
select co in good bad
do
break
done
if [ $co = good ]
then
(( gb = ty ))
else
(( bb = ty ))
fi
done
| true
|
3cad7cedd99acac61b77d5f51ce5fd5b3a0abdd1
|
Shell
|
jornbergmans/snippets
|
/01_bash/99_ambassadors/gifenh.sh
|
UTF-8
| 2,652
| 3.953125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
IFS=$'\n'
# echo "
# First variable not set. Please enter input info in the following order:
# 1. Input file or folder
# 2. Output framerate
# 3. Output height in pixels
# 4. Debug mode enables
# "
#
# else
if [[ -z $1 ]] || [[ -z $2 ]] || [[ -z $3 ]]; then
echo "Please input folder"
read inputfolder
echo "Please input desired output framerate"
read outputrate
echo "Please input desired output height in pixels. (input -1 for original size)"
read rez
echo "Do you want to enable debug mode? y/n"
select yn in "Yes" "No"; do
case $yn in
Yes ) debug=y; break;;
No) debug=n; break;;
esac
done
else
inputfolder="$1"
outputrate="$2"
rez="$3"
debug="$4"
fi
echo "Input set, creating file list"
inputfolder=$(echo "$inputfolder" | sed 's/[[:space:]]$//;s:\/$::')
filelist=$(find "$inputfolder" -iname "*.mov" -or -iname "*.mp4" -or -iname "*.mkv" -or -iname "*.avi" -or -iname "*.wmv" -or -iname "*.mxf" -and -not -iname "._*")
if [[ $rez = '-1' ]]; then
rezname='original-resolution_'
else
rezname="$rez"
fi
for f in $filelist; do
basef=$(basename "$f")
dirf=$(dirname "$f")
outputdir="$dirf"/gif-"$rezname"p"$outputrate"
mkdir -p "$outputdir"
if [[ ! "$debug" = '' ]] && [[ "$debug" = 'y' ]]; then
echo "- - - -"
echo "Debug mode enabled, listing input / output"
echo "- - - -"
echo "f is $f"
echo "outputdir is $outputdir"
echo "dirf is $dirf"
echo "basef is $basef"
echo "- - - -"
else
# echo "Creating palette for $basef"
ffmpeg -hide_banner -loglevel panic -y -i "$f" -vf fps=10,scale=-1:1080:flags=lanczos,palettegen "$outputdir"/."${basef/.mov/_palette.png}"
echo "Creating GIF file at $outputrate frames per second"
ffmpeg -hide_banner -loglevel panic -y -i "$f" -i "$outputdir"/."${basef/.mov/_palette.png}" -filter_complex \
"fps=$outputrate,scale=-1:$rez:flags=lanczos[x];[x][1:v]paletteuse" -f gif "$outputdir/${basef/.mov/_"$rezname"p"$outputrate".gif}"
if [[ ! "$debug" = '' ]] || [[ "$debug" = 'n' ]]; then
rm -f "$outputdir"/."${basef/.mov/_palette.png}"
fi
if [[ -f "$outputdir/${basef/.mov/_"$rezname"p"$outputrate".gif}" ]]; then
echo "GIF file created at $outputdir/${basef/.mov/_"$rezname"p"$outputrate".gif}"
elif [[ ! -f "$outputdir/${basef/.mov/_"$rezname"p"$outputrate".gif}" ]]; then
echo "Output file not found, please run in debug mode."
fi
echo ""
fi
done
#for name in $(find $1/gif-$3/ -iname "*mov.gif"); do
# mv "${name}" ${name/mov.gif/gif}
#done
#
# for gif in $(find $1 -type f -and -iname "*.gif" -or -iname "*palette*.png"); do
# mv "$gif" $1/gif-$3/
# done
#
# fi
| true
|
855a5cce6af0e30efc80e51d9bd9f891d3da195a
|
Shell
|
MaayanLab/MCF10A
|
/MCF10A_resources/L1000/run_L1000.sh
|
UTF-8
| 1,108
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# run_L1000.sh data\GSE70138_Broad_LINCS_Level3_INF_mlr12k_n115209x22268_2015-12-31.gct MCF10A ..\brd_drugname output\
gct_filename="$1"
cell_line="$2"
brd_filename="$3"
output_folder="$4"
output_filename="L1000_csv_filenames.txt"
chdir_filenames="L1000_chdir_filenames.txt"
up_down_filenames="L1000_chdir_up_down_filenames.txt"
python parse_gct_L1000.py "$gct_filename" "$cell_line" "$brd_filename" > "$output_filename" 2>> ../error.log
python L1000_csv_to_chdir.py "$output_filename" "$chdir_filenames" "$output_folder" 2>> ../error.log
python get_up_down_regulated_genes.py "$chdir_filenames" '..\output\up-down-chdir\' > "$up_down_filenames" 2>> ../error.log
python upload_to_L1000CDS2.py "$up_down_filenames" > L1000CDS2_dict 2>> ../error.log
# python correlation_L1000.py > correlation_filenames.txt # this script uploads genes to Enrichr, then gets enriched terms from appropriate gene lists, performs correlation
# python similarity_matrix.py correlation_filenames.txt # this script generates similarity matricies for Network2Canvas
# need to integrate with Network2Canvas somehow
| true
|
7046e462f04cbb68e3ade803551a7c6d6813a191
|
Shell
|
kt97679/rescue-system
|
/build-rescue-image.sh
|
UTF-8
| 809
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
set -e -u
RESCUE_ROOT=/run/rescue
RESCUE_SSH_PORT=11122
RESCUE_SSH_PASSWORD=rescueme
mkdir -p $RESCUE_ROOT
mount none -t tmpfs -o size=1G $RESCUE_ROOT
debootstrap trusty $RESCUE_ROOT
echo "rescue_system" > $RESCUE_ROOT/etc/debian_chroot
mkdir -p $RESCUE_ROOT/old_root
gcc --static -o fakeinit fakeinit.c
strip fakeinit
cp fakeinit $RESCUE_ROOT/
chroot $RESCUE_ROOT apt-get -y install lvm2 psmisc openssh-server openssh-client openssh-blacklist openssh-blacklist-extra --no-install-recommends
sed -i "s/^Port .*$/Port $RESCUE_SSH_PORT/" $RESCUE_ROOT/etc/ssh/sshd_config
sed -i 's/^PermitRootLogin .*$/PermitRootLogin yes/' $RESCUE_ROOT/etc/ssh/sshd_config
chroot $RESCUE_ROOT bash -c "echo root:$RESCUE_SSH_PASSWORD|chpasswd"
tar -C $RESCUE_ROOT -czf ./rescue-image.tgz .
umount $RESCUE_ROOT
| true
|
5d2fb1e30dfb4081139917347c29e7165a33fde0
|
Shell
|
mrinal10/covid-data-internal
|
/bin/start-test-db.sh
|
UTF-8
| 891
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/sh
# This script starts the database in a Docker container locally. This is needed to run the tests locally, but not for
# Bitbucket Pipelines since the tests run in a container with their own database.
#
# See `../docker/test/db/docker-compose.yml` for details about the database and the container name.
# See `../sql` for the scripts that drop/create the database.
echo "Re-creating test database. All data of the existing database will be destroyed. Port 5432 must be free."
start_db() {
sudo docker-compose -f ../docker/test/db/docker-compose.yml up -d \
&& sleep 10s \
&& psql -h 127.0.0.1 -U postgres -f ../sql/drop-db.sql \
&& psql -h 127.0.0.1 -U postgres -f ../sql/create-db.sql
}
while true; do
read -p "Proceed? [y/n] " yn
case $yn in
[Yy]* ) start_db; break;;
[Nn]* ) exit;;
* ) echo "Please answer yes or no.";;
esac
done
| true
|
37b8d9fc0ae9cc2795a6e799b4fda93173f9b692
|
Shell
|
yushixiang/my-crawler
|
/deploy/app.sh
|
UTF-8
| 3,354
| 3.734375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
ENV="prod"
cd `dirname $0`
APPLICATION="my-crawler"
SERVER_PORT=9206
LOGS_DIR="/tmp/logs/${APPLICATION}"
mkdir -p ${LOGS_DIR}
HOST_NAME=`hostname`
SPRING_PROFILE=" -Dspring.profiles.active=${ENV} "
JAVA_MEM_OPTS=" -server -Xms2000m -Xmx2000m -Xmn500m -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:+UseCMSCompactAtFullCollection -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:+ParallelRefProcEnabled -XX:-UseBiasedLocking "
JAVA_GC_LOG_OPTS=" -XX:+PrintGCDetails -Xloggc:$LOGS_DIR/gc_log.`date +%m%d%H%M` -XX:+PrintGCDateStamps "
JAVA_ERR_DUMP_OPTS=" -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=$LOGS_DIR/oom_err_heap_dump.`date +%m%d%H%M` "
JARFile="${APPLICATION}.jar"
PIDFile="${APPLICATION}.pid"
STDOUT_FILE="${LOGS_DIR}/stdout.log"
function check_if_pid_file_exists {
if [ ! -f ${PIDFile} ]
then
echo "PID file not found: $PIDFile"
exit 1
fi
}
function check_if_process_is_running {
local pid=$1
if ps -p ${pid} > /dev/null
then
return 0
else
return 1
fi
}
function print_process {
echo $(<"${PIDFile}")
}
case "$1" in
status)
if [ ! -f ${PIDFile} ]
then
echo "PID file not found: $PIDFile"
exit 0
fi
if check_if_process_is_running $(print_process)
then
echo $(print_process)" is running"
else
echo "Process not running: $(print_process)"
fi
;;
stop)
check_if_pid_file_exists
pid=$(print_process)
if ! check_if_process_is_running $pid
then
echo "Process $pid already stopped"
exit 0
fi
kill -TERM $pid
printf "Waiting for process to stop"
NOT_KILLED=1
for i in {1..20}; do
if check_if_process_is_running $pid
then
printf ". "
sleep 1
else
echo "killed"
NOT_KILLED=0
break
fi
done
echo
if [ ${NOT_KILLED} = 1 ]
then
echo "Cannot kill process $(print_process)"
exit 1
fi
echo "Process stopped"
;;
start)
if [ -f ${PIDFile} ] && check_if_process_is_running $(print_process)
then
echo "Process $(print_process) already running"
exit 1
fi
nohup java ${SPRING_PROFILE} ${JAVA_MEM_OPTS} ${JAVA_GC_LOG_OPTS} ${JAVA_ERR_DUMP_OPTS} -jar ${JARFile} > $STDOUT_FILE 2>&1 &
printf "Wait process to start"
for i in {1..60}; do
if [ ! -f ${PIDFile} ]
then
printf ". "
sleep 1
else
if check_if_process_is_running $(print_process)
then
echo "\nProcess started\n"
break
else
printf ". "
sleep 1
fi
fi
done
;;
restart)
$0 stop
if [ $? = 1 ]
then
exit 1
fi
$0 start
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
esac
exit 0
| true
|
c4417cc9a4bf08121485ae1123932fe8e638a959
|
Shell
|
hassanelseady09/vps_install
|
/my_lib/functions/remove_ng_php.sh
|
UTF-8
| 582
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
remove_ng_php(){
echo "function remove_ng_php"
sudo apt-get -y purge *php*
# remove from the /etc/nginx/sites-available/default
if [ "`grep -P "###php-code###" /etc/nginx/sites-available/default`" != "" ]; then
echo "==remove php from the /etc/nginx/sites-available/default==";
sudo sed -ri.bak ':a;N;$!ba s/###php-code###.*?###php-code###//g' /etc/nginx/sites-available/default
fi
#restart apache2
sudo systemctl restart nginx.service
sudo apt-get -y autoremove
sudo apt-get -y autoclean
echo
echo "php has been successfuly removed"
}
| true
|
f53ce500881ac500b1ff9818a4b734ceb3b1f87d
|
Shell
|
kitchell/app-LBspectrum
|
/start.sh
|
UTF-8
| 1,121
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
#mainly to debug locally
if [ -z $WORKFLOW_DIR ]; then export WORKFLOW_DIR=`pwd`; fi
if [ -z $TASK_DIR ]; then export TASK_DIR=`pwd`; fi
if [ -z $SERVICE_DIR ]; then export SERVICE_DIR=`pwd`; fi
rm -f finished
if [ $ENV == "IUHPC" ]; then
#clean up previous job (just in case)
rm -f finished
#jobid=`qsub $SERVICE_DIR/submit.pbs`
if [ $HPC == "KARST" ]; then
#looks like preempt queue has small limit on how many jobs I can queue
#jobid=`qsub -q preempt $SERVICE_DIR/submit.pbs`
qsub $SERVICE_DIR/submit.pbs > jobid
fi
if [ $HPC == "CARBONATE" ]; then
qsub $SERVICE_DIR/submit.pbs > jobid
fi
exit $?
fi
if [ $ENV == "VM" ]; then
nohup time $SERVICE_DIR/submit.pbs > stdout.log 2> stderr.log &
echo $! > pid
fi
if [ $ENV == "SLURM" ]; then
cat <<EOT > _run.sh
#!/bin/bash
srun singularity run docker://kitchell/lb_spectrum
if [ -s 'spectrum.json' ];
then
echo 0 > finished
else
echo "spectrum.json missing"
echo 1 > finished
exit 1
fi
EOT
chmod +x _run.sh
jobid=$(sbatch -c 6 _run.sh | cut -d' ' -f4)
echo $jobid > slurmjobid
echo "submitted $jobid"
exit
fi
| true
|
b367566c519199040e69fc20f201dd5bd1694ad9
|
Shell
|
muvaf/crossplane-cli
|
/bin/kubectl-crossplane-package-install
|
UTF-8
| 3,245
| 4.40625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -e
function usage {
# The reason for putting package name before package image source is it seems like package name
# would be overridden more often than package image source, but I kept going back and
# forth on that originally. Overriding the source is very useful when developing a
# package locally, for example.
echo "Usage: kubectl crossplane package install [-h|--help] [-c|--cluster] [-n|--namespace NAMESPACE] [--imagePullSecret SECRET_NAME] PACKAGE_IMAGE_NAME [PACKAGE_NAME [PACKAGE_IMAGE_SOURCE]]" >&2
echo "" >&2
echo "PACKAGE_IMAGE_NAME is the name of the package in the registry to install." >&2
echo "If the PACKAGE_NAME is not provided, the package name will be the PACKAGE_IMAGE_NAME with any '/' characters" >&2
echo "converted to '-' characters." >&2
echo "" >&2
echo "-h, --help: Print usage" >&2
echo "-c, --cluster: Install a Cluster scoped package" >&2
echo "-n, --namespace: Install into the given namespace" >&2
echo "" >&2
echo 'For more advanced usage, see the lower-level `kubectl crossplane package generate-install` command.' >&2
}
CLUSTER_PACKAGE=""
CLUSTER_OPT=""
NAMESPACE_OPT=""
IMAGE_PULL_SECRET=""
IMAGE_PULL_SECRET_OPT=""
POSITIONAL=()
while [[ $# -gt 0 ]]; do
opt="$1"
case $opt in
-c|--cluster)
CLUSTER_PACKAGE="cluster"
CLUSTER_OPT="$opt"
shift
;;
-n=*|--namespace=*)
NAMESPACE="${opt#*=}"
NAMESPACE_OPT="--namespace=${NAMESPACE}"
shift;
;;
-n|--namespace)
NAMESPACE="$2"
NAMESPACE_OPT="--namespace=${NAMESPACE}"
shift
shift
;;
--imagePullSecret=*)
IMAGE_PULL_SECRET="${opt#*=}"
IMAGE_PULL_SECRET_OPT="--imagePullSecret=${IMAGE_PULL_SECRET}"
shift;
;;
--imagePullSecret)
IMAGE_PULL_SECRET="$2"
IMAGE_PULL_SECRET_OPT="--imagePullSecret=${IMAGE_PULL_SECRET}"
shift
shift
;;
-h|--help)
usage
exit 1
;;
-*)
echo "Unknown argument: $opt" >&2
usage
exit 1
;;
*)
POSITIONAL+=("$1")
shift
;;
esac
done
if [ "${#POSITIONAL[@]}" -eq "0" ]; then
echo "Missing arguments" >&2
usage
exit 1
fi
# Reset the positional parameters ($1, ..) from the array of arguments
# that didn't match our known options
set -- "${POSITIONAL[@]}"
PACKAGE_IMAGE_NAME="${1}"
# For kubernetes fields, we aren't able to use slashes, and
# slashes are common for docker image names. So we remove the
# slashes before we use the name for kubernetes resource fields.
# We also can't use colons and don't want to include tags in the name.
KUBEY_PACKAGE_IMAGE_NAME=$( echo "${PACKAGE_IMAGE_NAME}" | tr '/' '-' | sed 's/:.*//' )
# Defaults to the kubey package image name, but can be overridden
# by passing arguments
PACKAGE_NAME="${2:-${KUBEY_PACKAGE_IMAGE_NAME}}"
kubectl crossplane package generate-install ${CLUSTER_OPT} ${IMAGE_PULL_SECRET_OPT} "$@" | kubectl apply ${NAMESPACE_OPT} -f -
# Printing out the package install object from the cluster may be useful
# for whoever ran this command, and there's no other output anyway, so
# we might as well.
kubectl get ${NAMESPACE_OPT} -o yaml "${CLUSTER_PACKAGE}"packageinstall "${PACKAGE_NAME}"
| true
|
874544bb9de74b086738a7a08104ac1b871f2673
|
Shell
|
teotikalki/privatix
|
/tools/dump_mac/dump_mac.sh
|
UTF-8
| 1,306
| 3.828125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
if (( "$#" > 1 ));
then
echo usage: dump_mac.sh [privatix_app_folder_path]
exit 1
fi
PRIVATIX_APP_FOLDER=${1:-/Applications/Privatix}
DESTINATION_FOLDER="${PRIVATIX_APP_FOLDER}/dump"
rm -rf "${DESTINATION_FOLDER}"
rm -rf "${DESTINATION_FOLDER}".zip
find_and_copy(){
mkdir -p "$2"
find "${PRIVATIX_APP_FOLDER}" -path "${DESTINATION_FOLDER}" -prune -o -name "$1" -exec \
cp '{}' "$2" \;
}
get_value(){
cat "${dappctrl_config}" | \
python -c 'import json,sys;obj=json.load(sys.stdin);print obj["DB"]["Conn"]["'$1'"]';
}
echo "copying files..."
find_and_copy "*.config.json" "${DESTINATION_FOLDER}/configs"
find_and_copy "settings.json" "${DESTINATION_FOLDER}/configs"
find_and_copy "*.log" "${DESTINATION_FOLDER}/logs"
find_and_copy "*.err" "${DESTINATION_FOLDER}/errs"
dappctrl_config=$(find "${DESTINATION_FOLDER}" -name "dappctrl.config.json")
echo "dumping db..."
find "${PRIVATIX_APP_FOLDER}" -name "pg_dump" -exec \
'{}' --create --column-inserts --clean --if-exists \
-d "$(get_value "dbname")" \
-h "$(get_value "host")" \
-p "$(get_value "port")" \
-U "$(get_value "user")" \
-f "${DESTINATION_FOLDER}/db_dump.sql" \
\;
echo "zipping files..."
zip -r "${DESTINATION_FOLDER}".zip "${DESTINATION_FOLDER}"
| true
|
aaabb34d07e7e7b8ab4ffd0b2911f8f482d42b2b
|
Shell
|
gabrielegiammatteo/maven-builder
|
/bin/mavenize
|
UTF-8
| 4,981
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
#
#
# Authors:
# Gabriele Giammatteo - gabriele.giammatteo@eng.it
#
# 2012
#
######################
# initialisation and commandline parsing
#
MAVENIZER_HOME=$(dirname $(dirname $0))
source $MAVENIZER_HOME/lib/shflags
source $MAVENIZER_HOME/lib/mavenizelib
SCRIPT_NAME="mavenize"
if [[ "$MAVEN_BUILD_TYPE" == "development" ]]; then
snapshotFlag="-s"
fi
FLAGS_HELP="USAGE: $0 [flags]"
DEFINE_string 'file' '' "artifact file to mavenize. If not specified, a search will be done in the etics module home trying to find a suitable artifact" 'f'
DEFINE_string 'profile' '' "profile.xml file from where take information about the artifact" 'p'
DEFINE_string 'groupid' '' "profile.xml file from where take information about the artifact" 'g'
DEFINE_string 'artifactid' '' "profile.xml file from where take information about the artifact" 'a'
DEFINE_string 'version' '' "optional: version" 'v'
DEFINE_string 'packaging' '' "packaging. If not provided, the file extension will be used" 'k'
DEFINE_string 'pom' '' "pom to use. If provided it will used without generate it" 'o'
DEFINE_boolean 'deps' true "If false, dependencies are in the pom are not generated" 'd'
DEFINE_boolean 'dryrun' false "no act. Perform just a simulation" 'n'
DEFINE_integer 'loglevel' 1 "log level. Accepted values are 0, 1, 2" 'l'
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
######################
# prints out the script's header
#
cat <<EOF
[mavenize] *** Mavenize script ***
[mavenize]
[mavenize] print env:
[mavenize] MAVEN_HOME=$MAVEN_HOME
[mavenize] MAVEN_SETTINGS=$MAVEN_SETTINGS
[mavenize] MAVEN_LOCAL_REPOSITORY=$MAVEN_LOCAL_REPOSITORY
[mavenize] MAVEN_BUILD_TYPE=$MAVEN_BUILD_TYPE
[mavenize] MAVEN_GCUBE_SNAPSHOTS=$MAVEN_GCUBE_SNAPSHOTS
[mavenize] MAVEN_GCUBE_RELEASES=$MAVEN_GCUBE_RELEASES
[mavenize] MAVEN_GCUBE_STAGING=$MAVEN_GCUBE_STAGING
[mavenize] MAVEN_SYSTEM_VERSION=$MAVEN_SYSTEM_VERSION
[mavenize] MAVENIZER_FAIL_ON_ERROR=$MAVENIZER_FAIL_ON_ERROR
[mavenize] VERSION_REWRITES=$VERSION_REWRITES
EOF
######################
# checks pre-requisites to run the script
#
[ -z "$MAVEN_HOME" ] && log_fatal "MAVEN_HOME is not set. exiting..." && exit 1;
[ -z "$MAVEN_SETTINGS" ] && log_fatal "MAVEN_SETTINGS is not set. exiting..." && exit 1;
[ -z "$MAVEN_LOCAL_REPOSITORY" ] && log_fatal "MAVEN_LOCAL_REPOSITORY is not set. exiting..." && exit 1;
[[ "$MAVEN_BUILD_TYPE" != "development" && "$MAVEN_BUILD_TYPE" != "candidate" && "$MAVEN_BUILD_TYPE" != "release" ]] && log_fatal "MAVEN_BUILD_TYPE is not set or not valid (one in development|candidate|release is expected). exiting..." && exit 1;
[ -z "$VERSION_REWRITES" ] && echo "VERSION_REWRITES is not set. exiting..." && exit 1;
######################
# search the artifact
#
ARTIFACT_SEARCHING_DIR=`pwd`/etics-tmp
if [ -n "${FLAGS_file}" ]; then
ARTIFACT_FILE=`readlink -f ${FLAGS_file}`
else
auto_select_artifact
fi
######################
# search the profile.xml
#
if [ -n "${FLAGS_profile}" ]; then
PROFILE_FILE=`readlink -f ${FLAGS_profile}`
else
PROFILE_FILE=`pwd`/etc/profile.xml
fi
######################
# generate the pom.xml
#
if [ -n "${FLAGS_pom}" ]; then
POM_FILE=${FLAGS_pom}
else
#
# prepares PomGenerator commandline
#
if [ ! -e "$PROFILE_FILE" ]; then
PROFILE_OPT=""
else
PROFILE_OPT="--profile $PROFILE_FILE"
fi
if [ -n "${FLAGS_groupid}" ]; then
GROUPID_OPT="--groupid ${FLAGS_groupid}"
fi
if [ -n "${FLAGS_artifactid}" ]; then
ARTIFACTID_OPT="--artifactid ${FLAGS_artifactid}"
fi
if [ -n "${FLAGS_version}" ]; then
VERSION_OPT="--version ${FLAGS_version}"
fi
if [ ${FLAGS_deps} -eq ${FLAGS_FALSE} ]; then
NODEPS_OPTION="--nodeps"
fi
if [ ! -e "$ARTIFACT_FILE" ]; then
ARTIFACT_OPT=""
else
ARTIFACT_OPT="--artifactname `basename $ARTIFACT_FILE`"
fi
if [ -n "${FLAGS_packaging}" ]; then
PACKAGING_OPT="--packaging ${FLAGS_packaging}"
elif [ -e "$ARTIFACT_FILE" ]; then
PACKAGING_OPT="--packaging `echo "$ARTIFACT_FILE"|awk -F . '{print $NF}'`"
else
PACKAGING_OPT=""
fi
if [ -n "$MAVEN_SYSTEM_VERSION" ]; then
SYS_VERSION_OPT="--systemversion $MAVEN_SYSTEM_VERSION"
fi
POM_FILE=`pwd`/mvnz_pom.xml
execute_PomGenerator $PROFILE_OPT $GROUPID_OPT $ARTIFACTID_OPT $VERSION_OPT $ARTIFACT_OPT -e $MAVENIZER_HOME/etc/externalsMapping.json -t $MAVENIZER_HOME/etc/staticMappings.json $snapshotFlag -o $POM_FILE $PACKAGING_OPT $NODEPS_OPTION $SYS_VERSION_OPT
fi
######################
# rewrite dependencies' versions in order to have in the pom the same versions
# used for integration
#
MVN="$MAVEN_HOME/bin/mvn -B -s $MAVEN_SETTINGS -Dmaven.repo.local=$MAVEN_LOCAL_REPOSITORY"
if [[ "$MAVEN_BUILD_TYPE" == "development" ]]; then
bash -c "$MVN -f $POM_FILE versions:use-latest-versions -DallowSnapshots=true -Dincludes=$VERSION_REWRITES"
else
bash -c "$MVN -f $POM_FILE versions:use-latest-versions -Dincludes=$VERSION_REWRITES"
fi
######################
# deploy on maven
#
execute_mvn $ARTIFACT_FILE $POM_FILE
| true
|
2de2851a32b68de9dd724b852a350c626c721b66
|
Shell
|
faisalbasha1982/pipeline-scripts
|
/dev-bobafett-rp.sh
|
UTF-8
| 1,080
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
unset http_proxy
unset HTTP_PROXY
unset https_proxy
unset HTTPS_PROXY
unset NO_PROXY
unset no_proxy
kubectl get nodes
kubectl create secret generic regcred \
--from-file=.dockerconfigjson=/home/dotnet_user/.docker/config.json \
--type=kubernetes.io/dockerconfigjson -n development
ls -ahl
cd bobafett/templates
sed -i 's#path: /#path: /swagger/#g' deployment.yaml
echo 'eng@osn' | sudo -S mv service.yaml old-service.yaml
file='old-service.yaml'
while IFS= read -r line;
do
echo "$line" >> service.yaml
n=$((n+1))
if [ "$n" -eq 12 ]
then
envfile='/home/dotnet_user/nodeport.txt'
while IFS= read -r newline;
do
echo "$newline" >> service.yaml
done < $envfile
fi
done < $file
cd ..
ls -ahl
cd ..
helm upgrade bobafett bobafett -n development \
--set 'imagePullSecrets[0].name=regcred' \
--set service.type='NodePort' \
--set nodePort='30385' \
--set image.repository=docker-registry.osn.com/bobafett-dev \
--set image.tag=$(Build.BuildNumber) -f bobafett/values.yaml \
--install --reset-values
| true
|
9da5a5a297a5edd0c962dc271e1fc927650e98c2
|
Shell
|
sakthivel9963/dot-source-files
|
/install/pgadmin-install.sh
|
UTF-8
| 450
| 3.078125
| 3
|
[] |
no_license
|
# Install the public key for the repository (if not done previously):
curl https://www.pgadmin.org/static/packages_pgadmin_org.pub | sudo apt-key add
# Create the repository configuration file:
sudo sh -c 'echo "deb https://ftp.postgresql.org/pub/pgadmin/pgadmin4/apt/$(lsb_release -cs) pgadmin4 main" > /etc/apt/sources.list.d/pgadmin4.list && apt update'
#
# Install pgAdmin
#
# Install for both desktop and web modes:
sudo apt install pgadmin4
| true
|
153ed88bb00ecf64e43aa7f159c95bf32c0a99bf
|
Shell
|
nchikuma/wagasci_software
|
/slowMonitor/script/eps2png.sh
|
UTF-8
| 256
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh
inputfile=$1
outputfile=${inputfile//eps/png}
case "${inputfile}" in
*\.eps)
GS="gs -dBATCH -dNOPAUSE -dEPSCrop -r${2}x${3} -sDEVICE=png16m -sOutputFile=${outputfile} -f ${inputfile}"
$GS >> /dev/null
;;
*)
echo "Not eps file"
;;
esac
| true
|
dc6ed9578135cb6afff4a1570461c95fae19d919
|
Shell
|
FlorianHeigl/nagios
|
/check_mk/local/flow_tools
|
UTF-8
| 1,137
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/sh
# Flow-tools check for FreeBSD
# Checks if data has been received in last minutes
# Maximum data age in minutes
maxage=15
# Minimum Size (bytes) for a file with >1 Flows
minsize=160
cmk_out()
{
prog="flow-tools-data"
echo "$state $prog - $msg"
exit $state
}
nagios_out()
{
echo "$msg"
exit $state
}
get_checkmode()
{
echo "a"
# set cmk output checkmode if called as agent local check
if [ "X" != "X${MK_LIBDIR}" ] && [ `pwd` = "${MK_LIBDIR}/local" ]; then
checkmode=cmklocal
fi
if [ "ABC" = "ABC" ]; then
checkmode=abccheckmode
fi
export checkmode
}
state=0
get_checkmode && echo $checkmode
. /etc/rc.conf
if ! [ "$flow_capture_enable" = "YES" ]; then
exit 0
fi
if ! [ -r $flow_capture_datadir ]; then
state=3
msg="UNKW - flow-tools datadir was not found."
fi
num_valid_files=`find $flow_capture_datadir -mmin -${maxage} -size +${minsize} | wc -l`
if [ $num_valid_files -gt 0 ]; then
state=0
msg="OK - flow-tools is recording flows."
else
state=2
msg="CRIT - flow-tools has not recorded a flow in $maxage minutes."
fi
| true
|
fe026a831f127928615f08e95f425bb083397881
|
Shell
|
sorrowless/sensu-ansible
|
/files/data/static/sensu/checks/checks_tls_certs/check-cert.sh
|
UTF-8
| 688
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
CERT=${1:-nothing}
EXPWARN=${2:-604800} # One week by default
EXPCRIT=${3:-172800} # Two days by default
openssl x509 -in ${CERT} -checkend 0 | grep 'not expire' -q
RC=$?
if [ "$RC" -ne "0" ]; then
echo "CRIT: ${CERT} is expired"
exit 2
fi
openssl x509 -in ${CERT} -checkend ${EXPCRIT} | grep 'not expire' -q
RC=$?
if [ "$RC" -ne "0" ]; then
echo "CRIT: ${CERT} will be expired soon"
exit 1
fi
openssl x509 -in ${CERT} -checkend ${EXPWARN} | grep 'not expire' -q
RC=$?
if [ "$RC" -ne "0" ]; then
echo "WARN: ${CERT} needs to be rotated"
exit 0
else
RES=$(awk "BEGIN{print ${EXPWARN}/86400}")
echo "OK: ${CERT} won't expire in ${RES} days"
exit 0
fi
| true
|
dfc57afeda876064538a21178de415bf784421b8
|
Shell
|
jfuku14/CMMPPT
|
/wit-sa-test/scripts/wit-test-case-save
|
UTF-8
| 888
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#------------------------------------------------------------------------------
# Script wit-test-case-save
#
# Saves the results of a single run of the WIT stand alone executable for test
# purposes.
# Log files are not saved.
# (CPLEX produces log files, whose contents can vary.
# By default, RTC ignores log files.)
#
# Parameters:
# $1: The subdirectory of $WIT_HOME/wit-sa-test/data in which the input
# data file, wit.data, is to be found.
# $2: The stem of the name of the parameters file.
#------------------------------------------------------------------------------
if [ ! -d $WIT_HOME/wit-sa-test/data/$1/$2/output-new ]
then
echo "Save not done: Directory data/$1/$2/output-new does not exist."
exit
fi
cd $WIT_HOME/wit-sa-test/data/$1/$2
rm -r output-new/*.log 2> /dev/null
rm -r output-sav 2> /dev/null
mv output-new output-sav
| true
|
26900f0a5c8e9843c098d46fc59ad046e724ddbd
|
Shell
|
jampaniuday/scripts-2
|
/oracle/ETL/backup_dp.sh
|
UTF-8
| 2,697
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
#INITIALIZE EVIRONMENT
#ORACLE_SID=orcl
#ORAENV_ASK=NO
#. oraenv
. /home/oracle/.bash_profile
#source backup.conf
#CONTENTS_METADATA=
CONTENTS_ALL=SCHEMA1,SCHEMA2
#TABLE_ONLY=REPORT.WD_SQLSTORE
DATE=$(date +\%Y-\%m-\%d)
ORA_DIR=BACKUP
DIRECTORY=$(sqlplus -S / as sysdba <<EOF
set head off
select directory_path from dba_directories where directory_name='$ORA_DIR';
exit
EOF
)
#### SETTINGS FOR ROTATED BACKUPS ####
# Which day to take the weekly backup from (1-7 = Monday-Sunday)
DAY_OF_WEEK_TO_KEEP=5
# Number of days to keep daily backups
DAYS_TO_KEEP=7
# How many weeks to keep weekly backups
WEEKS_TO_KEEP=4
function perform_backups()
{
SUFFIX=$1
ORADIR=$DATE"-"$SUFFIX
FINAL_BACKUP_DIR=$DIRECTORY/$ORADIR/
#MAKE DIRECTORY
if ! mkdir -p $FINAL_BACKUP_DIR; then
echo "Cannot create backup directory in $FINAL_BACKUP_DIR. Go and fix it!" 1>&2
exit 1;
fi;
#EXPORT ONLY METADATA
#if [ ! -z "$CONTENTS_METADATA" ]; then
# for SCHEMA in ${CONTENTS_METADATA//,/ }
# do
# expdp "'/ as sysdba'" schemas=$SCHEMA dumpfile=$SCHEMA directory=$ORA_DIR logfile=$SCHEMA reuse_dumpfiles=yes #compression=all content=metadata_only
# mv $DIRECTORY/$SCHEMA.* $FINAL_BACKUP_DIR/
# perform_logging
# done
#fi
#EXPORT DATA ALONG WITH METADATA
if [ ! -z "$CONTENTS_ALL" ]; then
for SCHEMA in ${CONTENTS_ALL//,/ }
do
expdp "'/ as sysdba'" schemas=$SCHEMA dumpfile=$SCHEMA directory=$ORA_DIR logfile=$SCHEMA reuse_dumpfiles=yes compression=all
mv $DIRECTORY/$SCHEMA.* $FINAL_BACKUP_DIR/
perform_logging
done
fi
#EXPORT 1 TABLE
#if [ ! -z "$TABLE_ONLY" ]; then
# for SCHEMA in ${TABLE_ONLY//,/ }
# do
# expdp "'/ as sysdba'" tables=$SCHEMA dumpfile=$SCHEMA.dmp logfile=$SCHEMA.log directory=BACKUP reuse_dumpfiles=yes #compression=all
# mv $DIRECTORY/$SCHEMA.* $FINAL_BACKUP_DIR/
# perform_logging
# done
#fi
}
#LOGGING FUNCTION. To be called inside backup function.
function perform_logging()
{
cd $FINAL_BACKUP_DIR
ERRORS=$(grep -c ORA- $SCHEMA.log)
sqlplus / as sysdba <<EOF
delete from log.export_log where tag='$SUFFIX' and sch='$SCHEMA';
insert into log.EXPORT_LOG values ('$DATE','$SCHEMA','$ERRORS','$SUFFIX');
commit;
EOF
}
### PERFORM BACKUP ITSELF ###
# WEEKLY BACKUPS
DAY_OF_WEEK=`date +%u` #1-7 (Monday-Sunday)
EXPIRED_DAYS=`expr $((($WEEKS_TO_KEEP * 7) + 1))`
if [ $DAY_OF_WEEK = $DAY_OF_WEEK_TO_KEEP ];
then
# Delete all expired weekly directories
find $DIRECTORY -maxdepth 1 -mtime +$EXPIRED_DAYS -name "*-weekly" -exec rm -rf '{}' ';'
perform_backups "weekly"
exit 0;
fi
# DAILY BACKUPS
# Delete daily backups 7 days old or more
find $DIRECTORY -maxdepth 1 -mtime +$DAYS_TO_KEEP -name "*-daily" -exec rm -rf '{}' ';'
perform_backups "daily"
| true
|
4a059486189a27430675b58205c413455ef5c346
|
Shell
|
lejacobs/ohayo
|
/cgi/wakeup.sh
|
UTF-8
| 2,585
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# We set the default variables
startvolume="0"
endvolume="60"
fade="60" # The duration in seconds of the fading
changeplaylist="false"
random="false" # Toggles the random order
remote="false" # If true starts anyremote
remotecfg="$HOME""/Extra/Anyremote/Server-mode/mpd.cfg"
playlist="http://provisioning.streamtheworld.com/pls/KDFCFM.pls"
# We cycle through the options and modify the variables accordingly
echo "$remotecfg"
displayhelp() {
echo "Usage: wakeup.sh [OPTIONS]..."
echo "Starts MPD with a gentle fading, which is useful to wake up nicely."
echo "It should be added to cron to wake up at a fixed time."
echo "By default it activates random playing in MPD (see below)."
echo ""
echo -e "-s, --startvolume \t\t VOL sets the initial volume to VOL (default=20)"
echo -e "-e, --endvolume VOL \t\t sets the final volume to VOL (default=80)"
echo -e "-f, --fade TIME \t\t sets the fading duration to TIME seconds (default=150)"
echo -e "-p, --playlist PLAYLIST \t before starting MPD, replaces the current playlist with PLAYLIST. If no PLAYLIST is given, it uses the default \"Sveglia\""
echo -e "-r, --remote CFG \t\t before starting MPD, activates anyremote with the config file CFG. If no CFG is given, it uses the default \$HOME/Extra/Anyremote/Server-mode/mpd.cfg"
echo -e "-n, --norandom \t\t\t deactivates the MPD random mode"
echo -e "-h, --help \t\t\t displays this help and exits"
}
while [ "$1" != "" ]; do
case "$1" in
-h | --help )
displayhelp
exit;;
-s | --startvolume )
shift
startvolume="$1"
shift;;
-e | --endvolume )
shift
endvolume="$1"
shift;;
-f | --fade )
shift
fade="$1"
shift;;
-p | --playlist )
shift
changeplaylist="true"
if [ ! -z "$1" ] && [ "$(echo "$1" | cut -b 1 )" != "-" ]; then
playlist="$1"
shift
fi;;
-r | --remote )
shift
remote="true"
if [ ! -z "$1" ] && [ "$(echo "$1" | cut -b 1 )" != "-" ]; then
remotecfg="$1"
shift
fi;;
-n | --norandom )
shift
random="false";;
* )
shift;;
esac
done
deltavol=$[endvolume-startvolume]
echo "$deltavol"
step=$[fade/deltavol]
#if [ $changeplaylist = "true" ]; then
#set the playlist each time
mpc clear
mpc load "$playlist"
#fi
if [ $random = "true" ]; then
mpc random on
else
mpc random off
fi
if [ "$remote" = "true" ]; then
anyremote -f "$remotecfg" &
fi
#mpc volume does not work
#mpc volume $startvolume
sudo amixer -q sset Headphone $startvolume
mpc play
i="0"
while [ "$i" -lt "$deltavol" ]; do
sleep "$step"
sudo amixer -q sset Headphone 1+
i=$[i+1]
done
| true
|
03ffedd62820ede1f1d16f3351e0959f92abfcea
|
Shell
|
OmkarSsawant/bash-scripter
|
/beg.sh
|
UTF-8
| 1,181
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
#BASCICS
#VARIABLE INPUT AND UNSET
#VAR="YOUR NAME IS"
#echo "Enter Your name :"
#read NAME
#NAME="HACKED"
#echo "$VAR $NAM"
#unset NAME VAR
# SPECIAL CHARACTERS
#FILE_NAME=$0
#ARG_1=$1
#ARG_2=$2
#...ARG_N=$N
#ARG_LEN=$#
#DOUBLE_QUOTED_ARGS=$*
#INDIVIDUAL_DOUBLE_QUOTED=$@
#EXIT_STATUS_LAST_CMD=$?
#PROCESS_NUM_CUR_SHELL=$$
#echo "FILE_NAME : $FILE_NAME"
#echo "ARG_1 : $ARG_1"
#echo "ARG_2 : $ARG_2"
#echo "ARG_LEN : $ARG_LEN"
#echo "DOUBLE_QUOTED_ARGS= $DOUBLE_QUOTED_ARGS"
#echo "INDIVIDUAL_DOUBLE_QUOTED : $INDIVIDUAL_DOUBLE_QUOTED"
#echo "EXIT_STATUS_LAST_CMD : $EXIT_STATUS_LAST_CMD"
#echo "PROCESS_NUM_CUR_SHELL : $PROCESS_NUM_CUR_SHELL"
# FOR LOOP
#for ARG in $*
#do
# echo "$ARG"
#done
#for ARG in $@
#do
# echo "$ARG"
#done
#for var in 0 1 2 3 4 5 6 7 8 9
#do
# echo "$var"
#done
#WHILE LOOP
#a=0
#while [ $a -lt 10 ]
#do
echo "$a"
a=`expr $a + 1`
#done
#until [ ! $a -lt 10 ]
#do
# echo "$a"
# a=`expr $a + 1`
#done
# NESTED_LOOP
#while [ $a -lt $1 ]
#do
# b="$a"
# while [ "$b" -ge 0 ]
# do
# echo -n " $b "
# b=`expr $b - 1`
# done
# echo
# a=`expr $a + 1`
#done
for i in {1...100}
do
echo "$i"
done
| true
|
2c05a2869f6211179fb0ba8f4b9cde1d57102739
|
Shell
|
MaxiLund/auto-sklearn
|
/ci_scripts/install.sh
|
UTF-8
| 1,078
| 2.90625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
pip --version
# Install general requirements the way setup.py suggests
pip install pytest pep8 codecov pytest-cov flake8
# Install the packages in the correct order specified by the requirements.txt file
cat requirements.txt | xargs -n 1 -L 1 pip install
# Debug output to know all exact package versions!
pip freeze
if [[ "$TEST_DIST" == "true" ]]; then
pip install twine
python setup.py sdist
# Find file which was modified last as done in https://stackoverflow.com/a/4561987
dist=`find dist -type f -printf '%T@ %p\n' | sort -n | tail -1 | cut -f2- -d" "`
echo "Installing $dist"
pip install "$dist"
twine_output=`twine check "$dist"`
if [[ "$twine_output" != "Checking $dist: PASSED" ]]; then
echo $twine_output
exit 1
else
echo "Check with Twine: OK: $twine_output"
fi
else
python setup.py check -m -s
python setup.py install
fi
# Install openml dependency for metadata generation unittest
pip install openml
mkdir ~/.openml
echo "apikey = 610344db6388d9ba34f6db45a3cf71de" > ~/.openml/config
| true
|
be135950d530c1632dd158534e69bbe11b0519fb
|
Shell
|
tarantool/tarantool-python
|
/tarantool/msgpack_ext/types/timezones/gen-timezones.sh
|
UTF-8
| 2,254
| 3.578125
| 4
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
set -xeuo pipefail
SRC_COMMIT="9ee45289e01232b8df1413efea11db170ae3b3b4"
SRC_FILE=timezones.h
DST_FILE=timezones.py
[ -e ${SRC_FILE} ] && rm ${SRC_FILE}
wget -O ${SRC_FILE} \
https://raw.githubusercontent.com/tarantool/tarantool/${SRC_COMMIT}/src/lib/tzcode/timezones.h
# We don't need aliases in indexToTimezone because Tarantool always replace it:
#
# tarantool> T = date.parse '2022-01-01T00:00 Pacific/Enderbury'
# ---
# ...
# tarantool> T
# ---
# - 2022-01-01T00:00:00 Pacific/Kanton
# ...
#
# So we can do the same and don't worry, be happy.
cat <<EOF > ${DST_FILE}
"""
Tarantool timezone info. Automatically generated by
\`\`gen-timezones.sh\`\`.
"""
# pylint: disable=too-many-lines
TZ_UTC = 0x01
TZ_RFC = 0x02
TZ_MILITARY = 0x04
TZ_AMBIGUOUS = 0x08
TZ_NYI = 0x10
TZ_OLSON = 0x20
TZ_ALIAS = 0x40
TZ_DST = 0x80
indexToTimezone = {
EOF
grep ZONE_ABBREV ${SRC_FILE} | sed "s/ZONE_ABBREV( *//g" | sed "s/[),]//g" \
| awk '{printf(" %s: %s,\n", $1, $3)}' >> ${DST_FILE}
grep ZONE_UNIQUE ${SRC_FILE} | sed "s/ZONE_UNIQUE( *//g" | sed "s/[),]//g" \
| awk '{printf(" %s: %s,\n", $1, $2)}' >> ${DST_FILE}
cat <<EOF >> ${DST_FILE}
}
timezoneToIndex = {
EOF
grep ZONE_ABBREV ${SRC_FILE} | sed "s/ZONE_ABBREV( *//g" | sed "s/[),]//g" \
| awk '{printf(" %s: %s,\n", $3, $1)}' >> ${DST_FILE}
grep ZONE_UNIQUE ${SRC_FILE} | sed "s/ZONE_UNIQUE( *//g" | sed "s/[),]//g" \
| awk '{printf(" %s: %s,\n", $2, $1)}' >> ${DST_FILE}
grep ZONE_ALIAS ${SRC_FILE} | sed "s/ZONE_ALIAS( *//g" | sed "s/[),]//g" \
| awk '{printf(" %s: %s,\n", $2, $1)}' >> ${DST_FILE}
cat <<EOF >> ${DST_FILE}
}
timezoneAbbrevInfo = {
EOF
grep ZONE_ABBREV ${SRC_FILE} | sed "s/ZONE_ABBREV( *//g" | sed "s/[),]//g" \
| awk '{printf(" %s: {\"offset\": %d, \"category\": %s},\n", $3, $2, $4)}' | sed "s/|/ | /g" >> ${DST_FILE}
echo "}" >> ${DST_FILE}
rm timezones.h
python <<EOF
import pytz
from timezones import timezoneToIndex, timezoneAbbrevInfo
if __name__ != '__main__':
raise RuntimeError('Import not expected')
for timezone in timezoneToIndex.keys():
if timezone in pytz.all_timezones:
continue
if not timezone in timezoneAbbrevInfo:
raise KeyError(f'Unknown Tarantool timezone {timezone}')
EOF
| true
|
81826cfad36075ab69edbb6e74b6c61cdaebab8b
|
Shell
|
stiles69/bin
|
/Automation_Custom_Script.sh
|
UTF-8
| 1,589
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#===============================================================================
#
# FILE: Automation_Custom_Script.sh
#
# USAGE: ./Automation_Custom_Script.sh
#
# DESCRIPTION:
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: Brett Salemink (), admin@roguedesigns.us
# ORGANIZATION: Rogue Designs
# CREATED: 07/15/2018 10:04
# REVISION: ---
#===============================================================================
set -o nounset # Treat unset variables as an error
function Main ()
{
echo "Setting up Hosts file."
export MYTEMP="$(mktemp -d /tmp/XXXXXXXX)"
touch $MYTEMP/hosts
MYHOST="$MYTEMP/hosts"
echo "# Static table lookup for hostnames." > $MYHOST
echo "# See hosts(5) for details." >> $MYHOST
echo "127.0.0.1 localhost" >> $MYHOST
echo "127.0.1.1 manjaro.roguedesigns.us manjaro" >> $MYHOST
echo "10.0.0.5 dietpi.roguedesigns.us slave1" >> $MYHOST
echo "10.0.0.6 slave2.roguedesigns.us slave2" >> $MYHOST
echo "10.0.0.11 manjaro.roguedesigns.us manjaro" >> $MYHOST
echo "10.0.0.12 slave3.roguedesigns.us slave3" >> $MYHOST
echo "10.0.0.14 master.roguedesigns.us master" >> $MYHOST
echo "::1 localhost ip6-localhost ip6-loopback" >> $MYHOST
echo "ff02::1 ip6-allnodes" >> $MYHOST
echo "ff02::2 ip6-allrouters" >> $MYHOST
cp $MYHOST /etc/hosts
echo "Finished hosts file"
echo "$(cat /etc/hosts)"
} # end Main
Main
#===EXIT===
exit 0
| true
|
db16a36418493f4b8812d1a8cc06baebedfe8352
|
Shell
|
naeramarth7/dotfiles
|
/zsh/.zprofile
|
UTF-8
| 634
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
if $ZSH_DEBUG; then; echo -en "$(gdate +%s.%N) > .zprofile start\r\n"; fi
# Load the shell dotfiles, and then some:
# * ~/exports/*.src can be used for extending `$PATH` or addding env variables.
for dir in $ZDOTDIR/{exports}; do
if [ -d $dir ]; then
for file in $dir/*.src; do
[ -r "$file" ] && [ -f "$file" ] && source "$file"
done
fi
done
unset dir
unset file
# Bind Alt+LeftArrow and Alt+RightArrow
bindkey "^[^[[C" forward-word
bindkey "^[^[[D" backward-word
# Add all ssh keys from keychain
ssh-add -A 2>/dev/null
if $ZSH_DEBUG; then; echo -en "$(gdate +%s.%N) > .zprofile end\r\n"; fi
| true
|
0877a08501b36d2777db6ad790d61147158360e8
|
Shell
|
ripa1993/tpch-spark
|
/bin/functions.sh
|
UTF-8
| 143
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
function echo_and_run() { echo "$@" ; "$@" ; }
function get_start_ts() {
ts=`ssh ${MASTER} "date +%F-%T"`
echo $ts
}
| true
|
11939db402304b6ec8805d49bfc0de6c2166c086
|
Shell
|
freebsd/freebsd-ports
|
/sysutils/choria/files/choria-server.in
|
UTF-8
| 719
| 3.21875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
#
# PROVIDE: choria_server
# REQUIRE: DAEMON
#
# Add the following lines to /etc/rc.conf to run the choria server:
#
# choria_server_enable (bool): Set it to "YES" to enable the choria server.
# Default is "NO".
# choria_server_options: Options to pass to the choria server
#
. /etc/rc.subr
name="choria_server"
rcvar=`set_rcvar`
pidfile="/var/run/${name}.pid"
load_rc_config ${name}
: ${choria_server_enable="NO"}
: ${choria_server_options="--config=%%PREFIX%%/etc/choria/server.conf"}
command=/usr/sbin/daemon
procname=%%PREFIX%%/bin/choria
command_args="-p ${pidfile} ${procname} server run ${choria_server_options}"
PATH="${PATH}:%%PREFIX%%/bin"
export GODEBUG=x509ignoreCN=0
run_rc_command "$1"
| true
|
97ed4da07d28a9a4410d23d63aea85cf0d15e8a7
|
Shell
|
isabella232/somerville-teacher-tool
|
/scripts/aws/base/clean_docker_remote.sh
|
UTF-8
| 493
| 3.03125
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
# Remote script to clear disk space inside the Docker filesystem.
echo "Cleaning Docker filesystem..."
echo "Cleaning images more than a week old..."
docker rmi $(docker images --no-trunc | egrep ' (weeks|months) ago' | tr -s ' ' | cut -d' ' -f3)
echo "Cleaning volumes..." # see http://blog.yohanliyanage.com/2015/05/docker-clean-up-after-yourself/
docker run -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/docker:/var/lib/docker --rm martin/docker-cleanup-volumes
echo "Done."
| true
|
593302d76b5f1a0f82c908c45416b9df85baaf4a
|
Shell
|
makkarpov/scalingua
|
/sbt-plugin/src/sbt-test/main/no-escape-unicode/verify-pot.sh
|
UTF-8
| 234
| 3.03125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
if [ ! -f "target/scala-2.12/messages/test.pot" ]; then
echo "test.pot does not exists!" >&2
exit 1
fi
tail -n +2 "target/scala-2.12/messages/test.pot" > generated.pot
diff -u generated.pot messages.pot || exit 1
| true
|
32e48734d947167079d8ae4c50a031b7d01d2146
|
Shell
|
ProfessionalFarmer/lake
|
/mountain/mdk/mergeBed.sh
|
UTF-8
| 355
| 2.65625
| 3
|
[] |
no_license
|
#! /bin/bash
# 2018-01-06: change $1 to $@ variable
bedtools='/share/apps/bedtools2/bin/bedtools'
cat $@ | sort -V -k1,1 -k2,2n > bedtools.in.sorted.bed
# -o can be followed by distinct, sum, min, max, absmin, absmax, mean, median, collapse, count_distinct, count
$bedtools merge -i bedtools.in.sorted.bed -c 4 -o distinct
rm bedtools.in.sorted.bed
| true
|
1b9051b2b393db013ac5e519d913c30b31a6e402
|
Shell
|
ruflin/apm-python
|
/travis/run_tests.sh
|
UTF-8
| 959
| 2.734375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
PYTHON_MAJOR_VERSION=$(python -c "import sys; print(sys.version_info[0])");
mkdir -p $PIP_CACHE
mkdir -p wheelhouse
psql -c 'create database opbeat_test;' -U postgres
pip install -U pip
pip install -r test_requirements/requirements-${WEBFRAMEWORK}.txt --cache-dir ${PIP_CACHE}
pip install -r test_requirements/requirements-python-${PYTHON_MAJOR_VERSION}.txt --cache-dir ${PIP_CACHE}
if [[ $TRAVIS_PYTHON_VERSION == '3.5' ]]; then
pip install -r test_requirements/requirements-asyncio.txt --cache-dir ${PIP_CACHE};
fi
if [[ $TRAVIS_PYTHON_VERSION != 'pypy' ]]; then
pip install -r test_requirements/requirements-cpython.txt --cache-dir ${PIP_CACHE};
if [[ $PYTHON_MAJOR_VERSION == '2' ]]; then
pip install -r test_requirements/requirements-zerorpc.txt --cache-dir ${PIP_CACHE};
fi
fi
if [[ $TRAVIS_PYTHON_VERSION == 'pypy' ]]; then
pip install -r test_requirements/requirements-pypy.txt --cache-dir ${PIP_CACHE};
fi
make test
| true
|
0786349eb9a6fbf2609a4f8cb809cc26d3c2c55b
|
Shell
|
map-dcomp/map-code
|
/src/MAP-ChartGeneration/scripts/batch_charts_with_load_latency_5/generate_service_ncp_demand_load_cap_req_plot.sh
|
UTF-8
| 31,309
| 3.0625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#BBN_LICENSE_START -- DO NOT MODIFY BETWEEN LICENSE_{START,END} Lines
# Copyright (c) <2017,2018,2019,2020,2021>, <Raytheon BBN Technologies>
# To be applied to the DCOMP/MAP Public Source Code Release dated 2018-04-19, with
# the exception of the dcop implementation identified below (see notes).
#
# Dispersed Computing (DCOMP)
# Mission-oriented Adaptive Placement of Task and Data (MAP)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#BBN_LICENSE_END
#!/bin/bash
debug() { ! "${log_debug-false}" || log "DEBUG: $*" >&2; }
log() { printf '%s\n' "$*"; }
warn() { log "WARNING: $*" >&2; }
error() { log "ERROR: $*" >&2; }
fatal() { error "$*"; exit 1; }
try() { "$@" || fatal "'$@' failed"; }
mydir=$(cd "$(dirname "$0")" && pwd -L) || fatal "Unable to determine script directory"
cleanup() {
debug "In cleanup"
}
trap 'cleanup' INT TERM EXIT
var=${1:-default}
# check for ${word} in ${string}
test "${string#*$word}" != "$string" && echo "$word found in $string"
#configuration
folder=$1
scenario_run_name=$2
file_base="service_ncp_load_cap_req"
file_base_dns="service_dns_response"
file_base_load_latency="service_load_latency"
title="Load, Allocated Capacity, and Requests Results with overlayed Client Demand for a Service"
title_dns="DNS Response for a Service"
title_load_latency="Load and Processing Latency for a Service"
load_unit="TASK_CONTAINERS"
gnu_file="${folder}/${file_base}.gnu"
eps_file="${folder}/${file_base}"
eps_file_dns="${folder}/${file_base_dns}"
eps_file_load_latency="${folder}/${file_base_load_latency}"
echo "Making NCP load plot for ${file_path}"
# create GNU plot file
cat > ${gnu_file} <<- EOM
set terminal eps enhanced truecolor font 'Arial,11' dashed size 10,8
set datafile separator ","
# macros
set macro
str2num(a)=(a+0)
num2str(a)=sprintf("%f", a+0)
# service, client, region, node
service_name(long_name)=system("name='" . long_name . "' && name=\${name#*ncp*-} && name=\${name#*requests*-} && name=\${name#*dns_req_count*_} && name=\${name#*processing_latency-*-} && name=\${name#*processing_latency-} && name=\${name#*map} && name=\${name%.csv} && name=\${name%-${load_unit}} && printf \${name}")
client_name(long_name)=system("name='" . long_name . "' && name=\${name##*/} && echo \${name%-*.csv}")
region_name(file)=system(" echo '".file."' | sed -E 's/.*processing_latency-|-.*.csv//g'") #system(" echo '".file."' | sed -e \"s/.*processing_latency-\|-.*\.csv//g\"")
# colors
get_node_color_component(node, component)=(system("./node_colors.sh " . node . " " . component) + 0.0)
get_node_color(node)=hsv2rgb(get_node_color_component(node,'h')/100.0,get_node_color_component(node,'s')/100.0,get_node_color_component(node,'v')/100.0)
get_node_color_v(node,variance)=hsv2rgb(get_node_color_component(node,'h')/100.0,get_node_color_component(node,'s')/100.0,variance/100.0)
# time
convert_time_unit(time)=(time / 1000) # convert time from milliseconds to seconds
convert_time(time)=convert_time_unit(time - time_offset) # offset and convert time from milliseconds to seconds
# data columns
column_head(file, col)=system("cat ".file." | head -n 1 | sed -e 's/\r//g' | tr ',' '\n' | head -n ".col." | tail -n 1")
is_column_head_name(file, col)=system("cat ".file." | head -n 1 | sed -e 's/\r//g' | tr ',' '\n' | grep " . col . " | wc -l")
n_columns(file)=(system("cat ".file." | head -n 1 | sed -e 's/\r//g' | tr ',' '\n' | wc -l") + 0)
col_sum(a, b)=(sum [c=a:b] (valid(c) ? column(c) : 0))
# agent configuration
get_agent_config_value(property, default)=(val="".system("cat ${folder}/../scenario/agent-configuration.json 2> /dev/null | grep '\"".property."\" : ' | sed -E s/'.* : |,.*'//g"), (val != "" ? val : default))
get_agent_config_value_as_string(property, default)=system("echo '" . (get_agent_config_value(property, default)) . "' | sed -E s/'^\"|\"$'//g")
get_agent_config_value_as_num(property, default)=(get_agent_config_value_as_string(property, default) + 0)
# macros for accessing particular types of data files
n_demand_files_by_client=system("ls ${folder}/client_demand/client_*-${load_unit}.csv 2> /dev/null | wc -l")
n_demand_files_by_service=system("ls ${folder}/client_demand/service_*-${load_unit}.csv 2> /dev/null | wc -l")
n_demand_reported_files=system("ls ${folder}/load/ncp_demand-*-${load_unit}.csv 2> /dev/null | wc -l")
n_load_files=system("ls ${folder}/load/ncp_load-*-${load_unit}.csv 2> /dev/null | wc -l")
n_allocated_capacity_files=system("ls ${folder}/load/ncp_allocated_capacity-*-${load_unit}.csv 2> /dev/null | wc -l")
n_requests_results_count_files=system("ls ${folder}/requests_results/binned_request_count-*.csv 2> /dev/null | wc -l")
n_requests_results_load_files=system("ls ${folder}/requests_results/binned_request_load-*-${load_unit}.csv 2> /dev/null | wc -l")
demand_file_by_client(f)=system("ls ${folder}/client_demand/client_*-${load_unit}.csv 2> /dev/null | head -n " . f . " | tail -n 1")
demand_file_by_service(f)=system("ls ${folder}/client_demand/service_*-${load_unit}.csv 2> /dev/null | head -n " . f . " | tail -n 1")
demand_reported_file(f)=system("ls ${folder}/load/ncp_demand-*-${load_unit}.csv 2> /dev/null | head -n " . f . " | tail -n 1")
load_file(f)=system("ls ${folder}/load/ncp_load-*-${load_unit}.csv 2> /dev/null | head -n " . f . " | tail -n 1")
allocated_capacity_file(f)=system("ls ${folder}/load/ncp_allocated_capacity-*-${load_unit}.csv 2> /dev/null | head -n " . f . " | tail -n 1")
requests_results_count_file(f)=system("ls ${folder}/requests_results/binned_request_count-*.csv 2> /dev/null | head -n " . f . " | tail -n 1")
requests_results_load_file(f)=system("ls ${folder}/requests_results/binned_request_load-*-${load_unit}.csv 2> /dev/null | head -n " . f . " | tail -n 1")
n_dns_req_files=system("ls ${folder}/dns/dns_req_count_*.csv 2> /dev/null | grep -v -E '.*--.*' | wc -l")
dns_req_file(f)=system("ls ${folder}/dns/dns_req_count_*.csv 2> /dev/null | grep -v -E '.*--.*' | head -n " . f . " | tail -n 1")
dns_req_file_get_n_regions(file)=system("head -n 1 ".file." | tr ',' '\n' | tail -n +2 | grep -v -E '.-.*_' | wc -l")
dns_req_file_get_n_containers(file)=system("head -n 1 ".file." | tr ',' '\n' | tail -n +2 | grep -E '.-.*_' | wc -l")
n_server_latency_files=system("ls ${folder}/latency_dns/server_processing_latency-*.csv 2> /dev/null | wc -l")
n_region_server_latency_files(service)=system("ls ${folder}/latency_dns/server_processing_latency-*-".service.".csv 2> /dev/null | wc -l")
n_binned_server_latency_count_files=system("ls ${folder}/latency_dns/binned_server_processing_latency_counts-*.csv 2> /dev/null | wc -l")
server_latency_file(f)=system("ls ${folder}/latency_dns/server_processing_latency-*.csv 2> /dev/null | head -n " . f . " | tail -n 1")
region_server_latency_file(f,service)=system("ls ${folder}/latency_dns/server_processing_latency-*-".service.".csv 2> /dev/null | head -n " . f . " | tail -n 1")
binned_region_server_latency_count_file(f)=system("ls ${folder}/latency_dns/binned_server_processing_latency_counts-*.csv 2> /dev/null | head -n " . f . " | tail -n 1")
n_client_latency_files=system("ls ${folder}/latency_dns/client_processing_latency-*.csv 2> /dev/null | wc -l")
client_latency_file(f)=system("ls ${folder}/latency_dns/client_processing_latency-*.csv 2> /dev/null | head -n " . f . " | tail -n 1")
ncps = 0
services = 0
clients = 0
do for [f=1:n_load_files] {
stats load_file(f) skip 1 nooutput
cols = floor(STATS_columns)
ncps = (cols-1 > ncps ? cols-1 : ncps)
}
services = (n_load_files > services ? n_load_files : services)
services = (n_allocated_capacity_files > services ? n_allocated_capacity_files : services)
clients = n_demand_files_by_client
print "Services: " . services
print "NCPS: " . ncps
print "Clients: " . clients
# preprocess data to establish scales
set xrange [*:*]
set yrange [*:*]
x_offset = 0
x_scale_min = -1
x_scale_max = 0
y_scale_min = 0
y_scale_max = 0
y_scale_max_demand = 0
y_scale_max_load = 0
y_scale_max_allocated_capacity = 0
y_scale_max_processing_latency = 0
y_scale_max_mean_processing_latency = 0
y_scale_max_processing_latency_bin_count = 0
y_scale_max_requests_results = 0
# demand files
do for [f=1:n_demand_files_by_service] {
stats demand_file_by_service(f) using 1 nooutput
min_time = STATS_min
max_time = STATS_max
do for [s=1:services] {
stats demand_file_by_service(f) skip 1 nooutput
cols = floor(STATS_columns)
cols=n_columns(demand_file_by_service(f))
stats demand_file_by_service(f) using (col_sum(2,cols)) nooutput
min_value = STATS_min
max_value = STATS_max
y_scale_max_demand = (max_value > y_scale_max_demand ? max_value : y_scale_max_demand)
}
}
# load and capacity files
do for [service=1:services] {
# load
f=service
stats load_file(f) using 1 nooutput
min_time = STATS_min
max_time = STATS_max
cols=n_columns(load_file(f))
stats load_file(f) using (col_sum(2,cols)) nooutput
min_value = STATS_min
max_value = STATS_max
y_scale_max_load = (max_value > y_scale_max_load ? max_value : y_scale_max_load)
x_scale_min = (min_time < x_scale_min | x_scale_min == -1 ? min_time : x_scale_min)
x_scale_max = (max_time > x_scale_max ? max_time : x_scale_max)
# capacity
f=service
stats allocated_capacity_file(f) using 1 nooutput
min_time = STATS_min
max_time = STATS_max
x_scale_min = (min_time < x_scale_min | x_scale_min == -1 ? min_time : x_scale_min)
x_scale_max = (max_time > x_scale_max ? max_time : x_scale_max)
cols=n_columns(allocated_capacity_file(f))
stats allocated_capacity_file(f) using (col_sum(2,cols)) nooutput
min_value = STATS_min
max_value = STATS_max
y_scale_max_allocated_capacity = (max_value > y_scale_max_allocated_capacity ? max_value : y_scale_max_allocated_capacity)
if (n_requests_results_count_files > 0 && n_requests_results_load_files > 0) {
# requests results
f=service
stats requests_results_count_file(f) using 1 nooutput
min_time = STATS_min
max_time = STATS_max
stats requests_results_count_file(f) using 2 nooutput
min_value = STATS_min
max_value = STATS_max
x_scale_min = (min_time < x_scale_min | x_scale_min == -1 ? min_time : x_scale_min)
x_scale_max = (max_time > x_scale_max ? max_time : x_scale_max)
y_scale_max_requests_results = (max_value > y_scale_max_requests_results ? max_value : y_scale_max_requests_results)
stats requests_results_load_file(f) using 1 nooutput
min_time = STATS_min
max_time = STATS_max
stats requests_results_load_file(f) using 2 nooutput
min_value = STATS_min
max_value = STATS_max
x_scale_min = (min_time < x_scale_min | x_scale_min == -1 ? min_time : x_scale_min)
x_scale_max = (max_time > x_scale_max ? max_time : x_scale_max)
y_scale_max_requests_results = (max_value > y_scale_max_requests_results ? max_value : y_scale_max_requests_results)
}
# server procesing latency
f=service
stats server_latency_file(f) using 1 nooutput
min_time = STATS_min
max_time = STATS_max
stats server_latency_file(f) using (column('latency')) nooutput
min_value = STATS_min
max_value = STATS_max
mean_value = STATS_mean
y_scale_max_mean_processing_latency = (mean_value > y_scale_max_mean_processing_latency ? mean_value : y_scale_max_mean_processing_latency)
y_scale_max_processing_latency = (max_value > y_scale_max_processing_latency ? max_value : y_scale_max_processing_latency)
x_scale_min = (min_time < x_scale_min | x_scale_min == -1 ? min_time : x_scale_min)
x_scale_max = (max_time > x_scale_max ? max_time : x_scale_max)
# server procesing latency count
f=service
stats binned_region_server_latency_count_file(f) using 1 nooutput
min_time = STATS_min
max_time = STATS_max
x_scale_min = (min_time < x_scale_min | x_scale_min == -1 ? min_time : x_scale_min)
x_scale_max = (max_time > x_scale_max ? max_time : x_scale_max)
cols=n_columns(binned_region_server_latency_count_file(f))
do for [col=2:cols] {
stats binned_region_server_latency_count_file(f) using (column(col)) nooutput
min_value = STATS_min
max_value = STATS_max
y_scale_max_processing_latency_bin_count = (max_value > y_scale_max_processing_latency_bin_count ? max_value : y_scale_max_processing_latency_bin_count)
}
}
x_scale_min = (x_scale_min >= 0 ? x_scale_min : 0)
x_offset = x_scale_min
x_scale_min = x_scale_min - x_offset
x_scale_max = ceil((x_scale_max - x_offset) * 1.02)
y_scale_max = (y_scale_max_demand > y_scale_max ? y_scale_max_demand : y_scale_max)
y_scale_max = (y_scale_max_load > y_scale_max ? y_scale_max_load : y_scale_max)
y_scale_max = (y_scale_max_allocated_capacity > y_scale_max ? y_scale_max_allocated_capacity : y_scale_max)
#y_scale_max = (y_scale_max_requests_results > y_scale_max ? y_scale_max_demand : y_scale_max)
y_scale_max = y_scale_max * 1.1
rlg_overload_threshold=get_agent_config_value("rlgLoadThreshold", "0.0")
rlg_underload_ended_threshold=get_agent_config_value("rlgUnderloadEndedThreshold", "0.35")
rlg_underload_threshold=get_agent_config_value("rlgUnderloadThreshold", "0.25")
print "\n"
print sprintf("RLG Overload Threshold: %f", str2num(rlg_overload_threshold))
#print sprintf("RLG Underload Ended Threshold: %f", str2num(rlg_underload_ended_threshold))
#print sprintf("RLG Underload Threshold: %f", str2num(rlg_underload_threshold))
print sprintf("Max demand: %.2f", y_scale_max_demand)
print sprintf("Max load: %.2f", y_scale_max_load)
print sprintf("Max allocated capacity: %.2f", y_scale_max_allocated_capacity)
print sprintf("Max processing latency: %d", y_scale_max_processing_latency)
print sprintf("Max processing latency count: %d", y_scale_max_processing_latency_bin_count)
print sprintf("X scale: [%.0f, %.0f]", x_scale_min, x_scale_max)
print sprintf("Y scale: [%.2f, %.2f]", y_scale_min, y_scale_max)
print sprintf("X offset: %.0f", x_offset)
time_offset = x_offset
print("\n\n")
# configure chart parameters
unset xtics
set xtics rotate by -45 offset -1,-.5
set xtics auto
set ytics auto
set boxwidth 1
set grid y
set xrange [0:]
set yrange [0:]
#set offsets 0,0,1,0
set title "${title//_/\\\\_}\n${scenario_run_name//_/\\\\_}"
set xlabel "Time (s)"
set key center bottom box outside
set style fill solid 0.5 noborder
set style data lines
service_load_thresholds_plot_string=""
service_load_thresholds_plot_string=service_load_thresholds_plot_string . \
rlg_overload_threshold . "lw 3 lc rgb 'blue' t 'Overload (P+)', " . \
rlg_underload_threshold . "lw 3 lc rgb 'red' t 'Underload (P-)', " . \
rlg_underload_ended_threshold . "lw 2 lc rgb 'purple' t 'Underload Ended'"
do for [service=1:services] {
print("Service " . service)
#set xrange [x_scale_min:x_scale_max]
#set yrange[y_scale_min:]
set xrange [*:*]
set yrange [*:*]
# construct string for plot command
plot_string=""
# add demand lines
print(" Demand")
demand_line_string=""
service_demand_plot_string=""
service_all_demand_plot_string=""
service_all_demand_title_plot_string=""
f=service
do for [s=1:services] {
stats demand_file_by_service(f) skip 1 nooutput
cols = floor(STATS_columns)
if (s == service) {
demand_line_string = "demand_file_by_service(".f.") using (convert_time_unit(column(1))):(col_sum(2, ".cols.")) lc rgb 'black' lw 3 dt 1 t ('demand') with lines, "
service_demand_plot_string=service_demand_plot_string . demand_line_string
} else {
demand_line_string = "demand_file_by_service(".f.") using (convert_time_unit(column(1))):(col_sum(2, ".cols.")) lc rgb 'black' lw 3 dt 1 notitle with lines, "
}
service_all_demand_plot_string=service_all_demand_plot_string . demand_line_string
}
# add load stacked areas
print(" Load")
service_load_plot_string=""
load_service_names=""
f=service
stats load_file(f) skip 1 nooutput
cols = floor(STATS_columns)
do for [col=2:cols] {
service_load_plot_string=service_load_plot_string . \
"load_file(".f.") using (convert_time(column(1))):(col_sum(".col.",".cols.")):(column(1)*0) lt rgb get_node_color(column_head(load_file(".f."),".col.")) lw 2 t (columnhead(".col.")) with filledcurves, " . \
"load_file(".f.") using (convert_time(column(1))):(col_sum(".col.",".cols.")) with linespoints lt rgb get_node_color(column_head(load_file(".f."),".col.")) pt (7+".f."-1) ps 0.3 notitle, "
}
load_service_names=load_service_names . (f > 1 ? "\n" : "") . service_name(load_file(f))
# add Demand Reported stacked areas
print(" Demand Reported")
service_demand_reported_plot_string=""
demand_reported_service_names=""
f=service
stats demand_reported_file(f) skip 1 nooutput
cols = floor(STATS_columns)
do for [col=2:cols] {
service_demand_reported_plot_string=service_demand_reported_plot_string . \
"demand_reported_file(".f.") using (convert_time(column(1))):(col_sum(".col.",".cols.")):(column(1)*0) lt rgb get_node_color(column_head(load_file(".f."),".col.")) lw 2 t (columnhead(".col.")) with filledcurves, " . \
"demand_reported_file(".f.") using (convert_time(column(1))):(col_sum(".col.",".cols.")) with linespoints lt rgb get_node_color(column_head(load_file(".f."),".col.")) pt (7+".f."-1) ps 0.3 notitle, "
}
demand_reported_service_names=demand_reported_service_names . (f > 1 ? "\n" : "") . service_name(demand_reported_file(f))
# add allocated capacity stacked areas
print(" Allocated Capacity")
service_allocated_capacity_plot_string=""
allocated_capacity_service_names=""
f=service
stats allocated_capacity_file(f) skip 1 nooutput
cols = floor(STATS_columns)
do for [col=2:cols] {
service_allocated_capacity_plot_string=service_allocated_capacity_plot_string . \
"allocated_capacity_file(".f.") using (convert_time(column(1))):(col_sum(".col.",".cols.")):(column(1)*0) lt rgb get_node_color(column_head(load_file(".f."),".col.")) lw 2 t (columnhead(".col.")) with filledcurves, " . \
"allocated_capacity_file(".f.") using (convert_time(column(1))):(col_sum(".col.",".cols.")) with linespoints lt rgb get_node_color(column_head(load_file(".f."),".col.")) pt (7+".f."-1) ps 0.3 notitle, "
}
allocated_capacity_service_names=allocated_capacity_service_names . (f > 1 ? "\n" : "") . service_name(allocated_capacity_file(f))
# add requests results lines
print(" Request Results")
requests_results_count_plot_string=""
requests_results_load_plot_string=""
requests_results_count_service_names=""
requests_results_load_service_names=""
if (n_requests_results_count_files > 0 && n_requests_results_load_files > 0) {
f=service
requests_results_count_plot_string=requests_results_count_plot_string . \
"requests_results_count_file(".f.") using (convert_time(column(1))):(column('succeeded')) axes x1y2 lw 1 lt 1 lc rgb 'blue' title 'Succeeded', " . \
"requests_results_count_file(".f.") using (convert_time(column(1))):(column('failed_for_server')) axes x1y2 lw 1 lt 1 lc rgb 'red' title 'Failed (Server)', " . \
"requests_results_count_file(".f.") using (convert_time(column(1))):(column('failed_for_network')) axes x1y2 lw 1 lt 1 lc rgb 'black' title 'Failed (Network)', "
requests_results_count_service_names=requests_results_count_service_names . (f > 1 ? "\n" : "") . service_name(requests_results_count_file(f))
requests_results_load_plot_string=requests_results_load_plot_string . \
"requests_results_load_file(".f.") using (convert_time(column(1))):(column('succeeded')) axes x1y2 lw 1 lt 1 lc rgb 'blue' title 'Demand Succeeded', " . \
"requests_results_load_file(".f.") using (convert_time(column(1))):(column('failed_for_server')) axes x1y2 lw 1 lt 1 lc rgb 'red' title 'Demand Failed (Server)', " . \
"requests_results_load_file(".f.") using (convert_time(column(1))):(column('failed_for_network')) axes x1y2 lw 1 lt 1 lc rgb 'black' title 'Demand Failed (Network)', "
requests_results_load_service_names=requests_results_load_service_names . (f > 1 ? "\n" : "") . service_name(requests_results_load_file(f))
}
# add DNS response counts
print(" DNS Response Counts")
dns_req_count_regions_plot_string=""
dns_req_count_containers_plot_string=""
dns_req_count_plot_service_names=""
f=service
file=dns_req_file(f)
n_regions=dns_req_file_get_n_regions(file)
n_containers=dns_req_file_get_n_containers(file)
print "DNS Response: " . file . " (" . n_regions . " regions, " . n_containers . " containers)"
do for [r=1:n_regions] {
col=r+1
dns_req_count_regions_plot_string=dns_req_count_regions_plot_string . \
"dns_req_file(".f.") using ((convert_time(column(1)))):(column(".col.")) with lines dt 1 lc ".col." t column(".col."), \n"
}
do for [c=1:n_containers] {
col=1+n_regions+c
dns_req_count_containers_plot_string=dns_req_count_containers_plot_string . \
"dns_req_file(".f.") using ((convert_time(column(1)))):(column(".col.")) with lines dt 1 lc ".col." t column(".col."), \n"
}
dns_req_count_plot_service_names=dns_req_count_plot_service_names . (f > 1 ? "\n" : "") . service_name(file)
# add client processing latency data
print(" Client Processing Latency Data")
client_request_latency_plot_string=""
client_request_latency_smooth_plot_string=""
client_request_latency_plot_service_names=""
f=service
file=client_latency_file(f)
client_request_latency_plot_string=client_request_latency_plot_string . \
"client_latency_file(".f.") using ((convert_time(column(1)))):(convert_time_unit(column('latency'))) lc 'gray' t 'Latency (raw)', \n"
client_request_latency_smooth_plot_string=client_request_latency_smooth_plot_string . \
"client_latency_file(".f.") using ((convert_time(column(1)))):(convert_time_unit(column('latency'))) lw 5 lc 'red' smooth bezier t 'Latency (smooth)', \n"
client_request_latency_plot_service_names=client_request_latency_plot_service_names . (f > 1 ? "\n" : "") . service_name(file)
# add server processing latency data
print(" Server Processing Latency Data")
server_processing_latency_plot_string=""
server_processing_latency_smooth_plot_string=""
binned_processing_latency_count_plot_string=""
server_processing_latency_plot_service_names=""
f=service
file=server_latency_file(f)
service_name = service_name(file)
# server_processing_latency_plot_string=server_processing_latency_plot_string . \
# "server_latency_file(".f.") using ((convert_time(column(1)))):(convert_time_unit(column('latency'))) lc 'gray' t 'Latency (raw)', \n"
# server_processing_latency_smooth_plot_string=server_processing_latency_smooth_plot_string . \
# "server_latency_file(".f.") using ((convert_time(column(1)))):(convert_time_unit(column('latency'))) lw 5 lc 'red' smooth bezier t 'Latency (smooth)', \n"
server_processing_latency_plot_service_names=server_processing_latency_plot_service_names . (f > 1 ? "\n" : "") . service_name(file)
print "\n\n\n"
do for [r=1:n_region_server_latency_files(service_name)] {
col=r+1
file=region_server_latency_file(r,service_name)
region=region_name(file)
region_color=get_node_color(region)
region_color_smooth=get_node_color_v(region, 50)
server_processing_latency_plot_string=server_processing_latency_plot_string . \
"'" . file . "' using ((convert_time(column(1)))):(convert_time_unit(column('latency'))) lt rgb ".region_color." t 'Latency (raw) - Region ".region."', \n"
server_processing_latency_smooth_plot_string=server_processing_latency_smooth_plot_string . \
"'" . file . "' using ((convert_time(column(1)))):(convert_time_unit(column('latency'))) lw 5 lt rgb ".region_color_smooth." smooth bezier t 'Latency (smooth) - Region ".region."', \n"
if (is_column_head_name(binned_region_server_latency_count_file(f),region) > 0) {
binned_processing_latency_count_plot_string=binned_processing_latency_count_plot_string . \
"binned_region_server_latency_count_file(".f.") using (convert_time(column(1))):(column('".region."')) lt rgb get_node_color(column_head(binned_region_server_latency_count_file(".f."),".col.")) lw 2 t 'Region ".region."' with linespoints, "
}
print "Service: " . service_name . " Region: " . region . "\n (" . file . ")\n"
stats file using (column(1)):(column('latency'))
print "\n\n\n"
}
print("\n\n")
### Output chart for Load, Demand, Allocated Containers, and Requests Results
set output "${eps_file}-" . service . ".eps"
set multiplot layout 2,2 title "${title//_/\\\\_}\n${scenario_run_name//_/\\\\_}" font 'Garamond,14'
set xrange [convert_time_unit(x_scale_min):convert_time_unit(x_scale_max)]
set yrange [y_scale_min:*]
set key samplen 5 spacing 1.75font ",6" box opaque top right reverse
set key box width 1
set tmargin 5
set title load_service_names
set ylabel "Load (${load_unit})"
set key
eval "plot " . service_load_plot_string . service_demand_plot_string
set title demand_reported_service_names
set ylabel "Predicted Demand (${load_unit})"
set key
eval "plot " . service_demand_reported_plot_string . service_demand_plot_string
#if (strlen(requests_results_load_plot_string) > 0) {
set title requests_results_load_service_names
set ylabel "Request Demand (${load_unit})"
set key
eval "plot " . requests_results_load_plot_string . service_demand_plot_string
#}
set title allocated_capacity_service_names
set ylabel "Allocated Capacity (${load_unit})"
set key
eval "plot " . service_allocated_capacity_plot_string . service_all_demand_plot_string
unset multiplot
### Output chart for DNS response analysis
set output "${eps_file_dns}-" . service . ".eps"
set multiplot layout 3,1 title "${title_dns//_/\\\\_}\n${scenario_run_name//_/\\\\_}" font 'Garamond,14'
set xrange [convert_time_unit(x_scale_min):convert_time_unit(x_scale_max)]
set yrange [y_scale_min:]
set key samplen 5 spacing 1.75font ",6" box opaque top right reverse
set key box width 1
set key inside
set tmargin 5
set title allocated_capacity_service_names
set ylabel "Allocated Capacity\n(${load_unit})"
eval "plot " . service_allocated_capacity_plot_string . service_all_demand_plot_string
set title dns_req_count_plot_service_names
set ylabel "Number of DNS Responses\nto Region"
eval "plot " . dns_req_count_regions_plot_string
set title dns_req_count_plot_service_names
set ylabel "Number of DNS Responses\nto Container"
eval "plot " . dns_req_count_containers_plot_string
unset multiplot
### Output chart for DNS response analysis for regions
set output "${eps_file_dns}-reg-" . service . ".eps"
set multiplot layout 2,1 title "${title_dns//_/\\\\_}\n${scenario_run_name//_/\\\\_}" font 'Garamond,14'
set xrange [convert_time_unit(x_scale_min):convert_time_unit(x_scale_max)]
set yrange [y_scale_min:]
set key samplen 5 spacing 1.75font ",6" box opaque top right reverse
set key box width 1
set key inside
set tmargin 5
set title allocated_capacity_service_names
set ylabel "Allocated Capacity\n(${load_unit})"
eval "plot " . service_allocated_capacity_plot_string . service_all_demand_plot_string
set title dns_req_count_plot_service_names
set ylabel "Number of DNS Responses\nto Region"
eval "plot " . dns_req_count_regions_plot_string
unset multiplot
### Output chart for DNS response analysis for containers
set output "${eps_file_dns}-con-" . service . ".eps"
set multiplot layout 2,1 title "${title_dns//_/\\\\_}\n${scenario_run_name//_/\\\\_}" font 'Garamond,14'
set xrange [convert_time_unit(x_scale_min):convert_time_unit(x_scale_max)]
set yrange [y_scale_min:]
set key samplen 5 spacing 1.75font ",6" box opaque top right reverse
set key box width 1
set key inside
set tmargin 5
set title allocated_capacity_service_names
set ylabel "Allocated Capacity\n(${load_unit})"
eval "plot " . service_allocated_capacity_plot_string . service_all_demand_plot_string
set title dns_req_count_plot_service_names
set ylabel "Number of DNS Responses\nto Container"
eval "plot " . dns_req_count_containers_plot_string
unset multiplot
### Output chart for Load and Processing Latency
set output "${eps_file_load_latency}-" . service . ".eps"
set multiplot layout 4,1 title "${title_load_latency//_/\\\\_}\n${scenario_run_name//_/\\\\_}" font 'Garamond,14'
set xrange [convert_time_unit(x_scale_min):convert_time_unit(x_scale_max)]
set key samplen 5 spacing 1.75font ",6" box opaque top right noreverse
set key box width 1
set key top inside maxrows 1
set tmargin 5
set title load_service_names
set xlabel ""
set ylabel "Demand\n(${load_unit})"
set yrange [y_scale_min:y_scale_max_load*1.1]
#eval "plot " . service_demand_reported_plot_string
set title allocated_capacity_service_names
set xlabel
set ylabel "Allocated Capacity\n(${load_unit})"
set yrange [y_scale_min:y_scale_max_allocated_capacity*1.1]
eval "plot " . service_allocated_capacity_plot_string
set title load_service_names
set xlabel
set ylabel "Load\n(${load_unit})"
set yrange [y_scale_min:y_scale_max_load*1.1]
eval "plot " . service_load_plot_string . service_load_thresholds_plot_string
set title server_processing_latency_plot_service_names
set xlabel
set ylabel "Server Processing Latency (s)\n "
set yrange [convert_time_unit(y_scale_min):convert_time_unit(y_scale_max_mean_processing_latency*2)*1.1]
eval "plot " . server_processing_latency_plot_string . server_processing_latency_smooth_plot_string
set title server_processing_latency_plot_service_names
set xlabel "Time (s)"
set ylabel "Server Request Rate (requests/min) \n "
set yrange [0:y_scale_max_processing_latency_bin_count*1.1]
eval "plot " . binned_processing_latency_count_plot_string
set title client_request_latency_plot_service_names
set xlabel ""
set ylabel "Client Request Latency (s)\n "
set yrange [convert_time_unit(y_scale_min):convert_time_unit(y_scale_max_processing_latency)*1.1]
#eval "plot " . client_request_latency_plot_string . client_request_latency_smooth_plot_string
unset multiplot
}
print("\n\n\n")
EOM
# Render the eps plot files from the generated gnuplot script and data.
# Test the command's exit status directly instead of inspecting $? on the
# next line, which is fragile (any command inserted in between clobbers it).
if gnuplot "${gnu_file}"; then
    log "Plot Succeeded"
else
    warn "Plot failed"
fi
exit 0
| true
|
74bd04791e2b3148ff248cbdc85874bfaf3e0f05
|
Shell
|
DefaultValue/docker_infrastructure
|
/docker-image-build-and-push.sh
|
UTF-8
| 3,559
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash

# Build the development and production PHP Docker images for every supported
# PHP version and push them to Docker Hub.
#
# 1. Build development and production images
# 2. Run `DOCKERIZER magento:test dockerfiles`
# 3. Push images

# Check extensions list and custom PHP configuration file
# docker exec -it $(DOCKERIZER composition:get-container-name php) php -v
# docker exec -it $(DOCKERIZER composition:get-container-name php) php -r 'var_export(get_loaded_extensions());'
# docker exec -it $(DOCKERIZER composition:get-container-name php) php -r 'var_export(get_loaded_extensions(true));'
# docker exec -it $(DOCKERIZER composition:get-container-name php) cat /usr/local/etc/php/conf.d/docker-php-xxx-custom.ini

set -e

# BUG FIX: the original ran `docker login -u defaultvalue -p` with no password
# argument, which makes the docker CLI abort ("flag needs an argument") and,
# under `set -e`, kills the script immediately. Let docker prompt for the
# password interactively instead, so it is neither hardcoded nor visible in
# `ps` output.
docker login -u defaultvalue

docker container prune -f
docker image prune -af

# Root of the per-version PHP template directories.
TEMPLATES_DIR=/home/maksymz/misc/apps/docker_infrastructure/templates/php

# One build+push pair per version/flavour replaces ~60 copy-pasted lines.
# Order matches the original script: production first, then development,
# pushing each image right after it is built.
for VERSION in 5.6 7.0 7.1 7.2 7.3 7.4 8.0 8.1 8.2; do
    cd "${TEMPLATES_DIR}/${VERSION}" || exit
    for FLAVOUR in production development; do
        docker build -t "defaultvalue/php:${VERSION}-${FLAVOUR}" . -f "${FLAVOUR}.Dockerfile"
        docker push "defaultvalue/php:${VERSION}-${FLAVOUR}"
    done
done
| true
|
2410750d2f99f23d43c99d7093885fc2285a7a80
|
Shell
|
TheRockXu/deepdive
|
/database/test/tsv2tsj.bats
|
UTF-8
| 1,646
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bats
# Tests for tsv2tsj
load test_environ
load corner_cases
# Fan a tab-separated sequence of JSON documents out to one document per
# line, mirror the intermediate stream to stderr for debugging, and emit
# each document compact-printed by jq.
parse_each_tab_sep_json() {
  tr '\t' '\n' | tee /dev/stderr | jq -c .
}
# Round-trip the "nasty" corner-case fixture (types/values loaded from
# corner_cases) through tsv2tsj and verify the produced tab-separated JSON
# matches the expected TSJ, comparing jq-normalised documents.
@test "tsv2tsj works" {
  cd "$BATS_TEST_DIRNAME"
  actual=$(eval keeping_output_of tsv2tsj $NastyTypes <<<"$NastyTSV" | parse_each_tab_sep_json)
  diff -u <(echo "$NastyTSJ" | jq -c .) \
          <(echo "$actual" | jq -c .) \
          #
}
# Same round-trip check for array columns that contain null elements.
@test "tsv2tsj works with null in array" {
  cd "$BATS_TEST_DIRNAME"
  actual=$(eval keeping_output_of tsv2tsj $NullInArrayTypes <<<"$NullInArrayTSV" | parse_each_tab_sep_json)
  diff -u <(echo "$NullInArrayTSJ" | jq -c .) \
          <(echo "$actual" | jq -c .) \
          #
}
# Round-trip check for nested-array columns; currently skipped because
# tsv2tsj does not support them yet.
@test "tsv2tsj works with nested array" {
  skip "NOT SUPPORTED YET" # TODO
  cd "$BATS_TEST_DIRNAME"
  actual=$(eval keeping_output_of tsv2tsj $NestedArrayTypes <<<"$NestedArrayTSV" | parse_each_tab_sep_json)
  diff -u <(echo "$NestedArrayTSJ" | jq -c .) \
          <(echo "$actual" | jq -c .) \
          #
}
# Same round-trip check with unicode content in the fixture values.
@test "tsv2tsj works with unicode" {
  cd "$BATS_TEST_DIRNAME"
  actual=$(eval keeping_output_of tsv2tsj $UnicodeTypes <<<"$UnicodeTSV" | parse_each_tab_sep_json)
  diff -u <(echo "$UnicodeTSJ" | jq -c .) \
          <(echo "$actual" | jq -c .) \
          #
}
# Round-trip check for timestamp columns; currently skipped because
# tsv2tsj does not support them yet.
@test "tsv2tsj works with timestamps" {
  skip "NOT SUPPORTED YET" # TODO
  cd "$BATS_TEST_DIRNAME"
  actual=$(eval keeping_output_of tsv2tsj $TimestampTypes <<<"$TimestampTSV" | parse_each_tab_sep_json)
  diff -u <(echo "$TimestampTSJ" | jq -c .) \
          <(echo "$actual" | jq -c .) \
          #
}
| true
|
b37b0b9df2231dc2b8193adaf2aebc55d0fe1a80
|
Shell
|
jgkennedy/openshift-tools
|
/docker/oso-rhel7-zagg-client/start.sh
|
UTF-8
| 515
| 2.96875
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash -e

# Configure the zagg client with a container-specific hostname and the
# zagg server credentials from the environment, then hand control to crond.

# set hostname to register against zabbix: prefix the host kernel's nodename
# (read via PCP's pminfo) with "CTR-" to mark it as a container.
echo 'setting hostname in ops-zagg-client'
CONTAINER_HOSTNAME="CTR-$(/usr/bin/pminfo -f kernel.uname.nodename | /usr/bin/awk -F \" /value/'{print $2}')"

# Rewrite the name/host/pass fields in place; ZAGG_SERVER and ZAGG_PASSWORD
# are expected to be provided by the container environment.
sed -i -e "s/^ name:.*$/ name: $CONTAINER_HOSTNAME/" \
       -e "s/^ host:.*$/ host: $ZAGG_SERVER/" \
       -e "s/^ pass:.*$/ pass: $ZAGG_PASSWORD/" \
       /etc/openshift_tools/zagg_client.yaml

echo
echo 'Starting crond'
echo '---------------'
# exec replaces this shell with crond, so nothing after this line can ever
# run; the trailing `echo` of the original was unreachable and was removed.
exec /usr/sbin/crond -n
| true
|
1c6d0f9b3f2c90d43e331a163c419c13bd481371
|
Shell
|
hackoregon/cropcompass-vagrant
|
/scripts/reload-django-code.bash
|
UTF-8
| 604
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Redeploy the cropcompass Django app from the synced /vagrant folder:
# stop uwsgi, back up the current code, record a diff, copy the new code
# in, run DB migrations, and start uwsgi again.

# Abort on the first failed step so a half-copied tree never gets restarted.
set -e

echo "$(date)" "stopping app server"
sudo service uwsgi stop

echo "$(date)" "backing up old cropcompass Django app"
export STAMP=$(date -Iseconds)
cp -rp ~/cropcompass ~/cropcompass-"${STAMP}"

echo "$(date)" "copying Django app from '/vagrant/django-app' to '~vagrant/cropcompass'"
# diff exits non-zero whenever the trees differ, which is the expected case
# here; don't let that abort the script under `set -e`.
diff -r /vagrant/django-app ~vagrant/cropcompass > ~vagrant/logs/"${STAMP}".diff || true
cp -rp /vagrant/django-app/* ~vagrant/cropcompass/

echo "$(date)" "running migrations"
cd ~vagrant/cropcompass
source ~vagrant/Env/cropcompass/bin/activate
python3 manage.py migrate

echo "$(date)" "starting app server"
sudo service uwsgi start
| true
|
ea78eac6462c81ce6139d8b2a786019455d36c00
|
Shell
|
mateuszmidor/PythonStudy
|
/python-http-client-demo/run_all.sh
|
UTF-8
| 1,306
| 4.21875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash

# Demo runner: checks python3/pip3, prepares a virtualenv, runs the
# program, and tears the environment down again.

# Run tearDown on Ctrl-C so the virtualenv is always deactivated.
trap tearDown SIGINT

# Interpreter and package-manager commands used throughout the script.
PYTHON=python3
PIP=pip3
# Print a section banner: a leading blank line followed by the message in
# bold blue.
# Arguments:
#   $1 - message to display
function stage() {
    # `local` keeps the colour codes and the message from leaking into the
    # caller's environment (the original assigned them as globals).
    local -r BOLD_BLUE="\e[1m\e[34m"
    local -r RESET="\e[0m"
    local msg="$1"

    echo
    echo -e "$BOLD_BLUE$msg$RESET"
}
# Verify that the $PYTHON and $PIP commands exist; print a hint and exit 1
# otherwise. (Name kept as-is, including the typo, because it is called at
# the bottom of the script.)
function checkPrerequsites() {
    stage "Checking prerequisites"

    # `command -v` is the canonical existence test; the original executed
    # the tools and inspected $? on the following line, a fragile pattern.
    if ! command -v "$PYTHON" > /dev/null 2>&1; then
        echo "You need to install python3 to run this program"
        exit 1
    fi
    if ! command -v "$PIP" > /dev/null 2>&1; then
        echo "You need to install pip3 to run this program"
        exit 1
    fi

    echo "Done"
}
# Ensure ./venv exists and is active. On first run this installs virtualenv
# system-wide, creates the environment and installs the project
# requirements into it; later runs simply activate the existing one.
function setupVirtualEnv() {
    stage "Setting up virtualenv"

    if [[ -d venv ]]; then
        # just activate the existing virtualenv
        source ./venv/bin/activate
    else
        # install virtualenv system-wide, then create and populate ./venv
        sudo $PIP install -U virtualenv
        virtualenv --system-site-packages -p $PYTHON ./venv
        source ./venv/bin/activate
        # requirements land inside the freshly activated virtualenv
        $PIP install -r src/requirements.txt
    fi

    echo "Done"
}
# Launch the application's entry point with the (virtualenv) interpreter.
function runProgram() {
    stage "Running program"
    "$PYTHON" src/main.py
    printf '%s\n' "Done"
}
# Leave the virtualenv so the caller's shell environment is restored.
function tearDown() {
    stage "Tear down"
    deactivate # virtualenv shell function installed by activate
    printf '%s\n' "Done"
}
# Main sequence: verify tools, prepare the virtualenv, run, then clean up.
checkPrerequsites
setupVirtualEnv
runProgram
tearDown
| true
|
775e1517094ad4accf395f7d38541fd23d321435
|
Shell
|
olecom/lftpd
|
/etc/rename_files_pre.sh
|
UTF-8
| 307
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
# Uncomment to capture the xtrace output for debugging:
#exec 2>put_debug
set -e -x

# $1 = data-file extension (DATAEXT), $2 = temporary prefix extension
# (PREEXT). With nullglob unset, an unmatched glob stays literal, so a
# loop value starting with '*' means "no file matched" - that is what the
# "*"* case arms detect.
for f in *$1
do case $f in
    "*"*) # no DATAEXT files, list DATAEXT PREEXT prefixed, if any
        # Emit rename commands that strip the PREEXT suffix from any
        # previously suffixed files; exit 1 when nothing matched at all.
        for f in *$1$2
        do case $f in
            "*"*) exit 1;;
            *) echo "mv $f ${f%$2}";;
        esac
        done
        exit
    ;;
    # Park each data file under the temporary suffix and emit the lftp
    # command that will restore its original name.
    *) mv "$f" "$f$2"
       echo "mv $f$2 $f"
    ;;
esac
done >rename_remote_norm.lftp
| true
|
91926d807e482d3d8b0b75e9b79618958c77da79
|
Shell
|
apples/scc
|
/wrf/compile/beninstallformic_smokey.sh
|
UTF-8
| 946
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash

# Build WRF for an Intel MIC (Xeon Phi) system: select the Intel MPI
# compiler wrappers, source the Intel toolchain environment scripts, point
# WRF at its HDF5/NetCDF builds, then do a clean configure + compile of the
# em_real case.

#Define file paths here
IMPI_DIR="/opt/intel/impi"
ICS_DIR="/opt/intel/ics"
HDF5_DIR="/data/mic/wrf/lib/hdf5"
NETCDF_DIR="/data/mic/wrf/lib/netcdf"
MKL_DIR="/opt/intel/ics/composer_xe_2013.1.117/mkl"

#Add the Intel MPI compilers to the PATH
export CC="mpiicc"
export CXX="mpiicpc"
export FC="mpiifort"
export PATH="$PATH:$ICS_DIR/bin:$IMPI_DIR/4.1.0/bin64"
# presumably enables MKL offload to the MIC coprocessor - confirm against
# the Intel MKL documentation for this toolchain version
export MKL_MIC_ENABLE=1

#Use the Intel scripts to set environment variables.
source $IMPI_DIR/4.1.0/bin/mpivars.sh
source $ICS_DIR/bin/compilervars.sh intel64
source $ICS_DIR/bin/ifortvars.sh intel64
source $ICS_DIR/composer_xe_2013.1.117/mkl/bin/mklvars.sh mic

#Tell WRF where its libraries are
export PHDF5="$HDF5_DIR"
export NETCDF="$NETCDF_DIR"
export LD_LIBRARY_PATH="/opt/intel/ics/composer_xe_2013.1.117/mkl/lib/mic:/opt/intel/ics/composer_xe_2013.1.117/mkl/lib/intel64"

# Clean any previous build, configure, swap in the saved custom
# configuration, then compile; the full build log goes to compile.log.
./clean -a
./configure
cp ./custom_configure/configure.wrf .
./compile em_real &> compile.log
| true
|
c4df05801977f4f99126dc0f8f758bd518a4940a
|
Shell
|
Teamprojekt-HSTrier/Bidirektionales-Kommunikationssystem
|
/SMS/files/CRONsendSMS.sh
|
UTF-8
| 712
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Endless polling loop that ships a queued SMS message to every contact
# via gammu, then deletes the queue file.

# `[ true ]` in the original merely tested a non-empty string; say what we
# mean with the `true` builtin.
while true ; do
	# Read the queued message, if any (missing file -> empty string,
	# cat's error message goes to stderr as before).
	sms=$(cat /home/pi/SMS/sendsms)
	if [ -z "$sms" ]
	then
		# no message queued - poll again in 10s
		sleep 10
	else
		# Send the message to every number in the contacts file. Reading
		# line by line replaces the original O(n^2) head|tail-per-line
		# loop; `|| [ -n ... ]` also handles a missing final newline.
		while IFS= read -r SMSnr || [ -n "$SMSnr" ]; do
			# Strip all whitespace so the number is a single clean token.
			SMSnr=$(printf '%s' "$SMSnr" | tr -d '[:space:]')
			# Skip blank lines instead of invoking gammu with no number.
			[ -n "$SMSnr" ] || continue
			sudo gammu-smsd-inject TEXT "$SMSnr" -text "$sms"
		done < /home/pi/STT/KontakteSMS.txt
		# Finally remove the processed message and wait before polling again.
		sudo rm /home/pi/SMS/sendsms
		sleep 10
	fi
done
| true
|
749d861c73c884cd7976bf623dccfe1313155c24
|
Shell
|
SiliconMeeple/sbt-git-stamp
|
/sbt
|
UTF-8
| 367
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Launch sbt with a fixed JVM configuration; build-env is expected to
# define SBT_FILE (the sbt launcher jar) and optionally SBT_EXTRA_PARAMS.
. ./build-env

SBT_BOOT_DIR=$HOME/.sbt/boot/

# mkdir -p is a no-op when the directory already exists, so the original
# `if [ ! -d ... ]` guard around it was redundant.
mkdir -p "$SBT_BOOT_DIR"

# SBT_EXTRA_PARAMS is intentionally unquoted so it can carry multiple
# space-separated JVM flags.
java -Dfile.encoding=UTF8 -Xmx1024M -XX:+CMSClassUnloadingEnabled -XX:+UseCompressedOops -XX:MaxPermSize=768m \
  $SBT_EXTRA_PARAMS \
  -Dbuild.time="$(date)" \
  -Dsbt.boot.directory="$SBT_BOOT_DIR" \
  -jar "$SBT_FILE" "$@"
| true
|
c1d94a9d365e137e918bd7d74cdb6ad0d47df26a
|
Shell
|
obino/appscale
|
/scripts/init-filebeat.sh
|
UTF-8
| 1,101
| 3.71875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash

# Install and configure Filebeat 6.8 to ship AppScale logserver request
# logs to the Logstash endpoint given via `--logstash IP:PORT`.

# Exit on first error and on use of unset variables.
set -e
set -u
# Print the command-line synopsis.
usage() {
    printf '%s\n' "usage: init-filebeat.sh --logstash IP:PORT"
}
# Require exactly: --logstash IP:PORT
if [[ $# == 2 && $1 == '--logstash' ]]; then
    LOGSTASH_LOCATION=$2
else
    usage
    exit 1
fi

# Wait until no other process holds the apt/dpkg locks.
while fuser /var/cache/apt/archives/lock /var/lib/apt/lists/lock /var/lib/dpkg/lock ; do
    echo "Waiting for apt lock"
    sleep 20
done

# Install Filebeat 6.8 from the Elastic download site unless a 6.8.x
# package is already installed.
if ! apt-cache policy filebeat | grep Installed | grep -q ' 6.8'; then
    echo "Installing Filebeat..."
    curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.8.0-amd64.deb
    sudo dpkg -i filebeat-6.8.0-amd64.deb
else
    echo "Filebeat has been already installed"
fi

echo "Configuring Filebeat..."
# Ship the logserver request logs (JSON, one object per line) to Logstash.
cat > /etc/filebeat/filebeat.yml << FILEBEAT_YML
filebeat.inputs:
- type: log
  enabled: true
  paths: ["/opt/appscale/logserver/requests-*"]
  json.keys_under_root: true

output.logstash:
  hosts: ["${LOGSTASH_LOCATION}"]
FILEBEAT_YML

# It's just a flag used in AppServer/../logservice_stub
touch /etc/appscale/elk-enabled

echo "Starting Filebeat service..."
systemctl enable filebeat.service
systemctl start filebeat.service
| true
|
0ef2a197486983be134b144953c9dd8d161ec019
|
Shell
|
srikanthvavila/openstack-opendaylight
|
/liberty-beryllium/createTenantAndVms1.sh
|
UTF-8
| 1,563
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Create an OpenStack tenant (tenant${TNT_ID}) with a matching user, set up
# a keypair, a permissive security group and a private network, then boot
# ${VM_COUNT} cirros VMs on it.

# Tenant number and VM count come from the environment, defaulting to 1.
export TNT_ID=${TNT_ID:-1}
export VM_COUNT=${VM_COUNT:-1}

cd ~/devstack

# Create the tenant and its user while authenticated as admin.
source openrc admin admin ; export OS_PROJECT_NAME=admin
keystone tenant-create --name=tenant${TNT_ID} --enabled=true
keystone user-create --name=user${TNT_ID} --pass=user${TNT_ID} --email=user${TNT_ID}@example.com
keystone user-role-add --user=user${TNT_ID} --role=Member --tenant=tenant${TNT_ID}

# Switch to the new user's credentials for all tenant-scoped resources.
source openrc user${TNT_ID} tenant${TNT_ID} ; export OS_PROJECT_NAME=tenant${TNT_ID} ; export OS_PASSWORD=user${TNT_ID}

# SSH key pair: generated once, registered with nova as demo_key.
if [ ! -f id_rsa_demo.pub ]; then ssh-keygen -t rsa -b 2048 -N '' -f id_rsa_demo; fi
nova keypair-add --pub-key id_rsa_demo.pub demo_key

# Security group allowing all ICMP plus all TCP/UDP ports from anywhere.
nova secgroup-create sec1 sec1
nova secgroup-add-rule sec1 icmp -1 -1 0.0.0.0/0
for x in tcp udp; do nova secgroup-add-rule sec1 ${x} 1 65535 0.0.0.0/0 ; done

# Private network "int" with a DHCP-enabled subnet.
neutron net-create int
neutron subnet-create --gateway=2.0.0.254 --name=subint int 2.0.0.0/24 --enable-dhcp

# Pick the cirros UEC image and the new network, then boot VM_COUNT
# instances named ${TNT_ID}_vmN, pausing briefly between boots.
IMAGE=$(nova image-list | grep 'cirros.*uec\s' | tail -1 | awk '{print $2}')
NETID=$(neutron net-list | grep -w int | awk '{print $2}')
for x in `seq 1 ${VM_COUNT}` ; do \
    VMNAME="vm${x}"
    echo creating ${TNT_ID}_${VMNAME}
    nova boot --poll --flavor m1.nano --image ${IMAGE} --nic net-id=${NETID} \
        --security-groups sec1 --key-name demo_key \
        ${TNT_ID}_${VMNAME}
    sleep 5
done

# Handy credential-switching one-liners for later shells:
#
# source openrc user1 tenant1 ; export OS_PASSWORD=user1
# source openrc user2 tenant2 ; export OS_PASSWORD=user2
# source openrc user${TNT_ID} tenant${TNT_ID} ; export OS_PASSWORD=user${TNT_ID}
# source openrc admin admin
#
| true
|
3291c3689513974b1007d56cb2d79f134d5ff89b
|
Shell
|
strategist922/ezplot
|
/files/opt/chia/bin/plotdst
|
UTF-8
| 526
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Emit the next plot destination ("<path>/farm") from the queue of
# destinations ranked by free space, regenerating the queue when drained.

mkdir -p /tmp/ezplot/

# Create the destination queue file on first use (an unconditional touch
# would bump the mtime of an existing file, so keep the existence guard).
if [[ ! -f "/tmp/ezplot/dsts" ]]; then
    touch /tmp/ezplot/dsts
fi

# Regenerate the queue from `ezplot plotspace` when it is empty.
# `wc -l <file` replaces the original `cat file | wc -l` pipeline.
if [[ $(wc -l < /tmp/ezplot/dsts) -eq 0 ]]; then
    ezplot plotspace > /tmp/ezplot/dsts
fi

# Pop the last entry off the queue; the destination path is the second
# comma-separated field.
next=$(tail -n 1 /tmp/ezplot/dsts)
next_dst=$(echo "${next}" | cut -d',' -f2)
sed -i '$ d' /tmp/ezplot/dsts

# The original tested `"${next_dst}/farm" == "/farm"`, i.e. an empty path;
# test that directly.
if [[ -z "${next_dst}" ]]; then
    echo "[$(date)] ERROR: No available destinations found. Possibly out of space."
    exit 1
fi
echo "${next_dst}/farm"
|
f7c8517fd27297bafe859a076130733c27c6c5b6
|
Shell
|
cheyunhua/k8s_adm
|
/k8s_env.sh
|
UTF-8
| 1,637
| 2.59375
| 3
|
[] |
no_license
|
#/bin/bash
# NOTE(review): the shebang above is missing its '!' (should be
# '#!/bin/bash'), so it is a plain comment - confirm how this script is
# invoked before relying on it.

# Offline Kubernetes 1.9 node setup: install Docker from local RPMs,
# disable firewall/SELinux/swap, preload the kubeadm images from tarballs,
# and install the kubelet/kubectl/kubeadm packages.

K8S_HOME=/root/k8s
DIST=$K8S_HOME/dist

#install docker
# Install from the local RPMs only when `docker version` fails.
docker version || yum install -y $DIST/docker-ce-se*.rpm $DIST/docker-ce-17*.rpm
mkdir -p /etc/docker
cat << EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["http://ef017c13.m.daocloud.io"],
  "live-restore": true
}
EOF
systemctl start docker && systemctl enable docker

#stop firewall and selinux
systemctl stop firewalld && systemctl disable firewalld
# Persist SELINUX=disabled only if it is not already set in the config.
if ! cat /etc/selinux/config | grep '^SELINUX=disabled';then
echo SELINUX=disabled >> /etc/selinux/config
fi
setenforce 0
swapoff -a

# Make bridged traffic visible to iptables (required for kube-proxy).
echo "
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
" >> /etc/sysctl.conf
sysctl -p

#load docker images
docker load < $DIST/docker_images/etcd-amd64_v3.1.10.tar
docker load < $DIST/docker_images/flannel\:v0.9.1-amd64.tar
docker load < $DIST/docker_images/k8s-dns-dnsmasq-nanny-amd64_v1.14.7.tar
docker load < $DIST/docker_images/k8s-dns-kube-dns-amd64_1.14.7.tar
docker load < $DIST/docker_images/k8s-dns-sidecar-amd64_1.14.7.tar
docker load < $DIST/docker_images/kube-apiserver-amd64_v1.9.0.tar
docker load < $DIST/docker_images/kube-controller-manager-amd64_v1.9.0.tar
docker load < $DIST/docker_images/kube-scheduler-amd64_v1.9.0.tar
docker load < $DIST/docker_images/kube-proxy-amd64_v1.9.0.tar
docker load < $DIST/docker_images/pause-amd64_3.0.tar

#install k8s
cd $DIST
rpm -ivh socat-1.7.3.2-2.el7.x86_64.rpm
rpm -ivh kubernetes-cni-0.6.0-0.x86_64.rpm kubelet-1.9.9-9.x86_64.rpm kubectl-1.9.0-0.x86_64.rpm
rpm -ivh kubectl-1.9.0-0.x86_64.rpm
rpm -ivh kubeadm-1.9.0-0.x86_64.rpm
| true
|
3fb5998b9dac30474f830c2970a81b24bfe37155
|
Shell
|
ag-archlinux/arch
|
/install1.sh
|
UTF-8
| 4,764
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
##### Created by: ag
##### File: install1.sh
##### Arch Linux base installer: partitions /dev/sda for BIOS or UEFI,
##### installs the base system and configures it inside a chroot.
##### --------------------------------------------------
##### INPUTS #####
##### hostname
echo -n "Hostname: "
read HOSTNAME
: "${HOSTNAME:?"Missing hostname"}"
##### root password
echo -n "Root password: "
read -s ROOT_PASSWORD
echo
echo -n "Repeat root password: "
read -s ROOT_PASSWORD_REPEAT
echo
# The mismatch check must run in the current shell: `exit` inside `( ... )`
# only leaves the subshell, so the original `|| ( echo ...; exit 1; )`
# never actually aborted the script.
if [ "$ROOT_PASSWORD" != "$ROOT_PASSWORD_REPEAT" ]; then
    echo "Root passwords did not match"
    exit 1
fi
##### root space
read -p "What is your ROOT_SPACE (G)? " ROOT_SPACE
##### timezone
TIMEZONE="Europe/Bratislava"
##### locale
LOCALE="en_US.UTF-8"
##### drive
DRIVE="/dev/sda"
##### --------------------------------------------------
##### 1. Pre-Installation
##### a) Set the keyboard layout
loadkeys us
##### b) Verify the boot mode
# /sys/firmware/efi/efivars only exists when booted via UEFI.  The test
# needs spaces around the brackets: `[-d ...]` is not a command, so the
# original always fell through to the BIOS branch.
if [ -d "/sys/firmware/efi/efivars" ]; then
echo "UEFI"
BOOT="UEFI"
else
echo "BIOS"
BOOT="BIOS"
fi
##### c) Connect to the Internet
# Assignments must not happen inside a subshell `( ... )` or they are lost.
ping -q -w1 -c1 google.com &>/dev/null && CONN="CONNECTED" || CONN="NOT_CONNECTED"
while [ "$CONN" != "CONNECTED" ]; do
echo -e "\033[0;36m'You are not connected to the internet!'\033[0;0m"
ip link
read -p "What is name of your wifi? (number:name: ...) : " WIFI
wifi-menu -o "$WIFI"
ping -q -w1 -c1 duckduckgo.com &>/dev/null && CONN="CONNECTED" || CONN="NOT_CONNECTED"
done
echo "You are connected to the internet!"
##### d) Update the system clock
timedatectl set-ntp true
##### e) Partition the disks
# swap sized to RAM + ~128 MiB so hibernation fits
SWAP=$(free --mebi | awk '/Mem:/ {print $2}')
SWAP_SPACE=$(( $SWAP + 130 ))MiB
if [ "$BOOT" = "BIOS" ]; then
echo "BIOS"
# Prepare the disk: sda1 root, sda2 swap (type 82), sda3 home.
fdisk -l
cat<<EOF | fdisk /dev/sda
n
p
1
+${ROOT_SPACE}G
n
p
2
+${SWAP_SPACE}
t
2
82
n
p
3
w
EOF
else
echo "UEFI"
fdisk -l
# Prepare the disk: sda1 ESP, sda2 swap, sda3 root, sda4 home.
cat<<EOF | fdisk /dev/sda
n
p
1
+500M
t
ef
n
p
2
+${SWAP_SPACE}
t
2
82
n
p
3
+${ROOT_SPACE}G
n
p
4
w
EOF
fi
##### f) Format the partitions & Mount the file systems
if [ "$BOOT" = "BIOS" ]; then
mkfs.ext4 /dev/sda1
mount /dev/sda1 /mnt
mkswap /dev/sda2
swapon /dev/sda2
mkfs.ext4 /dev/sda3
mkdir -p /mnt/home
mount /dev/sda3 /mnt/home
else
yes | eval mkfs.fat -F32 /dev/sda1
mkfs.ext4 /dev/sda3
mkfs.ext4 /dev/sda4
mkswap /dev/sda2
swapon /dev/sda2
mount /dev/sda3 /mnt
mkdir -p /mnt/boot
mount /dev/sda1 /mnt/boot
mkdir -p /mnt/home
mount /dev/sda4 /mnt/home
fi
##### --------------------------------------------------
##### 2. Installation
##### a) Select the mirrors
cp /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.backup
curl -s "https://www.archlinux.org/mirrorlist/?country=SK&country=CZ&protocol=https&use_mirror_status=on" | sed -e 's/^#Server/Server/' -e '/^#/d' > /etc/pacman.d/mirrorlist
##### b) Install the base packages
pacstrap /mnt base base-devel
##### 3. Configure the system
##### a) Fstab
cat /mnt/etc/fstab
genfstab /mnt >> /mnt/etc/fstab
##### b) Chroot
# The heredoc is unquoted on purpose: $TIMEZONE/$LOCALE/$HOSTNAME/... are
# expanded by the outer shell before the chroot runs.
arch-chroot /mnt /bin/bash <<EOF
##### 1) Time zone
ln -sf /usr/share/zoneinfo/$TIMEZONE /etc/localtime
hwclock --systohc
##### 2) Locale
sed -i "s|#\($LOCALE.*\)\$|\1|" /etc/locale.gen
locale-gen
echo "LANG=$LOCALE" >> /etc/locale.conf
##### 3) Hostname
echo $HOSTNAME >> /etc/hostname
echo "127.0.0.1 localhost" >> /etc/hosts
echo "::1 localhost" >> /etc/hosts
echo "127.0.0.1 $HOSTNAME.localdomain $HOSTNAME" >> /etc/hosts
##### 4) Network configuration
# configure network manager
pacman --noconfirm --needed -S networkmanager
systemctl enable NetworkManager
systemctl start NetworkManager
#pacman --noconfirm --needed -S iw wpa_supplicant dialog wpa-actiond
#systemctl enable dhcpcd
##### 5) Initramfs
mkinitcpio -p linux
##### 6) Root password
echo "root:$ROOT_PASSWORD" | /usr/sbin/chpasswd
##### 7) Boot loader
# NOTE(review): grub is installed for BIOS (i386-pc) even when BOOT=UEFI;
# a UEFI system needs grub-install --target=x86_64-efi plus efibootmgr --
# confirm before relying on the UEFI path.
pacman --noconfirm --needed -S grub os-prober
grub-install --recheck --target=i386-pc $DRIVE
grub-mkconfig -o /boot/grub/grub.cfg
##### Exit chroot
exit
EOF
##### c) Unmount all the partitions
umount -R /mnt
##### e) Restart the machine
rm install1.sh
reboot
##### --------------------------------------------------
| true
|
9539c33288f95b8d0ceeea5f0e0d3eb25b296649
|
Shell
|
robb-broome/dotfiles
|
/bashrc
|
UTF-8
| 3,286
| 2.9375
| 3
|
[] |
no_license
|
# Timestamped shell history with an effectively unbounded size.
HISTTIMEFORMAT='%F %T '
HISTFILESIZE=1000000000
HISTSIZE=1000000
# Compress the cd, ls -l series of commands.
alias lc="cl"
function cl () {
if [ $# = 0 ]; then
cd && ll
else
cd "$*" && ll
fi
}
# trigger an .rvmrc if it's there
cd .
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# some more ls aliases
# NOTE(review): -G/-T here are BSD/macOS ls flags; on GNU ls, -T takes an
# argument -- confirm the target platform.
alias ll='ls -AGlFT'
alias la='ls -A'
alias l='ls -CF'
alias r='rails'
# List only directories (ld) / only dot-directories (ldot), without the
# trailing slash.
alias ld='for i in $(ls -d */); do echo ${i%%/}; done'
alias ldot='for i in $(ls -d .*/); do echo ${i%%/}; done'
# Project shortcuts: jump into oozakazoo under the matching ruby via rvm.
alias cozj="cd ~/work/rails/oozakazoo; rvm jruby-1.5.0.RC1; vim"
alias coz="cd ~/work/rails/oozakazoo; rvm ruby-1.9.2;"
alias neotest="cd ~/dotfiles; ./neotest.sh"
alias r3="rvm ruby-1.9.2@rails3"
# set up a tunnel for browsing via tunnel to home machine
# note: must also use the 'safetunnel' network configuration:w
alias safebrowse='ssh -D 8080 -f -C -q -N robb@robbinevanston.dyndns.org'
alias timesheet='cd ~/TimeSheet; open sears_timesheet.numbers'
export EDITOR='/usr/bin/vim'
export NODE_PATH="/usr/local/lib/node"
export PATH=/usr/local/share/npm/bin:$PATH
# rails shortcuts from http://blog.envylabs.com/2010/07/common-rails-command-shortcuts/
# be = "bundle exec", ber = "bundle exec rails", berg = "... generate"
function be {
bundle exec $@
}
function ber {
bundle exec rails $@
}
function berg {
bundle exec rails generate $@
}
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
# stop screen from complaining with wuff-wuff!
alias screen='TERM=screen screen'
#tmux change
[[ $TERM == "screen" ]] && export -p TERM="screen-256color"
# use vim style editing on the command line
#set -o vi
#function parse_git_dirty {
# [[ $(git status 2> /dev/null | tail -n1) != "nothing to commit (working directory clean)" ]] && echo "*"
#}
#function parse_git_branch {
# git branch --no-color 2> /dev/null | sed -e '/^[^*]/d' -e "s/* \(.*\)/[\1$(parse_git_dirty)]/"
#}
#export PS1='\h:\W$(parse_git_branch "[\[\e[0;32m\]%s\[\e[0m\]\[\e[0;33m\]$(parse_git_dirty)\[\e[0m\]]")$ '
# see: http://stufftohelpyouout.blogspot.com/2010/01/show-name-of-git-branch-in-prompt.html
# see also: http://superuser.com/questions/31744/how-to-get-git-completion-bash-to-work-on-mac-os-x
# see also: http://stackoverflow.com/questions/347901/what-are-your-favorite-git-features-or-tricks
# install http://macports.org/
# sudo port selfupdate
# sudo port install git-core +bash_completion
#if [ -f /opt/local/etc/bash_completion ]; then
# . /opt/local/etc/bash_completion
# PS1='[\h \W$(__git_ps1 " (%s)")]\$ '
#fi
| true
|
327ddacc560c636eedc54e1bc228cd070b964560
|
Shell
|
unipheas/MAVSDK-Swift
|
/tools/generate_from_protos.bash
|
UTF-8
| 2,271
| 3.671875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Generate the Swift proto/gRPC stubs and the MAVSDK wrapper sources from
# the proto definitions.  Requires protoc on PATH; builds grpc-swift 0.11.0
# in a temp dir for its protoc plugins, then runs the dcsdk plugin from a
# Python virtualenv to emit the SDK wrappers.
set -e

# >/dev/null keeps `command -v` from printing the resolved path on success.
command -v protoc >/dev/null || { echo >&2 "Protobuf needs to be installed (e.g. '$ brew install protobuf') for this script to run!"; exit 1; }

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PB_PLUGINS_DIR=${PB_PLUGINS_DIR:-"${SCRIPT_DIR}/../proto/pb_plugins"}
PROTO_DIR=${PROTO_DIR:-"${SCRIPT_DIR}/../proto/protos"}
OUTPUT_DIR=${OUTPUT_DIR:-"${SCRIPT_DIR}/../Sources/MAVSDK-Swift/Generated"}

# Validate the directories *before* computing PLUGIN_LIST: under `set -e`
# a failing `cd "${PROTO_DIR}"` inside the command substitution would abort
# the script before these friendly error messages could be printed.
if [ ! -d "${PROTO_DIR}" ]; then
    echo "Script is not in the right location! It will look for the proto files in '${PROTO_DIR}', which doesn't exist!"
    exit 1
fi

if [ ! -d "${OUTPUT_DIR}" ]; then
    echo "Script is not in the right location! It is made to generate the files in '${OUTPUT_DIR}', which doesn't exist!"
    exit 1
fi

# One plugin per top-level directory under PROTO_DIR (trailing '/' stripped).
PLUGIN_LIST=$(cd "${PROTO_DIR}" && ls -d */ | sed 's:/*$::')

echo ""
echo "-------------------------------"
echo "Generating pb and grpc.pb files"
echo "-------------------------------"
echo ""

TMP_DIR=${TMP_DIR:-"$(mktemp -d)"}
echo "Temporary directory for this build: ${TMP_DIR}"

if [ ! -d "${TMP_DIR}/grpc-swift" ]; then
    echo ""
    echo "--- Cloning grpc-swift"
    echo ""
    git -C "${TMP_DIR}" clone https://github.com/grpc/grpc-swift -b 0.11.0
fi

cd "${TMP_DIR}/grpc-swift" && make

# ${PLUGIN_LIST} is deliberately unquoted: one word per plugin name.
for plugin in ${PLUGIN_LIST}; do
    protoc "${plugin}.proto" -I"${PROTO_DIR}" -I"${PROTO_DIR}/${plugin}" --swift_out="${OUTPUT_DIR}" --swiftgrpc_out="${OUTPUT_DIR}" --swiftgrpc_opt=TestStubs=true --plugin=protoc-gen-swift="${TMP_DIR}/grpc-swift/protoc-gen-swift" --plugin=protoc-gen-swiftgrpc="${TMP_DIR}/grpc-swift/protoc-gen-swiftgrpc"
done

echo ""
echo "-------------------------------"
echo "Generating the SDK wrappers"
echo "-------------------------------"
echo ""

# First run only: create the venv and install the pb_plugins requirements.
if [ ! -d "${PB_PLUGINS_DIR}/venv" ]; then
    python3 -m venv "${PB_PLUGINS_DIR}/venv"
    source "${PB_PLUGINS_DIR}/venv/bin/activate"
    pip install -r "${PB_PLUGINS_DIR}/requirements.txt"
    pip install -e "${PB_PLUGINS_DIR}"
fi

source "${PB_PLUGINS_DIR}/venv/bin/activate"

export TEMPLATE_PATH=${TEMPLATE_PATH:-"${SCRIPT_DIR}/../templates"}
for plugin in ${PLUGIN_LIST}; do
    protoc "${plugin}.proto" --plugin=protoc-gen-custom="$(which protoc-gen-dcsdk)" -I"${PROTO_DIR}" -I"${PROTO_DIR}/${plugin}" --custom_out="${OUTPUT_DIR}" --custom_opt=file_ext=swift
done
| true
|
9b0ba24d218c7aaf64a15f2588ea3b07a2ee4fee
|
Shell
|
petomajci/magneticDipoles
|
/create_feature_list_2JHC.sh
|
UTF-8
| 2,965
| 2.90625
| 3
|
[] |
no_license
|
# Generate the feature-name list for the 2JHC coupling model and join it
# with per-feature statistics and importances.  $1 ($CODE) selects the
# dataset code used in the generated file names ($CODE.xxx must exist).
CODE=$1
NBINS=10
# Every line echoed inside this group becomes one feature name in
# feature.list.$CODE; the order must match the feature-matrix columns, so
# do not reorder anything below.
(echo id
echo id1
echo id2
echo molecule
echo TARGET
echo distance HX
echo distance CX
echo distance CH
echo angle HXC
echo charge H
echo charge C
echo charge X
echo X=H
echo X=C
echo X=N
echo X=F
echo X=O
# Neighbor counts around X and C, per element type.
for TT in X C; do
for mol in H C N F O all; do
echo $TT: number of neighbors of type $mol;
done
done
for mol in H C N F O all; do
echo C: number of 2nd level neighbors of type $mol;
done
# Binned neighbor-distance histograms ($NBINS bins each).
for TT in C X; do
for mol in H C N F O all; do
for((i=1;i<=$NBINS;i++)); do
echo $TT: distace bins to neighbors of type $mol, bin nr. $i;
done;
done
done
# Binned angle and torsion histograms around the H-X-C fragment.
for TT in H C; do
for mol in H C N F O all; do
for((i=1;i<=$NBINS;i++)); do
echo angle $TT,X,$mol bin nr. $i;
done;
done
done
for mol in H C N F O all; do
for((i=1;i<=$NBINS;i++)); do
echo torsion H,X,C,$mol bin nr. $i;
done;
done
for TT in C X; do
for type in min max mean; do
echo $type charge of $TT neighbors
done
done
for TT in C X; do
echo Is $TT cyclic
echo Number of Cyclic neighbors of $TT
done
# Symmetry-function style descriptors (G1/G2 families) per center atom;
# d2 maps i=2..10 onto the range -5..5 in steps of 1.25.
for X in H C X; do
for mol in H C N F O all; do
for((i=2;i<=10;i++)); do
d2=$(echo "scale=2; -5 + ($i-2)/8*10" |bc)
echo $X: G2 descriptor for $mol at distance ${i}A;
echo $X: G2b descriptor for $mol at distance ${d2}A;
echo $X: G2c descriptor for $mol at distance ${i}A;
done;
echo $X: G1:5A descriptor for $mol;
echo $X: G1b:5A descriptor for $mol;
echo $X: G1c:5A descriptor for $mol;
for level in 1; do
echo $X: Min distance-$level for $mol;
echo $X: Min distanceB-$level for $mol;
echo $X: Min distanceC-$level for $mol;
echo $X: Min distanceD-$level for $mol;
echo $X: Charge-$level of min distance $mol;
# if [ $X == H ]; then
# echo $X: Min torsion-$level for $mol;
# echo $X: Min angle-$level for $mol;
# fi
done;
done;
done
for distance in 3A 5A; do
for X in H C X; do
for mol in H C N F O all; do
for((i=1;i<=15;i++)); do
echo For $X: charges of atoms $mol within $distance, bin nr. $i;
done;
done
done
done
for TT in H C X; do
for mol in H C N F O all; do
for((i=1;i<=$NBINS;i++)); do
echo $TT: distace bins to 2nd level neighbors of type $mol, bin nr. $i;
done;
done
done
#for TT in C X; do
# for mol in H C N F O all; do
# for((i=1;i<=15;i++)); do
# echo For $TT: charges of its $mol neighbors, bin nr. $i;
# done;
# done
#done
) > feature.list.$CODE
# | head -n 910 > feature.list
#exit
# Join the per-feature stats in $CODE.xxx (made tab-separated) with the
# generated names, keep rows past the header whose 2nd field is > 100, and
# renumber them.
sed -i 's/ /\t/g' $CODE.xxx
paste $CODE.xxx feature.list.$CODE > feature2.list
awk -v FS="\t" '{if(NR>=6 && $2!="" && $2>100) {i++; print i"\t"$0}}' feature2.list > feature3.list
# NOTE(review): 'feature_iportance.txt' looks like a typo for
# 'feature_importance.txt' -- confirm against the producing script before
# renaming anything.
awk -v FS="," '{printf("%d\n",$2)}' feature_iportance.txt > feature2_iportance.txt
paste feature3.list feature2_iportance.txt
| true
|
ebb78425d4d5cdbf677e1145e1810b5701d80a2c
|
Shell
|
sivaraj2112/xcp-16.4-docker
|
/bam-db/init-db-user.sh
|
UTF-8
| 363
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision the BAM database inside the Postgres container: create the
# application role and database, then grant the role full access.  All
# settings are overridable via the environment.
set -e

# Fill in defaults for any unset/empty connection settings.
: "${BAM_DB_NAME:=bamdata}"
: "${BAM_DB_USER:=bam}"
: "${BAM_DB_PWD:=password}"

psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL
CREATE USER ${BAM_DB_USER} WITH PASSWORD '${BAM_DB_PWD}';
CREATE DATABASE ${BAM_DB_NAME};
GRANT ALL PRIVILEGES ON DATABASE ${BAM_DB_NAME} TO ${BAM_DB_USER};
EOSQL
| true
|
97fc8a4dfa9daacfe4c4219140e9380bb76389fb
|
Shell
|
audun/factorio-aloneinthedark
|
/setup_dev.sh
|
UTF-8
| 439
| 3.125
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Package the Factorio mod in the current directory into <name>_<version>.zip
# (name and version scraped from info.json) and install it into the local
# Factorio mods folder for quick dev iteration.

# NOTE(review): this line-oriented grep/sed JSON scraping assumes one key
# per line in info.json; jq would be more robust if it can be assumed
# installed.
MOD=$(grep \"name\" info.json | cut -f2 -d: | sed 's/[\", ]//g')
VERSION=$(grep \"version\" info.json | cut -f2 -d: | sed 's/[\", ]//g')
NAME=${MOD}_$VERSION
ZIP=$NAME.zip

# Stage the release files, drop editor backup files, and build the zip.
mkdir -p "$NAME"
cp -prv info.json *.lua locale *.txt *.md thumbnail.png "$NAME"/
rm -f "$ZIP"
find "$NAME"/ -name '*~' -delete
zip -r "$ZIP" "$NAME"

# Install into the local Factorio mods directory and unpack for live
# testing.  Guard the cd: without it, a failed cd would unzip into the
# source tree instead.
cp "$ZIP" ~/Library/Application\ Support/factorio/mods/
cd ~/Library/Application\ Support/factorio/mods/ || exit 1
unzip -o "$ZIP"
| true
|
716dbf328967cdf24b13b89c34bc24f70e75509f
|
Shell
|
stephenway/sysconfig
|
/bin/dns
|
UTF-8
| 741
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# dns -- print a quick DNS / whois summary for a domain.
# Usage: dns DOMAIN.COM
if [ -n "$1" ]; then
# Use a private temp file instead of a fixed, predictable ~/.whois.tmp
# (avoids races/clobbering between concurrent runs); the trap cleans it up
# on any exit path.
whois_tmp=$(mktemp) || exit 1
trap 'rm -f "$whois_tmp"' EXIT
echo ""
echo "<----- Domain Results ----->"
echo ""
echo "Nameservers for $1:"
dig ns "$1" +short
echo ""
echo "IP address for $1:"
dig "$1" +short
echo ""
echo "MX records for $1:"
dig mx "$1" +short
echo ""
echo "IP address for MX records:"
# The outer expansion is deliberately unquoted so each MX host becomes a
# separate argument to dig.
dig $(dig mx "$1" +short) +short
whois "$1" > "$whois_tmp"
echo ""
echo "Important Dates:"
grep Expiration "$whois_tmp"
grep "Updated Date:" "$whois_tmp"
echo ""
echo "Administrative Contact:"
awk '/Administrative\ Contact/ {getline; print}' "$whois_tmp"
echo ""
echo "$1 Status:"
grep -i "status:" "$whois_tmp"
echo ""
echo "<----- End of Results ----->"
echo ""
else
echo " I need a domain"
echo " Usage: $0 DOMAIN.COM"
fi
| true
|
8069af5ae8b172754ded7f98be806a7918de747c
|
Shell
|
Vash2593/git-bin
|
/bin/git-list-sb
|
UTF-8
| 688
| 4.09375
| 4
|
[] |
no_license
|
#! /bin/sh
# git-list-sb -- list the names of all git submodules declared in the
# .gitmodules file of the current directory, one per line.
set -e

me=$(basename "$0")

unset GREP_OPTIONS

# Print each argument as a "<prog>: <msg>" line on stderr.
stderr ()
{
local i
for i
do
echo >&2 "$me: $i"
done
}

# error STATUS MSG... -- report MSG... on stderr and exit with STATUS.
error ()
{
local sta=$1
shift

stderr "$@"
exit $sta
}

usage() {
cat <<EOF
usage: $0 <Options> [submodule...]
List all submodules
Options:
-h, --help Display this message and exit successfully.
EOF
exit $1
}

# Print submodule names; silently succeed when no .gitmodules exists.
work() {
if ! [ -f .gitmodules ]; then
exit
fi
# Extract NAME from each '[submodule "NAME"]' section header.
grep -e "^\[submodule.*" .gitmodules | sed -re 's/\[submodule "(.*)"\]/\1/'
}

for opt
do
case $opt in
(-h|--help)
usage 0
;;
(*)
error 42 "No arguments needed"
;;
esac
done

work
| true
|
eefe5cd8dbeed834d746f572dedf347675914f06
|
Shell
|
home-things/rpi-light-ctr
|
/src/run
|
UTF-8
| 127
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Launch ./isr from this script's own directory, appending its stdout to
# ./log and highlighting its stderr in yellow on the terminal.
dir=$(dirname "$0")
# Guard the cd so ./isr never runs (or logs) in the wrong directory.
cd "$dir" || exit 1
# Process substitution colorizes stderr line by line; `read -r` keeps any
# backslashes in the error lines intact.
./isr >> log 2> >(while read -r line; do echo -e "\e[01;33m$line\e[0m" >&2; done)
| true
|
48e4bca29380063bd58f909e81636876bae6be78
|
Shell
|
owncloud/administration
|
/jenkins/obs_integration/import_deb.sh
|
UTF-8
| 5,713
| 3.75
| 4
|
[] |
no_license
|
#! /bin/bash
# This script import a debian binary package into obs.
# A data.tar.gz and all the required debian.* files are created, so that the package
# can pass a normal build cycle, as if it were source. The contents of the data tar is simply copied through unchanged.
#
# Requires:
# sudo apt-get install libdistro-info-perl
# sudo apt-get install osc
#
# FIXME: cannot directly handle multiple architctures.
# supports both i586 and x86_64, manually adapted:
# - merge the data tar as data-$arch.tar.gz into data.tar.gz
# - create a debian.NAME.dirs file with all possible dirs from both.
# - edit NAME.dsc file to list both architectures.
# - edit debian.NAME.install so that wildcards are used for the
# architectures and no directories are listed.
#
# Example: isv:ownCloud:devel:Qt562/libdouble-conversion-dev
# wget http://security.ubuntu.com/ubuntu/pool/universe/q/qttools-opensource-src/qttools5-dev_5.5.1-3build1_amd64.deb
#
# 2017-01-21, jw@owncloud.com -- double tar added to support multiarch later.
# -- support for some optional maintainer scripts.
#
# https://launchpad.net/ubuntu/xenial/+package/libqt5designer5
default_base_url=http://security.ubuntu.com/ubuntu/pool/universe
url=$1
if [ -z "$url" ]; then
cat <<EOF
URL or path of debian package needed. Please browse for inspiration:
$default_base_url
Example usage:
firefox https://launchpad.net/ubuntu/+source/qttools-opensource-src/
cd ~/src/obs/isv:ownCloud:devel:Ubuntu_16.04_Universe
osc mkpac qttools5-dev-tools
cd qttools5-dev-tools
$0 q/qttools-opensource-src/qttools5-dev-tools_5.5.1-3build1_amd64.deb
osc ci -m '$0 q/qttools-opensource-src/qttools5-dev-tools_5.5.1-3build1_i386.deb'
cd ~/src/obs/isv:ownCloud:devel:Ubuntu_16.04_Universe:i386
osc mkpac qttools5-dev-tools
cd qttools5-dev-tools
$0 q/qttools-opensource-src/qttools5-dev-tools_5.5.1-3build1_i386.deb
osc ci -m '$0 q/qttools-opensource-src/qttools5-dev-tools_5.5.1-3build1_i386.deb'
EOF
exit 1
fi
# A relative argument that is not a local file is treated as a path below
# the default Ubuntu pool URL.
if [[ ! $url =~ '://' ]]; then
if [ ! -f $url ]; then
url=$default_base_url/$url
fi
fi
# Fetch (or copy) the .deb into a per-invocation temp dir.
deb_in_pkg_name=$(echo "$url" | sed -e 's@.*/@@')
tmpdir=/tmp/import$$
tmpfile=$tmpdir/$deb_in_pkg_name
mkdir -p $tmpdir
if [ -f $url ]; then
cp $url $tmpfile
else
wget $url -O $tmpfile
fi
echo $tmpfile
# Split NAME_VERSION_ARCH.deb into its three components.
name=$(echo $deb_in_pkg_name | sed -e 's@\(.*\)_\(.*\)_.*@\1@')
## version includes the buildrelease number. E.g. 5.5.1-3build1
version=$(echo $deb_in_pkg_name | sed -e 's@\(.*\)_\(.*\)_.*@\2@')
## architecture amd64 or i386
arch=$(echo $deb_in_pkg_name | sed -e 's@.*_\(.*\)_\([a-z0-9]*\).*@\2@')
echo name: $name
echo version: $version
echo arch: $arch
# A .deb is an ar(1) archive: debian-binary + control.tar.gz + data.tar.xz.
# Repack the payload as data-$arch.tar.gz and nest it inside data.tar.gz so
# further architectures can be merged in later (see FIXME above).
rm -f data-*.tar.gz
ar x $tmpfile
tar xf control.tar.gz
rm -f control.tar.gz
rm -f debian-binary
xzcat < data.tar.xz | gzip > data-$arch.tar.gz
rm -f data.tar.xz
tar zcvf data.tar.gz data-*.tar.gz
osc add data.tar.gz
# The install list is simply every path contained in the payload.
if [ ! -f debian.$name.install ]; then
tar tf data-$arch.tar.gz | sed -e 's@^\./@@' -e 's@^/@@' > debian.$name.install
osc add debian.$name.install
fi
rm -f data-$arch.tar.gz
if [ ! -f debian.changelog ]; then
debchange -c debian.changelog --create --distribution stable -v ${version} --package $name "created with $0 $url"
osc add debian.changelog
fi
# Synthesize a source-style debian/control from the binary control file.
if [ ! -f debian.control ]; then
# dpkg-deb -I $deb_in_pkg_name | sed -e 's@^ @@' -e 's@^ @ @' | sed -n -e '/^Package:/,$p' > debian.control
echo "Source: $name" > debian.control
grep '^Maintainer: ' < control >> debian.control
grep '^Section: ' < control >> debian.control
grep '^Priority: ' < control >> debian.control
echo "" >> debian.control
echo "Package: $name" >> debian.control
grep -v '^Source: ' < control | grep -v '^Maintainer: ' | grep -v '^Original-Maintainer: ' | grep -v '^Installed-Size: ' | grep -v '^Package: ' | grep -v '^Version: ' >> debian.control
osc add debian.control
fi
# Minimal .dsc so the OBS debtransform machinery accepts the package.
if [ ! -f $name.dsc ]; then
echo "Format: 1.0" > $name.dsc
echo >> $name.dsc "Source: $name"
echo >> $name.dsc "Binary: $name"
echo >> $name.dsc "Version: ${version}"
grep < debian.control >> $name.dsc "^Maintainer: "
grep < debian.control >> $name.dsc "^Uploaders: "
grep < debian.control >> $name.dsc "^Homepage: "
grep < debian.control >> $name.dsc "^Architecture: "
echo >> $name.dsc "Build-Depends: debhelper (>= 7)"
echo >> $name.dsc "Standards-Version: 3.9.4"
echo >> $name.dsc "# DEBTRANSFORM-RELEASE: 0"
osc add $name.dsc
fi
if [ ! -f debian.compat ]; then
echo 9 > debian.compat
osc add debian.compat
fi
if [ ! -f debian.rules ]; then
# override_dh_shlibdeps avoids dependency errors like this:
# dpkg-shibdeps: error couldn't find library libQt5WebKit.so.5 ...
#
# override_dh_auto_install copies the data.tar
#
# TODO: check for more useless or problematic default actions.
#
cat << EOF > debian.rules
#!/usr/bin/make -f
# -*- makefile -*-
export DH_VERBOSE=1
SHELL=/bin/bash
%:
dh \$@
override_dh_shlibdeps:
echo skipping dh_shlibdeps
override_dh_auto_install:
mkdir -p \$(CURDIR)/debian/tmp
dh_auto_install -- INSTALL_ROOT=\$(CURDIR)/debian/tmp
tar xf /usr/src/packages/SOURCES/data.tar.gz
tar xf data-\$(DEB_BUILD_ARCH).tar.gz -C \$(CURDIR)/debian/tmp
EOF
osc add debian.rules
fi
# optional maintainer scripts:
for script in postinst postrm preinst preun shlibs; do
if [ ! -f debian.$name.$script ]; then
test -f $script && mv $script debian.$name.$script
fi
if [ -f debian.$name.$script ]; then
osc add debian.$name.$script
fi
done
echo "Next steps:"
echo " osc build"
echo " osc ci -m '$0 $url'"
rm -rf $tmpdir
rm -f control
rm -f md5sums
| true
|
281e18746a4a64fba07e1025026a6e1575a217ea
|
Shell
|
Kho-Dialga/configs
|
/.local/bin/dmenu/dmenucfg
|
UTF-8
| 2,884
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Small dmenu script for editing the config files of programs that I use

# Menu data: "<label> <command that opens the config in $EDITOR>".
# NOTE(review): labels containing spaces (e.g. "awesome (themes)") are
# truncated to their first word by the `cut -f1` below, so the lookup for
# such a choice matches more than one line -- confirm whether that is
# intended.
# Program               # Command to edit file
cfg="\
alacritty $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/alacritty/alacritty.yml
awesome $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/awesome/rc.lua
awesome (themes) $TERMINAL -e lf $XDG_CONFIG_HOME/awesome/themes
bspwm $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/bspwm/bspwmrc
dunst $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/dunst/dunstrc
i3 $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/i3/config
kitty $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/kitty/kitty.conf
lf $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/lf/lfrc
mpd $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/mpd/mpd.conf
mpv $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/mpv/mpv.conf
mpv (key bindings) $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/mpv/input.conf
ncmpcpp $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/ncmpcpp/config
ncmpcpp (keybindings) $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/ncmpcpp/bindings
newsboat $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/newsboat/config
newsboat (feeds) $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/newsboat/urls
nvim $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/nvim/init.vim
picom $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/picom/picom.conf
polybar $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/polybar/config
qtile $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/qtile/config.py
rofi $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/rofi/config.rasi
spacevim $TERMINAL -e $EDITOR $HOME/.SpaceVim.d/init.toml
spacevim (autoload) $TERMINAL -e $EDITOR $HOME/.SpaceVim.d/autoload/myspacevim.vim
sxhkd $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/sxhkd/sxhkdrc
transmission-daemon $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/transmission-daemon/settings.json
xob $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/xob/styles.cfg
xmonad $TERMINAL -e $EDITOR $HOME/.xmonad/xmonad.hs
xmobar $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/xmobar/xmobarrc
aliasrc $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/shell/aliasrc
shortcutrc $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/shell/shortcutrc
xprofile $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/x11/xprofile
xresources $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/x11/Xresources
xinitrc $TERMINAL -e $EDITOR $XDG_CONFIG_HOME/x11/xinitrc
zsh $TERMINAL -e $EDITOR $ZDOTDIR/.zshrc
zprofile $TERMINAL -e $EDITOR $HOME/.zprofile"
# Fix: the original listed "xob" twice, the second time with a garbled path
# ($TERMINALyles.cfg -- a bad "st" -> "$TERMINAL" substitution); the broken
# duplicate entry has been removed.

# Let the user pick an entry, then run the matching edit command.
choice="$(echo "$cfg" | cut -d' ' -f 1 | dmenu -l 10 -p Config:)" || exit 1
# Word splitting of the unquoted expansion is intentional: the matched
# tail of the line is a command plus its arguments.
cmd=$(echo "$cfg" | grep "^$choice " | cut -d ' ' -f2-)
$cmd
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.