blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bdc4f2c99435eab3bb182900557e1258a42c0e2e | Shell | Sota-Watanabe/study | /cmd/cpumem.sh | UTF-8 | 1,459 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# Benchmark driver: repeatedly curl a Knative service through the Istio
# ingress gateway and record the serving container's CPU time and memory
# usage per request into evaluation.log.
cd /home/watanabe/go/src/k8s.io/kubernetes*/
# Defaults; overridable via getopts below.
times=1
KSVC="array-init"
subdir=""
# -t: run 10 iterations, -n N: run N iterations,
# -k NAME: Knative service name, -d PATH: URL subpath appended to the host.
while getopts "tn:k:d:" OPT
do
case $OPT in
t) times=10;;
n) times=$OPTARG;;
k) KSVC="$OPTARG";;
d) subdir="$OPTARG";;
esac
done
echo $subdir
for((i = 0; i<$times; i++)); do
# Wait until no "user" container is left over from the previous iteration
# (docker ps prints only its header line -- 1 line -- when nothing matches).
NUM=`docker ps -f name=user| wc -l`
while [ $NUM != '1' ];
do
echo 'still running'
sleep 1
NUM=`docker ps -f name=user| wc -l`
done
# Knative installs its gateway as "istio-ingressgateway" when the
# config-istio configmap is present.
if cluster/kubectl.sh get configmap config-istio -n knative-serving > /dev/null; then
INGRESSGATEWAY=istio-ingressgateway
fi
# NodePort mapped to the gateway's port 80, reachable on localhost.
export IP_ADDRESS=localhost:$(cluster/kubectl.sh get svc $INGRESSGATEWAY --namespace istio-system --output 'jsonpath={.spec.ports[?(@.port==80)].nodePort}')
# Response body followed by curl's total request time (-w "%{time_total}").
OUTPUT=`curl -H "Host: $KSVC.default.example.com" http://${IP_ADDRESS}$subdir -w "%{time_total}" `
echo ''
# Now wait until exactly one "user" container is running (header + 1 row),
# so the docker stats below measure the container that served the request.
NUM=`docker ps -f name=user| wc -l`
while [ $NUM != '2' ];
do
echo $NUM
echo 'not only one'
sleep 1
NUM=`docker ps -f name=user| wc -l`
done
CID=`docker ps -f name=user -q --no-trunc`
# Field 7 of the second `docker top` row -- presumably the TIME column;
# TODO confirm against the local docker version's output layout.
CPU_TIME="`docker top $CID |awk 'NR==2'| awk '{print $7}'`"
# First token of MemUsage, i.e. current usage without the "/ limit" part.
MEM="`docker stats $CID --no-stream --format "{{.MemUsage}}"| awk '{print $1}'`"
# OUTPUT is assumed to be whitespace-separated "key=value" tokens
# (array=.. loop=.. version=.. checkpoint=..) followed by the latency
# appended by curl -- TODO confirm the service's response format.
echo $OUTPUT | while read array loop version checkpoint latency; do
echo ${checkpoint#*=} $latency $CPU_TIME $MEM ${array#*=} ${loop#*=} >> evaluation.log
echo ${checkpoint#*=} $latency $CPU_TIME $MEM ${array#*=} ${loop#*=}
done;
done | true |
249dec74a988d39016ed73f6927b21dce681b465 | Shell | AIV-97/aosp-builder | /build | UTF-8 | 614 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# AOSP/NAD build helper: source tree is expected in /tmp/rom; builds the
# RMX1941 userdebug target with ccache, then uploads the flashable zip.
cd /tmp/rom # Depends on where source got synced
# Normal build steps
. build/envsetup.sh
lunch nad_RMX1941-userdebug
export CCACHE_DIR=/tmp/ccache
# command -v is the portable replacement for `which`.
export CCACHE_EXEC=$(command -v ccache)
export USE_CCACHE=1
ccache -M 20G
ccache -o compression=true
ccache -z
ccache -s
make sepolicy
mka nad
#sleep 75m
#kill %1 || echo "Build already failed or completed"
ccache -s
# upload
# Upload a file to transfer.sh (link printed by curl; trailing echo adds
# the newline curl omits).
up(){
# BUG FIX: the original read $(basename$1) -- no space after "basename",
# so the shell tried to execute a command literally named "basename<path>",
# the substitution came back empty, and the upload URL lost its filename.
curl --upload-file "$1" https://transfer.sh/$(basename "$1"); echo
# 14 days, 10 GB limit
}
cd /tmp/rom
up /tmp/rom/out/target/product/RMX1941/*UNOFFICIAL*.zip || echo "Only ccache generated or build failed lol"
ccache -s
| true |
433ec857b392dfdaecb4961ecf679dfb52ab930d | Shell | bengtrj/cfcr-toolkit | /deploy-cfcr-gcp.sh | UTF-8 | 2,712 | 3.671875 | 4 | [] | no_license | #!/usr/bin/env bash
set -e
# Enable command tracing when DEBUG is set in the caller's environment.
[[ -z "${DEBUG:-}" ]] || set -x
bosh_deployment_name=cfcr-bosh-gcp
kubernetes_master_host=""
# NOTE(review): bbl_state_directory is read here but never assigned in this
# file -- presumably exported by the calling environment; if unset this
# resolves to "/bosh_config.yml". Confirm.
bosh_dns_zone="$(bosh int ${bbl_state_directory}/bosh_config.yml --path=/dns-zone)"
# Print CLI usage to stderr.
# NOTE(review): usage lists one mandatory parameter, but main() below
# requires two (cluster name AND kubernetes master host).
print_usage() {
cat >&2 << EOF
Usage: $0 <CFCR cluster name>
Mandatory parameters:
<CFCR cluster name> the CFCR cluster name. This will be the BOSH deployment name for the cluster too.
EOF
}
# Create a DNS A record for the cluster in the bbl-managed zone.
# NOTE(review): the "transaction add" below supplies no record data
# (address), so the transaction likely fails -- verify against gcloud docs.
# Also note this function is never called from main().
setup_networking() {
gcloud dns record-sets transaction start \
--zone="${bosh_dns_zone}"
gcloud dns record-sets transaction add \
--ttl=300 \
--name="${cfcr_cluster_name}" \
--type=A \
--zone="${bosh_dns_zone}"
gcloud dns record-sets transaction execute \
--zone="${bosh_dns_zone}"
}
# Create GCP service accounts for cluster nodes (worker part is a TODO).
# Not called from main() in the current flow.
setup_node_service_acccounts() {
# master node
gcloud iam service-accounts create "${bosh_deployment_name}" --display-name="${bosh_deployment_name}"
gcloud projects add-iam-policy-binding "${gcp_project_name}" --member serviceAccount:"${gcp_account}" --role roles/owner >> /dev/null
# worker node
}
# Deploy the CFCR manifest with the GCP cloud-provider ops file via BOSH,
# pulling variables from `bbl outputs`.
deploy_cfcr() {
export KD="$(pwd)/${bosh_deployment_name}/kubo-deployment"
bosh deploy -d "${cfcr_cluster_name}" "${KD}/manifests/cfcr.yml" \
-o "${KD}/manifests/ops-files/iaas/gcp/cloud-provider.yml" \
-o "${bosh_deployment_name}/cfcr-ops.yml" \
-v deployment_name="${cfcr_cluster_name}" \
-v kubernetes_master_host="${kubernetes_master_host}" \
-l <(bbl outputs) --non-interactive
}
# Upload default stemcell for GCP to director blobstore - https://bosh.cloudfoundry.org/stemcells/ - https://ultimateguidetobosh.com/stemcells/
upload_stemcell() {
local stemcell_version
stemcell_version="$(bosh int "${bbl_state_directory}"/kubo-deployment/manifests/cfcr.yml --path /stemcells/0/version)"
print "Uploading the bosh GCP stemcell version ${stemcell_version}"
bosh upload-stemcell "https://s3.amazonaws.com/bosh-gce-light-stemcells/light-bosh-stemcell-${stemcell_version}-google-kvm-ubuntu-trusty-go_agent.tgz"
}
# Upload the latest kubo-release tarball (URL taken from the GitHub API)
# to the BOSH director.
upload_cfcr_release() {
print "Uploading CFCR release"
bosh upload-release $(curl --silent "https://api.github.com/repos/cloudfoundry-incubator/kubo-release/releases/latest" | bosh int - --path=/assets/0/browser_download_url | grep http)
}
# Banner-style logger.
print() {
echo -e "\n****** $1 ******\n"
}
# Entry point: load the bbl environment (minus proxy vars), validate the
# two positional arguments, then deploy.
main() {
eval "$(BBL_STATE_DIRECTORY="bbl-state" bbl print-env | grep -vE "BOSH_ALL_PROXY|CREDHUB_PROXY")"
cfcr_cluster_name=$1
kubernetes_master_host=$2
if [[ -z "${cfcr_cluster_name}" || -z "${kubernetes_master_host}" ]]; then
print_usage
exit 1
fi
# upload_cfcr_release
deploy_cfcr
}
[[ "$0" == "${BASH_SOURCE[0]}" ]] && main "$@" | true |
ca31a60199c647e1d2c42d39e7a22cea925f7e41 | Shell | bgruening/docker-galaxy-stable | /compose/galaxy-server/files/start.sh | UTF-8 | 5,017 | 3.875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Poll until Galaxy answers HTTP 200 behind the proxy, then create the
# default admin account via create_galaxy_user.py inside Galaxy's venv.
# Reads GALAXY_DEFAULT_ADMIN_{USER,EMAIL,PASSWORD,KEY} from the environment.
create_user() {
GALAXY_PROXY_PREFIX=$(cat $GALAXY_CONFIG_DIR/GALAXY_PROXY_PREFIX.txt)
echo "Waiting for Galaxy..."
until [ "$(curl -s -o /dev/null -w '%{http_code}' ${GALAXY_URL:-nginx}$GALAXY_PROXY_PREFIX)" -eq "200" ] && echo Galaxy started; do
sleep 0.1;
done;
echo "Creating admin user $GALAXY_DEFAULT_ADMIN_USER with key $GALAXY_DEFAULT_ADMIN_KEY and password $GALAXY_DEFAULT_ADMIN_PASSWORD if not existing"
. $GALAXY_VIRTUAL_ENV/bin/activate
python /usr/local/bin/create_galaxy_user.py --user "$GALAXY_DEFAULT_ADMIN_EMAIL" --password "$GALAXY_DEFAULT_ADMIN_PASSWORD" \
-c "$GALAXY_CONFIG_FILE" --username "$GALAXY_DEFAULT_ADMIN_USER" --key "$GALAXY_DEFAULT_ADMIN_KEY"
deactivate
}
# start copy lib/tools. Looks very hacky.
tools_dir="/galaxy/lib/galaxy/tools/"
exp_dir="/export$tools_dir"
mkdir -p $exp_dir
chown "$GALAXY_USER:$GALAXY_USER" $exp_dir
cp -rf $tools_dir/* $exp_dir
# end copy lib/tools.
# First start?? Check if something exists that indicates that environment is not new.. Config file? Something in DB maybe??
echo "Initialization: Check if files already exist, export otherwise."
# Create initial $GALAXY_ROOT in $EXPORT_DIR if not already existent
mkdir -p "$EXPORT_DIR/$GALAXY_ROOT"
# Map of in-container Galaxy directories to their persisted location under
# $EXPORT_DIR; each is exported on first run and then symlinked back.
declare -A exports=( ["$GALAXY_STATIC_DIR"]="$EXPORT_DIR/$GALAXY_STATIC_DIR" \
["$GALAXY_CONFIG_TOOL_PATH"]="$EXPORT_DIR/$GALAXY_CONFIG_TOOL_PATH" \
["$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR"]="$EXPORT_DIR/$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR" \
["$GALAXY_CONFIG_TOOL_DATA_PATH"]="$EXPORT_DIR/$GALAXY_CONFIG_TOOL_DATA_PATH" \
["$GALAXY_VIRTUAL_ENV"]="$EXPORT_DIR/$GALAXY_VIRTUAL_ENV" )
# shellcheck disable=SC2143,SC2086,SC2010
for galaxy_dir in "${!exports[@]}"; do
exp_dir=${exports[$galaxy_dir]}
# Export only when the target is missing or empty (first start).
# NOTE(review): mkdir here is not -p, so it fails if the parent under
# $EXPORT_DIR does not exist yet -- confirm the export layout guarantees it.
if [ ! -d $exp_dir ] || [ -z "$(ls -A $exp_dir)" ]; then
echo "Exporting $galaxy_dir to $exp_dir"
mkdir $exp_dir
chown "$GALAXY_USER:$GALAXY_USER" $exp_dir
cp -rpf $galaxy_dir/* $exp_dir
fi
rm -rf $galaxy_dir
ln -v -s $exp_dir $galaxy_dir
chown -h "$GALAXY_USER:$GALAXY_USER" $galaxy_dir
done
# Export galaxy_config seperately (special treatment because of plugins-dir)
# shellcheck disable=SC2143,SC2086,SC2010
if [ ! -d "$EXPORT_DIR/$GALAXY_CONFIG_DIR" ] || [ -z "$(ls -p $EXPORT_DIR/$GALAXY_CONFIG_DIR | grep -v /)" ]; then
# Move config to $EXPORT_DIR and create symlink
mkdir "$EXPORT_DIR/$GALAXY_CONFIG_DIR"
chown "$GALAXY_USER:$GALAXY_USER" "$EXPORT_DIR/$GALAXY_CONFIG_DIR"
cp -rpf $GALAXY_CONFIG_DIR/* $EXPORT_DIR/$GALAXY_CONFIG_DIR
cp -rpf $GALAXY_CONFIG_DIR/plugins/* $EXPORT_DIR/$GALAXY_CONFIG_DIR/plugins
fi
rm -rf "$GALAXY_CONFIG_DIR"
ln -v -s "$EXPORT_DIR/$GALAXY_CONFIG_DIR" "$GALAXY_CONFIG_DIR"
chown -h "$GALAXY_USER:$GALAXY_USER" "$GALAXY_CONFIG_DIR"
# Export database-folder (used for job files etc)
rm -rf "$GALAXY_DATABASE_PATH"
mkdir -p "$EXPORT_DIR/$GALAXY_DATABASE_PATH"
chown "$GALAXY_USER:$GALAXY_USER" "$EXPORT_DIR/$GALAXY_DATABASE_PATH"
ln -v -s "$EXPORT_DIR/$GALAXY_DATABASE_PATH" "$GALAXY_DATABASE_PATH"
chown -h "$GALAXY_USER:$GALAXY_USER" "$GALAXY_DATABASE_PATH"
# Try to guess if we are running under --privileged mode
if mount | grep "/proc/kcore"; then
PRIVILEGED=false
else
PRIVILEGED=true
echo "Privileged mode detected"
chmod 666 /var/run/docker.sock
fi
# Privileged containers get FUSE access, so mount the CVMFS repositories.
if $PRIVILEGED; then
echo "Mounting CVMFS"
chmod 666 /dev/fuse
mkdir /cvmfs/data.galaxyproject.org
mount -t cvmfs data.galaxyproject.org /cvmfs/data.galaxyproject.org
mkdir /cvmfs/singularity.galaxyproject.org
mount -t cvmfs singularity.galaxyproject.org /cvmfs/singularity.galaxyproject.org
fi
echo "Finished initialization"
# Block until the companion containers accept TCP connections.
echo "Waiting for RabbitMQ..."
until nc -z -w 2 rabbitmq 5672 && echo RabbitMQ started; do
sleep 1;
done;
echo "Waiting for Postgres..."
until nc -z -w 2 postgres 5432 && echo Postgres started; do
sleep 1;
done;
# The configurator container signals completion by removing its lock file.
if [ "$SKIP_LOCKING" != "true" ]; then
echo "Waiting for Galaxy configurator to finish and release lock"
until [ ! -f "$GALAXY_CONFIG_DIR/configurator.lock" ] && echo Lock released; do
sleep 0.1;
done;
fi
# Optional cluster integrations, enabled by mounted config/key files.
if [ -f "/htcondor_config/galaxy.conf" ]; then
echo "HTCondor config file found"
cp -f "/htcondor_config/galaxy.conf" /etc/condor/condor_config.local
echo "Starting HTCondor.."
service condor start
fi
if [ -f /etc/munge/munge.key ]; then
echo "Munge key found"
echo "Starting Munge.."
/etc/init.d/munge start
fi
# In case the user wants the default admin to be created, do so.
if [[ -n $GALAXY_DEFAULT_ADMIN_USER ]]; then
# Run in background and wait for Galaxy having finished starting up
create_user &
fi
# Ensure proper permission (the configurator might have changed them "by mistake")
chown -RL "$GALAXY_USER:$GALAXY_GROUP" "$GALAXY_CONFIG_DIR"
# Foreground uwsgi keeps the container alive; it drops to the Galaxy uid/gid.
echo "Starting Galaxy now.."
cd "$GALAXY_ROOT" || { echo "Error: Could not change to $GALAXY_ROOT"; exit 1; }
"$GALAXY_VIRTUAL_ENV/bin/uwsgi" --yaml "$GALAXY_CONFIG_DIR/galaxy.yml" --uid "$GALAXY_UID" --gid "$GALAXY_GID"
| true |
f53233eac06c6354aee4288844666ae3b5b5ad65 | Shell | project-treble-s2/android_device_leeco_s2 | /rootdir/etc/init.treble.sh | UTF-8 | 2,243 | 3.015625 | 3 | [] | no_license | #! /vendor/bin/sh
# Treble boot-time shim: expose vendor firmware under /system/etc/firmware
# by symlinking, so legacy paths keep working on a GSI system image.
# common functions
symlink() {
ln -sn $1 $2;
}
# mount system (which could be root or /system depending on ROM) and vendor as rw
mount -o remount,rw /
mount -o remount,rw /system
mount -o remount,rw /vendor
# symlink camera
# NOTE(review): the explicit list below is redundant -- the wildcard loop at
# the bottom re-links every file in /vendor/etc/firmware anyway.
symlink /vendor/etc/firmware/a530_pfp.fw /system/etc/firmware/a530_pfp.fw
symlink /vendor/etc/firmware/a530_pm4.fw /system/etc/firmware/a530_pm4.fw
symlink /vendor/etc/firmware/cpp_firmware_v1_5_0.fw /system/etc/firmware/cpp_firmware_v1_5_0.fw
symlink /vendor/etc/firmware/leia_pfp_470.fw /system/etc/firmware/leia_pfp_470.fw
symlink /vendor/etc/firmware/leia_pm4_470.fw /system/etc/firmware/leia_pm4_470.fw
symlink /vendor/etc/firmware/venus-v1.b00 /system/etc/firmware/venus-v1.b00
symlink /vendor/etc/firmware/venus-v1.b01 /system/etc/firmware/venus-v1.b01
symlink /vendor/etc/firmware/venus-v1.b02 /system/etc/firmware/venus-v1.b02
symlink /vendor/etc/firmware/venus-v1.b03 /system/etc/firmware/venus-v1.b03
symlink /vendor/etc/firmware/venus-v1.b04 /system/etc/firmware/venus-v1.b04
symlink /vendor/etc/firmware/venus-v1.mbn /system/etc/firmware/venus-v1.mbn
symlink /vendor/etc/firmware/venus-v1.mdt /system/etc/firmware/venus-v1.mdt
symlink /vendor/etc/firmware/venus.b00 /system/etc/firmware/venus.b00
symlink /vendor/etc/firmware/venus.b01 /system/etc/firmware/venus.b01
symlink /vendor/etc/firmware/venus.b02 /system/etc/firmware/venus.b02
symlink /vendor/etc/firmware/venus.b03 /system/etc/firmware/venus.b03
symlink /vendor/etc/firmware/venus.b04 /system/etc/firmware/venus.b04
symlink /vendor/etc/firmware/venus.mbn /system/etc/firmware/venus.mbn
symlink /vendor/etc/firmware/venus.mdt /system/etc/firmware/venus.mdt
symlink /vendor/etc/firmware/wlan/prima/WCNSS_qcom_wlan_nv.bin /system/etc/firmware/wlan/prima/WCNSS_qcom_wlan_nv.bin
symlink /vendor/etc/firmware/wlan/prima/WCNSS_qcom_wlan_nv_nofem.bin /system/etc/firmware/wlan/prima/WCNSS_qcom_wlan_nv_nofem.bin
# NOTE(review): this existence check runs AFTER symlinks were already created
# into the directory above, and chmod 644 on a directory removes the
# execute/search bit -- confirm both are intentional.
if [ ! -d "/system/etc/firmware" ]; then mkdir /system/etc/firmware; fi
chmod 644 /system/etc/firmware
# Link everything else in the vendor firmware directory.
cd /vendor/etc/firmware/
for f in *; do
symlink /vendor/etc/firmware/$f /system/etc/firmware/$f
done
# remount system and vendor as ro
mount -o remount,ro /
mount -o remount,ro /system
mount -o remount,ro /vendor
| true |
1b8cc66a1c51d36377f4e1fe36a3ebbd169e0315 | Shell | AkshayAnku/StarCount | /Akshay.sh | UTF-8 | 194 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Poll the GitLab API for the HomelabOS project's star count every 30 s
# and overwrite ./tmp/gitlabStarCount.txt with the latest value.
# FIX: create the output directory first -- the original redirect failed
# with "No such file or directory" when ./tmp did not exist.
mkdir -p ./tmp
while true
do
# Grabs GitLab star counts
echo "HomelabOS Stars: $(curl -s https://gitlab.com/api/v4/projects/6853087 | jq .star_count)" > ./tmp/gitlabStarCount.txt
sleep 30
done
| true |
df31ecbd6aefd61fb3cb673289561147b99df8e7 | Shell | cgathuru/dotfiles | /install.sh | UTF-8 | 2,049 | 2.6875 | 3 | [] | no_license | #!/bin/sh
# Arch Linux dotfiles bootstrap: installs packages via pacman/packer and
# symlinks config files from this repo (run from the repo root; uses $PWD).
# You should already have git, sudo and the dotfiles repo cloned
# First get packages
sudo pacman -S vim cmake wget zsh tmux i3 termite redshift xorg-server \
xorg-xinit openssh adobe-source-code-pro-fonts ttf-symbola ttf-dejavu \
adobe-source-serif-pro-fonts adobe-source-sans-pro-fonts python2 \
xorg-xrandr numlockx xorg-xset polkit bc xf86-video-intel
# Set up vim
mkdir -p $HOME/.vim/after $HOME/.VIM_UNDO_FILES
ln -s $PWD/vim/after/ftplugin $HOME/.vim/after/ftplugin
ln -s $PWD/vimrc $HOME/.vimrc
# vim-plug plugin manager.
curl -fLo $HOME/.vim/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
# Set up tmux
ln -s $PWD/tmux.conf $HOME/.tmux.conf
git clone https://github.com/tmux-plugins/tpm $HOME/.tmux/plugins/tpm
# Set up i3 and termite and redshift
mkdir -p $HOME/.config
ln -s $PWD/config/i3 $HOME/.config/i3
ln -s $PWD/config/termite $HOME/.config/termite
ln -s $PWD/config/systemd $HOME/.config/systemd
ln -s $PWD/config/redshift.conf $HOME/.config/redshift.conf
# Set up pacman mirrorlist auto-update
sudo pacman -S reflector
sudo mkdir -p /etc/pacman.d/hooks
sudo ln -s $PWD/mirrorupgrade.hook /etc/pacman.d/hooks/mirrorupgrade.hook
# Set up packer (AUR helper), built from its AUR PKGBUILD.
sudo pacman -S expac jshon
mkdir packer && cd packer
wget https://aur.archlinux.org/cgit/aur.git/plain/PKGBUILD?h=packer
mv PKGBUILD?h=packer PKGBUILD
makepkg
sudo pacman -U packer-*
cd ..
rm -rf packer
# Install font-awesome
packer -S ttf-font-awesome
# Install enpass and caffeine
packer -S enpass-bin caffeine-ng
# Setup audio
packer -S pulseaudio pavucontrol pulseaudio-ctl playerctl
# Setup extract utils
packer -S atool unzip p7zip zip unrar
# Setup network
packer -S networkmanager network-manager-applet networkmanager-openvpn private-internet-access-vpn gnome-keyring libsecret
# Set up zsh
ln -s $PWD/zprofile $HOME/.zprofile
ln -s $PWD/zshrc $HOME/.zshrc
ln -s $PWD/zshenv $HOME/.zshenv
ln -s $PWD/xinitrc $HOME/.xinitrc
# NOTE(review): the oh-my-zsh installer may replace ~/.zshrc, clobbering the
# symlink created just above -- confirm the intended ordering.
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
| true |
db8201724b2e0c7cfee67533eaa815a4b643158f | Shell | arthursimas1/gateway | /entrypoint.sh | UTF-8 | 1,161 | 3.703125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Graceful shutdown handler: stop nginx and exit when a signal arrives.
cleanup() {
echo "caught Signal ... stopping nginx ..."
nginx -s stop
echo "done."
exit 0
}
trap cleanup HUP INT QUIT TERM
# Run every certificate-renewal routine with nginx stopped, then restart it.
# The do_not_stop marker tells the watchdog loop below not to treat the
# temporary nginx downtime as a failure.
renew() {
touch do_not_stop
nginx -s quit
for domain in $(ls /renew-routines); do
./renew-routines/${domain}
done
nginx
rm do_not_stop
}
# Renew immediately, then again every 7 days, forever.
renew_maintainer() {
renew
while sleep 7d; do
renew
done
}
# Lock down certificate material to root only.
secure_ssl_files() {
chown -R root:root /ssl-cert
chmod -R 600 /ssl-cert
}
# No arguments: bootstrap a self-signed cert, start nginx, and watchdog it.
# NOTE(review): [[ -z "$@" ]] only behaves like an "is empty" test for zero
# or one argument -- confirm multi-argument invocations are not expected.
if [[ -z "$@" ]]; then
# https://gist.github.com/tsaarni/14f31312315b46f06e0f1ecc37146bf3
mkdir -p -m 600 /etc/nginx/ssl
# Generate a throwaway self-signed EC (secp384r1) certificate; the piped
# dots answer openssl's interactive subject prompts.
echo -e ".\n.\n.\n\n.\n.\n.\n" | openssl req -x509 -newkey ec:<(openssl ecparam -name secp384r1) -nodes -days 365 -out /etc/nginx/ssl/cert.pem -keyout /etc/nginx/ssl/privkey.pem
echo
secure_ssl_files
nginx
renew_maintainer &
# Watchdog: every 30 s, verify an nginx process still exists; exit the
# container if it died outside a renewal window (no do_not_stop marker).
while sleep 30s; do
ps | grep nginx | grep -q -v grep
nginx=$?
if [[ ! -f ./do_not_stop && $nginx != 0 ]]; then
echo "nginx stopped working!"
exit 1
fi
done
# "reload": re-secure certs and hot-reload nginx config.
elif [[ "$@" == "reload" ]]; then
secure_ssl_files
nginx -s reload
exit $?
# "renew": run the renewal cycle once.
elif [[ "$@" == "renew" ]]; then
renew
exit $?
fi
# Any other arguments: exec them as the container command (docker idiom).
exec "$@"
| true |
20295f66a0e3d7f2ac193593a0f000e3e177cffe | Shell | BeeksRM1/Sys-scripts | /SystemAnalysisTABLES.sh | UTF-8 | 2,089 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# One-shot system health report: prints uptime, users, processes, security
# and network status to stdout. Several sections (iptables, lshw, sestatus)
# need root; output is plain text with "---" separators.
##System Analisis##
echo "SYSTEM INFORMATION>>"
echo "--------------------"
date;
echo "uptime:"
uptime
echo "--------------------"
echo "Version:"
uname -a
echo "--------------------"
printf "\n"
echo "Currently connected:"
w
echo "--------------------"
echo "Last logins:"
last -a |head -3
echo "--------------------"
printf "\n"
echo "Disk and memory usage:"
# xargs flattens the multi-line df/free output so awk can pick fixed fields;
# NOTE(review): the field numbers assume a specific df/free layout (single
# filesystem, specific coreutils version) -- confirm on the target host.
df -h | xargs | awk '{print "Free/total disk: " $11 " / " $9}'
free -m | xargs | awk '{print "Free/total memory: " $17 " / " $8 " MB"}'
echo "--------------------"
# OOM heuristic: count case-insensitive "kill" mentions in the syslog,
# dated from the log's first line.
start_log=`head -1 /var/log/messages |cut -c 1-12`
oom=`grep -ci kill /var/log/messages`
echo -n "OOM errors since $start_log :" $oom
echo ""
echo "--------------------"
printf "\n"
echo "Users:"
cat /etc/passwd
echo "--------------------"
echo "Wheel Group:"
getent group wheel
echo "--------------------"
printf "\n"
echo "Process Info>>"
echo "--------------------"
echo "Utilization and most expensive processes:"
top -b |head -5
echo
top -b |head -10 |tail -4
printf "\n"
echo "--------------------"
printf "\n"
echo "Processes:"
ps auxf --width=200 | tail -20
printf "\n"
echo "--------------------"
printf "\n"
echo "vmstat:"
# NOTE(review): "vmstat -V" prints the version only; the trailing "1 5"
# arguments are ignored there -- likely intended as a version banner.
vmstat -V 1 5
vmstat 1 5
printf "\n"
echo "--------------------"
printf "\n"
echo "Security Status>>"
echo "--------------------"
sestatus
echo "--------------------"
printf "\n"
systemctl status firewalld
echo "--------------------"
printf "\n"
systemctl status iptables
echo "--------------------"
printf "\n"
echo "IPtables rules"
iptables -t nat -vnL
echo "--------------------"
printf "\n"
echo "Network Info>>"
echo "--------------------"
echo "Interfaces:"
ifconfig -a
echo "--------------------"
echo "Connectivity Check:"
ping -c 4 8.8.8.8
echo "--------------------"
echo "Routes:"
route -n
printf "\n"
echo "--------------------"
printf "\n"
echo "Net Hardware:"
lshw -class network
printf "\n"
echo "--------------------"
printf "\n"
echo "Open TCP ports:"
netstat -plunt
printf "\n"
echo "--------------------"
printf "\n"
echo "Current connections:"
ss -s
echo "--------------------"
| true |
2c3b9303d7c7597ea5cc3e99266261b9b56b4213 | Shell | s-noda/kabu | /test/wakamoto_logger.sh | UTF-8 | 330 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env bash
# Daily stock logger for Wakamoto Pharmaceutical: fetch the day's k-db CSV
# via helpers from kdb_log.sh, extract the open (始値) and close (終値)
# prices, and append "DATE open close" to wakamoto.log.
source kdb_log.sh;
DATE=$(date +"%Y%m%d");
TARGET="わかもと製薬";
LOG=wakamoto.log;
# FIX: fetch today's data BEFORE reading the CSV -- the original read the
# file first, so it always processed stale (or missing) data.
get_kdb_data "$DATE" ;
# BUG FIX: the original wrote `cat $DATE_utf.csv`, which expands the
# undefined variable "DATE_utf" and therefore tried to read ".csv".
# The intended file is ${DATE}_utf.csv -- TODO confirm the exact name
# produced by get_kdb_data in kdb_log.sh.
DATA=$(grep -- "$TARGET" "${DATE}_utf.csv");
echo -n "$DATE" >> "$LOG";
# $DATA is deliberately left unquoted: get_tagged_value appears to rely on
# word splitting of the CSV fields (see kdb_log.sh) -- do not quote blindly.
echo -n $(get_tagged_value $DATA "始値") >> "$LOG";
echo -n $(get_tagged_value $DATA "終値") >> "$LOG";
echo "" >> "$LOG";
cat "$LOG";
| true |
3f1a1f625a0f0ee47fb9fd7309e43712f178e62f | Shell | abenbihi/pydata | /scripts/cmu_colmap_manual_undistortion.sh | UTF-8 | 5,166 | 3.15625 | 3 | [] | no_license | #!/bin/sh
## TODO: specify your path here
#MACHINE=1
#if [ "$MACHINE" -eq 0 ]; then
# WS_PATH=/home/abenbihi/ws/
# IMG_DIR=/mnt/data_drive/dataset/CMU-Seasons/images/
#elif [ "$MACHINE" -eq 1 ]; then
# WS_PATH=/home/gpu_user/assia/ws/
# #IMG_DIR="$WS_PATH"/datasets/Extended-CMU-Seasons/
# IMG_DIR="$WS_PATH"datasets/Extended-CMU-Seasons-Undistorted/
#else
# echo "test_lake.sh: Get your MTF MACHINE correct"
# exit 1
#fi
#COLMAP_BIN="$WS_PATH"tools/colmap/build/src/exe/colmap
##IMG_DIR=/mnt/data_drive/dataset/Extended-CMU-Seasons/
#
# Pull in COLMAP_BIN / IMG_UNDISTORTED_DIR and friends.
. ./scripts/export_path.sh
IMG_DIR="$IMG_UNDISTORTED_DIR"
# Argument validation: exactly <slice> <camera id> <survey id> required.
if [ "$#" -eq 0 ]; then
echo "Arguments: "
echo "1: slice"
echo "2: camera id"
echo "3: survey id"
exit 1
fi
if [ "$#" -ne 3 ]; then
echo "Error: bad number of arguments"
echo "1: slice"
echo "2: camera id"
echo "3: survey id"
exit 1
fi
slice_id="$1"
cam_id="$2"
survey_id="$3"
# Survey id -1 is the convention for the database (reference) survey.
if [ "$survey_id" -eq -1 ]; then
survey_id=db
fi
project_name="$slice_id"/"$slice_id"_c"$cam_id"_"$survey_id"
colmap_ws=pycmu/res/colmap/"$project_name"/
# used for CMU seasons ONLY, because the images from Extended-CMU-Seasons are
# not undistorted !!!!
# Intrinsics per camera: fx,fy,cx,cy for the PINHOLE model.
if [ "$cam_id" -eq 0 ]; then
camera_model=PINHOLE
camera_params=868.99,866.06,525.94,420.04
#camera_model=OPENCV
#camera_params=868.993378,866.063001,525.942323,420.042529,-0.399431,0.188924,0.000153,0.000571
elif [ "$cam_id" -eq 1 ]; then
camera_model=PINHOLE
camera_params=873.38,876.49,529.32,397.27
else
echo "Error: Wrong cam_id="$cam_id" != {0,1}."
exit 1
fi
# Stage toggle blocks: [ 1 -eq 1 ] enables a stage; flip to 0 to skip it.
# Stage 1: wipe any previous workspace and recreate the directory layout.
if [ 1 -eq 1 ]; then
rm -f "$colmap_ws"/database.db
rm -rf "$colmap_ws"/sparse
rm -rf "$colmap_ws"/dense
# NOTE(review): $? only reflects the LAST rm; earlier failures go unnoticed.
if [ $? -ne 0 ]; then
echo "Error when deleting previous colmap workspace"
exit 1
fi
mkdir -p "$colmap_ws"/sparse/
# NOTE(review): duplicate of the line above -- harmless with -p, but
# probably meant to create a different directory.
mkdir -p "$colmap_ws"/sparse/
mkdir -p "$colmap_ws"/dense
if [ $? -ne 0 ]; then
echo "Error when creating colmap workspace"
exit 1
fi
fi
# Stage 2: copy the known camera poses (prior) into the workspace.
if [ 1 -eq 1 ]; then
# import known poses
if [ -d "$colmap_ws"/colmap_prior ]; then
rm -rf "$colmap_ws"/colmap_prior/
fi
cp -r pycmu/meta/surveys/"$project_name"/colmap_prior/ \
"$colmap_ws"/colmap_prior
if [ $? -ne 0 ]; then
echo "Error when copying prior information."
exit 1
fi
fi
#echo "$IMG_DIR"
#echo "$colmap_ws"
#exit 0
# Stage 3: SIFT feature extraction with fixed, known intrinsics.
if [ 1 -eq 1 ]; then
## feature extraction with known camera params on masked imgs, from directory
"$COLMAP_BIN" feature_extractor \
--database_path "$colmap_ws"/database.db \
--image_path "$IMG_DIR" \
--image_list_path "$colmap_ws"/colmap_prior/image_list.txt \
--ImageReader.camera_model "$camera_model" \
--ImageReader.camera_params "$camera_params"
if [ $? -ne 0 ]; then
echo "Error during feature_extractor."
exit 1
fi
fi
# Stage 4: exhaustive pairwise feature matching.
if [ 1 -eq 1 ]; then
### feature matching with custom matches. Here I specify image pairs to match.
## I think you can also specify features inliers directly.
#"$COLMAP_BIN" matches_importer \
# --database_path $colmap_ws/database.db \
# --match_list_path $colmap_ws/mano/match_list.txt \
# --match_type pairs
"$COLMAP_BIN" exhaustive_matcher \
--database_path "$colmap_ws"/database.db
if [ $? -ne 0 ]; then
echo "Error during matcher."
exit 1
fi
fi
# Stage 5: triangulate a sparse model using the fixed prior poses
# (point_triangulator keeps poses; mapper would estimate them).
if [ 1 -eq 1 ]; then
echo "$IMG_DIR"
# for when you know the camera pose beforehand
"$COLMAP_BIN" point_triangulator \
--database_path $colmap_ws/database.db \
--image_path "$IMG_DIR" \
--input_path "$colmap_ws"/colmap_prior/ \
--output_path "$colmap_ws"/sparse/
if [ $? -ne 0 ]; then
echo "Error during point_triangulator."
exit 1
fi
fi
#if [ 0 -eq 1 ]; then
# # for when you have no prior on the camera pose, img list
# #"$COLMAP_BIN" mapper \
# # --database_path $colmap_ws/database.db \
# # --image_path "$IMG_DIR" \
# # --image_list_path "$colmap_ws"/colmap_prior/image_list.txt \
# # --output_path $colmap_ws/sparse/
#
# # for when you have no prior on the camera pose, from dir
# "$COLMAP_BIN" mapper \
# --database_path $colmap_ws/database.db \
# --image_path "$colmap_ws"img \
# --output_path $colmap_ws/sparse/
#
# if [ $? -ne 0 ]; then
# echo "Error during mapper."
# exit 1
# fi
#fi
#if [ 0 -eq 1 ]; then
#
# mkdir -p "$colmap_ws"/dense/images
# while read -r line
# do
# cp "$IMG_DIR""$line" "$colmap_ws"/dense/images/
# done < "$colmap_ws"/colmap_prior/image_list.txt
# cp -r "$colmap_ws"/sparse "$colmap_ws"/dense/
#fi
# Stage 6: prepare the dense workspace (undistorted images + sparse model).
if [ 1 -eq 1 ]; then
"$COLMAP_BIN" image_undistorter \
--image_path "$IMG_DIR" \
--input_path "$colmap_ws"/sparse/ \
--output_path "$colmap_ws"/dense/
if [ $? -ne 0 ]; then
echo "Error during image_undistorter."
exit 1
fi
fi
# Stage 7: dense depth/normal estimation via PatchMatch stereo.
if [ 1 -eq 1 ]; then
"$COLMAP_BIN" patch_match_stereo \
--workspace_path "$colmap_ws"/dense/
if [ $? -ne 0 ]; then
echo "Error during patch_match_stereo."
exit 1
fi
fi
#if [ 1 -eq 1 ]; then
# "$COLMAP_BIN" stereo_fusion \
# --workspace_path "$colmap_ws"/dense/ \
# --output_path "$colmap_ws"/dense/fused.ply
# if [ $? -ne 0 ]; then
# echo "Error during stereo_fusion."
# exit 1
# fi
#fi
| true |
68196c247f03ec72b885516f09461c81ee68150f | Shell | MishkaKolpakov/file-storage-auth-service | /generate_liquibase_prop | UTF-8 | 554 | 3.4375 | 3 | [] | no_license | #!/bin/sh
# Generate ./liquibase/liquibase.properties for a PostgreSQL target.
# Usage: generate_liquibase_prop <host> <database> <username> <password>
# FIX: all expansions are now quoted -- the original's unquoted ${password},
# ${FILE} etc. were subject to word splitting and glob expansion
# (ShellCheck SC2086), corrupting credentials containing spaces or '*'.
DIR=$PWD/liquibase
mkdir -p "${DIR}"
FILE=${DIR}/liquibase.properties
echo '# generated liquibase properties' > "${FILE}"
host=$1
database_name=$2
username=$3
password=$4
# Group the property writes so the file is opened once instead of per line.
{
echo "driver=org.postgresql.Driver"
echo "url=jdbc:postgresql://${host}:5432/${database_name}"
echo "username=${username}"
echo "password=${password}"
echo "dialect=org.hibernate.dialect.PostgreSQL9Dialect"
} >> "${FILE}"
echo
echo Following file has been generated:
echo
echo "${FILE}"
echo
# Print the generated file. (The original while/read/echo loop re-implemented
# cat and collapsed whitespace through its unquoted echo.)
cat "${FILE}"
| true |
ca5dc589a22af9942409451530a564224860167e | Shell | art65536/abs | /extra/harfbuzz/PKGBUILD | UTF-8 | 1,879 | 2.859375 | 3 | [] | no_license | # Maintainer: Jan de Groot <jgc@archlinux.org>
# Contributor: Wolfgang Bumiller <blub@speed.at>
pkgbase=(harfbuzz)
pkgname=(harfbuzz harfbuzz-icu)
pkgver=0.9.19
pkgrel=1
pkgdesc="OpenType text shaping engine."
arch=(i686 x86_64)
url="http://www.freedesktop.org/wiki/Software/HarfBuzz"
license=(MIT)
makedepends=('glib2>=2.34.3' freetype2 'graphite>=1.2.0' cairo icu)
optdepends=('cairo: hb-view program')
options=(!libtool)
source=(http://www.freedesktop.org/software/harfbuzz/release/${pkgbase}-${pkgver}.tar.bz2)
sha256sums=('d2da0f060d47f6ad9de8c8781bb21fa4b9eae8ea1cd1e956b814095baa002f35')
build() {
cd $pkgbase-$pkgver
sed -i '' -e "/^ltmain=/!s|\$$ac_aux_dir/ltmain.sh|/usr/share/libtool/config/ltmain.sh|g" \
-e "/^LIBTOOL=/s|\$$\(top_builddir\)/libtool|/usr/bin/libtool|g" \
${srcdir}/${pkgname}-${pkgver}/aclocal.m4
find ${srcdir}/${pkgname}-${pkgver} -name "Makefile.in" | xargs sed -i '' -e 's|^LIBTOOL[ ]*=.*|LIBTOOL=/usr/bin/libtool|g'
./configure --prefix=/usr \
--with-glib --with-freetype --with-cairo --with-icu --with-graphite2
gmake
}
package_harfbuzz() {
depends=(glib2 freetype2 graphite)
optdepends=('cairo: hb-view program')
cd $pkgbase-$pkgver
gmake DESTDIR="$pkgdir" install
install -dm755 "$pkgdir/usr/share/licenses/harfbuzz"
install -m644 COPYING "$pkgdir/usr/share/licenses/harfbuzz/COPYING"
# Split harfbuzz-icu
mkdir -p ../hb-icu/usr/{include/harfbuzz,lib/pkgconfig}; cd ../hb-icu
mv "$pkgdir"/usr/lib/libharfbuzz-icu* ./usr/lib
mv "$pkgdir"/usr/lib/pkgconfig/harfbuzz-icu.pc ./usr/lib/pkgconfig
mv "$pkgdir"/usr/include/harfbuzz/hb-icu.h ./usr/include/harfbuzz
}
package_harfbuzz-icu(){
pkgdesc="$pkgdesc (ICU integration)"
depends=(harfbuzz icu)
mv hb-icu/* "$pkgdir"
install -dm755 "$pkgdir/usr/share/licenses/harfbuzz-icu"
install -m644 $pkgbase-$pkgver/COPYING "$pkgdir/usr/share/licenses/harfbuzz-icu/COPYING"
}
| true |
d2628cb4a2831a5eeceaa4f2aa862c5827ef8489 | Shell | dcsim0n/wxnow_json | /wxnowjson.sh | UTF-8 | 251 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Shell script to fetch data from weather station
# pipe data into python script and format the data
#
# Dana Simmons 2020
# Local weather-station endpoint that serves the current conditions page.
WEATHER_STATION="http://10.1.1.252/i"
# Repo directory containing app.py, which formats the fetched data.
WORK_DIR="/home/pi/wxnow_json"
curl -s $WEATHER_STATION | python3 $WORK_DIR/app.py | true |
b954936331e3fe92b53dc2078a2e37d4cd54a309 | Shell | TyPetrochko/cs323 | /bsh/Test/t13.v0 | UTF-8 | 441 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Wait for some children to die
# REQUIRES: BG, BUILT-IN (wait)
ERROR="+Bsh.err.$$"
FILES="$ERROR"
PROGS="sleeper echo"
RUN="/c/cs323/bin/run -wall=2"
trap "/bin/rm -f $FILES; \
/usr/bin/killall -q -u $USER Bsh $PROGS 2>/dev/null" 0 1 2 3 9 15
ulimit -u 300
$RUN ./Bsh <<END 2>$ERROR
/c/cs323/Hwk5/Tests/sleeper alpha 0.4 gamma & sleep 0.2
echo beta
wait
echo End of test
END
echo
/c/cs323/Hwk5/Tests/suppress $ERROR
| true |
2dac9e07726f5e145d705f6d4814da6414d5a744 | Shell | rafeyhusain/ip | /tools/mailchimp/run.sh | UTF-8 | 314 | 3.109375 | 3 | [] | no_license | #!/usr/bin/env bash
# Back up every MailChimp list named in mailing-list.cfg ("name=id" lines)
# into a dated directory under /srv/ftp/mailchimp/.
DATE=$(date +"%Y-%m-%d")
OUTPUT_DIR="/srv/ftp/mailchimp/${DATE}/"
# mkdir -p is already a no-op when the directory exists, so the original
# `if [ ! -d ... ]` guard was redundant.
mkdir -p "$OUTPUT_DIR"
# Turn each "name=id" line into "name id outdir" and hand the triple to
# backup.py, one invocation per list. awk reads the config file directly
# (the original piped it through a useless cat), and the stderr redirect
# target is quoted so a path with spaces cannot split the redirection.
awk -F'=' -v output_dir="$OUTPUT_DIR" '{print $1" "$2" "output_dir}' mailing-list.cfg \
| xargs -I{} python backup.py {} 2>"${OUTPUT_DIR}error.log"
| true |
76b02a60a353606bb0d3e0ec89fe0f26a0a53dd3 | Shell | 19521791/CS523.L21 | /Btree/CreateDatabase.sh | UTF-8 | 1,940 | 3.109375 | 3 | [] | no_license | SIZE=$1
# The original if/elif ladder repeated the identical two sqlite3 steps for
# every supported size; collapse it into one validated code path. As before,
# an unsupported $SIZE silently skips the database creation and falls
# through to the final message.
case "$SIZE" in
250000000|500000000|750000000|1000000000|1250000000|1500000000|1750000000|2000000000)
# Progress message in millions, matching the original output
# (e.g. "250 trieu" for 250000000).
echo "$((SIZE / 1000000)) trieu"
# Fresh database with a single integer-primary-key table (the B-tree).
sqlite3 ./database_${SIZE}.db "CREATE TABLE Btree (NUMBER INTEGER PRIMARY KEY);"
# Bulk-load the pre-generated SQL for this size.
sqlite3 ./database_${SIZE}.db <./sql/CreateDatabase/${SIZE}/CreateDatabase_1.sql
;;
esac
echo "finish creating databases"
exit 0
| true |
4c59b90c933b8ea2135fe5c50a56843ed8b6d415 | Shell | adeelmalik78/dxapikit | /API/jetstream_container_active_branch_at_timestamp.sh | UTF-8 | 7,658 | 3.5 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2017 by Delphix. All rights reserved.
#
# Program Name : jetstream_container_active_branch_at_timestamp_jq.sh
# Description : Delphix API to get JetStream Active Branch at Timestamp
# Author : Alan Bitterman
# Created : 2017-11-20
# Version : v1.2
#
# Requirements :
# 1.) curl and jq command line libraries
# 2.) Populate Delphix Engine Connection Information . ./delphix_engine.conf
# 3.) Include ./jqJSON_subroutines.sh
# 4.) Change values below as required
#
# Interactive Usage:
# ./jetstream_container_active_branch_at_timestamp_jq.sh
#
# Non-interactive Usage:
# ./jetstream_container_active_branch_at_timestamp_jq.sh [template_name] [container_name] [timestamp]
# ./jetstream_container_active_branch_at_timestamp_jq.sh [template_name] [container_name] [YYYY-MM-DDTHH:MI:SS.FFFZ]
#
#########################################################
# DELPHIX CORP #
# Please make changes to the parameters below as req'd! #
#########################################################
#########################################################
## Parameter Initialization ...
# delphix_engine.conf provides BaseURL, DMUSER, DMPASS, COOKIE, CONTENT_TYPE.
. ./delphix_engine.conf
#
# Default Values if not provided on Command Line ...
#
# For non-interactive defaults ...
#
#DEF_JS_TEMPLATE="tpl"
#DEF_JS_CONTAINER_NAME="dc"
#DEF_TS="2017-12-03T21:11:00.000Z"
#
# For full interactive option, set default values to nothing ...
#
DEF_JS_TEMPLATE=""
DEF_JS_CONTAINER_NAME=""
DEF_TS=""
#########################################################
# NO CHANGES REQUIRED BELOW THIS POINT #
#########################################################
#########################################################
## Subroutines ...
# jqJSON_subroutines.sh provides RestSession and jqParse.
source ./jqJSON_subroutines.sh
#########################################################
## Session and Login ...
echo "Authenticating on ${BaseURL}"
RESULTS=$( RestSession "${DMUSER}" "${DMPASS}" "${BaseURL}" "${COOKIE}" "${CONTENT_TYPE}" )
#echo "Results: ${RESULTS}"
if [ "${RESULTS}" != "OK" ]
then
   echo "Error: Exiting ..."
   exit 1;
fi
#########################################################
## Get Template Reference ...
#echo "Getting Jetstream Template Reference ..."
STATUS=`curl -s -X GET -k ${BaseURL}/jetstream/template -b "${COOKIE}" -H "${CONTENT_TYPE}"`
RESULTS=$( jqParse "${STATUS}" "status" )
#echo "${STATUS}" | jq "."
JS_TEMPLATE="${1}"
if [[ "${JS_TEMPLATE}" == "" ]]
then
   ZTMP="Template Name"
   if [[ "${DEF_JS_TEMPLATE}" == "" ]]
   then
      TMP=`echo "${STATUS}" | jq --raw-output '.result[] | .name '`
      echo "---------------------------------"
      echo "${ZTMP}s: [copy-n-paste]"
      echo "${TMP}"
      echo " "
      echo "Please Enter ${ZTMP} (case sensitive): "
      read JS_TEMPLATE
      if [ "${JS_TEMPLATE}" == "" ]
      then
         echo "No ${ZTMP} Provided, Exiting ..."
         exit 1;
      fi
   else
      echo "No ${ZTMP} Provided, using Default ..."
      JS_TEMPLATE=${DEF_JS_TEMPLATE}
   fi
fi
echo "template name: ${JS_TEMPLATE}"
#
# Parse ...
# (jq select() resolves the human-readable template name to its API object
# reference, which later calls require)
#
JS_TPL_REF=`echo "${STATUS}" | jq --raw-output '.result[] | select(.name=="'"${JS_TEMPLATE}"'") | .reference '`
echo "template reference: ${JS_TPL_REF}"
if [[ "${JS_TPL_REF}" == "" ]]
then
   echo "${ZTMP} Reference ${JS_TPL_REF} for ${JS_TEMPLATE} not found, Exiting ..."
   exit 1
fi
#########################################################
## Get container reference...
#echo "Getting Jetstream Template Container Reference ..."
STATUS=`curl -s -X GET -k ${BaseURL}/jetstream/container -b "${COOKIE}" -H "${CONTENT_TYPE}"`
RESULTS=$( jqParse "${STATUS}" "status" )
#echo "${STATUS}" | jq "."
JS_CONTAINER_NAME="${2}"
if [[ "${JS_CONTAINER_NAME}" == "" ]]
then
   ZTMP="Container Name"
   if [[ "${DEF_JS_CONTAINER_NAME}" == "" ]]
   then
      TMP=`echo "${STATUS}" | jq --raw-output '.result[] | select (.template=="'"${JS_TPL_REF}"'") | .name '`
      echo "---------------------------------"
      echo "${ZTMP}s: [copy-n-paste]"
      echo "${TMP}"
      echo " "
      echo "Please Enter ${ZTMP} (case sensitive): "
      read JS_CONTAINER_NAME
      if [ "${JS_CONTAINER_NAME}" == "" ]
      then
         echo "No ${ZTMP} Provided, Exiting ..."
         exit 1;
      fi
   else
      echo "No ${ZTMP} Provided, using Default ..."
      JS_CONTAINER_NAME=${DEF_JS_CONTAINER_NAME}
   fi
fi
echo "template container name: ${JS_CONTAINER_NAME}"
JS_CONTAINER_REF=`echo "${STATUS}" | jq --raw-output '.result[] | select(.template=="'"${JS_TPL_REF}"'" and .name=="'"${JS_CONTAINER_NAME}"'") | .reference '`
echo "template container reference: ${JS_CONTAINER_REF}"
if [[ "${JS_CONTAINER_REF}" == "" ]]
then
   echo "${ZTMP} Reference ${JS_CONTAINER_REF} for ${JS_CONTAINER_NAME} not found, Exiting ..."
   exit 1
fi
JS_DC_ACTIVE_BRANCH=`echo "${STATUS}" | jq --raw-output '.result[] | select(.template=="'"${JS_TPL_REF}"'" and .name=="'"${JS_CONTAINER_NAME}"'") | .activeBranch '`
echo "Container Active Branch Reference: ${JS_DC_ACTIVE_BRANCH}"
# lastUpdated is only used below as a hint when prompting for the timestamp.
JS_DC_LAST_UPDATED=`echo "${STATUS}" | jq --raw-output '.result[] | select(.template=="'"${JS_TPL_REF}"'" and .name=="'"${JS_CONTAINER_NAME}"'") | .lastUpdated '`
#########################################################
#
# Get Remaining Command Line Parameters ...
#
#
# Timestamp ...
#
TS="${3}"
if [[ "${TS}" == "" ]]
then
   if [[ "${DEF_TS}" == "" ]]
   then
      echo "---------------------------------"
      echo "Timestamp Format: YYYY-MM-DDTHH:MI:SS.FFFZ"
      echo "Container Last Updated: ${JS_DC_LAST_UPDATED}"
      echo "Please Enter Timestamp: "
      read TS
      if [[ "${TS}" == "" ]]
      then
         echo "No Timestamp Name Provided, Exiting ..."
         exit 1;
      fi
   else
      echo "No Timestamp Provided, using Default ..."
      TS=${DEF_TS}
   fi
fi
#########################################################
#
# jetstream container [container_name] getActiveBranchAtTime ...
#
# Request body for the getActiveBranchAtTime API call; posted via the
# here-document on stdin (--data @-).
json="
{
  \"type\": \"JSDataContainerActiveBranchParameters\",
  \"time\": \"${TS}\"
}"
echo "JSON: ${json}"
STATUS=`curl -s -X POST -k --data @- $BaseURL/jetstream/container/${JS_CONTAINER_REF}/getActiveBranchAtTime -b "${COOKIE}" -H "${CONTENT_TYPE}" <<EOF
${json}
EOF
`
RESULTS=$( jqParse "${STATUS}" "status" )
if [ "${RESULTS}" != "OK" ]
then
   echo "${RESULTS}"
   echo "Error: Exiting ..."
   exit 1;
fi
#echo "${STATUS}" | jq "."
#echo "Get Active Branch At Time Results: ${STATUS}"
#########################################################
## Get Branch Reference ...
JS_BRANCH_REF=$( jqParse "${STATUS}" "result" )
echo "Branch Reference: ${JS_BRANCH_REF}"
#########################################################
## Get Branch Name ...
STATUS=`curl -s -X GET -k ${BaseURL}/jetstream/branch/${JS_BRANCH_REF} -b "${COOKIE}" -H "${CONTENT_TYPE}"`
RESULTS=$( jqParse "${STATUS}" "status" )
#echo "${STATUS}" | jq "."
ACTIVE_BRANCH_NAME=`echo "${STATUS}" | jq --raw-output '.result.name'`
echo "Active Branch at ${TS}: ${ACTIVE_BRANCH_NAME}"
############## E O F ####################################
echo " "
echo "Done ..."
echo " "
exit 0
| true |
88de570e4b89c19b103b43d2b233e2358a37a923 | Shell | bakterian/WeMosSketches | /AudioPlayer/installLibraries.bash | UTF-8 | 2,234 | 4.375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# useful functions
# Abort the whole script when a required executable is not available on PATH.
# Arguments: $1 - name of the binary to look for
checkIfPrereqPresent ()
{
	# Quote "$1" so an empty or space-containing argument cannot word-split;
	# 'command -v' is the portable way to test for availability.
	command -v "$1" >/dev/null 2>&1 || { echo "I require a binary called: $1 , but it's not installed. Aborting."; exit 1; }
}
# Abort the whole script when directory $1 does not exist.
# Arguments: $1 - directory path that must be present
exitIfPathDoesNotExits()
{
	# Quote "$1": unquoted, an empty argument collapsed the test to '[ ! -d ]',
	# which evaluated to false and silently skipped the check.
	if [ ! -d "$1" ]; then
		echo "The path: \"$1\" does not exit have you installed/downloaded everything? Aborting";
		exit 1;
	fi
}
# Create directory $1 unless it already exists.
# Arguments: $1 - directory path (may contain spaces)
createDirWhenNotFound ()
{
	# Quoting "$1" keeps paths with spaces as a single mkdir operand.
	if [ ! -d "$1" ]; then
		mkdir "$1"
	fi
}
# Recursively delete directory $1 if it exists (verbose); no-op otherwise.
# Arguments: $1 - directory path to remove
deleteDirIfFound ()
{
	if [ -d "$1" ]; then
		echo "removing \"$1\""
		# '--' stops option parsing so a path starting with '-' cannot be
		# misread as an rm flag; quoting prevents word splitting.
		rm -rfv -- "$1"
	fi
}
# Verify the required tools before doing any work.
checkIfPrereqPresent jq
checkIfPrereqPresent unzip
checkIfPrereqPresent wget
# CONFIGURATION VARIABLES
CONFIG_FOLDER_PATH="../../Esp8266-Arduino-Makefile"
CONFIG_FILE_PATH="$CONFIG_FOLDER_PATH/config.json"
# cut -d '"' -f 2 strips the surrounding quotes from jq's JSON output.
ESP_VERSION=`cat $CONFIG_FILE_PATH | jq '.espVersions.ESP8266_VER' | cut -d "\"" -f 2`
DOWNLOAD_FOLDER=`cat $CONFIG_FILE_PATH | jq '.paths.cacheFolder' | cut -d "\"" -f 2`
LIBS_FOLDER="$CONFIG_FOLDER_PATH/esp8266-$ESP_VERSION/libraries"
# LIBRARIES
# Parallel arrays: LIB_LINKS[i] is downloaded into folder LIB_NAMES[i].
LIB_LINKS=(`cat libraries.json | jq --compact-output '.[].weblink' | cut -d "\"" -f 2`)
LIB_NAMES=(`cat libraries.json | jq --compact-output '.[].folderName' | cut -d "\"" -f 2`)
# Make sure that the folder names in the libraries.json match the include libraries header files <fileName.h>
# This is how we detetmine in espXArduino.mk which Esp Arduino libraries have to be compiled.
# Check if the esp-8266 project and other depencies are there
exitIfPathDoesNotExits $CONFIG_FOLDER_PATH
exitIfPathDoesNotExits $LIBS_FOLDER
# download folder creation if not already exists
createDirWhenNotFound $DOWNLOAD_FOLDER
# iterate over the libraries, download them and extract them alongside the ESP Core libs
i=0
for link in "${LIB_LINKS[@]}"
do
	echo "downloading from $link"
	wget --no-clobber -q $link -P $DOWNLOAD_FOLDER/${LIB_NAMES[$i]}
	zipFileName=$(basename $link)
	# First listed entry of the archive (presumably its single top-level
	# folder); the final sed drops the trailing character ('/').
	zipInnerFolder=`unzip -qql $DOWNLOAD_FOLDER/${LIB_NAMES[$i]}/$zipFileName | head -n1 | tr -s ' ' | cut -d' ' -f5- | sed 's/.$//'`
	unzip -o $DOWNLOAD_FOLDER/${LIB_NAMES[$i]}/$zipFileName -d $DOWNLOAD_FOLDER/${LIB_NAMES[$i]}
	# Replace any previous copy of the library before installing the new one.
	deleteDirIfFound $LIBS_FOLDER/${LIB_NAMES[$i]}
	cp -a $DOWNLOAD_FOLDER/${LIB_NAMES[$i]}/$zipInnerFolder/. $LIBS_FOLDER/${LIB_NAMES[$i]}/
	((i++))
done
| true |
56708fb2af6c47582cfe4c8e6c7fd91da5e27932 | Shell | g-six/cheatsheets | /aws/ec2/deregister-image.sh | UTF-8 | 334 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Deregister the AMI given as $1 and poll its state (via the sibling
# get-image-state.sh helper) until it leaves "pending"; print the final state.
if [[ -z "$1" ]] ; then
  exit 1
fi
AMI_ID=$1
AMI_STATE=$(bash ./get-image-state.sh "$AMI_ID")
if [[ "$AMI_STATE" = "available" ]] ; then
  aws ec2 deregister-image --image-id "$AMI_ID"
  # Mark as pending so the wait loop below runs at least once.
  AMI_STATE=pending
fi
# Poll every 3 seconds until the helper no longer reports "pending".
while [[ "$AMI_STATE" = "pending" ]]
do
  sleep 3
  AMI_STATE=$(bash ./get-image-state.sh "$AMI_ID")
done
# printf avoids echo misparsing a state that starts with '-'.
printf '%s\n' "$AMI_STATE"
| true |
9d46d56788e16fae4269a6599bcbd0fadc564452 | Shell | BGCX262/zumastor-svn-to-git | /branches/unittests/test/scripts/zuminstall.sh | UTF-8 | 5,838 | 4.15625 | 4 | [] | no_license | #!/bin/sh
#set -x
#
# zuminstall.sh: build (or locate) the Zumastor .deb packages, generate a
# remote install script, and push + run it on each target host via ssh/scp.
#
# Potentially modifiable parameters.
#
# Working directory, where images and ancillary files live.
WORKDIR=~/local
# Where the svn output goes.
BUILDLOG=build.log
# Name of target Zumastor support directory. This must match the name in the
# preseed file! Note: No leading "/"!
ZUMADIR=zinst
# Where we put the build.
BUILDDIR=`pwd`/build
# Default config file.
CONFIG=${BUILDDIR}/zumastor/test/config/qemu-config
PACKAGEDIR=""
usage()
{
	echo "Usage: $0 [-p <path>] [-c <config file>] <hostname/IP address>" >&2
	echo "Where <path> is the directory that contains the Zumastor .deb files" >&2
	echo "and <config file> is a kernel config file to be used for the build." >&2
	exit 1
}
# Check out (or update) the zumastor sources and build the .deb packages
# into ${BUILDDIR}; all svn/build output is appended to ${BUILDLOG}.
update_build()
{
	CURDIR=`pwd`
	mkdir -p ${BUILDDIR}
	cd ${BUILDDIR}
	echo -ne Getting zumastor sources from subversion ...
	if [ -e zumastor -a -f zumastor/build_packages.sh ]; then
		svn update zumastor >> $BUILDLOG 2>&1 || exit $?
	else
		svn checkout http://zumastor.googlecode.com/svn/trunk/ zumastor >> $BUILDLOG 2>&1 || exit $?
	fi
	if [ ! -f zumastor/build_packages.sh ]; then
		echo "No build_packages script found!" >&2
		usage
	fi
	if [ ! -f ${CONFIG} ]; then
		echo "No kernel config file \"${CONFIG}\" found!" >&2
		exit 1
	fi
	cd ${CURDIR}
	sh ${BUILDDIR}/zumastor/build_packages.sh ${CONFIG} >>${BUILDLOG} 2>&1
}
while getopts "c:p:" option ; do
	case "$option" in
	c) CONFIG="$OPTARG";;
	p) PACKAGEDIR="$OPTARG";;
	*) usage;;
	esac
done
shift $(($OPTIND - 1))
if [ $# -lt 1 ]; then
	usage
fi
#
# If the package directory doesn't start with a "/" then it's relative to
# the current directory.
#
if [ "${PACKAGEDIR}" != "" -a "${PACKAGEDIR:0:1}" != "/" ]; then
	PACKAGEDIR=`pwd`/${PACKAGEDIR}
fi
#
# Likewise, if the config file doesn't start with a "/" then it's relative
# to the current directory.
#
if [ "${CONFIG}" != "" -a "${CONFIG:0:1}" != "/" ]; then
	CONFIG=`pwd`/${CONFIG}
fi
#
# If we didn't get a package directory on the command line in which to look
# for the zumastor .deb files, just check out the latest source, build it
# and use the .deb files so produced.
#
if [ "${PACKAGEDIR}" = "" ]; then
	#
	# Verify that the config file actually exists.
	#
	if [ ! -f "${CONFIG}" ]; then
		echo "Config file ${CONFIG} doesn't exist!" >&2
		exit 1
	fi
	echo "No package directory, building new packages in ${BUILDDIR} with config file ${CONFIG}." >&2
	update_build
	PACKAGEDIR=${BUILDDIR}
else
	#
	# Verify that the package directory actually exists.
	#
	if [ ! -d "$PACKAGEDIR" ]; then
		echo "Package dir ${PACKAGEDIR} doesn't exist!" >&2
		exit 1
	fi
	echo "Using package directory ${PACKAGEDIR}." >&2
fi
#
# Find the Zumastor packages. We want the latest, therefore the last in the
# alphabetically-sorted list.
#
cd $PACKAGEDIR
KHDR=`ls kernel-headers-*.deb | tail -1`
KIMG=`ls kernel-image-*.deb | tail -1`
DDSN=`ls ddsnap*.deb | tail -1`
ZUMA=`ls zumastor*.deb | tail -1`
fail=0
if [ ! -f "${KHDR}" ]; then
	echo "No kernel-headers package found!" >&2
	fail=1
fi
if [ ! -f "${KIMG}" ]; then
	echo "No kernel-image package found!" >&2
	fail=1
fi
if [ ! -f "${DDSN}" ]; then
	echo "No ddsnap package found!" >&2
	fail=1
fi
if [ ! -f "${ZUMA}" ]; then
	echo "No zumastor package found!" >&2
	fail=1
fi
PACKAGES="${PACKAGEDIR}/${KHDR} ${PACKAGEDIR}/${KIMG} ${PACKAGEDIR}/${DDSN} ${PACKAGEDIR}/${ZUMA}"
cd ${WORKDIR}
#
# Create the zumastor install script.
# The heredoc delimiter is unquoted, so ${ZUMADIR} and the package names are
# substituted NOW, while $? and `hostname` are escaped/evaluated on the
# target at install time.
#
cat <<EOF_zinstall.sh >zinstall.sh
#!/bin/sh
#
# Install dependencies, including test dependencies.
#
/usr/bin/apt-get -y install tree
/usr/bin/apt-get -y install dmsetup
/usr/bin/apt-get -y install openssh-server
/usr/bin/apt-get -y install make
/usr/bin/apt-get -y install gcc
/usr/bin/apt-get -y install libc6-dev
cd /${ZUMADIR}
#
# Install the packages that have already been placed here.
#
dpkg -i ${KHDR}
if [ $? -ne 0 ]; then
	echo "dpkg -i ${KHDR} failed: $?!" >&2
	exit 1
fi
dpkg -i ${KIMG}
if [ $? -ne 0 ]; then
	echo "dpkg -i ${KIMG} failed: $?!" >&2
	exit 1
fi
dpkg -i ${DDSN}
if [ $? -ne 0 ]; then
	echo "dpkg -i ${DDSN} failed: $?!" >&2
	exit 1
fi
dpkg -i ${ZUMA}
if [ $? -ne 0 ]; then
	echo "dpkg -i ${ZUMA} failed: $?!" >&2
	exit 1
fi
#
# Kernels running under qemu need 'noapic.' Figure out whether we need it
# and, if so, add it to our kernel line.
#
grep "^kernel.*noapic" /boot/grub/menu.lst
if [ $? -eq 0 ]; then
	sed --in-place '/^kernel.*zumastor/s/$/ noapic/' /boot/grub/menu.lst
fi
#
# Set up ssh if necessary.
#
mkdir -p /root/.ssh
if [ ! -f /root/.ssh/id_rsa.pub ]; then
	ssh-keygen -t rsa -f /root/.ssh/id_rsa -N "" -C root@`hostname`
fi
#
# Everything is installed, reboot the system into the zumastor kernel.
#
shutdown -r now &
exit 0
EOF_zinstall.sh
chmod 755 zinstall.sh
for TARGET in $@
do
	#
	# Attempt to copy our public key into the authorized_keys file on the
	# target. We skip the target if this fails.
	#
	ssh -o StrictHostKeyChecking=no root@${TARGET} "cat >>~/.ssh/authorized_keys" <~/.ssh/id_rsa.pub
	if [ $? -ne 0 ]; then
		echo "\"ssh root@${TARGET}\" failed!" >&2
		continue
	fi
	#
	# Now make the working directory. Hopefully we won't have any more
	# password or passphrase prompts.
	#
	ssh -o StrictHostKeyChecking=no root@${TARGET} "/bin/mkdir -p /${ZUMADIR}"
	if [ $? -ne 0 ]; then
		echo "ssh root@${TARGET} (mkdir) failed!" >&2
		continue
	fi
	#
	# Copy the packages and install script to the target.
	#
	scp -l 10240 -C ${PACKAGES} zinstall.sh root@${TARGET}:/${ZUMADIR}
	if [ $? -ne 0 ]; then
		echo "scp root@${TARGET} failed!" >&2
		continue
	fi
	#
	# Run the install script. This will install the packages, generate
	# ssh keys if necessary and reboot the system into the Zumastor
	# kernel.
	#
	ssh -o StrictHostKeyChecking=no root@${TARGET} "cd /${ZUMADIR}; ./zinstall.sh"
	if [ $? -ne 0 ]; then
		echo "ssh root@${TARGET} (zinstall) failed!" >&2
		continue
	fi
done
exit 0
| true |
9afe008cc91a20dad7dc13f3810246a4edd38561 | Shell | z-salang/shell | /while_simple.sh | UTF-8 | 174 | 2.828125 | 3 | [] | no_license | #!/bin/bash
# Demo loop: count n from 0 up to totalSum in steps of 1000, printing the
# value before and after each increment.
n=0
totalSum=10000
while [ "$n" -lt "$totalSum" ]
do
	echo "Before add, n=$n"
	# POSIX arithmetic expansion replaces the deprecated 'let' builtin.
	n=$((n + 1000))
	echo "After add, n=$n"
	echo "========================"
	echo ""
done
| true |
fdfc1785d7ba20e2c1d0737010fb31655ccc850d | Shell | lukechampine/dotfiles | /.bashrc | UTF-8 | 1,547 | 2.890625 | 3 | [] | no_license | # colors
# ANSI color sequences for the prompt.  The \[ ... \] wrappers mark the
# bytes as non-printing so readline computes the prompt width correctly;
# the '0;' variants are normal weight, '1;' are bold.
black1='\[\e[0;30m\]'
black2='\[\e[1;30m\]'
red1='\[\e[0;31m\]'
red2='\[\e[1;31m\]'
green1='\[\e[0;32m\]'
green2='\[\e[1;32m\]'
yellow1='\[\e[0;33m\]'
yellow2='\[\e[1;33m\]'
blue1='\[\e[0;34m\]'
blue2='\[\e[1;34m\]'
purple1='\[\e[0;35m\]'
purple2='\[\e[1;35m\]'
cyan1='\[\e[0;36m\]'
cyan2='\[\e[1;36m\]'
white1='\[\e[0;37m\]'
white2='\[\e[1;37m\]'
# start dir
cd ~
# prompt: "[user@host] [cwd]" then a newline and "$»" (\273 is » in the
# terminal's charset); \e[00m resets attributes after the prompt.
export PS1="$white1[$cyan2\u$white1@$green2\h$white1] $white1[$blue2\w$white1]\n$blue2\$$white2\273 \e[00m"
# aliases
alias ls="ls -h --color=auto"
alias cd..="cd .."
# 'rm' is shadowed by the recycle function below; use \rm for a real delete.
alias rm="recycle"
alias locate="locate -i"
alias wicd="sudo wicd-curses"
alias emerge="sudo emerge"
alias updatedb="sudo updatedb"
alias poweroff="sudo poweroff"
alias mountwin="sudo mount -t ntfs -o nls=utf8,umask=02222 /dev/sda1 /media/windows"
alias reboot="sudo reboot"
alias starwars="telnet towel.blinkenlights.nl"
# functions
# Soft-delete: move the given file into ~/.recycle instead of removing it.
# When already inside the recycle bin, delete for real.  Warns once the bin
# grows beyond ~10000 KB.
# Arguments: $1 - file (or directory) to recycle
recycle() {
	# Match on $PWD directly: the old `pwd | grep .recycle` left the dot
	# unescaped (matching any character) and spawned two processes.
	case "$PWD" in
		*.recycle*)
			\rm -- "$1";;
		*)
			mv -- "$1" ~/.recycle/;;
	esac
	SIZE=$(du -s ~/.recycle/ | awk '{ print $1 }')
	if [ "$SIZE" -ge 10000 ]; then
		echo "Empty your recycle bin! (Size: $SIZE KB)"
	fi
}
# Edit a file with vim after saving a backup copy next to it as <file>.bck.
vack() { cp -- "$1" "$1.bck"; vim -- "$1"; }
# Swap a file with its .bck backup via a temporary .bck.swp name.
bckswp() { mv -- "$1" "$1.bck.swp"; mv -- "$1.bck" "$1"; mv -- "$1.bck.swp" "$1.bck"; }
# external scripts (personal helpers kept under ~/scripts)
alias vacate="/home/luke/scripts/vacate.sh"
# pounce is sourced (not executed) so it can affect the current shell.
alias pounce="source /home/luke/scripts/pounce.sh"
alias extract="/home/luke/scripts/extract.sh"
alias colors="/home/luke/scripts/colorer.sh 1 && /home/luke/scripts/colorer.sh 2 && /home/luke/scripts/colorer.sh 3"
| true |
44f27873c6ddd813f635331a3aa2fa380c618c92 | Shell | andrewgregory/pachooks | /scripts/snapshot-snapper | UTF-8 | 264 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/bash
# Create one single-type snapper snapshot, described as "alpm-<suffix>",
# for every configured snapper config.  The default list ('root') can be
# overridden by /etc/pachooks/snapshot-snapper.
suffix="$1"
configs=('root')
configfile='/etc/pachooks/snapshot-snapper'
if [ -f "$configfile" ]; then
    source "$configfile"
fi
for cfg in "${configs[@]}"; do
    /bin/snapper --config="$cfg" create --type=single --description=alpm-"$suffix"
done
| true |
2e48c7c45cc41361586c7cd27cd547ba8bb1e609 | Shell | dousha/bash-box | /dict | UTF-8 | 1,016 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Print the word's tense forms (past / done / -ing) extracted from an iciba
# dictionary JSON payload, or a placeholder when the entry has none.
# Arguments: $1 - full JSON response from the dictionary API
function extract_tenses(){
    # jq gathers the three tense arrays; the tr/sed chain then strips the
    # JSON punctuation ([ ] ") and the leading separator character so only
    # the words remain on a single line.
    tenses=$(echo "$1" |
    jq -r '.exchange|[.word_past[]?,.word_done[]?,.word_ing[]?]' |
    tr -d '\n' |
    sed 's/["]//g' |
    sed 's/\[//g' |
    sed 's/\]//g' |
    sed 's/ / /g' |
    sed 's/^.//')
    if [[ -z $tenses ]]; then
        echo -n '[No meaningful tenses]'
    else
        echo -n "$tenses"
    fi
}
# Print the part-of-speech labels and their meanings from an iciba
# dictionary JSON payload, one part of speech per line.
# Arguments: $1 - full JSON response from the dictionary API
function extract_meanings(){
    # jq emits [part, meanings...] arrays; sed strips the JSON punctuation,
    # merges "., " into ".", then inserts a newline before each "xx." part-
    # of-speech tag so every part starts on its own line.
    echo "$1" |
    jq -r '.symbols[]|.parts[]|[.part, .means[]]' |
    tr -d '\n' |
    sed 's/["]//g' |
    sed 's/\[//g' |
    sed 's/\]//g' |
    sed 's/ / /g' |
    sed 's/^.//' |
    sed 's/\.\,/./g' |
    sed 's/\([a-z][a-z]*\.\)/\n\1/g'
}
# --- main: look the word up and print name, tenses and meanings ----------
word="$1"
# API key for dict-co.iciba.com.
token='4C238C588582643EB8A45398960FA453'
url="http://dict-co.iciba.com/api/dictionary.php"
if [[ -z $word ]]; then
    echo 'Usage: dict <word>'
    # NOTE(review): 'exit -1' is reported to the caller as status 255.
    exit -1
fi
# -G sends the --data-urlencode pairs as query-string parameters.
json=`curl -Gs $url --data-urlencode "w=$word" --data-urlencode "type=json" --data-urlencode "key=$token"`
echo -n 'Word: '
echo "$json" | jq -r '.word_name'
echo -n 'Tenses: '
extract_tenses "$json"
echo ''
echo -n 'Meanings: '
extract_meanings "$json"
echo ''
| true |
d0a3408fc4d7098c4f49d9d2027e2e3e51620dbc | Shell | adhamu/dotfiles | /bin/bootstrap.sh | UTF-8 | 188 | 3.4375 | 3 | [] | no_license | export DOTFILES="${HOME}/dotfiles"
# Abort setup unless the dotfiles repo is checked out at $DOTFILES.
if [[ ! -d "$DOTFILES" ]]; then
  # \033[0;31m switches the terminal to red; the trailing \033[0m reset was
  # missing in the original, leaving the terminal red after exit.
  echo -e "\033[0;31mThe dotfiles repository must be cloned into your home directory.\n\n🚫 Exiting setup.\033[0m"
  exit 1
fi
| true |
c1a8577267ab5c0e68e748e6342ed041aa572d66 | Shell | dwillcox/aic-timescales | /run_models.sh | UTF-8 | 1,875 | 3.1875 | 3 | [] | no_license | #!/bin/bash -x
#PBS -N AIC
#PBS -l nodes=1:ppn=16
#PBS -l walltime=24:00:00
#PBS -V
#PBS -m abe
#PBS -M jwschwab@ucsc.edu
# Under PBS, PBS_O_WORKDIR is the submission directory; otherwise fall back
# to running model #1 from the current directory.
if [ -n "${PBS_O_WORKDIR}" ]; then
    cd ${PBS_O_WORKDIR}
    module load mesasdk/20180127
    export MESA_DIR=/pfs/jschwab/mesa-r10398
    export OMP_NUM_THREADS=16
else
    # assume you're already there
    PBS_O_WORKDIR=$(pwd)
    PBS_ARRAYID=1
fi
# rebuild MESA
# ./clean
#./mk
# make sure there's a place for the movies and output
mkdir -p movies
mkdir -p output
# define a function to run a named model
# Arguments: $1 - line number in the batch file, $2 - batch file name
do_one() {
    # get the relevant line from the batch file
    # (sed "Nq;d" prints only line N of the file)
    read ID INLIST_IO INLIST_VARIABLE TRANSITIONS_FILE <<< $(sed "${1}q;d" < $2)
    # use the main inlist in the submit directory
    export MESA_INLIST=${PBS_O_WORKDIR}/inlist
    # make a temporary directory
    TMPDIR=$(mktemp -d)
    cd ${TMPDIR}
    # cache locally
    mkdir -p caches
    export MESA_CACHES_DIR=$(pwd)/caches
    # setup inlists via soft links
    ln -sf ${PBS_O_WORKDIR}/inlist_fixed .
    ln -sf ${PBS_O_WORKDIR}/inlists/${INLIST_VARIABLE} inlist_variable
    ln -sf ${PBS_O_WORKDIR}/inlists/${INLIST_IO} inlist_io
    ln -sf ${PBS_O_WORKDIR}/inlist_pgstar .
    # softlink in some more stuff
    ln -sf ${PBS_O_WORKDIR}/history_columns.list .
    ln -sf ${PBS_O_WORKDIR}/profile_columns.list .
    ln -sf ${PBS_O_WORKDIR}/models .
    # per-model log directory, exposed to MESA as ./LOGS
    mkdir -p ${PBS_O_WORKDIR}/output/${ID}
    ln -sf ${PBS_O_WORKDIR}/output/${ID} LOGS
    # set up states and transitions
    ln -sf ${PBS_O_WORKDIR}/urca.net .
    ln -sf ${PBS_O_WORKDIR}/weak.net .
    ln -sf ${PBS_O_WORKDIR}/weak.states .
    ln -sf ${PBS_O_WORKDIR}/transitions/${TRANSITIONS_FILE} weak.transitions
    # run MESA
    ${PBS_O_WORKDIR}/star
    # make movie
    DATE=$(date +%F)
    images_to_movie.sh 'png/grid1*.png' ${PBS_O_WORKDIR}/movies/${ID}-${DATE}.mp4
}
# Run the model whose parameters are on line PBS_ARRAYID of AIC.batch.
do_one ${PBS_ARRAYID} AIC.batch
| true |
3114507c57202848376a25ee2670c70085cc51b2 | Shell | dTsitsigkos/Amnesia | /amnesiaApi.sh | UTF-8 | 1,828 | 3.921875 | 4 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | #!/bin/bash
error=""
# Accumulate a message in the global $error string when path $1 is missing;
# existing paths leave $error untouched.
check_path () {
	if [ ! -e "$1" ]; then
		error+="$1 not exists.\n"
	fi
}
# Amnesia anonymization endpoint.  Options parsed below:
#   -d <file>    dataset to anonymize
#   -t <file>    template file
#   --out <path> where curl writes the response
#   -del <char>  delimiter passed to the service
#   anything else is treated as a hierarchy file.
# HTTPS_URL="https://amnesia.openaire.eu/amnesia/anonymizedata"
HTTPS_URL="http://localhost:8181/anonymizedata"
# -w appends "httpcode=NNN" to curl's output so the HTTP status can be
# recovered later.
CURL_CMD="curl -s -w httpcode=%{http_code}"
dataset=""
template=""
hiers=""
pathout=""
del=""
exportpath=""
# Walk the arguments pairwise; the loop stops at the first empty argument.
while [ "$1" != "" ]; do
	if [ "$1" == "-d" ]; then
		shift
		check_path $1
		dataset=" --form files=@$1 "
	elif [ "$1" == "-t" ]; then
		shift
		check_path $1
		template=" --form files=@$1 "
	elif [ "$1" == "--out" ]; then
		shift
		pathout="--out $1"
		exportpath="$1"
	elif [ "$1" == "-del" ]; then
		shift
		del=" --form del=$1"
	else
		check_path $1
		hiers+=" --form files=@$1 "
	fi
	# Shift all the parameters down by one
	shift
done
# echo "$dataset $template $hiers $pathout"
# echo "$CURL_CMD $dataset $template $hiers $del $pathout $HTTPS_URL"
# Abort before contacting the server when any referenced file was missing.
if [ "$error" != "" ]; then
	echo -e "$error"
	exit 2
fi
# Run the request; capture curl's exit code without aborting the script.
CURL_RETURN_CODE=0
CURL_OUTPUT=$(${CURL_CMD} ${dataset} ${template} ${hiers} ${del} ${pathout} ${HTTPS_URL} 2> /dev/null) || CURL_RETURN_CODE=$?
# The -w format appended "httpcode=NNN"; strip everything up to that marker.
httpCode=$(echo "${CURL_OUTPUT}" | sed -e 's/.*httpcode=//')
# String comparisons here: httpCode can be empty or non-numeric when curl
# fails, and the original '[[ ... -ne "" ]]' was an arithmetic test that
# errors on empty input.
if [[ -n ${httpCode} && ${httpCode} != 200 ]]; then
	# echo "Curl operation/command failed due to server return code - ${httpCode}"
	if [ -e "$exportpath" ]; then
		cat "$exportpath"
	else
		# Fixed: the original printed $httpcode (wrong case), which is unset.
		echo "Httpcode is $httpCode"
	fi
	exit 1
fi
if [ ${CURL_RETURN_CODE} -ne 0 ]; then
	echo "Curl connection failed with return code - ${CURL_RETURN_CODE}"
else
	# An empty $template means the call was a template download request.
	if [ "$template" == "" ]; then
		echo "Template was downloaded successfully in $exportpath"
	else
		echo "The dataset was anonymized successfully and the file was downloaded in $exportpath"
	fi
fi
| true |
81e004f6fc3b22e09bb45a8bdf8dabc84dba93c2 | Shell | kingsalman99/ceker_cecetele | /instalar_bot.sh | UTF-8 | 1,408 | 2.8125 | 3 | [] | no_license | clear
# (Re)start a Telegram bot.  On the first run this installs the PHP/Redis/
# screen dependencies, downloads the bot bundle, asks for the bot token and
# writes dadosBot.ini; on later runs it just restarts the screen session.
if [ ! -d "bot" ]; then
mkdir bot
fi
cd bot
# An existing dadosBot.ini means the bot is already installed: restart it.
if [ -e "dadosBot.ini" ] ; then
screen -X -S bot quit > /dev/null
screen -dmS bot php bot.php
echo "Bot foi reiniciado e está executano em segundo plano"
else
echo "Instalando dependencias, aguarde..."
#add-apt-repository ppa:ondrej/php > /dev/null 2>&1
# Both the php5 and php package families are attempted so that one of them
# matches whatever the distribution provides.
apt-get update > /dev/null 2>&1
apt-get upgrade -y > /dev/null 2>&1
apt-get install php -y > /dev/null 2>&1
apt-get install php-redis -y > /dev/null 2>&1
apt-get install php-curl -y > /dev/null 2>&1
apt-get install php5 -y > /dev/null 2>&1
apt-get install php5-redis -y > /dev/null 2>&1
apt-get install php5-curl -y > /dev/null 2>&1
apt-get install redis-server -y > /dev/null 2>&1
apt-get install redis -y > /dev/null 2>&1
apt-get install screen -y > /dev/null 2>&1
apt-get install zip -y > /dev/null 2>&1
# Fetch the helper script and the bot bundle.
wget https://www.dropbox.com/s/j9bpk6m27egkwkp/gerarusuario-sshplus.sh?dl=0 -O gerarusuario.sh; chmod +x gerarusuario.sh > /dev/null
wget https://github.com/Davidsdy/bot/raw/master/bot.zip -O bot.zip && unzip bot.zip > /dev/null
rm dadosBot.ini > /dev/null
clear
# Detect the machine's public IPv4 address.
ip=$(wget -qO- ipv4.icanhazip.com/)
echo "Digite o toke do seu bot:"
read token
clear
# Write the bot configuration (public IP, token, default limit).
echo "ip=$ip
token=$token
limite=100" >> dadosBot.ini
# Launch the bot in a detached screen session.
screen -dmS bot php bot.php
rm bot.zip
echo "Pronto, o bot esta executando em segundo plano
Agradeça a @httd1"
fi | true |
618031ef67be55ff7d1e32ec8e34f6b62043d90e | Shell | sirishbandi/Blockchain | /restart.sh | UTF-8 | 733 | 3.078125 | 3 | [] | no_license | #! /bin/bash
# Rebuild the two-node blockchain test network: pull the latest image,
# remove all existing blockchain containers, start the init node, write a
# test block, then bring up a second peer after the init node settles.
echo "Pulling image"
docker pull ghcr.io/sirishbandi/blockchain:main
echo "Killing all containers"
# Force-remove every container whose 'docker ps -a' line mentions blockchain.
for i in $(docker ps -a | grep blockchain | awk '{print $1}'); do docker rm "$i" -f; done
echo "Starting Init"
CTR_ID=$(docker run --net mynet -d ghcr.io/sirishbandi/blockchain:main ./blockchain --init=true --address=172.18.0.2:8080)
docker logs "$CTR_ID"
echo "Container ID: $CTR_ID"
echo ""
echo "writing data.."
curl 172.18.0.2:8080/addblock -d "testing"
echo "Sleeping for 25sec"
sleep 25
echo "Starting 2nd server"
CTR_ID=$(docker run --net mynet --ip 172.18.0.5 -d ghcr.io/sirishbandi/blockchain:main ./blockchain --address=172.18.0.2:8080 --myaddress=172.18.0.5:8080)
docker logs "$CTR_ID"
echo "Container ID: $CTR_ID"
| true |
377fd292394f27fccc879ebb6c7f015ce454c04a | Shell | anorouzi/CENTENNIAL | /models/tools/scripts/yang2otherFormats.sh | UTF-8 | 771 | 3.375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# yang2otherFormats.sh
# Converts the YANG models under './yang' into three derived formats:
#   ./yin      - plain YIN produced with pyang (via yang2yin.sh)
#   ./yang-odl - OpenDaylight-flavoured YANG (via yang2odl.sh, thanks Paolo)
#   ./yin-odl  - YIN generated from the ODL YANG output
#
# Copyright (C) 2016 highstreet technologies GmbH and others
#
# Author: Martin Skorupski <martin.skorupski@highstreet-technologies.com>
#
DIR_SCRIPTS=./tools/scripts
DIR_YANG=./yang
DIR_YIN=./yin
DIR_YANG_ODL=./yang-odl
DIR_YIN_ODL=./yin-odl

echo "Converting YANG to YIN"
mkdir -p "${DIR_YIN}"
"${DIR_SCRIPTS}/yang2yin.sh" "${DIR_YANG}" "${DIR_YIN}"
echo ""

echo "Converting YANG to YANG-ODL"
mkdir -p "${DIR_YANG_ODL}"
"${DIR_SCRIPTS}/yang2odl.sh" "${DIR_YANG}" "${DIR_YANG_ODL}"
echo ""

echo "Converting YANG-ODL to YIN-ODL"
mkdir -p "${DIR_YIN_ODL}"
"${DIR_SCRIPTS}/yang2yin.sh" "${DIR_YANG_ODL}" "${DIR_YIN_ODL}"
echo ""
| true |
a872847e0816b411b463cf9975ab2c908bf572a4 | Shell | walaceborges/Trybe-Exercises | /01-fundamentos/bloco-01-unix-bash-e-shell-script/dia-03-unix-e-bash-part-1/parte-2.sh | UTF-8 | 2,234 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Pause (seconds) between exercises so the output can be read.
delay=2
# Exercise 1: enter the working directory and download the countries list.
echo -e "\nAbrindo diretorio unix_tests"
cd unix_tests
echo -e "\nBaixando arquivo countries.txt"
curl -s -o countries.txt "https://gist.githubusercontent.com/kalinchernev/486393efcca01623b18d/raw/daa24c9fea66afb7d68f8d69f0c4b8eeb9406e83/countries"
sleep $delay
# Exercise 2: show the file contents.
echo -e "\nMostrando o conteudo do arquivo countries.txt"
sleep $delay
cat countries.txt
sleep $delay
# Exercise 3: page through the file with less (search with /Zambia).
echo -e "\nMostrando o conteudo do arquivo countries.txt, use /Zambia para fazer a pesquisa, aperte n para a proxima ocorrencia e p para a anterior"
sleep $delay
less countries.txt
# Exercise 4: inside less, use n for the next match and p for the previous.
# Exercise 5: search for "Brazil" (case sensitive).
echo -e "\nBuscando Brazil no arquivo countries.txt"
grep "Brazil" countries.txt || echo "Não encontrado"
sleep $delay
# Exercise 6: same search in lowercase (expected to miss).
echo -e "\nBuscando brazil (lowercase) no arquivo countries.txt"
grep "brazil" countries.txt || echo "Não encontrado"
sleep $delay
# Build phrases.txt (10 Chuck Norris jokes) for the next exercises.
echo -e "\nCriando arquivo phrases.txt com frases do Chuck Norris"
for ((i = 0; i < 10; i++))
do
  joke=$(jq .value <(curl -s https://api.chucknorris.io/jokes/random))
  echo $joke
  echo $joke >> phrases.txt
done
# Exercise 7: lines NOT containing "fox".
echo -e "\nBuscando frases que nao contenham a palavra fox"
grep -v "fox" phrases.txt
sleep $delay
# Exercise 8: word count of the file.
echo -e "\nContando o numero de palavras do arquivo phrases"
echo -e "Total de palavras: $(cat phrases.txt | wc -w)"
sleep $delay
# Exercise 9: line count of the file.
echo -e "\nContando o numero de linhas do arquivo phrases"
echo -e "Total de linhas: $(cat phrases.txt | wc -l)"
sleep $delay
# Exercise 10: create two empty files.
echo -e "\nCriando os arquivos empty.tbt e empty.pdf"
touch empty.tbt empty.pdf
sleep $delay
# Exercise 11: list every file, including hidden ones.
echo -e "\nListando todos os arquivos do diretorio unix_tests"
ls -a
sleep $delay
# Exercise 12: list files ending in txt.
echo -e "\nListando todos os arquivos do diretorio unix_tests que terminem com txt"
ls -a *.txt
sleep $delay
# Exercise 13: list files ending in tbt or txt (brace expansion).
echo -e "\nListando todos os arquivos do diretorio unix_tests que terminem com tbt ou txt"
ls -a *.{tbt,txt}
sleep $delay
# Exercise 14: open the ls manual page.
echo -e "\nAcessando manual do comando ls"
man ls
sleep $delay
| true |
7989fe14de27323c86b97e07ed3ff09ab69fd428 | Shell | apanisile/devops | /bash-scripting/install.sh | UTF-8 | 2,819 | 4.25 | 4 | [] | no_license | #!/bin/bash
# Interactive apt/systemd helper.  Menu loop:
#   1) install a package (optionally refreshing the apt index first),
#   2) check whether a service is enabled (offering to enable it),
#   3) check whether a service is active.
# Fixes over the original:
#   * '[[ $answer = "Y" || "y" ]]' and '[[ $again != "Y" || "y" ]]' were
#     always true (a bare non-empty string is truthy), so the enable prompt
#     was ignored and the loop always exited after one pass.
#   * user-supplied values are quoted, and 'read -r' keeps backslashes.
while true
do
    echo "Hey there! what would you like to do today? "
    echo "1. Install a software"
    echo "2. Check if a software is enabled:"
    echo "3. Check if a software is running: "
    read -r -p "The selected option is: " choice
    if [ "$choice" -eq 1 ]
    then
        read -r -p "Do you want to update the system repo? (Y)es or (N)o: " update
        if [ "$update" = "Y" ]
        then
            # Refresh the package index before installing.
            sudo apt update
        else
            echo "Skipping to the installation process: "
        fi
        # Which package should be installed?
        read -r -p "What software would you like to install: " software
        # The package/service name drives the rest of this branch.
        l=$software
        echo "Declaration: You are about to install ${l} on your system"
        # Install, then enable and start the matching service.
        sudo apt install "${l}" -y
        sudo systemctl enable "${l}"
        sudo systemctl start "${l}"
    elif [ "$choice" -eq 2 ]
    then
        # Which service should be checked?
        read -r -p "What service would you like to enable: " software
        l=$software
        # Report whether $l is enabled; offer to enable it if not.
        check_service_enabled() {
            service_enabled=$(sudo systemctl is-enabled "$l")
            if [[ $service_enabled = "enabled" ]]
            then
                echo "$l is enabled"
            else
                echo "$l is not enabled"
                echo "Do you want to enable it? "
                read -r -p "(Y)es or (N)o: " answer
                # Accept either spelling of yes (the original's '|| "y"'
                # made this branch run unconditionally).
                if [[ $answer = "Y" || $answer = "y" ]]
                then
                    sudo systemctl enable "$l"
                else
                    echo "Okay Chief"
                fi
                exit 1
            fi
        }
        # Check if the service is enabled (reads the global $l).
        check_service_enabled
    elif [ "$choice" -eq 3 ]
    then
        # Which service's activity should be checked?
        read -r -p "What service would you like to activate: " software
        l=$software
        # Report whether $l is currently active (running).
        check_service_active() {
            service_is_activ=$(sudo systemctl is-active "$l")
            if [ "$service_is_activ" = "active" ]
            then
                echo "$l is active"
            else
                echo "$l is not active"
            fi
        }
        # Check if the service is active and running (reads the global $l).
        check_service_active
    fi
    echo "Do you want to perform another function? "
    read -r -p "(Y)es or (N)o: " again
    # Loop again only on an explicit yes; the original exited unconditionally.
    if [[ $again != "Y" && $again != "y" ]]
    then
        echo "Bye!"
        exit 1
    fi
done
| true |
5bb34bf660ffa165138cf16e1ead2269e8f86d09 | Shell | chaquo/chaquopy | /target/build-all.sh | UTF-8 | 1,155 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Fail fast on errors and unset variables; run from the script's directory.
set -eu
cd $(dirname $(realpath $0))
# Pre-create the install prefix for each Android ABI.
for abi in armeabi-v7a arm64-v8a x86 x86_64; do
    # OpenSSL build fails if lib doesn't already exist.
    mkdir -p prefix/$abi/{bin,include,lib,share}
done
# Build libraries shared by all Python versions.
./for-each-abi.sh bzip2/build.sh 1.0.8
./for-each-abi.sh libffi/build.sh 3.3
./for-each-abi.sh sqlite/build.sh 2022 3390200
./for-each-abi.sh xz/build.sh 5.2.4
# Build all supported versions of Python, and generate `target` artifacts for Maven.
#
# For a given Python version, we can't change the OpenSSL major version after we've made
# the first release, because that would break binary compatibility with our existing
# builds of the `cryptography` package. Also, multiple OpenSSL versions can't coexist
# within the same include directory, because they use the same header file names. So we
# build each OpenSSL version immediately before all the Python versions that use it.
./for-each-abi.sh openssl/build.sh 1.1.1s
python/build-and-package.sh 3.8
./for-each-abi.sh openssl/build.sh 3.0.5
python/build-and-package.sh 3.9
python/build-and-package.sh 3.10
python/build-and-package.sh 3.11
| true |
f0fdbc419f718f909e192c6442ce854d5df5678d | Shell | OmerGimenez/pfc-pj | /david/script_restore_backup | UTF-8 | 607 | 3 | 3 | [] | no_license | #!/bin/bash
# Restore the server state from a previously created backup archive.
# Path of the backup directory holding the saved server-state files
path="$HOME/pfc-pj-backup"
path_server="/var/www/racing"
path_src="/var/local/share/racing"
cd $path
tar xzvf backup_pfc.tar.gz
# Reload the database dump.
# NOTE(review): the DB password is hardcoded on the command line and is
# visible in 'ps' output; consider ~/.my.cnf or an interactive prompt.
mysql --password=proyecto --user=root pfc < archivo.sql
# Directory with the route configuration
#cp -r $path_server/conf $dirtmp/conf
# Directory with the PHP files: replace it wholesale with the backup copy.
sudo rm -fr $path_server
sudo cp -r www_racing $path_server
# Directory with circuit/match data and the files needed to run matches.
sudo rm -fr $path_src
sudo cp -r racing $path_src
| true |
e3c688e47560ef9bd4f3d1d30511638bb00b7028 | Shell | l4rz/idrac-7-8-reverse-engineering | /squashfs-linux/Linux/bin/drac-force-reboot.sh | UTF-8 | 1,717 | 3.046875 | 3 | [] | no_license | #!/bin/sh
# Force an iDRAC reboot: unblock the NC-SI LOM interfaces, stop long-running
# firmware services in a fixed order, wait for pending config flushes, then
# issue 'systemctl reboot -f'.
set +x
i=0
# One ncsi* entry per LOM interface in /proc/net/dev.
maxlom=`grep -c ncsi /proc/net/dev`
while [ $i -lt $maxlom ]; do
        echo unblocking ncsi$i interface
        /usr/bin/libncsitest 11 $i 0 1
        i=`expr $i + 1`
done
# set core file to none
echo /dev/null > /proc/sys/kernel/core_pattern
echo "Starting long running stops now..."
systemctl stop idracMonitor
# The removal is just the unblock the issue. (BITS158656)
# But we need to find out why it is not flushing the changes even waiting for 5 secs.
# We need to comup with a mechanism to force flush all the data before rebooting instead of waiting it to do that.
# We have time limitation in reboot so waiting will cause more issues.
#systemctl stop cfgbkup.service
#JIT-100704 send alert
[ -f /flash/data0/features/platform-modular ] && echo "Sending alert" && /usr/bin/IPMICmd 0x20 0x30 0x00 0x97 0x00 0x00 0x00 0x00 0x80 0x00 0x00 0x00 0
cd /lib/systemd/system
systemctl stop *.timer
killall -9 raclogd jdaemon
# Stop the data manager in the background; its PID is captured but the
# final wait is commented out below in favour of a fixed sleep.
systemctl stop dsm-sa-datamgr &
dm_pid=$!
systemctl stop credential-vault-13g # this forces 12g sync as the 12g service
systemctl stop fullfw_app
systemctl stop mrcached
systemctl stop OSInterface
systemctl stop apw_hndlr.socket
systemctl stop fmgr.socket
systemctl stop wsman.socket
#wait for pending config changes
CFG_TMP_LOC=/tmp/cfgbkp
CFG_CHANGED=cfgupdate
WAIT_FOR_CHANGES=cfgchanges
echo "Wait for pending config changes ..."
# Marker files are removed by the config subsystem once its writes finish.
while [ -f $CFG_TMP_LOC/$CFG_CHANGED -o -f $CFG_TMP_LOC/$WAIT_FOR_CHANGES ]
do
        sleep 1
done
systemctl stop credential-vault-13g # this forces 12g sync as the 12g service depends on the 13g service (requires/after)
# Waiting for DM was not deterministic, so sleep for now
sleep 5
#wait $dm_pid
echo "Now issuing reboot!"
systemctl reboot -f
| true |
9feb643ca194db4024d577cfbe028dba6821bf0d | Shell | aredenrare/RealTimeSystems | /Lab1/ex1/ScriptWithInput.sh | UTF-8 | 300 | 2.859375 | 3 | [] | no_license | #!/bin/bash
# Ask for a word, a file to search, and an output name; save every line
# of the file containing the word (whole-word match) to <output>.txt and
# print how many lines matched.
echo write in word you want to search for:
read -r varSearch
echo write in what file you want to search in:
read -r varSearchFile
echo write in which file you want to store the lines containing your word:
read -r varOutputFile
# \< \> anchor the pattern at word boundaries (GNU grep syntax).
# Quoting the file and output names lets them contain spaces.
grep "\<$varSearch\>" "$varSearchFile" | tee "$varOutputFile.txt" | wc -l
| true |
f9fd9fffecb288b21c99c9d506cc1e8f0100cfc9 | Shell | pveber/rar-scripts | /misc/bash/build_results.sh | UTF-8 | 2,107 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Print an error message (carrying the exit status of the command that
# just failed) to stderr and terminate the script.
# Guarantees a non-zero exit status: the original re-used $? directly,
# so calling die after a *successful* command exited 0 and hid the error.
function die() {
    ECODE=$?
    [ "${ECODE}" -ne 0 ] || ECODE=1
    echo "!!! Error #${ECODE} !!!" >&2
    echo -e "$1" >&2
    exit ${ECODE}
}
# Asserts File Exists: abort the whole script when the given file is
# missing.  $1 is quoted so paths with spaces (or an empty argument)
# behave correctly, and the diagnostic goes to stderr.
function afe() {
    if [ ! -f "$1" ]; then
        echo "Missing file $1" >&2
        exit 1
    fi
}
# Assumed input files
afe resources/chipseq/PanRAR_allregions_pmt.tsv
afe resources/chipseq/PanRAR_regions.bed
afe resources/chipseq/PanRAR_regions.fa
mkdir -p results/chipseq/annotations
# Detect spurious regions in the PanRAR selection
./scripts/ocaml/_build/repeat_proportion.native resources/chipseq/PanRAR_regions.fa > results/chipseq/annotations/PanRAR_repeats.tsv
# produce annotations for chipseq regions
mkdir -p results/chipseq/annotations/
mkdir -p results/rnaseq
./scripts/ocaml/_build/annotations.native
# Motif prevalence and NHR scans, run for both PanRAR_regions and the
# Random_regions set.
./scripts/ocaml/_build/motif_prevalence.native resources/chipseq/regions/PanRAR_regions.fa results/chipseq/regions/PanRAR_regions_motif_prevalence
./scripts/ocaml/_build/motif_prevalence.native resources/chipseq/regions/Random_regions.fa results/chipseq/regions/Random_regions_motif_prevalence
./scripts/ocaml/_build/nhr_scan_fasta.native resources/chipseq/regions/PanRAR_regions.fa results/chipseq/regions/PanRAR_regions_nhr_scan
./scripts/ocaml/_build/nhr_scan_fasta.native resources/chipseq/regions/Random_regions.fa results/chipseq/regions/Random_regions_nhr_scan
./scripts/ocaml/_build/chipseq_track_annotation_main.native resources/chipseq/regions/PanRAR_regions.bed results/chipseq/annotations/PanRAR_regions_chipseq_es.tsv
./scripts/ocaml/_build/chipseq_track_annotation_main.native resources/chipseq/regions/Random_regions.bed results/chipseq/annotations/Random_regions_chipseq_es.tsv
# produce visualizations of justin clusterings for chipseq data
R --vanilla < scripts/R/heatmaps_of_jclusterings.R
R --vanilla < scripts/R/profiles_of_jclusterings.R
R --vanilla < scripts/R/conservation_in_jclusterings.R
# produce a naive clustering of chipseq data
mkdir -p results/chipseq/clustering_maison
R --vanilla < scripts/R/chipseq_clustering_kmeans.R
# mkdir -p results/rnaseq/clustering_maison
# R --vanilla < scripts/R/rnaseq_clustering_kmeans.R
| true |
eee848807b11475da1c0e30c18533aa3fc6eaf9b | Shell | mitermayer/dotfiles | /git/symlink-hooks | UTF-8 | 1,470 | 4.40625 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/sh
##
## Creates symlinks for git hooks
##
## NOTE(review): relies on bash features ([[ ]], BASH_SOURCE); run with
## bash rather than plain sh.

########## Variables
TEMPLATE_DIR=$HOME/.git_template
# Resolve this script's real location, following chained symlinks, so
# the hook files that live next to it can be found.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
  DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
  SOURCE="$(readlink "$SOURCE")"
  [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"/hooks
BACKUP=~/git_hooks_old # old hooks backup directory
files="wtclean wtclean-full wtnew wtrebase wtmerge clean-untracked reset-hooks" # list of files/folders to symlink in homedir
LOC=$(pwd)
##########
echo "Setup global template directory"
# BUG FIX: also create hooks/ (the symlinks below land inside it), and
# create the backup dir (mv into a missing dir renames the file instead).
mkdir -p "$TEMPLATE_DIR/hooks"
mkdir -p "$BACKUP"
echo "Setup global template directory"
git config --global init.templatedir "$TEMPLATE_DIR"
# change to the hooks directory
echo "Changing to hooks $DIR directory"
cd "$DIR" || exit 1
echo "...done"
# move any existing hooks to the backup directory, then create symlinks
for file in $files; do
    echo "Moving any existing hooks from $TEMPLATE_DIR to $BACKUP"
    mv "$TEMPLATE_DIR/hooks/$file" "$BACKUP"
    echo "Creating symlink to $file in home directory."
    ln -s "$DIR/$file" "$TEMPLATE_DIR/hooks/$file"
    echo "Setup git alias"
    # The backticks are deliberately literal: git expands them when the
    # alias runs, not now.
    git config --global "alias.$file" '!`git rev-parse --git-common-dir`/hooks/'"$file"
done
cd "$LOC" || exit 1
echo "Setup completed"
| true |
db8f5f01383a705f802c5ede7b1733bfac3f4739 | Shell | githubfun/stockcat | /server/script/download_ushist.sh | UTF-8 | 1,702 | 3.6875 | 4 | [] | no_license | #!/bin/bash
#desc: 从Yahoo抓取美股历史数据
#author: fox
#date: 2014/10/04
# Convert a two-digit month string ("01".."12") to its zero-based index
# ("0".."11"), as required by the Yahoo Finance history URL.
# The 10# prefix forces base-10 so "08"/"09" are not rejected as invalid
# octal literals; this replaces the original awk/expr digit juggling.
sub_month()
{
    echo "$(( 10#$1 - 1 ))"
}
# Download daily price history from Yahoo Finance for every stock in the
# list file and import each CSV into the database.
#   $1 - directory to store the downloaded CSV files
#   $2 - stock list file, one "<sid> <code>" pair per line
#   $3 - start date, YYYYMMDD
#   $4 - end date, YYYYMMDD
# PHP_BIN and WEB_PATH are expected in the environment (presumably set
# by comm.inc, sourced by the caller — verify).
main()
{
    if [ $# -lt 4 ]
    then
        echo "Usage: $0 <path> <stock_list> <start> <end>"
        exit
    fi

    path=$1
    filename=$2
    start=$3
    end=$4

    # Split the YYYYMMDD dates with parameter expansion instead of
    # spawning echo|awk per field.  Yahoo wants zero-based months.
    start_year=${start:0:4}
    start_month=${start:4:2}
    start_sub_month=`sub_month "$start_month"`
    start_day=${start:6:2}

    end_year=${end:0:4}
    end_month=${end:4:2}
    end_sub_month=`sub_month "$end_month"`
    end_day=${end:6:2}

    # -r keeps backslashes literal; the first two whitespace-separated
    # fields are the stock id and the exchange-prefixed code.
    while read -r sid code _
    do
        # Strip every "us" occurrence (same as the old sed "s/us//g").
        code=${code//us/}
        url="http://real-chart.finance.yahoo.com/table.csv?s=${code}&a=${start_sub_month}&b=${start_day}&c=${start_year}&d=${end_sub_month}&e=${end_day}&f=${end_year}&g=d&ignore=.csv "
        #echo "$url"
        data_filename="${path}/${code}.csv"
        wget "$url" -O "${data_filename}"
        "$PHP_BIN" -c /etc/php.ini "$WEB_PATH/console_entry.php" importushist "$sid" "${data_filename}" >> "$WEB_PATH/import_ushist.log"
        echo "op=import_ushist sid=$sid code=$code filename=$data_filename"
    done < "$filename"
    echo "finish"
}
# Run from the script's own directory so relative paths resolve.
cd ${0%/*}
# Shared settings; presumably defines PHP_BIN and WEB_PATH used by
# main — verify against comm.inc.
. ./comm.inc
main "$@"
| true |
4fbaca209597ee1c90253d98511714afce08ea0c | Shell | hpullen/DPhil_B02DKstar_analysis | /ANA_scripts/Tables/Data_fit/make_yields_table_splitRuns.sh | UTF-8 | 2,528 | 3.78125 | 4 | [] | no_license | # Make tables with raw yields
#!/usr/bin/env bash
source ../s_no.sh
# Get yield for a component
# Prints the yield for fit parameter N_<comp>_<mode>_<run>_<flavour>,
# read as "<value> <error>" (columns 2 and 3) from yields_split.param
# and formatted by n_no from ../s_no.sh (presumably value ± error for
# LaTeX — verify against s_no.sh).
get_yield() {
    MODE=$1
    RUN=$2
    FLAV=$3
    COMP=$4
    INFILE="../../../Fit_data/Results/yields_split.param"
    PAR="N_${COMP}_${MODE}_${RUN}_${FLAV}"
    n_no $(awk "/^${PAR} /{print \$2, \$3}" $INFILE)
}
# Make single table
# make_table <mode> <latex-description> <run>...
# Writes the raw-yield LaTeX table for one decay mode to
# ANA_resources/Tables/Data_fit/yields_<mode>.tex, with one
# (B-bar, B) column pair per run.
make_table() {
    # Parameters
    MODE=$1
    DESC=$2
    shift && shift
    RUNS=$@
    # Settings
    OUTFILE="../../../ANA_resources/Tables/Data_fit/yields_${MODE}.tex"
    NAMEFILE="../../mode_names.param"   # NOTE(review): set but unused here
    COMPFILE="../../comp_names.param"
    # Table format: one label column plus two columns per run
    FORMAT="c"
    for RUN in $RUNS; do
        FORMAT="${FORMAT}cc"
    done
    # Start table
    echo '\begin{table}' > "$OUTFILE"
    echo '    \centering' >> "$OUTFILE"
    echo "    \\begin{tabular}{${FORMAT}}" >> "$OUTFILE"
    # BUG FIX: this line previously carried a duplicated ">> $OUTFILE".
    echo '        \toprule' >> "$OUTFILE"
    # Make header
    HEADER=""
    SUBHEADER=""
    for RUN in $RUNS; do
        if [[ $RUN == "run1" ]]; then
            RUN_NAME='Run 1'
        else
            RUN_NAME='Run 2'
        fi
        HEADER="${HEADER} & \\multicolumn{2}{c}{$RUN_NAME}"
        SUBHEADER="${SUBHEADER}& \$\\bar{B}^0\$ & \$B^0\$"
    done
    HEADER="${HEADER} \\\\"
    SUBHEADER="${SUBHEADER} \\\\"
    echo "        $HEADER" >> "$OUTFILE"
    echo "        $SUBHEADER" >> "$OUTFILE"
    echo '        \midrule' >> "$OUTFILE"
    # Fill yields: one row per fit component, labelled from COMPFILE
    COMPS="signal expo Bs low Bs_low DKpipi rho"
    for COMP in $COMPS; do
        LINE=$(grep "^$COMP " "$COMPFILE" | sed "s/^$COMP //")
        for RUN in $RUNS; do
            LINE="${LINE} & $(get_yield $MODE $RUN "minus" $COMP)"
            LINE="${LINE} & $(get_yield $MODE $RUN "plus" $COMP)"
        done
        LINE="${LINE} \\\\"
        echo "        $LINE" >> "$OUTFILE"
    done
    # End table
    echo '        \bottomrule' >> "$OUTFILE"
    echo '    \end{tabular}' >> "$OUTFILE"
    CAPTION="    \\caption{Raw yields for each signal and background component in the \$${DESC}\$ mode"
    if [[ $MODE == "pipipipi" ]]; then
        # pipipipi is only filled for run2 (see the calls below)
        CAPTION="${CAPTION}.}"
    else
        # BUG FIX: this assignment previously carried a stray ">> $OUTFILE".
        CAPTION="${CAPTION} in each run.}"
    fi
    # Quoted so the caption's leading indentation survives word-splitting.
    echo "$CAPTION" >> "$OUTFILE"
    echo "\\label{tab:yields_${MODE}}" >> "$OUTFILE"
    echo '\end{table}' >> "$OUTFILE"
}
# Make all tables
# Note: only run2 is passed for the four-pion mode (make_table
# special-cases its caption accordingly).
make_table Kpi 'K\pi' run1 run2
make_table piK '\pi K' run1 run2
make_table KK 'KK' run1 run2
make_table pipi '\pi\pi' run1 run2
make_table Kpipipi 'K\pi\pi\pi' run1 run2
make_table piKpipi '\pi K\pi\pi' run1 run2
make_table pipipipi '\pi\pi\pi\pi' run2
| true |
167735b2f12071795306ec73526e62b75ab8ff15 | Shell | flycal6/scripts | /buildAndSendRaptor.sh | UTF-8 | 7,459 | 3.875 | 4 | [] | no_license | #!/bin/bash
# build then send to server
#
# Usage: buildAndSendRaptor.sh <envs> <skipNpm>
#   envs:    concatenation of target envs: dev, test, train, aws
#            (e.g. "devtest" deploys to DEV and TEST)
#   skipNpm: "true" skips the npm clean/reinstall; anything else wipes
#            node_modules and does a full build
user=brthomas
awsUser=ec2-user
devServer=ilmnirm0ad623
testServer=ilmocdt0dz630
trainingServer=ilmocdt0dz6151
awsServer=ec2-54-70-162-128.us-west-2.compute.amazonaws.com
# ANSI colors for the console banners
red=$(tput setaf 1)
white=$(tput setaf 7)
green=$(tput setaf 2)

cd ~/workspaces/git/raptor || exit

# banner <color> <line>...
# Print one or more message lines framed by asterisk rows, then reset
# the console color to white.
banner() {
  printf "%s\n***************************\n" "$1"
  shift
  local line
  for line in "$@"; do
    printf "%s \n" "$line"
  done
  printf "***************************\n%s" "${white}"
}

if [ $# -lt 1 ]; then
  printf "%s\nGet your head in the game brah\nYou must pass an arg specifying which env to deploy to brah.\n\nPossible args:\n  dev\n  test\n  train\n  aws\n  devtest\n  devtesttrain\n  devtrain\n  testtrain\n" "${red}"
  exit
fi
if [ $# -lt 2 ]; then
  printf "%s\nGet your head in the game brah\nYou must pass a second arg for whether or not to clean and reinstall npm packages brah.\n\nPossible args:\n  true\n  false\n  " "${red}"
  exit
fi

# Refuse to build on top of uncommitted changes.
gitStatus=$(git status --short)
wait
if [[ "$gitStatus" != '' ]]; then
  printf "%s\nGet your head in the game brah\nYou have uncommited changes brah\nCommit or stash your changes before running this script brah\n" "${red}"
  printf "%s\n$gitStatus" "${white}"
  exit
fi

printf "%sPulling latest code:\n" "${green}"
git pull || exit
wait

# Announce the targets selected by substring matches on $1.
printf "\nBuild will deploy to the following env(s): \n"
[[ "$1" == *"dev"* ]] && printf "DEV\n"
[[ "$1" == *"test"* ]] && printf "TEST\n"
[[ "$1" == *"aws"* ]] && printf "AWS\n"
[[ "$1" == *"train"* ]] && printf "TRAINING\n"

if [[ "$2" == "true" ]]; then
  banner "${green}" "Skipping NPM Reinstall" "Beginning Build"
  sleep 2
  mvn clean install -DskipNpm=true -o
  code=$?
else
  # delete node_modules to prevent eperm error during npm install
  banner "${red}" "Deleting node_modules to prevent eperm error during npm install"
  rm -rf ~/workspaces/git/raptor/raptor-client/node_modules
  wait
  banner "${green}" "Done Cleaning node_modules" "Beginning Build"
  mvn clean install
  code=$?
fi

if [[ "$code" -ne 0 ]]; then
  banner "${red}" "Maven Build Failed -- Stopping Script and Exiting :("
  exit
fi

banner "${green}" "Done Building" "Copying War to /tmp/raptor.war"
sleep 1
# Stage the freshly built WAR under a fixed name for the scp steps below.
cp raptor-server/target/*.war /tmp/raptor.war
wait
banner "${green}" "Copied war locally"

# deploy_env <scp-label> <FAIL-LABEL> <deploy-label> <user@host> [identity-file]
# Copy the staged WAR to the host, then stream deployRaptor.sh to a
# remote bash over ssh.  The optional identity file is used for both scp
# and ssh (needed for the AWS instance).  Replaces four near-identical
# copy-pasted sections of the original script.
deploy_env() {
  local scpLabel=$1
  local failLabel=$2
  local envLabel=$3
  local dest=$4
  local identity=$5
  local -a keyOpt=()
  [ -n "$identity" ] && keyOpt=(-i "$identity")

  banner "${green}" "Sending to $scpLabel"
  scp "${keyOpt[@]}" /tmp/raptor.war "$dest:/tmp/"
  code=$?
  wait
  if [[ "$code" -ne 0 ]]; then
    banner "${red}" "SCP to $failLabel Failed -- Not Deploying to $failLabel :("
  else
    banner "${green}" "Sent to $scpLabel"
    banner "${green}" "Deploying to $envLabel server"
    ssh "${keyOpt[@]}" "$dest" "bash -s" < /c/Users/brthomas/tools/scripts/deployRaptor.sh
    wait
    banner "${green}" "Deployed to $envLabel server!"
  fi
}

[[ "$1" == *"dev"* ]]   && deploy_env "623" "DEV" "dev" "${user}@${devServer}"
[[ "$1" == *"test"* ]]  && deploy_env "630" "TEST" "test" "${user}@${testServer}"
[[ "$1" == *"train"* ]] && deploy_env "TRAINING" "TRAINING" "TRAINING" "${user}@${trainingServer}"
[[ "$1" == *"aws"* ]]   && deploy_env "AWS" "AWS" "AWS" "${awsUser}@${awsServer}" ~/.ssh/aws.raptor.pem

banner "${green}" "Done! War files uploaded!"
| true |
8517ee1df19d5267ee0040e5a49be86451ba161e | Shell | rockdrilla/dotfiles | /.config/zsh/lib/history.zsh | UTF-8 | 300 | 3.53125 | 4 | [] | no_license | #!/bin/zsh
# List shell history via the `fc` builtin.
#   -l : hand the arguments straight to `fc` (zparseopts runs without
#        -D, so -l itself stays in "$@").
# Otherwise list with `fc -il`; when the last argument contains no
# digit, start the listing at entry 1 (the whole history).
z-history() {
	local list
	zparseopts -E l=list
	if [[ -n "$list" ]]; then
		builtin fc "$@"
	else
		# ${@[-1]-} = last positional argument (empty default when none)
		[[ ${@[-1]-} = *[0-9]* ]] && builtin fc -il "$@" || builtin fc -il "$@" 1
	fi
}
# z-grephist <text> [z-history args...]
# List history entries whose command line matches *<text>*, by
# forwarding a glob pattern to z-history via fc's -m option.
z-grephist() {
	local x
	x=$1 ; shift
	z-history -m "*${x}*" "$@"
}
| true |
68c8b6af81bc54503e0556dbb632b209d1a46007 | Shell | saakhan/Scripts | /user_input | UTF-8 | 163 | 2.609375 | 3 | [] | no_license | #!/bin/bash
#echo "Please Enter Your Name"
#read name
#echo "Welcome to Linux World" $name

# BUG FIX: the variable name was fused onto the prompt string
# (`read -p "Enter your Name: "saalim`), so the input went to $REPLY and
# $name below was always empty.  -r keeps backslashes literal, and the
# "linus" typo in the greeting is corrected to "linux" (cf. the
# commented-out original above).
read -r -p "Enter your Name: " name
echo "Welcome to linux $name"
| true |
30cc06fbe9a75f029c2ffd5022a2953086b5d6f8 | Shell | jnelssonsmith/oh-deary-me | /test | UTF-8 | 4,999 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# End-to-end tests for dear (archive) and undear (per-flag extract):
# each compression format (gzip, bzip2, compress, plain tar) is packed
# into every per-flag output directory, unpacked with the matching
# undear flag, and compared against the known-good trees under
# testFiles/backups/.
rm -rf testFiles/out/*
mkdir testFiles/out/dFlag testFiles/out/lFlag testFiles/out/cFlag

# pack <basename> [dear-flags...]
# Archive testFiles/original/ under <basename> in each per-flag out dir.
pack() {
	local name=$1
	shift
	local dir
	for dir in dFlag lFlag cFlag; do
		./dear "$@" "testFiles/out/$dir/$name" testFiles/original/
	done
}

# NOTE: the original banners said "G FLAG" for every format; the labels
# below name the format actually under test.
echo "== TESTING DEAR (GZIP) =="
pack gzipFileTest -g
echo "== TESTING DEAR (BZIP2) =="
pack bzipFileTest -b
echo "== TESTING DEAR (COMPRESS) =="
pack compressFileTest -c
echo "== TESTING DEAR (TARFILE) =="
pack tarFileTest

# Run undear with each of its flags over every archive format.
for pair in "d D" "l L" "c C"; do
	set -- $pair
	lc=$1 uc=$2
	echo "== TESTING $uc FLAG FOR UNDEAR (GZIP) =="
	./undear "-$lc" "testFiles/out/${lc}Flag/gzipFileTest.tar.gz"
	echo "== TESTING $uc FLAG FOR UNDEAR (BZIP2) =="
	./undear "-$lc" "testFiles/out/${lc}Flag/bzipFileTest.tar.bz2"
	echo "== TESTING $uc FLAG FOR UNDEAR (COMPRESS) =="
	./undear "-$lc" "testFiles/out/${lc}Flag/compressFileTest.tar.Z"
	echo "== TESTING $uc FLAG FOR UNDEAR (TARFILE) =="
	./undear "-$lc" "testFiles/out/${lc}Flag/tarFileTest.tar"
done

echo "== TEST RESULTS =="

# check <lc> <uc> <basename> <description>
# Compare the produced tree against the expected one (ignoring
# .DS_Store) and print a pass/fail line.  diff's exit status replaces
# the old "pipe to wc -l and compare with 0" idiom.
check() {
	local lc=$1 uc=$2 name=$3 desc=$4
	if diff -x '.DS_Store' -rq "testFiles/backups/correct${uc}Flag" "testFiles/out/${lc}Flag/$name" > /dev/null; then
		echo "✓ $uc flag for a $desc works"
	else
		echo "x $uc flag for a $desc failed"
	fi
}

for pair in "d D" "l L" "c C"; do
	set -- $pair
	lc=$1 uc=$2
	check "$lc" "$uc" gzipFileTest "gzipped file"
	check "$lc" "$uc" bzipFileTest "bzipped file"
	check "$lc" "$uc" compressFileTest "compress file"
	check "$lc" "$uc" tarFileTest "tar file"
done
da9a4313520234fb54f01106806b6c02afa079ad | Shell | mgijax/mcvload | /bin/run_mcv_vocload.sh | UTF-8 | 3,710 | 3.328125 | 3 | [] | no_license | #!/bin/sh
#
# run_mcv_vocload.sh
###########################################################################
#
# Purpose:
# This script runs the Marker Category Vocab (MCV) Load
# and moves secondary SO IDs to the SO Logical DB as preferred,
# private
#
Usage=run_mcv_vocload.sh
#
# Env Vars:
#
# See the configuration file
#
# Inputs:
#
# - Common configuration file -
# /usr/local/mgi/live/mgiconfig/master.config.sh
# - configuration file - mcvload.config
# - input file - see mcvload.config and vocload/MCV.config
#
# Outputs:
#
# - An archive file
# - Log file for the script defined by ${LOG}, note update output goes
# to this log
# - Log file for this wrapper ${LOG_RUNVOCLOAD}
# - vocload logs and bcp files - see vocload/MCV.config
# - Records written to the database tables
# - Exceptions written to standard error
# - Configuration and initialization errors are written to a log file
# for the shell script
#
# Exit Codes:
#
# 0: Successful completion
# 1: Fatal error occurred
# 2: Non-fatal error occurred
#
# Assumes: Nothing
#
# History:
#
# sc 04/30/2010 - TR6839
# -new
#
# Run from the directory containing this script so the relative config
# paths below resolve.
cd `dirname $0`
LOG=`pwd`/run_mcv_vocload.log
touch ${LOG}
CONFIG_LOAD=../mcvload.config
echo $CONFIG_LOAD
#
# Verify and source the configuration file
#
if [ ! -r ${CONFIG_LOAD} ]
then
    echo "Cannot read configuration file: ${CONFIG_LOAD}"
    exit 1
fi
. ${CONFIG_LOAD}
#
# Source the DLA library functions.
# DLAJOBSTREAMFUNC is presumably set by the config sourced above —
# verify against mcvload.config.
#
if [ "${DLAJOBSTREAMFUNC}" != "" ]
then
    if [ -r ${DLAJOBSTREAMFUNC} ]
    then
        . ${DLAJOBSTREAMFUNC}
    else
        echo "Cannot source DLA functions script: ${DLAJOBSTREAMFUNC}" | tee -a ${LOG}
        exit 1
    fi
else
    echo "Environment variable DLAJOBSTREAMFUNC has not been defined." | tee -a ${LOG}
    exit 1
fi
#
# Verify and source the vocload configuration file
#
CONFIG_VOCLOAD=${VOCLOAD}/MCV.config
if [ ! -r ${CONFIG_VOCLOAD} ]
then
    echo "Cannot read configuration file: ${CONFIG_VOCLOAD}"
    exit 1
fi
. ${CONFIG_VOCLOAD}
LOG_RUNVOCLOAD=${LOGDIR}/runvocload.log
rm -rf ${LOG_RUNVOCLOAD}
#####################################
#
# Main
#
#####################################
#
# run vocabulary load
#
echo "Running MCV Vocabulary load" | tee -a ${LOG_RUNVOCLOAD}
CONFIG_VOCLOAD=${VOCLOAD}/MCV.config
# Reassign the SO accessions (logical DB 145) of MCV terms to the MCV
# logical DB (146) as non-preferred, non-private, ahead of the
# incremental load.
cat - <<EOSQL | psql -h${MGD_DBSERVER} -d${MGD_DBNAME} -U mgd_dbo -e >> ${LOG_RUNVOCLOAD}
create temp table somcvTemp as
select a1.accid as mcvID, t._term_key as mcvTermKey, t.term as mcvTerm, t.note as mcvNote, a2._accession_key, a2.accid as soID
from voc_term t, acc_accession a1, acc_accession a2
where a1._logicaldb_key = 146
and a1._mgitype_key = 13
and a1._object_key = t._term_key
and a2._logicaldb_key = 145
and a2._mgitype_key = 13
and a2._object_key = t._term_key
;
update ACC_Accession a
set _LogicalDB_key = 146, preferred = 0, private = 0
from somcvTemp s
where a._Accession_key = s._accession_key
;
EOSQL
${VOCLOAD}/runOBOIncLoad.sh ${CONFIG_VOCLOAD} >> ${LOG_RUNVOCLOAD}
STAT=$?
# checkStatus presumably comes from the DLA functions sourced earlier;
# it is expected to abort on a non-zero status — verify.
checkStatus ${STAT} "${VOCLOAD}/runOBOIncLoad.sh ${CONFIG_VOCLOAD}"
echo "Moving SO ID association to MCV term to SO ldb" | tee -a ${LOG_RUNVOCLOAD}
# Move the non-preferred SO: IDs currently on logical DB 146 back to the
# SO logical DB (145) as preferred, private accessions.
cat - <<EOSQL | psql -h${MGD_DBSERVER} -d${MGD_DBNAME} -U mgd_dbo -e >> ${LOG_RUNVOCLOAD}
create temp table soTemp as
select _Accession_key
from ACC_Accession
where _MGIType_key = 13
and preferred = 0
and _LogicalDB_key = 146
and prefixPart = 'SO:'
;
update ACC_Accession a
set _LogicalDB_key = 145, preferred = 1, private = 1
from soTemp s
where a._Accession_key = s._Accession_key
;
EOSQL
echo 'Done moving SO ID to SO ldb' | tee -a ${LOG_RUNVOCLOAD}
exit 0
| true |
e4a86937dc88c8631754a5b876d73e41ef91dacc | Shell | expovin/AZ_QSEoK | /AZ_DestroyCluster.sh | UTF-8 | 1,727 | 3.375 | 3 | [
"MIT"
] | permissive |
# Tear down the QSEoK Azure cluster: remove the helm deployments, delete
# the resource group, then notify (email in service-principal mode,
# /etc/hosts cleanup in interactive mode).  All configuration
# (LOGIN_MODE, credentials, LOG_FILE, email settings) comes from
# config.sh; error_handling and sendEmail are external.
source config.sh
echo $AZ_DEFAULT_RESOURCE_GROUP

startTimeScript=$(date +%s)
# The first tee (without -a) starts a fresh log for this run.  BUG FIX:
# every later write now appends (-a); previously most of them truncated
# the log and discarded earlier output.
echo "Process started at "$startTimeScript 2>&1 | tee $LOG_FILE

if [[ "$LOGIN_MODE" == "service-principal" ]] ; then
    az login --service-principal --username $appId --password $password --tenant $tenant 2>&1 | tee -a $LOG_FILE
    # BUG FIX: was `[ $? -neq 0 ]` — an invalid operator, so the check
    # never fired — and $? reflected tee, not az.  PIPESTATUS[0] holds
    # az's exit status.
    if [ ${PIPESTATUS[0]} -ne 0 ]; then
        echo " [ERROR] Login Error. Check your service-principal credential in the config.sh file. Exit program" 2>&1 | tee -a $LOG_FILE
        error_handling
    fi
else
    az login
fi
echo " [OK] You are logged in" 2>&1 | tee -a $LOG_FILE

echo "===> Removing all Deployments" 2>&1 | tee -a $LOG_FILE
helm del --purge qliksense qliksense-init 2>&1 | tee -a $LOG_FILE

echo "===> Remove all resources" 2>&1 | tee -a $LOG_FILE
az group delete -g $AZ_DEFAULT_RESOURCE_GROUP --verbose -y 2>&1 | tee -a $LOG_FILE
#az aks delete --name qseok-clust-ves --resource-group MC_qseok_ves_qseok-clust-ves_westeurope --yes

if [[ "$LOGIN_MODE" == "service-principal" ]] ; then
    echo "<h1>QSEoK Cluster Distruction</h1>" > $EMAIL_MESSAGE_BODY_FILE 2>&1 | tee -a $LOG_FILE
    echo "QSEoK Cluster has been destroyed" >> $EMAIL_MESSAGE_BODY_FILE 2>&1 | tee -a $LOG_FILE
    sendEmail -f \""$EMAIL_SENDER\""\
              -t "$EMAIL_RECIPIENTS"\
              -u \""$EMAIL_SUBJECT_DESTROY\""\
              -o message-file="$EMAIL_MESSAGE_BODY_FILE"\
              -s "$EMAIL_SMTP_SERVER"\
              -xu $EMAIL_USERNAME -xp $EMAIL_PASSWORD -a "$LOG_FILE"\
              -v -o tls=yes -o message-content-type=html 2>&1 | tee -a $LOG_FILE
else
    ./manage-etc-hosts.sh removeline $HOST_NAME
    echo " [OK] Removed host "$HOST_NAME" from /etc/hosts file"
fi

az logout
echo " [OK] Logged out from Azure" | true |
cc88261bf26b8d24de6c3d2d79f3959e35349738 | Shell | MYDan/openapi | /scripts/mydan/update.sh | UTF-8 | 1,489 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Abort when another mydan operation holds the lock file.
if [ -f /opt/mydan/.lock ]; then
    echo "The mydan is locked"
    exit;
fi
#export MYDAN_REPO_PUBLIC="http://180.153.186.60 http://223.166.174.60"
#MYDAN_REPO_PRIVATE
OS=$(uname)
ARCH=$(uname -m)
# On x86_64 macOS, build mayi from source; every other platform runs the
# hosted installer scripts in the else-branch below.
if [ "X$OS" == "XDarwin" ] && [ "X$ARCH" == "Xx86_64" ]; then
    echo OS=$OS
    set -e
    mkdir -p /opt/mydan
    cd /opt/mydan
    set +e
    # Try cpan first; fall back to a manual Type::Tiny build when the
    # module still cannot be loaded afterwards.
    cpan install Types::Standard
    perl -e 'use Types::Standard' 2>/dev/null
    if [ "X$?" != "X0" ]; then
        set -e
        rm -rf Type-Tiny-1.004002.tar.gz Type-Tiny-1.004002
        wget https://cpan.metacpan.org/authors/id/T/TO/TOBYINK/Type-Tiny-1.004002.tar.gz
        tar -zxvf Type-Tiny-1.004002.tar.gz
        cd Type-Tiny-1.004002
        perl Makefile.PL
        make
        make install
        # NOTE(review): this runs inside Type-Tiny-1.004002/, so these
        # relative paths do not exist here and nothing gets cleaned up.
        rm -rf Type-Tiny-1.004002.tar.gz Type-Tiny-1.004002
    fi
    set -e
    cd /opt/mydan
    rm -rf mayi
    git clone -b release-2.0.0 https://github.com/MYDan/mayi.git
    cd mayi
    set +e
    # Install every CPAN dependency declared in Makefile.PL that is not
    # already loadable.
    for i in `cat Makefile.PL|grep ::|grep '=> \d'|awk '{print $1}'|sed "s/'//g"`; do
        echo "use $i"
        perl -e "use $i" 2>/dev/null || cpan install $i
    done
    set -e
    perl Makefile.PL
    make
    make install dan=1 box=1 def=1
    rm -rf /opt/mydan/mayi
else
    curl -k -s https://raw.githubusercontent.com/MYDan/perl/master/scripts/update.sh |bash || exit 1
    curl -k -s https://raw.githubusercontent.com/MYDan/openapi/master/scripts/mayi/update.sh |bash || exit 1
fi
echo mydan update OK
| true |
9440e89d08708507bace814f34acf8133764b3cd | Shell | kbkubow/DaphniaPulex20162017Sequencing | /AlanAnalysis/RNAseq/coverageDepth/covergeDepth.sh | UTF-8 | 2,628 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env bash
#
##SBATCH -J maketree # A single job name for the array
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=1 ### is for multithreading: standard has 28 or 40 $SLURM_CPUS_PER_TASK
#SBATCH -t 0-00:10:00 # Running time of 4 days
#SBATCH --mem 2G # Memory request of 20GB
#SBATCH -o /scratch/aob2x/daphnia_hwe_sims/slurmOut/map_reads.%A_%a.out # Standard output
#SBATCH -e /scratch/aob2x/daphnia_hwe_sims/slurmOut/map_reads.%A_%a.err # Standard error
#SBATCH -p standard
#SBATCH --account berglandlab
### sbatch --array=1-8 /scratch/aob2x/daphnia_hwe_sims/DaphniaPulex20162017Sequencing/AlanAnalysis/RNAseq/coverageDepth/covergeDepth.sh
### sacct -u aob2x -j 20798058
### cat /scratch/aob2x/daphnia_hwe_sims/slurmOut/map_reads.20798058_1.err
module load samtools gcc/9.2.0 openmpi/3.1.6 python/3.7.7 bedtools/2.29.2
#SLURM_ARRAY_TASK_ID=2
wd=/scratch/aob2x/daphnia_hwe_sims/
# Pick this array task's sample: line N of the samples file.
samp=$( sed "${SLURM_ARRAY_TASK_ID}q;d" ${wd}/DaphniaPulex20162017Sequencing/AlanAnalysis/RNAseq/samples )
echo $samp
# Index the sample's BAM only when no .bai exists yet.
if [ ! -f /scratch/aob2x/daphnia_hwe_sims/rnaseq/bam/${samp}_star_testAligned.sortedByCoord.out.bam.bai ]; then
  samtools index /scratch/aob2x/daphnia_hwe_sims/rnaseq/bam/${samp}_star_testAligned.sortedByCoord.out.bam
fi
# Region of interest: 25 kb upstream of Daphnia00786 through 25 kb
# downstream of Daphnia00789, taken from the mRNA records of the GFF
# (columns 1/4/5 = seqid/start/end).
gene=Daphnia00786
chr=$( grep ${gene} /project/berglandlab/daphnia_ref/Daphnia.aed.0.6.gff | grep -E "mRNA" | cut -f1 )
start=$( grep ${gene} /project/berglandlab/daphnia_ref/Daphnia.aed.0.6.gff | grep -E "mRNA" | cut -f4 | tail -n1)
start_win=$( expr $start - 25000 )
gene=Daphnia00789
stop=$( grep ${gene} /project/berglandlab/daphnia_ref/Daphnia.aed.0.6.gff | grep -E "mRNA" | cut -f5 | head -n1 )
stop_win=$( expr $stop + 25000 )
#wc -l /project/berglandlab/daphnia_ref/RMoutHiCGMgoodscaff.bed
#cat /project/berglandlab/daphnia_ref/RMoutHiCGMgoodscaff.bed | \
#grep -v "5196681" | grep -v "5201321" | grep -v "5189960" | grep -v "5189615" > /project/berglandlab/daphnia_ref/RMoutHiCGMgoodscaff.keep.bed
#wc -l /project/berglandlab/daphnia_ref/RMoutHiCGMgoodscaff.keep.bed
#samtools view -b \
#/scratch/aob2x/daphnia_hwe_sims/rnaseq/bam/${samp}_star_testAligned.sortedByCoord.out.bam \
#${chr}:${start_win}-${stop_win} |
#bedtools subtract -A -a - -b /project/berglandlab/daphnia_ref/RMoutHiCGMgoodscaff.keep.bed > \
#~/${samp}.small.filter.test.bam
#
#samtools index ~/${samp}.small.filter.test.bam
#
# Slice the window out of the BAM and index the result.
samtools view -b \
/scratch/aob2x/daphnia_hwe_sims/rnaseq/bam/${samp}_star_testAligned.sortedByCoord.out.bam \
${chr}:${start_win}-${stop_win} > \
/project/berglandlab/alan/bam_slices/${samp}.small.test.bam
samtools index /project/berglandlab/alan/bam_slices/${samp}.small.test.bam
| true |
becbf499d4250239038b2c7f4c5ac9680e821222 | Shell | panditwalde/ShellScripting | /Loops/Factorial.sh | UTF-8 | 138 | 3.21875 | 3 | [] | no_license | #!/bin/bash
read -p "Enter the number" number
factorial=1
for((i=number;i>1;i--))
do
factorial=$((factorial*i))
done
echo "$factorial !"
| true |
82f3fff8b34f20803d99a07928e3d0e71df8487e | Shell | JackScripter/monigraf | /install.sh | UTF-8 | 1,520 | 3.96875 | 4 | [] | no_license | #!/bin/bash
# Color
declare -r RED='\e[91m'
declare -r DEF='\e[39m'
# Configuration file path
MODULES_PATH='/opt/monigraf'
CONFIG_PATH='/etc/monigraf'
function centos() {
if ! rpm -q inotify-tools; then
if ! yum repolist | grep epel; then yum install epel-release; yum update; fi
yum install -y inotify-tools
fi
if ! rpm -q net-snmp-utils; then yum install -y net-snmp-utils; fi
}
function debian() {
if ! dpkg -l inotify-tools; then apt install -y inotify-tools; fi
}
function general() {
pip3 install -r requirements.txt
cp -v monigraf.service /lib/systemd/system/
mkdir -vp ${CONFIG_PATH}
mkdir -vp ${MODULES_PATH}
cp -v modules/* ${MODULES_PATH}/
cp -rv ES ${MODULES_PATH}/
cp -rv SNMP ${MODULES_PATH}/
cp -rv BodyBuilder ${MODULES_PATH}/
cp -rv alerts ${MODULES_PATH}/
mv -v ${MODULES_PATH}/alerts/*/ ${MODULES_PATH}/
cp -v monigraf.py ${MODULES_PATH}/
if ! [ -f ${CONFIG_PATH}/monigraf.ini ]; then
cp -v conf/monigraf.ini ${CONFIG_PATH}/
chmod -v 600 ${CONFIG_PATH}/monigraf.ini
fi
}
# Check Linux distro
os_release=`cat /etc/*-release | grep ID`
case $os_release in
*"debian"*) distro="debian";;
*"centos"*) distro="centos";;
*) distro="";;
esac
if [[ $distro == "" ]]; then
echo -e "${RED}Cannot find a supported Linux distribution !${DEF}"
echo "If you think one of the following distro should work, choose one."
echo "- debian"
echo "- centos"
distro=`read -p "Distro: "`
fi
case $distro in
"debian") debian;;
"centos") centos;;
*) exit;;
esac
general
| true |
efbd224e16af0dc979977d92283e9beda0ae8ce0 | Shell | Groestlcoin/armbian-groestlcoin-core | /customize-image.sh | UTF-8 | 4,378 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# arguments: $RELEASE $LINUXFAMILY $BOARD $BUILD_DESKTOP
#
# This is the image customization script
# NOTE: It is copied to /tmp directory inside the image
# and executed there inside chroot environment
# so don't reference any files that are not already installed
# NOTE: If you want to transfer files between chroot and host
# userpatches/overlay directory on host is bind-mounted to /tmp/overlay in chroot
RELEASE=$1
LINUXFAMILY=$2
BOARD=$3
BUILD_DESKTOP=$4
# TODO: exit with non-zero status if anything goes wrong
sudo -s <<'EOF'
# User with sudo rights and initial password:
useradd groestlcoin -m -s /bin/bash --groups sudo
echo "groestlcoin:groestlcoin" | chpasswd
echo "groestlcoin ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/groestlcoin
EOF
# TODO copy ssh pubkey if found, disable password SSH login
# Clone Groestlcoin Core repo for graphics assets:
sudo -s <<'EOF'
git clone https://github.com/groestlcoin/groestlcoin.git /usr/local/src/groestlcoin
cd /usr/local/src/groestlcoin
git checkout 2.19.1
# TODO: check signature commit hash
git clone https://github.com/Groestlcoin/packaging.git /usr/local/src/packaging
EOF
sudo cp /tmp/overlay/bin/groestlcoin* /usr/local/bin
if [ "$BUILD_DESKTOP" == "yes" ]; then
sudo cp /tmp/overlay/bin/groestlcoin-qt /usr/local/bin
fi
# Configure Groestlcoin Core:
sudo -s <<'EOF'
mkdir /home/groestlcoin/.groestlcoin
mkdir /home/groestlcoin/.groestlcoin/wallets
cp /tmp/overlay/groestlcoin/groestlcoin.conf /home/groestlcoin/.groestlcoin
# TODO: offer choice between mainnet and testnet
# echo "testnet=1" >> /home/groestlcoin/.groestlcoin/groestlcoin.conf
# mkdir /home/groestlcoin/.groestlcoin/testnet3
# Copy block index and chain state from host:
cp -r /tmp/overlay/groestlcoin/chainstate /home/groestlcoin/.groestlcoin
cp -r /tmp/overlay/groestlcoin/blocks /home/groestlcoin/.groestlcoin
# cp -r /tmp/overlay/groestlcoin/testnet3/chainstate /home/groestlcoin/.groestlcoin/testnet3
# cp -r /tmp/overlay/groestlcoin/testnet3/blocks /home/groestlcoin/.groestlcoin/testnet3
chown -R groestlcoin:groestlcoin /home/groestlcoin/.groestlcoin
EOF
# Install Tor
sudo -s <<'EOF'
if ! su - groestlcoin -c "gpg --keyserver pgp.surfnet.nl --recv A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89" ; then
if ! su - groestlcoin -c "gpg --keyserver pgp.mit.edu --recv A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89" ; then
exit 1
fi
fi
su - groestlcoin -c "gpg --export A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89" | apt-key add -
cat <<EOT >> /etc/apt/sources.list
deb https://deb.torproject.org/torproject.org bionic main
deb-src https://deb.torproject.org/torproject.org bionic main
EOT
apt-get update
apt-get install -y tor deb.torproject.org-keyring
mkdir -p /usr/share/tor
cat <<EOT >> /usr/share/tor/tor-service-defaults-torrc
ControlPort 9051
CookieAuthentication 1
CookieAuthFileGroupReadable 1
EOT
usermod -a -G debian-tor groestlcoin
EOF
cp /tmp/overlay/scripts/first_boot.service /etc/systemd/system
systemctl enable first_boot.service
if [ "$BUILD_DESKTOP" == "yes" ]; then
# Groestlcoin desktop background and icon:
sudo -s <<'EOF'
apt remove -y nodm
apt-get install -y lightdm lightdm-gtk-greeter xfce4 onboard
cp /tmp/overlay/rocket.jpg /usr/share/backgrounds/xfce/rocket.jpg
mkdir -p /home/groestlcoin/.config/xfce4/xfconf/xfce-perchannel-xml
cp /tmp/overlay/xfce4-desktop.xml /home/groestlcoin/.config/xfce4/xfconf/xfce-perchannel-xml/xfce4-desktop.xml
cp /tmp/overlay/lightdm-gtk-greeter.conf /etc/lightdm/lightdm-gtk-greeter.conf
mkdir -p /home/groestlcoin/Desktop
mkdir -p /home/groestlcoin/.config/autostart
cp /usr/local/src/packaging/debian/groestlcoin-qt.desktop /home/groestlcoin/Desktop
chmod +x /home/groestlcoin/Desktop/groestlcoin-qt.desktop
cp /tmp/overlay/keyboard.desktop /home/groestlcoin/.config/autostart
chown -R groestlcoin:groestlcoin /home/groestlcoin/Desktop
chown -R groestlcoin:groestlcoin /home/groestlcoin/.config
cp /usr/local/src/groestlcoin/share/pixmaps/groestlcoin128.png /usr/share/pixmaps
cp /usr/local/src/groestlcoin/share/pixmaps/groestlcoin256.png /usr/share/pixmaps
cp /tmp/overlay/scripts/first_boot_desktop.service /etc/systemd/system
systemctl enable first_boot_desktop.service
systemctl set-default graphical.target
EOF
fi
| true |
0dfdbb23fe314b4ceb3a4a67d909a1772be24bd2 | Shell | AbrahamXu/RPi-Monitor | /etc/mynodejs | UTF-8 | 651 | 3.46875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
### BEGIN INIT INFO
# Provides: mynodejs
# Required-Start: networking sshd
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start nodejs for remote controls
# Description: Start nodejs for remote controls
### END INIT INFO
M=/etc/init.d/mynodejs
APP_FILE=/home/pi/cmdServer/app.js
case "$1" in
start)
echo "mynodejs is starting ..."
sudo nodejs $APP_FILE > /home/pi/mynodejs.log &
echo "mynodejs is started successfully"
;;
stop)
echo "mynodejs is stopping ..."
sudo killall nodejs
echo "mynodejs is stopped successfully"
;;
*)
echo "Usage: $N {start|stop}" >&2
exit 1
;;
esac
exit 0
| true |
f64f8080feb57c9cefc80502cbad3e35a5946506 | Shell | naddeoa/elm-java-parser | /demo.sh | UTF-8 | 656 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
if ! [ -f ./bin/elm-parser.jar ]; then
echo "Build first with 'ant'"
echo "You can probably get it through your package manager on Linux, or 'brew isntall ant' on a mac."
exit 1
fi
echo '**DBG_IMPORT_STMT import MyModule' | java -jar bin/elm-parser.jar
echo '**DBG_IMPORT_STMT import MyModule.Submodule as Somethingelse' | java -jar bin/elm-parser.jar
echo '**DBG_IMPORT_STMT import ModuleDefinition.Submodule exposing (Html, li, ul)' | java -jar bin/elm-parser.jar
echo '**DBG_MODULE_DEFINITION module Html' | java -jar bin/elm-parser.jar
echo '**DBG_MODULE_DEFINITION module Html exposing (a,b, C)' | java -jar bin/elm-parser.jar
| true |
a2c7d6d61164b4a91862f0093b6fb571c2703cc8 | Shell | manasjain0699/python | /scripts/release.sh | UTF-8 | 6,393 | 3.34375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Workflow
# 1. [master branch] update existing snapshot (include API change for a new alpha/beta/GA
# release)
# - add a new snapshot or reuse the existing snapshot, the latter means either
# API change happened in a k8s patch release, or we want to include some new
# python / python-base change in the release note
# - API change w/ release notes
# - master change w/ release notes
# - submodule change w/ release notes
# 2. [master branch] create new snapshot (include API change for a new alpha release)
# - add a new snapshot or reuse the existing snapshot, the latter means either
# API change happened in a k8s patch release, or we want to include some new
# python / python-base change in the release note
# - API change w/ release notes
# - master change w/ release notes
# - submodule change w/ release notes
# 3. [release branch] create a new release
# - pull master
# - it's possible that master has new changes after the latest snaphost,
# update CHANGELOG accordingly
# - for generated file, resolve conflict by committing the master version
# - abort if a snapshot doesn't exist
# - generate client change, abort if API change is detected
# - CHANGELOG: latest snapshot becomes the release, create a new snapshot
# section that reflect the master branch state
# - README: add the release to README
# - an extra PR to update CHANGELOG and README in master in sync with this new
# release
#
# Difference between 1&2: API change release notes
#
# TODO(roycaihw):
# - add user input validation
# - add function input validaiton (release/version strings start with 'v' or not)
# - automatically send a PR; provide useful links for review
# - master branch diff: https://github.com/kubernetes-client/python/compare/commit1..commit2
# - python base diff: https://github.com/kubernetes-client/python-base/compare/commit1..commit2
# - Kubernetes changelog, e.g. https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.18.md
# - add debug log
# - add a sentence about "changes since {last release}". In most cases our
# releases should be sequential. This script (the workflow above) is based on
# this assumption, and we should make the release note clear about that.
# - update readme; if it's a real release (instead of a snapshot in master
# branch), also create a PR to update changelog and readme in the master
# branch
#
# Usage:
# $ KUBERNETES_BRANCH=release-1.19 CLIENT_VERSION=19.0.0-snapshot DEVELOPMENT_STATUS="3 - Alpha" scripts/release.sh
set -o errexit
set -o nounset
set -o pipefail
# used by the client generator: https://github.com/kubernetes-client/gen/blob/729332ad08f0f4d98983b7beb027e2f657236ef9/openapi/openapi-generator/client-generator.sh#L52
export USERNAME=kubernetes
repo_root="$(git rev-parse --show-toplevel)"
declare -r repo_root
cd "${repo_root}"
source scripts/util/changelog.sh
source scripts/util/kube_changelog.sh
old_client_version=$(python3 "scripts/constants.py" CLIENT_VERSION)
old_k8s_api_version=$(util::changelog::get_k8s_api_version "v$old_client_version")
KUBERNETES_BRANCH=${KUBERNETES_BRANCH:-$(python3 "scripts/constants.py" KUBERNETES_BRANCH)}
CLIENT_VERSION=${CLIENT_VERSION:-$(python3 "scripts/constants.py" CLIENT_VERSION)}
DEVELOPMENT_STATUS=${DEVELOPMENT_STATUS:-$(python3 "scripts/constants.py" DEVELOPMENT_STATUS)}
# get Kubernetes API Version
new_k8s_api_version=$(util::kube_changelog::find_latest_patch_version $KUBERNETES_BRANCH)
echo "Old Kubernetes API Version: $old_k8s_api_version"
echo "New Kubernetes API Version: $new_k8s_api_version"
sed -i "s/^KUBERNETES_BRANCH =.*$/KUBERNETES_BRANCH = \"$KUBERNETES_BRANCH\"/g" scripts/constants.py
sed -i "s/^CLIENT_VERSION =.*$/CLIENT_VERSION = \"$CLIENT_VERSION\"/g" scripts/constants.py
sed -i "s/^DEVELOPMENT_STATUS =.*$/DEVELOPMENT_STATUS = \"$DEVELOPMENT_STATUS\"/g" scripts/constants.py
git commit -am "update version constants for $CLIENT_VERSION release"
util::changelog::update_release_api_version $CLIENT_VERSION $old_client_version $new_k8s_api_version
# get API change release notes since $old_k8s_api_version.
# NOTE: $old_k8s_api_version may be one-minor-version behind $KUBERNETES_BRANCH, e.g.
# KUBERNETES_BRANCH=release-1.19
# old_k8s_api_version=1.18.17
# when we bump the minor version for the snapshot in the master branch. We
# don't need to collect release notes in release-1.18, because any API
# change in 1.18.x (x > 17) must be a cherrypick that is already included in
# release-1.19.
# TODO(roycaihw): not all Kubernetes API changes modify the OpenAPI spec.
# Download the patch and skip if the spec is not modified. Also we want to
# look at other k/k sections like "deprecation"
release_notes=$(util::kube_changelog::get_api_changelog "$KUBERNETES_BRANCH" "$old_k8s_api_version")
if [[ -n "$release_notes" ]]; then
util::changelog::write_changelog v$CLIENT_VERSION "### API Change" "$release_notes"
fi
git commit -am "update changelog"
# run client generator
scripts/update-client.sh
rm -r kubernetes/test/
git add .
git commit -m "temporary generated commit"
scripts/apply-hotfixes.sh
git reset HEAD~2
# custom object API is hosted in gen repo. Commit API change separately for
# easier review
if [[ -n "$(git diff kubernetes/client/api/custom_objects_api.py)" ]]; then
git add kubernetes/client/api/custom_objects_api.py
git commit -m "generated client change for custom_objects"
fi
git add kubernetes/docs kubernetes/client/api/ kubernetes/client/models/ kubernetes/swagger.json.unprocessed scripts/swagger.json
# verify if there are staged changes, then commit
git diff-index --quiet --cached HEAD || git commit -m "generated API change"
git add .
git commit -m "generated client change"
echo "Release finished successfully."
| true |
b01d469727ab01696451a161ad2339e5860035f8 | Shell | CyberSys/bifrost-build | /all/exif-0.6.21-1/Fetch-source.sh | UTF-8 | 1,471 | 2.84375 | 3 | [] | no_license | #!/bin/bash
SRC=exif-0.6.21.tar.gz
DST=/var/spool/src/"${SRC}"
MD5=9321c409a3e588d4a99d63063ef4bbb7
[ -s "${DST}" ] || ../../wget-finder --checksum "${MD5}" -O "${DST}" http://downloads.sourceforge.net/project/libexif/exif/0.6.21/"${SRC}" \
|| ../../wget-finder --checksum "${MD5}" -O "${DST}" http://her.gr.distfiles.macports.org/exif/"${SRC}" \
|| ../../wget-finder --checksum "${MD5}" -O "${DST}" http://cjj.kr.distfiles.macports.org/exif/"${SRC}" \
|| ../../wget-finder --checksum "${MD5}" -O "${DST}" http://sourceforge.mirrorservice.org//l/project/li/libexif/exif/0.6.21/"${SRC}" \
|| ../../wget-finder --checksum "${MD5}" -O "${DST}" http://mirror.pnl.gov/macports/distfiles/exif/"${SRC}" \
|| ../../wget-finder --checksum "${MD5}" -O "${DST}" https://mirrors.ustc.edu.cn/macports/distfiles/exif/"${SRC}" \
|| ../../wget-finder --checksum "${MD5}" -O "${DST}" http://ba.mirror.garr.it/mirrors/macports-distfiles/exif/"${SRC}" \
|| ../../wget-finder --checksum "${MD5}" -O "${DST}" http://ftp.twaren.net/FreeBSD/ports/distfiles/"${SRC}" \
|| ../../wget-finder --checksum "${MD5}" -O "${DST}" https://mirror.yandex.ru/macports/distfiles/exif/"${SRC}" \
|| ../../wget-finder --checksum "${MD5}" -O "${DST}" http://distfiles.icmpv6.org/distfiles/"${SRC}" \
|| ../../wget-finder -O "${DST}" "${SRC}:${MD5}"
| true |
33ec73b2e90b149e4d59dd3cbed96bc16e16c12d | Shell | dubo-dubon-duponey/regander | /tests/integration/regander/operations.sh | UTF-8 | 8,479 | 3.359375 | 3 | [
"DOC",
"MIT"
] | permissive | #!/usr/bin/env bash
# Pushing a blob
blob="2341"
shasum=$(printf "%s" "$blob" | shasum -a 256 -)
shasum="sha256:${shasum%% *}"
# Pushing an image
image='{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 4,
"digest": "'$shasum'"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": '${#blob}',
"digest": "'$shasum'"
}
]
}'
image=$(printf "%s" "$image" | jq -cj .)
ishasum=$(printf "%s" "$image" | shasum -a 256 -)
ishasum="sha256:${ishasum%% *}"
helperHUB(){
export REGISTRY_USERNAME=$HUB_TEST_USERNAME
export REGISTRY_PASSWORD=$HUB_TEST_PASSWORD
REGISTRY=https://registry-1.docker.io
imagename=$REGISTRY_USERNAME/regander-integration-test
otherimagename=$REGISTRY_USERNAME/regander-also-integration-test
tagname=that-tag-name
}
helperOSS(){
export REGISTRY_USERNAME=
export REGISTRY_PASSWORD=
REGISTRY=http://localhost:5000
imagename=dubogus/regander-integration-test
otherimagename=dubogus/regander-also-integration-test
tagname=that-tag-name
}
helperVersion(){
# Version
result=$(regander -s --registry=$REGISTRY version GET)
exit=$?
result="$(echo "$result" | jq -rcj .)"
dc-tools::assert::equal "exit code" "0" "$exit"
dc-tools::assert::equal "result" "registry/2.0" "$result"
}
helperCatalog(){
# Empty catalog
result=$(regander -s --registry=$REGISTRY catalog)
exit=$?
result="$(echo "$result" | jq -rcj .)"
if [ "$1" ]; then
dc-tools::assert::equal "catalog GET" "$exit" "0"
dc-tools::assert::equal "$1" "$result"
else
dc-tools::assert::equal "catalog GET" "$exit" "13"
dc-tools::assert::equal "$result" '{"errors":[{"code":"UNAUTHORIZED","message":"authentication required","detail":[{"Type":"registry","Class":"","Name":"catalog","Action":"*"}]}]}'
fi
}
helperBlobPush(){
result=$(regander -s --registry=$REGISTRY blob PUT $imagename "application/vnd.oci.image.layer.v1.tar+gzip" < <(printf "%s" "$blob"))
exit=$?
result="$(echo "$result" | jq -rcj .digest)"
dc-tools::assert::equal "blob PUT" "$exit" "0"
dc-tools::assert::equal "$shasum" "$result"
}
helperBlobHead(){
# Heading a blob
result=$(regander -s --registry=$REGISTRY blob HEAD $imagename "$shasum")
exit=$?
result="$(echo "$result" | jq -rcj .)"
type=$(echo "$result" | jq -rcj .type)
length=$(echo "$result" | jq -rcj .size)
location=$(echo "$result" | jq -rcj .location)
dc-tools::assert::equal "blob HEAD" "0" "$exit"
dc-tools::assert::equal "application/octet-stream" "$type"
dc-tools::assert::equal "${#blob}" "$length"
# If not redirected
if [ "$1" ]; then
dc-tools::assert::equal "$location" "$imagename/blobs/$shasum"
fi
}
helperBlobGet(){
# Getting a blob
result=$(regander -s --registry=$REGISTRY blob GET $imagename "$shasum")
exit=$?
result="$(echo "$result" | jq -rcj .)"
dc-tools::assert::equal "blob GET" "0" "$exit"
dc-tools::assert::equal "$blob" "$result"
}
helperBlobMount(){
# Mounting a blob
result=$(regander -s --registry=$REGISTRY --from=$imagename blob MOUNT $otherimagename "$shasum")
exit=$?
result="$(echo "$result" | jq -rcj .)"
digest=$(echo "$result" | jq -rcj .digest)
length=$(echo "$result" | jq -rcj .size)
location=$(echo "$result" | jq -rcj .location)
dc-tools::assert::equal "blob MOUNT" "0" "$exit"
dc-tools::assert::equal "$digest" "$shasum"
dc-tools::assert::equal "$length" "0"
# XXX HUB gives relative redirect
# dc-tools::assert::equal "$location" "$REGISTRY/v2/$otherimagename/blobs/$shasum"
}
helperBlobDelete(){
# Deleting a blob
result=$(regander -s --registry=$REGISTRY blob DELETE $imagename "$shasum")
exit=$?
result="$(echo "$result" | jq -rcj .)"
# XXX oss gives 20, Hub gives 13???? WTFF!!! It's changing!
# dc-tools::assert::equal "blob DELETE" "$exit" "13"
# dc-tools::assert::equal "$result" '{"errors":[{"code":"UNAUTHORIZED","message":"authentication required","detail":[{"Type":"repository","Class":"","Name":"dubogus/regander-integration-test","Action":"delete"}]}]}'
dc-tools::assert::equal "blob DELETE" "20" "$exit"
# XXX error is also different
dc-tools::assert::equal "$result" '{"errors":[{"code":"UNSUPPORTED","message":"The operation is unsupported."}]}'
}
helperImagePut(){
result=$(regander -s --registry=$REGISTRY manifest PUT $imagename $tagname < <(printf "%s" "$image"))
exit=$?
result="$(echo "$result" | jq -rcj .)"
digest=$(echo "$result" | jq -rcj .digest)
location=$(echo "$result" | jq -rcj .location)
dc-tools::assert::equal "image PUT" "0" "$exit"
dc-tools::assert::equal "$digest" "$ishasum"
# XXX hub gives relative
# dc-tools::assert::equal "$location" "$REGISTRY/v2/$imagename/manifests/$ishasum"
}
helperImageHead(){
# HEAD image
result=$(regander -s --registry=$REGISTRY manifest HEAD $imagename "$ishasum")
exit=$?
result="$(echo "$result" | jq -rcj .)"
type=$(echo "$result" | jq -rcj .type)
length=$(echo "$result" | jq -rcj .size)
location=$(echo "$result" | jq -rcj .location)
dc-tools::assert::equal "image HEAD" "0" "$exit"
dc-tools::assert::equal "$type" "application/vnd.docker.distribution.manifest.v2+json"
dc-tools::assert::equal "$length" "${#image}"
dc-tools::assert::equal "$digest" "$ishasum"
result=$(regander -s --registry=$REGISTRY manifest HEAD $imagename "$tagname")
exit=$?
result="$(echo "$result" | jq -rcj .)"
type=$(echo "$result" | jq -rcj .type)
length=$(echo "$result" | jq -rcj .size)
location=$(echo "$result" | jq -rcj .location)
dc-tools::assert::equal "image HEAD" "0" "$exit"
dc-tools::assert::equal "$type" "application/vnd.docker.distribution.manifest.v2+json"
dc-tools::assert::equal "$length" "${#image}"
dc-tools::assert::equal "$digest" "$ishasum"
}
helperImageGet(){
# GET image
result=$(regander -s --registry=$REGISTRY manifest GET $imagename "$ishasum")
exit=$?
result="$(echo "$result" | jq -rcj .)"
dc-tools::assert::equal "image GET" "0" "$exit"
dc-tools::assert::equal "$image" "$result"
result=$(regander -s --registry=$REGISTRY manifest GET $imagename "$tagname")
exit=$?
result="$(echo "$result" | jq -rcj .)"
dc-tools::assert::equal "image GET" "0" "$exit"
dc-tools::assert::equal "$image" "$result"
}
helperImageDelete(){
# DELETE image
result=$(regander -s --registry=$REGISTRY manifest DELETE $imagename "$ishasum")
exit=$?
result="$(echo "$result" | jq -rcj .)"
dc-tools::assert::equal "image DELETE" "20" "$exit"
dc-tools::assert::equal "$result" '{"errors":[{"code":"UNSUPPORTED","message":"The operation is unsupported."}]}'
result=$(regander -s --registry=$REGISTRY manifest DELETE $imagename "$tagname")
exit=$?
result="$(echo "$result" | jq -rcj .)"
dc-tools::assert::equal "image DELETE" "20" "$exit"
dc-tools::assert::equal "$result" '{"errors":[{"code":"UNSUPPORTED","message":"The operation is unsupported."}]}'
}
helperTagsGet(){
###########################
# Tags
###########################
result=$(regander -s --registry=$REGISTRY tags GET $imagename | jq -rcj .)
exit=$?
dc-tools::assert::equal "0" "$exit"
dc-tools::assert::equal "{\"name\":\"$imagename\",\"tags\":[\"$tagname\"]}" "$result"
}
testVersion(){
helperHUB
helperVersion
helperOSS
helperVersion
}
testCatalog(){
helperHUB
helperCatalog
helperOSS
helperCatalog '{"repositories":[]}'
}
testBlobPush(){
helperHUB
helperBlobPush
helperOSS
helperBlobPush
}
testBlobHead(){
helperOSS
helperBlobHead testlocation
helperHUB
helperBlobHead
}
testBlobGet(){
helperHUB
helperBlobGet
helperOSS
helperBlobGet
}
testBlobMount(){
helperHUB
helperBlobMount
helperOSS
helperBlobMount
}
testBlobDelete(){
helperHUB
helperBlobDelete
helperOSS
helperBlobDelete
}
testCatalogAgain(){
helperHUB
helperCatalog
helperOSS
helperCatalog "{\"repositories\":[\"$otherimagename\",\"$imagename\"]}"
}
testImagePut(){
helperHUB
helperImagePut
helperOSS
helperImagePut
}
testImageHead(){
helperHUB
helperImageHead
helperOSS
helperImageHead
}
testImageGet(){
helperHUB
helperImageGet
helperOSS
helperImageGet
}
testImageDelete(){
helperHUB
helperImageDelete
helperOSS
helperImageDelete
}
testTagsGet(){
helperHUB
helperTagsGet
helperOSS
helperTagsGet
}
| true |
ccce63fc0caf23f97459868fc35c5d3dd796c92a | Shell | ThanhTV312/shellscript | /GPFS-Fullv3.sh | UTF-8 | 12,332 | 3.46875 | 3 | [] | no_license | #!/bin/bash
echo "***Script by ThanhTV***"
echo "***Please read a Read Me file carefully***"
###------- Golbal Menu
echo "Select the operation: "
echo " 1)Press 1 to Install GPFS on Node"
echo " 2)Press 2 to Uninstall GPFS on Node"
echo " 3)Press 3 to Create GPFS Cluster, NSD"
echo " 4)Press 4 to Create File System"
echo " 5)Press 5 to Install GPFS GUI"
echo " 6)Press 6 to Exit"
echo -n "Enter choice: "
dir1=/root
cd "$dir1"
read n
###-------------------------------------------------------------- Function --------------------------------------------
#Function 1: Install GPFS Eviroment and Software
function InstallGPFS
{
###-------------------Enviroment
## Update hosts file:
#-e and using \t
echo -e "172.20.10.21\tgpfs-node1
172.20.10.22\tgpfs-node2
172.20.10.23\tgpfs-node3" >> /etc/hosts;
#Stop&disable the Firewall
systemctl stop firewalld;
systemctl disable firewalld;
systemctl mask --now firewalld;
#Disable Secure Linux
#sed -e 's/pattern1/pattern2/g' full_path_to_file/file
#sed -e 's/${VAR1}/${VAR2}/g' ${VAR3}
var1='SELINUX=enforcing';
var2='SELINUX=disabled';
sed -i -e "s&$var1&$var2&g" /etc/selinux/config;
#sed -i -e 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
# Configure SSH authentication between nodes (run on BOTH nodes):
ssh-keygen -t rsa -N "";
# copy public key to cluster servers
ssh-copy-id root@gpfs-node1;
ssh-copy-id root@gpfs-node2;
ssh-copy-id root@gpfs-node3;
touch ~/.hushlogin;
touch /root/.ssh/authorized_keys && chmod 600 /root/.ssh/authorized_keys;
cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys;
#ssh root@dc-ecm-cpe01
#ssh root@dc-ecm-cpe02
# Update mmdsh
echo 'PATH=$PATH:/usr/lpp/mmfs/bin' >> .bashrc;
echo $PATH;
#ntp
#timedatectl list-timezones
timedatectl set-timezone Asia/Ho_Chi_Minh;
yum install -y ntp;
ntpdate 172.20.10.1;
systemctl enable ntpd;
systemctl start ntpd;
#Mount DVD local repo
mkdir /opt/rhel-iso;
mount /dev/sr0 /opt/rhel-iso;
cat > /etc/yum.repos.d/dvd.repo <<EOF;
[DVD]
name=DVD
baseurl=file:///opt/rhel-iso
enabled=1
gpgcheck=0
EOF
#Delete Junk Character for IP Address Insert
echo "#Delete Junk Character
stty erase ^?
stty erase ^H
#stty erase ^h" >> .bashrc;
###-------------------Install dependency packages:
#yum --noplugins install -y kernel-devel gcc gcc-c++ ksh autoconf make
yum --noplugins install -y cpp gcc gcc-c++ binutils ksh m4 glibc-headers glibc-devel libstdc++-devel ntp;
yum install -y kernel-headers-$(uname -r);
yum install -y kernel-devel-$(uname -r);
## Install Spectrum Scale on Node:
dir1=/root;
cd "$dir1";
tar zxvf Spectrum_Scale_STD_500_x86_64_LNX.tar.gz;
chmod +x Spectrum_Scale_Standard-5.0.1.2-x86_64-Linux-install;
sh Spectrum_Scale_Standard-5.0.1.2-x86_64-Linux-install --silent;
dir2=/usr/lpp/mmfs/5.0.1.2/gpfs_rpms;
cd "$dir2";
rpm -ivh gpfs.base* gpfs.gpl* gpfs.license* gpfs.gskit* gpfs.msg* gpfs.compression* gpfs.docs* gpfs.ext*;
#rpm -ivh /usr/lpp/mmfs/5.0.1.2/gpfs_rpms/gpfs.base* /usr/lpp/mmfs/5.0.1.2/gpfs_rpms/gpfs.gpl* /usr/lpp/mmfs/5.0.1.2/gpfs_rpms/gpfs.license* \
#/usr/lpp/mmfs/5.0.1.2/gpfs_rpms/gpfs.gskit* /usr/lpp/mmfs/5.0.1.2/gpfs_rpms/gpfs.msg* /usr/lpp/mmfs/5.0.1.2/gpfs_rpms/gpfs.compression* /usr/lpp/mmfs/5.0.1.2/gpfs_rpms/gpfs.docs*
# Compiling modules
export PATH=$PATH:/usr/lpp/mmfs/bin/;
/usr/lpp/mmfs/bin/mmbuildgpl;
##-------- Map LUN
#Scan new disk without reboot
for i in {0..2}; do echo "- - -" > /sys/class/scsi_host/host$i/scan; done;
iscsiadm -m discovery -t st -p 172.20.10.12;
iscsiadm -m node -p 172.20.10.12 -l;
iscsiadm -m session --rescan;
iscsiadm -m session -P 3;
#---
echo "Completed to Install GPFS on this Node: ";
hostname;
echo "Need to Install GPFS on all Nodes";
echo "When all Nodes already Install GPFS, Next step is Create GFPS Cluslter, NSD";
}
#Function 2: Install GPFS GUI
function InstallGUI
{
echo " Script Install GPFS GUI by ThanhTV ";
#Install GPFS GUI on This Node
echo " Install GPFS GUI on This Node ";
hostname;
yum install -y postgresql-contrib postgresql-server;
# Download and up file boost-regex-1.53 to /root
yum install -y libboost_regex.so.1.53*;
dir2=/usr/lpp/mmfs/5.0.1.2/gpfs_rpms;
cd "$dir2";
rpm -ivh gpfs.ext*;
rpm -ivh /usr/lpp/mmfs/5.0.1.2/zimon_rpms/rhel7/gpfs.gss.pmcollector* /usr/lpp/mmfs/5.0.1.2/zimon_rpms/rhel7/gpfs.gss.pmsensors*;
rpm -ivh /usr/lpp/mmfs/5.0.1.2/gpfs_rpms/gpfs.gui* /usr/lpp/mmfs/5.0.1.2/gpfs_rpms/gpfs.java*;
export PATH=$PATH:/usr/lpp/mmfs/bin/;
systemctl enable gpfsgui;
mmperfmon config generate --collectors gpfs-node1;
mmchnode --perfmon -N gpfs-node1,gpfs-node2,gpfs-node3;
systemctl enable pmcollector;
systemctl stop pmcollector;
systemctl start pmcollector;
systemctl enable pmsensors;
systemctl stop pmsensors;
systemctl start pmsensors;
#Enable capacity data collection
mmperfmon config update GPFSDiskCap.restrict=gpfs-node1 GPFSDiskCap.period=86400;
systemctl stop gpfsgui;
systemctl start gpfsgui;
systemctl status gpfsgui;
/usr/lpp/mmfs/gui/cli/mkuser admin -g SecurityAdmin;
echo "The default user is admin";
echo "The default password is admin001";
echo "Completed to Install GPFS GUI on this Node";
hostname;
}
#--- Function Destroy GPFS Cluster, NSD
function DestroyCluster
{
export PATH=$PATH:/usr/lpp/mmfs/bin/;
echo " Destroy GPFS Cluster, NSD ";
#Unmount all GPFS file systems on all nodes
mmumount all -a;
#Remove each GPFS File systems
mmdelfs gpfs;
#Remove the NSD volume
mmdelnsd nsd1;
mmdelnsd nsd2;
mmlsnsd;
#Remove the tiebreaker disks
mmchconfig tiebreakerdisks=no;
mmdelnsd tiebreaker;
mmlsnsd;
#--- Remove the GUI node from the GUI_MGT_SERVERS node class (Option)
#mmchnodeclass GUI_MGMT_SERVERS delete -N guinode
systemctl stop gpfsgui;
mmchnodeclass GUI_MGMT_SERVERS delete -N gpfs-node1,gpfs-node2,gpfs-node3;
mmlsnodeclass;
#Shutdown GPFS nodes
mmshutdown -a;
mmgetstate -a;
mmlscluster;
}
#--- Function Uninstall GPFS Pakages
function UninstallGPFS
{
#--- Uninstall GPFS on This Node
echo " Uninstall GPFS Pakages ";
yum remove -y gpfs.crypto* gpfs.adv* gpfs.ext* gpfs.gpl* gpfs.license* gpfs.msg* gpfs.compression* gpfs.base* gpfs.docs* gpfs.gskit*;
rpm -qa | grep gpfs;
#Remove the /var/mmfs and /usr/lpp/mmfs directories
rm -Rf /var/mmfs/;
rm -Rf /usr/lpp/mmfs;
#Remove all files that start with mm from the /var/adm/ras directory.
rm -rf /var/adm/ras/mm*;
#Remove /tmp/mmfs directory and its content
rm -Rf /tmp/mmfs;
#--- Uninstall GPFS GUI on This Node
#Stop GUI
systemctl stop gpfsgui;
#Sudo
export SUDO_USER=gpfsadmin;
#Clean up the GUI database
psql postgres postgres -c "drop schema fscc cascade";
#Remove the GUI package
yum remove -y gpfs.gui* gpfs.java*;
#Uninstall the performance monitoring tool
yum remove -y gpfs.gss.pmsensors* gpfs.gss.pmcollector* pmswift* gpfs.pm-ganesha*;
rpm -qa | grep gpfs;
echo "Completed to Uninstall GPFS on this Node: ";
hostname;
}
function UninstallGPFSOther
{
echo "Uninstall GPFS on Other Node ";
#Delete Junk Character for IP Address Insert
echo "#Delete Junk Character
stty erase ^?
stty erase ^H
stty erase ^h" >> .bashrc;
echo " Type the IP address or hostname: ";
read server;
#--- Uninstall GPFS Pakages Function
ssh root@$server "$(declare -f UninstallGPFS);UninstallGPFS";
}
###------------------------------------------------------------------------- Condition -----------------------------------------------------
### Top-level menu dispatch. $n is the menu choice read earlier in the script;
### $dir1 holds the directory of GPFS-Fullv3.sh (the script re-executes itself
### to return to the main menu after each action). Menu-choice variables are
### quoted so an empty read does not break the `[ ]` tests.
###------Install GPFS on Node
if [ "$n" == 1 ];
then
##----- Install Node Menu
{
echo "***Script Install GPFS on Other Node by ThanhTV***"
echo "*** Reference IBM: https://www.ibm.com/support/knowledgecenter/STXKQY_5.0.1/com.ibm.spectrum.scale.v5r01.doc/bl1ins_manuallyinstallingonlinux_packages.htm ***"
echo "Select the operation: "
echo " 1)Press 1 to install GPFS on This Node"
echo " 2)Press 2 to install GPFS on Other Node"
echo " 3)Press 3 to exit"
echo -n "Enter choice: "
read m
#Install GPFS on Node
if [ "$m" == 1 ];
then
{
echo " Install GPFS on This Node "
#Prepare Installl
echo "***Script Setup Enviroment and Install GPFS by ThanhTV***"
echo "Please! Up the install file: /root/Spectrum_Scale_STD_500_x86_64_LNX.tar.gz"
echo "Please! Mount the DVD install to server"
read -n 1 -s -r -p "Press Enter to continue if all prepare tasks have done!"
InstallGPFS
export PATH=$PATH:/usr/lpp/mmfs/bin/
cd "$dir1"
sh GPFS-Fullv3.sh
}
#Install GPFS on Other Node
elif [ "$m" == 2 ];
then
{
echo " Install GPFS on Other Node "
echo "Type the IP address or hostname: "
read server
stty erase ^?
stty erase ^H
stty erase ^h
scp /root/GPFS-Fullv3.sh root@$server:/root/
# Ship the InstallGPFS function definition and run it on the remote host.
ssh root@$server "$(declare -f InstallGPFS);InstallGPFS"
cd "$dir1"
sh GPFS-Fullv3.sh
}
#Back to Menu
else
cd "$dir1"
sh GPFS-Fullv3.sh
fi
}
###------Uninstall GPFS on Node
elif [ "$n" == 2 ];
then
{
###------- Uninstall Menu
echo "***Script by ThanhTV***"
echo "***Please read a Read Me file carefully***"
echo "*** Reference IBM: https://www.ibm.com/support/knowledgecenter/en/STXKQY_5.0.1/com.ibm.spectrum.scale.v5r01.doc/bl1ins_uninstall.htm ***"
option=0
until [ "$option" = "3" ]; do
echo "Select the operation: "
echo " 1)Press 1 to Uninstall GPFS on This Node "
echo " 2)Press 2 to Uninstall GPFS on Other Node"
echo " 3)Press 3 to Exit"
echo -n "Enter choice: "
read option
case $option in
#----- Uninstall GPFS on This Node
1)DestroyCluster && UninstallGPFS;;
#----- Uninstall GPFS on Other Node
2)UninstallGPFSOther;;
3)exit;;
* )echo " Invalid Option. Try again! ";;
esac
done
}
###------Create GPFS Cluster, NSD
elif [ "$n" == 3 ];
then
{
echo "***Script Create Cluster and NSD for GPFS by ThanhTV***"
# Create node description
export PATH=$PATH:/usr/lpp/mmfs/bin/
#vi /root/NodeDescFile
cat > /root/NodeDescFile <<EOF
gpfs-node1:quorum-manager
gpfs-node2:quorum-manager
gpfs-node3:quorum-client
EOF
#create cluster
mmcrcluster -N /root/NodeDescFile -p gpfs-node1 -s gpfs-node2 -r /usr/bin/ssh -R /usr/bin/scp -C gpfs-cluster
mmchconfig unmountOnDiskFail=yes -N gpfs-node3
# Accept license
mmchlicense server --accept -N gpfs-node1,gpfs-node2,gpfs-node3
mmlslicense -L
# Create disk.stanza
cat > /root/disk.stanza <<EOF
%nsd: device=/dev/sdb nsd=nsd1 servers=gpfs-node1,gpfs-node2 usage=dataAndMetadata failureGroup=1 pool=system
%nsd: device=/dev/sdc nsd=nsd2 servers=gpfs-node1,gpfs-node2 usage=dataAndMetadata failureGroup=1 pool=system
EOF
#Create NSD
mmcrnsd -F /root/disk.stanza -v no
#Start cluster
mmstartup -a
mmgetstate -aL
#Wait a minnute
echo "Wait a minutes to start cluster"
sleep 1m
mmgetstate -aL
#---
echo "Completed to Create Cluster and NSD for GPFS"
echo "Next step is Create File System GPFS"
cd "$dir1"
sh GPFS-Fullv3.sh
}
###------Create File System
elif [ "$n" == 4 ];
then
{
echo "***Script Create File System by ThanhTV***"
# Interactive sub-menu for starting the cluster, creating and mounting the FS.
function CreateFS
{
option2=0
until [ "$option2" = "5" ]; do
#------ Create FS Menu
echo "Select the operation: "
echo " 1)Press 1 to startup all node again"
echo " 2)Press 2 to check state"
echo " 3)Press 3 create file system"
echo " 4)Press 4 to mount all file system"
echo " 5)Press 5 to exit"
echo -n "Enter choice: "
read option2
echo ""
case $option2 in
1 ) mmstartup -a;;
2 ) mmgetstate -aL;;
3 ) mmcrfs gpfs -F /root/disk.stanza -T /gpfs -B 1M -A yes -v yes && mmlsfs all;;
4 ) /usr/lpp/mmfs/bin/mmmount all -a && df -h;;
5 ) sh GPFS-Fullv3.sh;;
* ) CreateFS;;
esac
done
}
CreateFS
}
###------ Install GUI
elif [ "$n" == 5 ];
then
{
echo "***Script Install GPFS on Other Node by ThanhTV***"
echo "Select the operation: "
echo " 1)Press 1 to install GPFS GUI on This Node"
echo " 2)Press 2 to install GPFS GUI on Other Node"
echo " 3)Press 3 to exit"
echo -n "Enter choice: "
read p
#Install GPFS GUI on This Node
if [ "$p" == 1 ];
then
{
InstallGUI
#Back to Menu
cd "$dir1"
sh GPFS-Fullv3.sh
}
elif [ "$p" == 2 ];
then
{
echo " Install GPFS GUI on Other Node "
# (fix) the line below used to be bare text, which executed a nonexistent
# command named "Install"; it is clearly a comment.
#Install GPFS GUI on Other Node
stty erase ^?
stty erase ^H
stty erase ^h
echo "Type the IP address or hostname: "
read server
ssh root@$server "$(declare -f InstallGUI);InstallGUI"
cd "$dir1"
sh GPFS-Fullv3.sh
}
#Back to Menu
else
cd "$dir1"
sh GPFS-Fullv3.sh
fi
}
###------Exit Menu
elif [ "$n" == 6 ];
then
echo " Exit ! "
exit
else
echo "***Invalid Option***"
dir1=/root
cd "$dir1"
sh GPFS-Fullv3.sh
fi
#-------- clean up ------
#hosts
#cp /etc/hosts /etc/hosts.bak
#cat /etc/hosts | sort | uniq > /etc/hosts.txt
#mv /etc/hosts.txt /etc/hosts
#.bashrc
#cp .bashrc .bashrc.bak
#cat .bashrc| sort | uniq > .bashrc.txt
#mv .bashrc.txt .bashrc | true |
6a967a178b585adad42e434a0c490ed687d73cb9 | Shell | studentperson/Arrays | /projectTestMain.bash | UTF-8 | 1,614 | 3.5625 | 4 | [] | no_license | #!/bin/sh
# for a command line shell script argument
# $0 is the shell script $1+ are the arguments
# make executable with chmod permissions scriptname
#    i.e. chmod 754 projectTestMain.bash
# 4 read, 2 write, 1 execute, 0 none
# permissions: user group other; 7 for user, 5 for group, 4 for other
#
# Runs projectTestHelper.bash for every (sort algorithm, input ordering)
# combination at the input length selected by $1, saving each run's output
# under ./results/.
#
# (fix) explicit lists instead of {0..4}: brace expansion is a bashism and
# this script is declared with a /bin/sh shebang.
for count00 in 0 1 2 3 4
do
    for count01 in 0 1 2 3
    do
        order="null"
        sorttype="null"
        length=0

        # Outer index -> sort algorithm tag.
        case "$count00" in
            0) sorttype="sel" ;;
            1) sorttype="ins" ;;
            2) sorttype="bub" ;;
            3) sorttype="mrg" ;;
            4) sorttype="qik" ;;
            *) echo "Error not in scope" ;;
        esac

        # Inner index -> initial ordering of the input data.
        case "$count01" in
            0) order="rand" ;;
            1) order="sort" ;;
            2) order="rev" ;;
            3) order="part" ;;
            *) echo "Error not in scope" ;;
        esac

        # First script argument -> input length (powers of ten).
        case "$1" in
            0) length=1 ;;
            1) length=10 ;;
            2) length=100 ;;
            3) length=1000 ;;
            4) length=10000 ;;
            5) length=100000 ;;
            6) length=1000000 ;;
            *) echo "Error not in scope" ;;
        esac

        echo "Hello $sorttype $order $length"
        #Note the directory has to exist for it to save it there
        ./projectTestHelper.bash "$count00" "$count01" "$length" > "./results/$sorttype$order$1"
        echo "Bye $sorttype $order $length"
    done
done
| true |
6cc17ff2af9c1e01b1fca036a3bdebca7a04bce3 | Shell | Global19-atlassian-net/networkexplorer | /bin/blockexplorer.sh | UTF-8 | 2,317 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env bash
set -e

# Decide which components to run from the first CLI argument
# (defaults to "all" when no argument is given).
cmd=${1:-all}

runUi=0
runApi=0
runProxy=0
case $cmd in
  ui)         runUi=1 ;;
  apiNoProxy) runApi=1 ;;
  api)        runApi=1; runProxy=1 ;;
  all)        runUi=1; runApi=1; runProxy=1 ;;
  *)
    echo "Error: unknown command: $cmd"
    exit 1
    ;;
esac
# Remember where the user launched us; log files are written here later.
cwd=$PWD
# Resolve the installed @solana/blockexplorer package directory: the embedded
# node snippet probes the usual npm install locations relative to this script
# and prints the resolved absolute path (or throws a descriptive error).
rootDir=$(
cd "$(dirname "$0")";
node -p '
try {
let package_json = [
"../lib/node_modules/@solana/blockexplorer/package.json",
"../@solana/blockexplorer/package.json",
"../package.json"
].find(require("fs").existsSync);
assert(
require(package_json)["name"] === "@solana/blockexplorer",
"Invalid package name in " + package_json
);
const path = require("path");
path.resolve(path.dirname(package_json))
} catch (err) {
throw new Error("Unable to locate blockexplorer directory: " + String(err));
}
'
)
cd "$rootDir"
# Refuse to run without build artifacts in place.
if [[ ! -d build || ! -f build/api/api.js ]]; then
echo "Error: build/ artifacts missing. Run |yarn run build| to create them"
exit 1
fi
# Kill any component processes we started, then abort the script.
# Registered below for SIGINT/SIGTERM and any command failure (ERR).
cleanup() {
  set +e
  local pid
  for pid in "$api" "$proxy" "$ui"; do
    if [[ -n $pid ]]; then
      kill "$pid"
    fi
  done
  exit 1
}
trap cleanup SIGINT SIGTERM ERR
# When the api component is selected, verify redis is reachable up front
# (the subshell's `set -x` echoes the probe command for the log).
if ((runApi)); then
(
set -x
redis-cli ping
)
fi
# Start each run with fresh per-component log files in the caller's cwd.
rm -f "$cwd"/solana-blockexplorer-{api,proxy,ui}.log
api=
proxy=
ui=
# Supervisor loop: once per second, (re)start any enabled component whose
# process is gone (no recorded pid yet, or `kill -0` reports the pid dead).
while true; do
if ((runApi)); then
if [[ -z $api ]] || ! kill -0 "$api"; then
logfile="$cwd"/solana-blockexplorer-api.log
echo "Starting api process (logfile: $logfile)"
date | tee -a "$logfile"
npm run start-prod:api >> "$logfile" 2>&1 &
api=$!
echo " pid: $api"
fi
fi
if ((runProxy)); then
if [[ -z $proxy ]] || ! kill -0 "$proxy"; then
logfile="$cwd"/solana-blockexplorer-proxy.log
echo "Starting proxy process (logfile: $logfile)"
date | tee -a "$logfile"
npm run start-prod:proxy -- --keys "$cwd" >> "$logfile" 2>&1 &
proxy=$!
echo " pid: $proxy"
fi
fi
if ((runUi)); then
if [[ -z $ui ]] || ! kill -0 "$ui"; then
logfile="$cwd"/solana-blockexplorer-ui.log
echo "Starting ui process (logfile: $logfile)"
date | tee -a "$logfile"
npm run start-prod:ui >> "$logfile" 2>&1 &
ui=$!
echo " pid: $ui"
fi
fi
sleep 1
done
| true |
8d03dbe9e08798df8a9d903cc0765a38e8c6b65b | Shell | signalfx/collectd-build-ubuntu | /build-collectd/sfx_scripts/cmdseq | UTF-8 | 2,248 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Trace every command and abort on the first failure.
set -xe
# Optional first argument: DEBUG flag exported into the build environment.
if [ ${1+x} ]; then
DEBUG=$1
export DEBUG=$DEBUG
fi
echo "Repositories file:"
cat /etc/apt/sources.list
# Install our pbuilder configuration for the current user.
cat /opt/collectd-build/sfx_scripts/pbuilderrc > ~/.pbuilderrc
# Assemble a clean build tree: Debian packaging files + collectd sources.
rm -rf /opt/workspace/*
mkdir -p /opt/workspace/collectd
cd /opt/workspace/collectd
rm -rf /opt/collectd/debian/
cp -rf /opt/collectd-build/debian/ .
cp -rf /opt/collectd/* .
# Retarget the changelog at the requested distribution and apply
# distribution-specific packaging patches.
sed -i 's/trusty/'${DISTRIBUTION}'/g' debian/changelog
if [ "$DISTRIBUTION" = "precise" ] || [ "$DISTRIBUTION" = "wheezy" ]; then
patch -p0 < debian/patches/precise_control.patch
patch -p0 < debian/patches/precise_rules.patch
elif [ "$DISTRIBUTION" = "bionic" ]; then
patch -p0 < debian/patches/bionic_rules.patch
fi
#patch version-gen.sh
# Replace version-gen.sh with a stub printing the fixed version derived from
# the changelog (the "git" placeholder swapped for the release tag).
VERSION_TAG="$(head -1 debian/changelog | awk -F"[-~]" 'NF>2{print $2}')"
_NEW_VERSION=$(grep DEFAULT_VERSION= version-gen.sh | awk -F\" '{ print $2; }' | perl -p -e "s/git/$VERSION_TAG/g;")
cat <<HERE > version-gen.sh
#!/bin/sh
printf "$_NEW_VERSION"
HERE
chmod 755 version-gen.sh
./build.sh
# Build the source package.
# NOTE(review): DIST/ARCH here prefix `yes`, not `debuild`, so debuild never
# sees them; confirm whether `yes | DIST=... debuild ...` was intended.
DIST=${DISTRIBUTION} ARCH=amd64 yes | debuild -us -uc -S
rm -rf /opt/result/*
mkdir -p /opt/result/debuild
cp -rf ../* /opt/result/debuild/
# Publish builds wipe the pbuilder cache between steps; development builds
# instead mount the build directory on tmpfs for speed.
if [[ "$BUILD_PUBLISH" == "True" ]]; then
DELETE_PUBUILDER_CACHE_FOLDER="rm -rf /var/cache/pbuilder/*"
else
# make pbuilder faster during development by mounting the cached packages on to RAM
mkdir -p /var/cache/pbuilder/build
echo "tmpfs /var/cache/pbuilder/build tmpfs defaults,auto 0 0" >> /etc/fstab
mount /var/cache/pbuilder/build
DELETE_PUBUILDER_CACHE_FOLDER=""
fi
eval $DELETE_PUBUILDER_CACHE_FOLDER
# Create the chroot and build the binary packages inside it.
if [ "${DISTRIBUTION}" = "bionic" ]; then
sudo DIST=${DISTRIBUTION} ARCH=amd64 pbuilder create --extrapackages apt-transport-https --debootstrapopts --include=gnupg,ca-certificates
else
sudo DIST=${DISTRIBUTION} ARCH=amd64 pbuilder create
fi
DIST=${DISTRIBUTION} ARCH=amd64 pdebuild /opt/result/debuild/*.dsc
mkdir -p /opt/result/pdebuild/
cp /var/cache/pbuilder/${DISTRIBUTION}-amd64/result/* /opt/result/pdebuild/
eval $DELETE_PUBUILDER_CACHE_FOLDER
cd ..
rm -rf /opt/result/debuild/collectd
# Create unsigned PPA on the Amazon s3 bucket
cd /opt/result/
mkdir -p test/debs
cp -rf pdebuild/* test/debs/
cd test
dpkg-scanpackages debs /dev/null > Packages
| true |
c1c2b80882b5772ed60da55ea53851d129b0cf7b | Shell | ETHZ-INS/enrichMiR_benchmark | /processing/amin.datagen.raw.SE.sh | UTF-8 | 1,254 | 2.578125 | 3 | [] | no_license | #!/bin/bash/
# GENCODE GRCm38.p5 annotation used for transcript-level read counting.
a=/reference/Mus_musculus/GENCODE/GRCm38.p5/Annotation/Genes/genes.gtf
fc_opts=" --largestOverlap --primary -s 0 -T 10 --extraAttributes gene_name,gene_biotype"
# Count reads per transcript over all STAR alignments of the Amin 2015 data.
featureCounts -t transcript $fc_opts -a $a -o comb.counts /mnt/schratt/p1006/Amin_2015/STAR_2/*.sam
# Convert the featureCounts table into a SummarizedExperiment and save it.
# NOTE: the heredoc delimiter is unquoted, so the shell processes $ and
# backslashes inside it — hence the R code uses [[ ]] accessors (no $) and
# triple backslashes for the "\\." regex.
R --slave --no-restore --no-save <<RSCRIPT
fc2se <- function(countfile){
library(SummarizedExperiment)
x <- read.delim(countfile,header=TRUE,skip=1,row.names=1)
rd <- x[,1:7]
levels(rd[["Chr"]]) <- sapply(strsplit(levels(rd[["Chr"]]),";"), FUN=function(x){ paste(unique(x), collapse=";")})
levels(rd[["Strand"]]) <- sapply(strsplit(levels(rd[["Strand"]]),";"), FUN=function(x) x[1])
x <- as.matrix(x[,-1:-7])
colnames(x) <- sapply(strsplit(colnames(x),"\\\."), function(y) rev(y)[[4]])
se <- SummarizedExperiment(list(counts=x), rowData=rd)
row.names(se) <- paste(row.names(se),rowData(se)[["gene_name"]],sep=".")
se[["Assigned"]] <- colSums(x)
if(file.exists(paste0(countfile,".summary"))){
x <- read.delim(paste0(countfile,".summary"),header=TRUE,row.names=1)
colnames(x) <- colnames(se)
se[["Unassigned"]] <- colSums(x[-1,colnames(se)])
}
se
}
se <- fc2se("./comb.counts")
saveRDS(se, file="amin.raw.SE.rds")
RSCRIPT
| true |
a1fac71e5d77f5c8444827ac43e2f3cbac33842c | Shell | Chewyfitz/Coding-Adventures | /OpenMP/simple/car_parallel.sh | UTF-8 | 648 | 4.1875 | 4 | [] | no_license | #! /bin/bash
# Print usage to stderr and abort.
usage() {
cat << EOF >&2
Usage: $0 [-v] filename
EOF
exit 1
}

verbose=false

# Parse options: -v prints the source before building it.
# (fix) the original optstring "v:verbose" made -v consume the filename as
# its argument; a plain flag plus a shift handles "-v filename" correctly.
while getopts "v" o; do
  case $o in
    (v) verbose=true ;;
    (*) usage ;;
  esac
done
shift $((OPTIND - 1))
file=$1

# The argument is the base name only, never "name.c".
if [[ "$file" = *.c ]]; then
  echo "Wrong argument format, should be 'filename' not 'filename.c'."
  usage
fi
if [[ "$file" = *- ]]; then
  echo "Wrong argument format."
  usage
fi
if [[ ! -f "$file.c" ]]; then
  echo "File $file.c not found."
  usage
fi

if [ "$verbose" = true ]; then
  echo "> cat $file.c"
  cat "$file.c"
fi

# Compile with OpenMP support and run the resulting binary.
echo "> gcc-11 -fopenmp $file.c -o $file"
gcc-11 -fopenmp "$file.c" -o "$file"

echo "> ./$file"
./"$file"
| true |
5ebee384fb7a489e36883d2f4d64ecc8bf1789c7 | Shell | luoxz-ai/mmo-server | /server-code/bin/shell/cfg/convert_pb_all.sh | UTF-8 | 258 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Work from the repository root (two levels above this script's directory).
cd "$(dirname "$0")"
cd ../..
DIR_file=$(pwd)

set -e

# Remove previously generated config blobs, if any.
rm -f res/config/*.bytes

# Convert every Excel sheet. (fix) iterate the glob directly instead of
# parsing `ls` output, which breaks on file names containing whitespace.
for file in ../../server-res/excel/*.xlsx
do
    filename=$(basename "$file")
    echo "process: $filename"
    shell/cfg/convert_pb.sh "${filename%.*}"
done
| true |
677004ff07ae3ab5adb26eb0fa587c0c07835b9d | Shell | attacker-codeninja/BugBounty-1 | /getjs_paths_words.sh | UTF-8 | 256 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Crawl the given URL for JavaScript paths and build a wordlist from them.
if [ -z "$1" ]; then
	echo " - Give the url to grab jspaths and create wordlist"
else
	hakrawler -url "$1" -js -depth 1 -plain > jspaths
	# (fix) read the discovered paths line by line instead of word-splitting
	# `$(cat jspaths)`, which breaks on whitespace in URLs; extract words
	# from each JS file and append them to the wordlist.
	while IFS= read -r name; do
		python3 /root/opt/getjswords.py "$name" | sort -u | tee -a jswordlist
	done < jspaths
fi
| true |
daebad0fefba466cc70ab14df7e0852d578ad093 | Shell | espnet/espnet | /egs2/sms_wsj/enh1/local/create_database.sh | UTF-8 | 6,583 | 3.703125 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Copyright 2020 Shanghai Jiao Tong University (Authors: Wangyou Zhang)
# Apache 2.0
set -e
set -u
set -o pipefail
# Defaults; all of these can be overridden as --flags (see parse_options.sh).
nj=16
min_or_max=max
sample_rate=8k
num_spk=2
download_rir=true
write_all=true
. utils/parse_options.sh
. ./path.sh
# Validate every option before doing any work.
if [[ "$min_or_max" != "max" ]]; then
echo "Error: min_or_max must be max: ${min_or_max}"
exit 1
fi
if [[ "$sample_rate" == "16k" ]]; then
sample_rate2=16000
echo "Warning: sample_rate=16k is not officially supported yet."
exit 1
elif [[ "$sample_rate" == "8k" ]]; then
sample_rate2=8000
else
echo "Error: sample rate must be either 16k or 8k: ${sample_rate}"
exit 1
fi
if [[ $num_spk != [2-4] ]]; then
echo "Error: number of speakers must be 2, 3, or 4: ${num_spk}"
exit 1
fi
if [[ "$download_rir" != "true" ]] && [[ "$download_rir" != "false" ]]; then
echo "Error: download_rir must be either true or false: ${download_rir}"
exit 1
fi
if [[ "$write_all" != "true" ]] && [[ "$write_all" != "false" ]]; then
echo "Error: write_all must be either true or false: ${write_all}"
exit 1
fi
# Exactly four positional arguments are required (see usage below).
if [ $# -ne 4 ]; then
echo "Usage: $0 <wsj0-path> <wsj1-path> <wsj-zeromean-wav> <sms-wsj-wav>"
echo " where <wsj0-path> is the original wsj0 path,"
echo " <wsj1-path> is the original wsj1 path,"
echo " <wsj-zeromean-wav> is path to store the zero-mean normalized wsj."
echo " <sms-wsj-wav> is path to store the generated sms-wsj."
echo "[Optional]"
echo " --nj <nj> # number of parallel jobs (Default=${nj})"
echo " --min-or-max <min_or_max> # min or max length for generating mixtures (Default=${min_or_max})"
echo " --sample-rate <sample_rate> # sample rate (Default=${sample_rate})"
echo " --num-spk <num_spk> # number of speakers (Default=${num_spk})"
echo " --download-rir <download_rir> # whether to download or simulate RIRs (Default=${download_rir})"
echo " --write-all <download_rir> # whether to store all intermediate audio data (Default=${write_all})"
echo "Note: this script won't actually re-download things if called twice,"
echo "because we use the --continue flag to 'wget'."
exit 1;
fi
# Positional arguments: source corpora and output locations.
wsj0_path=$1
wsj1_path=$2
wsj_zeromean_wav=$3
sms_wsj_wav=$4
# JSON metadata and RIRs are stored alongside the generated audio.
json_dir=${sms_wsj_wav}
rir_dir=${sms_wsj_wav}/rirs
sph2pipe=${KALDI_ROOT}/tools/sph2pipe_v2.5/sph2pipe
# Fail early if any required external tool is missing.
if ! command -v "${sph2pipe}" &> /dev/null; then
echo "Could not find (or execute) the sph2pipe program at $sph2pipe";
exit 1;
fi
if ! command -v "mpiexec" &> /dev/null; then
echo "Could not find (or execute) the mpiexec program";
exit 1;
fi
if ! command -v "sox" &> /dev/null; then
echo "Could not find (or execute) the sox program";
exit 1;
fi
# Stage 1: zero-mean normalized WSJ wav files (skipped if already present).
# This takes about 15 minutes with nj=16.
if [[ ! -d ${wsj_zeromean_wav} ]]; then
echo "Creating zero-mean normalized wsj at '${wsj_zeromean_wav}'."
mpiexec -np ${nj} python -m sms_wsj.database.wsj.write_wav \
with dst_dir=${wsj_zeromean_wav} wsj0_root=${wsj0_path} \
wsj1_root=${wsj1_path} sample_rate=${sample_rate2}
fi
# Stage 2: JSON index of the normalized WSJ data.
mkdir -p ${json_dir}
if [[ ! -f ${json_dir}/wsj_${sample_rate}_zeromean.json ]]; then
echo "Creating ${json_dir}/wsj_${sample_rate}_zeromean.json"
python -m sms_wsj.database.wsj.create_json \
with json_path=${json_dir}/wsj_${sample_rate}_zeromean.json \
database_dir=${wsj_zeromean_wav} as_wav=True
fi
# Stage 3: room impulse responses — either downloaded from Zenodo or simulated.
if [[ ! -d ${rir_dir} ]]; then
if ${download_rir}; then
mkdir -p ${rir_dir}
echo "Downloading RIRs (50.8 GB) in '${rir_dir}'"
# wget -qO- https://zenodo.org/record/3517889/files/sms_wsj.tar.gz.parta{a,b,c,d,e} \
# | tar -C ${rir_dir}/ -zx --checkpoint=10000 --checkpoint-action=echo="%u/5530000 %c"
## In case of instable network connection, please use the following command:
temp_dir=$(mktemp -d data/temp.XXX) || exit 1
for url in https://zenodo.org/record/3517889/files/sms_wsj.tar.gz.parta{a,b,c,d,e}; do
wget --continue -O "${temp_dir}/$(basename ${url})" ${url}
done
cat ${temp_dir}/sms_wsj.tar.gz.parta* | \
tar -C ${rir_dir}/ -zx --checkpoint=10000 --checkpoint-action=echo="%u/5530000 %c"
rm -rf "${temp_dir}"
else
echo "Simulating RIRs in '${rir_dir}'"
# This takes around 1900 / (ncpus - 1) hours.
mpiexec -np ${nj} python -m sms_wsj.database.create_rirs \
with database_path=${rir_dir}
fi
fi
# Stage 4: intermediate mixture description JSON.
if [[ ! -f ${json_dir}/intermediate_sms_wsj.json ]]; then
echo "Creating ${json_dir}/intermediate_sms_wsj.json"
python -m sms_wsj.database.create_intermediate_json \
with json_path=${json_dir}/intermediate_sms_wsj.json rir_dir=${rir_dir} \
wsj_json_path=${json_dir}/wsj_${sample_rate}_zeromean.json debug=False num_speakers=${num_spk}
fi
# Stage 5: write the actual SMS-WSJ audio data.
# This takes about 25 minutes with the default configuration.
# NOTE (Wangyou): If you try to rerun this part, please make sure the directories under
# ${sms_wsj_wav}/ are deleted in advance.
echo "Creating ${sms_wsj_wav} audio data in '${sms_wsj_wav}'"
mpiexec -np ${nj} python -m sms_wsj.database.write_files \
with dst_dir=${sms_wsj_wav} json_path=${json_dir}/intermediate_sms_wsj.json \
write_all=True debug=False
# Stage 6: final JSON describing the written files.
if [[ ! -f ${json_dir}/sms_wsj.json ]]; then
echo "Creating ${json_dir}/sms_wsj.json"
python -m sms_wsj.database.create_json_for_written_files \
with db_dir=${sms_wsj_wav} intermed_json_path=${json_dir}/intermediate_sms_wsj.json \
write_all=True json_path=${json_dir}/sms_wsj.json debug=False
fi
# The total disk usage of SMS-WSJ is 442.1 GiB + 240.2 GiB = 682.3 GiB.
# This number may be larger than the officially reported one, because we write
# all intermediate files (see [additional data] below) to the disk.
# --------------------------------------------------------------------------------
# directory/file disk usage #channels #samples
# --------------------------------------------------------------------------------
# tail 120.1 GiB 6 35875 * 2 (only when write_all=True)
# early 120.1 GiB 6 35875 * 2 (only when write_all=True)
# observation 60.0 GiB 6 35875
# noise 60.0 GiB 6 35875
# --------------------------- [additional data] ----------------------------------
# source_signal 120.1 GiB 6 35875 * 2
# reverb_source 120.1 GiB 6 35875 * 2
# --------------------------------------------------------------------------------
# rirs 52.6 GiB 6 143500=(33561+982+1332)*4 (up to 4 srcs)
# wsj_8k_zeromean 29.2 GiB 1 131824
# --------------------------------------------------------------------------------
| true |
f260b374b11dc892bc92c7f7c99c39548643ecbf | Shell | u-n-i-v-e-r-z/single_Cell01 | /exp/qualilty_analysis/bin/trimGalore2FastQC.sh | UTF-8 | 2,523 | 3.640625 | 4 | [] | no_license | # Single Cell RNA seq project
# Cuvier's Team
# Schaak - Heurteau
# 2017
# Apply trimGalore and Fastqc on new trimmed sequences recursively on all directories under the current one
#!/bin/bash
# Tools path
trimG="/usr/local/bioinfo/src/Trim_Galore/trim_galore_v0.4.0/trim_galore"

# Option summary, shared by -h and the invalid-option branch.
print_help()
{
	echo "-f emplacement of fastq files eg:/myfastq/directory"
	echo "-o path to output trimmed reads (created by the script if doesn't exists) eg:/my/path_to_trimmed"
	echo "-g path to graphics fastqc output (created by the script if doesn't exists) eg:/my/path_to_fastqc_graph"
	echo "-l log directory where error and output of qarray will be written (created by the script if doesn't exists) eg: /my/log/directory"
	echo "-s where the script that will be launched by qarray should be written eg: /my/out/directory"
	echo "-h Display this help"
}

# GET OPTION
while getopts "f:g:o:l:s:h" OPT
do
	case $OPT in
	f)
		fastq_path=$OPTARG
		;;
	g)
		out_path=$OPTARG
		;;
	o)
		out_trim_path=$OPTARG
		;;
	l)
		qarray_out_path=$OPTARG
		;;
	s)
		sub_path=${OPTARG%%.*}
		;;
	\?)
		print_help
		exit 1
		;;
	h)
		print_help
		;;
	esac
done

# Qarray Errors: one out_/err_ log directory pair per run, stamped with the date.
mkdir -p "${qarray_out_path}"/{out_`date +%F_%H-%M`,err_`date +%F_%H-%M`}
mkdir -p "${out_path}"
mkdir -p "${out_trim_path}"

cd "${fastq_path}"

# Start each run with a fresh command trace.
if [ -s "${out_path}/trace.cmd" ]; then rm "${out_path}/trace.cmd"; fi

# Loop-invariant: the qarray submission script path depends only on $0/-s.
script_name=$0
sub_dir=${sub_path}/${script_name%.*}_sub.sh
# (fix) iterate the R1 fastq files with a glob instead of parsing `ls`
# output (robust against whitespace); derive the matching R2 mate name.
for r1 in *R1*
do
	r2="${r1/R1/R2}"
	echo ${trimG} -q 20 -stringency 5 --fastqc_args \"--nogroup --outdir ${out_path}\" -o ${out_trim_path} --paired ${fastq_path}/${r1} ${fastq_path}/${r2} >> "${out_path}/trace.cmd"
	echo ${trimG} -q 20 -stringency 5 --fastqc_args \"--nogroup --outdir ${out_path}\" -o ${out_trim_path} --paired ${fastq_path}/${r1} ${fastq_path}/${r2} >> "${sub_dir}"
done
cd ../
| true |
351479f959fc65d459d6b7148422d4a9c548d8f3 | Shell | prplfoundation/prplMesh-old | /tests/start_interfaces | UTF-8 | 3,750 | 3.140625 | 3 | [
"BSD-2-Clause-Patent"
] | permissive | #! /bin/bash
# prplMesh Wi-Fi Multi-AP
#
# Copyright (c) 2018, prpl Foundation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Subject to the terms and conditions of this license, each copyright
# holder and contributor hereby grants to those receiving rights under
# this license a perpetual, worldwide, non-exclusive, no-charge,
# royalty-free, irrevocable (except for failure to satisfy the
# conditions of this license) patent license to make, have made, use,
# offer to sell, sell, import, and otherwise transfer this software,
# where such license applies only to those patent claims, already
# acquired or hereafter acquired, licensable by such copyright holder or
# contributor that are necessarily infringed by:
#
# (a) their Contribution(s) (the licensed copyrights of copyright holders
# and non-copyrightable additions of contributors, in source or binary
# form) alone; or
#
# (b) combination of their Contribution(s) with the work of authorship to
# which such Contribution(s) was added by such copyright holder or
# contributor, if, at the time the Contribution is added, such addition
# causes such combination to be necessarily infringed. The patent
# license shall not apply to any other combinations which include the
# Contribution.
#
# Except as expressly stated above, no rights or licenses from any
# copyright holder or contributor is granted under this license, whether
# expressly, by implication, estoppel or otherwise.
#
# DISCLAIMER
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
# Make sure we are root (otherwise re-exec this script under sudo,
# preserving all arguments).
test $(id -u) = 0 || exec sudo "$0" "$@"
# First argument: path to the al_entity binary under test; the remaining
# arguments form the test command executed at the bottom of the script.
al_entity_exec="$1"; shift
test -x "${al_entity_exec}" || {
    echo "$0: $al_entity_exec not executable" 1>&2
    exit 1
}
# Tear everything down: terminate the background al_entity job (if any)
# and remove all four test veth pairs created below.
stop_interfaces() {
    local num
    kill -9 %1 2>/dev/null || true
    wait %1 2>/dev/null  # suppress "Terminated" output
    for ((num = 0; num <= 3; num++)); do
        ip link delete "aletest${num}"
    done
}
trap stop_interfaces EXIT
# Create four veth pairs (aletest0..3 <-> aletestpeer0..3) with fixed MAC
# addresses, and accumulate the comma-separated interface list that is
# handed to the AL entity.
interfaces=""
for num in 0 1 2 3; do
    ip link add aletest$num type veth peer name aletestpeer$num
    ip link set dev aletest$num up address 00:ee:ff:33:44:${num}0
    ip link set dev aletestpeer$num up address 00:ee:ff:33:44:${num}1
    interfaces="${interfaces}${interfaces:+,}aletest${num}:simulated:aletest${num}.sim"
done
# Generate 8MB core files
ulimit -c 8000
# Start the AL entity in the background, then run the test command;
# propagate the test command's failure status if it fails.
"$al_entity_exec" -m 02:ee:ff:33:44:00 -i "$interfaces" -r aletest2 -v -v &
"$@" || exit $?
# Test that background job is still running.
jobs %1 >/dev/null 2>&1 || exit 1
| true |
21f68492452fd30bea2530926a2557f409bfd8b7 | Shell | irinadeeva/filler | /resources/ex.sh | UTF-8 | 179 | 2.6875 | 3 | [] | no_license | #!/bin/bash
max=10
c=0
rm traceresult
for i in `seq 2 $max`
do
./filler_vm -f $1 -p1 $2 -p2 $3 -f > result
cat filler.trace >> traceresult
done
cat traceresult | grep "won"
| true |
cd7b9e1c55b6fba5c8200f3515a7fb7132cb378f | Shell | ignatisD/bash-aliases | /.bash_aliases | UTF-8 | 4,934 | 3.734375 | 4 | [] | no_license | alias untar="tar -zxvf"
alias targz="tar cvzf"
alias clipfile="xclip -sel clip"
alias clipout="xclip -o -sel clip"
alias myip=GetMyIP
alias myinter=GetMyInterface
alias postmanupdate=UpdatePostman
alias gitstats="git diff --shortstat"
alias memoryusage=memoryUsage
alias remoteupload=RemoteUpload
alias remotedownload=RemoteDownload
alias tsnpm=InstallNpmWithTypes
alias match=PerlMatch
alias aliasedit=EditMyAliases
alias sshedit="gedit ~/.ssh/config"
# Disk usage summary: dui [folder] [depth]
# Defaults to the current directory at depth 1; the first argument is only
# honoured when it names an existing directory.
function dui()
{
    local target="."
    local depth="1"
    [[ -n "${1}" && -d "${1}" ]] && target="${1}"
    [[ -n "${2}" ]] && depth="${2}"
    du -h -d "$depth" "$target"
}
# Open one of the shell config files in gedit:
#   EditMyAliases <name>  -> ~/.bash_<name>   (when that file exists)
#   EditMyAliases ssh     -> ~/.ssh/config
#   anything else         -> ~/.bash_aliases
function EditMyAliases()
{
    if [ -n "$1" ] && [ -f ~/.bash_"$1" ]; then
        gedit ~/.bash_"$1"
    elif [ "$1" == "ssh" ]; then
        gedit ~/.ssh/config
    else
        gedit ~/.bash_aliases
    fi
}
# Print only the parts of stdin matching the given Perl-compatible regex.
# (fix) `-e` marks the argument as the pattern, so regexes that begin with
# a dash are no longer misparsed as grep options.
function PerlMatch()
{
    grep -oP -e "$1"
}
# Install an npm package plus its DefinitelyTyped type definitions:
# InstallNpmWithTypes <package>
function InstallNpmWithTypes()
{
    local pkg="$1"
    npm i --save "$pkg" && npm i --save-dev "@types/${pkg}"
}
# Copy a file from a remote host: RemoteDownload <host> <remote-path> <local-dest>.
function RemoteDownload()
{
    scp "$1":"$2" "$3"
}
# Copy a local file to a remote host (preserving times/modes with -p):
# RemoteUpload <local-file> <host> <remote-dest>.
function RemoteUpload()
{
    scp -p "$1" "$2":"$3"
}
# Print per-process memory usage ("size" column converted to MB) sorted
# from largest to smallest.
# NOTE(review): `cut -d "" -f2` uses an empty delimiter, which GNU cut
# rejects ("the delimiter must be a single character"), so that pipeline
# stage likely fails — confirm the intended delimiter before relying on it.
function memoryUsage()
{
    ps -eo size,pid,user,command --sort -size | awk '{ hr=$1/1024 ; printf("%13.2f Mb ",hr) } { for ( x=4 ; x<=NF ; x++ ) { printf("%s ",$x) } print "" }' |cut -d "" -f2 | cut -d "-" -f1
}
# Download the latest Postman Linux tarball and unpack it into /opt
# (extraction needs sudo); the downloaded archive is removed afterwards.
function UpdatePostman()
{
    wget https://dl.pstmn.io/download/latest/linux64 -O ~/Downloads/postman.tar.gz
    sudo tar -xzf ~/Downloads/postman.tar.gz -C /opt
    rm ~/Downloads/postman.tar.gz
}
# Show the machine's local IP (the source address of a probe route toward
# 8.8.8.8) and its public IP (queried via whatismyip.akamai.com, with the
# request round-trip time appended). Optional $1: an HTTP proxy for curl (-x).
function GetMyIP()
{
    echo Local: $(ip route get 8.8.8.8 | awk '{print $(NF-2); exit}')
    if [ "$1" == "" ]; then
        echo Public: $(curl -s -w " - Ping: %{time_total} sec\n" http://whatismyip.akamai.com)
    else
        echo Public: $(curl -x "$1" -s -w " - Ping: %{time_total} sec\n" http://whatismyip.akamai.com)
    fi;
}
# Resolve the network interface used for the default route (via a probe
# route to 8.8.8.8) and print it in red. Deliberately assigns the global
# ACTUAL_INTERFACE so callers such as GetLocalDevices can reuse the value.
function GetMyInterface()
{
    ACTUAL_INTERFACE=$(ip route get 8.8.8.8 | grep -Po '(?<=(dev ))[a-zA-Z0-9]+(?= )')
    echo -e "Interface: \e[38;5;196m${ACTUAL_INTERFACE:-NOT_FOUND}\e[0m"
}
# List devices on the local network: an active arp-scan when running as
# root, otherwise the kernel ARP cache. Relies on GetMyInterface to
# populate the global ACTUAL_INTERFACE first.
function GetLocalDevices()
{
    GetMyInterface
    if [ "$EUID" == 0 ]; then
        arp-scan --interface="${ACTUAL_INTERFACE}" --localnet
    else
        arp -i "${ACTUAL_INTERFACE}"
    fi
}
# Show the current BTC price on Coinbase Pro and Kraken.
#   -c CUR  quote currency (first three letters, default EUR; USD special-cased)
#   -t N    refresh every N seconds, redrawing in place; omit for a single shot
# Requires curl and node (node evaluates the exchanges' JSON responses).
function btc()
{
    local OPTIND
    usage() { echo "Usage: btc [-c USD] [-t <int>]" 1>&2; exit 1; }
    PAIR=EUR
    COINPAIR="BTC-EUR"
    KRAKPAIRRES="XXBTZEUR"
    KRAKPAIR="XBTEUR"
    SLEEP_FOR=""
    while getopts ":t::c::" o; do
        case "${o}" in
            t)
                SLEEP_FOR=${OPTARG}
                ;;
            c)
                if [[ ! -z ${OPTARG} ]]; then
                    # Normalise to a three-letter upper-case code and derive
                    # the Kraken/Coinbase pair identifiers from it.
                    PAIR="${OPTARG:0:3}";
                    PAIR="${PAIR^^}";
                    if [[ $PAIR = "USD" ]]; then
                        KRAKPAIR="XBTUSD"
                        KRAKPAIRRES="XXBTZUSD"
                        COINPAIR="BTC-USD"
                    else
                        KRAKPAIR="${PAIR}XBT"
                        KRAKPAIRRES="X${PAIR}XXBT"
                        COINPAIR="${PAIR}-BTC"
                    fi;
                fi;
                ;;
            *)
                usage
                return;
                ;;
        esac
    done
    shift $(( OPTIND - 1 ))
    while true; do
        echo "-------------------------------------------------"
        # node one-liners extracting the latest price from each exchange's JSON.
        KRAKNODE="parseFloat(JSON.parse(require('fs').readFileSync('/dev/stdin').toString()).result.${KRAKPAIRRES}.c[0])"
        COINNODE="parseFloat(JSON.parse(require('fs').readFileSync('/dev/stdin').toString()).price)"
        KRAKENTO=""
        COINBASETO=""
        KRAKEN=$(curl -s -k -X GET "https://api.kraken.com/0/public/Ticker?pair=${KRAKPAIR}" | node -pe "${KRAKNODE}")
        COINBASE=$(curl -s -k -X GET "https://api.pro.coinbase.com/products/${COINPAIR}/ticker" | node -pe "${COINNODE}")
        # Percentage change versus the price captured on the first iteration.
        KRAK=$(node -pe "((parseFloat(${KRAKEN}) - parseFloat('${PREVKRAKEN}' || ${KRAKEN}))*100/parseFloat(${KRAKEN})).toFixed(2)")
        COIN=$(node -pe "((parseFloat(${COINBASE}) - parseFloat('${PREVCOINBASE}' || ${COINBASE}))*100/parseFloat(${COINBASE})).toFixed(2)")
        # NOTE(review): [[ a > b ]] compares strings lexicographically, not
        # numerically, so the red/green choice can be wrong when the price
        # crosses a digit-count boundary (e.g. "9999" vs "10000") — confirm
        # whether a numeric comparison was intended.
        if [[ "${PREVKRAKEN}" > "${KRAKEN}" ]]; then
            KRAKENTO="\033[31m${KRAK}%"
        else
            KRAKENTO="\033[32m+${KRAK}%"
        fi;
        if [[ "${PREVCOINBASE}" > "${COINBASE}" ]]; then
            COINBASETO="\033[31m${COIN}%"
        else
            COINBASETO="\033[32m+${COIN}%"
        fi;
        echo -e "- \033[94mCoinbase : 1 BTC = ${COINBASE} ${PAIR} ${COINBASETO}\033[0m \t"
        echo -e "- \033[95mKraken : 1 BTC = ${KRAKEN} ${PAIR} ${KRAKENTO}\033[0m \t"
        echo "-------------------------------------------------"
        # Remember the first observed prices as the baseline for the deltas.
        if [[ -z "${PREVKRAKEN}" ]]; then
            PREVKRAKEN=${KRAKEN}
            PREVCOINBASE=${COINBASE}
        fi;
        if [ -z "${SLEEP_FOR}" ]; then
            return;
        fi;
        sleep "${SLEEP_FOR}"
        # Move the cursor up five lines so the next iteration redraws in place.
        echo -e "\033[5A"
    done;
}
| true |
2282fa4a7e85da751382601370efae06370a5f5e | Shell | kalpishs/IRE--Medical_NER_Twitter | /src/codes/bash_5gram.sh | UTF-8 | 1,649 | 2.84375 | 3 | [] | no_license | export java_home=/home/chinmay/Workstation/IRE/Major/mallet
# Pipeline: extract CRF features (via MetaMap + a Python featurizer) from the
# annotated tweet corpora, train a Mallet SimpleTagger model, tag the test
# set, and score it. NOTE: $path shadows nothing here -- lowercase `path` is
# distinct from $PATH in bash.
export path=/home/chinmay/Workstation/IRE/Major/public_mm/bin/
# Start from fresh feature files; on failure just list what is there.
rm ../training_files/training_file_5gram || ls ../training_files/*
rm ../testing_files/gold_testing_5gram || ls ../testing_files/*
train="../../data/training_Annotations/*"
testi="../../data/testing_Annotations/*"
#------making training feature files-----#
for folder in $train
do
	for files in $folder/*.txt
	do
		echo $files
		# The backtick-echo idiom: the substitution's output is itself
		# executed as a command, i.e. this runs testapi.sh on $files.
		`echo "bash $path/testapi.sh --input $files --output out"`
		python training_features_5gram.py $files ../training_files/training_file_5gram
	done;
done;
#---------making testing feature files-------#
for folder in $testi
do
	for files in $folder/*.txt
	do
		echo $files
		`echo "bash $path/testapi.sh --input $files --output out"`
		python training_features_5gram.py $files ../testing_files/gold_testing_5gram
	done;
done;
#----------train model-------#
# Train the Mallet CRF on the assembled feature file.
`echo "java -cp $java_home/class:$java_home/lib/mallet-deps.jar cc.mallet.fst.SimpleTagger --train true --model-file ../models/trained_model_5gram ../training_files/training_file_5gram"`
#---testing----#
# Columns 1-10 are features, column 11 is the gold label.
cut -d' ' -f1,2,3,4,5,6,7,8,9,10 ../testing_files/gold_testing_5gram > ../testing_files/testing_file_5gram
cut -d' ' -f11 ../testing_files/gold_testing_5gram > ../testing_files/gold_tags_5gram
`echo $(java -cp $java_home/class:$java_home/lib/mallet-deps.jar cc.mallet.fst.SimpleTagger --model-file ../models/trained_model_5gram ../testing_files/testing_file_5gram) > ../system_result/system_tags_5gram`
#----accuracy---#
python accuracy.py ../testing_files/gold_tags_5gram ../system_result/system_tags_5gram
| true |
6e483567fe246c8636a0c5cc17df7037a5f23906 | Shell | voc/cm | /bundlewrap/bundles/voctocore/files/streaming-sink.sh | UTF-8 | 4,047 | 2.5625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Streaming sink for voctocore: reads the mixed program (and optionally a
# slides feed) over TCP, builds HD video plus native/translated audio mixes
# with ffmpeg, and publishes to SRT or Icecast.
# NOTE(review): the `% if ...:` / `${...}` directives are template syntax
# expanded by the config-management tool before this script ever runs
# (presumably Mako via bundlewrap, given the repo layout) -- confirm.
### Filter Parameter
## PA
# parameter dynaudnorm
para_pa_leveler="p=0.35:r=1:f=300"
## TRANSLATOR
# parameter compand as gate
para_trans_gate="attacks=0:points=-80/-115|-55.1/-80|-55/-55|20/20"
# parameter compand as limiter
para_trans_limiter="attacks=0:points=-80/-80|-12/-12|20/-12"
# parameter dynaudnorm
para_trans_leveler="p=0.35:r=1:f=300"
## MIX
# parameter volume for both tracks
para_mix_vol_pa="0.2"
para_mix_vol_trans="1.0"
# parameter dynaudnorm
para_mix_leveler="p=0.35:r=1:f=30"
# paramter loudnorm
para_mix_loudnorm="i=-23.0:lra=12.0:tp=-3.0"
# Refuse to start (and page someone) when the publish credential is missing.
if [[ -z "$VOC_STREAMING_AUTH" ]]
then
	voc2alert "error" "streaming" "failed to start stream, credentials missing!"
	exit 1
fi
# Single long ffmpeg invocation; do not insert lines below -- every line is
# part of one backslash-continued command interleaved with template branches.
ffmpeg -y -nostdin -hide_banner -re \
% if vaapi_enabled:
	-init_hw_device vaapi=streaming:/dev/dri/renderD128 \
	-hwaccel vaapi \
	-hwaccel_output_format vaapi \
	-hwaccel_device streaming \
% endif
	-thread_queue_size 512 -i tcp://localhost:15000?timeout=3000000 \
% if parallel_slide_streaming and slides_port:
	-thread_queue_size 512 -i tcp://localhost:15001?timeout=3000000 \
% endif
% if vaapi_enabled:
	-filter_hw_device streaming \
% endif
	-filter_complex "
		[0:v] hqdn3d\
% if vaapi_enabled:
			, format=nv12,hwupload \
% endif
		[hd];\
% if parallel_slide_streaming:
		[1:v] fps=5, hqdn3d\
% if vaapi_enabled:
			, format=nv12,hwupload,scale_vaapi=w=1024:h=576\
% else:
			, scale=1024:576\
% endif
		[slides];\
% endif
		\
		[0:a]pan=stereo|c0=c0|c1=c1[s_pgm];\
		[0:a]pan=stereo|c0=c2|c1=c2[s_trans_1];\
		[0:a]pan=stereo|c0=c3|c1=c3[s_trans_2];\
		\
		[s_pgm] asplit=2 [pgm_1] [pgm_2] ;\
% if dynaudnorm:
		[pgm_2] dynaudnorm=$para_pa_leveler [pgm_le] ;\
% endif
		\
		[s_trans_1] compand=$para_trans_gate [trans_gate_1] ;\
		[trans_gate_1] compand=$para_trans_limiter [trans_lim_1] ;\
		[trans_lim_1] dynaudnorm=$para_trans_leveler [trans_lev_1] ;\
		\
		[s_trans_2 ] compand=$para_trans_gate [trans_gate_2] ;\
		[trans_gate_2] compand=$para_trans_limiter [trans_lim_2] ;\
		[trans_lim_2] dynaudnorm=$para_trans_leveler [trans_lev_2] ;\
		\
% if dynaudnorm:
		[pgm_lev] volume=$para_mix_vol_pa,asplit [mix_int_1][mix_int_2] ;\
% else:
		[pgm_2] volume=$para_mix_vol_pa,asplit [mix_int_1][mix_int_2] ;\
% endif
		[trans_lev_1] volume=$para_mix_vol_trans [mix_trans_1] ;\
		[trans_lev_2] volume=$para_mix_vol_trans [mix_trans_2] ;\
		[mix_int_1][mix_trans_1] amix=inputs=2:duration=longest [mix_out_1] ;\
		[mix_int_2][mix_trans_2] amix=inputs=2:duration=longest [mix_out_2] \
% if dynaudnorm:
		;\
		[pgm_1] dynaudnorm=$para_mix_leveler,loudnorm=$para_mix_loudnorm [pgm]; \
		[mix_out_1] dynaudnorm=$para_mix_leveler,loudnorm=$para_mix_loudnorm [duck_out_1]; \
		[mix_out_2] dynaudnorm=$para_mix_leveler,loudnorm=$para_mix_loudnorm [duck_out_2] \
% endif
	" \
% if vaapi_enabled:
	-c:v h264_vaapi \
% else:
	-c:v libx264 \
% endif
	-flags +cgop -aspect 16:9 \
% if parallel_slide_streaming:
	-g:v:1 15 -crf:v:1 25 -maxrate:v:1 100k -bufsize:v:1 750k \
% endif
	-r:v:0 25 -g:v:0 75 -crf:v:0 21 -maxrate:v:0 4M -bufsize:v:0 18M \
	-c:a aac -b:a 192k -ar 48000 \
	-map "[hd]" \
% if parallel_slide_streaming:
	-map "[slides]" \
% endif
	-metadata:s:v:0 title="HD" \
% if parallel_slide_streaming:
	-metadata:s:v:1 title="Slides" \
% endif
% if dynaudnorm:
	-map "[pgm]" -metadata:s:a:0 title="native" \
	-map "[duck_out_1]" -metadata:s:a:1 title="translated" \
	-map "[duck_out_2]" -metadata:s:a:2 title="translated-2" \
% else:
	-map "[pgm_1]" -metadata:s:a:0 title="native" \
	-map "[mix_out_1]" -metadata:s:a:1 title="translated" \
	-map "[mix_out_2]" -metadata:s:a:2 title="translated-2" \
% endif
	\
% if srt_publish:
	-f mpegts \
	"srt://ingest.c3voc.de:1337?streamid=publish/${endpoint}/$VOC_STREAMING_AUTH"
% else:
	-f matroska \
	-password "$VOC_STREAMING_AUTH" \
	-content_type video/webm \
	"icecast://live.ber.c3voc.de:7999/${endpoint}"
% endif
| true |
c1b2d4dde445c8994e8dafaa268b28fdf6f8828b | Shell | amarts/GlusterFS-Regression | /8/regr/testcase | UTF-8 | 628 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Regression test: verify the PID recorded in the glusterfs log header
# matches the PID written to the daemon's pid-file.
description="# Incorrect PID printed in the glusterfs log header"
comments="# The log header prints the pre-daemon pid which does not match the pid seen if we do a ps."
# Provides start_glusterfs, cleanup_glusterfs, ok/not_ok/comment helpers
# plus $GLUSTERFS, $SPECDIR, $LOGDIR, $BUGID.
source ../../init
start_glusterfs --no-servers --no-clients
sleep 2
# The $( ... ) at statement level runs glusterfs and then executes whatever
# it prints (normally nothing) -- the side effects are the log and pid file.
$($GLUSTERFS --run-id regr.c.$BUGID -f $SPECDIR/server1.vol -l $LOGDIR/$(hostname)-server1.log --pid-file=$PWD/server1.pid)
# Pass iff the pid-file contents equal the "PID:" field of the log header.
if [ $(cat server1.pid) == $(cat $LOGDIR/$(hostname)-server1.log | grep PID | cut -d":" -f2) ] ; then
	ok $description
	comment $comments
else
	not_ok $description
	comment $comments
fi
cleanup_glusterfs
rm *.pid >/dev/null
| true |
0d5360b96f0e8f50f5d41fa8fa10a79098fba431 | Shell | AliOC/blog | /tools/run-examples.sh | UTF-8 | 2,044 | 3.859375 | 4 | [] | no_license | #!/bin/bash
# Runs all examples and produces the following files in the testing/ dir.
#
# status.csv
#   - A CSV of all BLOG examples alongside their status
#
# successExamples.txt
#   - A list of all BLOG examples that run successfully
#
# errorExamples.txt
#   - A list of all BLOG examples that throw exceptions
#
# errors.txt
#   - Each failing example followed by its stack trace

mkdir -p testing

# A list of all BLOG examples that throw exceptions
errorFiles='testing/errorExamples.txt'
echo "Error Examples" > $errorFiles

# A list of all BLOG examples that run successfully
successFiles='testing/successExamples.txt'
echo "Successful Examples" > $successFiles

# A list of all BLOG examples that threw exceptions alongside their stack trace
fileErrors='testing/errors.txt'
echo "" > $fileErrors

# A list of whether each BLOG example runs correctly or not
statusFiles='testing/status.csv'
echo "FileName,Status" > $statusFiles

# run_examples RUNNER PATTERN
# Run RUNNER on every example matching PATTERN and record pass/fail.
# A non-empty stderr capture is treated as a failure; the stack trace is
# appended to $fileErrors. (This loop used to be pasted twice, once per
# runner -- factored out so both engines stay in sync.)
run_examples() {
  local runner=$1
  local pattern=$2
  local f errors
  for f in $(find ../example -name "$pattern"); do
    echo "Running $f"
    "$runner" -n 100 "$f" 2> testing/errors > testing/output
    errors=$(wc -l < testing/errors)
    if [ "$errors" == "0" ]; then
      echo "$f,Pass" >> $statusFiles
      echo "$f" >> $successFiles
    else
      echo "$f,Fail" >> $statusFiles
      echo "$f" >> $errorFiles
      echo "$f" >> $fileErrors
      echo $(cat testing/errors) >> $fileErrors
      echo "" >> $fileErrors
    fi
  done
}

# Static models run with the blog engine, dynamic (.dblog) with dblog.
run_examples ../blog '*.blog'
run_examples ../dblog '*.dblog'

rm testing/errors testing/output
| true |
aeaa6f939838719f24bfafd52b05b5360fe90f41 | Shell | marianabsctba/automatization | /ciscotelnetscript | UTF-8 | 475 | 2.96875 | 3 | [] | no_license | #!/bin/bash
set -x
# Credentials for the devices; fill in before use. Appending them to a
# telnet session over plain text is inherently insecure -- lab use only.
USER=""
PASSWD=""
ENABLE=""
LOG="telnet.log"
# NOTE(review): the "#" inside this string is NOT a comment once the string
# is word-split by the for-loop below -- "#" and "10.144.33.196" each become
# loop values and telnet will be attempted against them. Remove the line
# entirely to disable that host.
HOSTS="
# 10.144.33.196
172.11.11.11
172.22.22.22
"
for H in $HOSTS
do
echo START SCRIPT: >> $LOG
date +%x-%R >> $LOG
# Feed a scripted login + "show version" dialogue to telnet on stdin,
# pacing each line with sleeps so the device can prompt in between.
(
sleep 1;
echo -en "$USER\r";
sleep 2;
echo -en "$PASSWD\r";
sleep 2;
echo -en "enable\r";
sleep 2;
echo -en "$ENABLE\r";
sleep 2;
echo -en "show version | include BOOT\r";
sleep 2;
echo -en "exit\r"
sleep 1;
) | telnet $H >> $LOG
echo =================================== >> $LOG
done
| true |
91ebdd5f9703c09eab670403c340b866bf93d681 | Shell | idanbak/git-tips | /.profile | UTF-8 | 2,073 | 3.4375 | 3 | [] | no_license | # ========================================
# General Aliases
# ========================================
alias groovy='/d/groovy-2.5.3/bin/groovy'
alias cdg='cd /d/git-clones'
# ========================================
# Git Clone Aliases
# ========================================
# clone repository and directly cd into it
gclone() {
git clone "$1" && cd "$(basename $1 .git)"
}
alias gcd=gclone
alias go='git clone'
# ========================================
# Main Git Aliases
# ========================================
alias gs='git status'
alias gss='git status -sb'
alias gl='git pull'
alias gp='git push'
alias gf='git fetch'
alias ga='git add '
alias gaa='git add .'
alias gcmsg='git commit -m '
alias gco='git checkout '
alias gcb='git checkout -b'
alias gclear='git checkout -- '
alias gh='git checkout -'
alias gr='git reset HEAD '
alias gb='git branch'
alias gblist='git branch -a'
alias gbshow='git branch -vv'
alias gm='git merge'
alias gd='git diff'
alias gda='git diff HEAD'
alias gdc='git diff --cached'
alias gst='git stash'
alias gstp='git stash pop'
alias gstl='git stash list'
alias gconfig='git config --global'
alias gpsup='git push --set-upstream origin $(git rev-parse --abbrev-ref HEAD)'
# ========================================
# Other Aliases
# ========================================
alias gwhat='git whatchanged'
alias g1="git whatchanged --since='1 day ago'"
alias g7="git whatchanged --since='7 days ago'"
alias glg='git log'
alias glo='git log --oneline --decorate --color'
alias glola="git log --graph --pretty='%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit --all"
alias gr='git grep -l '
alias gra="git rev-list --all | xargs git grep "
alias ggrep="git log -p | grep "
alias gk='gitk --all --branches'
# To find which commits and which files a string was added or removed in:
# git log -S'search string' --oneline --name-status
# To see the diff of that
# git log -S'search string' -p | grep 'search string' -C5
| true |
7d4439b498bac572d40b8a8f04234994d6a1e14d | Shell | metalglove/dotfiles | /i3status/update_checker.sh | UTF-8 | 451 | 3.9375 | 4 | [] | no_license | # update_checker.sh
function check_updates()
{
    # Ask update-notifier how many package updates are pending and leave a
    # human-readable summary in the global $update_status (and the raw
    # count in the global $ups) for the i3status bar to display.
    ups=$(/usr/lib/update-notifier/apt-check --human-readable | head -1 | awk '{print $1;}')

    update_status=""
    if [ "$ups" -gt "1" ]
    then
        update_status="There are $ups updates"
    elif [ "$ups" -eq "1" ]
    then
        update_status="There is 1 update"
    else
        update_status="Up to date"
    fi
}
| true |
580a809a0ec4bffc11ff8f6de292ef0595f073ea | Shell | pact-foundation/pact-ruby-standalone | /script/update-and-release.sh | UTF-8 | 154 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
# Refresh the vendored gem versions; commits a "feat(gems)" change if any
# gem moved.
./script/update.sh
# Only cut a release when the most recent commit is a gem bump; grep's exit
# status drives the branch (and it also echoes the matching line).
if git log -1 | grep "feat(gems)"; then
  ./script/release.sh $1
else
  echo "No gems updated, not releasing"
fi
| true |
60947b9c2d5ed3f4a1be1702e26cb0950f408cd9 | Shell | thexhr/config | /bin/firefox-default | UTF-8 | 182 | 2.6875 | 3 | [] | no_license | #!/bin/sh
# Launch Firefox as the dedicated "firefox" user over local ssh, displaying
# on this machine's X server (privilege-separated browsing).
AS=$HOME/.ssh/$(hostname).agent
# Load the per-host ssh-agent environment if one was saved.
if [ -f "$AS" ]; then
	. $AS
fi
# Hand our X authority cookie to the firefox user so it may open :0.0.
scp ~/.Xauthority firefox@127.0.0.1:
exec ssh firefox@127.0.0.1 -n "export DISPLAY=:0.0; TZ=UTC firefox"
| true |
b61084f6a92a973218f5031c30a850a8c6e73c24 | Shell | nix-chu/androidstudio_bash_plugin | /android-studio.plugin.bash | UTF-8 | 994 | 4 | 4 | [
"MIT"
] | permissive | # Android Studio
# Assumes PATH = C:\Program Files\Android\Android Studio\bin\studio64.exe
androidstudio () {
	# Open Android Studio on the current directory, or on a project folder
	# named by $1 relative to the current directory.
	# All path expansions are quoted: the install dir and typical Windows
	# working directories contain spaces, which broke the unquoted version.
	if [ $# -gt 1 ]
	then
		echo "Too many arguments passed.
Type 'androidstudio' to open a project at the current directory or pass a project folder that exists in the current directory.
(eg. 'androidstudio MyApplication')"
	elif [ -n "$1" ]
	then
		# Open project at specified directory
		working_directory=$(pwd)
		project_name="$working_directory/$1"
		# Bail out rather than launching from the wrong directory if cd fails.
		cd "C:\Program Files\Android\Android Studio\bin" || return
		start studio64.exe "$project_name"
		cd "$working_directory" || return
		echo "Opening project at $1"
	else
		# Open project at current directory
		working_directory=$(pwd)
		cd "C:\Program Files\Android\Android Studio\bin" || return
		start studio64.exe "$working_directory"
		cd "$working_directory" || return
		echo "Opening project at current directory"
	fi
}
| true |
56d2083ea46c3cfad0263179c77f91215fb3862b | Shell | chicas-lancaster/websource | /tools/testurls.sh | UTF-8 | 500 | 3.3125 | 3 | [] | no_license | #!/bin/sh
# Harvest every http(s) URL referenced by the site content, print each one,
# and record "<status code> <url>" probes into a scratch file.
# tempfile(1) was Debian-specific and has been deprecated/removed;
# mktemp(1) is the portable replacement.
OUT=$(mktemp)
OUTSTAT=$(mktemp)
# grep over multiple files prefixes each match with "file:", so $OUT holds
# "<page>:<url>" lines, de-duplicated per content section.
grep -o -i "http[s]*://[^\"<]*" content/projects/*.html | sort | uniq >>$OUT
grep -o -i "http[s]*://[^\"<]*" content/people/*.html | sort | uniq >>$OUT
grep -o -i "http[s]*://[^\"<]*" content/news/*.html | sort | uniq >>$OUT
# cat $OUT
while read -r LINE; do
  PAGE=$(echo "$LINE" | awk -F: '{print $1}')   # source page (currently unused)
  URL=$(echo "$LINE" | sed 's/[^:]*://')        # strip "<page>:" prefix
  echo "$URL"
  curl -o /dev/null --silent --write-out "%{http_code} $URL\n" "$URL" >>"$OUTSTAT"
done < "$OUT"
| true |
06c46007581a1542f10d1412c669b47ae6ed4c98 | Shell | bu6hunt3r/nix-dotfiles | /users/bin/playartist | UTF-8 | 556 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env sh
# Pick an artist from the MPD library via bemenu (themed with pywal colors)
# and replace the play queue with everything by that artist, shuffled.
. $HOME/.cache/wal/colors.sh
# NOTE(review): alias expansion is off by default in non-interactive bash;
# this relies on a POSIX sh (e.g. dash) that does expand aliases in
# scripts -- confirm the shell this runs under.
alias bemenu='bemenu --tf "$background" --fb "$background" --hf "$background" --nb "$background" --sb "$background" --scb "$background" --tb "$foreground" --ff "$foreground" --nf "$foreground" --hb "$foreground" --sf "$foreground" --scf "$foreground" --fn "sans 9.5" -b -i --scrollbar always'
# Refresh the MPD database, then offer the de-duplicated artist list.
artist=$(mpc -q update
mpc list artist |\
	awk '!seen[$0]++' |\
	bemenu -p 'choose an artist: ' -l 20)
# Menu dismissed without a choice: do nothing.
[ -z "$artist" ] && exit
mpc -q clear
mpc search any "$artist" | mpc -q add
mpc -q random on
mpc -q play
| true |
57f53c776d3d2ca45439190152b537ce04eedf9f | Shell | vipsql/hadoop-deployer | /old/profile_hive.sh | UTF-8 | 596 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# Install a Hive environment profile: hook ~/.hive_profile into
# ~/.bash_profile (once), then (re)write and source the profile itself.
BAPF="$HOME/.bash_profile"
HIPF="$HOME/.hive_profile"
if [ ! -e $BAPF ]; then
	touch $BAPF;
fi
# Append the sourcing stanza only if it is not already present;
# "hive profile" doubles as the idempotency marker.
if ! grep -q "hive profile" $BAPF; then
	echo "" >> $BAPF;
	echo "#" >> $BAPF;
	echo "# hive profile" >> $BAPF;
	echo "#" >> $BAPF;
	echo "if [ -f $HIPF ]; then" >> $BAPF;
	echo "  . $HIPF" >> $BAPF;
	echo "fi" >> $BAPF;
	echo "#END#" >> $BAPF;
fi
# Overwrite the profile body; \$ keeps the variables literal so they are
# expanded when the profile is sourced, not now.
echo "# Hive profile
export HIVE_HOME=\$HOME/hive
export HIVE_BIN=\$HIVE_HOME/bin
export HIVE_CONF_DIR=\$HIVE_HOME/conf
export PATH=\$HIVE_BIN:\$PATH
alias cch='cd \$HIVE_HOME'
alias cchf='cd \$HIVE_CONF'
" > $HIPF
. $HIPF
| true |
4ebac98f542611ac972a694d14a0b062d87abbb6 | Shell | Jolin-blank/ansible-cluster | /script/install_cluster.sh | UTF-8 | 3,883 | 2.96875 | 3 | [] | no_license | #!/bin/bash
file_dir=/root/ansible-cluster/
# Halve the RocketMQ broker JVM heap in the playbook when the destination
# machine only has 4G of RAM; anything else is assumed to have >=8G.
set_jvmargs(){
    # $memory is quoted so an empty/unset answer falls through to the
    # warning instead of crashing the [ test with a syntax error.
    if [ "$memory" == "4G" ];then
        sed -i '/runbroker.sh/s/-Xms4g -Xmx4g -Xmn2g/-Xms2g -Xmx2g -Xmn1g/' "$file_dir/yaml/install_rocketmq.yml"
    else
        echo "make sure your dest machine memory >=8g"
    fi
}
# Run playbook $1 against inventory "$2.host", choosing the ansible auth
# mode from the global $mode:
#   root -> connect as root with password prompt (-k)
#   prtg -> connect as prtg, password prompt plus su-become prompt (-k -K)
#   else -> connect as prtg with key auth, su-become prompt only (-K)
judge_run(){
    if [ $mode == "root" ];then
        ansible-playbook -i $file_dir/$2.host $file_dir/yaml/$1 -k
    elif [ $mode == "prtg" ];then
        ansible-playbook -i $file_dir/$2.host $file_dir/yaml/$1 -u prtg -k -b --become-method=su -K
    else
        ansible-playbook -i $file_dir/$2.host $file_dir/yaml/$1 -u prtg -b --become-method=su -K
    fi
}
# Emit a generated password on stdout (delegates to the bundled Python
# helper at files/pwd); captured into the redis var files via $(gen_password).
gen_password(){
    python $file_dir/files/pwd
}
# Shown whenever the operator's answer doesn't match a known option.
usage(){
    printf '%s\n' "Please enter as prompted"
}
# Prompt for the three RocketMQ cluster IPs plus a console IP, write the
# ansible inventory and variable files, tune JVM heap for the host memory,
# then run the requested playbook ($1) with the "rocketmq" inventory.
install_rocketmq()
{
    read -p 'please input rocketmq-cluster ip : ' rocketmq_ip1
    read -p 'please input rocketmq-cluster ip : ' rocketmq_ip2
    read -p 'please input rocketmq-cluster ip : ' rocketmq_ip3
    cat <<EOF > $file_dir/rocketmq.host
[rocketmq-cluster]
$rocketmq_ip1
$rocketmq_ip2
$rocketmq_ip3
EOF
    cat <<EOF > $file_dir/vars/rocketmq_var.yml
ROCKETMQIP1: $rocketmq_ip1
ROCKETMQIP2: $rocketmq_ip2
ROCKETMQIP3: $rocketmq_ip3
EOF
    read -p 'please input rocketmq-console ip : ' rocketmq_console_ip
    cat <<EOF >> $file_dir/rocketmq.host
[rocketmq-console]
$rocketmq_console_ip
EOF
    read -p 'dest machine memory : ' memory
    set_jvmargs
    judge_run $1 rocketmq
}
# Three-node Redis cluster (two ports per node): prompt for the node IPs,
# write inventory + vars (ports 7000/7001, generated password), add the
# host that runs cluster creation, then run playbook $1.
install_3redis()
{
    read -p 'please input redis-cluster ip : ' redis_ip1
    read -p 'please input redis-cluster ip : ' redis_ip2
    read -p 'please input redis-cluster ip : ' redis_ip3
    cat <<EOF > $file_dir/3redis.host
[redis-cluster]
$redis_ip1
$redis_ip2
$redis_ip3
EOF
    cat <<EOF > $file_dir/vars/redis3_var.yml
REDIS1: $redis_ip1
REDIS2: $redis_ip2
REDIS3: $redis_ip3
PORT1: 7000
PORT2: 7001
PASSWORD: $(gen_password)
EOF
    read -p 'please input create redis ip :' createredis_ip
    cat <<EOF >> $file_dir/3redis.host
[redis-create]
$createredis_ip
EOF
    judge_run $1 3redis
}
# Six-node Redis cluster (one port per node): same flow as install_3redis
# but with six member IPs and a single port 7000.
install_redis(){
    read -p 'please input redis-cluster ip : ' redis_ip1
    read -p 'please input redis-cluster ip : ' redis_ip2
    read -p 'please input redis-cluster ip : ' redis_ip3
    read -p 'please input redis-cluster ip : ' redis_ip4
    read -p 'please input redis-cluster ip : ' redis_ip5
    read -p 'please input redis-cluster ip : ' redis_ip6
    cat <<EOF > $file_dir/redis.host
[redis-cluster]
$redis_ip1
$redis_ip2
$redis_ip3
$redis_ip4
$redis_ip5
$redis_ip6
EOF
    cat <<EOF > $file_dir/vars/redis_var.yml
REDIS1: $redis_ip1
REDIS2: $redis_ip2
REDIS3: $redis_ip3
REDIS4: $redis_ip4
REDIS5: $redis_ip5
REDIS6: $redis_ip6
PORT: 7000
PASSWORD: $(gen_password)
EOF
    read -p 'please input create redis ip :' createredis_ip
    cat <<EOF >> $file_dir/redis.host
[redis-create]
$createredis_ip
EOF
    judge_run $1 redis
}
# Three-node ZooKeeper ensemble: prompt for the member IPs, write
# inventory + vars, then run playbook $1 with the "zookeeper" inventory.
install_zookeeper(){
    read -p 'please input zookeeper-cluster ip : ' zookeeper_ip1
    read -p 'please input zookeeper-cluster ip : ' zookeeper_ip2
    read -p 'please input zookeeper-cluster ip : ' zookeeper_ip3
    cat <<EOF > $file_dir/zookeeper.host
[zookeeper-cluster]
$zookeeper_ip1
$zookeeper_ip2
$zookeeper_ip3
EOF
    cat <<EOF > $file_dir/vars/zookeeper_var.yml
ZOOKEEPER_IP1: $zookeeper_ip1
ZOOKEEPER_IP2: $zookeeper_ip2
ZOOKEEPER_IP3: $zookeeper_ip3
EOF
    judge_run $1 zookeeper
}
# Dispatch on the global $state for cluster type $1:
#   install -> run install_<cluster> with its playbook
#   remove  -> run the remove playbook directly via judge_run
#   else    -> print usage
operate(){
    # $state is quoted so an empty answer cleanly reaches usage instead of
    # tripping a "[ : unary operator expected" error first.
    if [ "$state" == "install" ];then
        install_$1 install_$1.yml
    elif [ "$state" == "remove" ];then
        judge_run remove_$1.yml $1
    else
        usage
    fi
}
# Interactive entry point: ask for auth mode, cluster type, and action,
# then dispatch through operate(). ("opreate" below is a typo in the
# prompt text -- a runtime string, left untouched here.)
read -p 'user mode : ' mode
read -p 'please input cluster you want to opreate(rocketmq|3redis|redis|zookeeper) : ' cluster
read -p 'install or remove : ' state
case $cluster in
    zookeeper)
        operate zookeeper
        ;;
    3redis)
        operate 3redis
        ;;
    redis)
        operate redis
        ;;
    rocketmq)
        operate rocketmq
        ;;
    *)
        usage
esac
# Undo any 4G heap tuning so the playbook is back at its 8G defaults for
# the next run (mirror image of set_jvmargs).
sed -i '/runbroker.sh/s/-Xms2g -Xmx2g -Xmn1g/-Xms4g -Xmx4g -Xmn2g/' $file_dir/yaml/install_rocketmq.yml
| true |
f4e71dc9ce0a8c3edfbea8a43b070de878e5e2ab | Shell | zzzapzzz/pentest | /scripts/frida-gadgets-download.sh | UTF-8 | 683 | 3.234375 | 3 | [] | no_license | #!/usr/bin/env bash
# Download the Frida gadget library for each supported platform into
# ./frida-gadgets/, mirroring the path layout after the host name.
# (The android/arm64 URL used to be listed twice, downloading the same
# file twice -- de-duplicated.)
urls="
https://build.frida.re/frida/mac/lib/FridaGadget.dylib
https://build.frida.re/frida/ios/lib/FridaGadget.dylib
https://build.frida.re/frida/android/arm/lib/frida-gadget.so
https://build.frida.re/frida/android/arm64/lib/frida-gadget.so
https://build.frida.re/frida/linux/i386/lib/frida-gadget.so
https://build.frida.re/frida/linux/x86_64/lib/frida-gadget.so
"

outdir="frida-gadgets"
mkdir -p "${outdir}"

for url in $urls; do
  # Keep everything after "https://build.frida.re/" (/-fields 5 onward)
  # as the per-platform output path.
  out="${outdir}/$(echo -n ${url}|cut -f5- -d/)"
  # Name the directory explicitly instead of relying on $_ (the last
  # argument of the previous command), which only worked by accident.
  mkdir -p "$(dirname "${out}")"
  wget -k -L "${url}" -O "${out}"
done
| true |
fc405e17ca72f61ec51fef4abba146e66d22f6cc | Shell | PoojaAK-24/linux-prgorams | /assignment2/fiveb.sh | UTF-8 | 166 | 2.84375 | 3 | [] | no_license | #!/bin/bash -x
Length=60
Width=40

# Rectangle area; bash arithmetic is integer-only.
# (The old script echoed $area -- bash variables are case-sensitive, so the
# lowercase name was unset and printed a blank line.)
Area=$(( Length * Width ))
echo "$Area"

# awk does the floating-point multiply; the original used print with smart
# quotes ("%f"), which emitted a literal %f instead of formatting.
# NOTE(review): 0.3048 is the feet->metres length factor; converting an
# *area* would need 0.3048^2 -- kept as-is, confirm the intended units.
meters=$(echo "$Area" 0.3048 | awk '{printf "%f", $1 * $2}')
echo "$meters meters"
| true |
0ff708d17b3fd2ffdbcb08d7c5cb2f985119724a | Shell | cloud-init/ubuntu-sru | /bugs/lp-1634678/disk-setup | UTF-8 | 690 | 3.671875 | 4 | [] | no_license | #!/bin/sh
# DESTRUCTIVE: wipes and repartitions $dev, optionally making an ext4
# filesystem labelled $label when invoked as: disk-setup mkfs
dev=/dev/vdb
part=1
label=mydata
msg() { echo "$@" 1>&2; }
fail() { echo "$@" 1>&2; exit 1; }
set -e
[ "$(id -u)" = "0" ] || fail "not root"
# Best-effort: the target may or may not be mounted at /mnt.
umount /mnt || :
msg "wiping $dev"
# 4 of them to wipe any old filesystem data on first part
out=$(dd if=/dev/zero of=$dev bs=1M count=4 2>&1) ||
    fail "failed wiping $dev: $out"
msg "partitioning $dev"
# GPT label with one partition starting at sector 2048, rest of disk.
out=$(echo "2048," | sudo sfdisk --label=gpt --force --unit=S "$dev" 2>&1) ||
    fail "failed sfdisk $dev: $out"
if [ "$1" = "mkfs" ]; then
    msg "filesystem label $label on $dev${part}"
    out=$(mkfs.ext4 -F "${dev}${part}" -L "$label" 2>&1) ||
        fail "failed mkfs.ext4: $out"
fi
# Give udev a moment to create the partition node before probing.
sleep 1
blkid "$dev" "${dev}${part}"
| true |
c07744bc26fb13231d0517e39a747a46930c95ed | Shell | CopyPasteByteArtist/timecloud | /squashfs-root/etc/init.d/smbauth | UTF-8 | 630 | 3.109375 | 3 | [] | no_license | #!/bin/sh
# Toggle Samba between authenticated (user-level, root password set) and
# open guest (share-level) access based on the "autostart" UCI config.
. /lib/functions.sh
smb='0'
smbpwd=''
# Callback invoked by config_load for each option in the autostart config;
# captures the smb enable flag and the chosen password.
option_cb(){
	case $1 in
	smb)
		smb='1'
	;;
	smbpwd)
		smbpwd=$2
	;;
	esac
}
config_load autostart
if [ $smb = '1' ]; then
	# Authenticated mode: set root's SMB password and tighten smb.conf.
	smbpasswd root $smbpwd
	sed -i 's/null passwords = yes/null passwords = no/g' /tmp/etc/smb.conf
	sed -i 's/security = share/security = user/g' /tmp/etc/smb.conf
	sed -i 's/guest ok = yes/guest ok = no/g' /tmp/etc/smb.conf
else
	# Guest mode: drop the root SMB account and reopen the shares.
	smbpasswd -del root
	sed -i 's/null passwords = no/null passwords = yes/g' /tmp/etc/smb.conf
	sed -i 's/security = user/security = share/g' /tmp/etc/smb.conf
	sed -i 's/guest ok = no/guest ok = yes/g' /tmp/etc/smb.conf
fi
| true |
688dc9ffaaeff714fa9d11b978563bfbf6ba41ef | Shell | LLNL/pLiner | /benchmarks/Varity/Varity-intel/reference/cmp.sh | UTF-8 | 209 | 2.96875 | 3 | [
"Apache-2.0",
"LLVM-exception",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Run the -O0 and -O3 builds of a Varity test program on the same inputs
# so their floating-point results can be compared.
ftest=$1
# Inputs are embedded on line 2 of the generated .c file after the third
# "/" field, comma-separated; turn them into space-separated argv words.
input=`sed -n "2p" $ftest.c | cut -d "/" -f 3 | sed "s/,/ /g" `
O0exe="${ftest}_O0"
O3exe="${ftest}_O3"
echo "./$O0exe $input"
./$O0exe $input
echo "./$O3exe $input"
./$O3exe $input
| true |
e535bbeea02dbb5556aa07196ca1cb4a7658d06a | Shell | greencom-project/greencom-public | /Provisioning/RepositoryGateway/python/py-post-alive/post-alive.sh | UTF-8 | 310 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Heartbeat: post this gateway's installation id to the backend and stamp
# the time of the last successful run.
# note: in the future we may want a better place to resolve the installation_id.
# At the moment we get it from /home/pi/greencom_installation_id
inst_id=$(cat /home/pi/greencom_installation_id)
python /home/pi/greencom/python/py-post-alive/PyPostAlive.py $inst_id
date > /home/pi/.py-post-alive-last.log
| true |
40e1bc455eef2cd6ec8d6ef47c0944e5e62da3ee | Shell | emersonmx/stm | /docs/run_if_needed.sh | UTF-8 | 70 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Run "$cmd <remaining args>" only when at least one argument follows the
# command; with no extra arguments, exit silently.
cmd="$1"
shift
[[ $# -le 0 ]] && exit
# NOTE(review): the command line is rebuilt by string interpolation, so
# arguments are re-split and would be shell-interpreted -- do not feed
# untrusted input through this wrapper.
bash -c "$cmd $*"
| true |
f56613f21ed9810ed05cb75d3e4ad2f74b88e64f | Shell | dignifiedquire/blake2s_simd | /benches/bench_libsodium.sh | UTF-8 | 765 | 3.46875 | 3 | [
"MIT"
] | permissive | #! /usr/bin/bash
set -e -u -o pipefail
# To squeeze the absolute most out of this benchmark:
# - Download a source tarball for libsodium. Or if you're using the GitHub
# repo, make sure to check out the latest release tag. (origin/master is slower
# for some reason.)
# - Build libsodium with `./configure --enable-opt && make` to enable
# optimizations specific to the current machine.
# - Link against what you just built by removing `-lsodium` below and adding
# {libsodium}/src/libsodium/.libs/libsodium.a as an additional source file.
# Compile the benchmark next to this script into a throwaway binary,
# run it once, then remove the binary.
here="$(dirname "$BASH_SOURCE")"
target="$(mktemp --tmpdir="" bench_libsodium.XXXXX)"
set -v
gcc -Wall --pedantic -O3 -lsodium -o "$target" "$here/bench_libsodium.c"
set +v
echo "$target"
"$target"
rm "$target"
| true |
df4866ec7c1621b47d646771eed747b87590c8b2 | Shell | johnpoth/Dragonfly | /hack/build-supernode.sh | UTF-8 | 1,150 | 3.75 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Build the Dragonfly supernode jar (locally or inside Docker) and copy it
# into <repo>/bin. Paths are derived from this script's own location.
SUPERNODE_BINARY_NAME="supernode.jar"
curDir=$(cd "$(dirname "$0")" && pwd)
cd "${curDir}" || return
SUPERNODE_SOURCE_HOME="${curDir}/../src/supernode"
SUPERNODE_BIN="${SUPERNODE_SOURCE_HOME}/target"
BUILD_SOURCE_HOME=$(cd ".." && pwd)
BIN_DIR="${BUILD_SOURCE_HOME}/bin"
# Opt into the docker build with USE_DOCKER=1 in the environment.
USE_DOCKER=${USE_DOCKER:-"0"}
# Maven build plus docker image; jar still lands in $BIN_DIR.
build-supernode-docker() {
    cd "${SUPERNODE_SOURCE_HOME}" || return
    mvn clean package -DskipTests docker:build -e
    cp "${SUPERNODE_BIN}/supernode.jar" "${BIN_DIR}/${SUPERNODE_BINARY_NAME}"
}
# Plain local Maven build.
build-supernode-local(){
    cd "${SUPERNODE_SOURCE_HOME}" || return
    mvn clean package -DskipTests
    cp "${SUPERNODE_BIN}/supernode.jar" "${BIN_DIR}/${SUPERNODE_BINARY_NAME}"
}
create-dirs() {
    # Ensure the directory exists. The previous `test -e "$1"` result was
    # silently discarded (dead statement); `mkdir -p` alone handles both
    # the existing and the missing case.
    mkdir -p "$1"
}
# Entry point: make sure the output dir exists, pick the build flavor from
# $USE_DOCKER, and report where the jar ended up.
main() {
    create-dirs "${BIN_DIR}"
    case "${USE_DOCKER}" in
    "1")
        echo "Begin to build supernode with docker."
        build-supernode-docker
    ;;
    *)
        echo "Begin to build supernode in the local environment."
        build-supernode-local
    ;;
    esac
    echo "BUILD: supernode in ${BIN_DIR}/${SUPERNODE_BINARY_NAME}"
}
main "$@"
| true |
7fae3ee07b563f36bb881ad5a33c9e3b27a4a93f | Shell | epheo/kvm-cfg | /setup-kvm.sh | UTF-8 | 1,550 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# One-shot KVM demo: install libvirt, fetch an Ubuntu cloud image, bridge
# eth0, build a cloud-init seed ISO, and boot a test VM from both.
# libvirt appears to be the cleanest abstraction of KVM/QEMU, Xen, LXC, and
# others
sudo apt-get install qemu-kvm libvirt-bin bridge-utils
curl -LO http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
sudo cp trusty-server-cloudimg-amd64-disk1.img /var/lib/libvirt/images/
# tell libvirt to re-scan for new files
virsh pool-refresh default
# Bridge the primary NIC so guests get DHCP addresses on the LAN.
cat <<EOF | sudo tee -a /etc/network/interfaces
auto br0
iface br0 inet dhcp
    bridge_ports eth0
EOF
sudo ifup br0
# cloud-init NoCloud seed: metadata + user-data below become a "cidata" ISO.
cat <<EOF > meta-data
instance-id: iid-local01;
local-hostname: ubuntu
EOF
cat <<EOF > user-data
#cloud-config
# upgrade packages on startup
package_upgrade: true
# install git
packages:
 - git
password:ubuntu
ssh_pwauth: True
chpasswd: { expire: False }
ssh_authorized_keys:
  - ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAkuglTT9z6rVe1OYQKmKs2wGszXlIfDL1i+dUSCPnD0umkR1PM+Qki7fDWw99YZeTYqWBZSTub0VH4AOOmfZR6ODzisa1siZ6yTEuJSE1AVuY4lC7uYtvRqy8Ez7SDRJgaJ3ZdsI2h+a0h0QsE4Y1vbVmH9TvLq7dQkDlm6GhOXM= rsa-key-20080613
EOF
genisoimage -output configuration.iso -volid cidata -joliet -rock user-data meta-data
sudo cp configuration.iso /var/lib/libvirt/images/
virsh pool-refresh default
virsh vol-clone --pool default trusty-server-cloudimg-amd64-disk1.img test.img
# Boot the clone with the seed ISO attached as a second virtio disk.
virt-install -r 1024 \
 -n test \
 --vcpus=1 \
 --autostart \
 --memballoon virtio \
 --network bridge=br0 \
 --boot hd \
 --disk vol=default/test.img,format=qcow2,bus=virtio \
 --disk vol=default/configuration.iso,bus=virtio
virsh list
virsh dumpxml test
| true |
b58d2783caf0f874be3051b2d0d38b2b6ce73eb3 | Shell | gurbthebruin/CS-35L | /assignment3/poornames | UTF-8 | 1,934 | 3.765625 | 4 | [] | no_license | #! /bin/bash
#Gurbir Arora
#105178554
# List entries under a directory whose names are NOT "good" filenames
# (good = starts with a letter or underscore, then up to 13 chars from
# [a-zA-Z._-]), plus names that collide case-insensitively. -r recurses.
recursive=0
dir=.
dir_rec=0
#------if statements and case statements---------
if [ $# -gt 2 ]; then
    echo "Only up to two argument are allowed"
    exit
fi
#wrong usage or wrong number of arguments?
if [ "$1" ]; then
    case "$1" in
	-r)
	    shift
	    # -r passed? (use shift command here)
	    recursive=1
	    dir_rec=1
	    ;;
	-?*)
	    echo 'Invalid Directory name'
	    exit 1
	    ;;
	*)
	    dir="$1"
	    ;;
    esac
fi
# After shifting past -r, a remaining argument is the target directory.
if [ "$1" ] && [ "$dir_rec" == 1 ]
then
    dir="$1"
fi
if [ "$dir" ]; then
    if [ ! -d "$dir" ]; then
	echo 'Invalid Directory Name'
	exit 1
    elif [ -L "$dir" ]; then
	echo 'Symbolic Link!'
	exit 1
    fi
fi
# Disable globbing and split only on newlines so filenames with spaces
# survive the unquoted for-loops below.
set -f
IFS='
'
#else -> run non-recursive block
if [ "$recursive" == 0 ]; then
    # Names that fail the "good name" pattern (grep -v keeps the bad ones).
    filenames=$(find "$dir" -maxdepth 1 | \
	    grep -v -E '^(.*)\/([a-zA-Z_\n][a-zA-Z._-]{0,13})$|^\.\.$|^\.$')
    # Case-insensitive duplicates among the immediate entries.
    filesort=$(find "$dir" -maxdepth 1 | sort -f | uniq -iD)
    combo=$(echo "$filenames" && echo "$filesort")
    combosort=$(echo "$combo" | sort -u)
    #get all immediate filenames and print the ones that have invalid characters
    for var in $combosort
    do
	if [ -d "$var" ]; then
	    if [ $var != $dir ]; then
		echo "$var"/
	    fi
	else
	    echo "$var"
	fi
    done
fi
#if recursion -> run recursion statement
if [ "$recursive" == 1 ]; then
    filenames=$(find "$dir" | \
	    grep -v -E '^(.*)\/([a-zA-Z_][a-zA-Z._-]{0,13})$|^\.\.$|^\.$')
    sorted=
    # Collect case-insensitive duplicates within every subdirectory.
    for var2 in `find "$dir"`
    do
	if [ -d "$var2" ]; then
	    filesort=$(find "$var2" -maxdepth 1 | sort -f | uniq -iD)
	    if [ "$filesort" ]; then
		sorted=$(echo "$sorted" && echo "$filesort")
	    fi
	fi
    done
    combo=$(echo "$filenames" && echo "$sorted")
    combosort=$(echo "$combo" | sort -u)
    for var1 in $combosort
    do
	if [ -d "$var1" ]; then
	    if [ $var1 != $dir ]; then
		echo "$var1"/
	    fi
	else
	    echo "$var1"
	fi
    done
fi
IFS=''
| true |
155719765b4ab5cb827963b0264e6df65cd9d9d3 | Shell | wawltor/models | /PaddleCV/caffe2fluid/examples/imagenet/tools/cmp.sh | UTF-8 | 518 | 3.65625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
#function:
#   a tool used to compare the results produced by paddle and caffe
#
if [[ $# -lt 2 ]];then
    echo "usage:"
    echo "  bash $0 [model_name] [param_name] [caffe_name]"
    exit 1
fi
model_name=$1
param_name=$2
paddle_file="./results/${model_name}.paddle/${param_name}.npy"
# The caffe-side blob name may differ from the paddle param name; use the
# optional third argument when given, otherwise reuse the second.
if [[ $# -eq 3 ]];then
    caffe_file="./results/${model_name}.caffe/${3}.npy"
else
    caffe_file="./results/${model_name}.caffe/${2}.npy"
fi
cmd="python ./compare.py $paddle_file $caffe_file"
echo $cmd
eval $cmd
| true |
32b11fb44d4d79cdfb4accf072da114b99b53d3b | Shell | magnetised/proletaneous | /templates/home/services/publish/finish | UTF-8 | 531 | 3.484375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# This is run when the inotify service terminates
# The exit_status is 0 if the service terminated normally
# This occurs when the file changes, if the service is being started or
# restarted then the codes are != 0
exit_code=$1
exit_status=$2
# <%= ... %> placeholders are filled in by the templating step at deploy time.
FILE=<%= @revision_file %>
if [ "$exit_status" = 0 ]; then
  echo "Published revision $(cat $FILE) ... Restarting front server ..."
  # Puma restarts when it receives a SIGUSR2
  # https://github.com/puma/puma#restart
  /usr/bin/sv 2 <%= @home %>/service/enabled/front
fi
| true |
dbad75a94b9cbf2d4e6194a02b38593469c58b10 | Shell | kampde/ktools | /photo/geotagdir | UTF-8 | 809 | 4 | 4 | [] | no_license | #!/bin/bash
# For each directory argument: strip stale GPS EXIF data from the photos,
# then geotag them from GPX tracks in the same directory via gpsPhoto.pl.
# Helper scripts are resolved relative to this script's real location.
SCRIPT=`readlink -f "$0"`
GPSPHOTO=`dirname "$SCRIPT"`/gpsPhoto.pl
REMOVE_GPS=`dirname "$SCRIPT"`/borrar_datos_gps
TIMEOFFSET=0
if [ ! -x $GPSPHOTO ]
then
	echo "Can't find gpsPhoto.pl or it is not executable" >&2
	exit 1
fi
if [ ! -x $REMOVE_GPS ]
then
	echo "Can't find the remove gps script or it is not executable" >&2
	exit 1
fi
# Validate every argument before doing any work on the first one.
for dir
do
	if [ ! -d "${dir}" ]
	then
		echo "$dir is not a directory" >&2
		exit 1
	fi
done
for dir
do
	echo "Working on ${dir}..."
	echo "Removing old GPS data on ${dir}..."
	# Indent the remover's output for readability.
	$REMOVE_GPS "${dir}" | while read LINE
	do
		echo "    $LINE"
	done
	echo "geotagging ${dir}..."
	$GPSPHOTO --timeoffset "${TIMEOFFSET}" --maxtimediff 600 --dir "${dir}" --gpsdir "${dir}" >> "${dir}/geotag.log" 2>> ${dir}/geotag.err
	echo
	echo "Folder ${dir} done"
	echo
done
| true |
1fcbfd79b1b32e4e298aec0bfb8071822d738376 | Shell | tentacode/tamagotchi-batch | /scripts/display.sh | UTF-8 | 1,099 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Resolve the tamagotchi's data/template directories relative to this script.
TAMAGOTCHI_DIRECTORY=`dirname $0`/..
DATA_DIRECTORY="$TAMAGOTCHI_DIRECTORY/data"
TEMPLATE_DIRECTORY="$TAMAGOTCHI_DIRECTORY/templates"
# Makes the tamagotchi mood helper functions available
source "$TAMAGOTCHI_DIRECTORY/functions/mood.sh"
function display_stat()
{
    # Render one stat line: the emoji label, then either a check mark
    # (stat at zero) or one bubble per stat point, ending with a newline.
    local stat_name=$1
    local stat_emoji=$2

    # get_current_value fills the CURRENT_VALUE global for this stat
    get_current_value "$stat_name"

    printf '%s ' "$stat_emoji"

    # a stat at 0 is shown as a check mark
    if [ "$CURRENT_VALUE" -eq 0 ]; then
        printf '☑️'
    fi

    # otherwise, one bubble per point of the stat
    local i
    for (( i = 0; i < CURRENT_VALUE; i++ )); do
        printf '●'
    done

    # end of line
    printf '\n'
}
# Display the tamagotchi's mood (ASCII-art template + one-line status)
TAMAGOTCHI_NAME=`cat $DATA_DIRECTORY/name`
get_mood_value
get_mood_text $MOOD_VALUE
cat $TEMPLATE_DIRECTORY/mood_$MOOD_VALUE.txt
echo $TAMAGOTCHI_NAME $MOOD_TEXT
# Display the statistics
display_stat sad 😭
display_stat hunger 🍔
display_stat poop 💩
display_stat disease 🤒
# trailing newline
echo
7c662ffb617d87790f80c21c0c7580382a2de242 | Shell | bestephe/loom | /exps/hadoop-fairness/spark_run_scripts/two_sort_bess.sh | UTF-8 | 2,345 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Experiment driver: for each BESS network config, run two concurrent Spark
# TeraSorts (one per user) while capturing sender-side packets, then derive
# per-job throughput timeseries from the capture.
RUNS=1
for i in {1..3}
do
for conf in bess bess-qpf
do
    # Reconfigure BESS for this run, then start a background capture of the
    # three sender subnets (truncated to 64B headers).
    sudo -u ubuntu -H ./spark_run_scripts/spark_all_bess_netconf.sh $conf.conf
    sudo tcpdump -i loom1 -w /dev/shm/spark_tcp_flows_loom.pcap -s 64 src 10.10.1.1 or src 10.10.101.1 or src 10.10.102.1 &
    #sudo tcpdump -i loom1 -w /dev/shm/spark_tcp_flows_loom1.pcap -s 64 src 10.10.1.2 or src 10.10.101.2 or src 10.10.102.2 &
    #sudo tcpdump -i loom2 -w /dev/shm/spark_tcp_flows_loom2.pcap -s 64 src 10.10.1.2 or src 10.10.101.2 or src 10.10.102.2 &
    #TODO: I could collect a trace from BESS internals as well
    # Note: tcpdump has already been started as part of configuring BESS (fairnes.bess)
    # However, in order to get this to work, bessctl is run in the background
    # an may not be finished running yet.
    # For now, just sleep and hope BESS gets configured correctly.
    sleep 3
    ping 10.10.102.1 -c 1
    if [ $? -ne 0 ]
    then
        echo "BESS failed to configure correctly!"
        exit 1
    fi
    # Launch both sorts in parallel and wait for both to finish.
    SORT_JOBS=()
    echo "Starting TeraSort #1"
    time sudo -u ubuntu -H ./spark_run_scripts/spark_terasort_h1.sh &> tmp_sort1.out &
    SORT_JOBS+=$!
    echo "Starting TeraSort #2"
    time sudo -u ubuntu2 -H ./spark_run_scripts/spark_terasort_h2.sh &> tmp_sort2.out &
    SORT_JOBS+=" $!"
    wait ${SORT_JOBS[@]}
    echo "Finished TeraSorts"
    # Concatenate both job logs into one per-(conf, iteration) result file.
    cat tmp_sort1.out > results/two_sort_bess.$conf.$i.out
    echo "" >> results/two_sort_bess.$conf.$i.out;
    echo "" >> results/two_sort_bess.$conf.$i.out;
    cat tmp_sort2.out >> results/two_sort_bess.$conf.$i.out
    sudo killall tcpdump
    #mergecap -F pcap -w /dev/shm/spark_tcp_flows.pcap /dev/shm/spark_tcp_flows_loom1.pcap /dev/shm/spark_tcp_flows_loom2.pcap
    #./pcap_flows/get_job_tput_ts.py --pcap /dev/shm/spark_tcp_flows_loom.pcap --outf results/tputs_two_sort_bess.$i.yaml
    ./pcap_flows/get_job_tput_ts.py --pcap /dev/shm/spark_tcp_flows_loom.pcap --outf results/tputs_two_sort_bess.$conf.$i.yaml
    rm tmp_sort1.out
    rm tmp_sort2.out
    sudo rm -f /dev/shm/spark_tcp_flows.pcap
    sudo rm -f /dev/shm/spark_tcp_flows_loom1.pcap
    sudo rm -f /dev/shm/spark_tcp_flows_loom2.pcap
done
done
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.