blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
99836b8ce976794c52f9b58c4ba3fdd09cb2a46e | Shell | Qithub-BOT/sqiima | /testdata/tools/request_src/request_test.sh | UTF-8 | 1,177 | 3.046875 | 3 | [
"MIT"
] | permissive | #shellcheck shell=sh
# Simple example of shellspec usage
Describe 'echo command'
It 'should print ok'
When call echo 'ok'
The output should eq 'ok'
End
End
# テスト中に使われるグローバル変数のテスト
Describe 'Global Variable'
It 'check PATH_DIR_REQUEST_SRC is defined and is a valid path'
The value "$PATH_DIR_REQUEST_SRC" should be defined
The path "$PATH_DIR_REQUEST_SRC" should be exist
End
It 'check PATH_DIR_TOOLS is defined and is a valid path'
The value "$PATH_DIR_TOOLS" should be defined
The path "$PATH_DIR_TOOLS" should be exist
End
It 'check PATH_DIR_TESTDATA is defined and is a valid path'
The value "$PATH_DIR_TESTDATA" should be defined
The path "$PATH_DIR_TESTDATA" should be exist
End
It 'check PATH_DIR_WORK is defined and is avalid path'
The value "$PATH_DIR_WORK" should be defined
The path "$PATH_DIR_WORK" should be exist
End
It 'check PATH_DIR_REPO is defined and is a valid path'
The value "$PATH_DIR_REPO" should be defined
The path "${PATH_DIR_REPO}/.git" should be exist
End
End
| true |
d2e175e9a6e1e79ece954b517f9d2a1692548844 | Shell | claytonflesher/dw-cli-test-example | /test.sh | UTF-8 | 293 | 3.703125 | 4 | [] | no_license | #!/bin/bash -e
while getopts s:i: option
do
case "${option}"
in
s) SCRIPTS=${OPTARG};;
i) INPUT=${OPTARG};;
esac
done
for FILE in $SCRIPTS/*
do
SCRIPT=$(<$FILE)
echo $FILE
echo $SCRIPT
dw -i payload $INPUT "$SCRIPT"
if [ "$?" -ne 0 ] # Exit code is not 0
then
exit
fi
done
| true |
ff8ed2f3b82336c3599d69a03dd10d7a42d04eb0 | Shell | statetroopers/personnel | /scripts/sync-personnel-api.sh | UTF-8 | 279 | 2.703125 | 3 | [] | no_license | DIR_CWD=$1
echo 'Syncing personnel-api...'
(cd ${DIR_REPOS}personnel-api &&
CURRENT_BRANCH=$(git branch | sed -n -e 's/^\* \(.*\)/\1/p') &&
git fetch upstream &&
git checkout master &&
git merge upstream/master
git checkout $CURRENT_BRANCH)
echo 'Finished syncing personnel-api' | true |
dddfa89b1ebbb0c3388e292d7262f4641002678c | Shell | cloudfoundry/capi-release | /jobs/cloud_controller_ng/templates/post-restore-unlock.sh.erb | UTF-8 | 906 | 2.8125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -exo pipefail
source /var/vcap/packages/capi_utils/monit_utils.sh
source /var/vcap/packages/capi_utils/syslog_utils.sh
<% if p('release_level_backup') %>
tee_output_to_sys_log "cloud_controller_ng.$(basename "$0")"
if /var/vcap/jobs/bpm/bin/bpm list | grep nginx_maintenance | awk '{ print "nginx_maintenance is:", $3; if ($3=="stopped") {exit 1} }';
then
/var/vcap/jobs/bpm/bin/bpm stop cloud_controller_ng -p nginx_maintenance
wait_for_server_to_become_unavailable <%= "localhost:#{p("cc.external_port")}/healthz" %> 60
fi
monit_start_job cloud_controller_ng
wait_for_server_to_become_healthy <%= "localhost:#{p("cc.external_port")}/healthz" %> <%= p("cc.post_bbr_healthcheck_timeout_in_seconds") %>
sleep 30
<% (1..(p("cc.jobs.local.number_of_workers"))).each do |index| %>
monit_start_job cloud_controller_worker_local_<%= index %>
<% end %>
<% end %>
| true |
bb79de0d970c8eff0c0cf255509e14a66ec5ad0e | Shell | risooonho/DroneSimLab | /demos/bluerov/runtmux_game_docker.sh | UTF-8 | 2,924 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
UNREAL_PROXY_PATH=/DroneLab/demos/bluerov/unreal_proxy
DEMO_PATH=/DroneLab/demos/bluerov
SITL_POSITION_PORT=9989
ENTRY_POINT=unreal_proxy
ENTRY_PATH=$UNREAL_PROXY_PATH/
DRONESIMLAB_PATH=../../
#game defenitions
#GAME_PATH=/DroneLab/baked_games/Ocean1_packed/LinuxNoEditor/
GAME_PATH=/project_files/Ocean1_packed/LinuxNoEditor/
PACKED_NAME=Oceantest1
kill-session -t dronelab
source ../../scripts/common.sh
kill_images python_dev
kill_images sitl_image
if [ `docker ps | grep -v unreal_engine |wc -l` -gt 1 ]; then
echo "ERROR: Make sure no other docker images other then unreal_engine are running (counting on sequencial IP addresses)";
echo "use docker rm and docker ps to remove other docker containers"
exit 0
fi
function init_rov {
tmux send-keys "cd ../../dockers/python3_dev && ./run_image.sh" ENTER
#tmux send-keys "export UNREAL_PROXY_PATH=$UNREAL_PROXY_PATH" ENTER
tmux send-keys "export DEMO_PATH=$DEMO_PATH" ENTER
tmux send-keys "export SITL_POSITION_PORT=$1" ENTER
tmux send-keys "cd /DroneLab/ardupilot/ArduSub && ../Tools/autotest/sim_vehicle.py --out=udp:0.0.0.0:14550 -L OSRF0" ENTER
}
function pub_fdm {
tmux send-keys "cd ../../dockers/python3_dev && ./run_image.sh" ENTER
tmux send-keys "export DEMO_PATH=$DEMO_PATH" ENTER
tmux send-keys "export SITL_POSITION_PORT=$1" ENTER
tmux send-keys "export PATH=/miniconda/bin/:\$PATH" ENTER
tmux send-keys "cd $DEMO_PATH && python fdm_pub_underwater.py --config unreal_proxy/" ENTER
}
function image_bridge {
tmux send-keys "cd ../../dockers/python3_dev && ./run_image.sh" ENTER
tmux send-keys "export DEMO_PATH=$DEMO_PATH" ENTER
tmux send-keys "export SITL_POSITION_PORT=$1" ENTER
tmux send-keys "export PATH=/miniconda/bin/:\$PATH" ENTER
tmux send-keys "cd $DEMO_PATH && python ue4_image_bridge.py" ENTER
}
function run_game {
tmux send-keys "cd $DRONESIMLAB_PATH/dockers/python3_dev && PROJECT_FILES_DIR=$PROJECT_FILES_DIR ./run_image.sh" ENTER
tmux send-keys "export PATH=/miniconda/bin:\$PATH" ENTER
tmux send-keys "cd ${DEMO_PATH}" ENTER
tmux send-keys "python /DroneLab/UE4PyhtonBridge/set_path.py --entry_point $ENTRY_POINT --entry_path $ENTRY_PATH --packed_game_name $PACKED_NAME --packed_game_path $GAME_PATH" ENTER
tmux send-keys "cd ${GAME_PATH}" ENTER
tmux send-keys "INITIAL_DRONE_POS=$INITIAL_DRONE_POS CAMERA_RIG_PITCH=$CAMERA_RIG_PITCH DISPLAY=:0.0 ./run.sh" ENTER
}
#cleanning prev run
tmux new-session -d -s dronelab
#tmux send-keys "python drone_main.py" ENTER
#tmux send-keys "cd ../../dockers/unreal_engine_4 && ./attach.sh" ENTER
run_game
tmux new-window
tmux split-window -h
tmux select-pane -t 0
tmux split-window -v
tmux select-pane -t 2
tmux split-window -v
tmux select-pane -t 0
pub_fdm $SITL_POSITION_PORT
tmux select-pane -t 1
init_rov $SITL_POSITION_PORT
tmux select-pane -t 2
image_bridge
#tmux send-keys "./run.sh" ENTER
#tmux select-window -t 0
#tmux set -g mouse on
tmux att
| true |
047c60f0d61ef074d16b19ada29af8c9f0cd06d7 | Shell | thepwagner/dependagot | /bin/dependagot | UTF-8 | 274 | 2.59375 | 3 | [] | no_license | #!/bin/bash
initial_wd="$(pwd)"
cd "$(dirname "$0")/.."
gradle build
rel_wd=$(echo $initial_wd | sed -e "s#$(pwd)/##g")
docker run --rm -it \
-v /var/run/docker.sock:/var/run/docker.sock \
--net=host \
-v $(pwd):/work -w /work/$rel_wd \
dependagot-cli:latest "$@"
| true |
d2a70b08f1fe2a2345b651c788a218dcfb769a27 | Shell | gorgonia/gorgonia | /examples/mnist/download.sh | UTF-8 | 400 | 2.65625 | 3 | [
"LicenseRef-scancode-other-permissive",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
DST=$(cd $(dirname $0)/../testdata/mnist; pwd)
mkdir -p $DST 2> /dev/null
cd "$DST"
curl -O http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
curl -O http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
curl -O http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
curl -O http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
gzip -f -d t*-ubyte.gz
| true |
302aa4c9cc6050cee56a9c5477022f532962b4e4 | Shell | aivanli/test | /usbTest-desktop.sh | UTF-8 | 286 | 3.09375 | 3 | [] | no_license | #!/bin/bash
dev=$1
if [ "$dev" = '' ];then
dev=/dev/sdb
fi
while [ 1 ]
do
read -p "Please input ENTER if you are ready"
echo "---------------------"
lsusb |tee -a usb.txt
echo "*********************"
hdparm -tT $dev|tee -a usb.txt
echo "---------------------"
echo
done
| true |
8af44c049185ca60574bcd29d613c5caf434c7dc | Shell | s22644/SOP2021 | /BASH/zad1.sh | UTF-8 | 103 | 3.078125 | 3 | [] | no_license | #!/bin/bash
if [$@ -gt 1];
then
echo "Pierwszy parametr to: $1"
else
echo "Nie podano parametrów"
fi
| true |
0184de35c6c2b8cf08cf3c995588a8b9fc4d7008 | Shell | marulitua/dotfiles | /install_font.sh | UTF-8 | 945 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env bash
set -x
DIR=~/.local/share/fonts
mkdir -p $DIR
pushd $DIR
#curl -fLo "Fura Code Retina Nerd Font Complete.otf" https://github.com/haasosaurus/nerd-fonts/raw/regen-mono-font-fix/patched-fonts/FiraCode/Retina/complete/Fura%20Code%20Retina%20Nerd%20Font%20Complete.otf
#curl -fLo "Fura Code Retina Nerd Font Complete Mono.otf" https://github.com/haasosaurus/nerd-fonts/raw/regen-mono-font-fix/patched-fonts/FiraCode/Retina/complete/Fura%20Code%20Retina%20Nerd%20Font%20Complete%20Mono.otf
curl -fLo "Fura Code Retina Nerd Font Complete Mono.ttf" https://github.com/haasosaurus/nerd-fonts/raw/regen-mono-font-fix/patched-fonts/FiraCode/Retina/complete/Fura%20Code%20Retina%20Nerd%20Font%20Complete%20Mono.ttf
curl -fLo "Fura Code Retina Nerd Font Complete.ttf" https://github.com/haasosaurus/nerd-fonts/raw/regen-mono-font-fix/patched-fonts/FiraCode/Retina/complete/Fura%20Code%20Retina%20Nerd%20Font%20Complete.ttf
popd
| true |
788f34162a00c6eca7d5ea2a6e7fa02d880ed6e7 | Shell | bferrentinonascar/Scripts | /RAMDISK.sh | UTF-8 | 247 | 2.65625 | 3 | [] | no_license | #!/bin/sh
# RAMDISK.sh
#
#
# Created by Ferrentino, Ben on 8/8/13.
#
DISK_ID=$(hdid -nomount ram://2097152)
newfs_hfs -v Ramdisk ${DISK_ID}
diskutil mount ${DISK_ID}
echo "Enjoy your new Ramdisk! Don't try running this more than once, please"
exit
| true |
c20450ff7f8ee0f33f219443c1a62771de41ff5f | Shell | rudisimo/ansible-role-steam | /templates/valheim-server.sh.j2 | UTF-8 | 774 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e
test -e steamcmd && steamcmd \
+login {{ steam_account | default('anonymous') }} {{ steam_password | default('') }} \
+force_install_dir {{ valheim_install_path }} \
+app_update {{ valheim_steam_server_id }} validate \
+quit
export OLD_LD_LIBRARY_PATH=$LD_LIBRARY_PATH
export LD_LIBRARY_PATH={{ valheim_install_path }}/linux64:$LD_LIBRARY_PATH
export SteamAppId={{ valheim_steam_app_id }}
cd {{ valheim_install_path }}
{{ valheim_install_path }}/valheim_server.x86_64 \
-name "{{ item.name }}" \
-password "{{ item.password }}" \
-port "{{ item.port }}" \
-world "{{ item.world | default('Dedicated') }}" \
-savedir "{{ item.savedir | default(valheim_saves_path) }}/{{ item.port }}"
export LD_LIBRARY_PATH=$OLD_LD_LIBRARY_PATH
| true |
43cac15767a3837cf167db7213375fd1e5cd4bdc | Shell | dkaramchandani/AWS-Instance-Benchmarking-for-CPU-Memory-and-Disk | /cpu/CPUScript_SP.sh | UTF-8 | 285 | 2.734375 | 3 | [] | no_license | #!/bin/sh
echo CPU Benchmarking begins
listOfNames="SP"
make all
for operation in $listOfNames
do
for i in 1 2 3
do
for t in 1 2 3
do
#sbatch ./MyCPUBench cpu_${operation}_${thread}thread.dat
sbatch run$((t)).slurm
done
done
done
echo CPU Benchmarking completed
| true |
ad52afd61594d5bf2b0442b4fa7de011ae4a3813 | Shell | hpcuantwerpen/vsc-tutorial | /testscripts/workflows/1_example_dependent/workflow_job_depend.pbs.sh | UTF-8 | 215 | 2.640625 | 3 | [] | no_license | #!/bin/bash
#PBS -L tasks=1:lprocs=1:swap=1gb
#PBS -l walltime=10:00
cd "$PBS_O_WORKDIR"
mkdir mult-$multiplier ; cd mult-$multiplier
number=$(cat ../outputfile)
echo $(($number*$multiplier)) >outputfile
sleep 30
| true |
287864c44496c196fb63444c13fc11f5832de569 | Shell | PovilasU/devops-practice5 | /commands.sh | UTF-8 | 589 | 2.65625 | 3 | [] | no_license | #! /bin/bash
#step1 install docker-compose using PIP
pip install -U docker-compose
#step2 Create docker ocmpose file at any location on your system
#docker-compose.yml
#step3 check docker-compose validity
docker-compose config
#Command 'docker-compose' not found, but can be installed with:
#sudo snap install docker # version 19.03.11, or
#sudo apt install docker-compose
#Step 4 to isntall dependencies create playbook.yaml and run it in terminal
#to check if correct yaml sytax goto this site http://yaml-online-parser.appspot.com/
#sudo ansible-playbook playbook.yaml
| true |
37cb2b203e92eee65a767c06e579506e9dbb1c67 | Shell | xue-fei/chip-battery-status | /source/bin/chip-battery-xfce-genmon | UTF-8 | 429 | 3.359375 | 3 | [] | no_license | #!/bin/bash
PERCENTAGE=$(</usr/local/lib/chip-battery-status/percentage)
CHARGING=$(</usr/local/lib/chip-battery-status/charging)
if [[ "$CHARGING" -eq "1" ]] || [[ "$PERCENTAGE" -eq "100" ]]
then
# Show white text when charging or 100%
echo "<txt><span fgcolor='#FFFFFF'>""$PERCENTAGE""%</span></txt>"
else
# Show grey text when not charging
echo "<txt><span fgcolor='#AFAFAF'>""$PERCENTAGE""%</span></txt>"
fi | true |
51cfa1dfc73c29e5aa2a30020012c0f655b89867 | Shell | neeleshgupta27/devops | /shell-scripting/bash-c-style-for.sh | UTF-8 | 88 | 2.828125 | 3 | [] | no_license | #!/bin/bash
for (( i=1; i <= 3; i++ ))
do
echo "Your random number $i: $RANDOM"
done
| true |
00c46259a52abf040c6ff364b38dba78ded7f963 | Shell | zweecn/tuqu_bd | /tuqu_dev/shell/produce_img_shell/stat_data_source.sh | UTF-8 | 478 | 3.40625 | 3 | [] | no_license | #!/bin/bash
#input="data/data_source";
used_objs="data/used_objs";
if [ $# -ne 1 ]
then
echo "useage: stat_data_source data_filename";
exit 1;
fi;
input=$1;
echo ${input};
awk -F '\t' '{
if(FILENAME==ARGV[1]){
used_objs[$1]=1;
}else if(! ($1 in used_objs)){
count[$NF]+=1;
}
}END{
total=0;
for(type in count){
print type"\t"count[type];
total+=count[type];
}
print "total="total;
}' ${used_objs} ${input}
| true |
01477eb2e6ab0a5b8bc594c0936289062a6342cd | Shell | alvis/.dotfiles | /.zshrc | UTF-8 | 4,831 | 2.84375 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | # //
# GENERAL OPTIONS
# //
# don't complain if no match is found on a glob expression
setopt +o nomatch
# remove duplicated command history
setopt hist_ignore_all_dups
# allow any comment starting with a space (` `) will not be remembered in history
setopt hist_ignore_space
# use case-sensitive completion
# CASE_SENSITIVE="true"
# display red dots whilst waiting for completion
COMPLETION_WAITING_DOTS="true"
# disable bi-weekly auto-update checks
# DISABLE_AUTO_UPDATE="true"
# disable auto-setting terminal title
# DISABLE_AUTO_TITLE="true"
# disable colors in ls.
# DISABLE_LS_COLORS="true"
# disable marking untracked files under VCS as dirty
# NOTE: This makes repository status check for large repositories much faster
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# enable command auto-correction.
# ENABLE_CORRECTION="true"
# change the command execution time stamp shown in the history command output
# NOTE: either "mm/dd/yyyy", "dd.mm.yyyy", or "yyyy-mm-dd"
# HIST_STAMPS="mm/dd/yyyy"
# use hyphen-insensitive completion
# HYPHEN_INSENSITIVE="true"
# zsh custom folder
# ZSH_CUSTOM=$ZSH/custom
# compilation flags
export ARCHFLAGS="-arch x86_64"
# language environment
export LANG=en_US.UTF-8
# preferred editor for local and remote sessions
if [[ -n $SSH_CONNECTION ]]; then
export EDITOR='micro'
else
# open a file using the emacs deamon with a new frame on the current screen
export EDITOR='emacsclient -c'
fi
# //
# TMUX
# //
# Do not use autostart, explicitly start/attach session
# https://github.com/syl20bnr/spacemacs/issues/988
ZSH_TMUX_AUTOSTART=false
[[ $TMUX == "" ]] && tmux new-session -A
# //
# THEME
# //
# Set name of the theme to load. Optionally, if you set this to "random"
# it'll load a random theme each time that oh-my-zsh is loaded.
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
ZSH_THEME="bullet-train"
BULLETTRAIN_PROMPT_CHAR="%F{red}❯%F{yellow}❯%F{green}❯%f"
BULLETTRAIN_PROMPT_ORDER=(
time
status
custom
dir
virtualenv
git
cmd_exec_time
nvm
)
# //
# // PLUGINS
# //
# path to oh-my-zsh installation
export ZSH=~/.oh-my-zsh
# MacOS intergration
# > usage:
# - pfd: return the path of the frontmost Finder window
# - pfs: return the current Finder selection
# - cdf: cd to the current Finder directory
# - quick-look: quick-Look a specified file
plugins=(macos)
# direnv
plugins+=(direnv)
# bind ctrl-r for history searching
plugins+=(history-search-multi-word)
zstyle ":history-search-multi-word" highlight-color "fg=yellow,bold"
# enable notification
plugins+=(notify)
zstyle ':notify:*' error-title "🔥 Error!!!"
zstyle ':notify:*' success-title "🎉 Success!!!"
zstyle ':notify:*' activate-terminal yes
# enable nvm
plugins+=(zsh-nvm)
# enable aws
plugins+=(aws)
# source oh my zsh
source $ZSH/oh-my-zsh.sh
# autosuggest the rest of a command
source $(brew --prefix)/share/zsh-autosuggestions/zsh-autosuggestions.zsh
# cd command with an interactive filter
source $ZSH_CUSTOM/plugins/enhancd/init.sh
# highlight commands whilst they are typed
source $(brew --prefix)/share/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
# enable credential injection via 1password
source ~/.config/op/plugins.sh || true
# //
# GO
# //
export GOPATH=$HOME/go
export GOROOT=$(brew --prefix)/opt/go/libexec
export PATH=$PATH:$GOPATH/bin
export PATH=$PATH:$GOROOT/bin
# //
# PYTHON
# //
__conda_setup="$('$(brew --prefix)/Caskroom/miniconda/base/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "$(brew --prefix)/Caskroom/miniconda/base/etc/profile.d/conda.sh" ]; then
. "$(brew --prefix)/Caskroom/miniconda/base/etc/profile.d/conda.sh"
else
export PATH="$(brew --prefix)/Caskroom/miniconda/base/bin:$PATH"
fi
fi
unset __conda_setup
# //
# SSH
# //
export SSH_KEY_PATH="~/.ssh/id_rsa"
# //
# DevOps
# //
command -v flux >/dev/null && . <(flux completion zsh)
# //
# GCP
# //
export CLOUDSDK_PYTHON="$(brew --prefix)/opt/python@3/libexec/bin/python"
source "$(brew --prefix)/Caskroom/google-cloud-sdk/latest/google-cloud-sdk/path.zsh.inc"
source "$(brew --prefix)/Caskroom/google-cloud-sdk/latest/google-cloud-sdk/completion.zsh.inc"
# //
# ALIAS
# //
# inject copilot cli's ??, git? & gh? alias
eval "$(github-copilot-cli alias -- "$0")"
# assume aws role with `assume <profile>`
alias assume=". awsume"
# count the number of files under the current folder
alias filecount="du -a | cut -d/ -f2 | sort | uniq -c | sort -nr"
# edit a file via the default editor
alias edit="$EDITOR"
# download a file with wget
alias download="wget --debug --continue --tries=0 --read-timeout=30 --random-wait"
# add iterm2 integration
test -e "${HOME}/.iterm2_shell_integration.zsh" && source "${HOME}/.iterm2_shell_integration.zsh"
| true |
aaa8ee106f7e91a7d372833f0201ac56c2a5ce80 | Shell | compbiocore/conda-recipes | /recipes/salmon/build.sh | UTF-8 | 305 | 2.734375 | 3 | [] | no_license | #!/bin/bash
set -eu -o pipefail
mkdir -p $PREFIX/bin
mkdir -p $PREFIX/lib
mkdir -p build
cd build
cmake -DCMAKE_OSX_DEPLOYMENT_TARGET=10.8 -DCONDA_BUILD=TRUE -DCMAKE_INSTALL_PREFIX:PATH=$PREFIX -DBOOST_ROOT=$PREFIX -DBoost_NO_SYSTEM_PATHS=ON ..
make install CFLAGS="-L${PREFIX}/lib -I${PREFIX}/include"
| true |
e4f36657f0c56a8e19a877ff868265030f689954 | Shell | duk3luk3/dotfiles | /bins/mute.sh | UTF-8 | 285 | 2.828125 | 3 | [
"WTFPL"
] | permissive | #!/bin/bash
CURRENT_STATE=`amixer -c 1 get Master | egrep 'Playback.*?\[o' | egrep -o '\[o.+\]'`
if [[ $CURRENT_STATE == '[on]' ]]; then
amixer -c 1 set Master mute
else
amixer -c 1 set Master unmute
amixer -c 1 set Speaker unmute
amixer -c 1 set Headphone unmute
fi
| true |
f3da532bac58f4c831a3fc30d6b0748faba771b2 | Shell | ipfire/ipfire-2.x | /src/initscripts/packages/clamav | UTF-8 | 3,014 | 3.421875 | 3 | [] | no_license | #!/bin/sh
###############################################################################
# #
# IPFire.org - A linux based firewall #
# Copyright (C) 2007-2022 IPFire Team <info@ipfire.org> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
. /etc/sysconfig/rc
. $rc_functions
case "$1" in
start)
if [ $(basename $0) == "clamav" ]; then
boot_mesg "Starting Clamav Definition Updater..."
loadproc /usr/bin/freshclam -d -c 10
COUNTER=0
while [ "$COUNTER" -lt "61" ]; do
[ -e "/var/lib/clamav/main.cld" ] || \
[ -e "/var/lib/clamav/main.cvd" ] && \
[ -e "/var/lib/clamav/bytecode.cld" ] || \
[ -e "/var/lib/clamav/bytecode.cvd" ] && \
[ -e "/var/lib/clamav/daily.cld" ] || \
[ -e "/var/lib/clamav/daily.cvd" ] && \
break
if [ "$COUNTER" -lt "1" ]; then
boot_mesg -n "Downloading database"
else
boot_mesg -n "."
fi
sleep 15
COUNTER=$(($COUNTER + 1))
done
if [ "$COUNTER" -gt "0" ]; then
boot_mesg
fi
if [ "$COUNTER" -gt "60" ]; then
boot_mesg "Download takes longer than 15min check freshclam status!"
echo_failure;
exit 1;
fi
boot_mesg "Starting Clamav Daemon..."
loadproc /usr/sbin/clamd
else
boot_mesg "Starting Clamav in background..."
/etc/init.d/clamav start > /dev/tty12 < /dev/tty12 &
echo_ok;
exit 0;
fi
;;
stop)
boot_mesg "Stopping Clamav Definition Updater..."
killproc /usr/bin/freshclam
rm -rf /var/lib/clamav/*.tmp
boot_mesg "Stopping Clamav Daemon..."
killproc /usr/sbin/clamd
;;
restart)
$0 stop
sleep 1
$0 start
;;
status)
statusproc /usr/sbin/clamd
statusproc /usr/bin/freshclam
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac
| true |
9bc1976520e52520f9bc52ec6ec55051197994d2 | Shell | dreamsxin/example | /imagick/scripts/striations | UTF-8 | 7,336 | 4.03125 | 4 | [] | no_license | #!/bin/bash
#
# Developed by Fred Weinhaus 1/20/2009 .......... revised 2/11/2009
#
# USAGE: striations [-t type] [-r radius] [-c center] [-k column] infile outfile
# USAGE: striations [-h or -help]
#
# OPTIONS:
#
# -t type type of striations; choices are radial (or r)
# or circular (c); default=radial
# -r radius radius from center point where striations begin;
# integer>=0; default is half the minimum image dimension
# -c center center point for striation effect; center=cx,cy;
# integer>=0; default is center of image
# -k column column to use for circular striations; integers;
# 0<=column<width
#
###
#
# NAME: STRIATIONS
#
# PURPOSE: To apply radial or circular striations to image.
#
# DESCRIPTION: STRIATIONS applies radial or circular striations to image
# starting at a user specified center point and radius.
#
# OPTIONS:
#
# -t type ... TYPE of striations. Choices are radial (or r) or circular (or c).
# The default is radial.
#
# -r radius ... RADIUS is the radial distance from the center point at
# which the striations begin. Values are integers>=0. The default is
# half the minimum dimension of the image.
#
# -c center ... CENTER=cx,cy are the comma separated coordinates in the image
# from where the radial striations eminate. Values are integers>=0. The default
# is the center of the image.
#
# -k column ... COLUMN of the polar image to use for generating the circular
# striations. Values are integers, such that 0<=column<width of the image.
# The default=0.
#
# NOTE: Requires IM 6.4.2-8 or higher due to the use of -distort polar/depolar.
#
# CAVEAT: No guarantee that this script will work on all platforms,
# nor that trapping of inconsistent parameters is complete and
# foolproof. Use At Your Own Risk.
#
######
#
# set default values
type="radial" #radial or circular
rad="" #defaults to half the min width or height
center="" #defaults to the center of the image
column=0
# set directory for temporary files
dir="." # suggestions are dir="." or dir="/tmp"
# set up functions to report Usage and Usage with Description
PROGNAME=`type $0 | awk '{print $3}'` # search for executable on path
PROGDIR=`dirname $PROGNAME` # extract directory of program
PROGNAME=`basename $PROGNAME` # base name of program
usage1()
{
echo >&2 ""
echo >&2 "$PROGNAME:" "$@"
sed >&2 -n '/^###/q; /^#/!q; s/^#//; s/^ //; 4,$p' "$PROGDIR/$PROGNAME"
}
usage2()
{
echo >&2 ""
echo >&2 "$PROGNAME:" "$@"
sed >&2 -n '/^######/q; /^#/!q; s/^#*//; s/^ //; 4,$p' "$PROGDIR/$PROGNAME"
}
# function to report error messages
errMsg()
{
echo ""
echo $1
echo ""
usage1
exit 1
}
# function to test for minus at start of value of second part of option 1 or 2
checkMinus()
{
test=`echo "$1" | grep -c '^-.*$'` # returns 1 if match; 0 otherwise
[ $test -eq 1 ] && errMsg "$errorMsg"
}
# test for correct number of arguments and get values
if [ $# -eq 0 ]
then
# help information
echo ""
usage2
exit 0
elif [ $# -gt 10 ]
then
errMsg "--- TOO MANY ARGUMENTS WERE PROVIDED ---"
else
while [ $# -gt 0 ]
do
# get parameter values
case "$1" in
-h|-help) # help information
echo ""
usage2
exit 0
;;
-t) # get type
shift # to get the next parameter
# test if parameter starts with minus sign
errorMsg="--- INVALID TYPE SPECIFICATION ---"
checkMinus "$1"
type=`echo "$1" | tr '[A-Z]' '[a-z]'`
case "$type" in
radial|r) type="radial";;
circular|c) type="circular";;
*) errMsg "--- TYPE=$type IS AN INVALID VALUE ---"
esac
;;
-r) # get rad
shift # to get the next parameter
# test if parameter starts with minus sign
errorMsg="--- INVALID RADIUS SPECIFICATION ---"
checkMinus "$1"
rad=`expr "$1" : '\([0-9]*\)'`
[ "$rad" = "" ] && errMsg "RADIUS=$rad MUST BE A NON-NEGATIVE INTEGER"
;;
-c) # get center
shift # to get the next parameter
# test if parameter starts with minus sign
errorMsg="--- INVALID CENTER SPECIFICATION ---"
checkMinus "$1"
test=`echo "$1" | tr "," " " | wc -w`
[ $test -eq 1 -o $test -gt 2 ] && errMsg "--- INCORRECT NUMBER OF COORDINATES SUPPLIED ---"
center=`expr "$1" : '\([0-9]*,[0-9]*\)'`
[ "$center" = "" ] && errMsg "--- CENTER=$coords MUST BE A PAIR OF NON-NEGATIVE INTEGERS SEPARATED BY A COMMA ---"
center="$1,"
cx=`echo "$center" | cut -d, -f1`
cy=`echo "$center" | cut -d, -f2`
;;
-k) # get column
shift # to get the next parameter
# test if parameter starts with minus sign
errorMsg="--- INVALID COLUMN SPECIFICATION ---"
checkMinus "$1"
column=`expr "$1" : '\([0-9]*\)'`
[ "$column" = "" ] && errMsg "COLUMN=$column MUST BE A NON-NEGATIVE INTEGER"
;;
-) # STDIN and end of arguments
break
;;
-*) # any other - argument
errMsg "--- UNKNOWN OPTION ---"
;;
*) # end of arguments
break
;;
esac
shift # next option
done
#
# get infile and outfile
infile=$1
outfile=$2
fi
# test that infile provided
[ "$infile" = "" ] && errMsg "NO INPUT FILE SPECIFIED"
# test that outfile provided
[ "$outfile" = "" ] && errMsg "NO OUTPUT FILE SPECIFIED"
# setup temporary images
tmpA1="$dir/striations_1_$$.mpc"
tmpA2="$dir/striations_1_$$.cache"
trap "rm -f $tmpA1 $tmpA2; exit 0" 0
trap "rm -f $tmpA1 $tmpA2; exit 1" 1 2 3 15
# read input and make sure OK
if convert -quiet -regard-warnings "$infile" +repage "$tmpA1"
then
: ' do nothing '
else
errMsg "--- FILE $infile DOES NOT EXIST OR IS NOT AN ORDINARY FILE, NOT READABLE OR HAS ZERO SIZE ---"
fi
# get center if not provided
if [ "$center" = "" ]; then
cx=`convert $tmpA1 -format "%[fx:(w-1)/2]" info:`
cy=`convert $tmpA1 -format "%[fx:(h-1)/2]" info:`
fi
# get radius if not profided
if [ "$rad" = "" ]; then
rad=`convert $tmpA1 -format "%[fx:floor(min(w,h)/2))]" info:`
fi
# correct radius to account for polar transformation scaling
rad1=`convert $tmpA1 -format "%[fx:floor(2*$rad*h/sqrt(w*w+h*h))]" info:`
# get image width and height and distance outside radius
ww=`convert $tmpA1 -format %w info:`
hh=`convert $tmpA1 -format %h info:`
hmr=`convert xc: -format "%[fx:max(1,$hh-$rad1)]" info:`
# test column
[ $column -ge $ww ] && errMsg "--- COLUMN=$column MUST BE AN INTEGER BETWEEN 0 AND IMAGE WIDTH ---"
# convert image to polar coords
convert $tmpA1 -distort depolar -1,0,$cx,$cy $tmpA1
if [ "$type" = "radial" ]; then
# crop to radius and repeat last row and convert back to rectangular
convert $tmpA1[${ww}x${rad1}+0+0] \
\( $tmpA1[${ww}x1+0+${rad1}] -scale ${ww}x${hmr}! \) \
-append -crop ${ww}x${hh}+0+0 +repage \
-distort polar -1,0,$cx,$cy $outfile
elif [ "$type" = "circular" ]; then
convert $tmpA1[${ww}x${rad1}+0+0] \
\( $tmpA1[1x${hmr}+${column}+${rad1}] -scale ${ww}x${hmr}! \) \
-append -crop ${ww}x${hh}+0+0 +repage \
-distort polar -1,0,$cx,$cy $outfile
fi
exit 0 | true |
11111f99b55db0e8d44123c0ad46e396ffbfc9c8 | Shell | chainsawriot/mightysteve | /mightysteve.sh | UTF-8 | 1,222 | 4.09375 | 4 | [] | no_license | #!/usr/bin/env bash
# Mighty Steve 1.0.2
# A shell script to perform hazel-like photo sorting operation in Mac OS X
# By Chainsaw Riot (https://github.com/chainsawriot)
# released under MIT License
# tested in Mac OS X 10.10.2
function checkfile {
fdate=`GetFileInfo -m "$1"`
parseddate=(`echo $fdate | sed -e 's/[:/]/ /g'`)
x="${parseddate[2]}"
y="${parseddate[0]}"
echo $x-$y
}
## safe version of mv, will add date before extension if filename already existed in the directory
## A work round for no mv -b in BSD mv
function mv_safe {
if [ -e $2/$1 ]
then
fnamet=$1
extension=${fnamet##*.}
mv "$1" "$2"/${fnamet%.$extension}_`date +%Y%m%d%H%M%S`.$extension
else
mv "$1" "$2"
fi
}
function sortfile {
dirName=`checkfile $1`
if [ ! -d $dirName ]
then
mkdir $dirName
fi
mv_safe "$1" "$dirName"
}
# rename any file with space(s) in filename
shopt -s nullglob
for f in *\ *
do
mv "$f" "${f// /_}";
done
# do the sorting
for image in {*.jpg,*.JPG}
do
sortfile $image
done
# sorting video files
for video in {*.m4v,*.M4V,*.avi,*.AVI,*.mp4,*.MP4}
do
mv_safe "$video" "Video"
done
# remove the by-product of "-" folder
if [ -d "-" ]
then
rmdir '-'
fi
| true |
42d441db8110b9404351781e86afc0c802c6b216 | Shell | jkitching/dotfiles | /bin/lockscreen | UTF-8 | 352 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Tries to use GraphicsMagick if available (faster image processing).
if command -v gm >/dev/null 2>&1; then
GM=""
else
GM=""
fi
PIPE=$(mktemp -u --suffix=.png)
mkfifo $PIPE
scrot $PIPE &
i3lock -i <(cat $PIPE | $GM convert -blur -10x8 - png:-)
rm $PIPE
#i3lock -i <($GM import -window root png:- | $GM convert -blur -10x8 - png:-)
| true |
3b9399b9025f36a85750dd2da04acc7357c650cd | Shell | gustavo-adorno/Thunder | /a_modulo-5/s-test | UTF-8 | 231 | 2.734375 | 3 | [] | no_license | #!/bin/bash
USUARIO=!_Gustavo_Adorno
F2=/DATA/a_modulo-7/nro_screen_telegram.txt
# Leer Numero de sesion
NUM2=$(head -1 $F2 | tail -1)
sleep 1
screen -S $NUM2.telegram-session -p 0 -X stuff "`printf "msg $USUARIO Hola\r"`";
| true |
b6bf577cd829cc352f7a2472d2db4daaacbd5e7b | Shell | cvidler/amd_build_disc | /amdcentos74/downloadupdates.sh | UTF-8 | 4,654 | 4.3125 | 4 | [] | no_license | #!/bin/bash
#Use yum to update all packages on the build disc
PKGDIR="disc/Packages"
DEBUG=0
function debugecho {
dbglevel=${2:-1}
if [ $DEBUG -ge $dbglevel ]; then techo "*** DEBUG[$dbglevel]: $1 \e[39m"; fi
}
function techo {
echo -e "[`date -u "+%Y-%m-%d %H:%M:%S"`]: $1"
}
function quit {
#cleanup temp dirs and exit passing exit code
if [ -d "${TMPDIR}" ]; then rm -rf "${TMPDIR}"; fi
if [ -d "${TMPROOT}" ]; then rm -rf "${ROOTDIR}"; fi
exit $1
}
# command line arguments
# -d may be repeated; each occurrence raises the DEBUG verbosity by one.
# NOTE(review): OPTS=0 is assigned on bad arguments ("#show help") but is
# never read afterwards and no help is actually printed or exit taken.
while getopts ":d" OPT; do
	case $OPT in
		d)
		DEBUG=$((DEBUG + 1))
		;;
		\?)
		OPTS=0 #show help
		techo "\e[31m***FATAL:\e[0m Invalid argument -$OPTARG."
		;;
		:)
		OPTS=0 #show help
		techo "\e[31m***FATAL:\e[0m argument -$OPTARG requires parameter."
		;;
	esac
done
# sanity test
if [ ! -w $PKGDIR ]; then techo "\e[31mERROR:\e[39m No write permissions to $PKGDIR, cannot continue."; quit 1; fi
# build package list
# list already available rpm files and extract the package name from them, append the list to send to yum.
#REGEX="^([a-zA-Z0-9\_\-\+]+?)(?=-(?:[0-9][a-z0-9\.-]+?)(?:\.el7[_[0-9\.]*)(?:\.centos)?(?:\.[0-9])?(?:\.noarch|\.x86_64)\.rpm$)"
#REGEX="^([a-zA-Z0-9\_\-\+\.]+)(-api)?(?=-(?:[0-9][a-z0-9\.-]+?)(?:\.el7[_[0-9\.]*)(?:\.centos)?(?:\.[0-9])?(?:\.noarch|\.x86_64)\.rpm$)"
# PCRE lookahead strips the "-<version>.el7*.<arch>.rpm" suffix from each
# file name, with special handling for "-api" and 1.8.0-openjdk packages.
REGEX="^([a-zA-Z0-9\_\-\+\.]+?(?:-[0-9\.]+-api|-1\.8\.0-openjdk[-a-z]*?)?)(?=-(?:[0-9][a-z0-9\.-]+?)(?:\.el7[_[0-9\.]*)(?:\.centos)?(?:\.[0-9])?(?:\.noarch|\.x86_64)\.rpm$)"
# One package name per line; PKGLIST is the space-joined form passed to yum.
PKGLISTNL=$(cd "$PKGDIR/" && ls -1 | grep -Po "$REGEX")
PKGLIST=$(echo -e "${PKGLISTNL}" | tr '\n' ' ')
PKGCOUNT=$(echo -e "${PKGLISTNL}" | wc -l)
debugecho "PKGLIST: [[$PKGLIST]]" 3
debugecho "PKGCOUNT: [[$PKGCOUNT]]" 2
# Get yum to download all packages from the built package list
TMPDIR=$(mktemp -d)
TMPROOT=$(mktemp -d)
techo "\e[34mINFO:\e[39m Check for Updates for ${PKGCOUNT} packages and Downloading to: ${TMPDIR}"
techo "Will take a few minutes..."
#YUMOUT=$(sudo yum update --disableplugin=deltarpm --downloadonly --downloaddir=${TMPDIR} ${PKGLIST})
YUMOUT=$(yum clean all && yumdownloader --archlist=x86_64 -x "*i686" --destdir ${TMPDIR} ${PKGLIST})
RC=$?
debugecho "RC: [[$RC]] YUMOUT: [[$YUMOUT]]" 3
techo "\e[32mPASS:\e[39m Download Complete."
if [ $RC -ne 0 ]; then techo "\e[33mWARNING:\e[39m Error indicated in yum output:\n ${YUMOUT}"; quit $RC; fi
# Copy new rpms to the build disc folder
# iterate list of packages, find matching (old and new) versions and remove the old, replacing with new version.
PKGI=0
while read -r PKG; do
	PKGI=$((PKGI + 1))
	OLDFILE=""
	NEWFILE=""
	# NOTE(review): `ls ... 2>&1 | head -n 1` captures the error text when no
	# file matches; the -f tests below then blank the variable out again.
	OLDFILE=$(ls $PKGDIR/$PKG-* 2>&1 | head -n 1)
	NEWFILE=$(ls $TMPDIR/$PKG-* 2>&1 | head -n 1)
	debugecho "PKGI: [[$PKGI]] PKG: [[$PKG]] - OLDFILE: [[$OLDFILE]] - NEWFILE: [[$NEWFILE]]" 3
	if [ ! -f "$OLDFILE" ]; then OLDFILE=""; fi
	if [ ! -f "$NEWFILE" ]; then NEWFILE=""; fi
	debugecho "PKGI: [[$PKGI]] PKG: [[$PKG]] - OLDFILE: [[$OLDFILE]] - NEWFILE: [[$NEWFILE]]" 3
	# Neither an existing nor a freshly downloaded file: internal error.
	if [ "$OLDFILE" == "" ] && [ "$NEWFILE" == "" ]; then techo "PKG: [[$PKG]] shit's broke!"; quit 255; fi
	# check for updated package
	if [ "$NEWFILE" == "" ]; then
	# new package file doesn't exist
	debugecho "\e[34mINFO:\e[39m #$PKGI/$PKGCOUNT $(basename $OLDFILE) has no update!" 2
	continue;
	fi
	if [ "$OLDFILE" == "" ] && [ -r "$NEWFILE" ]; then
	# old file doesn't exist - new package
	debugecho "\e[34mINFO:\e[39m #$PKGI/$PKGCOUNT New package $(basename $NEWFILE)" 1
	cp $NEWFILE $PKGDIR
	if [ $? -ne 0 ]; then techo -e "\e[33mWARNING:\e[39m Couldn't copy new package $NEWFILE to $PKGDIR!"; quit 1; fi
	continue;
	fi
	# check for same version
	if [ "$(basename $OLDFILE)" == "$(basename $NEWFILE)" ]; then
	# package files match, skip, nothing to do
	debugecho "\e[34mINFO:\e[39m #$PKGI/$PKGCOUNT $(basename $OLDFILE) and $(basename $NEWFILE) are identical - no update!" 3
	continue;
	fi
	# copy update package and remove the old package.
	techo "\e[34mINFO:\e[39m #$PKGI/$PKGCOUNT Updating $(basename $OLDFILE) to $(basename $NEWFILE)"
	cp "$NEWFILE" "$PKGDIR" && rm -f "$OLDFILE"
	if [ $? -ne 0 ]; then techo "\e[33mWARNING:\e[39m #$PKGI/$PKGCOUNT Couldn't update package $NEWFILE to $PKGDIR and remove $OLDFILE"; quit 1; fi
done < <(echo -e "$PKGLISTNL")
# after, use existing scripts to update repo info.
./testrepo.sh
RC=$?
if [ $RC -ne 0 ]; then
	techo "\e[33mWARNING:\e[39m Repository test failed, need manual intevention to fix dependencies."
	quit $RC
fi
./updaterepo.sh
RC=$?
if [ $RC -ne 0 ]; then
	techo "\e[33mWARNING:\e[39m Repository update failed, need manual intevention."
	quit $RC
fi
techo "\e[32mPASS:\e[39m Package update process complete."
# done
quit 0
| true |
c2aaa00b44c13105e108642b78c7787a218645b1 | Shell | mpictor/dotfiles | /bin/quiet | UTF-8 | 1,015 | 3.9375 | 4 | [] | no_license | #!/bin/bash
function show_help {
cat << EOH
$0 [-l] [-p] command args...
-l: suppress LD_LIBRARY_PATH
-p: clean PATH, removing entries referencing \$HOME, inin, p4v
-d: debug (prints command before executing)
EOH
}
if [[ ! -e $XAUTHORITY ]]; then
echo "missing XAUTHORITY, attempting to fix..."
if [[ -e ~/.Xauthority ]]; then
cp ~/.Xauthority $XAUTHORITY
else
echo failed
fi
fi
#clean PATH, removing entries referencing \$HOME, inin, p4v
function clean_path {
tr ':' '\n' <<<$PATH |\
grep -ve $HOME -e inin -e opt/p4v -e ^$ |\
tr '\n' ':' |\
sed 's/:$//;'
}
OPTIND=1
env=''
debug=0
while getopts "h?lpd" opt; do
case "$opt" in
h|\?)
show_help
exit 0
;;
l) env='env -u LD_LIBRARY_PATH'
;;
p) PATH=`clean_path`
;;
d) debug=1
;;
esac
done
shift $((OPTIND-1))
[ "$1" = "--" ] && shift
[[ $debug -eq 1 ]] && echo "PATH=$PATH $env $@"
PATH=$PATH $env $@ 2>/dev/null >/dev/null &
unset OPTIND env debug
| true |
943e27afd72cf5c776bc759af41441e96d024add | Shell | ppc64le/build-scripts | /c/camel-k/camel-k_rhel8.2.sh | UTF-8 | 1,963 | 3.265625 | 3 | [
"Apache-2.0"
] | permissive | # ----------------------------------------------------------------------------
#
# Package : camel-k
# Version : 1.3.0
# Source repo : https://github.com/apache/camel-k
# Tested on : RHEL 8.2
# Script License : Apache License, Version 2 or later
# Maintainer : Amit Sadaphule <amits2@us.ibm.com>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
#!/bin/bash
set -eux
CWD=`pwd`
# Install dependencies
yum install -y make git wget java-11-openjdk gcc
JDK_PATHS=$(compgen -G '/usr/lib/jvm/java-11-openjdk-*')
export JAVA_HOME=${JDK_PATHS%$'\n'*}
export PATH=$JAVA_HOME/bin:$PATH
# Download and install go
wget https://golang.org/dl/go1.15.2.linux-ppc64le.tar.gz
tar -xzf go1.15.2.linux-ppc64le.tar.gz
rm -rf go1.15.2.linux-ppc64le.tar.gz
export GOPATH=`pwd`/gopath
export GOROOT=`pwd`/go
export PATH=`pwd`/go/bin:$GOPATH/bin:$PATH
# Clone the repo and build/test
mkdir -p $GOPATH/src/github.com/apache
cd $GOPATH/src/github.com/apache
git clone https://github.com/apache/camel-k.git
cd camel-k/
git checkout v1.3.0
sed -i 's/openjdk11:slim/openjdk11:ubi/g' build/Dockerfile
sed -i 's/BaseImage = "adoptopenjdk\/openjdk11:slim"/BaseImage = "adoptopenjdk\/openjdk11:ubi"/g' pkg/util/defaults/defaults.go
sed -i '/replaces: camel-k-operator.v1.2.0/d' config/manifests/bases/camel-k.clusterserviceversion.yaml
sed -i 's/image: docker.io\/apache\/camel-k:1.3.0-SNAPSHOT/image: docker.io\/apache\/camel-k:1.3.0/g' config/manager/operator-deployment.yaml
make controller-gen
make kustomize
make build
make test
make package-artifacts
make images
echo "Build, unit test execution and image creation successful!"
| true |
b3f064968f06d76ebe2220ce7bfbdd51a264b50e | Shell | containers/storage | /hack/govet.sh | UTF-8 | 194 | 2.890625 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Run `go vet` over every package in the module (vendored code excluded) and
# fail fast on the first package that does not pass.
for package in $(go list ./... | grep -v /vendor/) ; do
        # Quote the package path (SC2086). Import paths emitted by `go list`
        # contain no spaces, but quoting keeps the call safe and lint-clean.
        if ! go vet "${package}" ; then
                echo "Error: source package ${package} does not pass go vet."
                exit 1
        fi
done
exit 0
| true |
5f600418bc2b3a0f399b6388fb6f27fca88f4183 | Shell | tech-otaku/vps-config | /home/steve/scripts/add-vhost-na.sh | UTF-8 | 17,344 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env bash
# Steve Ward: 2019-09-29
# USAGE: sudo add-vhost.sh <domain> [<tld>]
# NOTE: If <tld> is not supplied a tld of 'com' is assumed i.e. add-vhost.sh tech-otaku or add-vhost.sh steveward me.uk
# ALIAS: vhost-add <domain> [<tld>]
# CODE: /home/steve/scripts/add-vhost.sh
# TREE: Files and directories below marked '-*-' are created/modified by this script.
#
# |--- etc/
# |--- apache2/
# |--- sites-available/
# |-*- domain.tld.conf 0644 root:root
#
# |--- home/
# |--- user/
# |--- www/
# |-*- domain.tld/ 0755 root:www-data
# |-*- public_html/ 2750 user:www-data
# |-*- error.php 0640 user:www-data
# |-*- index.html 0640 user:www-data
# |-*- info.php 0640 user:www-data
# |-*- .htdbm 0440 user:www-data
# |-*- .prevent_deletion 0400 root:root
# Flow: read per-domain settings from vhost-config-na.json, interactively
# confirm them, then materialise matching Apache + Nginx vhost configs from
# templates and build a skeleton document root with tight permissions.
clear
# Exit if root is not running this script.
if [ $EUID -ne 0 ]; then
    printf "ERROR: This script must be run as root.\n" 1>&2
    exit 1
fi
# Exit if no domain name was specified.
if [ -z "${1}" ]; then
    printf "ERROR: No domain name was specified.\n"
    exit 1
fi
# Exit if the configuration file doesn't exist.
if [ ! -f "/home/steve/config/vhost-config-na.json" ]; then
    printf "ERROR: Can't find the configuration file '/home/steve/config/vhost-config-na.json'.\n"
    exit 1
fi
DOMAIN="${1}"
if [ -z "${2}" ]; then
    TLD="com"
else
    TLD="${2}"
fi
# Exit if an apache .conf file for the domain already exists.
# NOTE(review): bare 'exit' here returns the status of the preceding printf
# (0) — for consistency with the other guards this should likely be 'exit 1'.
if [ -f "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf" ]; then
    printf "ERROR: It looks like a virtual host already exists for domain '${DOMAIN}.${TLD}'.\n"
    exit
fi
# Exit if the configuration file doesn't contain configuration data for the domain.
grep -q '"'$DOMAIN.$TLD'"' /home/steve/config/vhost-config-na.json
if [ $? -ne 0 ]; then
    printf "ERROR: No configuration data found for domain '$DOMAIN.$TLD'.\n"
    exit 1
fi
# Exit if attempting to configure a virtual host for a sub-domain of 'techotaku.com'.
if [[ "${DOMAIN}" == *"techotaku" && ! "${DOMAIN}" =~ ^techotaku && "${TLD}" == "com" ]]; then
    printf "ERROR: Sub-domains cannot be configured for domain 'techotaku.com'.\n"
    exit 1
fi
# Get global default values from the configuration file.
CONFIG_ONLY=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['defaults']['config_only'])")
CREATE_ERROR=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['defaults']['create_error'])")
CREATE_INFO=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['defaults']['create_info'])")
FORCE_WWW=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['defaults']['force_www'])")
GROUP=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['defaults']['group'])")
PROTECT=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['defaults']['protect'])")
PROTECT_INFO=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['defaults']['protect_info'])")
SSL=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['defaults']['ssl'])")
# Get domain-specific values from the configuration file.
AUTH_USER=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['domain']['$DOMAIN.$TLD']['auth_user'])")
IGNORE_HTACCESS=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['domain']['$DOMAIN.$TLD']['ignore_htaccess'])")
OWNER=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['domain']['$DOMAIN.$TLD']['owner'])")
POOL=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['domain']['$DOMAIN.$TLD']['pool'])")
ROOT_DIR=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['domain']['$DOMAIN.$TLD']['root_dir'])")
WWW=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['domain']['$DOMAIN.$TLD']['www'])")
CERT_NAME=$(cat /home/steve/config/vhost-config-na.json | python3 -c "import sys, json; print(json.load(sys.stdin)['domain']['$DOMAIN.$TLD']['cert_name'])")
DOCUMENT_ROOT=$ROOT_DIR/public_html
# Exit if the user associated with the domain doesn't exist.
# NOTE(review): this guard prints an error but is missing an 'exit 1', so the
# script carries on even when the owner account does not exist.
if [ $(grep -c "^${OWNER}:" /etc/passwd) -eq 0 ]; then
    printf "ERROR: The user '${OWNER}' associated with the domain '$DOMAIN.$TLD' doesn't exist.\n"
fi
# Exit if the user's 'www' directory doesn't exist.
if [ ! -d "/home/${OWNER}/www" ]; then
    printf "ERROR: The directory '/home/${OWNER}/www' associated with the domain '$DOMAIN.$TLD' doesn't exist.\n"
    exit 1
fi
# Exit if PHP-FPM pool associated with the user doesn't exist.
# NOTE(review): PHP 7.4 is hard-coded here while PHP_VERSION is detected
# dynamically further down — keep the two in sync.
if [ ! -f "/etc/php/7.4/fpm/pool.d/${POOL}.conf" ]; then
    printf "ERROR: The PHP-FPM pool '${POOL}' associated with the domain '$DOMAIN.$TLD' doesn't exist.\n"
    exit 1
fi
# Interactive review: `read -e -i` pre-fills each prompt with the current
# value so pressing Enter accepts the configured default.
printf "====================================== CONFIGURE VIRTUAL HOST =====================================\n"
printf "Create virtual host for domain '${DOMAIN}.${TLD}'\n"
printf "Configuration directory: ${ROOT_DIR}/\n"
printf "===================================================================================================\n"
read -e -i "${POOL}" -p "> PHP-FPM Pool to use: " input
POOL="${input:-$POOL}"
read -e -i "${SSL}" -p "> Configure for SSL (Y/n) ? " input
SSL="${input:-$SSL}"
if [[ $SSL == "Y" ]]; then
    read -e -i "${CERT_NAME}" -p "> SSL Certificate to use: " input
    CERT_NAME="${input:-$CERT_NAME}"
fi
read -e -i "${WWW}" -p "> Does this domain have a www prefix (Y/n) ? " input
WWW="${input:-$WWW}"
#FORCE="n"
if [[ $WWW == "Y" ]]; then
    #FORCE="Y"
    read -e -i "${FORCE_WWW}" -p "> Redirect non-www requests to 'www.' (Y/n) ? " input
    FORCE_WWW="${input:-$FORCE_WWW}"
fi
read -e -i "${IGNORE_HTACCESS}" -p "> Ignore '.htaccess' files (Y/n) ? " input
IGNORE_HTACCESS="${input:-$IGNORE_HTACCESS}"
read -e -i "${CREATE_INFO}" -p "> Create info.php [f]ull|[p]art|[m]ove|[n]one ? " input
CREATE_INFO="${input:-$CREATE_INFO}"
#PROTECT_INFO="n"
if [[ ! $CREATE_INFO == "n" ]]; then
    #PROTECT_INFO="Y"
    read -e -i "${PROTECT_INFO}" -p "> Protect info.php with HTTP Auth (Y/n) ? " input
    PROTECT_INFO="${input:-$PROTECT_INFO}"
else
    PROTECT_INFO="n"
fi
read -e -i "${CREATE_ERROR}" -p "> Create error.php (Y/n) ? " input
CREATE_ERROR="${input:-$CREATE_ERROR}"
FORWARD="N"
if [[ "${DOMAIN}" == "techotaku" ]]; then
    FORWARD="Y"
    read -e -i "${FORWARD}" -p "> Forward '${DOMAIN}.${TLD}' to 'tech-otaku.com' (Y/n) ? " input
    FORWARD="${input:-$FORWARD}"
fi
read -e -i "${PROTECT}" -p "> Prevent future deletion of this virtual host (Y/n) ? " input
PROTECT="${input:-$PROTECT}"
clear
# Summary screen followed by a final yes/no confirmation (default "n").
printf "====================================== CONFIGURATION DETAILS ======================================\n"
printf "Virtual host: ${DOMAIN}.${TLD}\n"
printf "Root directory: ${ROOT_DIR}\n"
printf "Document Root: ${DOCUMENT_ROOT}\n"
printf "PHP-FPM Pool: ${POOL}\n"
printf "Configure for SSL: " && [[ $SSL == 'Y' ]] && printf "Yes\n" || printf "No\n"
[[ $SSL == "Y" ]] && printf "SSL Certificate Name: %s\n" ${CERT_NAME}
printf "Has 'www' prefix: " && [[ $WWW == 'Y' ]] && printf "Yes\n" || printf "No\n"
[[ $WWW == "Y" ]] && printf "Redirect non-www to 'www': " && ( [[ $FORCE_WWW == 'Y' ]] && printf "Yes\n" || printf "No\n" )
printf "Ignore '.htaccess' files: " && [[ $IGNORE_HTACCESS == 'Y' ]] && printf "Yes\n" || printf "No\n"
printf "Create 'info.php': ${CREATE_INFO}\n"
[[ ! $CREATE_INFO == "n" ]] && printf "Protect 'info.php' with HTTP Auth: " && ( [[ $PROTECT_INFO == 'Y' ]] && printf "Yes (user is '${AUTH_USER}')\n" || printf "No\n" )
printf "Create 'error.php': " && [[ $CREATE_ERROR == 'Y' ]] && printf "Yes\n" || printf "No\n"
printf "Protected from deletion: " && [[ $PROTECT == 'Y' ]] && printf "Yes\n" || printf "No\n"
printf "Owner: ${OWNER}\n"
printf "Group: ${GROUP}\n\n"
CONT="n"
read -e -i "${CONT}" -p "> Continue with configuration (Y/n) ? " input
CONT="${input:-$CONT}"
if [[ $CONT == "n" ]]; then
    #if [ "$run" == n ] ; then
    echo "INFORMATION: Configuration of virtual host '${DOMAIN}.${TLD}' cancelled."
    exit 1
fi
# --- Materialise the document root and config files from templates ---
TEMPLATES=/home/steve/templates
PHP_VERSION=$(php --version | awk '/^PHP/ {print $2}' | cut -d '.' -f 1-2)
mkdir -p "${ROOT_DIR}"
mkdir "${DOCUMENT_ROOT}"
# PROTECT VIRTUAL HOST CONFIG FROM DELETION
[[ "${PROTECT}" == "Y" ]] && touch "${ROOT_DIR}/.prevent-deletion"
# ERROR.PHP
[[ "${CREATE_ERROR}" == 'Y' ]] && cp "${TEMPLATES}/error-template.php" "${DOCUMENT_ROOT}/error.php"
# INFO.PHP
case "${CREATE_INFO}" in
    "f")
        cp "${TEMPLATES}/info/info-full.php" "${DOCUMENT_ROOT}/info.php"
        ;;
    "p")
        cp "${TEMPLATES}/info/info-part.php" "${DOCUMENT_ROOT}/info.php"
        ;;
    "m")
        cp "${TEMPLATES}/info/info-move.php" "${DOCUMENT_ROOT}/info.php"
        ;;
    *)
esac
# PROTECT INFO.PHP WITH HTTP AUTH
[[ "${PROTECT_INFO}" == "Y" ]] && cp -r /home/steve/.htpasswds/.htdbm "${ROOT_DIR}/.htdbm"
# INDEX.HTML
cp "${TEMPLATES}/index-template.html" "${DOCUMENT_ROOT}/index.html"
# Cache-busting version tag, e.g. FAC-mon20190929120000utc (lower-cased).
VER=$(echo "FAC-"$(echo $(date +%a)$(date +%Y%m%d%H%M%S)$(date +%Z) | perl -ne 'print lc'))
sed -i 's/_TITLE_/'"${DOMAIN}"'.'"${TLD}"' | Coming Soon/g' "${DOCUMENT_ROOT}/index.html"
sed -i 's/_DOMAIN_/'"${DOMAIN}"'.'"${TLD}"'/g' "${DOCUMENT_ROOT}/index.html"
sed -i 's/_TAG_LINE_/The future home of something new/g' "${DOCUMENT_ROOT}/index.html"
sed -i 's/_VERSION_/'"${VER}"'/g' "${DOCUMENT_ROOT}/index.html"
# Apache Configuration File
if [[ $WWW == "Y" ]]; then
    cp ${TEMPLATES}/nginx-apache/apache/-e-vhost-apache-template-www.conf "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
else
    cp ${TEMPLATES}/nginx-apache/apache/-f-vhost-apache-template.conf "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
fi
# Splice the .htaccess-equivalent directives between the BEGIN/END markers
# of the template when info.php is to be protected by HTTP auth.
if [[ "${PROTECT_INFO}" == 'Y' ]]; then
    sed -i "/### BEGIN GENERATED .HTACCESS DIRECTIVES \[DO NOT DELETE THIS LINE\]/,/### END GENERATED .HTACCESS DIRECTIVES \[DO NOT DELETE THIS LINE\]/ { /### BEGIN GENERATED .HTACCESS DIRECTIVES \[DO NOT DELETE THIS LINE\]/{p; r ${TEMPLATES}/htaccess/non-wp-htaccess.conf
        }; /### END GENERATED .HTACCESS DIRECTIVES \[DO NOT DELETE THIS LINE\]/p; d }" "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
fi
#UNIQUE=$(tr -dc A-Za-z0-9 </dev/urandom | tr '[:upper:]' '[:lower:]' | head -c 5)
# Apache's backend port (nginx proxies to it), taken from ports.conf.
APACHE_PORT=$(cat /etc/apache2/ports.conf | grep ^Listen | head -n1 | awk '{print $2}')
GENERATED=$(date "+%d/%m/%y at %H:%M:%S")
# Fill in the template placeholders (the '!' delimiter avoids clashes with
# the '/' characters inside paths).
sed -i 's!_GENERATED_!'"${GENERATED}"'!g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's!_TEMPLATE_DIRECTORY_!'"${TEMPLATES}"'!g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's!_DOMAIN_!'"${DOMAIN}"'!g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's!_TLD_!'"${TLD}"'!g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's!_APACHE_PORT_!'"${APACHE_PORT}"'!g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
DIRECTIVE_VALUE="None"
if [[ "${IGNORE_HTACCESS}" == 'n' ]]; then
    DIRECTIVE_VALUE="All"
fi
sed -i 's!_ALLOWOVERRIDE_!'"AllowOverride ${DIRECTIVE_VALUE}"'!g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's!_ALLOWOVERRIDELIST_!'"AllowOverrideList ${DIRECTIVE_VALUE}"'!g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's/_POOL_/'"${POOL}"'/g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's!_DOCUMENT_ROOT_!'"${DOCUMENT_ROOT}"'!g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's!_ROOT_DIRECTORY_!'"${ROOT_DIR}"'!g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's/_AUTH_USER_/'"${AUTH_USER}"'/g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's/_CERT_NAME_/'"${CERT_NAME}"'/g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
#sed -i 's/_UNIQUE_/'"${UNIQUE}"'/g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's/_UNIQUE_/'"${DOMAIN}.${TLD}"'/g' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
# Nginx Configuration File
# Template choice depends on the SSL / www-prefix / force-www combination.
if [[ $SSL == "Y" ]]; then
    if [[ $WWW == "Y" ]]; then
        # ssl:YES, www:YES, force-www:YES
        if [[ $FORCE_WWW == "Y" ]]; then
            cp ${TEMPLATES}/nginx-apache/nginx/-a-vhost-nginx-template-ssl-www-force.conf "/etc/nginx/sites-available/${DOMAIN}.${TLD}.conf"
        # else
        # cp ${TEMPLATES}/php-fpm/-b-vhost-config-template-ssl-www.conf "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
        fi
    else
        # ssl:YES, www:NO, force-www:NO
        cp ${TEMPLATES}/nginx-apache/nginx/-c-vhost-nginx-template-ssl.conf "/etc/nginx/sites-available/${DOMAIN}.${TLD}.conf"
    fi
else
    if [[ $WWW == "Y" ]]; then
        if [[ $FORCE_WWW == "Y" ]]; then
            # ssl:NO, www:YES, force-www:YES
            cp ${TEMPLATES}/nginx-apache/nginx/-d-vhost-nginx-template-www-force.conf "/etc/nginx/sites-available/${DOMAIN}.${TLD}.conf"
        # else
        # cp ${TEMPLATES}/php-fpm/-e-vhost-config-template-www.conf "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
        fi
    else
        # ssl:NO, www:NO, force-www:NO
        cp ${TEMPLATES}/nginx-apache/nginx/-f-vhost-nginx-template.conf "/etc/nginx/sites-available/${DOMAIN}.${TLD}.conf"
    fi
fi
IP=$(hostname -I | awk '{ print $1 }')
sed -i 's!_GENERATED_!'"${GENERATED}"'!g' "/etc/nginx/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's!_TEMPLATE_DIRECTORY_!'"${TEMPLATES}"'!g' "/etc/nginx/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's!_DOMAIN_!'"${DOMAIN}"'!g' "/etc/nginx/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's!_TLD_!'"${TLD}"'!g' "/etc/nginx/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's!_DOCUMENT_ROOT_!'"${DOCUMENT_ROOT}"'!g' "/etc/nginx/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's!_IP_!'"${IP}"'!g' "/etc/nginx/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's!_APACHE_PORT_!'"${APACHE_PORT}"'!g' "/etc/nginx/sites-available/${DOMAIN}.${TLD}.conf"
sed -i 's/_CERT_NAME_/'"${CERT_NAME}"'/g' "/etc/nginx/sites-available/${DOMAIN}.${TLD}.conf"
#exit
# FOR DOMAIN TECHOTAKU.COM
# Special case: techotaku.com is only a forwarder to tech-otaku.com, so the
# rewrite rules are swapped in and the skeleton site files are removed again.
if [[ "${DOMAIN}" == "techotaku" && "${FORWARD}" == "Y" ]]; then
    sed -i "/# Redirect non-www requests to 'www.'/ {
        r ${TEMPLATES}/forward-domain-techotaku.conf
        d
    }" "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
    sed -i '/RewriteCond %{HTTP_HOST} !\^www\\./d' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
    sed -i 's/https:\/\/www.%{HTTP_HOST}%{REQUEST_URI}/https:\/\/www.tech-otaku.com%{REQUEST_URI}/' "/etc/apache2/sites-available/${DOMAIN}.${TLD}.conf"
    [ -f "${ROOT_DIR}/.htdbm" ] && rm "${ROOT_DIR}/.htdbm"
    [ -f "${DOCUMENT_ROOT}/error.php" ] && rm "${DOCUMENT_ROOT}/error.php"
    #[ -f "${DOCUMENT_ROOT}/index.html" ] && rm "${DOCUMENT_ROOT}/index.html"
    [ -f "${DOCUMENT_ROOT}/info.php" ] && rm "${DOCUMENT_ROOT}/info.php"
    [ -f "${DOCUMENT_ROOT}/.htaccess" ] && rm "${DOCUMENT_ROOT}/.htaccess"
    echo "<!-- Generated on $GENERATED -->" > "${DOCUMENT_ROOT}/index.html"
fi
# --- Ownership and permission hardening per the TREE diagram above ---
#if [[ "${DOMAIN}.${TLD}" == *"barrieward.com" ]]; then
chown root:"${GROUP}" "${ROOT_DIR}"
chmod 755 "${ROOT_DIR}"
if [ -f "${ROOT_DIR}/.htdbm" ]; then
    chown "${OWNER}":"${GROUP}" "${ROOT_DIR}/.htdbm"
    chmod 440 "${ROOT_DIR}/.htdbm"
fi
if [ -f "${ROOT_DIR}/.prevent-deletion" ]; then
    chown root:root "${ROOT_DIR}/.prevent-deletion"
    chmod 400 "${ROOT_DIR}/.prevent-deletion"
fi
find "${DOCUMENT_ROOT}"/. -type d -exec chmod 750 {} +
find "${DOCUMENT_ROOT}"/. -type f -exec chmod 640 {} +
chown -R "${OWNER}":"${GROUP}" "${DOCUMENT_ROOT}"
# setgid bit + default ACL: new files created later inherit the web group.
chmod g+s "${DOCUMENT_ROOT}"
#setfacl -Rdm g:www-data:rx "${DOCUMENT_ROOT}"
setfacl -Rdm g:"${GROUP}":rx "${DOCUMENT_ROOT}"
#fi
# --- Enable the vhost in both servers and reload them ---
printf "INFORMATION: Enabling domain (Apache) '${DOMAIN}.${TLD}'\n"
a2ensite "${DOMAIN}.${TLD}.conf"
printf "INFORMATION: Enabling domain (Nginx) '${DOMAIN}.${TLD}'\n"
ln -s "/etc/nginx/sites-available/${DOMAIN}.${TLD}.conf" "/etc/nginx/sites-enabled/${DOMAIN}.${TLD}.conf"
printf "INFORMATION: Restarting Apache\n"
systemctl reload apache2
printf "INFORMATION: Restarting Nginx\n"
systemctl reload nginx
#printf "INFORMATION: Restarting php${PHP_VERSION}-fpm\n"
#systemctl restart php"${PHP_VERSION}"-fpm
#echo ""
#echo "IMPORTANT: '${DOCUMENT_ROOT}/info.php' is protected by HTTP Authentication."
#echo "Username: chiaki"
#echo ""
if [[ $SSL == "Y" ]]; then
    echo ""
    echo "WARNING: This virtual host has been configured to rewrite all requests to HTTPS."
    echo "To avoid a redirect loop ensure ${DOMAIN}.${TLD} is paused – not active – on Cloudflare."
    echo ""
fi
if [ ! -f "/etc/php/${PHP_VERSION}/fpm/pool.d/${POOL}.conf" ]; then
    echo ""
    echo "WARNING: The pool '/etc/php/${PHP_VERSION}/fpm/pool.d/${POOL}.conf' does not exist and needs"
    echo "to be created in order for this virtual host to function correctly."
    echo ""
fi
| true |
18f23824fbfa8d448bb718709b3eafd0dcb4b8f0 | Shell | tylerztl/fabric-sdk-go | /scripts/start_network.sh | UTF-8 | 1,864 | 4.15625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# Copyright Ziggurat Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#

# Abort early rather than running docker-compose in the wrong directory.
cd artifacts/network || exit 1

# Print the usage message
# NOTE: this definition must precede the getopts loop below. It was
# previously defined after the loop, so invoking the script with -h (or any
# unknown flag) failed with "printHelp: command not found" instead of
# printing the usage text.
function printHelp () {
	echo "Usage: "
	echo "  runSolo.sh -m up|down|restart"
	echo "    - 'up' - bring up the network with docker-compose up"
	echo "    - 'down' - clear the network with docker-compose down"
	echo "    - 'restart' - restart the network"
}

# Parse commandline args
while getopts "h?m:c:t:d:f:s:l:" opt; do
  case "$opt" in
    h|\?)
      printHelp
      exit 0
    ;;
    m) MODE=$OPTARG
    ;;
  esac
done

# Force-remove every container on the host (not just this project's).
function dkcl(){
        CONTAINER_IDS=$(docker ps -aq)
        echo
        if [ -z "$CONTAINER_IDS" -o "$CONTAINER_IDS" = " " ]; then
                echo "========== No containers available for deletion =========="
        else
                docker rm -f $CONTAINER_IDS
        fi
        echo
}

# Remove chaincode/peer images left behind by previous runs.
function dkrm(){
        DOCKER_IMAGE_IDS=$(docker images | grep "dev\|none\|test-vp\|peer[0-9]-" | awk '{print $3}')
        echo
        if [ -z "$DOCKER_IMAGE_IDS" -o "$DOCKER_IMAGE_IDS" = " " ]; then
                echo "========== No images available for deletion ==========="
        else
                docker rmi -f $DOCKER_IMAGE_IDS
        fi
        echo
}

function networkUp() {
    echo
    echo Start the network
    docker-compose up -d
    echo
}

# Tear the network down; containers/images are only pruned on a full 'down',
# not when called as part of 'restart'.
function networkDown() {
    echo
    echo teardown the network and clean the containers and intermediate images
    docker-compose down --volumes --remove-orphans
    if [ "$MODE" != "restart" ]; then
      dkcl
      dkrm
    fi
}

function networkRestart() {
    networkDown
    networkUp
    echo
}

#Create the network using docker compose
if [ "${MODE}" == "up" ]; then
    networkUp
elif [ "${MODE}" == "down" ]; then ## Clear the network
    networkDown
elif [ "${MODE}" == "restart" ]; then ## Restart the network
    networkRestart
else
	printHelp
	exit 1
fi
| true |
f1a932cc5da4d3d9842be72aa8cbb3c507a64920 | Shell | jfzo/PUCV-projects | /textos esquizofrenia/data/main.sh | UTF-8 | 852 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# Re-point the 'training' and 'testing' symlinks in the current directory.
# Arguments: $1 - target for 'training', $2 - target for 'testing'.
function create_links {
	rm -f training testing
	# Quote the targets (SC2086) so paths containing spaces survive intact.
	ln -s "$1" training
	ln -s "$2" testing
}
# Build the vector representation for the given feature tuple ($1, a
# comma-separated list such as "A,C,D,...") and run the k-NN classifier,
# logging the run to testing/results_<features>.log (removed first so each
# run starts from a clean log).
function execute_experimentation {
	# A,C,D,F,I,N,P,R,S,V,W,Z
	# Quote $1 (SC2086) so a malformed feature string cannot word-split.
	rm -f "testing/results_$1.log"
	./build_vector_representation.sh "$1"
	python ../src/classify.py --tr training --ts testing -o testing -r 10 -k 3 -l "testing/results_$1.log"
}
# Default feature set: the full tuple of POS-tag features.
features='A,C,D,F,I,N,P,R,S,V,W,Z'
#features='tfidf'
# NOTE(review): with no argument the script prints usage but does NOT exit —
# it deliberately continues with the default feature set above.
if [ $# -eq 0 ]
then
	echo "Usage:$0 features_to_use"
else
	features=$1
fi
echo "Features to be used: "$features
# Generate each dataset
# Three leave-one-out folds: train on two "Relato" sets, test the held-out one.
create_links "ab" Relato_C
execute_experimentation $features
create_links "ac" Relato_B
execute_experimentation $features
create_links "bc" Relato_A
execute_experimentation $features
# for each one and for each tuple of features execute: ./build_vector_representation.sh comma_separated_tuple_of_pos_tags
| true |
8930d9d8c6dcb1c82bc7a3b85e2b0092173c5748 | Shell | vfcastro/dropbox-sisop2 | /test/test3.sh | UTF-8 | 1,433 | 2.78125 | 3 | [] | no_license | CWD=`pwd`
# End-to-end replication test: three servers, two client sessions for the
# same user, one file dropped into each session, then md5 comparison of
# every replica. $CWD is set to `pwd` before this point.
FILE1=kubespray-2.10.3.zip
FILE2=inf01043trabalhofinal.zip
# Start the three servers, each told the host:port of its two peers.
# NOTE(review): server1's second argument is 1 while the others get 0 —
# presumably a primary/backup flag; confirm against the server sources.
cd $CWD/test/server1
nohup ./server1 5000 1 localhost 4999 localhost 4998 &
sleep 2
cd $CWD/test/server2
nohup ./server2 4999 0 localhost 5000 localhost 4998 &
sleep 2
cd $CWD/test/server3
nohup ./server3 4998 0 localhost 5000 localhost 4999 &
sleep 2
# Two sessions of user1 against the first server, on different local ports.
cd $CWD/test/client-session1
nohup ./client user1 localhost 5000 6000 &
sleep 2
cd $CWD/test/client-session2
nohup ./client user1 localhost 5000 6001 &
sleep 2
cd $CWD
# Drop FILE1 into session 1 and wait up to ~30s for it to sync to session 2.
cp ./test/$FILE1 $CWD/test/client-session1/sync_dir_user1/
# NOTE(review): the loop never breaks early — once the file appears it just
# stops sleeping and spins through the remaining iterations.
for i in `seq 30`
do
	if [ ! -f $CWD/test/client-session2/sync_dir_user1/$FILE1 ]
	then
		sleep 1
	fi
done
# Kill the first server so the rest of the test exercises failover.
killall server1
sleep 5
cd $CWD
# Drop FILE2 into session 2 and wait for it to replicate back to session 1.
cp ./test/$FILE2 $CWD/test/client-session2/sync_dir_user1/
for i in `seq 30`
do
	if [ ! -f $CWD/test/client-session1/sync_dir_user1/$FILE2 ]
	then
		sleep 1
	fi
done
# Checksums of the originals followed by every replica, for eyeball diffing.
md5sum $CWD/test/$FILE1
md5sum $CWD/test/client-session1/sync_dir_user1/$FILE1
md5sum $CWD/test/client-session2/sync_dir_user1/$FILE1
md5sum $CWD/test/server1/sync_dir_user1/$FILE1
md5sum $CWD/test/server2/sync_dir_user1/$FILE1
md5sum $CWD/test/server3/sync_dir_user1/$FILE1
md5sum $CWD/test/$FILE2
md5sum $CWD/test/client-session1/sync_dir_user1/$FILE2
md5sum $CWD/test/client-session2/sync_dir_user1/$FILE2
md5sum $CWD/test/server2/sync_dir_user1/$FILE2
md5sum $CWD/test/server3/sync_dir_user1/$FILE2
killall server2 server3 client
exit | true |
4576897526f81c8b1e10b52fd35f7f99463e2655 | Shell | kieranjol/vimeo | /dvdall.sh | UTF-8 | 122 | 2.703125 | 3 | [] | no_license | #!/bin/bash -x
# Run dvd.sh on every .mov/.MOV file that lives alongside the file given as $1.
sourcepath="$(dirname "$1")"
for f in "${sourcepath}"/*.mov "${sourcepath}"/*.MOV
do
	# Robustness fix: when a glob matches nothing, bash passes the literal
	# pattern through (e.g. "dir/*.mov") and dvd.sh would be invoked on a
	# non-existent path; skip any match that does not actually exist.
	[ -e "$f" ] || continue
	./dvd.sh "$f"
done
95e4281d4d49aec3f86ccfd77e7094cbb685ccfd | Shell | temptemp3/transltr.sh | /transltr.sh | UTF-8 | 2,084 | 3.703125 | 4 | [
"BSD-2-Clause-Patent"
] | permissive | #!/bin/bash
## transltr
## version 0.0.1 - initial
##################################################
# Library bootstrap: every helper used below (error, cecho, build, commands)
# comes from the sh2 collection rooted at $SH2, which must be exported by the
# caller's environment before this script runs.
. ${SH2}/error.sh # error handling
error "true" # show errors
. ${SH2}/cecho.sh # colored echo
. ${SH2}/build.sh # builder
. ${SH2}/aliases/commands.sh # commands
# Print the character encoding of the given file (e.g. "utf-8", "us-ascii")
# as reported by file(1), with the "name:" prefix and whitespace stripped.
# NOTE(review): a path containing ':' would still confuse the cut; callers
# here only ever pass EN.txt / JP.txt.
file-mime-encoding() { { local infile ; infile="${1}" ; }
	# Fix: ${infile} was unquoted (SC2086); paths with spaces broke the call.
	file --mime-encoding "${infile}" | cut '-d:' '-f2' | sed 's/\s//g'
}
# Split the current join(1) output line (global $line) into its parts:
# sets global $ja to field 2 and global $en to fields 3 onward.
# NOTE(review): $line is echoed unquoted, so runs of whitespace collapse and
# glob characters would expand — fine for plain word lists, but worth
# confirming against the real JP.txt/EN.txt contents.
setup-translation() {
	ja=$( echo ${line} | cut '-d ' '-f2' )
	en=$( echo ${line} | cut '-d ' '-f3-' )
}
# Emit a sed substitution script on stdout, one "s/<english>/<japanese>/g"
# line per entry. JP.txt and EN.txt are paired on their first column by
# join(1); setup-translation splits each joined line into $ja and $en.
translate() {
	local line
	local en
	# Fix: the original declared `local js`, an unused typo for `ja` — the
	# variable actually assigned by setup-translation and used below.
	local ja
	join {JP,EN}.txt | while read -r line
	do
		setup-translation
		echo "s/${en}/${ja}/g"
	done
}
# Apply the generated sed script to one HTML file, writing the translated
# copy next to it as <basename>-jp.html.
# NOTE(review): relies on bash dynamic scoping — ${temp} (the sed script) and
# ${infile} are locals of the calling transltr-translate, not parameters.
transltr-translate-payload() {
	sed -f ${temp} ${infile} > $( dirname ${infile} )/$( basename ${infile} .html )-jp.html
}
# Translate each given HTML file using JP.txt/EN.txt as the dictionary.
# Arguments: one or more input .html files; each produces <name>-jp.html.
# Requires EN.txt and JP.txt, both utf-8, in the current directory.
transltr-translate() { { local infiles ; infiles="${@}" ; }
	local temp
	# Bug fix: the guards below used to run the whole message as a command
	# name (e.g. { "error EN.txt not present" ; false ; }), which only
	# printed "command not found", and `false` did not stop the function —
	# it carried on without its inputs. Also the second message wrongly said
	# "JP.txt" while checking EN.txt. Report to stderr and abort instead.
	test -f "EN.txt" || { echo "error: EN.txt not present" >&2 ; return 1 ; }
	test "$( file-mime-encoding EN.txt )" = "utf-8" || { echo "error: EN.txt not in utf-8" >&2 ; return 1 ; }
	test -f "JP.txt" || { echo "error: JP.txt not present" >&2 ; return 1 ; }
	test "$( file-mime-encoding JP.txt )" = "utf-8" || { echo "error: JP.txt not in utf-8" >&2 ; return 1 ; }
	# Create the temp file only after the guards so nothing leaks on abort.
	temp=$( mktemp )
	cecho yellow $( cp -v EN.txt ${temp}-en )
	cecho yellow $( cp -v JP.txt ${temp}-ja )
	translate > ${temp}
	local infile
	for infile in ${infiles}
	do
		echo ${infile}
		${FUNCNAME}-payload ${infile}
	done
	cecho yellow "$( rm -v ${temp}* )"
}
# Build entry point; `build` is provided by the sourced build.sh.
transltr-build() {
	build=build
	build true
}
# No-op subcommand; useful as a smoke test that dispatch works.
transltr-true() {
	true
}
# Print usage for the translate subcommand.
transltr-help() {
	cat << EOF
transltr
 transltr translate
 INPUT
  EN.txt English
  JP.txt Japanese
  1 - infile.html
 Output
  infile-jp.html
EOF
}
# Dispatcher: `commands` (from aliases/commands.sh) routes "transltr <cmd>"
# to the matching transltr-<cmd> function defined above.
transltr() {
	commands
}
##################################################
# NOTE(review): `[ ! ]` is test(1) with the single argument "!", a non-empty
# string, and is therefore always true — the else/exit branch below is dead
# code left by the create-stub2.sh generator.
if [ ! ]
then
	true
else
	exit 1 # wrong args
fi
##################################################
transltr ${@}
##################################################
## generated by create-stub2.sh v0.1.2
## on Mon, 08 Jul 2019 15:24:44 +0900
## see <https://github.com/temptemp3/sh2>
##################################################
| true |
21819c51ec3d87178e5e6552071f0eecd35422f8 | Shell | ThatHg/valheim-backup | /backup-valheim.sh | UTF-8 | 978 | 3.90625 | 4 | [] | no_license | #!/bin/bash
# Back up the Valheim worlds directory into a timestamped tarball.
# Usage: backup-valheim.sh [save_directory] [world_directory]
save_directory=$1
world_directory=$2
now=$(date +'%Y%m%d%H%M%S')
backup_filename="valheim_worlds_${now}.tar.gz"
if [ -z "$save_directory" ]
then
    save_directory=$(pwd)
    # Bug fix: the message interpolated $save_folder, an undefined variable,
    # so the path always printed empty.
    echo "Save directory not set, using current pwd ($save_directory)."
fi
if [ -z "$world_directory" ]
then
    # Bug fix: the default was the literal string "~/.config/..." — tilde is
    # not expanded inside quotes, so the -d test below always failed and the
    # script exited whenever the argument was omitted. Use $HOME explicitly.
    world_directory="$HOME/.config/unity3d/IronGate/Valheim/worlds"
    echo "World directory not set, using default ($world_directory)."
fi
if [ ! -d "$save_directory" ]
then
    echo "Save directory $save_directory DOES NOT exists."
    echo "Usage: $0 [save_directory] [world_directory]"
    exit 1
fi
if [ ! -d "$world_directory" ]
then
    echo "World directory $world_directory DOES NOT exists."
    echo "Usage: $0 [save_directory] [world_directory]"
    exit 1
fi
# Abort rather than writing the tarball to the wrong place if the cd fails.
cd "$save_directory" || exit 1
tar -czf "$backup_filename" "$world_directory" > /dev/null 2>&1
if [ $? -eq 0 ]; then
    echo "Backup SUCCESS, created at: ${save_directory}/${backup_filename}"
else
    echo "Backup FAILED"
fi
27779e371de4c6d8a80abdcd144570cbb33cd6f5 | Shell | anyu/bash-scripts | /img-sweep.sh | UTF-8 | 1,342 | 4.59375 | 5 | [] | no_license | #!/usr/bin/env bash
# Script for batch organizing photos
# Strict mode: abort on errors, unset variables, and pipeline failures.
set -euo pipefail
# Label embedded in renamed files; first CLI argument, defaulting to "img".
NAME=${1:-img}
# Orchestrate the sweep: create today's destination folder, then rename the
# media files. Each step is `|| true` so one failure does not abort the whole
# run under `set -e`.
main() {
  create_new_dir || true
  rename_files || true
}
# Prepare the sweep: cd into ~/Downloads, count its visible (non-hidden)
# top-level entries, and create today's YYYYMMDD destination directory.
# Sets globals: initial_item_count, num_modified, new_dir.
create_new_dir() {
  cd "$HOME"/Downloads
  initial_item_count=$(find . -maxdepth 1 -mindepth 1 -not -path '*/\.*' | wc -l)
  num_modified=0
  new_dir=$(date +"%Y%m%d")
  # Fix: quote the name (SC2086) and use -p so a second run on the same day
  # no longer fails because the directory already exists.
  mkdir -p -- "$new_dir"
}
# Move every JPEG/JPG/HEIC/MOV file in the current directory (extension
# matched case-insensitively) into $new_dir, renamed to
# <mtime-date>_<NAME>-<random>.<ext>.
# Reads globals: initial_item_count, new_dir, NAME; updates: num_modified.
rename_files() {
  echo -e "\nNumber of items in Downloads: $initial_item_count"
  for file in *; do
    file_type=${file##*.}
    shopt -s nocasematch
    if [[ $file_type == 'JPEG'* ]] || [[ $file_type == 'JPG'* ]] || [[ $file_type == 'HEIC'* ]] || [[ $file_type == 'MOV'* ]]; then
      date_modified=$(date -r "$file" +%Y%m%d)
      # Fix: the new name hard-coded the literal date 2022_02_26 while
      # $date_modified was computed and never used; stamp each file with its
      # own modification date instead.
      new_name=${date_modified}_${NAME}-$RANDOM.${file_type}
      if [[ ! -e ~/Downloads/$new_dir/$new_name ]]; then
        ((num_modified++))
        echo "Renaming $file to $new_name..."
        mv -i "$file" "$new_dir"/"$new_name" # -i prompt as a redundant safety check
      else
        echo "$new_name already exists. Skipping rename."
      fi
    else
      echo "$file is not a JPEG, HEIC, or MOV. Skipping rename."
    fi
    # Fix: always restore nocasematch; previously it was only reset on the
    # matching branch, so the shell option could leak past this function.
    shopt -u nocasematch
  done
  echo -e "\\nModified $num_modified files\\n"
}
# Run the script; arguments are forwarded, though main itself does not read
# them (NAME is taken from $1 at the top of the file).
main "${@}"
# Considered commands:
# ls | sort -n Sort numerically
# mv -vn Move with no-clobber + verbose; exits 0
| true |
b36fe231557d03c73b4cfd78cc2c12a2ecf85b86 | Shell | cyberpower678/freenas | /src/freenas/usr/local/etc/rc.d/smartd-daemon | UTF-8 | 1,457 | 3.625 | 4 | [] | no_license | #!/bin/sh
# $FreeBSD$
#
# PROVIDE: smartd-daemon
# REQUIRE: LOGIN
# KEYWORD: shutdown nojail
#
# Define these smartd_daemon_* variables in one of these files:
# /etc/rc.conf
# /etc/rc.conf.local
# /etc/rc.conf.d/smartd_daemon
#
# DO NOT CHANGE THESE DEFAULT VALUES HERE
# Pull in the FreeBSD rc.subr framework (load_rc_config, run_rc_command, ...).
. /etc/rc.subr
name=smartd_daemon
rcvar=smartd_daemon_enable
load_rc_config smartd_daemon
# Default: disabled unless smartd_daemon_enable is set in rc.conf.
: ${smartd_daemon_enable:="NO"}
# The config file doubles as a required file and as the -c argument below.
required_files=${smartd_daemon_config:="/usr/local/etc/smartd.conf"}
pidfile=${smartd_daemon_pidfile:="/var/run/smartd-daemon.pid"}
command="/usr/local/sbin/smartd"
command_args="-c ${required_files} -p ${pidfile}"
# Extra rc subcommands beyond start/stop/status, with their handlers.
extra_commands="reload report"
reload_cmd="smartd_daemon_reload"
report_cmd="smartd_daemon_report"
start_precmd=smartd_daemon_prestart
start_cmd=smartd_daemon_start
smartd_daemon_prestart()
{
	# Refuse a -p anywhere in smartd_daemon_flags: the pidfile is managed
	# through smartd_daemon_pidfile, and a second -p would conflict with it.
	# "*-p*" also covers flags that start with -p.
	case "${smartd_daemon_flags}" in
	*-p*)
		err 1 'smartd_daemon_flags includes the -p option, use smartd_daemon_pidfile instead'
		;;
	esac
}
smartd_daemon_start()
{
	# Announce startup (check_startmsgs honours rc quiet mode), then launch
	# smartd under daemon(8); -f detaches it from the controlling terminal.
	check_startmsgs && echo "Starting $name."
	# NOTE(review): the -n after ${command} is passed to smartd itself,
	# presumably to keep it in the foreground for daemon(8) -- confirm
	# against smartd(8).
	/usr/sbin/daemon -f ${command} -n ${smartd_daemon_flags} ${command_args}
}
smartd_daemon_reload()
{
	# Make sure the daemon is actually running before signalling it; the
	# captured status text doubles as the error message on failure.
	local status
	status=`run_rc_command status 2>&1` || {
		echo $status
		return 1
	}
	echo 'Reloading smartd.'
	# SIGHUP makes smartd re-read smartd.conf.
	kill -HUP $rc_pid
}
smartd_daemon_report()
{
	# Verify the service is running; on failure surface the status text and
	# bail out with an error.
	local status
	status=`run_rc_command status 2>&1` || {
		echo $status
		return 1
	}
	echo 'Checking SMART devices now.'
	# SIGUSR1 asks smartd to run its device checks immediately.
	kill -USR1 $rc_pid
}
# Hand control to rc.subr to dispatch the requested method
# (start/stop/status/reload/report/...).
run_rc_command "$1"
| true |
66110547ec9d9167d19f51ad6ed87f457ee383c6 | Shell | image-tester/mongohq-cli | /install.sh | UTF-8 | 2,463 | 3.984375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
#
# For installing the MongoHQ CLI tool
#
installMongoHQCli() {
  # Download the platform-specific mongohq binary, verify its SHA-1 checksum
  # against the hosted checksum file, and install it to /usr/local/bin.
  workingdir="/tmp/mongohq-cli"
  mkdir -p "$workingdir"
  # Map uname output onto the build matrix (os: linux|darwin, arch: amd64|386)
  # once, instead of repeating one curl pair per platform. case is POSIX,
  # unlike the [[ ]] the original used under #!/bin/sh.
  case `uname -s` in
    Linux)  os="linux" ;;
    Darwin) os="darwin" ;;
    *)
      echo "We currently only build the CLI for Linux and MacOSX. To request builds for another platform, email support@mongohq.com."
      exit 1
      ;;
  esac
  case `uname -m` in
    x86_64) arch="amd64" ;;
    *)      arch="386" ;;
  esac
  baseurl="https://mongohq-cli.s3.amazonaws.com/builds/master/$os/$arch"
  curl "$baseurl/mongohq" -o "$workingdir/mongohq"
  curl "$baseurl/checksum" -o "$workingdir/checksum"
  # Refuse to install a binary whose checksum does not match the published one.
  hostedChecksum=`cat "$workingdir/checksum"`
  localChecksum=`shasum "$workingdir/mongohq" | awk '{print $1}'`
  if [ "$hostedChecksum" != "$localChecksum" ]; then
    echo "Could not validate checksum of binary. Please try again."
    exit 1
  fi
  chmod 555 "$workingdir/mongohq"
  # Install without sudo when the target is already writable by this user.
  if [ -w "/usr/local/bin/mongohq" ]; then
    mv "$workingdir/mongohq" /usr/local/bin/mongohq
  else
    echo "Please enter your sudo password to move the document to /usr/local/bin/mongohq:"
    sudo mv "$workingdir/mongohq" /usr/local/bin/mongohq
  fi
  echo ""
  echo "Install complete. To get started, run:"
  echo ""
  echo "  mongohq deployments "
  echo "  mongohq --help "
  echo ""
  echo "This application is still in beta, and still actively changing. Please test appropriately."
  echo ""
  echo "For documentation on the CLI, please see: http://docs.mongohq.com/getting-started/cli.html"
}
# Banner, then run the installer defined above.
echo ""
echo ""
echo "We are installing the MongoHQ CLI to /usr/local/bin/mongohq. The open sourced code for the CLI is available at https://github.com/MongoHQ/mongohq-cli."
echo ""
echo ""
installMongoHQCli
| true |
07049f73f0d7b368d936b17f05e9e1f2093d0017 | Shell | ahemaid/vocol | /VoCol Archive/Vagrant/BoxFiles/bootstrap.sh | UTF-8 | 6,040 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# update system and install tools like below :
sudo apt-get update
sudo apt-get --yes install apache2
sudo apt-get -y install wget
sudo apt-get -y install curl
sudo apt-get -y install unzip
# TODO on SUSE: zypper install java-1_7_0-openjdk-devel
# TODO on SLES: first run zypper addrepo http://download.opensuse.org/repositories/Java:/Factory/SLE_11_SP3/Java:Factory.repo
sudo apt-get -y install openjdk-7-jdk
# TODO on SLES: first run zypper addrepo http://download.opensuse.org/repositories/devel:/tools:/scm/SLE_11_SP3/devel:tools:scm.repo
sudo apt-get -y install git
sudo apt-get -y install python
sudo apt-get -y install python-pip
sudo apt-get -y install make
# TODO on SUSE: zypper install python-PyGithub
# TODO on SLES: first run zypper addrepo http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo
sudo pip install PyGithub
# TODO at some point, when no root permission is necessary any more, execute the further commands as user "vagrant"
# setup the system variables
export LC_ALL=en_US.UTF-8
export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64/
echo "export LC_ALL=en_US.UTF-8" >> .bashrc
echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64/" >> .bashrc
echo "export JENAROOT=/usr/local/bin/apache-jena-2.12.0" >> .bashrc
echo "export PATH=$PATH:/usr/local/bin/provToolbox/bin:/usr/local/bin/apache-jena-2.12.0/bin" >> .bashrc
echo "update-alternatives --set java /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java" >> .bashrc
#download and unzip appache-jena
curl -O http://apache.websitebeheerjd.nl//jena/binaries/apache-jena-2.12.1.zip
unzip apache-jena-2.12.1.zip
#install latest version of rapper
curl -O http://download.librdf.org/source/raptor2-2.0.15.tar.gz
tar -zxvf raptor2-2.0.15.tar.gz
cd raptor2-2.0.15
# TODO on SLES: first run zypper addrepo http://download.opensuse.org/repositories/home:/tanty:/openSUSEBackports/SLE_11_SP3/home:tanty:openSUSEBackports.repo
sudo apt-get install libxml2-dev libxslt1-dev python-dev
./configure
make
sudo make install
sudo apt-get -y install raptor2-utils
cd ..
#download and unzip google_appengine
curl -O https://storage.googleapis.com/appengine-sdks/featured/google_appengine_1.9.17.zip
unzip google_appengine_1.9.17.zip
#clone repositories like below
git clone https://github.com/rvguha/schemaorg.git
git clone https://github.com/mobivoc/mobivoc.git
git clone https://github.com/mobivoc/vocol.git
#download and unzip Jena Fuseki
curl -O http://mirror.dkd.de/apache//jena/binaries/jena-fuseki-1.1.1-distribution.tar.gz
tar xf jena-fuseki-1.1.1-distribution.tar.gz
#move libraries of appache-jena to src folder of HTML Documentation Generator
sudo mv apache-jena-2.12.1/lib/* vocol/HtmlGenerator/src/
#go to java source file of HTML Documentation Generator
cd vocol/HtmlGenerator/src/
#Compile HTML Documentation Generator
sudo javac -cp .:jena-arq-2.12.1.jar:jena-core-2.12.1.jar:jena-iri-1.1.1.jar:log4j-1.2.17.jar:slf4j-api-1.7.6.jar:xercesImpl-2.11.0.jar:xml-apis-1.4.01.jar HtmlGenerator.java
#run HTML Documentation Generator (now done by vocolJob.sh; see below)
# sudo java -cp .:jena-arq-2.12.1.jar:jena-core-2.12.1.jar:jena-iri-1.1.1.jar:log4j-1.2.17.jar:slf4j-api-1.7.6.jar:xercesImpl-2.11.0.jar:xml-apis-1.4.01.jar HtmlGenerator ~/mobivoc/ChargingPoints.ttl ~/schemaorg/data/schema.rdfa ~/vocol/HtmlGenerator/Templates/template.html ~/schemaorg/docs/schemas.html ~/vocol/HtmlGenerator/Templates/schemasTemplate.html
#sudo java -cp "*:." vocol.HtmlGenerator.src.HtmlGenerator /home/vagrant/mobivoc/ChargingPoints.ttl /home/vagrant/schemaorg/data/schema.rdfa //home/vagrant/vocol/HtmlGenerator/Templates/template.html /home/vagrant/schemaorg/docs/schemas.html /home/vagrant/vocol/HtmlGenerator/Templates/schemasTemplate.html
#Configuring Apache
# TODO on SUSE the virtual hosts are in /etc/apache2/vhosts.d. On a running system, where the VHost configuration file exists already, it should be _adapted_ rather than overwritten.
sudo rm /etc/apache2/sites-enabled/000-default
sudo cp /home/vagrant/vocol/Vagrant/Apache/000-default.conf /etc/apache2/sites-enabled/000-default.conf
sudo a2enmod proxy
sudo a2enmod proxy_http
sudo a2enmod rewrite
# TODO also make sure that apache2 service is started on system startup (e.g. in the runlevel configuration)
sudo /etc/init.d/apache2 restart
#add a cronjob to excecute every 5 min
sudo apt-get -y install gnome-schedule
cat <(crontab -l) <(echo "*/5 * * * * bash $HOME/vocol/vocolJob.sh") | crontab -
#run Schema.org through Google_AppEngine
# TODO if you had to install Python 2.7 manually, you may have to run the *.py script by explicitly invoking "python27".
# Manual installation of Python 2.7 requires packages sqlite3-devel
# TODO instead of starting Google App Engine _here_ only, make sure that it is started on system startup, e.g. by an init script. Many distributions have an init script for "local services to start after everything else has been started"; e.g. on SUSE it's /etc/init.d/after.local. There, put something like the following:
# sudo -u mobivoc -i python2.7 ~mobivoc/google_appengine/dev_appserver.py ~mobivoc/schemaorg/app.yaml --skip_sdk_update_check &
# TODO for a perfect configuration (Christoph was too lazy for this) we should also shut down Google App Engine (e.g. by killing the Python process) from /etc/init.d/halt.local (that's the path on SUSE).
#go to java source file of HTML Documentation Generator
cd ~/jena-fuseki-1.1.1/
sudo chmod +x fuseki-server s-*
sudo touch /etc/init.d/RunMobivocTools
sudo chmod +x /etc/init.d/RunMobivocTools
sudo sh -c 'echo "/home/vagrant/google_appengine/dev_appserver.py /home/vagrant/schemaorg/app.yaml --skip_sdk_update_check &" >> /etc/init.d/RunMobivocTools'
sudo sh -c 'echo "FUSEKI_HOME=/home/vagrant/jena-fuseki-1.1.1 /home/vagrant/jena-fuseki-1.1.1/fuseki-server --update --file=/home/vagrant/mobivoc/ChargingPoints.ttl /myDataset &" >> /etc/init.d/RunMobivocTools'
sudo update-rc.d RunMobivocTools defaults
bash /etc/init.d/RunMobivocTools
| true |
3d317ed3eee25d15dbe7c6beaafd6e439afb7798 | Shell | weaver-sharethis/dotfiles | /bin/common/functions.sh | UTF-8 | 15,479 | 2.640625 | 3 | [] | no_license | cask_apps=(
# Browsers
brave-browser # Brave is so brave.
firefox # Firefox.
google-chrome # Google Chrome browser.
microsoft-edge # Microsoft Edge browser. By Microsoft.
# Communications
discord # Freeware chat/VoIP app, primarily for video game communities.
slack # Communications platform.
telegram-desktop # Secure instant messaging.
whatsapp # Simple, secure messaging with free phone calling.
zoomus # Video conferencing software.
# Dev
cyberduck # File transfers (FileZilla is evil)
docker # OS-level virtualization for containers.
gas-mask # Hosts file manager.
iterm2 # The best terminal software for Mac.
kitematic # Docker GUI.
"local" # WordPress development tool.
jetbrains-toolbox # JetBrains tools manager (mainly for PHPStorm).
phpstorm # The best PHP IDE there is.
postman # API interaction tool.
sequel-pro # The best database management tool.
sublime-text # Sublime Text is a cross-platform source code editor with a Python.
trailer # Github workflow menubar app.
visual-studio-code # Source code editor developed by Microsoft.
# Productivity
1password # Digital password manager and vault.
1password-cli # Command line tool for 1Password.
alfred # Spotlight replacement and productivity tool.
bartender # Organize your menu bar icons (NOTE: Dozer is a free alternative).
charles # HTTP proxy monitor. See all the traffic.
# rectangle # Move/resize windows. Based on Spectacle / written in Swift.
spectacle # Move/resize windows.
timing # Automatic time tracking.
tripmode # Controls which apps can access Internet connection.
# Misc
bitbar # Put the output from any script/program in your Mac OS X Menu Bar.
gfxcardstatus # Menu bar app to visualize current GPU and memory hogs.
google-photos-backup-and-sync # Google Photos backup and sync manager.
horos # Free, open medical image viewer.
minecraft # Minecraft game. Sometimes I need a mental break.
omnidisksweeper # Quickly find large, unwanted files and destroy them (manually).
steam # Steam gaming platform.
transmission # Free, open torrent client.
# Security
backblaze # Backup software.
malwarebytes # Anti-virus.
private-internet-access # VPN software.
# Video
vlc # Free, open cross-platform media player.
)
mac_store_apps=(
# 918207447 # Annotate - Capture and Share
409789998 # Twitter
)
brew_apps=(
awscli # Official Amazon AWS command-line interface.
awslogs # awslogs is a simple command line tool for querying Amazon CloudWatch logs.
composer # Dependency manager for PHP.
docker-compose # Isolated dev environments using Docker.
fd # Simple, fast and user-friendly alternative to find.
figlet # FIGlet is a program for making large letters out of ordinary text.
go # Golang (Open source programming language).
jq # jq is a lightweight and flexible command-line JSON processor.
mas # Mac App Store command-line interface.
node # Node.js. A platform built on V8 for network applications.
oracle-sdk # Java Software Development Kit (for running things like Minecraft)
php # PHP (Latest).
php@7.2 # PHP (7.2).
python # Python.
ruby # Ruby.
tldr # Simplified and community-driven man pages.
tree # Display directory trees.
watchman # Watch files and take action when they change.
wget # Internet file retriever (curl alternative).
zsh # UNIX Shell. Way better than Bash.
)
# =====
# Display a colored "section" header
# =====
section_header() {
  # Print a blank line, "=== <title> ===" wrapped in the magenta/normal
  # colour escapes, and another blank line.
  local title="$1"
  printf '\n%s===%s %s %s===%s\n\n' "${magenta}" "${normal}" "${title}" "${magenta}" "${normal}"
}
# =====
# Add dotfiles function
# =====
dotfiles() {
  # Run git against the bare dotfiles repo with $HOME as the work tree.
  # BUGFIX: "$@" must be quoted (it was a bare $@), otherwise multi-word
  # arguments are re-split, e.g.  dotfiles commit -m "two words"
  /usr/bin/git --git-dir="$HOME/.dotfiles/" --work-tree="$HOME" "$@"
}
# =====
# Generate SSH Key
# =====
generate_ssh_key() {
read -p "Shall we generate an SSH key? (${ul}Y${normal}|n)" setup_create_ssh_key
setup_create_ssh_key=${setup_create_ssh_key:-y}
if [[ ${setup_create_ssh_key} == "yes" ]] || [[ ${setup_create_ssh_key} == "Y" ]] || [[ ${setup_create_ssh_key} == "y" ]]; then
ssh-keygen -t rsa
echo "Please take the above output and add it to your GitHub/GitLab accounts."
echo "${blue}${ul}https://github.com/settings/keys${normal}"
echo "${blue}${ul}https://gitlab.com/profile/keys${normal}"
read -p "Press [Enter] to continue when you're ready..."
else
echo "Skipping..."
fi
}
# =====
# Generate SSH Key
# =====
set_vim_up() {
echo "Installing vim-plug (Vim plugin manager)..."
curl -fLo ~/.vim/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
}
# =====
# XCode installation
# =====
install_xcode() {
read -p "Shall we run XCode install? (${ul}Y${normal}|n)" setup_xcode_install
setup_xcode_install=${setup_xcode_install:-y}
if [[ ${setup_xcode_install} == "yes" ]] || [[ ${setup_xcode_install} == "Y" ]] || [[ ${setup_xcode_install} == "y" ]]; then
echo "Setting up XCode. This may require your password..."
sudo xcode-select -s /Applications/Xcode.app/Contents/Developer
else
echo "Skipping..."
fi
}
# =====
# XCode CLI Tools installation
# =====
install_xcode_cli_tools() {
read -p "Shall we install XCode command line tools? (${ul}Y${normal}|n)" setup_xcode_cli_install
setup_xcode_cli_install=${setup_xcode_cli_install:-y}
if [[ ${setup_xcode_cli_install} == "yes" ]] || [[ ${setup_xcode_cli_install} == "Y" ]] || [[ ${setup_xcode_install} == "y" ]]; then
echo "Setting up XCode command line tools..."
if xcode-select -p &>/dev/null; then
echo "XCode command line tools already installed! Skipping..."
else
echo "XCode command line tools not found. Installing..."
xcode-select --install
fi
else
echo "Skipping..."
fi
}
# =====
# Update Homebrew
# =====
update_homebrew() {
echo "Updating Homebrew..."
}
# =====
# Install and configure Homebrew
# =====
install_and_configure_homebrew() {
echo "Checking to see if we have Homebrew installed..."
if command -v brew &>/dev/null; then
echo "Homebrew installed. Skipping installation..."
else
echo "Homebrew not found. Installing..."
bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
fi
update_homebrew
}
# =====
# Install and configure Git
# =====
install_and_configure_git() {
if command -v git &>/dev/null; then
echo "Git command found. Skipping..."
else
echo "Git not found. Installing..."
brew install git
echo "Set Git default config values..."
git config --global user.name $setup_name
git config --global user.email $setup_email
git config --global alias.logline 'log --graph --pretty=format:'"'"'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset'"'"' --abbrev-commit'
git config --global alias.line '!git diff --numstat HEAD~ | awk '"'"'{a+=$1;next;}; END{print a;}'"'"''
echo "Installing Brew Git utilities..."
brew install git-extras
brew install git-flow
fi
}
# =====
# Install Homebrew baseline apps
# =====
install_brew_baseline_apps() {
echo "Install Brew baseline apps..."
brew install ${brew_apps[@]}
# Link kegs
echo "Linking Brew kegs..."
brew link --overwrite docker-compose
brew link libimobiledevice
brew link --overwrite php@7.2
brew link --overwrite python
}
# =====
# Update Homebrew baseline apps
# =====
update_brew_baseline_apps() {
echo "Update Brew baseline apps..."
brew upgrade ${brew_apps[@]}
brew link --overwrite docker-compose
}
# =====
# Install Dotfiles
# =====
install_dotfiles() {
read -p "Shall we run install Dotfiles? (${ul}Y${normal}|n)" setup_install_dotfiles
setup_install_dotfiles=${setup_install_dotfiles:-y}
if [[ ${setup_install_dotfiles} == "yes" ]] || [[ ${setup_install_dotfiles} == "Y" ]] || [[ ${setup_install_dotfiles} == "y" ]]; then
echo "Installing Dotfiles from repository..."
cd ~
git clone --bare --recursive git@github.com:$setup_github_user/dotfiles.git .dotfiles
dotfiles checkout
else
echo "Skipping..."
fi
}
# =====
# Update Dotfiles
# =====
update_dotfiles() {
read -p "Shall we update Dotfiles? (${ul}Y${normal}|n)" setup_update_dotfiles
setup_update_dotfiles=${setup_update_dotfiles:-y}
if [[ ${setup_update_dotfiles} == "yes" ]] || [[ ${setup_update_dotfiles} == "Y" ]] || [[ ${setup_update_dotfiles} == "y" ]]; then
echo "Pulling latest down from Dotfiles repository..."
cd ~
dotfiles pull
else
echo "Skipping..."
fi
}
# =====
# Install Gulp
# =====
install_gulp() {
if command -v gulp &>/dev/null; then
echo "Gulp command found. Skipping..."
else
echo "Gulp command not found. Installing..."
npm i -g gulp-cli
fi
}
# =====
# Install Pure Prompt
# =====
install_pure_prompt() {
if npm list -g pure-prompt --depth=0 &>/dev/null; then
echo "Pure prompt found. Skipping..."
else
echo "Pure prompt not found. Installing..."
npm i -g pure-prompt
fi
}
# =====
# Install ColorLS
# =====
install_colorls() {
echo "ColorLS most likely needs your password."
if sudo gem list colorls | grep colorls &>/dev/null; then
echo "ColorLS found. Skipping..."
else
echo "ColorLS not found. Installing..."
sudo gem install colorls
fi
}
# =====
# Install CocoaPods
# =====
install_cocoapods() {
echo "CocoaPods most likely needs your password."
if sudo gem list cocoapods | grep cocoapods &>/dev/null; then
echo "CocoaPods found. Skipping..."
else
echo "CocoaPods not found. Installing..."
sudo gem install -n /usr/local/bin cocoapods
fi
}
# =====
# Install OhMyZSH
# =====
install_ohmyzsh() {
read -p "Shall we run install Oh My ZSH? (${ul}Y${normal}|n)" setup_install_ohmyzsh
setup_install_ohmyzsh=${setup_install_ohmyzsh:-y}
if [[ ${setup_install_ohmyzsh} == "yes" ]] || [[ ${setup_install_ohmyzsh} == "Y" ]] || [[ ${setup_install_ohmyzsh} == "y" ]]; then
echo "Running installer..."
cd ~
bash -c .oh-my-zsh/tools/install.sh
else
echo "Skipping..."
fi
if echo $SHELL | grep -i zsh &>/dev/null; then
echo "ZSH already set as shell. Skipping..."
else
echo "Setting ZSH as shell..."
chsh -s /bin/zsh
fi
}
# =====
# Update OhMyZSH
# =====
update_ohmyzsh() {
read -p "Shall we upgrade Oh My ZSH? (${ul}Y${normal}|n)" setup_upgrade_ohmyzsh
setup_upgrade_ohmyzsh=${setup_upgrade_ohmyzsh:-y}
if [[ ${setup_upgrade_ohmyzsh} == "yes" ]] || [[ ${setup_upgrade_ohmyzsh} == "Y" ]] || [[ ${setup_upgrade_ohmyzsh} == "y" ]]; then
echo "Running upgrader..."
cd ~
env ZSH="$ZSH" sh "$ZSH/tools/upgrade.sh"
command rm -rf "$ZSH/log/update.lock"
else
echo "Skipping..."
fi
}
# =====
# Install Mac Apps via Homebrew Cask
# =====
function install_mac_apps() {
read -p "Shall we install applications? (${ul}Y${normal}|n)" setup_install_apps
setup_install_apps=${setup_install_apps:-y}
if [[ ${setup_install_apps} == "yes" ]] || [[ ${setup_install_apps} == "Y" ]] || [[ ${setup_install_apps} == "y" ]]; then
read -p "Where would you like to install applications? [${ul}/Applications/${normal}] " setup_app_dir
setup_app_dir=${setup_app_dir:-/Applications/}
echo "Installing applications to ${setup_app_dir}..."
brew cask install --appdir=$setup_app_dir ${cask_apps[@]}
brew cask alfred link
else
echo "Skipping..."
fi
}
# =====
# Update Mac Apps via Homebrew Cask
# =====
function update_mac_apps() {
read -p "Shall we update applications? (${ul}Y${normal}|n)" setup_update_apps
setup_update_apps=${setup_update_apps:-y}
if [[ ${setup_update_apps} == "yes" ]] || [[ ${setup_update_apps} == "Y" ]] || [[ ${setup_update_apps} == "y" ]]; then
read -p "Where would you like to install applications? [${ul}/Applications/${normal}] " setup_app_dir
setup_app_dir=${setup_app_dir:-/Applications/}
echo "Installing applications to ${setup_app_dir}..."
brew cask install --appdir=$setup_app_dir ${cask_apps[@]}
echo "Updating out of date casks..."
brew upgrade --cask
else
echo "Skipping..."
fi
}
# =====
# Install Mac App Store apps via Homebrew Cask
#
# NOTE: These must have already been installed/purchased.
# =====
function install_mac_store_apps() {
read -p "Shall we install App Store applications? (${ul}Y${normal}|n)" setup_install_macapps
setup_install_macapps=${setup_install_macapps:-y}
if [[ ${setup_install_macapps} == "yes" ]] || [[ ${setup_install_macapps} == "Y" ]] || [[ ${setup_install_macapps} == "y" ]]; then
echo "Installing App Store applications..."
mas install ${mac_store_apps[@]}
else
echo "Skipping..."
fi
}
# =====
# Update Mac App Store apps via Homebrew Cask
#
# NOTE: These must have already been installed/purchased.
# =====
function update_mac_store_apps() {
read -p "Shall we update App Store applications? (${ul}Y${normal}|n)" setup_update_macapps
setup_update_macapps=${setup_update_macapps:-y}
if [[ ${setup_update_macapps} == "yes" ]] || [[ ${setup_update_macapps} == "Y" ]] || [[ ${setup_update_macapps} == "y" ]]; then
echo "Installing and upgrading App Store applications..."
mas install ${mac_store_apps[@]}
mas upgrade
else
echo "Skipping..."
fi
}
# =====
# Set Mac preferences
# =====
function set_mac_preferences() {
echo "Disable Dashboard..."
defaults write com.apple.dashboard mcx-disabled -boolean YES
echo "Set Dock autohide..."
osascript -e 'tell application "System Events" to set the autohide of the dock preferences to true'
echo "Restarting Dock..."
killall Dock
}
# =====
# Update Dotfile submodules
# =====
function update_dotfile_submodules() {
echo "Updating Dotfile submodules..."
dotfiles submodule update --init --recursive
} | true |
aa55b1fbbae4b6b1e6295d0f8c56bc10cc5dcef4 | Shell | slabasan/dotfiles | /.byobu/usr/lib/byobu/release | UTF-8 | 1,769 | 3.65625 | 4 | [] | no_license | #!/bin/sh -e
#
# release: grab the os/distro release
# Copyright (C) 2008 Canonical Ltd.
#
# Authors: Dustin Kirkland <kirkland@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
PKG="byobu"
color 2>/dev/null || color() { true; }
if [ "$1" = "--detail" ]; then
cat /etc/issue
exit 0
fi
if [ -n "$DISTRO" ]; then
# skip down to the bottom
true
elif which lsb_release >/dev/null 2>&1; then
if [ "$1" = "--detail" ]; then
lsb_release -a 2>/dev/null
exit 0
fi
# If lsb_release is available, use it
r=$(lsb_release -s -d)
if echo "$r" | grep -qs "^Ubuntu .*\..*\..*$"; then
# Use the -d if an Ubuntu LTS
DISTRO="$r"
else
# But for other distros the description
# is too long, so build from -i and -r
i=$(lsb_release -s -i)
r=$(lsb_release -s -r)
DISTRO="$i $r"
fi
elif [ -r "/etc/palm-build-info" ]; then
# Palm Pre hack
DISTRO=$(grep "^PRODUCT_VERSION_STRING=Palm " /etc/palm-build-info | sed "s/^.*=Palm //")
elif [ -r "/etc/issue" ]; then
# Otherwise, grab part of /etc/issue, ideally the distro and version
DISTRO=$(grep -m1 "^[A-Za-z]" /etc/issue | sed "s/ [^0-9]* / /" | awk '{print $1 " " $2}')
else
DISTRO="Byobu"
fi
printf "$(color bold2)%s$(color -) " "$DISTRO"
| true |
f43377fd73c8e482fdd7beb6fc996f652db13b61 | Shell | lovejatps/linux | /static_ID.sh | UTF-8 | 843 | 3.3125 | 3 | [] | no_license | #!/bin/bash
##########################################
#
# Configure a static IP on an interface by rewriting its ifcfg file.
#
# Arguments: eth0 ip gateway netmask dns1 dns2
#   $1 interface name, $2 IP address, $3 gateway,
#   $4 netmask (optional), $5/$6 DNS servers (optional)
#
##########################################
p='/etc/sysconfig/network-scripts/ifcfg-'
a='ifcfg-'
b=$a$1
# Current MAC address, taken from the "link/ether ..." line of `ip addr`.
mac=`ip addr | egrep 'link/ether\s+(.+)\s+brd.+$' | sed 's/^\s*//g' | sed 's/\s*$//g' | cut -d' ' -f2`
# Work on a local copy of the interface config.
cp "$p$1" "$b"
sed -i 's/^BOOTPROTO=.*$/BOOTPROTO="static"/' "$b"
# Strip the settings we are about to re-add.
sed -i 's/^HWADDR=.*//g' "$b"
sed -i 's/^MACADDR=.*//g' "$b"
sed -i 's/IPADDR.*//g' "$b"
sed -i 's/GATEWAY=.*//g' "$b"
sed -i 's/DNS1=.*//g' "$b"
sed -i 's/DNS2=.*//g' "$b"
echo "HWADDR=$mac" >> "$b"
echo "MACADDR=$mac" >> "$b"
echo "IPADDR=$2" >> "$b"
echo "GATEWAY=$3" >> "$b"
# BUGFIX: the original tests were written as [ $4!='' ], which the shell
# sees as a single non-empty word and is therefore ALWAYS true, so empty
# NETMASK=/DNS1=/DNS2= lines were appended even without the arguments.
if [ -n "$4" ]
then
echo "NETMASK=$4" >> "$b"
fi
if [ -n "$5" ]
then
echo "DNS1=$5" >> "$b"
fi
if [ -n "$6" ]
then
echo "DNS2=$6" >> "$b"
fi
cat "$b"
# Install the rewritten config and apply it.
rm -f "$p$1"
cp -f "$b" "$p$1"
service network restart
| true |
58d0b4e31fbec99fb2e0d529ff7b7201a2e8c0c9 | Shell | danssion/txtTips | /linux_doc/doc/makeEnv/script/mcache.server | UTF-8 | 1,644 | 3.609375 | 4 | [] | no_license | #!/bin/sh
export LD_LIBRARY_PATH=$HOME/dev/lib/
ports="11211"
if [ x$2 = 'x' ]
then
if [ x$1 = 'x' ]
then
echo "Usage: $0 (start|stop|restart|status)"
exit;
fi
for port in $ports; do
$0 $1 $port
done
exit;
fi
port=$2
pid_file=$HOME/dev/log/memcached.$port.pid
user=$USER
bin_file=$HOME/dev/bin/memcached
stat_file=$HOME/dev/bin/memstat
tnum=0
test -f $pid_file && pid=`cat $pid_file` && test -n $pid && tnum=`ps -A | grep "^ *$pid" | wc -l`;
case "$1" in
start)
if [ $tnum = "1" ]
then
echo "Memcached @$port is running"
else
echo "Starting Memcached @$port"
$bin_file -d -p $port -U $port -P $pid_file -m128 -t1
fi
;;
stop)
if [ $tnum = "1" ]
then
echo "Shutting down Memcached @$port (pid:$pid)"
kill -3 $pid
rm $pid_file
else
echo "Memcached @$port is not running"
fi
;;
status)
if [ $tnum = "1" ]
then
$stat_file --servers=localhost:$port
else
echo "Memcached @$port is not running"
fi
;;
restart)
$0 stop $2
sleep 1
$0 start $2
;;
*)
echo "Usage: $0 (start|stop|restart|status)"
;;
esac
| true |
fe41f3475d30e57e49d7fe7f88c3bf3ca45cea08 | Shell | gbarrand/osc_vis_for_LHCb | /osc/amanip/mgr/build | UTF-8 | 521 | 3.203125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/sh -f
#set -x
build_args=
while test $# -ge 1 ; do build_args="${build_args} $1";shift; done
inlib_mgr=../../../inexlib/inlib/mgr
exlib_mgr=../../../inexlib/exlib/mgr
ourex_mgr=../../../inexlib/ourex/mgr
build_pack=aida-config
. ${ourex_mgr}/build_header
if [ ${build_app} = "yes" ] ; then
cppflags="${cppflags} -I../src/cpp"
cppflags="${cppflags} -I${inlib_mgr}/.."
objs=
app_exe=amanip-config
app_src=../applications/amanip_config.cxx
. ${inlib_mgr}/build_app
fi
. ${inlib_mgr}/build_clean
| true |
165873dfcf375f1a4e5fd117ec1e3cbd3281c1c0 | Shell | blindcant/bash | /copy-home-folder-between-servers.sh | UTF-8 | 783 | 2.796875 | 3 | [] | no_license | # https://stackoverflow.com/a/48010623
# Sync selected dotfiles and folders from this machine's home directory to
# the same user's home on $HOSTNAME.
#
# Notes:
#  - Directories must be explicitly included ("dir" plus "dir/***", where
#    the triple * copies everything inside recursively) or the trailing
#    --exclude="*" would prune them.
#  - The trailing slash on the source copies the directory CONTENTS rather
#    than the directory itself.
#
# BUGFIX: the original had "\ # comment" at the end of two continuation
# lines; the backslash escaped the *space* instead of the newline, which
# terminated the rsync command early and made the remaining "--include=..."
# lines run as bogus stand-alone commands. Comments now live above.
rsync --archive --executability --progress --stats --ignore-errors --human-readable \
	--include=".gitconfig" --include=".bashrc" --include=".vimrc" \
	--include=".bash_history" --include=".netrc" \
	--include=".config" --include=".config/dconf" --include=".config/dconf/user" \
	--include=".vscode" --include=".vscode/***" \
	--include="Development" --include="Development/***" \
	--include=".ssh" --include=".ssh/***" \
	--exclude="*" \
	"/home/$USER/" "$HOSTNAME:/home/$USER"
| true |
2b74bf8de0abe0c311849e161c26c2b8b6a9dffb | Shell | mnmnc/dot | /files/app/dat/x/stp/basic/exe/i2r | UTF-8 | 933 | 3.625 | 4 | [] | no_license | #!/usr/bin/env sh
# ii-reader
# Tail the "out" log written by the ii IRC client for a server, a #channel,
# or a private-message "friend" directory.
# Parse long/short options; each consumes a value argument.
# NOTE(review): II_NICK is accepted but never used below -- confirm upstream.
while test $# -gt 0; do
  case "$1" in
    -s|--server) II_SERVER="${2}"; shift 2;;
    -c|--channel) II_CHANNEL="${2}"; shift 2;;
    -n|--nick) II_NICK="${2}"; shift 2;;
    -f|--friend) II_FRIEND="${2}"; shift 2;;
    -h|--history) II_HISTORY="${2}"; shift 2;;
    *) printf 'Invalid param: %s' "${1}"; exit 1;;
  esac
done
# Defaults: freenode, ii log tree under ~/app/dat/irc, last 25 lines.
II_SERVER="${II_SERVER:-irc.freenode.net}"
II_DIR="${II_DIR:-$HOME/app/dat/irc}"
II_HISTORY="${II_HISTORY:-25}"
# Pick the log to follow: channel > friend > server; exit 1 when the
# corresponding log directory does not exist.
if test -z "${II_CHANNEL}"; then
  if ! test -z "${II_FRIEND}"; then
    test -d "${II_DIR}/${II_SERVER}/${II_FRIEND}" || exit 1
    tail -n "${II_HISTORY}" -f "${II_DIR}/${II_SERVER}/${II_FRIEND}/out"
  else
    test -d "${II_DIR}/${II_SERVER}" || exit 1
    tail -n "${II_HISTORY}" -f "${II_DIR}/${II_SERVER}/out"
  fi
else
  test -d "${II_DIR}/${II_SERVER}/#${II_CHANNEL}" || exit 1
  tail -n "${II_HISTORY}" -f "${II_DIR}/${II_SERVER}/#${II_CHANNEL}/out"
fi
| true |
7ee2489746ac16905d17c27c481113f1bbf1a8f1 | Shell | fdesjardins/config | /dotfiles/.zsh/functions.zsh | UTF-8 | 912 | 3 | 3 | [
"MIT"
] | permissive | insert_sudo () { zle beginning-of-line; zle -U "sudo " }
# Zsh line-editor (ZLE) helpers: "zle -U text" pushes text into the input
# buffer; "zle -N name func" registers func as a bindable widget.
zle -N insert-sudo insert_sudo
# Type "git add " at the cursor.
insert_git_add () { zle -U "git add "; }
zle -N insert-git-add insert_git_add
# Type "git add . --all".
insert_git_add_all () { zle -U "git add . --all"; }
zle -N insert-git-add-all insert_git_add_all
# Quote the current line, jump to its start, and prefix "git commit -m " --
# turns whatever was already typed into the commit message.
insert_git_commit_m () {
  zle quote-line
  zle beginning-of-line
  zle -U "git commit -m "
}
zle -N insert-git-commit-m insert_git_commit_m
# Type "git push ".
insert_git_push () { zle -U "git push " }
zle -N insert-git-push insert_git_push
create_alias () {
  # Append a new alias to the aliases file and load it into the current
  # shell.
  #   $1 - alias name
  #   $2 - command string
  # BUGFIX: $1/$2 were expanded unquoted, so a command containing glob
  # characters or runs of whitespace (e.g. "ls *") was glob-expanded /
  # word-split by echo before being written out; quoting keeps it verbatim.
  echo "alias $1=\"$2\"" >> ~/.zsh/aliases.zsh;
  source ~/.zsh/aliases.zsh;
}
create_envvar () {
  # Append an exported environment variable to the environment file and
  # load it into the current shell.
  #   $1 - variable name
  #   $2 - value
  # BUGFIX: quote $1/$2 so values with spaces or glob characters survive
  # (they were previously word-split/glob-expanded by echo).
  echo "export $1=\"$2\"" >> ~/.zsh/environment.zsh;
  source ~/.zsh/environment.zsh;
}
# Key bindings for the widgets above ("^[x" is Alt/Esc+x, "^H" is Ctrl-H).
bindkey '\e[3~' delete-char
bindkey "^[s" insert-sudo
bindkey '^H' backward-kill-word
bindkey '^[a' insert-git-add
bindkey '^[A' insert-git-add-all
bindkey '^[c' insert-git-commit-m
bindkey '^[p' insert-git-push
| true |
8fc7f3e274774308de5549881c365e04977f7489 | Shell | krlmlr-archive/r-snap-texlive | /texlive/texmf-dist/doc/fonts/hfbright/install.sh | ISO-8859-15 | 2,684 | 2.984375 | 3 | [
"LicenseRef-scancode-tex-live"
] | permissive | PFB="pfb/hfbr10.pfb pfb/hfbras8.pfb pfb/hfbrbx10.pfb pfb/hfbrsl10.pfb \
pfb/hfbrsy8.pfb pfb/hfbr17.pfb pfb/hfbras9.pfb pfb/hfbrmb10.pfb \
pfb/hfbrsl17.pfb pfb/hfbrsy9.pfb pfb/hfbr8.pfb pfb/hfbrbs10.pfb \
pfb/hfbrmi10.pfb pfb/hfbrsl8.pfb pfb/hfsltl10.pfb pfb/hfbr9.pfb \
pfb/hfbrbs8.pfb pfb/hfbrmi8.pfb pfb/hfbrsl9.pfb pfb/hftl10.pfb \
pfb/hfbras10.pfb pfb/hfbrbs9.pfb pfb/hfbrmi9.pfb pfb/hfbrsy10.pfb"
AFM="afm/hfbr10.afm afm/hfbras8.afm afm/hfbrbx10.afm afm/hfbrsl10.afm \
afm/hfbrsy8.afm afm/hfbr17.afm afm/hfbras9.afm afm/hfbrmb10.afm \
afm/hfbrsl17.afm afm/hfbrsy9.afm afm/hfbr8.afm afm/hfbrbs10.afm \
afm/hfbrmi10.afm afm/hfbrsl8.afm afm/hfsltl10.afm afm/hfbr9.afm \
afm/hfbrbs8.afm afm/hfbrmi8.afm afm/hfbrsl9.afm afm/hftl10.afm \
afm/hfbras10.afm afm/hfbrbs9.afm afm/hfbrmi9.afm afm/hfbrsy10.afm"
DVIPS="dvips/config.hfbright dvips/hfbright.map"
TEXMFMAIN=`kpsexpand '$TEXMFMAIN'`
TEXMFLOCAL=`kpsexpand '$TEXMFLOCAL'`
FONTS="$TEXMFLOCAL/fonts/type1/public/hfbright"
METRICS="$TEXMFLOCAL/fonts/afm/public/hfbright"
DVIPSDIR="$TEXMFLOCAL/dvips/config"
ALLESDA=true
echo "Test for existence of all files in pfb/ and dvips/"
for a in $PFB $AFM $DVIPS
do
if [ ! -f $a ]
then
ALLESDA=false
fi
done
if [ "$ALLESDA" != "true" ]
then
echo "Some files are missing: Generating them using generate.sh."
echo ""
echo "Now running generate.sh:"
echo ""
./generate.sh
echo ""
echo "generate.sh is ready."
echo ""
else
echo "All files existing."
fi
echo "Installing files below $TEXMFLOCAL/"
echo "Path $FONTS"
if [ ! -d $FONTS ]
then
echo "does not exist, creating it"
mkdir -p $FONTS
else
echo "already exists, reusing it (first clean it)"
rm -f $FONTS/*
fi
echo "Copy pfb/* to $FONTS"
cp -f pfb/* $FONTS
echo "Path $METRICS"
if [ ! -d $METRICS ]
then
echo "does not exist, creating it"
mkdir -p $METRICS
else
echo "already exists, reusing it (first clean it)"
rm -f $METRICS/*
fi
echo "Copy afm/* to $METRICS"
cp -f afm/* $METRICS
echo "Path $DVIPSDIR"
if [ ! -d $DVIPSDIR ]
then
echo "does not exist, creating it"
mkdir -p $DVIPSDIR
else
echo "already exists, reusing it"
fi
echo "Copy dvips/* to $DVIPSDIR"
cp -f dvips/* $DVIPSDIR
echo "Running texhash to update the TeX file database"
texhash
echo ""
echo "Now, you should add the line Map hfbright.map to the file updmap.cfg which"
echo "should be in $TEXMFMAIN/web2c/ or"
echo "$TEXMFLOCAL/web2c/. Then, run updmap."
| true |
21cb7fdf1ba229ca8f80a2353a9955026438085f | Shell | venoodkhatuva12/Disk-Space-Check | /diskchk.sh | UTF-8 | 820 | 4.09375 | 4 | [] | no_license | #!/bin/sh
#Author: Vinod.N K
#Distro : Linux -Centos, Rhel, and any fedora
# Shell script to monitor or watch the disk space
# It will send an email to $ADMIN if the used percentage of space on any
# filesystem is >= the $ALERT threshold configured below.
#Check whether root user is running the script
if [ "$(id -u)" != "0" ]; then
   echo "This script must be run as root" 1>&2
   exit 1
fi
# Recipient of the alert mails.
ADMIN="admin@domain.com"
# Alert threshold in percent used.  (The old comment claimed "90% is
# default" while the configured value was 60 -- documentation fixed.)
ALERT=60
# df -H: human-readable sizes; drop the header line plus tmpfs/cdrom
# pseudo filesystems, then emit "<use%> <device>" pairs for the loop.
# read -r prevents backslash mangling; expansions are quoted so device
# names with unusual characters cannot word-split.
df -H | grep -vE '^Filesystem|tmpfs|cdrom' | awk '{ print $5 " " $1 }' | while read -r output;
do
  #echo $output
  usep=$(echo "$output" | awk '{ print $1}' | cut -d'%' -f1 )
  partition=$(echo "$output" | awk '{ print $2 }' )
  if [ "$usep" -ge "$ALERT" ]; then
    echo "Running out of space \"$partition ($usep%)\" on $(hostname) as on $(date)" |
     mail -s "Alert: Almost out of disk space $usep" "$ADMIN"
  fi
done
| true |
ccca8191c2090b58800bea899365cac4ebeb8a9e | Shell | robertwol/uaa-ID-saml-SP | /create-update-saml-idp.sh | UTF-8 | 3,550 | 3.515625 | 4 | [] | no_license | #!/bin/bash -e
# Shell tracing aids, left disabled by the author.
# set -v
# set -x
# Parse command-line flags:
#   -c create   -u update   -n <origin name>   -m <SP metadata XML file>
#   -s skip the tidy step (tidy code is commented out below)
#   -i skip SSL verification (flag is set but never used below)
while getopts ":cun:m:sih" opt; do
  case $opt in
    c)
      create="true"
      ;;
    u)
      update="true"
      ;;
    n)
      origin_name=$OPTARG
      ;;
    m)
      saml_metadata_file=$OPTARG
      ;;
    s)
      skip_tidy="true"
      ;;
    i)
      skip_ssl="true"
      ;;
    h )
      echo "Usage:"
      echo -e " -h Display this help message.\n"
      echo " -c create."
      echo " -u update."
      echo " -n SAML SP name ."
      echo " -m SAML metadata from SP."
      exit 0
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      exit 1
      ;;
    :)
      echo "Option -$OPTARG requires an argument." >&2
      exit 1
      ;;
    *)
      if [ "$OPTERR" != 1 ] || [ "${OPTSPEC:0:1}" = ":" ]; then
        echo "Non-option argument: '-${OPTARG}'" >&2
      fi
      ;;
  esac
done
# The SP/origin name is mandatory.
if [[ -z "$origin_name" ]]; then
  echo "You must specify the origin name with option -n."
  exit 1
fi
# Temporarily disable exit-on-error: grep returning non-zero (no match)
# is the *good* case for the special-character check below.
set +e
echo $origin_name | grep ^.*[\]\^\:\ \?\/\@\#\[\{\}\!\$\&\'\(\)\*\+\,\;\=\~\`\%\|\<\>\"].*$
#A status code of 0 means that there was a special character in the origin name.
if [ $? == 0 ]; then
  echo "Origin name $origin_name contains special characters. Remove the special characters and retry."
  exit 1
fi
set -e
# The SP metadata file is mandatory as well.
if [[ -z "$saml_metadata_file" ]]; then
  echo "You must specify the idp config file with option -m."
  exit 1
fi
# JSON fragments that will wrap the (escaped) metadata XML.
config_left='{"metaDataLocation" : "'
# right='","emailDomain":'"$config_email_domain_file"',"idpEntityAlias":"'"$origin_name"'","nameID":"'"$nameid_format"'","assertionConsumerIndex":0,"metadataTrustCheck":false,"showSamlLink":true,"socketFactoryClassName":"org.apache.commons.httpclient.protocol.DefaultProtocolSocketFactory","linkText":"'"$link_text"'","iconUrl":null,"groupMappingMode":"'"$group_mapping_mode"'","addShadowUserOnLogin":"'"$add_shadow_user_on_login"'","externalGroupsWhitelist":'"$groups_list"',"attributeMappings":'"$config_mapping"'}'
config_right='","metadataTrustCheck" : true}'
# Escape the double quotes of the JSON wrapper fragments themselves.
esc_left=$(echo ${config_left} | sed 's/"/\\"/g')
esc_right=$(echo ${config_right} | sed 's/"/\\"/g')
# dos2unix for stupid OSX that doesn't have dos2unix
config_middle=$(<$saml_metadata_file)
# formats the xml
# if [[ -z $skip_tidy ]]; then
# echo "Tidy XML"
# esc_middle_1=$(echo "$esc_middle_0" | tidy -xml -i - | col -b)
# ${LINES[@]}
# else
# echo "DO NOT Tidy XML"
# esc_middle_1=$esc_middle_0
# fi
# Replaces all \ with \\\\
esc_middle_1=$(echo "$config_middle" | sed 's/\\/\\\\\\\\/g')
# Replaces all quotes with \\\"
esc_middle_2=$(echo "$esc_middle_1" | sed 's/"/\\\\\\"/g')
# Replaces all newlines with \\n
# esc_middle_3=$(echo "$esc_middle_2" | awk '$1=$1' ORS='\\\\n')
#remove \n at the end of each line
esc_middle_3=$(echo "$esc_middle_2" | tr -d '\n' | tr -d '\r')
esc_middle=$esc_middle_3
# Final escaped config value and the JSON payload sent to UAA.
config="$esc_left$esc_middle$esc_right"
# data='{"originKey":"'"$origin_name"'","name":"'"$origin_name"'","type":"saml","config":"'"$config"'","active":true}'
data='{"name":"'"$origin_name"'","active":true,"config":"'"$config"'"}'
# echo "$data"
echo "Create: $create, Update: $update"
# Call the UAA REST API through the authenticated uaac CLI.
# NOTE(review): the PUT below targets a hard-coded service-provider GUID --
# confirm this is intentional before reusing the script elsewhere.
if [[ $create ]]; then
  echo -e "\n################\nCREATING NEW\n"
  uaac curl /saml/service-providers -t -X POST -H "Content-Type:application/json;charset=UTF-8" -d "$data"
elif [[ $update ]]; then
  echo -e "\n################\nUPDATING\n"
  uaac curl /saml/service-providers/da19d61a-46a6-4950-b3b0-648868a6303f -t -X PUT -H "Content-Type:application/json;charset=UTF-8" -d "$data"
fi
| true |
8c0e48c5b6836755bc2b632628f677cb90730a1a | Shell | vahmed/Bash | /storageRpt.sh | UTF-8 | 1,212 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Name: storageRpt.sh Jul-7-2014
#
# This script runs each week and generates storage reports
# for xxxxxx db and emails.
# Updated: 02/04/2010 - Added gzip when archiving reports

# Oracle environment and connection settings.
export ORACLE_HOME=/u01/app/oracle/product/11.2.0/dbhome_1
BASE_DIR=/home/oracle
DB=xxxxxx
USER=system
PASS=yyyyyyyy
DATE=`date +%m-%d-%Y`
SQL_1=$BASE_DIR/storage_rpt.sql

# Archive last week's report (if any) before generating a new one.
cd $BASE_DIR/data
if [ -f storage_rpt.txt ]
then
   mv storage_rpt.txt storage_rpt.$DATE.txt
   gzip -f storage_rpt.$DATE.txt
fi

# Run the report; the password is stored base64-encoded and decoded on the fly.
$ORACLE_HOME/bin/sqlplus -S $USER/`echo ${PASS}|openssl enc -base64 -d`@$DB @$SQL_1

# BUGFIX: the original test was `[ ! -z storage_rpt.txt ]`, which checks that
# the *literal string* "storage_rpt.txt" is non-empty and is therefore always
# true -- the error branch could never run.  Test the generated file instead.
# NOTE(review): the report is rotated in $BASE_DIR/data but mailed from
# $BASE_DIR -- confirm which location the SQL script actually spools to.
if [ -s $BASE_DIR/storage_rpt.txt ]
then
   RPT_DATE=`date +'%B, %Y'`
   # Manual override. Leave the line below commented unless you are running this script manually.
   #RPT_DATE="Mar, 2011"
   /bin/cat $BASE_DIR/email.txt | sed "s/#DATE#/$RPT_DATE/" > /tmp/email.$$.txt
   # mutt reads the message body from the redirected email file; the report
   # itself goes out as an attachment (-a).  (The original piped the report
   # into mutt as well, but the `<` redirection always overrode that pipe.)
   mutt -F $BASE_DIR/muttrc -s "XXXXX - Weekly Storage Report" -a $BASE_DIR/storage_rpt.txt myemail@sample.com < /tmp/email.$$.txt
else
   echo "An error occured while running the weekly storage report for XXXXX." | mutt -F $BASE_DIR/scripts/muttrc -s "GTTOTP - Weekly Storage Report ERROR" myemail@sample.com
fi
rm -f /tmp/email.$$.txt
| true |
109eef0caa3e5efe069c15caec8f637c8715d3b2 | Shell | lisy09/spark-dev-box | /vendor/apache-livy-docker/build_scripts/build_livy.sh | UTF-8 | 544 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Resolve the directory containing this script, then the repository root
# one level above it; both cd calls are silenced and pwd yields absolute paths.
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
repo_root="$( cd $script_dir/.. >/dev/null 2>&1 && pwd )"

# Trace the docker invocation so the effective build arguments are visible.
set -x
# Build the Livy image.  Every PREBUILT_*/_URL knob plus the image
# repository and tag come from the caller's environment.
docker build \
    --build-arg PREBUILT_HADOOP=${PREBUILT_HADOOP} \
    --build-arg SPARK_SRC_URL=${SPARK_SRC_URL} \
    --build-arg SPARK_ASC_URL=${SPARK_ASC_URL} \
    --build-arg PREBUILT_LIVY=${PREBUILT_LIVY} \
    --build-arg LIVY_SRC_URL=${LIVY_SRC_URL} \
    --build-arg LIVY_ASC_URL=${LIVY_ASC_URL} \
    -t ${DOCKER_REPO}livy:${DOCKER_TAG} \
    ${repo_root}/livy_docker
set +x
89797239547d7b21e9435498d574960b1be93568 | Shell | halcyon/asdf-java | /set-java-home.zsh | UTF-8 | 298 | 3.453125 | 3 | [
"MIT"
] | permissive | asdf_update_java_home() {
  # Path of the java executable currently selected by asdf,
  # e.g. ~/.asdf/installs/java/<version>/bin/java; empty if none is set.
  local java_path
  java_path="$(asdf which java)"
  if [[ -n "${java_path}" ]]; then
    export JAVA_HOME
    # ${java_path:A} is a zsh modifier: resolve symlinks to an absolute
    # path.  Stripping the two trailing components (/bin/java) yields the
    # JDK installation root.
    JAVA_HOME="$(dirname "$(dirname "${java_path:A}")")"
    # Some tools look for JDK_HOME instead of JAVA_HOME; keep them in sync.
    export JDK_HOME=${JAVA_HOME}
  fi
}
# Re-evaluate JAVA_HOME before every prompt so it tracks `asdf` version
# switches (precmd is a zsh hook that runs prior to each prompt).
autoload -U add-zsh-hook
add-zsh-hook precmd asdf_update_java_home
| true |
93a3d6e595f6087dec897b8eb63bde1168b89561 | Shell | pratikshaghoderao/shell-script | /selectionLoops/unitConversion.sh | UTF-8 | 390 | 2.859375 | 3 | [] | no_license | #!/bin/bash -x
# Shell arithmetic is integer-only, so awk does the floating-point work;
# every result is formatted with two decimal places via printf.
feet=$(awk 'BEGIN { printf "%0.02f\n", (42 / 12) }')
echo "42 meters in feets: $feet ft"
area=$(awk 'BEGIN { printf "%0.02f\n", ((60 / 3.28) * (40 / 3.28)) }')
echo "Area of plot is:: $area meters"
plots=$(awk 'BEGIN { printf "%0.02f\n", (25 * 2200) }')
echo "25 plots: $plots feet"
total=$(awk 'BEGIN { printf "%0.02f\n", (25000 / 4047) }')
echo "Total 25 plots in acers: $total acers"
| true |
3b96608e08c7bef93db5aeaadac62fe376bd0317 | Shell | vmware-automation/solutions | /Oracle-WebLogic-Cluster-Service/managed/Managed_Enroll_Service.sh | UTF-8 | 2,442 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# SCRIPT INTERNAL PARAMETERS -- START
# BEA/WebLogic install root and the domain name are injected by the
# provisioning tool through the environment ($webLogic_home, $domain_name).
BEA_HOME="$webLogic_home"
WLS_INSTALL_DIR="$BEA_HOME/WebLogic"
DOMAIN="$domain_name"
# SCRIPT INTERNAL PARAMETERS -- END
# FUNTION TO CHECK ERROR
# Script name used as the prefix of every error message.
PROGNAME=`basename $0`
# Abort via Error_exit with message $1 if the previous command failed.
# Must be called immediately after the command to check: the caller's $?
# is captured on entry, before anything here can overwrite it.
function Check_error()
{
	local last_status=$?
	if [ "$last_status" -ne 0 ]; then
		Error_exit "$1"
	fi
}
# FUNCTION TO DISPLAY ERROR AND EXIT
# Print "<script name>: <message>" (or a default message when none is
# given) on stderr, then terminate the whole script with exit status 1.
function Error_exit()
{
	printf '%s: %s\n' "${PROGNAME}" "${1:-UNKNOWN ERROR}" >&2
	exit 1
}
# UNPACKING -- START
# Materialise the admin server's domain template on this managed host.
echo `hostname`
echo "UNPACKING THE $DOMAIN.jar(ADMIN SERVER DOMAIN) ON THE MANAGED SERVER..."
$WLS_INSTALL_DIR/common/bin/unpack.sh -domain=$WLS_INSTALL_DIR/samples/domains/$DOMAIN -template=$WLS_INSTALL_DIR/common/templates/domains/$DOMAIN.jar
Check_error "ERROR:WHILE UNPACKING THE DOMAIN"
echo "DOMAIN UNPACKED SUCCESSFULLY"
# UNPACKING -- END
# ENROLLMENT OF MANAGED SERVER -- START
# Register this node with the admin server via a WLST script.
echo "ENROLLING MANAGED SERVER WITH THE ADMIN SERVER..."
$WLS_INSTALL_DIR/common/bin/wlst.sh $WLS_INSTALL_DIR/samples/server/examples/src/examples/wlst/online/enrollnodemanager.py
Check_error "ERROR:WHILE ENROLLING THE AMAGED SERVER WITH THE ADMIN SERVER"
echo "MANAEGD SERVER ENROLLED SUCCESSFULLY"
# ENROLLMENT OF MANAGED SERVER -- END
# START OF THE NODEMANAGER ON THE MANAGED SERVER -- START
# Write the Node Manager configuration file.  NOTE(review): the paths in
# the heredoc are hard-coded to /disk2/BEA rather than derived from
# $WLS_INSTALL_DIR -- confirm they match the actual install location.
cat << EOF > $WLS_INSTALL_DIR/common/nodemanager/nodemanager.properties
DomainsFile=/disk2/BEA/WebLogic/common/nodemanager/nodemanager.domains
LogLimit=0
PropertiesVersion=10.3
DomainsDirRemoteSharingEnabled=false
javaHome=/disk2/BEA/jdk160_29
AuthenticationEnabled=true
NodeManagerHome=/disk2/BEA/WebLogic/common/nodemanager
JavaHome=/disk2/BEA/jdk160_29/jre
LogLevel=INFO
DomainsFileEnabled=true
StartScriptName=startWebLogic.sh
ListenAddress=
NativeVersionEnabled=true
ListenPort=5556
LogToStderr=true
SecureListener=false
LogCount=1
DomainRegistrationEnabled=false
StopScriptEnabled=false
QuitEnabled=false
LogAppend=true
StateCheckInterval=500
CrashRecoveryEnabled=false
StartScriptEnabled=true
LogFile=/disk2/BEA/WebLogic/common/nodemanager/nodemanager.log
LogFormatter=weblogic.nodemanager.server.LogFormatter
ListenBacklog=50
EOF
# Launch the Node Manager in the background and wait before reporting.
# NOTE(review): Check_error below sees the exit status of `sleep`, not of
# startNodeManager.sh (which runs in the background via `&`).
echo "STARTING THE NODEMANAGER ON THE MANAGED SERVER..."
cd $WLS_INSTALL_DIR/server/bin
./startNodeManager.sh & sleep 500
Check_error "ERROR:WHILE STARTING THE NODEMANAGER ON THE MANAGED SERVER"
echo "NODEMANAGER STARTED SUCCESSFULLY"
# START OF THE NODEMANAGER ON THE MANAGED SERVER -- END
e0a404d3ae002e2c0d03b53ed8b9ad198473af44 | Shell | mamimu/shellscript | /twittshell.sh | UTF-8 | 682 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Credentials for HTTP Basic auth against the (long defunct) Twitter v1 API.
ID=hogehoge
PASS=hogehoge
# Interactive menu: read one choice and dispatch to the matching action.
main(){
echo "1:TimeLine 2:Post q:Exit"
read opera
case $opera in
1)
timeline
;;
2)
post
;;
q)
quit
;;
*)
# Any other input: show the menu again.
main
;;
esac
}
# Fetch the friends timeline RSS, convert it to UTF-8 with nkf, and print
# the <title> lines with all XML tags stripped, then return to the menu.
timeline(){
clear
curl -s -O --basic --user "$ID:$PASS" "http://twitter.com/statuses/friends_timeline.rss"
nkf -w --numchar-input friends_timeline.rss > ftl.rss
cat ftl.rss |grep "title" | sed "s/<[^>]*>//g"
rm friends_timeline.rss
rm ftl.rss
main
}
# Prompt for a status message, post it, then show the timeline again.
post(){
echo "input your messeages: "
read mess
curl -s --basic --user "$ID:$PASS" --data-ascii "status=$mess" "http://twitter.com/statuses/update.json"
clear
timeline
}
# Terminate the script.
quit(){
exit 0
}
# Top-level driver: keep showing the menu until the user quits.
while true;
do
main
done
| true |
ef290c201fee7ec4b9c6601a0fbad0a92411feb4 | Shell | metambou/project2 | /ubuntu-conf.sh | UTF-8 | 56 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# BUGFIX: the original `if` block was never closed with `fi`, so the
# script failed to parse at all.
# NOTE(review): `$?` at the top of a script is always 0, so this condition
# is always true -- presumably a file-existence test such as
# `[ -f <file> ]` was intended; confirm the original intent.
if [ $? -eq 0 ] ; then
	echo "file exist"
fi
| true |
230372abcdfb6c065110de10d7db2a2e954a5073 | Shell | Anon-Exploiter/ThemFatScripts | /typeJugglingDockerStart.sh | UTF-8 | 352 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# Running Exercise: sudo docker run -it --rm uexpl0it/type-juggling:latest
# From github repository: https://github.com/TROUBLE-1/Type-juggling
# Container entrypoint: replace every literal "None" with "All" in the
# Apache config (in practice this flips "AllowOverride None" to "All" --
# confirm no other "None" entries are affected), start Apache quietly,
# print the container address, then block forever to keep the container up.
sed -i 's/None/All/g' /etc/apache2/apache2.conf
service apache2 start > /dev/null 2>&1 && \
echo "[#] Challenge can be accessed at: http://$(hostname -I)" && \
tail -f /dev/null
| true |
d8037b6704d5a157d6fc0d59817d9621a06acd56 | Shell | aiden-chanyoung/git_bash | /ex11_2.sh | UTF-8 | 164 | 2.828125 | 3 | [] | no_license | #!/bin/bash
# Demonstrates shell functions: arguments arrive as $1, $2, ... and the
# numeric status set by `return` is visible in $? right after the call.

# Print a greeting for the name given as the first argument.
# Returns 5 so the caller can observe the function's exit status.
print_something () {
	# Quote "$1" so names containing spaces or glob characters are
	# passed through unchanged (the original left it unquoted).
	echo Hello "$1"
	return 5
}

print_something Mars
print_something Jupiter
echo The previous function has a return value of $?
| true |
a48db0cebbe757002216ef8d87e1c26546bbfa17 | Shell | frankies/concourse-java-scripts | /test/run_maven.bats | UTF-8 | 585 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | #!./test/libs/bats/bin/bats
# Bats helper libraries: bats-support (formatting) and bats-assert
# (the assert / assert_equal helpers used below).
load 'libs/bats-support/load'
load 'libs/bats-assert/load'
# Pull in run_maven() from the script under test.
source "$PWD/concourse-java.sh"
# On success run_maven prints the command it executes followed by a
# trimmed summary of Maven's output (13 lines total from the mock).
@test "run_maven() should show limited output" {
  cd test/mock/mvnw/success
  run run_maven clean install
  local lineCount=$( echo "$output" | wc -l)
  assert [ "${lines[0]}" = "./mvnw clean install" ]
  assert_equal $lineCount 13
}
# On failure the tail of the Maven log is shown as well, producing far
# more output (789 lines from the mock).
@test "run_maven() when fails should show tail" {
  cd test/mock/mvnw/failure
  run run_maven clean install
  local lineCount=$( echo "$output" | wc -l)
  assert [ "${lines[0]}" = "./mvnw clean install" ]
  assert_equal $lineCount 789
}
| true |
b1a75549d5c5d7cafef9ec5abad63f80287e909f | Shell | vmware-automation/solutions | /vfabric-rabbitmq/rabbitmq-install.sh | UTF-8 | 3,712 | 3.640625 | 4 | [] | no_license | #!/bin/sh
# vFabric ApplicationDirector Sample START script for vFabric 5.1 RabbitMQ Server
# This example uses the values posted below as defaults. To change any of these
# values, add the Property Name as shown below as individual properties in your
# service definition in the ApplicationDirector Catalog. The value specified after
# the Property name is the Type to use for the property (i.e. String, Content, Array etc)
# There are two types of properties for this script: Required and Optional. Both are
# listed below.
#
# REQUIRED PROPERTIES:
# These are the properties you must add in order for this sample script to work. The property
# is added when you create your service definition in the ApplicationDirector Catalog.
# Property Description: Property Value settable in blueprint [type]:
# --------------------------------------------------------------------------------------------
# Location of global configuration data global_conf [Content]
# value: https://${darwin.server.ip}:8443/darwin/conf/darwin_global.conf
#
# OPTIONAL PROPERTIES:
# Property Description: Property Name settable in blueprint:
# --------------------------------------------------------------------------------------------
# From ApplicationDirector - Import and source global configuration
. $global_conf
# Make sure the usual admin tool locations are on PATH and run as root.
export PATH=$PATH:/usr/sbin:/sbin:/usr/bin:/bin
export HOME=/root
export RMQ_HOME="/opt/vmware"
# Package name and EULA location; both may be overridden from the blueprint.
export RMQ_PACKAGE=${RMQ_PACKAGE:="vfabric-rabbitmq-server"}
export EULA_LOCATION=${EULA_LOCATION:="http://www.vmware.com/download/eula/vfabric_app-platform_eula.html"}
# pre-set the license agreement for rpm
if [ ! -d "/etc/vmware/vfabric" ]; then
   mkdir -p /etc/vmware/vfabric
fi
echo "setting up vfabric repo"
echo "I_ACCEPT_EULA_LOCATED_AT=${EULA_LOCATION}" >> /etc/vmware/vfabric/accept-vfabric-eula.txt
echo "I_ACCEPT_EULA_LOCATED_AT=${EULA_LOCATION}" >> /etc/vmware/vfabric/accept-vfabric5.1-eula.txt
# Detect the distribution; only RedHat-family systems are supported.
# DIST becomes the product name, REV the major release number.
if [ -f /etc/redhat-release ] ; then
    DistroBasedOn='RedHat'
    DIST=`cat /etc/redhat-release |sed s/\ release.*//`
    REV=`cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*// | awk -F. '{ print $1 }'`
else
    echo "Installation only supported on RedHat and CentOS; exiting installation script"
    exit
fi
# Install erlang and vFabric RPM repo
# EPEL provides Erlang; the vFabric repo provides the RabbitMQ package.
# Each major release needs its matching EPEL and vfabric repo RPMs.
if [ -f /bin/rpm ]; then
    if [ "$REV" == "5" ]; then
        wget http://download.fedoraproject.org/pub/epel/5/i386/epel-release-5-4.noarch.rpm
        rpm -Uvh epel-release-5-4.noarch.rpm
        wget -O /etc/yum.repos.d/epel-erlang.repo http://repos.fedorapeople.org/repos/peter/erlang/epel-erlang.repo
        rpm -Uvh --force http://repo.vmware.com/pub/rhel5/vfabric/5.1/vfabric-5.1-repo-5.1-1.noarch.rpm
    elif [ "$REV" == "6" ]; then
        wget http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-7.noarch.rpm
        rpm -Uvh epel-release-6-7.noarch.rpm
        rpm -Uvh --force http://repo.vmware.com/pub/rhel6/vfabric/5.1/vfabric-5.1-repo-5.1-1.noarch.rpm
    else
        echo "Unsupported version: ${REV}; exiting installation"
        exit
    fi
else
    echo "RPM utility not available; exiting installation script"
    exit
fi
# Finally install Erlang and the RabbitMQ package itself via yum.
if [ "$DistroBasedOn" == "RedHat" ]; then
    if [ "$DIST" == "CentOS" ]; then
        if [ -x /usr/sbin/selinuxenabled ] && /usr/sbin/selinuxenabled; then
            echo 'SELinux is enabled. This may cause installation to fail.'
        fi
    fi
    if [ -f /usr/bin/yum ]; then
        yum -y install erlang --skip-broken
        yum -y -v install ${RMQ_PACKAGE}
    else
        echo "ERROR! Unable to locate yum in ${PATH}; exiting installer"
        exit
    fi
fi
| true |
4b9cfc26c4e53f1ec05b9ee86df3f92f1ad1c397 | Shell | Neato-Nick/fastq-processing | /counts_simple.sh | UTF-8 | 635 | 3.75 | 4 | [] | no_license | #!/bin/bash
# SGE (Sun Grid Engine) job directives -- must stay at the top of the file.
#$ -cwd
#$ -S /bin/bash
#$ -N counting_seqs
#$ -e countsErr
#$ -o countsOut
#$ -q !nem
#$ -V
# Count number of reads in all FASTQ files in some directory.
# The directory may be given as the first command line argument; otherwise
# the current directory is used.  (The original tested "$1" twice with
# opposite conditions -- folded into a single if/else.)
if [[ "$1" != "" ]]
then
	wd=$1
else
	echo "no directory supplied, using current dir"
	wd=$(pwd)
fi
echo "Searching inside $wd"
# for files demultiplexed without compression
#grep -c "@" "$wd"/*.fastq > "$wd"/counts.txt
#grep -c "@" "$wd"/*.fq >> "$wd"/counts.txt
# for files demultiplexed that are gzipped
# zgrep -c emits one "<file>:<count>" line per input file; "$wd" is quoted
# so directory names containing spaces work.
zgrep -c "@" "$wd"/*.fq.gz > "$wd"/counts.txt
#zgrep -c "@" "$wd"/*.fastq.gz >> "$wd"/counts.txt
| true |
fe5605290d0819153e14c2908d38e063774fc887 | Shell | ADALabUCSD/SLAB | /config/install-gpdb.sh | UTF-8 | 449 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Abort on the first failing command -- a partial GPDB init is unusable.
set -e
cd ~/gpdb
mkdir -p gpconfigs
# Pull GPHOME and friends into the environment.
source /usr/local/gpdb/greenplum_path.sh
# Start from the sample gpinitsystem configuration shipped with GPDB.
cp $GPHOME/docs/cli_help/gpconfigs/gpinitsystem_config \
    ~/gpdb/gpconfigs/gpinitsystem_config
cd gpconfigs
sudo ldconfig
# Single-host setup: this machine acts as both master and segment host,
# so substitute the sample's "mdw" master name with our own hostname.
export me=`hostname`
sed -i s/MASTER_HOSTNAME=mdw/MASTER_HOSTNAME=${me}/g gpinitsystem_config
echo "${me}" >> hostfile_gpinitsystem
cd ../
# gpinitsystem prompts for confirmation; `yes` answers automatically.
yes | gpinitsystem -c gpconfigs/gpinitsystem_config -h \
    gpconfigs/hostfile_gpinitsystem
| true |
44ea2f703ae32dc6c047029e4881a57934c09963 | Shell | asafo/kuventory | /inventory.sh | UTF-8 | 1,761 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# Dynamic ansible inventory helper: for every host in the inventory file,
# ensure a kubevirt VM exists and append its pod IP as ansible_ssh_host.
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
INVENTORY=$SCRIPTPATH/../environments/dev/hosts.moe
NAMESPACE=mongo
LOGFILE=$SCRIPTPATH/log

# Print usage information and abort.
# BUGFIX: the heredoc previously started as `cat <<EOF Usage: ...`, so cat
# treated the words after the delimiter as *file names*; the usage text is
# now proper heredoc content.
function usage
{
    cat <<EOF
Usage: $0 options

Options:
    -l | --list              - list inventory
    -h | --host <hostname>   - output host variables
    -c | --configure         - create env file containing inventory configuration
EOF
    exit 1
}

# BUGFIX: `function list { }` / `function configure { }` had empty bodies,
# which is a bash syntax error that aborted the whole script before the
# inventory loop below could ever run.  They are explicit no-ops for now.
function list
{
    : # TODO: implement inventory listing
}

function configure
{
    : # TODO: implement configuration generation
}

# Create the VM (and its shared preset) for host $1 from kubevirt manifests.
function generate_machine
{
    NAMESPACE=$NAMESPACE envsubst < $SCRIPTPATH/vmi-preset.yaml | kubectl apply -f - >> $LOGFILE 2>&1
    NAME=$1 NAMESPACE=$NAMESPACE envsubst < $SCRIPTPATH/vm.yaml | kubectl apply -f - >> $LOGFILE 2>&1
}

# Print the pod IP for host $1, creating the VM first if it does not exist
# and polling until an IPv4 address appears in `kubectl get pods`.
function machine_ip
{
    # BUGFIX: the original `2>&1>$LOGFILE` sent stderr to the terminal and
    # truncated the log file; append both streams to the log instead.
    kubectl get vms -n $NAMESPACE | grep $1 >> $LOGFILE 2>&1 || generate_machine $1
    IP=`kubectl get pods -o wide -n $NAMESPACE | grep $1 | awk '{ print $6 }'`
    while ! echo $IP | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b" >> $LOGFILE 2>&1; do
        sleep 0.5s
        IP=`kubectl get pods -o wide -n $NAMESPACE | grep $1 | awk '{ print $6 }'`
    done
    echo $IP
}

# Command-line handling.
# BUGFIX: the script used to call `usage` for *any* argument before the
# option loop (and before usage was even defined), so -l/-c never worked;
# functions are now defined above and the premature guard is gone.
while [ "$1" != "" ]; do
    case $1 in
        -l | --list)
            list
            exit 0
            ;;
        -c | --configure)
            configure
            exit 0
            ;;
        * ) usage
    esac
    shift
done

# Default action: read the inventory and, inside plain host sections (but
# not "[group:children]"-style sections), append the resolved/created IP.
echo "START" > $LOGFILE
hostsSection=false
while read line; do
    echo ">> $line" >> $LOGFILE
    if echo $line | grep "\[.*\]" > /dev/null; then
        if echo $line | grep "\[.*\:.*\]" > /dev/null; then
            hostsSection=false
        else
            hostsSection=true
        fi
    else
        if [ "$hostsSection" = true ] && [[ ! -z "${line// }" ]]; then
            line="$line ansible_ssh_host="`machine_ip $line`
        fi
    fi
    echo "$line" | tee -a $LOGFILE
done < $INVENTORY
| true |
cc2e8204f7f4359274ca491e6342169f6fdad605 | Shell | cclaude42/push_swap | /swapper_pusher.sh | UTF-8 | 950 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# ANSI styling: b = bold magenta, r = reset.
b=$(tput setaf 5)$(tput bold)
r=$(tput sgr0)
# Rebuild push_swap from scratch before testing.
make re
# Pick the bundled reference checker that matches the host OS.
if [[ "$OSTYPE" == "darwin"* ]]
then
	os=_macos
else
	os=_linux
fi
# With -mine, build and use the project's own bonus checker instead.
if [[ $1 == "-mine" ]]
then
	make bonus
	os=""
fi
# Each file in tests/ holds one test case per line (a list of numbers).
for testsize in $(ls tests)
do
	echo "=== Running tests of size ${b}$(echo $testsize | tr -d -c 0-9)${r} ==="
	echo
	# Split the file into lines, then each line into arguments.
	IFS=$'\n'
	i=0
	total=0
	max=0
	for test in $(cat tests/$testsize)
	do
		IFS=$' '
		# Valid if the checker confirms the move list sorts the input.
		if ./push_swap ${test[@]} | checkers/checker$os ${test[@]} | grep -q "OK"
		then
			# Count the moves and track the total/maximum for the summary.
			n=$(./push_swap ${test[@]} | wc -l)
			if ((n > max))
			then
				max=$n
			fi
			total=$((total+n))
			i=$((i+1))
			echo "✅ Test passed in ${b}$n${r} moves !"
#			exec 4>&2
#			t=$( { time -p ./push_swap ${test[@]} 1>/dev/null 2>&4; } 2>&1 | tr '\n' ' ')
#			exec 4>&-
#			echo "	Time : $t"
		else
			echo "❌ Test failed..."
		fi
	done
	echo
	echo "	Average ${b}$((total/i))${r} (max ${b}$max${r})"
	echo "================================="
done
# Clean all build artefacts when done.
make fclean
| true |
669b408bd26f299d542805ccbc37224b8a484765 | Shell | franksipoli/integrador-magento | /integrador/bash/integrador.sh | UTF-8 | 663 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
time=3000 # seconds
server_local="2"
# NOTE(review): both branches assign the exact same path, so the
# server_local switch currently has no effect -- presumably the two
# environments once used different directories.
if [ $server_local = "1" ]; then
dir_file="/var/www/html/projeto/frank/editorapositivo-ecommerce/Source/trunk/integrador/bash"
else
dir_file="/var/www/html/projeto/frank/editorapositivo-ecommerce/Source/trunk/integrador/bash"
fi
cd $dir_file
# Timing bookkeeping for the (currently commented-out) polling loop below:
# microtime holds "now" in milliseconds, microtimel the previous tick.
count=0
microtime=0
microtimel=0
#while [ $count -eq 0 ]; do
microtime=$(($(date +%s%N)/1000000))
calc=`expr $microtime - $microtimel`
# if [ $calc -gt $time ]; then
#echo "ok $calc $time"
microtimel="$microtime"
# Run one import pass ("massa" presumably selects bulk mode -- confirm).
$dir_file/run.php "massa"
# fi
#done
| true |
4fa047cdf73415b7a5c06baa1e2979fdc5d4ba5e | Shell | mkouba/arc-benchmarks | /run-benchmarks.sh | UTF-8 | 803 | 3.390625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Quarkus ArC versions to benchmark against.
VERSIONS="2.16.1.Final 999-SNAPSHOT"
# Set max to use Runtime.getRuntime().availableProcessors()
THREADS="1"
# Benchmarks to run
# A JMH benchmark regex may be passed as $1 to restrict the run;
# otherwise the full default set below is used.
if [ "$1" ]; then
  BENCHMARKS=$1
else
  BENCHMARKS="InterceptorBenchmark|SingleDecoratorBenchmark|ReflectionsBenchmark|SubclassInstantiationBenchmark|ApplicationScopedProxyInvocationBenchmark|RequestScopedProxyInvocationBenchmark|ContextProviderBenchmark|RequestContextActivationBenchmark"
fi
echo "Versions: $VERSIONS"
echo "Benchmarks to run: $BENCHMARKS"
mvn clean
VERSIONS_ARRAY=$(echo $VERSIONS)
# For every version: rebuild the benchmark jar against it, run JMH with a
# JSON report, then regenerate the comparison chart from all reports.
for i in $VERSIONS_ARRAY
do
  mvn package -Dquarkus.version=$i
  java -jar target/benchmarks.jar -t $THREADS -rf json -rff target/results-$i.json $BENCHMARKS
  java -cp target/benchmarks.jar io.quarkus.arc.benchmarks.chart.ChartGenerator target
done;
| true |
6c01d07e52d509f3f80e03bfb5b1b2c8268cf4e5 | Shell | genus-machina/phage | /build.sh | UTF-8 | 555 | 3.953125 | 4 | [] | no_license | #!/bin/bash -e
# Name of the sub-command requested on the command line (may be empty).
COMMAND="${1}"

# Exit the script with a failure status.
fail() {
  exit 1
}

# Compile the Go module.
build() {
  go build
}

# Format the tree and run the whole test suite (also the default action).
test() {
  go fmt ./...
  go test ./...
}

# Report an unrecognised sub-command on stderr.
unknown() {
  printf "Unknown command '%s'.\n" "${COMMAND}" >&2
}

# Refresh all module dependencies and tidy go.mod/go.sum.
update() {
  GOPROXY=direct go get -u ./...
  go mod tidy
}

# Print the list of supported sub-commands.
usage() {
  printf '%s: [command]\n\n' "$0"
  printf 'Commands:\n'
  printf '\tbuild\n\ttest\n\tupdate\n\n'
}

# Dispatch: no argument runs the test suite; anything unrecognised prints
# an error plus usage and fails.
case "${COMMAND}" in
  "")     test ;;
  build)  build ;;
  test)   test ;;
  update) update ;;
  *)
    unknown
    echo
    usage
    fail
    ;;
esac
| true |
ff99c71475b4c535a592f04915792a237e1e3f05 | Shell | kaoxkrul/bash | /bkupwrt | UTF-8 | 3,122 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# Backup Script for OpenWrt routers
# by Rob Krul
# Routers that will be backed up with "all" param
ROUTERS="wrt1 wrt2 wrt3 wrt4"
# Set Backup Directory
BACKUPDIR="/home/username/wrt-backup"

# Archive a leftover file from a previous (possibly interrupted) run by
# inserting a timestamp before its extension, e.g.
#   wrt1-bkup.tgz -> wrt1-bkup-20240101120000.tgz
# (Replaces nine copies of the same if/mv pair in the original.)
rotate_old() {
	if [ -f "$1" ]; then
		mv -f "$1" "${1%.*}-`date +'%Y%m%d%H%M%S'`.${1##*.}"
	fi
}

if [ $# -ne 1 ]; then
	echo "Syntax: bkupwrt <routername|all>"
	exit
fi

if [ ! -d ${BACKUPDIR} ]; then
	echo "ERROR: ${BACKUPDIR} does not exist!"
	exit
fi

if [ ! -w ${BACKUPDIR} ]; then
	echo "ERROR: ${BACKUPDIR} not writable!"
	exit
fi

cd ${BACKUPDIR}

# Any argument other than "all" selects a single router.
if [ "$1" != "all" ]; then
	ROUTERS=$1
fi

for router in $ROUTERS
do
	echo "*** Backing up ${router} ***"

	# Rotate the same set of files the original rotated (mtd.txt and
	# pkgs.txt were never rotated -- they are overwritten and removed
	# within each run anyway).
	for leftover in \
		${router}-bkup.tgz \
		${router}-cpuinfo.txt \
		${router}-df.txt \
		${router}-ifconfig.txt \
		${router}-linux.trx \
		${router}-meminfo.txt \
		${router}-nvram.bin \
		${router}-ps.txt \
		${router}-tar.tgz
	do
		rotate_old "$leftover"
	done

	# Collect system information and raw flash images over ssh.
	ssh -x root@${router} "cat /proc/cpuinfo" > ${router}-cpuinfo.txt
	ssh -x root@${router} "df" > ${router}-df.txt
	ssh -x root@${router} "/sbin/ifconfig" > ${router}-ifconfig.txt
	# Firmware image: remount jffs read-only while dumping the partition.
	ssh -x root@${router} "mount -o remount,ro /dev/mtdblock/4 /jffs;dd if=/dev/mtdblock/1;mount -o remount,rw /dev/mtdblock/4 /jffs" > ${router}-linux.trx
	ssh -x root@${router} "cat /proc/meminfo" > ${router}-meminfo.txt
	ssh -x root@${router} "cat /proc/mtd" > ${router}-mtd.txt
	# NVRAM (settings) partition.
	ssh -x root@${router} "dd if=/dev/mtdblock/3" > ${router}-nvram.bin
	ssh -x root@${router} "if [ -x /bin/opkg ]; then opkg list_installed; else ipkg list_installed; fi" > ${router}-pkgs.txt
	ssh -x root@${router} "ps w" > ${router}-ps.txt
	# Tarball of the writable jffs overlay.
	ssh -x root@${router} "cd /tmp;mkdir mnt;mount /dev/mtdblock/4 mnt;cd mnt;tar -czf - *;cd ..;umount mnt;rmdir mnt" > ${router}-tar.tgz

	# Bundle everything, drop the individual files, timestamp the bundle.
	tar -cvzf ${router}-bkup.tgz \
		$router-cpuinfo.txt \
		$router-df.txt \
		$router-ifconfig.txt \
		$router-linux.trx \
		$router-meminfo.txt \
		$router-mtd.txt \
		$router-nvram.bin \
		$router-pkgs.txt \
		$router-ps.txt \
		$router-tar.tgz

	rm -f \
		$router-cpuinfo.txt \
		$router-df.txt \
		$router-ifconfig.txt \
		$router-linux.trx \
		$router-meminfo.txt \
		$router-mtd.txt \
		$router-nvram.bin \
		$router-pkgs.txt \
		$router-ps.txt \
		$router-tar.tgz

	rotate_old "${router}-bkup.tgz"
done
| true |
4eab216d41cffd08b27d1e94132963560bfb027b | Shell | BenjaminSchiller/SparkWrapper | /execution/spark.sh | UTF-8 | 3,983 | 3.71875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
if [[ "$#" != "6" ]]; then
echo 'expecting 6 arguments:' >&2
echo ' flink.sh $dataset $states $metric $metricArguments $workers $run' >&2
exit
fi
function printTime {
if [[ -d /Users/benni ]]; then
gdate +%s%N
else
date +%s%N
fi
}
source config.cfg
dataset=$1
states=$2
metric=$3
metricArguments=$4
workers=$5
run=$6
# 1: type (cc, dd, sssp, tc)
case $metric in
"cc")
metricId=$ccId
;;
"dd")
metricId=$ddId
;;
"sssp")
metricId=$ssspId
;;
"tc")
metricId=$tcId
;;
*)
echo "invalid metric key: $metric" >&2
exit
;;
esac
datasetDir="${mainDatasetDir}/${dataset}"
runtimesDir="${mainRuntimesDir}/${dataset}/$states/$metric/$workers"
logDir="${mainLogDir}/${dataset}/$states/$metric/$workers"
outputDir="${mainOutputDir}/${dataset}/$states/$metric/$workers"
if [[ ! -d $runtimesDir ]]; then mkdir -p $runtimesDir; fi
if [[ ! -d $logDir ]]; then mkdir -p $logDir; fi
if [[ ! -d outputDir ]]; then mkdir -p $outputDir; fi
runtimes="${runtimesDir}/${run}${runtimesSuffix}"
if [[ -f $runtimes ]]; then echo "$runtimes exists"; exit; fi
./start-master.sh
for s in $(seq 0 $((states-1))); do
datasetPathV="${datasetDir}/${s}${datasetVSuffix}"
datasetPathE="${datasetDir}/${s}${datasetESuffix}"
if [[ ! -f $datasetPathV ]]; then echo "$datasetPathV does not exist" >&2; exit; fi
if [[ ! -f $datasetPathE ]]; then echo "$datasetPathE does not exist" >&2; exit; fi
total_start=$(printTime)
if [[ $metric == "sssp" ]]; then
for vertexId in $(echo $metricArguments | tr "," " "); do
log="${logDir}/${run}-${s}--${vertexId}${logSuffix}"
err="${logDir}/${run}-${s}--${vertexId}${errSuffix}"
output="${outputDir}/${run}-${s}--${vertexId}${outputSuffix}"
# spark-shell -i exec_job.scala --conf spark.driver.extraJavaOptions="-D${datasetPathV},${datasetPathE},${output},${metricId},${vertexId}" --master local[${workers}] --jars $jarPath > >(tee $log) 2> >(tee $err >&2)
# ./submit_spark_job.sh ${datasetPathV} ${datasetPathE} ${output} ${metricId} ${vertexId} ${workers} > >(tee $log) 2> >(tee $err >&2)
$sparkSubmitPath --conf spark.driver.extraJavaOptions="-D${datasetPathV},${datasetPathE},${output},${metricId},${vertexId}" --master local[${workers}] --jars $sparkJarPath --class DGARunner $scalaRunnerPath > >(tee $log) 2> >(tee $err >&2)
done
else
log="${logDir}/${run}-${s}${logSuffix}"
err="${logDir}/${run}-${s}${errSuffix}"
output="${outputDir}/${run}-${s}${outputSuffix}"
# spark-shell -i exec_job.scala --conf spark.driver.extraJavaOptions="-D${datasetPathV},${datasetPathE},${output},${metricId},${metricArguments}" --master local[${workers}] --jars $jarPath > >(tee $log) 2> >(tee $err >&2)
# ./submit_spark_job.sh ${datasetPathV} ${datasetPathE} ${output} ${metricId} ${metricArguments} ${workers} > >(tee $log) 2> >(tee $err >&2)
$sparkSubmitPath --conf spark.driver.extraJavaOptions="-D${datasetPathV},${datasetPathE},${output},${metricId},${metricArguments}" --master local[${workers}] --jars $sparkJarPath --class DGARunner $scalaRunnerPath > >(tee $log) 2> >(tee $err >&2)
fi
total_end=$(printTime)
duration=$((${total_end} - ${total_start}))
ioTime=$(grep "Elapsed time:" $log | head -n1 | sed 's/Elapsed time: //g' | sed 's/ns//g')
executionTime=$(grep "Elapsed time:" $log | tail -n1 | sed 's/Elapsed time: //g' | sed 's/ns//g')
measuredTime=$(($ioTime+$executionTime))
echo "$s $duration $measuredTime $ioTime $executionTime" >> $runtimes
echo "$s $duration $measuredTime $ioTime $executionTime"
done
sumA=$(awk '{ sum += $2; } END { print sum; }' "$runtimes")
sumB=$(awk '{ sum += $3; } END { print sum; }' "$runtimes")
sumC=$(awk '{ sum += $4; } END { print sum; }' "$runtimes")
sumD=$(awk '{ sum += $5; } END { print sum; }' "$runtimes")
echo "TOTAL $sumA $sumB $sumC $sumD" >> $runtimes
echo "TOTAL $sumA $sumB $sumC $sumD"
./stop-master.sh
# Elapsed time: 2337817667ns => IO
# Running connected components...
# Elapsed time: 30468120859ns => EXECUTION
| true |
eeb8a22d798abbe93f988aebc3e33feaba68858f | Shell | armarquez/dotfiles | /zsh/zsh/.zshrc.d/functions.local | UTF-8 | 3,189 | 3.609375 | 4 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | #!/bin/bash
##
# Copyright 2020 Anthony Marquez <@boogeymarquez>
#
# BSD licensed, see LICENSE.txt in this repository
# Check if a command exists
# Uses the `command -v` shell builtin instead of the external, non-portable
# `which`; all output is discarded -- only the exit status matters.
function can_haz() {
  command -v "$@" > /dev/null 2>&1
}
# Clean up dead branches
# From https://gist.git.musta.ch/bruce-sherrod/ad232024768413dad95f2a009b39852c
# Detects local branches whose content was squash-merged into master and
# deletes them.  Without arguments it only *prints* the delete commands
# (dry run); pass -f to actually delete.
function delete_dead_branches() {
  # Thanks Toland.Hon@airbnb.com for finding this:
  # https://github.com/not-an-aardvark/git-delete-squashed
  git checkout -q master
  git for-each-ref refs/heads/ "--format=%(refname:short)" | while read -r branch; do
    # Synthesise a commit containing the branch's tree on top of its merge
    # base with master; if `git cherry` marks that commit with "-", an
    # equivalent change is already in master and the branch is dead.
    mergebase=$(git merge-base master "$branch")
    mergepoint=$(git rev-parse "$branch^{tree}")
    tempcommit=$(git commit-tree "$mergepoint" -p "$mergebase" -m _)
    cherry=$(git cherry master "$tempcommit")
    if [[ $cherry == "-"* ]] ; then
      if [ "$1" == "-f" ]; then
        git branch -D "$branch"
      else
        echo git branch -D "$branch"
      fi
    fi
  done
  git checkout -q -
}
# General Update
# Run Homebrew's update/upgrade cycle -- a no-op anywhere but on a macOS
# host that actually has brew installed (guard clauses replace the
# original nested ifs; the exit status is 0 in all skipped cases, as before).
function update_brew() {
  [[ "$(uname -s)" == "Darwin" ]] || return 0
  can_haz brew || return 0
  brew update
  brew upgrade
}
# Brew switch is deprecated, so here is an alternative specified here: https://github.com/Homebrew/discussions/discussions/339#discussioncomment-350814
# Usage: brew_switch <package> <version>
# Re-points $(brew --prefix)/opt/<package> at Cellar/<package>/<version>
# and relinks it, emulating the removed `brew switch` command.
function brew_switch() {
  if [[ "$(uname -s)" == "Darwin" ]]; then
    if can_haz brew; then
      # NOTE(review): pkg/version are not declared `local`, so they leak
      # into the calling shell (this file is sourced from the shell rc).
      pkg=$1
      version=$2
      brew unlink "$pkg"
      # The subshell keeps the pushd contained -- no matching popd needed.
      (
        pushd "$(brew --prefix)/opt"
        rm -f "$pkg"
        ln -s "../Cellar/$pkg/$version" "$pkg"
      )
      brew link "$pkg"
    fi
  fi
}
# This is not commented out for `work-airbnb` branch
#####################################
########## AIRBNB SPECIFIC ##########
#####################################
# Updating AirBnB Tech Stack
# Need to be on VPN
# function update_airbnb() {
# if can_haz yk; then
# # Prompt for passphrase so we can access git.musta.ch
# yk
# # Update homebrew
# update_brew
# # Update AirBnB specific tooling
# if can_haz airlab; then
# # Update AWS session token for default role
# if can_haz remfa; then
# remfa --aws-role default
# fi
# # Update airlab tools (specifically kube-gen)
# airlab update
# # Kubernetes certificate access using IAM
# if can_haz k; then
# AWS_PROFILE_NOMFA=airbnb-users-kubernetes k certs-iam
# fi
# fi
# fi
# }
# function yaateeh(){
# # Update your AirBnB stack
# update_airbnb
# # Separator
# # https://stackoverflow.com/questions/5947742/how-to-change-the-output-color-of-echo-in-linux
# echo "\n"
# echo "$(tput bold)$(tput setaf 5)$(tput setab 0)## WHAT'S ON YOUR AGENDA FOR TODAY? ##$(tput sgr 0)\n"
# # Output today's agenda
# today
# }
# function gwr () {
# $(git rev-parse --show-toplevel)/gradlew $@
# }
###########
### END ###
########### | true |
c49e0873f2d9cd586319b3b4dd1519e92193ef1f | Shell | munish-b/IA-Hardware-Composer | /tests/hwc-val/tests/hwc/host_scripts/valimport | UTF-8 | 1,891 | 3.953125 | 4 | [] | no_license | #!/bin/bash
#
# Usage: source valimport $1
#
# Extracts archive $1 to directory $1
# Pushes the necessary HWC validation scripts and binaries to the client.
# Adds host_scripts directory to the path on the host.
#
echo "$0 $@"
# pushdir <host-dir> <target-dir>
# adb-version-independent push of the *contents* of <host-dir> to <target-dir>:
# "adb push a/b c" pushes the files of a/b into c on some adb versions and
# into c/b on others, so we cd into the directory and push "." instead.
function pushdir()
{
    # Sanity check on the source directory (quoted: paths may contain spaces)
    if [ ! -d "$1" ]; then
        echo >&2 "No directory $1 found"
        return 1
    fi
    # Run in a subshell so the cd can never leak into the caller, even if
    # $ADB fails part-way through; also guard the cd itself.
    (
        cd "$1" || exit 1
        "$ADB" shell mkdir -p "$2"
        "$ADB" push . "$2"
    )
}
if [ "$ADB" == "" ]
then
export ADB=`which adb`
fi
TARFILE_FULLPATH=`readlink -f $1`
TARFILE="$(basename ${TARFILE_FULLPATH})"
TARFILE_EXT="${TARFILE##*.}"
TARFILE_NAME="${TARFILE_FULLPATH%.$TARFILE_EXT}"
TARFILE_DIR="${TARFILE_NAME%.tar}"
mkdir -p ${TARFILE_DIR}
tar xz -C ${TARFILE_DIR} -f ${TARFILE_FULLPATH}
export HWCVAL_ROOT=$TARFILE_DIR/val_hwc
export PATH=$HWCVAL_ROOT/host_scripts:$PATH
export HWCVAL_TARGET_DIR=/data/validation/hwc
$ADB root>/dev/null
$ADB wait-for-device
# NB $ADB remount does NOT necessarily work
$ADB shell mount -o rw,remount /system
$ADB shell mkdir -p ${HWCVAL_TARGET_DIR}
# We are overwriting coreu executable
$ADB shell setenforce 0
pushdir ${HWCVAL_ROOT}/bin /vendor/bin
$ADB shell setenforce 1
pushdir ${HWCVAL_ROOT}/lib /vendor/lib
pushdir ${HWCVAL_ROOT}/lib64 /vendor/lib64
pushdir ${HWCVAL_ROOT}/client_scripts ${HWCVAL_TARGET_DIR}
pushdir ${HWCVAL_ROOT}/images ${HWCVAL_TARGET_DIR}/images
# Make exes and scripts executable
$ADB shell chmod 777 /vendor/bin/*
$ADB shell chmod 777 ${HWCVAL_TARGET_DIR}/*.sh
# Make sure the logs go somewhere
if [[ -z $CLIENT_LOGS ]]
then
export CLIENT_LOGS=$HOME/client_logs
fi
| true |
a4620ebcfd08aacd03b982ed5079360deeeed072 | Shell | jmptrader/gthomas-bashrc | /projects/git-change-origin.sh | UTF-8 | 310 | 3.40625 | 3 | [] | no_license | #!/bin/sh
# Rewrite the remote host in every git config under the current tree:
# replace $PATTERN with $REPLACEMENT, keeping a timestamped backup ($EXT).
# All three knobs can be overridden from the environment.
PATTERN=${PATTERN:-gthomas\.homelinux\.org}
REPLACEMENT=${REPLACEMENT:-gthomas.eu}
EXT=${EXT:-.$(date '+%Y%m%d%H%M%S')}
# read -r: don't mangle backslashes; IFS=: keep leading/trailing whitespace.
find . -name .git -type d | while IFS= read -r gitdn; do
    fn="$gitdn/config"
    grep -q "$PATTERN" "$fn" && {
        echo "$fn"
        sed -i"${EXT}" -e "s/$PATTERN/$REPLACEMENT/g" "$fn"
    }
done
| true |
952d989981ef53440628f8d8c54890911e9e1628 | Shell | jirikvita/Semiboosted_ttbar | /scripts/renameNEW.sh | UTF-8 | 140 | 2.921875 | 3 | [] | no_license | #!/bin/bash
echo "Renaming NEW files..."
for i in `ls *_NEW*.root` ; do
j=`echo $i | sed "s|_NEW||g"`
echo $i $j
mv $i $j
done
| true |
7b88b53bed803bdc06d098d29adfdeb42574adde | Shell | bashfunc/bashTool | /findDevByVendorName/uninstall | UTF-8 | 211 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
echo "Removing files..."
if [ -d ~/.devByVendor ]; then
$(rm -r ~/.devByVendor)
fi
if [ -f /usr/local/bin/findDevByVendorName ]; then
$(sudo rm /usr/local/bin/findDevByVendorName)
fi
echo "Done." | true |
241393e8d82ac934d4241efb7e8c759325f78a2a | Shell | JPGOMEZP/Linux_Core_Kernel | /otc_lck_gdc_mx_test_suite-lck_suite/export_paths.sh | UTF-8 | 7,297 | 3.234375 | 3 | [] | no_license | #!/bin/bash
###############################################################################
#
# Copyright (C) 2015 Intel - http://www.intel.com/
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation version 2.
#
# This program is distributed "as is" WITHOUT ANY WARRANTY of any
# kind, whether express or implied; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
###############################################################################
############################ CONTRIBUTORS #####################################
# Author: Juan Carlos Alonso <juan.carlos.alonso@intel.com>
#
# Jan, 2016.
# Juan Carlos Alonso <juan.carlos.alonso@intel.com>
# - Initial draft.
# - modified script to align it to LCK standard.
# Mar, 2016
# Juan Carlos Alonso <juan.carlos.alonso@intel.com>
# - Updated script to remove unncessary and duplicated lines.
# Apr, 2016
# Juan Carlos Alonso <juan.carlos.alonso@intel.com>
# - Added paths for caps lock, num lock and scroll lock to export them for
# EC tests.
# - Get platform name in order to set the correct paths for EC tests,
# since there are some differences between platforms.
# May, 2016
# Juan Carlos Alonso <juan.carlos.alonso@intel.com>
# - Modified the way to get CAPS, NUM and SCROLL lock keys/leds for EC
# dinamically
# Jun, 2016
# Juan Carlos Alonso <juan.carlos.alonso@intel.com>
# - Added USB paths
# - Added SDHCI paths
# - Added PWM paths
# Juan, 2016
# Juan Pablo Gomez <juan.p.gomez@intel.com>
# - Added LPC paths
# Jul, 2016
# Juan Carlos Alonso <juan.carlos.alonso@intel.com>
# - Added SPI paths
# Sep. 2016
# Juan Carlos Alonso <juan.carlos.alonso@intel.com>
# - Added $SPI_SCRIPTS path
# - Deleted 'export LOGS' variable from this script
# - Added '$USB_SCRIPTS' path to export it
# Octi, 2016
# Juan Pablo Gomez <juan.p.gomez@intel.com>
# - Added Thermal paths
#
############################# DESCRIPTION #####################################
# This script exports both paths and variables for a corresponding driver.
############################# FUNCTIONS #######################################
############################ DO THE WORK ######################################
# Export per-driver test paths/variables. $1 selects the driver under test;
# this script is intended to be sourced so the exports reach the caller.
DRIVER=$1
export PATH="$PATH:$PWD"
# EXPORT COMMON PATHS
export PATH="$PATH:$PWD/LCK-Test/testcases/scripts/common"
export PATH="$PATH:$PWD/LCK-Test/testcases/scripts/$DRIVER"
export RUNTEST="$PWD/LCK-Test/runtest/$DRIVER"
export WHAT_DRIVERS="$PWD/what_drivers"
export TEST_SUITE="$PWD/test_suite"
# EXPORT PATHS AND VARIABLES FOR WDT
if [ $DRIVER == "wdt" ]; then
  export PATH="$PATH:$PWD/LCK-Test/testcases/wdt_test_suite"
  export WDT_SCRIPTS="$PWD/LCK-Test/testcases/scripts/wdt"
  export WDAT_DIR="/sys/bus/platform/drivers/wdat_wdt"
  export WDT_SUITE="$PWD/LCK-Test/testcases/wdt_test_suite"
  export wdat_wdt="wdat_wdt"
  export iTCO_wdt="iTCO_wdt"
  export i2c_smbus="i2c-smbus"
  export i2c_i801="i2c-i801"
# EXPORT PATHS AND VARIABLES FOR SATA
elif [ $DRIVER == "sata" ]; then
  export PATH="$PATH:$PWD/LCK-Test/testcases/filesystem_test_suite"
  export AHCI_DIR="/sys/bus/pci/drivers/ahci"
  export SATA_SCRIPTS="$PWD/LCK-Test/testcases/scripts/sata"
  export TEST_DIR="$PWD/LCK-Test/testcases/scripts/sata/test_dir"
  export TEST_MNT_DIR_1="$TEST_DIR/mnt_dev_1"
  export TEST_MNT_DIR_2="$TEST_DIR/mnt_dev_2"
  export TEST_TMP_DIR="$TEST_DIR/tmp"
# EXPORT PATHS AND VARIABLES FOR PCIe
elif [ $DRIVER == "pcie" ]; then
  export PCI_DIR="/sys/bus/pci/drivers/pcieport"
  export PATH="$PATH:$PWD/LCK-Test/testcases/scripts/sata"
  export PATH="$PATH:$PWD/LCK-Test/testcases/filesystem_test_suite"
  export AHCI_DIR="/sys/bus/pci/drivers/ahci"
  export TEST_DIR="$PWD/LCK-Test/testcases/scripts/sata/test_dir"
  export TEST_MNT_DIR_1="$TEST_DIR/mnt_dev_1"
  export TEST_MNT_DIR_2="$TEST_DIR/mnt_dev_2"
  export TEST_TMP_DIR="$TEST_DIR/tmp"
# EXPORT PATHS AND VARIABLES FOR LPC
elif [ $DRIVER == "lpc" ]; then
  export LPC_PSMOUSE_DIR="/sys/bus/serio/drivers/psmouse"
  export LPC_ATKBD_DIR="/sys/bus/serio/drivers/atkbd"
# EXPORT PATHS AND VARIABLES FOR EC
elif [ $DRIVER == "ec" ]; then
  export EC_DIR="/sys/bus/acpi/drivers/ec"
  export LID_BUTTON="/proc/acpi/button/lid/LID0/state"
  # Discover caps/num/scroll LED entries dynamically; serio- vs pci-backed
  # entries differ between platforms.
  # NOTE(review): parses `ls -l` output — assumes led names contain no
  # whitespace; confirm on target platforms.
  S_CAPS_DIR=`ls -l /sys/class/leds/ | grep caps | grep serio | awk -F"->" '{print $1}' | awk '{print $9}'`
  P_CAPS_DIR=`ls -l /sys/class/leds/ | grep caps | grep pci | awk -F"->" '{print $1}' | awk '{print $9}'`
  S_NUM_DIR=`ls -l /sys/class/leds/ | grep num | grep serio | awk -F"->" '{print $1}' | awk '{print $9}'`
  P_NUM_DIR=`ls -l /sys/class/leds/ | grep num | grep pci | awk -F"->" '{print $1}' | awk '{print $9}'`
  S_SCROLL_DIR=`ls -l /sys/class/leds/ | grep scroll | grep serio | awk -F"->" '{print $1}' | awk '{print $9}'`
  P_SCROLL_DIR=`ls -l /sys/class/leds/ | grep scroll | grep pci | awk -F"->" '{print $1}' | awk '{print $9}'`
  export SER_CAPS_LOCK_DIR="/sys/class/leds/${S_CAPS_DIR}"
  export PCI_CAPS_LOCK_DIR="/sys/class/leds/${P_CAPS_DIR}"
  export SER_NUM_LOCK_DIR="/sys/class/leds/${S_NUM_DIR}"
  export PCI_NUM_LOCK_DIR="/sys/class/leds/${P_NUM_DIR}"
  export SER_SCROLL_LOCK_DIR="/sys/class/leds/${S_SCROLL_DIR}"
  export PCI_SCROLL_LOCK_DIR="/sys/class/leds/${P_SCROLL_DIR}"
# EXPORT PATHS AND VARIABLES FOR MEI
elif [ $DRIVER == "mei" ]; then
  export PATH="$PATH:$PWD/LCK-Test/testcases/scripts/mei/mei"
  export MEI_DIR="/sys/bus/pci/drivers/mei_me"
# EXPORT PATH AND VARIABLES FOR RTC
elif [ $DRIVER == "rtc" ]; then
  export PROC_RTC="/proc/driver/rtc"
  export SYS_RTC="/sys/class/rtc/rtc0"
  export ALARM="/sys/class/rtc/rtc0/wakealarm"
# EXPORT PATH AND VARIABLES FOR USB
elif [ $DRIVER == "usb" ]; then
  export USB_SCRIPTS="$PWD/LCK-Test/testcases/scripts/usb"
  export MODULE_PATH="/sys/module"
  export USB_PATH="/sys/bus/usb/devices"
  export DEVICE_PATH="/sys/kernel/debug/usb/devices"
  export PATH="$PATH:$PWD/LCK-Test/testcases/scripts/sata"
  export PATH="$PATH:$PWD/LCK-Test/testcases/filesystem_test_suite"
  export AHCI_DIR="/sys/bus/pci/drivers/ahci"
  export TEST_DIR="$PWD/LCK-Test/testcases/scripts/sata/test_dir"
  export TEST_MNT_DIR_1="$TEST_DIR/mnt_dev_1"
  export TEST_MNT_DIR_2="$TEST_DIR/mnt_dev_2"
  export TEST_TMP_DIR="$TEST_DIR/tmp"
# EXPORT PATH AND VARIABLES FOR SDHCI
elif [ $DRIVER == "sdhci" ]; then
  export MMC_PATH="/sys/bus/mmc/drivers/mmcblk"
  export DEBUGFS_MNT="/sys/kernel/debug"
  export PATH="$PATH:$PWD/LCK-Test/testcases/scripts/sata"
  export TEST_DIR="$PWD/LCK-Test/testcases/scripts/sata/test_dir"
  export PATH="$PATH:$PWD/LCK-Test/testcases/filesystem_test_suite"
  export TEST_MNT_DIR_1="$TEST_DIR/mnt_dev_1"
  export TEST_MNT_DIR_2="$TEST_DIR/mnt_dev_2"
  export TEST_TMP_DIR="$TEST_DIR/tmp"
# EXPORT PATH AND VARIABLES FOR PWM
elif [ $DRIVER == "pwm" ]; then
  export PWM_DEVICE="/sys/class/pwm"
  export PWM_PCI="/sys/bus/pci/drivers"
  export PWM_PLATFORM="/sys/bus/platform/drivers"
# EXPORT PATH AND VARIABLES FOR SPI
elif [ $DRIVER == "spi" ]; then
  export SPI_SCRIPTS="$PWD/LCK-Test/testcases/scripts/spi"
  export SPI_DRIVERS="/sys/bus/platform/drivers/pxa2xx-spi"
  export SPI_MASTER="/sys/class/spi_master"
elif [ $DRIVER == "ith" ]; then
  export ITH_SCRIPTS="$PWD/LCK-Test/testcases/scripts/ith"
# EXPORT PATH AND VARIABLES FOR THERMAL
elif [ $DRIVER == "thermal" ]; then
  export THERMAL_DIR="/sys/bus/acpi/drivers/thermal"
fi
| true |
1bb6abc73694fa7b1f35fc3323656795bcd21ef7 | Shell | dunkelowe/monada | /sht21.sh | UTF-8 | 599 | 2.625 | 3 | [] | no_license | #!/bin/sh
#LogInterval=600
# Poll the SHT21 sensor forever: every 5 seconds (wall-clock aligned) read
# temperature/humidity via ./rpi, post it with node, and blink the LED.
while true
do
	TimeString=$(date +"%d.%m.%Y %H:%M:%S")
	Timestamp=$(date +%s)
	# Normalize epoch against the local timezone offset of the epoch start.
	TimeOffset=$(date -d '1970-01-01 0 sec' +%s)
	Timestamp=$(($Timestamp - TimeOffset))
	if [ $(($Timestamp % 5)) -eq 0 ]
	then
		Sht21Data=$(./rpi S)
		echo "$TimeString\t$Sht21Data"
		#python /home/pi/Raspi-SHT21/Tela.py
		# NOTE(review): ./rpi is sampled a second time here, so the posted
		# reading may differ from $Sht21Data above — confirm intended.
		node posted.js $(./rpi S) $(date +"%d.%m.%Y") $(date +"%H:%M:%S")
		./led
		#if [ $(($Timestamp % $LogInterval)) -eq 0 ]
		#then
		#echo "$TimeString\t$Timestamp\t$Sht21Data" >> sht21-data.csv
		#./function-ftp-upload.sh
		#fi
	fi
	#sleep 1
done
| true |
319251894274aed3ac1ecf4abb96c4f4f9dca92a | Shell | GrigLars/setiboinc | /boinc_docker_init | UTF-8 | 2,004 | 3.78125 | 4 | [] | no_license | #!/bin/sh
# Simplified init for docker
# /etc/init.d/boinc
# Start/stop/restart/attach
# For docker, I simplified this since Docker is a very ephemeral thing.
# This has been tested on a vagrant Debian 8 (Jessie) 64 bit, was not installed as
# a service (no point), and just runs from the dockerfile
# For later: maybe have this log mount outisde the box for debugging purposes, but right now
# the various "tee" commands dump it to the TTY anyway, and won't let go until either docker
# crashes or is stopped from another terminal interface. There's probably a better way to do
# this, but it "works" and I got other shit to do anyway.
BOINC_LOG="/var/log/boinc.log"
BOINC_PATH="/opt/BOINC"
# Run the BOINC client in the foreground, teeing output to the log.
boinc_start() {
	if [ -x /opt/BOINC/run_client ]; then
		# echo "Starting BOINC..."
		${BOINC_PATH}/run_client --allow_remote_gui_rpc | tee -a ${BOINC_LOG}
	else
		echo "FAIL: cannot find ${BOINC_PATH}/run_client. Exiting." | tee -a ${BOINC_LOG}
		exit 1
	fi
}
# Attach the SETI@home project account, then start the client.
boinc_attach(){
	# In a few cases, "attach" stops and doesn't get more work units unless I restart it, which in Docker world
	# means the container stops and then you have to do this all over again. This is a crude workaround for now.
	${BOINC_PATH}/boinc -no_gui_rpc -attach_project http://setiathome.berkeley.edu d14af2375aa9c85dd1251f0f04d09654 | tee -a ${BOINC_LOG} && boinc_start
	# echo "Starting BOINC with Grig's account attached ..."
}
boinc_stop() {
	# echo "Stopping BOINC..."
	echo "Boinc stopped manually by user $0" >> ${BOINC_LOG} &
	# Found out the hard way that "kill all" needs the "psmisc" pacakge :/
	killall boinc
}
boinc_restart() {
	boinc_stop
	sleep 3
	boinc_start
}
# Dispatch on the init-style verb given as $1.
case "$1" in
'start')
	boinc_start
	;;
'stop')
	boinc_stop
	;;
'restart')
	boinc_restart
	;;
'attach')
	boinc_attach
	;;
*)
	echo "$0 start|stop|restart|attach"
esac
exit 0
| true |
1bda6774703422f0b7ba22d1bf6d7445056c6f89 | Shell | Luzifer/cfg | /bin/config-git-status.sh | UTF-8 | 160 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Exit 0 only if every git repo under ~/.cfg has a clean work tree.
# A glob replaces `ls -1` parsing so entry names can't be word-split,
# and every expansion is quoted.
for repo in "${HOME}/.cfg"/*; do
    [ -e "$repo" ] || continue  # glob didn't match: no repos configured
    [ "$(git --git-dir="$repo/" --work-tree="$HOME" status --porcelain | wc -l)" -eq 0 ] || exit 1
done
exit 0
| true |
d756585b343ac8077fa892c35e764cbff5112136 | Shell | rbs28/shared-admissions-college-summary | /scripts/zipLambdaSource.sh | UTF-8 | 498 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
script_path="`dirname \"$BASH_SOURCE\"`"
cd $script_path/..
here=`pwd`
cd responders/fromTable
lambda_files=$(ls *.js)
if [ -f /tmp/college-SummaryResponderLambda.zip ]; then
zip -rf /tmp/college-SummaryResponderLambda.zip $lambda_files package.json node_modules
else
zip -r /tmp/college-SummaryResponderLambda.zip $lambda_files package.json node_modules
fi
aws s3 cp /tmp/college-SummaryResponderLambda.zip s3://shared-admissions-college-transcript-config/SummaryResponderLambda.zip
| true |
0d9fae5da8c80ff572c93f9e0f5884a933b32b4a | Shell | alvin921/FishChatServer | /r.sh | UTF-8 | 4,082 | 3.796875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Detect the host OS (lowercased) and pick the built-binary suffix:
# non-Linux hosts (cygwin/msys) produce *.exe files.
OS=$(uname | awk '{ print tolower($1) }')
#echo "OS=$OS"
PREFIX=""
case "$OS" in
    linux) ;;              # Linux binaries have no suffix
    *) PREFIX=".exe" ;;
esac
#echo "PREFIX=$PREFIX"
# Per-action usage text. FIX: three messages misspelled "msg_erver";
# corrected to "msg_server" to match the real component name.
function help_clean {
    echo -e "$0 clean"
    echo -e "    clean exe files of gateway/msg_server/manager/router/monitor/client"
}
function help_build {
    echo -e "$0 build <nil>|server|gateway|msg_server|manager|router|monitor|client"
    echo -e "    <nil>|server: means to build all: gateway/msg_server/manager/router/monitor/client"
}
function help_start {
    echo -e "$0 start <nil>|server|redis|mongo"
    echo -e "    <nil>|server: means to start all: msg_server/gateway/manager/router/monitor"
}
function help_stop {
    echo -e "$0 stop <nil>|server|redis|mongo"
    echo -e "    <nil>|server: means to stop all: msg_server/gateway/manager/router/monitor"
}
# Print the full usage banner followed by every per-action help block.
function Usage {
    echo -e "Usage: $0 <cmd> <arg>"
    echo -e "<cmd> : clean|build|start|stop"
    echo -e "<arg> : <nil>|server|redis|mongo|gateway|msg_server|manager|router|monitor|client"
    echo -e "Descriptions:"
    help_clean
    help_build
    help_start
    help_stop
}
# Show help for one command ($1), or the full usage banner when unknown.
function proc_help {
    case $1 in
        clean)
            help_clean ;;
        build)
            help_build ;;
        start)
            help_start ;;
        stop)
            help_stop ;;
        *)
            Usage ;;
    esac
}
# Remove the built binary <name>/<name>$PREFIX for component $1.
# Quoted and with `--` so unusual names can't word-split or be read as flags.
function clean {
    rm -f -- "$1/$1$PREFIX"
}
# Delete the built binaries of every component.
function proc_clean {
    clean gateway
    clean monitor
    clean msg_server
    clean router
    clean manager
    clean client
}
# Build one known component in its own subdirectory with `go build`.
function build {
    case "$1" in
        gateway|msg_server|router|manager|monitor|client)
            echo -e "===>building $1..."
            cd $1
            go build -v
            cd ..
            ;;
    esac
}
# Build dispatcher. The "a$1" prefix makes the empty-argument case
# pattern-matchable ("a") alongside the named components.
function proc_build {
    case "a$1" in
        agateway|amsg_server|arouter|amanager|amonitor|aclient)
            build $1
            ;;
        aserver|a)
            # no arg or "server": build everything
            build gateway
            build msg_server
            build router
            build manager
            build monitor
            build client
            ;;
        *)
            proc_help ;;
    esac
}
# Interactively start one component; answering "n" aborts the whole script.
function start {
    echo "#======================================="
    read -p "start $1?[y|n]" ANS
    case $ANS in
        n|N|no|NO|No) exit 0 ;;
        y|Y|yes|Yes) ;;
        *) ;;
    esac
    case "x$1" in
        "xmanager"|"xmonitor"|"xrouter"|"xgateway")
            # Each service lives in ./<name>/<name>$PREFIX with a JSON config.
            ./$1/$1$PREFIX -conf_file=./$1/$1.json &
            ;;
        "xmsg_server")
            # Two msg_server instances listening on different ports.
            ./$1/$1$PREFIX -conf_file=./$1/$1.19001.json &
            ./$1/$1$PREFIX -conf_file=./$1/$1.19000.json &
            ;;
        "xredis")
            if [ $OS == "linux" ]
            then
                sudo /etc/init.d/redis_6379 start
            else
                net start redis
            fi
            ;;
        "xmongo")
            if [ $OS == "linux" ]
            then
                # BUG FIX: the original wrote `$DIR=$HOME/...`, which tries
                # to execute a command named "=..." instead of assigning DIR.
                DIR=$HOME/RDAWatchServer
                if [ ! -d "$DIR/db" ]; then
                    mkdir "$DIR/db"
                fi
                mongod --dbpath="$DIR/db" --storageEngine=mmapv1 --logpath="$DIR/mongod.log" --logappend --fork &
            else
                net start mongodb
            fi
            ;;
    esac
}
# Start dispatcher: no arg / "server" starts every app component;
# redis/mongo are handled individually; anything else prints help.
function proc_start {
    case "x$1" in
        "x"|"xserver")
            start msg_server
            start gateway
            start router
            start manager
            start monitor
            ;;
        "xredis"|"xmongo")
            start $1
            ;;
        *)
            proc_help ;;
    esac
}
# Kill every process whose name matches $1.
# pgrep avoids the classic `ps | grep` pitfalls: matching the grep process
# itself, or this very script when $1 appears in its own command line.
function stop {
    local pids item
    pids=$(pgrep -- "$1")
    for item in $pids; do
        echo "kill $1:$item"
        # SIGKILL kept for parity with the original behavior.
        kill -9 "$item"
    done
}
# Stop dispatcher: reverse start order for "server"; redis/mongo use their
# native service controls (Linux init script vs. Windows `net`).
function proc_stop {
    case "x$1" in
        "x"|"xserver")
            stop monitor
            stop manager
            stop router
            stop gateway
            stop msg_server
            ;;
        "xmanager"|"xmonitor"|"xrouter"|"xgateway"|"xmsg_server")
            stop $1
            ;;
        "xredis")
            if [ $OS == "linux" ]
            then
                sudo /etc/init.d/redis_6379 stop
            else
                net stop redis
            fi
            ;;
        "xmongo")
            if [ $OS == "linux" ]
            then
                # kills the mongod process directly (no init script)
                stop mongod
            else
                net stop mongodb
            fi
            ;;
        *)
            proc_help ;;
    esac
}
# Print "name:pid" for every process whose name matches $1.
# pgrep replaces `ps -ef | grep`, which also matched its own grep process.
function status {
    local pids item
    pids=$(pgrep -- "$1")
    for item in $pids; do
        echo "$1:$item"
    done
}
# Status dispatcher; redis/mongo status reporting was never implemented
# (the commented-out skeletons below are intentional placeholders).
function proc_status {
    case "x$1" in
        "x"|"xserver")
            status monitor
            status manager
            status router
            status gateway
            status msg_server
            ;;
        "xmanager"|"xmonitor"|"xrouter"|"xgateway"|"xmsg_server")
            status $1
            ;;
        "xredis")
            #if [ $OS == "linux" ]
            #then
            #else
            #fi
            ;;
        "xmongo")
            #if [ $OS == "linux" ]
            #then
            #else
            #fi
            ;;
    esac
}
case "$1" in
clean)
proc_clean $2 ;;
build)
proc_build $2 ;;
start)
proc_start $2 ;;
stop)
proc_stop $2 ;;
status)
proc_status $2 ;;
help)
proc_help $2 ;;
*)
proc_help;;
esac
| true |
37b26f558fc08c37afac91c8f8fe16de8981d03b | Shell | paperbag-zz/adhara-images | /resticw/resticw.sh | UTF-8 | 2,979 | 4.03125 | 4 | [] | no_license | #! /usr/bin/env bash
set -eo pipefail
# On every exit, translate the script's exit code into a 1/0 pushgateway
# metric (only for backup runs with a gateway configured).
trap 'catch $?' EXIT
catch(){
  if [[ "$action" == "backup" && -n "$pushgateway_url" ]]; then
    # checking exit code to generate the proper value
    if [[ $1 == 0 ]]; then
      pushgateway 1
    else
      pushgateway 0
    fi
  fi
}
# Print usage and exit 1 (heredoc content must stay verbatim).
help() {
  cat <<-EOF
AWS S3:
  AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables are required
Microsoft Azure Blob Storage:
  AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY environment variables are required
Usage:
  RESTIC_REPOSITORY and RESTIC_PASSWORD environment variables are required.
  To generate accurate prometheus metric consider POD_NAME and NAMESPACE_NAME environment variables
  resticw.sh <backup|restore> --path <absolute_path> --delete-snapshots --snapshots-to-keep=4
    -p | --path backup directory
    -d | --delete-snapshots if you want to delete old snapshots ( default false )
    -k | --snapshots-to-keep num of snapshots to keep ( default 2 )
    --snapshot-id snapshot id to restore
    --pushgateway-url pushgateway url if you want generate prometheus metrics ( default empty )
    --metric-labels labels that you want to add to the metric (default kubernetes_pod_name, kubernetes_namespace)
EOF
  exit 1
}
# Defaults; all overridable via the --key=value flags parsed below.
action=""
path=""
delete_snapshots=false
snapshots_to_keep=2
snapshot_id=""
pushgateway_url=""
metric_labels="kubernetes_pod_name=\"$POD_NAME\",kubernetes_namespace=\"$NAMESPACE_NAME\""
for arg in "$@"
do
  case $arg in
    backup|restore)
      action=$arg
      shift
      ;;
    -p=*|--path=*)
      path="${arg#*=}"
      shift
      ;;
    -d|--delete-snapshots)
      delete_snapshots=true
      shift
      ;;
    -k=*|--snapshots-to-keep=*)
      snapshots_to_keep=${arg#*=}
      shift
      ;;
    --snapshot-id=*)
      snapshot_id=${arg#*=}
      shift
      ;;
    --pushgateway-url=*)
      pushgateway_url=${arg#*=}
      shift
      ;;
    --metric-labels=*)
      metric_labels=${arg#*=}
      shift
      ;;
  esac
done
# POST one gauge sample ($1 = 1 ok / 0 fail) to the Prometheus pushgateway.
pushgateway(){
  # You can activate pushgateway to generate prometheus metrics (using pushgateway)
  [[ -z "$POD_NAME" ]] && POD_NAME=resticw
  [[ -z "$NAMESPACE_NAME" ]] && NAMESPACE_NAME=resticw
  cat <<-EOF | curl --data-binary @- http://${pushgateway_url}/metrics/job/besu-snapshot/instance/$POD_NAME
# TYPE resticw_snapshot_status gauge
resticw_snapshot_status{$metric_labels} $1
EOF
}
backup(){
  # try to init restic repo on each execution, is the repo already exist
  # restic does not create anything and continue with the backup
  # (the leading `!` also keeps `set -e` from aborting when init "fails")
  ! restic init 2> /dev/null
  (cd $path; restic backup --no-cache ./)
  }
restore(){
  restic restore --no-cache --target $path $snapshot_id --verify
}
# Repository credentials are mandatory; bail out with usage otherwise.
if [[ -z "$RESTIC_REPOSITORY" && -z "$RESTIC_PASSWORD" ]]; then
  help;
fi
if [[ "$action" == "backup" ]]; then
  backup
  if [[ $delete_snapshots == true ]]; then
    restic forget --keep-last $snapshots_to_keep
  fi
elif [[ "$action" == "restore" ]]; then
  restore
fi
| true |
a0d2fee42097ea10063a70fc5146237bc45b1eab | Shell | eraserhd/dotfiles | /bin/nexus-rebase | UTF-8 | 4,748 | 3.8125 | 4 | [] | no_license | #!/usr/bin/env bash
set -eo pipefail
# Run a command, hiding its output unless it fails; on failure, report the
# command, dump the captured output, and propagate the command's exit status.
# BUG FIX: under the script's `set -e`, an unguarded `"$@"` aborted the whole
# script before the status could be captured, so the log was never shown and
# the temp file leaked. The `|| errorCode=$?` guard prevents that.
showOutputOnFail() {
  local errorCode=0 tmp
  tmp=$(mktemp)
  "$@" >"$tmp" 2>&1 || errorCode=$?
  if (( errorCode > 0 )); then
    echo 'Command failed: ' "$@" >&2
    cat "$tmp"
    rm -f "$tmp"
    return "$errorCode"
  fi
  rm -f "$tmp"
  return 0
}
# Fresh shallow clone of the nexus repo into /tmp/nexus and cd into it.
cloneTempNexus() {
  printf 'Cloning nexus...\n'
  rm -rf /tmp/nexus
  cd /tmp
  showOutputOnFail git clone git@github.com:2uinc/nexus.git
  cd nexus
}
# Cherry-pick each named patch, located by commit-subject search on staging.
applyPatches() {
  local patch
  for patch in "$@"; do
    showOutputOnFail git cherry-pick "origin/staging^{/$patch}"
  done
}
# Emit the list of branches this tool manages, one per line:
# first word of every master..staging subject, all staging-* remotes,
# plus the fixed "staging" and "tools" branches.
findManagedBranches() {
  git log --reverse --format='%s' origin/master..origin/staging |sed -e 's/ .*$//'
  git for-each-ref --format='%(refname:strip=3)' 'refs/remotes/origin/staging-*'
  printf 'staging\n'
  printf 'tools\n'
}
# Write a step script of applyPatch/rebaseBranch lines to .git/nexus-steps.
# NOTE(review): nothing visible here creates .git/managed-branches, and
# nexus-steps is not consumed elsewhere in this file — confirm still used.
makeSteps() {
  (
    local patch
    for patch in "$@"; do
      printf 'applyPatch "%s"\n' "$patch"
    done
    local branch
    for branch in $(cat .git/managed-branches); do
      printf 'rebaseBranch "%s"\n' "$branch"
    done
  ) >.git/nexus-steps
}
# Step: cherry-pick one patch from its origin branch.
do_applyPatch() {
  local name="$1"
  git cherry-pick "origin/$name"
}
# Step: hard-reset a branch to its origin copy, then rebase onto master.
do_rebaseBranch() {
  local branch="$1"
  git checkout "$branch"
  git reset --hard "origin/$branch"
  local base='master'
  git rebase "$base"
}
# Replay commits in topological (BFS) order from origin/master, using the
# on-disk graph built by cmd_start:
#   reverse-deps/<sha>/ lists commits that sit directly on top of <sha>
#   branches/<sha>/     lists the branches that contain <sha>
#   done/<sha>          marks <sha> as already replayed (restart support)
cmd_continue() {
  # FIXME: Refactor so queue is on disk!
  local root="$(git rev-parse origin/master)"
  local queue=( "$root" )
  local queueStart=0 queueEnd=1
  local commit='' nextCommit=''
  touch .git/nexus-rebase/done/"$root" # root is done by the reset below
  while (( queueStart < queueEnd )); do
    commit="${queue[$queueStart]}"
    queueStart=$(( queueStart + 1 ))
    # enqueue every commit that depends on this one
    for nextCommit in $(ls -1 .git/nexus-rebase/reverse-deps/"$commit" 2>/dev/null); do
      queue[$queueEnd]="$nextCommit"
      queueEnd=$(( queueEnd + 1 ))
    done
    if [ -e .git/nexus-rebase/done/"$commit" ]; then
      continue
    fi
    printf '>> %s\n' "$commit"
    # Replay onto the first containing branch; the rest get fast-forwarded.
    local branches=( $(ls -1 .git/nexus-rebase/branches/"$commit" 2>/dev/null) )
    local branch="${branches[0]}"
    git checkout "$branch"
    if ! git cherry-pick "$commit" >.git/nexus-rebase/cherry.txt 2>&1; then
      # A pick that becomes empty after rebase is fine; anything else is fatal.
      if grep -q '^The previous cherry-pick is now empty,' .git/nexus-rebase/cherry.txt; then
        git cherry-pick --skip
      else
        cat .git/nexus-rebase/cherry.txt >&2
        exit 1
      fi
    fi
    local newCommit="$(git rev-parse HEAD)"
    local first=true
    for branch in "${branches[@]}"; do
      printf ' -- %s\n' "$branch"
      if ! $first; then
        git branch -f "$branch" "$newCommit"
      fi
      first=false
    done
    touch .git/nexus-rebase/done/"$commit"
  done
}
# Clone a fresh copy, build the on-disk dependency graph for every managed
# branch, reset all local branch copies onto origin/master, then hand off to
# cmd_continue to replay the commits.
cmd_start() {
  cloneTempNexus
  #FIXME: Make sure there's no local changes on said branches.
  # Set up commit dependencies for topological sort. The files in reverse-deps
  # list all the commits based on top of the commit that is the name of the file.
  # The files in branches name all of the branches that contain the commit that is
  # the name of the file.
  # The files in done are completion flags so that we can restart where we left off.
  mkdir -p .git/nexus-rebase/{reverse-deps,branches,done}
  local root="$(git rev-parse origin/master)" commit previousCommit branch
  local branches=( $(findManagedBranches) )
  for branch in "${branches[@]}"; do
    previousCommit="$root"
    # fd 4 keeps the while loop in the current shell so the files persist
    exec 4< <(git log --reverse --format='%H' "origin/master..origin/$branch")
    while read -r -u 4 commit; do
      mkdir -p .git/nexus-rebase/reverse-deps/"$previousCommit"
      touch .git/nexus-rebase/reverse-deps/"$previousCommit"/"$commit"
      mkdir -p .git/nexus-rebase/branches/"$commit"
      touch .git/nexus-rebase/branches/"$commit"/"$branch"
      previousCommit="$commit"
    done
    exec 4<&-
  done
  # Check out all branches to ensure we have local copies and they are tracking origin
  for branch in "${branches[@]}"; do
    git checkout "$branch"
    #FIXME: Make sure none have local modifications
  done
  # Reset all local copies of managed branches to master
  git checkout master
  for branch in "${branches[@]}"; do
    git branch -f "$branch" "$root"
  done
  # Start the queue
  cmd_continue
}
# Push master plus every rebased managed branch back to origin.
cmd_push() {
  cd /tmp/nexus || return 1
  git push origin master
  # BUG FIX: the original called the undefined `managedBranches`; the function
  # defined above is `findManagedBranches`. Word-splitting the output is
  # intentional: branch names contain no whitespace.
  git push --force-with-lease origin $(findManagedBranches)
}
# Dispatch: first argument selects cmd_start / cmd_continue / cmd_push;
# remaining arguments are forwarded to the subcommand.
main() {
  local cmd="$1"
  shift
  "cmd_$cmd" "$@"
}
main "$@"
| true |
fa8da57d1b6a933040c1675340718cbe81c54d5e | Shell | rabeehk/Advanced-System-Lab | /code/bash_scripts/run_stability_experiment.sh | UTF-8 | 1,614 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# SSH targets for the benchmark topology: one Postgres DB host and two
# application servers (public addresses for ssh, private for the benchmark).
key=rabeeh.pem
ssh_db_host=ubuntu@52.29.25.245
server_host1="ubuntu@52.29.40.194"
server_inet1="172.31.27.69"
server_host2="ubuntu@52.29.45.206"
server_inet2="172.31.21.205"
# NOTE(review): embedding `cat PW.txt` puts the DB password on command lines
# visible to `ps` on both ends — consider PGPASSFILE / env instead.
DATABASE_URL="jdbc:postgresql://52.29.25.245:5432/asl_db?user=postgres&password=`cat PW.txt`"
db_ad="/home/ubuntu/postgres/bin/"
# Drop and recreate the benchmark database with schema, procedures, indices.
function reset_db {
    ssh -i $key $ssh_db_host "killall java"
    ssh -i $key $ssh_db_host "PGPASSWORD=`cat PW.txt` $db_ad/dropdb -U postgres asl_db -p 5432 -h /home/ubuntu "
    ssh -i $key $ssh_db_host "PGPASSWORD=`cat PW.txt` $db_ad/createdb -O postgres asl_db -p 5432 -h /home/ubuntu"
    ssh -i $key $ssh_db_host "cd ~/asl && PGPASSWORD=`cat PW.txt` $db_ad/psql -U postgres -d asl_db -f create_tables.sql -p 5432 -h /home/ubuntu"
    ssh -i $key $ssh_db_host "cd ~/asl && PGPASSWORD=`cat PW.txt` $db_ad/psql -U postgres -d asl_db -f create_stored_procedures.sql -p 5432 -h /home/ubuntu"
    ssh -i $key $ssh_db_host "cd ~/asl && PGPASSWORD=`cat PW.txt` $db_ad/psql -U postgres -d asl_db -f create_indices.sql -p 5432 -h /home/ubuntu"
}
# it runs the server
function reset_server1 {
    ssh -i $key $server_host1 "killall java"
    ssh -i $key $server_host1 'screen -dm java -jar /home/ubuntu/asl/server_main.jar 4444 '"$DATABASE_URL"
    sleep 2
}
function reset_server2 {
    ssh -i $key $server_host2 "killall java"
    ssh -i $key $server_host2 'screen -dm java -jar /home/ubuntu/asl/server_main.jar 7777 '"$DATABASE_URL"
    sleep 2
}
# Reset everything, then run the stability benchmark against both servers.
reset_db
reset_server1
reset_server2
java -jar stability_bench.jar 40 10 $server_inet1 4444 $server_inet2 7777 $DATABASE_URL
| true |
2fadf0e19e9b36cc0e740f081a2886e134311aa3 | Shell | mehulsbhatt/mrtg-maker | /mrtg-maker.sh | UTF-8 | 1,104 | 3.6875 | 4 | [] | no_license | #!/bin/sh
# MRTG configuration and index maker
# Created by Winter Faulk 2013
# http://faulk.me
# http://github.com/faulker/mrtg-maker
# Create a file with one device per line, include the SNMP community (exp. public@192.168.1.1)
# and change the variable DEVLIST to point to it.
DEVLIST="device.list"
CFG="/etc/mrtg.cfg" # Path to MRTG config file.
BIN="/usr/bin" # Path to bin that is holding cfgmaker.
WEBUSER="www-data"
WWWPATH="/var/www/mrtg"
# BUG FIX: the original ran `env LANG=C`, which merely *prints* the
# environment; exporting actually applies the C locale to the tools below.
LANG=C
export LANG
clear
# Join all devices from $DEVLIST into one space-separated word list.
# (Also fixes the original, which concatenated entries with no separator
# when the file had Unix line endings, and used `read` without -r.)
NETWORK="$(tr -s '\r\n' '  ' < "$DEVLIST")"
echo "Creating MRTG configuration file..."
# $NETWORK is intentionally unquoted: cfgmaker takes one argument per device.
"$BIN/cfgmaker" \
    --no-down \
    --ifref=nr \
    --ifdesc=descr \
    --global "WorkDir: ${WWWPATH}" \
    --global "options[_]: bits" \
    --subdirs=HOSTNAME_SNMPNAME \
    $NETWORK > "$CFG"
echo "Done creating configuration file."
echo
echo "Creates the MRTG index.html file..."
"$BIN/indexmaker" \
    --columns=2 \
    --show=day \
    "$CFG" > "${WWWPATH}/index.html"
echo "Done creating index.html file"
# Hand the generated tree to the web server user.
chown -R "$WEBUSER:$WEBUSER" "$WWWPATH"
echo
echo "-----------------------"
echo "Done!"
| true |
cb554f3d5cb9f435c92fd2a71c3709b1c2e93d35 | Shell | ascagnel/dotfiles | /zshrc | UTF-8 | 5,033 | 2.96875 | 3 | [
"MIT"
] | permissive | # Path to your oh-my-zsh installation.
export ZSH=~/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
ZSH_THEME="jreese"
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
ZSH_CUSTOM=$HOME/dotfiles/zsh_custom
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(vi-mode)
# User configuration
export PATH=$PATH:bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X12/bin:/opt/boxen/bin:${HOME}/.bin:./node_modules/.bin
# per-system configuration
[ -f "${HOME}/.zshrc_local" ] && source "${HOME}/.zshrc_local"
source $ZSH/oh-my-zsh.sh
export EDITOR='nvim'
export FZF_TMUX_OPTS='-p 80%'
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
alias vi="nvim"
alias ta="tmux a -t "
alias jsonpp="json_pp -json_opt pretty,utf8"
#alias tc="tmux new-session -t"
#alias edit="open -a TextEdit"
if hash trash 2>/dev/null; then
alias rm="trash"
fi
if hash rg 2>/dev/null; then
alias grep="rg"
fi
if hash yt-dlp 2>/dev/null; then
alias youtube-dl="yt-dlp"
fi
export FZF_DEFAULT_COMMAND='ag --hidden --ignore .git -g ""'
export DEFAULT_USER="$(whoami)"
export HOSTNAME=`hostname -s`
# Theme hook: only show user@host in the prompt when it's not the default
# local user (i.e. over SSH or under a different account).
prompt_context() {
  if [[ "$USER" != "$DEFAULT_USER" || -n "$SSH_CLIENT" ]]; then
    prompt_segment black default "%(!.%{%F{yellow}%}.)$USER@$HOSTNAME"
  fi
}
# fshow - git commit browser (enter for show, ctrl-d for diff, ` toggles sort)
fshow() {
  local out shas sha q k
  while out=$(
      git log --graph --color=always \
          --format="%C(auto)%h%d %s %C(black)%C(bold)%cr" "$@" |
      fzf-tmux -p --ansi --multi --no-sort --reverse --query="$q" \
          --print-query --expect=ctrl-d --toggle-sort=\`); do
    # fzf --print-query/--expect output: line 1 = query, line 2 = key pressed,
    # remaining lines = the selected commits.
    q=$(head -1 <<< "$out")
    k=$(head -2 <<< "$out" | tail -1)
    shas=$(sed '1,2d;s/^[^a-z0-9]*//;/^$/d' <<< "$out" | awk '{print $1}')
    [ -z "$shas" ] && continue
    if [ "$k" = ctrl-d ]; then
      git diff --color=always $shas | less -R
    else
      for sha in $shas; do
        git show --color=always $sha | less -R
      done
    fi
  done
}
# fd - pick changed files from `git diff`, with a per-file diff preview.
# (Unquoted $preview is safe here: zsh does not word-split parameters.)
fd() {
  preview="git diff $@ --color=always -- {-1}"
  git diff $@ --name-only | fzf-tmux -m -p 80% --ansi --preview $preview
}
# True when the cwd is inside a git repo with at least one commit.
is_in_git_repo() {
  git rev-parse HEAD > /dev/null 2>&1
}
# gf - pick files from `git status --short`, printing just the path(s).
gf() {
  is_in_git_repo || return
  git -c color.status=always status --short |
  fzf-tmux -p 80% -m --ansi --nth 2..,.. \
    --preview '(git diff --color=always -- {-1} | sed 1,4d; cat {-1}) | head -500' |
  cut -c4- | sed 's/.* -> //'
}
# if on a Mac, use touchID/watchID to authenticate commands
# (appears to be broken on 12.3.1)
# if [ "(uname -s)" = "Darwin" ]; then
# sudo() {
# unset -f sudo
# if [[ "$(uname)" == 'Darwin' ]] && ! grep 'pam_tid.so' /etc/pam.d/sudo --quiet; then
# sudo sed -i -e '1s;^;auth sufficient pam_tid.so\n;' /etc/pam.d/sudo
# fi
# if [[ "$(uname)" == 'Darwin' ]] && ! grep 'pam_watchid.so' /etc/pam.d/sudo --quiet; then
# sudo sed -i -e '1s;^;auth sufficient pam_watchid.so "reason=execute a command as root"\n;' /etc/pam.d/sudo
# fi
# sudo "$@"
# }
# fi
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
| true |
008a6b4603fd50a3b743d5ac211d1106b94b91a8 | Shell | merrilymeredith/dotfiles | /bin/setup-neomutt | UTF-8 | 1,374 | 3.40625 | 3 | [] | no_license | #!/bin/sh
set -eu
# --help/-h: show the embedded POD below via perldoc and stop.
case ${1:-} in *help|-h)
	exec perldoc -T $0;;
esac
cd
mkdir -p sandbox
# Clone on first run; otherwise clean the previous build and pull updates.
if [ ! -d sandbox/neomutt ]; then
	git clone --depth 1 https://github.com/neomutt/neomutt.git sandbox/neomutt
else
	make -C sandbox/neomutt clean >/dev/null 2>&1 || true
	git -C sandbox/neomutt pull
fi
cd sandbox/neomutt
# macOS (MacPorts) needs explicit ssl/gpgme prefixes and an include path.
case "$(uname -s)" in
	Darwin)
		CONFIGURE_OPTIONS='--with-ssl=/opt/local --with-gpgme=/opt/local'
		export EXTRA_CFLAGS="-iquote $(pwd)"
		;;
	*)
		CONFIGURE_OPTIONS=''
		;;
esac
autoreconf -i --force || true
./configure \
	--with-mailpath=/var/mail \
	--ssl \
	--sasl \
	--lmdb \
	--gpgme \
	$CONFIGURE_OPTIONS \
	--disable-doc
make -s -j3
# Install the freshly built binaries into ~/bin and alias mutt -> neomutt.
cd ~/bin
for BIN in neomutt pgpewrap; do
	cp ../sandbox/neomutt/$BIN .
done
ln -sf neomutt mutt
:<<=cut
=head1 NAME
setup-neomutt - Clone/update and build neomutt
=head1 DESCRIPTION
Run to do the above, incl. copying binaries into C<~/bin> after the neomutt
build.
=head1 REQUIREMENTS
=head2 Debian
    apt install autoconf links pandoc gnupg2 gpgsm urlscan libncursesw5-dev \
      libssl-dev libsasl2-dev liblmdb-dev libgpgme11-dev
=head2 Cygwin / Babun
    pact install autoconf links libncursesw-devel openssl-devel libsasl2-devel \
      liblmdb-devel libgpgme-devel
gpg2 isn't available in cygwin, but there is a windows build. using it with
cygwin paths requires a wrapper though.
=cut
| true |
cda2a2108be572d25db5d7fb41eb4ce99c604e83 | Shell | Islamshafeek/Embedded_Linux_Z2H | /Assignement_1_bash/phonebook_v1.sh | UTF-8 | 2,918 | 3.765625 | 4 | [] | no_license | #!/bin/bash
#set -x
# The "database" is this script file itself: contact lines ("Name,Number")
# are appended after the marker comment at the bottom of the file.
file_path="./phonebook_v1.sh"
op=$1
op=${op,,} #Convert input paramter to lowercase
#Display all Contacts Function...........................................................................
# Print every stored "Name,Number" contact line from the script-file
# database, or the placeholder message when no contact lines exist.
displayAll() {
    if grep -E "^[a-zA-Z\ ]+,[0-9]+$" "$file_path"
    then
        return 0
    fi
    echo "No Contacts!"
}
#Add new contact Function...........................................................................
# Prompts for a name and an 11-digit number; validates both, rejects
# duplicates (by name or by number), then appends "name,number" to the
# script-file database. All feedback goes to stdout.
function insert {
read -p "Enter New Name: " newcontact
# Name may only contain letters and spaces.
if [[ $newcontact =~ ^[a-zA-Z\ ]+$ ]]
then
if grep -E "^$newcontact,[0-9]+$" $file_path > /dev/null
then
echo "Contact already exists!"
else
read -p "Enter New Number: " newnumber
# Number must be digits only and exactly 11 characters long.
if [[ $newnumber =~ ^[0-9]+$ ]] && [ ${#newnumber} -eq 11 ]
then
if grep -E "^[a-zA-Z\ ]+,$newnumber$" $file_path > /dev/null
then
echo "Number already exists!"
else
echo "$newcontact,$newnumber" >> $file_path
fi
else
echo "Not Valid Number!"
fi
fi
else
echo "Not Valid Name!"
fi
}
#................................................................................................
#Search Function................................................................
# Prompts for a name and prints its matching "name,number" line(s),
# or "Not Found!" when the name has no entry.
function search {
read -p "Enter Name: " searchcontact
grep -E "^$searchcontact,[0-9]+$" $file_path || echo "Not Found!"
#grep -E "^$searchcontact" $file_path || echo "Not Found!"
}
#...............................................................................
#deleteAll Functiom.............................................................
# Asks for confirmation; on "y" deletes every contact line from the
# database region of the file, on "n" exits, otherwise re-prompts
# (recursively) until a valid answer is given.
function deleteAll {
read -p "Are you Sure? [y] [n]: " check
check=${check,,}
case $check in
y)
sed -i -r '/^[a-zA-Z\ ]+,[0-9]+/,/^$/d' $file_path
echo "All contacts are deleted!!!";;
n)exit 0;;
*)echo "Not Valid!!...Enter [y] or [n]"
deleteAll;;
esac
}
#..............................................................................
#delete By Name Function.......................................................
# Prompts for a name; deletes its line when present, else reports it.
function deleteName {
read -p "Enter Name: " deletecontact
if grep -E "^$deletecontact,[0-9]+$" $file_path > /dev/null
then
sed -i -r "/^$deletecontact,[0-9]/d" $file_path
else
echo "Not Found!"
fi
}
#..............................................................................
#Check on Input Parameters Function............................................
# Dispatch on the (lower-cased) command-line flag stored in the global
# $op: -i insert, -v view all, -s search, -e erase all, -d delete one.
# Anything else prints "Wrong Input".
function checkInput {
    case "$op" in
        "-i") insert ;;
        "-v") displayAll ;;
        "-s") search ;;
        "-e") deleteAll ;;
        "-d") deleteName ;;
        *)    echo "Wrong Input" ;;
    esac
}
#..............................................................................
#Start of code:
#check on number of input parameters...........................................
# Exactly one argument is required; it is dispatched via checkInput above.
case $# in
1)checkInput;;
0)echo "No Input Parameters";;
*)echo "Exceeds parameters limit!";;
esac
#..............................................................................
#set +x
exit 0
#Beginning of PhonebookDB......................................................
| true |
06c2bbba79d8d80b735d2e6f124a8419d1513d79 | Shell | patperry/iproc | /tools/Lindent | UTF-8 | 667 | 2.765625 | 3 | [] | no_license | #!/bin/sh
# Wrapper around GNU indent with kernel-style (-kr, 8-col tabs) settings
# plus the stdint/size types registered via -T so they indent correctly.
INDENT=gindent
# NOTE(review): "-T int16_T" looks like a typo for int16_t — confirm.
PARAM="-npro -kr -i8 -ts8 -sob -l80 -ss -ncs -cp1 -T bool -T int8_t -T int16_T -T int32_t -T int64_t -T intptr_t -T intmax_t -T ptrdiff_t -T size_t -T ssize_t -T uint8_t -T uint16_t -T uint32_t -T uint64_t -T uintptr_t -T uintmax_t -T f77int"
# Parse "indent x.y.z" from --version into its three numeric components.
RES=`$INDENT --version`
V1=`echo $RES | cut -d' ' -f3 | cut -d'.' -f1`
V2=`echo $RES | cut -d' ' -f3 | cut -d'.' -f2`
V3=`echo $RES | cut -d' ' -f3 | cut -d'.' -f3`
# Add -il0 (indent labels to column 0) only for indent >= 2.2.10,
# the first version that supports the option.
if [ $V1 -gt 2 ]; then
PARAM="$PARAM -il0"
elif [ $V1 -eq 2 ]; then
if [ $V2 -gt 2 ]; then
PARAM="$PARAM -il0";
elif [ $V2 -eq 2 ]; then
if [ $V3 -ge 10 ]; then
PARAM="$PARAM -il0"
fi
fi
fi
# Run indent on whatever file arguments were given.
$INDENT $PARAM $@
| true |
74d2f662e5d723174d2a445dcff3aa4c096cdda8 | Shell | grvsoniuk/appd_scripts | /add_watchers.sh | UTF-8 | 917 | 3.265625 | 3 | [] | no_license | TICKET=$1
TEAM=$2
# Jira basic-auth credentials.
# NOTE(review): credentials placeholders are hard-coded in the script;
# prefer reading them from the environment so real values never land in git.
USER="<USER>"
PASSWORD="<PASSWORD>"
if [ -z "$1" ]
then
echo "Usage: sh ./add_watchers.sh <JIRA Ticket> <Team File Path>"
echo "Example: sh ./add_watchers.sh CORE-77004 ./eum-team.txt"
exit 1
fi
# Read the team file into an array, one entry per line (CR/LF delimiters);
# GLOBIGNORE='*' stops glob expansion of the unquoted $(cat ...) words.
IFS=$'\r\n' GLOBIGNORE='*' command eval 'array=($(cat $TEAM))'
nohup echo "------------------------------------------------------------------------------------------------------------"
nohup echo "Date : $(date)"
nohup echo "Ticket : $TICKET"
nohup echo "------------------------------------------------------------------------------------------------------------"
for i in "${array[@]}"
do
:
# Build and eval a backgrounded curl POST that adds $i as a watcher.
# NOTE(review): eval on interpolated file content is shell-injection
# prone if the team file is untrusted — confirm its provenance.
ADD_WATCHER="nohup curl -i -u $USER:$PASSWORD -H 'Content-Type: application/json' -H 'Accept: application/json' -X POST -d '\"$i\"' https://singularity.jira.com/rest/api/2/issue/$TICKET/watchers &"
nohup echo ">> Adding $i as watcher..."
eval "$ADD_WATCHER"
done
echo "DONE!!!" | true
f8f8e26bac794fdf3c57359fbda74eff9656b412 | Shell | jdriscoll98/aoiisem | /make-devfixtures | UTF-8 | 919 | 3.5625 | 4 | [] | no_license | #!/bin/bash
echo
echo "*************************************************"
echo "This script will dump the current relevant data in the database to fixtures!"
echo "*************************************************"
echo
# read with no variable name stores the answer in $REPLY (checked below).
read -p "Are you sure you want to continue? (yes/no) "
if [ "$REPLY" != "yes" ]; then
exit 0
fi
echo
echo "Creating fixtures ..."
# Start virtual env
source ~/venv/bin/activate
# Dump each relevant Django app/model to its development fixture file.
python manage.py dumpdata --format=json auth.User > config/fixtures/development/Users.json
python manage.py dumpdata --format=json Application > config/fixtures/development/Application.json
python manage.py dumpdata --format=json Employment > config/fixtures/development/Employment.json
python manage.py dumpdata --format=json House > config/fixtures/development/House.json
python manage.py dumpdata --format=json Scheduling > config/fixtures/development/Scheduling.json
# Stop virtual env
deactivate
echo "Done"
| true |
e1abea3a5f67681f1bccb434f9b18eade89db640 | Shell | delkyd/alfheim_linux-PKGBUILDS | /wayland-ivi-extension-git/PKGBUILD | UTF-8 | 822 | 2.6875 | 3 | [] | no_license | # Maintainer: Gabriel Laskar <gabriel@lse.epita.fr>
# Arch Linux PKGBUILD for the GENIVI wayland-ivi-extension (git tag build).
pkgname=wayland-ivi-extension-git
pkgver=1.9.1
pkgrel=1
pkgdesc=""
arch=('x86_64')
url="http://git.projects.genivi.org/?p=wayland-ivi-extension.git"
license=('Apache')
groups=()
depends=('weston')
makedepends=('git')
provides=("${pkgname%-git}")
conflicts=("${pkgname%-git}")
replaces=()
backup=()
options=()
install=
source=('wayland-ivi-extension::git://git.projects.genivi.org/wayland-ivi-extension.git#tag=1.9.1')
noextract=()
md5sums=('SKIP')
# makepkg calls pkgver() to derive the package version from the checkout.
pkgver() {
cd "$srcdir/${pkgname%-git}"
# Git, tags available
printf "%s" "$(git describe --tags)"
}
# Create an out-of-tree build directory before build() runs.
prepare() {
cd "$srcdir/${pkgname%-git}"
mkdir -p build
}
build() {
cd "$srcdir/${pkgname%-git}/build"
cmake -DCMAKE_INSTALL_PREFIX=/usr ..
make
}
# Install into the staging directory $pkgdir for packaging.
package() {
cd "$srcdir/${pkgname%-git}/build"
make DESTDIR="$pkgdir/" install
}
| true |
197eb385ee630de03068ccd23c3d7ca922781711 | Shell | chatuu/PhDThesis | /cpp/prod5.1/saveSpectra/saveSpectra/NeutrinoKinematicsStudies/startGridJob.sh | UTF-8 | 743 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# Generate the grid submission wrapper and stage analysis sources into the
# shared ../../../gridJob directory, then tar them up for job submission.
location=$(pwd)
# Write the helper script: \$1 = job count, \$2 = extra argument forwarded
# to createSpectra.C ($location expands now; \$1/\$2 expand at run time).
echo "#!/bin/bash
submit_cafana.py -n \$1 -ss -r development -o /pnfs/nova/scratch/users/ckuruppu --user_tarball XSec_Testing.tar ${location}/createSpectra.C \$2" >submitJob.sh
declare -a fileList=("createSpectra.C"
"vars.h"
"headers.h"
"structs.h"
"cuts.h"
"switches.h"
"switches.cxx"
"submitJob.sh")
echo -e "\n"
echo "copying the files:"
for i in "${fileList[@]}"; do
echo "$i"
# Quote the filename so the copy is robust to unusual characters.
cp -- "$i" ../../../gridJob
done
# (message previously said "gridJob2" although the script enters gridJob)
echo -e "Accessing gridJob folder:\n"
# Abort instead of tarring the wrong directory if the cd fails.
cd ../../../gridJob/ || exit 1
ls -lrt
tar -zc -f XSec_Testing.tar --exclude='*.root' --exclude='*.png' --exclude='*.out' --exclude='*.tar' --exclude-vcs --exclude='*.o' --exclude='*.d' --exclude='tmp/*' --exclude='*debug' --exclude='*.tar.bz2' *
echo "done..!"
| true |
1e422de894e3b39fd39ecd90b2ea5d80357ddd74 | Shell | champ73/lobby | /setup_bot | UTF-8 | 4,156 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# URLs of the systemd unit template and the bot helper scripts fetched below.
BOT_SERVICE_SCRIPT_URL="https://raw.githubusercontent.com/triplea-game/lobby/master/files/bot/triplea-bot%40.service"
RUN_BOT_SCRIPT="https://raw.githubusercontent.com/triplea-game/lobby/master/files/bot/run_bot"
UNINSTALL_BOT_SCRIPT="https://raw.githubusercontent.com/triplea-game/lobby/master/files/bot/uninstall_bot"
if [[ $USER != "root" ]]; then
echo "This script must be run as root"
echo "Type 'sudo $0'"
exit 1
fi
# Print usage help and exit non-zero.
function usage() {
echo "$(basename $0) <version> <bot_name> <bot_start_number> <number_of_bots>"
echo " version: 'something like '1.9.0.0.3521'"
echo " bot_name: name used for the bot, typically matches server region, eg: 'NJ_US'"
echo " bot_start_number: bots are numbered, numbers should be unique, this is the start number for the bots \
installed on this server"
echo " number_of_bots: how many bot instances are to be installed/running on this server."
echo ""
echo "Notes:"
echo " - files are installed to /home/triplea/bots/<version>"
echo " - bot maps are installed to /home/triplea/maps"
exit 1
}
# Positional arguments (see usage above).
VERSION=$1
BOT_NAME=$2
START_NUMBER=$3
BOT_COUNT=$4
# From here on: exit on error, error on unset variables, trace commands.
set -eux
if [ -z "$BOT_COUNT" ]; then
usage
fi
echo "Install system packages and dependencies"
apt-get update
apt -y install openjdk-8-jre icedtea-8-plugin openjfx python3 curl unzip \
cowsay htop iftop tiptop fail2ban unattended-upgrades vim
echo "Install bot version: $VERSION" | cowsay
HELP_FILE="/home/triplea/README.txt"
## install bot java executable
# Download and unpack the requested release under /home/triplea/bots/<version>.
mkdir -p /home/triplea/bots
cd /home/triplea/bots
FILE_SUFFIX="all_platforms.zip"
DOWNLOAD_URL="https://github.com/triplea-game/triplea/releases/download/$VERSION/triplea-$VERSION-$FILE_SUFFIX"
echo "Download from $DOWNLOAD_URL"
wget $DOWNLOAD_URL
FILE=$(ls | grep "$VERSION-$FILE_SUFFIX$")
mkdir -p /home/triplea/bots/$VERSION
unzip -d /home/triplea/bots/$VERSION $FILE
rm *zip
cd /home/triplea/bots/$VERSION
wget $RUN_BOT_SCRIPT
chmod +x run_bot
wget $UNINSTALL_BOT_SCRIPT
chmod +x uninstall_bot
echo "install bot service scripts" | cowsay
# Install the systemd unit template and substitute install dir / bot name.
SERVICE_SCRIPT="/lib/systemd/system/triplea-bot@.service"
rm -f "triplea-bot@.service"
wget "$BOT_SERVICE_SCRIPT_URL"
mv "triplea-bot@.service" "$SERVICE_SCRIPT"
sed -i "s|BOT_DIR|/home/triplea/bots/$VERSION|" "$SERVICE_SCRIPT"
sed -i "s|BOT_NAME|$BOT_NAME|" "$SERVICE_SCRIPT"
systemctl daemon-reload
grep -q "$VERSION" "$SERVICE_SCRIPT" || echo "ERROR, $VERSION was not updated in /lib/systemd/system/triplea-bot@.service"
grep -q "$VERSION" "$SERVICE_SCRIPT" || exit 1
# Allow the triplea user to manage its services and run monitoring tools.
grep -q "^triplea" /etc/sudoers || echo "triplea ALL=(ALL) /usr/sbin/service triplea-bot@*" >> /etc/sudoers
grep -q "^triplea.*htop" /etc/sudoers || echo "triplea ALL=(ALL) /usr/bin/htop*" >> /etc/sudoers
grep -q "^triplea.*iftop" /etc/sudoers || echo "triplea ALL=(ALL) /usr/bin/iftop" >> /etc/sudoers
echo "open bot ports and drop start scripts in /home/triplea" | cowsay
## make sure we allow ssh port
ufw allow 22
rm -f /home/triplea/start_all /home/triplea/stop_all
# Per bot: open its port (40<NN>) and generate start/restart/stop helpers.
for i in $(seq 1 $BOT_COUNT); do
BOT_NUMBER=$((START_NUMBER-1+i))
BOT_PORT="40$BOT_NUMBER"
ufw allow $BOT_PORT
## create bot start/stop scripts
echo "Opened port $BOT_PORT for bot number $BOT_NUMBER"
echo "sudo service triplea-bot@$BOT_NUMBER start" > /home/triplea/start_bot_$BOT_NUMBER
echo "sudo service triplea-bot@$BOT_NUMBER restart" > /home/triplea/restart_bot_$BOT_NUMBER
echo "sudo service triplea-bot@$BOT_NUMBER stop" > /home/triplea/stop_bot_$BOT_NUMBER
chmod +x /home/triplea/stop_bot* /home/triplea/start_bot* /home/triplea/restart_bot*
echo "You can restart bot $BOT_NUMBER by running the comand '/home/triplea/restart_bot_$BOT_NUMBER'" > $HELP_FILE
## TODO: add a status script
echo "sudo service triplea-bot@$BOT_NUMBER start" >> /home/triplea/start_all
echo "sudo service triplea-bot@$BOT_NUMBER stop" >> /home/triplea/stop_all
done
chmod +x /home/triplea/start_all /home/triplea/stop_all
echo "y" | ufw enable
chown -R triplea:triplea /home/triplea
echo "Finished - Installed binaries for $VERSION, log back in as triplea user and run /home/triplea/start_all" | cowsay
| true |
91d89ff38f1f1fbba60c65ef7ce3699a0c32cfc2 | Shell | liujiekm/gockerize | /root/run.sh | UTF-8 | 599 | 3.625 | 4 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | #!/bin/bash
set -e
IMAGE=$1
SERVICE=$2
# Capture the Dockerfile piped in on stdin (tee with no file args copies
# stdin to stdout, which is redirected into the file); fall back to the
# service's own Dockerfile when stdin produced nothing.
tee >/${GOPATH}/Dockerfile
if [ -s "${GOPATH}/Dockerfile" ] ; then
DOCKERFILE=${GOPATH}/Dockerfile
else
DOCKERFILE=${GOPATH}/src/${SERVICE}/Dockerfile
fi
# apply optional stdlib patches
if [ -d "${GOPATH}/src/${SERVICE}/patches" ] ; then
pushd /usr/local/go/ >>/dev/null
for p in ${GOPATH}/src/${SERVICE}/patches/*.patch ; do
patch -p1 < $p
done
popd >>/dev/null
fi
# Build a fully static, stripped binary (CGO off), then the docker image.
CGO_ENABLED=0 GO15VENDOREXPERIMENT=1 go get $GOARGS -a -installsuffix cgo -ldflags '-d -s -w' ${SERVICE}
docker build -t ${IMAGE} -f ${DOCKERFILE} ${GOPATH}
| true |
e884476efe813ad6d84e4ef7646209a36d839c34 | Shell | MedicineYeh/hsa-micro-benchmarks | /run_set.sh | UTF-8 | 3,386 | 4.25 | 4 | [] | no_license | #!/bin/bash
#This is set in the argument of individual test set
TIMES=0
#This should be the same as the program loop size
LOOP_SIZE=$((1000 * 1000))
#These three are color code in terminal
GREEN="\033[1;32m"
BLUE="\033[1;36m"
NC="\033[0;00m"
# Default directory of test inputs; may be overridden by the first CLI arg.
TEST_SET=./test_set
# Native ISA dump produced by the build; copied next to each test's output.
DEFAULT_NATIVE_ISA_FILE=amdhsa001.isa
#This file would be aotomatically removed after preparing hsail code
HSAIL_TEMP_FILE=./.hsail_tmp_file
# Run every test file under $TEST_SET: generate the HSAIL source, build
# and execute it, save the native ISA dump under ./isa/, and record the
# parsed timing under ./output/ (mirroring the test-set layout).
# $1 (optional): alternative test-set directory.
main() {
    if [ ! "$1" == "" ]; then
        # Override the directory name
        TEST_SET="$1"
    fi
    mkdir -p output isa
    FILES=$(cd ${TEST_SET} && find ./)
    for file in ${FILES}; do
        path="${TEST_SET}/${file}"
        # Skip parsing directories
        if [ -d ${path} ]; then
            # Here, $file is a directory (name)
            # Create directories in output folder for putting output files in the same order
            mkdir -p ./output/${file};
            mkdir -p ./isa/${file};
            continue;
        fi
        TIMES=0
        clean_redundant_files
        prepare_hsail ${path}
        # Occur some errors while preparing test file
        if [ "$?" != "0" ]; then
            continue;
        fi
        make_n_execute
        # Occur some errors while making/executing
        if [ "$?" != "0" ]; then
            continue;
        fi
        cp ${DEFAULT_NATIVE_ISA_FILE} isa/${file}.isa
        parse_result ./output/${file}
        clean_redundant_files
    done
}
# Run the pipeline (prepare, build/execute, save ISA, parse timing) for a
# single test file given as $1; complains when the file does not exist.
single_test() {
    mkdir -p output isa
    file=$1
    if [ -f $file ]; then
        TIMES=0
        clean_redundant_files
        prepare_hsail ${file}
        # Occur some errors while preparing test file
        if [ "$?" != "0" ]; then
            return -1;
        fi
        make_n_execute
        cp ${DEFAULT_NATIVE_ISA_FILE} isa/$(basename ${file}).isa
        parse_result ./output/$(basename ${file})
        clean_redundant_files
    else
        echo "Could not locate file $1"
    fi
}
# Build vector_copy.hsail from the test file $1: read the TIMES value out
# of the file, concatenate $TIMES copies of the file into a temp file, and
# splice that temp file into sample_hsail after its TAG_REPLACEMENT line.
# Returns non-zero when the test file lacks a TIMES argument.
prepare_hsail() {
    echo "preparing hsail for ${1}";
    #Get arguments from files
    TIMES="$(grep "TIMES" ${1} | awk '{print $2}')"
    if [ "${TIMES}" == "" ]; then
        echo "Missing argument TIMES in the test set - ${1}. Skipped";
        return -1;
    fi;
    #Get the line number of TAG in file
    LINE=$(grep -n "TAG_REPLACEMENT" sample_hsail | awk '{print $1}')
    #Remove redundant :
    LINE=$(echo ${LINE} | sed -e "s/://g")
    [[ -f ${HSAIL_TEMP_FILE} ]] && rm ${HSAIL_TEMP_FILE}
    for (( i=0; i<$((${TIMES})); i++ ));
    do
        cat ${1} >> ${HSAIL_TEMP_FILE}
    done
    # sed "Nr file" reads the temp file in after line N of sample_hsail.
    sed ''${LINE}'r '${HSAIL_TEMP_FILE}'' sample_hsail > vector_copy.hsail
    [[ -f ${HSAIL_TEMP_FILE} ]] && rm ${HSAIL_TEMP_FILE}
    return 0;
}
# Build (and, via the Makefile's dump target, run) the benchmark quietly;
# reports and returns non-zero when the build fails.
make_n_execute() {
    echo "Make and execute"
    make dump -s
    if [ "$?" != "0" ]; then
        echo "Fail to build. Skipped"
        return -1;
    fi
}
# Convert the raw time in ./result.log (third field) into per-iteration
# nanoseconds and record it: writes "<T> ns" to <$1>.out and prints a
# colored summary line. The constant 194.3 subtracted first is presumably
# a fixed measurement-overhead baseline — TODO confirm.
parse_result() {
    T=$(cat ./result.log | awk '{print $3}')
    #This result is in nano seconds
    T=$(echo "scale=3; 1000 * 1000 * (${T}-194.3) / ${TIMES} / ${LOOP_SIZE}" | bc)
    #Add 0 to the start of the string if 0 is missing
    if [[ ${T:0:1} == "." ]] ; then T="0${T}"; fi
    echo "${T} ns" > ${1}.out
    echo -e "${GREEN}$(basename ${1})${NC} takes ${BLUE}${T} ns${NC}"
}
# Remove the previous run's result.log, if any; succeeds quietly (exit 0)
# when the file is already absent.
clean_redundant_files() {
    [ ! -f result.log ] || rm result.log
}
# No argument or a directory argument -> run the whole set; a file
# argument -> run that single test.
if [ "$1" == "" ] || [ -d "$1" ]; then
    main "$1"
else
    single_test $1
fi
| true |
31f31d062abcb00efb2a4555acbbe81e58381165 | Shell | hpcugent/openstack-templates | /terraform/scripts/modify_variable.sh | UTF-8 | 7,180 | 3.5625 | 4 | [] | no_license | #!/bin/bash
#script logging to modify_variable.log file
# Re-exec trick: on first invocation re-runs itself with a NUL sentinel
# argument and tees all output (stdout+stderr) into modify_variable.log.
test x$1 = x$'\x00' && shift || { set -o pipefail ; ( exec 2>&1 ; $0 $'\x00' "$@" ) | tee -a modify_variable.log ; exit $? ; }
# Optional config file can pre-set the variables checked below.
. ./modify_variable.config &>/dev/null
# Required inputs; [ -z ${VAR+x} ] is true only when VAR is unset.
[ -z ${IMAGE_NAME+x} ] && echo "Variable IMAGE_NAME is not set. Exiting.." 1>&2 && exit 1
[ -z ${FLAVOR_NAME+x} ] && echo "Variable FLAVOR_NAME is not set. Exiting.." 1>&2 && exit 1
[ -z ${SHARE_NAME+x} ] && echo "Variable SHARE_NAME is not set. Exiting.." 1>&2 && exit 1
[ -z ${SHARE_SIZE+x} ] && echo "Variable SHARE_SIZE is not set. Exiting.." 1>&2 && exit 1
[ -z ${VM_BASE_NAME+x} ] && echo "Variable VM_BASE_NAME is not set. Exiting.." 1>&2 && exit 1
[ -z ${vm_floating_ip_cidr+x} ] && echo "Variable vm_floating_ip_cidr is not set. Exiting.." 1>&2 && exit 1
[ -z ${vsc_floating_ip_cidr+x} ] && echo "Variable vsc_floating_ip_cidr is not set. Exiting.." 1>&2 && exit 1
[ -z ${OS_CLOUD+x} ] && echo "Variable OS_CLOUD is not set. Using openstack as a value." && export OS_CLOUD=openstack
# Sanity-check CLI access, then discover ids from the OpenStack project.
openstack catalog list &>/dev/null
[ $? -ne 0 ] && echo "Unable to list openstack catalog. Exiting.." 1>&2 && exit 1
openstack image show "$IMAGE_NAME" &>/dev/null
[ $? -ne 0 ] && echo "Unable to locate image $IMAGE_NAME. Exiting.." 1>&2 && exit 1
image_id="$(openstack image show "$IMAGE_NAME" -c id -f value)"
echo "Image id: $image_id. (Image name: $IMAGE_NAME)"
echo "Flavor name: $FLAVOR_NAME."
root_fs_volume_size="$(openstack flavor show $FLAVOR_NAME -f value -c disk)"
echo "Root FS volume size based on flavor disk size: $root_fs_volume_size."
# Networks/subnets are matched by the _vm/_nfs/_vsc suffix in their name.
vm_network_id="$(openstack network list -f value -c ID -c Name|grep '_vm'|cut -d ' ' -f1)" && \
echo "VM network id: $vm_network_id."
vm_subnet_id="$(openstack network list -c Subnets -c Name|grep '_vm'|awk '{print $4}')" && \
echo "VM subnet id: $vm_subnet_id."
nfs_network_id="$(openstack network list -f value -c ID -c Name|grep '_nfs'|cut -d ' ' -f1)" && \
echo "NFS network id: $nfs_network_id."
nfs_subnet_id="$(openstack network list -c Subnets -c Name|grep '_nfs'|awk '{print $4}')" && \
echo "NFS subnet id: $nfs_subnet_id."
vsc_network_id="$(openstack network list -f value -c ID -c Name|grep '_vsc'|cut -d ' ' -f1)" && \
echo "VSC network id: $vsc_network_id."
vsc_subnet_id="$(openstack network list -c Subnets -c Name|grep '_vsc'|awk '{print $4}')" && \
echo "VSC subnet id: $vsc_subnet_id."
access_key="$(openstack keypair list -c Name -f value|head -1)"
[ -z "$access_key" ] && echo "Unable to find ssh access key. Exiting.." 1>&2 && exit 1
echo "Using first ssh access key \"$access_key\"."
# Pick the first unassigned ("Port" == None) floating IP whose address
# falls inside $vm_floating_ip_cidr; the CIDR membership test is done by
# an inline python3 ipaddress check whose "True" output is grepped.
while read line
do
ip="$(echo "$line"|awk '{print $2}')"
ip_id="$(echo "$line"|awk '{print $1}')"
python3 -c "import ipaddress; ip = ipaddress.ip_address('$(echo "$ip")') in ipaddress.ip_network('$(echo "$vm_floating_ip_cidr")'); \
print (ip);"|grep "True" &>/dev/null && export floating_ip_id="$ip_id" && export floating_ip="$ip" && \
break
done < <(openstack floating ip list -f value -c "Floating IP Address" -c ID -c "Port"|grep None)
[ -z "$floating_ip_id" ] && echo "Unable to find floating ip address. Exiting.." 1>&2 && exit 1
echo "Using floating ip id: $floating_ip_id. (floating ip: $floating_ip)"
# Same selection, but for the VSC CIDR (address only, no id needed).
while read line
do
ip="$(echo "$line"|awk '{print $1}')"
python3 -c "import ipaddress; ip = ipaddress.ip_address('$(echo "$ip")') in ipaddress.ip_network('$(echo "$vsc_floating_ip_cidr")'); \
print (ip);"|grep "True" &>/dev/null && export vsc_floating_ip="$ip" && \
break
done < <(openstack floating ip list -f value -c "Floating IP Address" -c "Port"|grep None)
[ -z "$vsc_floating_ip" ] && echo "Unable to find VSC floating ip address. Exiting.." 1>&2 && exit 1
echo "Using VSC floating ip: $vsc_floating_ip."
# Pick a random TCP port in 51001-59999 that is not already used by an
# existing port-forwarding rule on $floating_ip_id; result is left in the
# global $new_port. Gives up silently after 100 attempts, in which case
# $new_port keeps whatever value it had before.
generate_new_free_port () {
allocated_ports="$(openstack floating ip port forwarding list "$floating_ip_id" -f value -c "External Port"|sort|uniq)"
for i in $(seq 100)
do
port="$(shuf -i 51001-59999 -n 1)"
echo "$allocated_ports"|grep "$port" &>/dev/null
[ $? -ne 0 ] && new_port="$port" && break
done
}
# Allocate four SSH forwarding ports and one HTTP forwarding port.
generate_new_free_port && ssh_forwarded_port1="$new_port"
generate_new_free_port && ssh_forwarded_port2="$new_port"
generate_new_free_port && ssh_forwarded_port3="$new_port"
generate_new_free_port && ssh_forwarded_port4="$new_port"
generate_new_free_port && http_forwarded_port="$new_port"
echo "Using ssh forwarded ports: $ssh_forwarded_port1 $ssh_forwarded_port2 $ssh_forwarded_port3 $ssh_forwarded_port4."
echo "Using http forwarded port: $http_forwarded_port."
echo "Modifying ../environment/main.tf file."
verify_variable() {
#usage: verify_variable "variable to check" "module to comment out if variable is empty"
variable="$1"
module_to_comment_out="$2"
# ${!variable} is indirect expansion: the value of the variable whose
# name is stored in $variable.
if [ "${!variable}" == "" ]
then
echo "WARNING: Missing \$$variable. Commenting out $module_to_comment_out module in ../environment/main.tf file."
# Prefix '#' onto every line of the matching "module ... { ... }" block.
awk "/module.*$module_to_comment_out/,/}/{\$0=\"#\"\$0}1" ../environment/main.tf > ../environment/main.tf_new
mv ../environment/main.tf_new ../environment/main.tf
fi
}
# Disable Terraform modules whose prerequisite network was not discovered.
verify_variable "nfs_network_id" "vm_with_pf_rules_with_ssh_access_with_nfs_share"
verify_variable "vsc_network_id" "vm_with_pf_rules_with_ssh_access_with_vsc_net"
# Substitute every _PLACEHOLDER_ in main.tf with the discovered values.
sed -i "s/_FLAVOR_NAME_/$FLAVOR_NAME/g" ../environment/main.tf
sed -i "s/_SHARE_NAME_/$SHARE_NAME/g" ../environment/main.tf
sed -i "s/_SHARE_SIZE_/$SHARE_SIZE/g" ../environment/main.tf
sed -i "s/_VM_BASE_NAME_/$VM_BASE_NAME/g" ../environment/main.tf
sed -i "s/_ROOT_FS_VOLUME_SIZE_/$root_fs_volume_size/g" ../environment/main.tf
sed -i "s/_IMAGE_ID_/$image_id/g" ../environment/main.tf
sed -i "s/_VM_NETWORK_ID_/$vm_network_id/g" ../environment/main.tf
sed -i "s/_VM_SUBNET_ID_/$vm_subnet_id/g" ../environment/main.tf
sed -i "s/_NFS_NETWORK_ID_/$nfs_network_id/g" ../environment/main.tf
sed -i "s/_NFS_SUBNET_ID_/$nfs_subnet_id/g" ../environment/main.tf
sed -i "s/_VSC_NETWORK_ID_/$vsc_network_id/g" ../environment/main.tf
sed -i "s/_VSC_SUBNET_ID_/$vsc_subnet_id/g" ../environment/main.tf
sed -i "s/_ACCESS_KEY_/$access_key/g" ../environment/main.tf
sed -i "s/_SSH_FORWARDED_PORT1_/$ssh_forwarded_port1/g" ../environment/main.tf
sed -i "s/_SSH_FORWARDED_PORT2_/$ssh_forwarded_port2/g" ../environment/main.tf
sed -i "s/_SSH_FORWARDED_PORT3_/$ssh_forwarded_port3/g" ../environment/main.tf
sed -i "s/_SSH_FORWARDED_PORT4_/$ssh_forwarded_port4/g" ../environment/main.tf
sed -i "s/_HTTP_FORWARDED_PORT_/$http_forwarded_port/g" ../environment/main.tf
sed -i "s/_FLOATING_IP_ID_/$floating_ip_id/g" ../environment/main.tf
sed -i "s/_VSC_FLOATING_IP_/$vsc_floating_ip/g" ../environment/main.tf
echo "Modifying provider.tf files."
find ../* -name *provider.tf -exec sed -i "s/_OS_CLOUD_/$OS_CLOUD/g" {} \;
# Print ready-to-use connection commands for the freshly configured VMs.
echo "SSH commands for VMs access:"
echo "(myvm) ssh -p $ssh_forwarded_port1 <user>@$floating_ip"
echo "(myvm-nginx) ssh -p $ssh_forwarded_port2 <user>@$floating_ip"
echo "(myvm-vsc_net) ssh -p $ssh_forwarded_port3 <user>@$floating_ip"
echo "(myvm-nfs_share) ssh -p $ssh_forwarded_port4 <user>@$floating_ip"
| true |
af4852b97f3c6c4fae1708876da50730513ed508 | Shell | cue108/KPNScripts | /usr/local/bin/minerScripts/miningPoolHub/startMultiAlgo.sh | UTF-8 | 2,500 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Miner API bind address and Mining Pool Hub account settings.
API="0.0.0.0:1880"
USERNAME=cue.rig1
# NOTE(review): pool password committed to the repo — confirm intentional.
PASS=yeshe
#while getopts c:a: option
#do
# case "${option}"
# in
# c) CASHCOIN=${OPTARG};;
# a) CASHADDRESS=${OPTARG};;
# esac
#done
# Cycle through the algorithms forever: each miner runs until it exits
# (e.g. hits --max-temp or fails), then the next algorithm is started.
while :
do
echo ""
echo "*****************************"
echo "* switching to algo lyra2v2 *"
echo "*****************************"
echo ""
/usr/local/bin/alexis78ccminer -r 0 --max-temp=80 -a lyra2v2 -o stratum+tcp://hub.miningpoolhub.com:12018 -u $USERNAME -p $PASS -b $API
echo ""
echo "*******************************"
echo "* switching to algo neoscrypt *"
echo "*******************************"
echo ""
/usr/local/bin/alexis78ccminer -r 0 --max-temp=80 -a neoscrypt -o stratum+tcp://hub.miningpoolhub.com:12012 -u $USERNAME -p d=0.03125 -b $API
echo ""
echo "*******************************"
echo "* switching to algo equihash *"
echo "*******************************"
echo ""
/usr/local/bin/bminer -max-network-failures 1 -max-temperature 81 -uri stratum://$USERNAME:$PASS@europe.equihash-hub.miningpoolhub.com:12023 -api $API -watchdog=true
echo ""
echo "*********************************"
echo "* switching to algo cryptonight *"
echo "*********************************"
echo ""
/usr/local/bin/nanashiccminer -r 0 --max-temp=80 -a cryptonight -o stratum+tcp://europe.cryptonight-hub.miningpoolhub.com:12024 -u $USERNAME -p $PASS -b $API
echo ""
echo "*********************************"
echo "* switching to algo groestl *"
echo "*********************************"
echo ""
/usr/local/bin/nanashiccminer -r 0 --max-temp=80 -a groestl -o stratum+tcp://hub.miningpoolhub.com:12004 -u $USERNAME -p $PASS -b $API
echo ""
echo "****************************"
echo "* switching to algo lyra2z *"
echo "****************************"
echo ""
# NOTE(review): banner says lyra2z but "-a lyra2" is passed to the miner —
# confirm which algorithm flag was intended here.
/usr/local/bin/alexis78ccminer -r 0 --max-temp=80 -a lyra2 -o stratum+tcp://europe.lyra2z-hub.miningpoolhub.com:12025 -u $USERNAME -p $PASS -b $API
echo ""
echo "************************************"
echo "* switching to algo Myriad-Groestl *"
echo "************************************"
echo ""
/usr/local/bin/alexis78ccminer -r 0 --max-temp=80 -a myr-gr -o stratum+tcp://hub.miningpoolhub.com:12005 -u $USERNAME -p $PASS -b $API
echo ""
echo "************************************"
echo "* switching to algo skein *"
echo "************************************"
echo ""
/usr/local/bin/alexis78ccminer -r 0 --max-temp=80 -a skein -o stratum+tcp://hub.miningpoolhub.com:12016 -u $USERNAME -p d=12.6 -b $API
sleep 5
done
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.