blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
54b469e7ead6ca6f5894a7ff79705cc6ef1c7d91
|
Shell
|
mmartinsOps/prj-cicd
|
/create-argo.sh
|
UTF-8
| 1,578
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# Print the error banner shown when required arguments are missing.
invalid() {
	cat <<'EOF'
Error: Invalid parameters!

EOF
}
# Print usage information for this script.
usage() {
	cat <<'EOF'
$ create-argo.sh <CD>

Argumentos:

 CD: Nome base do projeto a ser criado / atualizado

EOF
}
# The project name comes from the first CLI argument (as documented by
# usage); fall back to a pre-set CD environment variable for backward
# compatibility. Bug fix: the original never read "$1", so the script
# only worked when the caller exported CD beforehand.
CD="${1:-$CD}"
# POSIX test(1) compares strings with '='; '==' is a bashism and this
# script runs under #!/bin/sh.
if [ "$CD" = "" ]; then
	invalid
	usage
	exit 1
fi
echo "------------------------------------------------------------------------------------"
echo "- VERIFICANDO PROJETOS -"
echo "------------------------------------------------------------------------------------"
# Create the project and install Argo CD only when the project does not
# already exist. Testing the command directly replaces the fragile
# '[ "$?" == "1" ]' pattern ('==' is also a bashism under #!/bin/sh).
if ! oc get project "$CD" > /dev/null; then
	echo "Criando namespace"
	# oc create namespace $CD
	oc new-project "$CD" > /dev/null
	oc -n "$CD" apply -f https://raw.githubusercontent.com/argoproj/argo-cd/v1.2.2/manifests/install.yaml
	# In Argo CD v1.x the initial admin password is the argocd-server pod
	# name, which is what this jsonpath extracts.
	ARGOCD_SERVER_PASSWORD=$(oc -n "$CD" get pod -l "app.kubernetes.io/name=argocd-server" -o jsonpath='{.items[*].metadata.name}')
	echo "Criando Rotas"
	# Run argocd-server in insecure mode behind the edge-terminated route
	# created below. (The original line had a no-op '> /dev/null' on this
	# plain assignment; removed.) $PATCH must stay quoted when expanded.
	PATCH='{"spec":{"template":{"spec":{"$setElementOrder/containers":[{"name":"argocd-server"}],"containers":[{"command":["argocd-server","--insecure","--staticassets","/shared/app"],"name":"argocd-server"}]}}}}'
	oc -n "$CD" patch deployment argocd-server -p "$PATCH" > /dev/null
	oc -n "$CD" create route edge argocd-server --service=argocd-server --port=http --insecure-policy=Redirect > /dev/null
	echo "$ARGOCD_SERVER_PASSWORD"
fi
| true
|
5f104df018870055dd6ffab10a4c048aa8820b76
|
Shell
|
yennanliu/utility_shell
|
/k8s/install_minikube.sh
|
UTF-8
| 466
| 2.546875
| 3
|
[] |
no_license
|
# https://kubernetes.io/docs/tasks/tools/install-minikube/
# for Mac OSX
# Step 1) validate if VMX exists
# (minikube's VM drivers need hardware virtualization; this only
# highlights the VMX CPU flag if present — it does not abort otherwise)
sysctl -a | grep -E --color 'machdep.cpu.features|VMX'
# Step 2) Installing minikube
# install kubectl
# install a Hypervisor : VirtualBox
# install minikube
brew install minikube
# Step 3) Confirm Installation
# NOTE(review): this actually starts a cluster with the VirtualBox
# driver; a successful start doubles as the installation check.
minikube start --driver=virtualbox
# Step 4) Check status
minikube status
# Step 5) Stop & (delete) minikube
minikube stop
minikube delete
| true
|
022ac169eeb89ca1d8bf2d467e14105856a21637
|
Shell
|
oduerr/wradlib
|
/scripts/install.sh
|
UTF-8
| 1,861
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2016, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
# print the vars
echo "TRAVIS_PULL_REQUEST " $TRAVIS_PULL_REQUEST
echo "TRAVIS_SECURE_ENV_VARS " $TRAVIS_SECURE_ENV_VARS
echo "TRAVIS_TAG " $TRAVIS_TAG ${TRAVIS_TAG:1}
wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh \
-O miniconda.sh
chmod +x miniconda.sh
bash miniconda.sh -b -p $HOME/miniconda
export PATH=$HOME/miniconda/bin:$PATH
# Add conda-forge channel
conda config --add channels conda-forge
conda update --yes conda
conda update --yes conda
# Create a testenv with the correct Python version
conda create -n wradlib --yes pip python=$PYTHON_VERSION
source activate wradlib
# Install wradlib dependencies
conda install --yes -vv gdal numpy scipy matplotlib netcdf4 h5py
conda list
ls -lart $HOME/miniconda/envs/wradlib/share/gdal
# Install optional wradlib dependencies
conda install --yes xmltodict
# Install wradlib-data
git clone https://github.com/wradlib/wradlib-data.git $HOME/wradlib-data
echo $PWD
ls -lart $HOME
ls -lart $HOME/wradlib-data
# Install nbconvert
conda install --yes notebook nbconvert
# Install wradlib docu dependencies
if [[ "$DOC_BUILD" == "true" ]]; then
conda install --yes sphinx numpydoc
conda install --yes sphinx_rtd_theme
pip install sphinxcontrib-bibtex
# install notebook dependencies
conda install --yes runipy pandoc
# install nbsphinx
conda install --yes nbsphinx
fi
# Install flake8 PEP checker
conda install --yes flake8
# Install coverage modules
if [[ "$COVERAGE" == "true" ]]; then
conda install --yes coverage
pip install codecov
fi
python setup.py install
# print some stuff
python --version
pip --version
python -c "import numpy; print(numpy.__version__)"
python -c "import numpy; print(numpy.__path__)"
| true
|
78f4d4431cdd7079e027f96697b77b0956f78a4e
|
Shell
|
afifsohaili/dotfiles-extensions
|
/booster/fetch_rebase.zsh
|
UTF-8
| 347
| 3.390625
| 3
|
[] |
no_license
|
# Fetch the remote, then rebase the current branch onto the given branch
# (defaults to origin/master).
# Arguments: $1 - branch to rebase onto (optional)
function fetch_rebase() {
	# Bug fix: 'branch' leaked into the calling shell as a global (this file
	# is sourced by zsh); make it local and quote expansions so branch names
	# with unusual characters pass through intact.
	local branch="${1:-origin/master}"
	git fetch && git rebase "$branch"
}
alias gfrb='fetch_rebase'
# Hard-reset the local branch to its remote counterpart. With an argument,
# reset to origin/<arg>; otherwise reset to origin/<current branch>
# (relies on a 'current_branch' helper defined elsewhere in the dotfiles).
function branch_reset() {
	if [ -n "$1" ]; then
		git fetch && git reset --hard "origin/$1"
	else
		git fetch && git reset --hard "origin/$(current_branch)"
	fi
}
alias br=branch_reset
| true
|
23a519f3ec7b4d8c9725cbbdb48ac0b781579587
|
Shell
|
TracyBallinger/SV_pipeline
|
/scripts/combine/lumpy_to_bedpe.sh
|
UTF-8
| 2,099
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Convert Lumpy structural-variant VCF output to BEDPE or BED.
# Usage: lumpy_to_bedpe.sh <lumpy.vcf.gz> <bedpe|bed>
# SU is the cutoff for the read coverage for a break
# Grid Engine directives follow.
# NOTE(review): '#$ -N' is missing its job-name argument — SGE's -N
# option requires a name; confirm the scheduler accepts this line.
#$ -N
#$ -cwd
#$ -j y
#$ -l h_rt=24:00:00
#$ -l h_vmem=1G
#$ -o /exports/eddie/scratch/tballing/errorlogs/$JOB_NAME.o$JOB_ID.$TASK_ID
unset MODULEPATH
. /etc/profile.d/modules.sh
# You really just need bedtools, but bedtools is in bcbio
module load igmm/apps/bcbio/20160916
# module load igmm/apps/python/2.7.10
# Positional arguments: gzipped Lumpy VCF path and requested output format.
lumpyvcf=$1
format=$2
# output=$2
# Note that Lumpy seems to only detect interchromosomal structural variants, so intrachromosomal SVs aren't there. The second chromosome is always the same as the first.
# Convert a gzipped Lumpy VCF ($1) to BEDPE on stdout.
# Pipeline: bcftools extracts the fields, awk turns CIPOS/CIEND confidence
# intervals into start/end coordinates (clamped at 0), resolves the mate
# chromosome from breakend ALT notation, and sums PE+SR read support into
# the score column. Records whose ID contains "_2" (the second record of a
# breakend pair) are dropped by the final grep.
function lumpy_to_bedpe {
lumpyvcf=$1
gzip -dc $lumpyvcf |\
bcftools query -f '%CHROM\t%POS\t%INFO/CIPOS\t%ALT\t%INFO/END\t%INFO/CIEND\t%ID\t%QUAL\t%INFO/STRANDS\t.\t%INFO/SVTYPE:[%GT:.:%PE:%SR\t]\n' - \
| awk 'BEGIN{OFS="\t"}{
split($3, a, ","); s1=$2+a[1]-1; e1=$2+a[2];
split($6, b, ","); s2=$5+b[1]-1; e2=$5+b[2];
split($9, c, ""); str1=c[1]; str2=c[2];
split($4, c, "[][]");
if(length(c)>1) {
n=split(c[2], d, ":");
s2=d[n]+b[1]-1; e2=d[n]+b[2];
chrom2=d[1]; for (i=2; i<n; i++){chrom2=chrom2":"d[i]}}
else chrom2=$1;
split($11, d, ":"); pr=d[3]; sr=d[4]; su=pr+sr;
s1 = (s1 < 0 ? 0 : s1);
s2 = (s2 < 0 ? 0 : s2);
e1 = (e1 < 0 ? 0 : e1);
e2 = (e2 < 0 ? 0 : e2);
$2=s1; $3=e1;
$5=s2; $6=e2;
$9=str1; $10=str2;
$8=su; $4=chrom2;
$7="LUMPY:"$7;
print $0}' | \
grep -v "_2"
}
# Convert a gzipped Lumpy VCF ($1) to single-interval BED on stdout.
# Same coordinate/CI handling as above but only for the first breakpoint;
# score is PE+SR support, name is "LUMPY:<ID>".
function lumpy_to_bed {
lumpyvcf=$1
gzip -dc $lumpyvcf | \
bcftools query -f '%CHROM\t%POS\t%INFO/CIPOS\t%ID\t%QUAL\t%INFO/STRANDS\t%INFO/SVTYPE:[%GT:.:%PE:%SR\t]\n' - \
| awk 'BEGIN{OFS="\t"; FS="\t"}{
split($3, a, ","); s1=$2+a[1]-1; e1=$2+a[2];
split($6, c, ""); str1=c[1]; str2=c[2];
split($7, d, ":"); pr=d[3]; sr=d[4]; su=pr+sr;
id="LUMPY:"$4;
s1 = (s1 < 0 ? 0 : s1);
e1 = (e1 < 0 ? 0 : e1);
print $1,s1,e1,id,su,str1,$7}'
}
# Dispatch on the requested output format. Bug fix: the original fell
# through silently (producing nothing, exit 0) when $format was anything
# other than "bedpe" or "bed"; report that explicitly now.
if [ "$format" == "bedpe" ]; then
	lumpy_to_bedpe "$lumpyvcf"
elif [ "$format" == "bed" ]; then
	lumpy_to_bed "$lumpyvcf"
else
	echo "ERROR: unknown format '$format' (expected 'bedpe' or 'bed')" >&2
	exit 1
fi
| true
|
512f8a51e2b348728f804f95a1a44e36d8a6e3a3
|
Shell
|
xiaoweiruby/edocs
|
/code/ubuntu_install.sh
|
UTF-8
| 2,727
| 3.34375
| 3
|
[] |
no_license
|
clear
echo
echo "=== HI $USER, You are running on UBUNTU: " `lsb_release -c -r`
echo """
this script helps you install your essential tools, let's go
"""
sudo apt-get update # on a fresh system, this is a MUST
sudo apt-get -y install tig xclip git curl tree vim openssh-server
config_ssh()
{
ssh-keygen -t dsa
echo -n "=== copying public key to clipboard..."
echo "=== done"
echo "=== now paste your public key to github->account->sshkey "
echo "=== and press Enter to continue"
read AAA
}
echo "=== now config ssh..."
# Bug fix: the original test was '[ -f ~/.ssh/id_rsa]' — the missing space
# before ']' makes test(1) fail with "missing ]" (non-zero status), so an
# existing RSA key never suppressed key generation.
if [ -f ~/.ssh/id_dsa ] || [ -f ~/.ssh/id_rsa ]
then
	echo === old keys found
	echo "=== do nothing..."
else
	config_ssh
fi
# Write a fresh ~/.gitconfig with the author's identity, editor, aliases,
# color and push settings.
# NOTE(review): the '"""' below is not a docstring — in shell it parses as
# an empty string "" followed by an opening quote, so the whole multi-line
# text (including a leading and trailing blank line) becomes one echo
# argument redirected into ~/.gitconfig. Do not reformat: these bytes are
# the file's contents.
config_git()
{
echo "=== configuring git ..."
echo """
[user]
name = Peter Wang
email = happypeter1983@gmail.com
[core]
editor = vim
[alias]
ci = commit -a -v
co = checkout
st = status
br = branch
throw = reset --hard HEAD
throwh = reset --hard HEAD^
[color]
ui = true
[push]
default = current
""" >~/.gitconfig
echo "git config ... done!"
}
# Ask before (re)creating ~/.gitconfig; an empty answer defaults to "y".
# The two branches differ only in the prompt wording, so pick the prompt
# first and share the read/confirm logic.
if [ -f ~/.gitconfig ] ;then
	prompt=".gitconfig exsits, overwrite? (Y/n): "
else
	prompt="~/.gitconfig not found, create it? (Y/n): "
fi
echo -n "$prompt"
read AAA
if [ "${AAA:-y}" = "y" ];then
	config_git
fi
cd ~
rm -rf Music Templates Videos Public Pictures Documents Downloads examples.desktop # rm folders I do not love
config_vim()
{
cd ~
if [ -d peter-vim ]
then
rm -rf peter-vim
fi
git clone git@github.com:happypeter/peter-vim.git && mv peter-vim .vim \
&& cd .vim && git checkout peter-private
cd ~
rm -rf ~/.vimrc
ln -s ~/.vim/vimrc ~/.vimrc
sudo apt-get -y install ctags
}
if [ -d ~/.vim ] ;then
echo -n "=== .vim exsits, replace? (Y/n): "
read AAA
if [ "${AAA:-y}" = "y" ];then
rm -rf ~/.vim
config_vim
fi
else
echo -n "=== ~/.vim not found, create it? (Y/n): "
read AAA
if [ "${AAA:-y}" = "y" ];then
config_vim
fi
fi
# Install a tiny /bin/ggg helper that opens a fullscreen terminal.
echo "gnome-terminal --full-screen" >~/ggg
sudo mv ~/ggg /bin/ # this may not work if you put it into ~/bin
# Bug fix: after the sudo mv, /bin/ggg is root-owned, so chmod needs sudo
# too (the original plain chmod failed for a non-root user).
sudo chmod +x /bin/ggg
# Write ~/bin/git_my_repo, a one-line helper that clones the given repo.
# Arguments: $1 - repository name under github.com/happypeter
clone_github_repo()
{
	# Bug fix: refuse to generate a broken "git clone .../''" command when
	# no repository name is supplied.
	if [ -z "$1" ]; then
		echo "clone_github_repo: repository name required, skipping" >&2
		return 1
	fi
	mkdir ~/bin 2>/dev/null
	echo "git clone git@github.com:happypeter/'$1'" >~/bin/git_my_repo
	chmod +x ~/bin/git_my_repo
}
# NOTE(review): the original invoked this with no argument, silently
# writing a broken helper; now it is a warned no-op until a name is passed.
clone_github_repo || true
######################################
#
#to enable bash vi mode, we need to add the following to $HOME/.bashrc
#
# set -o vi
# bind -m vi-insert "\C-l":clear-screen
#
#and we may need to add this for vim launcing as well
#
# alias e='vim'
#
# have this for .inputrc
#
# set completion-ignore-case on
# set completion-prefix-display-length 2
#
| true
|
010568af4f61ac24c3250bb9fdac2bff733a9ba9
|
Shell
|
iheanyi1989/aws-eks-workshop
|
/installeksctl.sh
|
UTF-8
| 653
| 2.640625
| 3
|
[
"MIT-0"
] |
permissive
|
#!/bin/bash
# Install eksctl and kubectl for an AWS EKS workshop environment.
curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp
sudo mv /tmp/eksctl /usr/local/bin
eksctl version
curl -o kubectl https://amazon-eks.s3.us-west-2.amazonaws.com/1.21.2/2021-07-05/bin/linux/amd64/kubectl
curl -o kubectl.sha256 https://amazon-eks.s3.us-west-2.amazonaws.com/1.21.2/2021-07-05/bin/linux/amd64/kubectl.sha256
# NOTE(review): the checksum file is downloaded and a digest printed, but
# nothing compares them — verification is manual/by eye only.
openssl sha1 -sha256 kubectl
chmod +x ./kubectl
mkdir -p $HOME/bin && cp ./kubectl $HOME/bin/kubectl && export PATH=$PATH:$HOME/bin
echo 'export PATH=$PATH:$HOME/bin' >> ~/.bashrc
kubectl version --short --client
# NOTE(review): sourcing ~/.bashrc here only affects this script's shell,
# not the caller's interactive session.
source ~/.bashrc
| true
|
d20024a5d7af27bb5a4dbc012ff2add5144711a3
|
Shell
|
dmitriy-korotayev/gotbletu-dotfiles
|
/scripts/.scripts/shoutcast_tuner.sh
|
UTF-8
| 8,718
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# tutorial video: https://www.youtube.com/watch?v=9T4VtC5Bhmo
# http://crunchbang.org/forums/viewtopic.php?pid=396202#p396202
# shoutcast_radionomy_search.sh
# search shoutcast and radionomy,
# send url to radiotray, mpg123, mplayer or another player
# send url to streamripper to record
#
# version 3.1
#
# needs curl, [radiotray, dbus | mpg123 | mplayer], streamripper, [xsel], [perl]
# xsel enables pasting from the X selection (to a config file etc.)
# Comment out line 288 "printf '%s'..." if you don't use it.
# perl is used to urlencode the query.
# Comment out line 246 and uncomment line 245 to escape spaces only
# if your system doesn't have perl.
#
# KEYBOARD SHORTCUTS:
# Ctrl+C to exit normally
# Ctrl+\ to terminate and close player
# Ctrl+Z to start recording current station (handles SIGTSTP)
##### choose from radiotray, mpg123 or mplayer #####
# player=radiotray
#player=mpg123
player=mplayer
# Set this to something other than 'true'
# to have audio player exit with script.
# Otherwise player will continue till closed separately.
# Even with 'keep_player=true', if script is stopped with Ctrl+\
# then player will exit too.
keep_player=true
##### code to record a radio stream (url is $1) in a new terminal #####
# Add your own options to streamripper's command line,
# edit ~/.config/streamripper/streamripper.ini,
# change urxvt to another terminal
# or use a different command altogether.
recorder() {
( setsid urxvt -e streamripper "$1" >/dev/null 2>&1 & )
}
# where to put player control fifo
# (radiotray doesn't use this)
rpipe=/tmp/radio_pipe
HELP="This is an interactive script to query the Shoutcast and Radionomy listings,
put the results in a menu,
and load the chosen radio station in radiotray, mpg123 or mplayer.
There is also an option to record with streamripper.
If you exit the script and leave mpg123 or mplayer running,
you can close either of them with the command:
echo quit >$rpipe
KEYBOARD SHORTCUTS:
Ctrl+C to exit normally
Ctrl+\ to terminate and close player
Ctrl+Z to start recording current station (handles SIGTSTP)"
##########################################################################
case $1 in
--help|-h)
echo "$HELP"
exit
;;
esac
case $player in
##### RADIOTRAY SETTINGS #####
radiotray)
required_commands='curl streamripper radiotray'
start_player() {
if pgrep radiotray >/dev/null
then
echo "$player is already running"
else
( setsid radiotray >/dev/null 2>&1 & )
fi
}
radioplay() {
radiotray "$1"
}
cleanup() { # run just before exit
[[ $player_ok = true ]] && [[ $keep_player = true ]] && {
echo "$player will continue to play.
You can control it from the system tray icon
or run the script again to choose another station."
sleep 4
return
}
pkill radiotray && echo "Closed radiotray."
sleep 4
}
;;
##### END RADIOTRAY #####
##### MPLAYER SETTINGS #####
mplayer)
required_commands='curl streamripper mplayer'
player_regex="^mplayer .*-input file=$rpipe"
launch_player() {
[[ -p $rpipe ]] || { mkfifo "$rpipe" || error_exit "cannot make fifo $rpipe"; }
( setsid sh -c "mplayer -really-quiet -idle -slave -input file=$rpipe; rm -f $rpipe;" >/dev/null 2>&1 & )
sleep 4 & launching_player=$!
}
load_url() {
echo "loadlist $1" >"$rpipe"
}
;;&
##### END MPLAYER #####
##### MPG123 SETTINGS #####
mpg123)
required_commands='curl streamripper mpg123'
player_regex="^mpg123 .*--fifo $rpipe"
launch_player() { # mpg123 will make fifo if necessary
( setsid sh -c "mpg123 --remote --fifo $rpipe; rm -f $rpipe;" >/dev/null 2>&1 & )
(sleep 2; echo 'silence' >"$rpipe") & launching_player=$!
}
load_url() {
echo "loadlist 1 $1" >"$rpipe"
}
;;&
##### END MPG123 #####
##### COMMON TO MPLAYER AND MPG123 #####
mplayer|mpg123)
start_player() {
if pgrep -f "$player_regex" >/dev/null
then
echo "$player is already running"
[[ -p $rpipe ]] || error_exit "fifo missing $rpipe"
(:>"$rpipe") & test_pipe=$!
(sleep 2; kill $test_pipe 2>/dev/null && kill -s SIGPIPE $selfpid) &
else
launch_player
fi
}
radioplay() {
wait $launching_player
[[ -p $rpipe ]] || error_exit "fifo missing $rpipe"
pgrep -f "$player_regex" >/dev/null || error_exit "$player not running"
load_url "$1"
}
cleanup() { # run just before exit
[[ -p $rpipe ]] || { player_ok=false; echo "Script error: fifo $rpipe does not exist." >&2 ;}
pgrep -f "$player_regex" >/dev/null || { player_ok=false; echo "Script error: $player not running" >&2 ;}
[[ $player_ok = true ]] && {
[[ $keep_player = true ]] && {
echo "$player will continue to play.
You can stop it with the command:
echo quit >$rpipe
or run the script again to choose another station."
sleep 4
return
}
echo "closing $player..."
echo 'quit' >"$rpipe" # try to close player nicely
sleep 2 # time for player to quit
}
pkill -f "$player_regex" && echo "$player close forced."
echo "removing $rpipe"
rm -f "$rpipe" # in case it has become a normal file
}
;;
##### END COMMON TO MPLAYER AND MPG123 #####
*)
echo "$0: chosen player $player has not been configured.
Please check line 17 of the script" >&2
exit 1
;;
esac
##########################################################################
selfpid=$$
player_ok=true
error_exit() {
echo "Script error: $1" >&2
player_ok=false
exit 1
}
trap 'cleanup' EXIT
trap 'echo " Exit script
Goodbye..."; exit' SIGHUP SIGINT
trap 'echo " Exit script
($player will be shut down)
Goodbye..."; keep_player=false; exit' SIGQUIT
trap 'error_exit "script terminated"' SIGTERM
trap 'error_exit "broken pipe"' SIGPIPE
trap 'recorder "${playing_url%.m3u}"' SIGTSTP
missing_commands=
for i in $required_commands
do
hash $i || missing_commands+=" $i"
done
[[ $missing_commands ]] && error_exit "This script requires the following commands: $missing_commands
Please install the packages containing the missing commands
and rerun the script."
query_shoutcast() {
curl -s --data "query=$1" "http://www.shoutcast.com/Search/UpdateSearch" | awk '
BEGIN {
RS="},{"
}
{
url = name = $0
if($0=="[]") {exit}
sub(/^.*\"ID\":/,"",url)
sub(/,.*$/,"",url)
url = "http://yp.shoutcast.com/sbin/tunein-station.pls?id=" url
sub(/^.*\"Name\":\"/,"",name)
sub(/\".*$/,"",name)
print url,name
}
'
}
query_radionomy() {
curl -sL "http://www.radionomy.com/en/search?q=$1" |awk '
BEGIN {
RS="<h2 class=\"radio-title-list\"><a href=\"/en/radio/"
FS="</a></h2>"
}
NR < 2 {next}
{
url = name = $1
sub(/^.*>/,"",name)
sub(/\/index\".*$/,"",url)
url="http://listen.radionomy.com/" url ".m3u"
print url,name
}
'
}
start_player
unset playing_name playing_url
while true
do
echo "Please enter keyword(s)"
read keyword
#keyword_esc="${keyword// /%20}" # escape spaces for url
keyword_esc=$(perl -MURI::Escape -e 'print uri_escape($ARGV[0]);' "$keyword")
results_sh=$( query_shoutcast "$keyword_esc" )
results_ra=$( query_radionomy "$keyword_esc" )
if [[ $results_sh ]] && [[ $results_ra ]]
then
results="$results_sh"$'\n'"$results_ra"
elif [[ $results_sh ]]
then
echo "No results for $keyword on radionomy"
results="$results_sh"
elif [[ $results_ra ]]
then
echo "No results for $keyword on shoutcast"
results="$results_ra"
else
echo "Sorry, no results for $keyword"
continue
fi
unset list
declare -A list # make associative array
while read -r url name # read in awk's output
do
list["$name"]="$url"
done <<< "$results"
PS3='Please enter the number of your choice > '
while true
do
menu=("${!list[@]}")
[[ $playing_name && $playing_url ]] && menu+=("RECORD \"$playing_name\"")
select station in "${menu[@]}" 'SEARCH AGAIN' QUIT
do
[[ $station = "RECORD \"$playing_name\"" ]] && {
recorder "${playing_url%.m3u}" # streamripper won't take m3u urls
break
}
[[ $station = 'SEARCH AGAIN' ]] && break 2
[[ $station = QUIT ]] && { echo 'Goodbye...'; exit; }
[[ $station ]] && {
# comment out next line if you don't use xsel
printf '%s' "${list[$station]}" | xsel --input #--clipboard # can paste url
radioplay "${list[$station]}"
playing_name=$station
playing_url=${list[$station]}
break
}
done
echo "
Station last chosen was \"$playing_name\" ( $playing_url )
"
done # closes loop started at line 274
done # closes loop started at line 241
exit
| true
|
541d9e7394ee38cc96188d079264b079cb7444e8
|
Shell
|
ResearchIT/molcas_pipeline
|
/molcas_pipeline.sh
|
UTF-8
| 2,850
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
#notes
#PROJECT naming convention is molecule_XXsolv_temp (on the folder)
#the files additionally have a timestep added e.g. molecule_XXsolv_temp_timestep
#assumptions
# project folder name will be used as the prefix of the input,prm,key,etc. filenames
# e.g. project folder will be : HN3_73solv_298K, files in the folder may be HN3_73solv_298K_114
# see http://www.molcas.org/documentation/manual/node77.html for DYNAMIX input options
#arguments:
# r) root folder (the base folder that your molcas project folder is located inside of), e.g. /work/LAS/some-lab/user/
# p) project (the name of the project folder)
# b) begintimestep
# e) endtimestep
# s) timestep size
# d) dt (typically 41. 41 atomic units (a.u.) ~ 1 femtosecond) this will be used to find the appropriate timestep in the log file
# so it needs to represent the timestep used for the previous run (the project level), it can be tuned in the input template for this experiment
# t) template - full path to the input template
#example
while getopts r:p:b:e:s:d:h:t: option
do
case "${option}"
in
r) ROOT=${OPTARG};;
p) PROJECT=${OPTARG};;
b) BEGINTIMESTEP=${OPTARG};;
e) ENDTIMESTEP=${OPTARG};;
s) STEPSIZE=${OPTARG};;
d) DT=${OPTARG};;
t) TEMPLATE=${OPTARG};;
esac
done
if [ ! -d "$ROOT/$PROJECT" ]; then
echo ERROR: PROJECT folder path is incorrect
exit 1
fi
if [ ! -f "$TEMPLATE" ]; then
echo ERROR: template.input path is incorrect
exit 1
fi
for ((STEP=$BEGINTIMESTEP; STEP<=$ENDTIMESTEP; STEP+=$STEPSIZE)); do
DOWNSTREAM=Trajectory_${STEP}fs
if [ -d "$ROOT/$PROJECT/$DOWNSTREAM" ]; then
echo ERROR: Downstream folder path already exists
exit 1
fi
mkdir $ROOT/$PROJECT/$DOWNSTREAM
cp $TEMPLATE $ROOT/$PROJECT/$DOWNSTREAM/$DOWNSTREAM.input
find $ROOT/$PROJECT -maxdepth 1 -name $PROJECT*.prm -exec cp {} $ROOT/$PROJECT/$DOWNSTREAM/$DOWNSTREAM.prm \;
find $ROOT/$PROJECT -maxdepth 1 -name $PROJECT*.key -exec cp {} $ROOT/$PROJECT/$DOWNSTREAM/$DOWNSTREAM.key \;
sed -i 's/'"$PROJECT"'.*.prm/'"$DOWNSTREAM"'.prm/g' $ROOT/$PROJECT/$DOWNSTREAM/$DOWNSTREAM.key
#might want to consider changing the job name in each of the sbatch scripts, or make a sbatch array
cp $ROOT/$PROJECT/molcas_sub $ROOT/$PROJECT/$DOWNSTREAM/molcas_sub
sed -i 's/file_name/'"$DOWNSTREAM"'/g' $ROOT/$PROJECT/$DOWNSTREAM/molcas_sub
find $ROOT/$PROJECT/TMP -name $PROJECT*.$STEP -exec cp {} $ROOT/$PROJECT/$DOWNSTREAM/$DOWNSTREAM.xyz \;
sed -n '/Velocities [(]*time.*'"$(echo "$STEP*$DT" | bc)"'/,/^$/{//!p}' $ROOT/$PROJECT/$PROJECT*.log | tail -n +4 | head -n -1 | cut -b 17-56 > $ROOT/$PROJECT/$DOWNSTREAM/$DOWNSTREAM.velocity.xyz
cd $ROOT/$PROJECT/$DOWNSTREAM
sbatch molcas_sub
cd -
done
| true
|
147767d9a2533e288a01ab811b775e9963b14729
|
Shell
|
greatmobile/vpn
|
/add-host.sh
|
UTF-8
| 617
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactively (re)set the VPN host/domain used by the crot/premium
# scripts and v2ray.
echo "Tukar Domain?"
select yn in "Yes" "No"; do
	case $yn in
		Yes ) make install; break;;
		No ) exit;;
	esac
done
clear
echo -e "Masukkan Domain"
read -p "Hostname / Domain: " host
# Drop any previously recorded domain before appending the new one below.
rm -f /var/lib/crot-script/ipvps.conf
rm -f /var/lib/premium-script/ipvps.conf
rm -f /etc/v2ray/domain
clear
# Robustness: mkdir -p is idempotent; the bare mkdir in the original
# printed "File exists" errors on every rerun.
mkdir -p /etc/v2ray
mkdir -p /var/lib/premium-script
mkdir -p /var/lib/crot-script
clear
# NOTE(review): the domain is deliberately asked twice in the original
# flow; the second answer is the one recorded.
echo -e "Masukkan Domain Sekali Lagi"
read -p "Hostname / Domain: " host
echo "IP=$host" >> /var/lib/crot-script/ipvps.conf
echo "IP=$host" >> /var/lib/premium-script/ipvps.conf
echo "$host" >> /etc/v2ray/domain
| true
|
35f94d57d9b0a7585f2d91944180324d72c786ed
|
Shell
|
sjlongland/lib_mysqludf_sys
|
/mkdeb.sh
|
UTF-8
| 3,625
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/sh
# Debian package build script
set -e
# Set some directory locations
: ${SRCDIR:=$( realpath $( dirname "$0" ))}
: ${WORKDIR:=${SRCDIR}/work}
: ${DISTDIR:=${SRCDIR}/dist}
# Begin by ensuring we have up-to-date sources
if [ -z "${SKIP_FETCH}" ]; then
make -C ${SRCDIR} fetch
fi
# Take the version from the mtime of the file
: ${VERSION:=$( date -r lib_mysqludf_sys.c +%Y%m%d )}
# Debian version information
: ${DEBVER:=1}
# Maintainer information
if [ -z "${MAINTAINER}" ]; then
MAINTAINER="$(
sed -ne '/^Maintainer:/ { s/^Maintainer: //; p; }' \
${SRCDIR}/debian/control
)"
fi
# Build flags
: ${BUILD_FLAGS:=-us -uc}
# Parse command-line arguments
: ${RECYCLE_ORIG:=n}
: ${CLEAN_DIST:=y}
: ${CLEAN_WORK:=y}
: ${APPEND_RELEASE:=y}
: ${DOCKER:=n}
while [ $# -gt 0 ]; do
case "$1" in
--work)
WORKDIR="$2"
shift
;;
--dist)
DISTDIR="$2"
shift
;;
--debver)
DEBVER="$2"
shift
;;
--recycle-orig)
RECYCLE_ORIG=y
CLEAN_DIST=n
;;
--docker)
DOCKER=y
;;
--docker-image)
DOCKER_IMAGE="$2"
shift
;;
--no-append-release)
APPEND_RELEASE=n
;;
--no-clean)
case $2 in
work)
CLEAN_WORK=n
;;
dist)
CLEAN_DIST=n
;;
*)
echo "Unknown area $2"
exit 1
;;
esac
shift
;;
esac
shift
done
if [ "${CLEAN_WORK}" = y ]; then
	# Create a fresh work directory.
	# Bug fix: WORKDIR/DISTDIR come from the user-supplied --work/--dist
	# options and may contain spaces — all expansions must be quoted.
	[ ! -d "${WORKDIR}" ] || rm -fr "${WORKDIR}"
	mkdir "${WORKDIR}"
else
	[ -d "${WORKDIR}" ] || mkdir "${WORKDIR}"
fi
if [ "${CLEAN_DIST}" = y ]; then
	# Re-create the distribution directory
	[ ! -d "${DISTDIR}" ] || rm -fr "${DISTDIR}"
	mkdir "${DISTDIR}"
else
	[ -d "${DISTDIR}" ] || mkdir "${DISTDIR}"
fi
# Are we using `docker` for this?
if [ "${DOCKER}" = y ]; then
exec docker run --rm \
-v ${SRCDIR}:/tmp/src \
-v ${DISTDIR}:/tmp/dist \
-e MAINTAINER="${MAINTAINER}" \
-e VERSION=${VERSION} \
-e DEBVER=${DEBVER} \
-e BUILD_FLAGS="${BUILD_FLAGS}" \
-e RECYCLE_ORIG="${RECYCLE_ORIG}" \
-e WORKDIR=/tmp/work \
-e DISTDIR=/tmp/dist \
-e SRCDIR=/tmp/src \
${DOCKER_IMAGE:-sjlongland/debian-pkg-build-env} \
/usr/sbin/gosu $( id -u ):$( id -g ) \
/bin/sh -xe /tmp/src/mkdeb.sh --no-clean dist
fi
# Package name and version
PACKAGE_VER=lib-mysqludf-sys-${VERSION}
if [ ${APPEND_RELEASE} = y ]; then
# Append the Debian release information
DEBVER=${DEBVER}$( \
lsb_release -si | tr A-Z a-z \
)$( \
lsb_release -sr | tr . p \
)
fi
if [ ${RECYCLE_ORIG} = n ] || \
[ ! -f ${DISTDIR}/lib-mysqludf-sys_${VERSION}.orig.tar.xz ]
then
# Create the package source directory
mkdir ${WORKDIR}/${PACKAGE_VER}
cp ${SRCDIR}/lib_mysqludf_sys.c ${SRCDIR}/Makefile \
${WORKDIR}/${PACKAGE_VER}
# Create the "original" tarball
tar -C ${WORKDIR} \
-cvf ${WORKDIR}/lib-mysqludf-sys_${VERSION}.orig.tar \
${PACKAGE_VER}
xz -9 ${WORKDIR}/lib-mysqludf-sys_${VERSION}.orig.tar
else
# Copy the pre-made tarball to our work area
cp ${DISTDIR}/lib-mysqludf-sys_${VERSION}.orig.tar.xz \
${WORKDIR}
# Unpack it here
tar -C ${WORKDIR} -xJvf \
${WORKDIR}/lib-mysqludf-sys_${VERSION}.orig.tar.xz
fi
# Now put the Debian package files in
tar -C ${SRCDIR} -cf - debian \
| tar -C ${WORKDIR}/${PACKAGE_VER} -xf -
# Generate the changelog
cat > ${WORKDIR}/${PACKAGE_VER}/debian/changelog <<EOF
lib-mysqludf-sys (${VERSION}-${DEBVER}) unstable; urgency=low
* Automatic build from source
-- ${MAINTAINER} $( date -R )
EOF
# Build the package
( cd ${WORKDIR}/${PACKAGE_VER} && dpkg-buildpackage ${BUILD_FLAGS} )
# Clean up the source tree
rm -fr ${WORKDIR}/${PACKAGE_VER}
# Move out the distributed files
mv ${WORKDIR}/* ${DISTDIR}
| true
|
cf38f82f096372e0e605c11a6e52df700d84a32a
|
Shell
|
BeWe11/dotfiles
|
/.zshrc
|
UTF-8
| 2,096
| 2.859375
| 3
|
[] |
no_license
|
eval $(/opt/homebrew/bin/brew shellenv)
printf '\n%.0s' {1..100}
autoload -U compinit; compinit
# prevent zsh from catching `CTRL-X` keys
bindkey -e
if [ -f ~/.zsh_aliases ]; then
. ~/.zsh_aliases
fi
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export PATH="$PATH:/Users/ben/bin"
export PATH="$PATH:/Applications/Postgres.app/Contents/Versions/latest/bin"
export PATH=/usr/local/opt/ruby/bin:$PATH
export HOMEBREW_CASK_OPTS="--appdir=/Applications"
export EDITOR=nvim
export FZF_DEFAULT_COMMAND='rg --files --hidden --follow --smart-case --color=never --glob "!.git/*"'
export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
export HISTFILE=~/.zsh_history
export HISTFILESIZE=1000000000
export HISTSIZE=1000000000
setopt INC_APPEND_HISTORY
export HISTTIMEFORMAT="[%F %T] "
setopt EXTENDED_HISTORY
setopt HIST_IGNORE_ALL_DUPS
export PROMPT_COMMAND="history -a; history -c; history -r; $PROMPT_COMMAND"
# Git branch in prompt.
function parse_git_branch() {
git branch 2> /dev/null | sed -n -e 's/^\* \(.*\)/[\1]/p'
}
COLOR_DEF=%{$'\e[0m'%}
COLOR_USR=%{$'\e[38;5;243m'%}
COLOR_DIR=%{$'\e[38;5;197m'%}
COLOR_GIT=%{$'\e[38;5;39m'%}
setopt PROMPT_SUBST
export PROMPT='${COLOR_USR}%n ${COLOR_DIR}%~ ${COLOR_GIT}$(parse_git_branch)${COLOR_DEF} $ '
# Setup GPG
GPG_TTY=$(tty)
export GPG_TTY
# Setup Poetry and pyenv
export PATH="$HOME/.local/bin:$PATH"
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
eval "$(pyenv init -)"
# Setup for rust
export PATH="$HOME/.cargo/bin:$PATH"
# Setup for nvm
export NVM_DIR="$HOME/.nvm"
[ -s "$(brew --prefix)/opt/nvm/nvm.sh" ] && \. "$(brew --prefix)/opt/nvm/nvm.sh" # This loads nvm
[ -s "$(brew --prefix)/opt/nvm/etc/bash_completion.d/nvm" ] && \. "$(brew --prefix)/opt/nvm/etc/bash_completion.d/nvm" # This loads nvm bash_completion
if [ -f ~/.vimprofilename ]; then
if [[ $(< ~/.vimprofilename) = "dark" ]]; then
echo -e "\033]1337;SetProfile=dark\033\\"
elif [[ $(< ~/.vimprofilename) = "light" ]]; then
echo -e "\033]1337;SetProfile=light\033\\"
fi
fi
| true
|
7254529463dd36879b3454e2e794d596019d2442
|
Shell
|
Franco-Poveda/bapro-STP-dashboard
|
/provision/install-docker.sh
|
UTF-8
| 440
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision Docker CE on Ubuntu. Must be run as root (e.g. via sudo).
if [[ $(id -u) -ne 0 ]] ; then echo "Please run as root" ; exit 1 ; fi
apt-get update
apt-get install -y apt-transport-https ca-certificates curl software-properties-common
# Bug fix: we are already root past the check above, so the stray 'sudo'
# was redundant (and fails on minimal images without sudo installed).
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
apt-key fingerprint 0EBFCD88
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
# Consistency: use apt-get (the stable scripting CLI) rather than 'apt'.
apt-get update
apt-get install -y docker-ce
| true
|
bec8c896ab625bc0464dcee03b0bdb895fe8c7ef
|
Shell
|
hupili/2C-Web-Research
|
/feature-extraction/run.sh
|
UTF-8
| 123
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the feature extractor over every raw data file, appending stdout
# and stderr to per-stream log files.
rm -f log.*
# Bug fix: iterate with a glob instead of parsing `ls` output (backticks +
# word-splitting broke on filenames with whitespace or glob characters).
for f in ../data/raw/*
do
	# Skip the literal unmatched pattern when the directory is empty/missing.
	[ -e "$f" ] || continue
	./extractor.sh "$f" >> log.stdout 2>> log.stderr
done
| true
|
30364efbc67ce1b9df6c27eba54c181708834834
|
Shell
|
chipster/chipster-openshift
|
/scripts/costs.bash
|
UTF-8
| 4,225
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
core_hour="0.5"
ram_gb_hour="1"
storage_tb_hour="3"
bu_eur=$(echo "scale=3; 420 / 20000" | bc -l)
hours=$((24 * 365))
echo "Using prices:"
echo "Pod core hour: $core_hour"
echo "Pod RAM GB hour: $ram_gb_hour"
echo "Storage TiB hour: $core_hour"
echo "1 BU: $bu_eur €"
echo "Calculating prices for: $hours hours"
echo ""
#######################################
# Print one formatted row of the cost report.
# Globals: core_hour, ram_gb_hour, storage_tb_hour, bu_eur, hours (read);
#          type/name/cpu/memory/storage/bu/eur are assigned globally
#          (no 'local') — callers rely on their own copies, not these.
# Arguments:
#   $1 - row type label, $2 - resource name,
#   $3 - CPU millicores, $4 - RAM MiB, $5 - storage MiB (any may be empty)
# Outputs: one table row on stdout; the numeric/cost columns are omitted
#          entirely when both cpu and storage are empty.
#######################################
function print_row () {
type="$1"
name="$2"
cpu="$3"
memory="$4"
storage="$5"
printf "%-20s %-30s " "$type" "$name"
bu="0"
# Compute BU: millicores/1000 * core price + MiB/1000 * RAM price.
if [ -n "$cpu" ]; then
bu=$(echo "$cpu / 1000 * $core_hour + $memory / 1000 * $ram_gb_hour" | bc -l)
fi
# Storage BU: MiB / 1e6 * per-TiB price.
if [ -n "$storage" ]; then
bu=$(echo "$bu + $storage / 1000000 * $storage_tb_hour" | bc -l)
fi
if [ -n "$cpu" ] || [ -n "$storage" ]; then
round_bu=$(echo "scale=2; $bu/1" | bc)
# EUR over the whole reporting period; bc with default scale 0 truncates.
eur=$(echo "$bu * $bu_eur * $hours" | bc -l)
round_eur=$(echo "$eur/1" | bc)
printf "%-10s %-10s %-10s %-10s %-10s" "$cpu" "$memory" "$storage" "$round_bu" "$round_eur"
fi
printf "\n"
}
projects=$(oc get project -o json | jq '.items[].metadata.name' -r)
total_cpu=0
total_memory=0
total_storage=0
printf "%-20s %-30s %-10s %-10s %-10s %-10s %-10s\n" TYPE NAME millicores "RAM MiB" "PVC MiB" BU €
for project in $(echo "$projects"); do
project_cpu=0
project_memory=0
project_storage=0
pods=$(oc -n $project get pod -o json)
while read -r pod_index; do
# while runs once even with empty array
if [ -n "$pod_index" ]; then
pod_cpu=0
pod_memory=0
pod=$(echo $pods | jq .items[$pod_index])
phase=$(echo $pod | jq .status.phase -r)
if [ $phase = "Running" ]; then
pod_name=$(echo $pod | jq .metadata.name -r)
while read -r container_index; do
# while runs once even with empty array
if [ -n "$container_index" ]; then
container=$(echo $pod | jq .spec.containers[$container_index])
container_name=$(echo $container | jq .name -r)
cpu=$(echo $container | jq .resources.requests.cpu -r)
memory=$(echo $container | jq .resources.requests.memory -r)
# convert cores to millicores (and keep millicores as it is)
cpu=$(echo $cpu | sed "s/$/000/g" | sed "s/m000//g")
# convert gibibytes to mebibytes
memory=$(echo $memory | sed "s/Gi/000Mi/g" | sed "s/Mi//g")
print_row " container" $container_name $cpu $memory
pod_cpu=$(($pod_cpu + $cpu))
pod_memory=$(($pod_memory + $memory))
fi
done <<< "$(echo $pod | jq '.spec.containers | keys | .[]' -r)"
print_row " pod total" $pod_name $pod_cpu $pod_memory
project_cpu=$(($project_cpu + $pod_cpu))
project_memory=$(($project_memory + $pod_memory))
fi
fi
done <<< "$(echo $pods | jq '.items | keys | .[]' -r)"
pvcs=$(oc -n $project get pvc -o json)
while read -r pvc_index; do
# while runs once even with empty array
if [ -n "$pvc_index" ]; then
pvc=$(echo $pvcs | jq .items[$pvc_index])
pvc_name=$(echo $pvc | jq .metadata.name -r)
storage=$(echo $pvc | jq .spec.resources.requests.storage -r)
# convert mebibytes
storage=$(echo $storage | sed "s/Ti/000Gi/g" | sed "s/Gi/000Mi/g" | sed "s/Mi//g")
print_row " pvc" $pvc_name "" "" $storage
project_storage=$(($project_storage + $storage))
fi
done <<< "$(echo $pvcs | jq '.items | keys | .[]' -r)"
print_row " project total" $project $project_cpu $project_memory $project_storage
total_cpu=$(($total_cpu + $project_cpu))
total_memory=$(($total_memory + $project_memory))
total_storage=$(($total_storage + $project_storage))
done
print_row "total" "" $total_cpu $total_memory $total_storage
| true
|
e518b5d7f9addc84d23cd1931e05f57293823149
|
Shell
|
svikramjeet/git
|
/deploy
|
UTF-8
| 214
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Query the review tool and exit 0 only when the app is ready to deploy.
content=$(wget http://production-review-tool.herokuapp.com/api/checkReadyToDeploy?app_name=clientshare-web -q -O -)
# Quote to avoid word-splitting/globbing of the raw JSON response.
echo "$content"
# Bug fix/robustness: the original tested a hard-coded substring position
# (${content:1:14}), which breaks as soon as the JSON key order or
# whitespace changes. Look for the success flag anywhere in the response.
if printf '%s' "$content" | grep -q '"success":true'; then
	exit 0
fi
exit 1
| true
|
5456c1f94005eb338c3c9a47ecab07a952197a4c
|
Shell
|
JeepGuy/bash_scripts
|
/script_files_ex/logging/logging_sol_1.sh
|
UTF-8
| 1,370
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Author: jcbrent
# Created Date: 27_Jul_2017
# Modified by:
# Write a shell script that displays one random number to the screen and also generates a syslog
# message with that random number. Use the "user" facility and the "info" facility for your
# messages.
#
# Hint: Use $RANDOM
# ------------------------------------------------------------------------------------
# Capture one random value up front so the console message and the syslog
# entry are guaranteed to report the same number.
rand_num=$RANDOM
divider=" ----------------------------------------------------------------------------- "
printf '%s\n' ' Starting the "Logging Solution 1" Script - Named: "logging_sol_1.sh '
printf '%s\n' "$divider"
# Main body: show the number on the console, then record it in syslog.
printf '%s\n' " The random number is ${rand_num}"
printf '%s\n' " "
#logger -p user.info $rand_num
# -s echoes to stderr, -i includes the PID, -t tags the entry, -p sets
# facility.priority (user.info as required by the exercise).
logger -s -i -t logging_sol_1_script -p user.info " The random number is ${rand_num}"
# ------------------------------------------------------------------------------------
printf '%s\n' "$divider"
# ------------------------------------------------------------------------------------
# Exit with an explicit exit status last.
printf '%s\n' "Script ran successfully with an exit status of 0 (zero)."
exit 0
| true
|
2cb9f046fd3c5b0d01c57f10b8952a8b925c71d2
|
Shell
|
spagetti12/geba_dwnld
|
/geba_v1-1_20171024.sh
|
UTF-8
| 4,232
| 3.578125
| 4
|
[] |
no_license
|
#set -x
# 0. BEFORE YOU START
# go to http://wrdc.mgo.rssi.ru/ and log in:
# user: pavle
# password: pavle
# the script requires an argument or 2 arguments
# 1. DIFFUSE RADIATION:
# sh geba.sh difrad
# 2. SUNSHINE DURATION:
# sh geba.sh sundur
# 3. GLOBAL RADIATION:
# --- REQUIRES 2 ARGUMENTS ---
# parallel sh geba.sh ::: glbrad ::: $(seq 1 7)
# this downloads all the global radiation files (takes too long)
# if you want to download only one group of stations, run the script like this:
# sh geba.sh glbrad 5 (eg for group of station #5)
# depending on the argument(s), we define 3 'names', thats how they are named on wrdc website:
# d - diffuse radiation
# s - sunshine duration
# t - global radiation
if [ "$1" = 'difrad' ]; then
name="d"
ct="44"
elif [ "$1" = 'sundur' ]; then
name="s"
ct="44"
elif [ "$1" = 'glbrad' ]; then
name="t""$2"
ct="45"
else echo 'no such variable'; exit
fi
# START OF THE SCRIPT
# make the working directory; temp dir (to be deleted later) and dir for daily data
mkdir "$1""$2"; cd "$1""$2"; mkdir temp; mkdir daily
curl -d "login=pavle" -d "password=pavle" http://wrdc.mgo.rssi.ru/wrdccgi/protect.exe?wrdc/wrdc_new.html
# get the files from the GEBA website
wget --quiet -r -np -nH --cut-dirs=3 -R "$name".html http://wrdc.mgo.rssi.ru/wrdccgi/protect.exe?data_list_full_protected/"$name"/"$name".html
# take the URLs from the downloaded files
find . -name "protect*" -type f | xargs grep http > down_"$name".log
# delete the downloaded files
find . -name "protect.exe*" -type f | xargs rm
# clean the URLs from downloaded files - remove everything before http
sed 's/^.*http/http/' down_"$name".log > temp1_"$name".log
# remove everything after html
if [ "$1" = 'difrad' ] || [ "$1" = 'glbrad' ] ; then
sed 's/\".*//' temp1_"$name".log |grep _"$name".html > temp2_"$name".log
elif [ "$1" = 'sundur' ] ; then
sed 's/\".*//' temp1_"$name".log |grep .html > temp2_"$name".log
fi
curl -d "login=pavle" -d "password=pavle" http://wrdc.mgo.rssi.ru/wrdccgi/protect.exe?wrdc/wrdc_new.html
wget --quiet -i temp2_"$name".log
# download the new cleaned URLs
if [ "$1" = 'sundur' ] ; then
find . -name "protect*" -type f | xargs grep http > down_take2_"$name".log
find . -name "protect.exe*" -type f | xargs rm
sed 's/^.*http/http/' down_take2_"$name".log > temp3_"$name".log
sed 's/\".*//' temp3_"$name".log |grep _"$name".html > temp4_"$name".log
curl -d "login=pavle" -d "password=pavle" http://wrdc.mgo.rssi.ru/wrdccgi/protect.exe?wrdc/wrdc_new.html
wget --quiet -i temp4_"$name".log
fi
# rename the new files, there is now a name of station in the filename
for fn in protect.exe*; do newfn="$(echo "$fn" | cut -c"$ct"-)"; mv "$fn" "$newfn";done
# replace %2F string from the filename with _
for i in ./*%2F*;do mv -- "$i" "${i//%2F/_}";done
# convert the new files from html to txt
for i in ./*.html;do html2text "$i" > "$i".txt;done
# change the extension of the new files from txt to html (this does not change the content)
if [ "$1" = 'sundur' ] ; then
for i in ./*_s.html.txt;do mv -- "$i" "${i//.html/}";done
else
for i in ./*.html.txt;do mv -- "$i" "${i//.html/}";done
fi
# take only first 8 lines of the file with the station info (header)
# take the only monthly mean values from each file
for i in *.txt;do head -8 "$i" > temp/"$i";grep -e Year -e DATE -e MEAN $i > zzzz_"$i";done
cd temp
# HEADERS - remove the year in the end of every filename
# this is done because all the headers for the same stations are the same, not depending on year
# FIX: the sunshine-duration argument is 'sundur' (see the checks at the top);
# it was misspelled 'sunrad' here, so sundur runs always fell through to the
# 8-character else branch instead of stripping 7 characters.
if [ "$1" = 'difrad' ] || [ "$1" = 'sundur' ] ; then
ls *txt| awk -F. '{printf "mv %s %s\n",$0,substr($1,1,length($1)-7);}' |ksh
else ls *txt| awk -F. '{printf "mv %s %s\n",$0,substr($1,1,length($1)-8);}' |ksh
fi
# join together headers with the values and change extension to csv
for file in *;do cat $file ../zzzz_"$file"* > ../"$file".csv;sed -i 's/_/ /g' ../"$file".csv;sed -i 's/|/ /g' ../"$file".csv;done
cd -
mv *txt daily
rm -rf temp *html *log
# when the script is over, there will be folder named like the argument you've chosen
# in this folder you'll find files with the montly data and folder daily with the daily data
# written by Pavle Arsenovic January 2017
| true
|
ddef90db4dd3cf770d4464bd47d0f0011852d6e5
|
Shell
|
docker-mailserver/docker-mailserver
|
/target/bin/listmailuser
|
UTF-8
| 3,182
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# List every configured mail account, optionally with its Dovecot storage
# quota (when ENABLE_QUOTAS=1) and any aliases pointing at it.

# shellcheck source=../scripts/helpers/index.sh
source /usr/local/bin/helpers/index.sh

# Workaround to support ENABLE_QUOTAS toggling during tests:
# shellcheck source=/dev/null
source /etc/dms-settings 2>/dev/null

function _main() {
  # Accounts DB: one 'address|password-hash' line per account.
  local DATABASE_ACCOUNTS='/tmp/docker-mailserver/postfix-accounts.cf'
  # Alias DB: 'alias recipient[,recipient...]' lines. NOTE: not passed as an
  # argument -- _alias_list_for_account reads it via bash dynamic scoping.
  local DATABASE_VIRTUAL='/tmp/docker-mailserver/postfix-virtual.cf'

  _list_entries "${DATABASE_ACCOUNTS}"
}
# Print one formatted bullet per valid line of the accounts database.
function _list_entries() {
  local DATABASE=${1}
  _db_should_exist_with_content "${DATABASE}"

  local FORMATTED
  while read -r ENTRY; do
    FORMATTED=$(_format_list_item "${ENTRY}")
    echo -e "* ${FORMATTED}\n"
  done < <(_get_valid_lines_from_file "${DATABASE}")
}
# Render one accounts-DB line as 'address [quota] [alias list]'.
function _format_list_item() {
  local ENTRY=${1}

  # The first '|'-delimited field of a postfix-accounts.cf line is the address.
  local ACCOUNT
  ACCOUNT=$(cut -d'|' -f1 <<< "${ENTRY}")

  local QUOTA ALIASES
  QUOTA=$(_quota_show_for "${ACCOUNT}")
  ALIASES=$(_alias_list_for_account "${ACCOUNT}")

  # Only append the optional parts when they are non-empty.
  local OUTPUT="${ACCOUNT}"
  [[ -n ${QUOTA} ]] && OUTPUT+=" ${QUOTA}"
  [[ -n ${ALIASES} ]] && OUTPUT+="\n [ aliases -> ${ALIASES} ]"

  echo "${OUTPUT}"
}
# Emit '( used / limit ) [pct%]' for an account's STORAGE quota, or nothing
# when quota support is disabled.
function _quota_show_for() {
  local ACCOUNT=${1}

  [[ ${ENABLE_QUOTAS} -ne 1 ]] && return 0

  # Matches a line where the 3rd column is `type='STORAGE'` - returning the next three column values:
  local FIELDS
  IFS=' ' read -r -a FIELDS <<< "$(doveadm quota get -u "${ACCOUNT}" | tail +2 | awk '{ if ($3 == "STORAGE") { print $4" "$5" "$6 } }')"

  # Convert the raw kibibyte columns into human-readable sizes.
  local USED LIMIT PERCENT
  USED="$(_bytes_to_human_readable_size "${FIELDS[0]}")"
  LIMIT="$(_bytes_to_human_readable_size "${FIELDS[1]}")"
  PERCENT="${FIELDS[2]}%"

  echo "( ${USED} / ${LIMIT} ) [${PERCENT}]"
}
# Convert a Dovecot kibibyte count to an IEC size string ('~' when no limit).
function _bytes_to_human_readable_size() {
  local KIBIBYTES=${1:-}
  # `-` represents a non-applicable value (eg: Like when `SIZE_LIMIT` is not set):
  if [[ ${KIBIBYTES} == '-' ]]; then
    echo '~'
  # Otherwise a value in KibiBytes (1024 bytes == 1k) is expected (Dovecots internal representation):
  elif [[ ${KIBIBYTES} =~ ^[0-9]+$ ]]; then
    # kibibytes to bytes, converted to the appropriate IEC unit (eg: MiB):
    printf '%s\n' "$(( KIBIBYTES * 1024 ))" | numfmt --to=iec
  else
    _exit_with_error "Supplied non-number argument '${KIBIBYTES}' to '_bytes_to_human_readable_size()'"
  fi
}
# Returns a comma delimited list of aliases associated to a recipient (ideally the recipient is a mail account):
# NOTE: DATABASE_VIRTUAL is not a parameter -- it is a `local` of _main and is
# visible here through bash's dynamic scoping, so this function only works
# when called (transitively) from _main.
function _alias_list_for_account() {
  local GREP_OPTIONS
  local MAIL_ACCOUNT=${1}

  # postfix-virtual.cf sample lines:
  #
  # all@example.com foo@example.com
  # all@example.com foo@example.com,another@example.com
  # all@example.com another@example.com,foo@example.com,yetanother@example.com
  # all@example.com another@example.com,foo@example.com

  GREP_OPTIONS=(
    --ignore-case
    --extended-regexp
    -e "\s${MAIL_ACCOUNT}($|,)" # match first and second sample line
    -e ",${MAIL_ACCOUNT}($|,)"  # match third and fourth sample line
    "${DATABASE_VIRTUAL}"
  )

  # Quiet probe first so a no-match case prints nothing at all; then extract
  # the alias column and join the matches with ', ' via the sed loop.
  if grep --quiet --no-messages "${GREP_OPTIONS[@]}"; then
    grep "${GREP_OPTIONS[@]}" | awk '{print $1;}' | sed ':a;N;$!ba;s/\n/, /g'
  fi
}
# Entry point.
_main
| true
|
74c00c44ba237d062ae52320869bc7dbcd2c60db
|
Shell
|
maxjonata/ScreenTranslator
|
/scripts/make_iss.sh
|
UTF-8
| 1,634
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# Build the Windows installer for ScreenTranslator: collect the application,
# Qt/MinGW/tesseract DLLs and ISS sources into $ISS_DIR, then run Inno Setup
# (via wine) on InnoSetup.iss. Paths come from options.sh.
source ./options.sh $@

cleanupDirInNeeded $ISS_DIR
cp -r $SRC_DISTR_DIR/iss/* $ISS_DIR
CONTENT_DIR=$ISS_DIR/content
mkdir -p $CONTENT_DIR

echo "Making ISS"
TESSDATA_DIR="$DOWNLOAD_DIR/tessdata"
# Generate tessdata.iss from the downloaded language data.
# NOTE(review): the $(...) wrapper runs make_tess_iss.sh in a subshell and then
# executes its (presumably empty) stdout as a command -- confirm intentional.
$(cd $ISS_DIR && ./make_tess_iss.sh $TESSDATA_DIR out="$ISS_DIR/tessdata.iss")

#setup
# Inject the version from version.json into the .iss and stage license/docs.
VERSION=`grep "versionString" $SRC_DIR/version.json | cut -d'"' -f4`
sed "s/#define MyAppVersion.*$/#define MyAppVersion \"$VERSION\"/" -i $ISS_DIR/InnoSetup.iss
cp $SRC_DIR/images/icon.ico $ISS_DIR/icon.ico
cp $SRC_DIR/LICENSE.md $ISS_DIR/LICENSE_en.md
cp $SRC_DISTR_DIR/Changelog_en.txt $ISS_DIR/Changelog_en.txt
cp $SRC_DISTR_DIR/Changelog_ru.txt $ISS_DIR/Changelog_ru.txt

#app
cp $APP_DIR/ScreenTranslator.exe $CONTENT_DIR/ScreenTranslator.exe
cp -r $SRC_DIR/translators $CONTENT_DIR/translators

#libs
# Copy the Qt runtime DLLs (-d preserves symlinks rather than following them).
QT_LIBS="Qt5WebKitWidgets Qt5Widgets Qt5WebKit Qt5Gui Qt5Network Qt5Core Qt5Sensors Qt5Positioning Qt5PrintSupport
Qt5OpenGL Qt5Sql Qt5Quick Qt5Qml Qt5WebChannel Qt5Multimedia Qt5MultimediaWidgets"
for i in $QT_LIBS; do
cp -d $QT_LIB_DIR/$i.dll $CONTENT_DIR
done
mkdir -p $CONTENT_DIR/platforms
cp -d $QT_LIB_DIR/../plugins/platforms/qwindows.dll $CONTENT_DIR/platforms

# MinGW runtime + OpenSSL DLLs the app links against.
MINGW_LIBS="libgcc_s_sjlj-1 libstdc++-6 icuin55 icuuc55 icudt55 libwinpthread-1 ssleay32 libeay32"
for i in $MINGW_LIBS; do
cp -d $MINGW_DIR/lib/$i.dll $CONTENT_DIR
done

# OCR dependencies (leptonica + tesseract), then strip debug symbols from
# everything to shrink the installer.
cp -d $DEPS_DIR/lib/liblept*.dll $CONTENT_DIR
cp -d $DEPS_DIR/lib/libtesseract*.dll $CONTENT_DIR

find $CONTENT_DIR -name '*.exe' -exec $STRIP -s {} \;
find $CONTENT_DIR -name '*.dll' -exec $STRIP -s {} \;

cd $ISS_DIR
wine "C:\Program Files\Inno Setup 5\iscc.exe" "InnoSetup.iss"
| true
|
cade70b493a6d9702e4255256173be129e82ac4b
|
Shell
|
borispopoff/fvwm-min
|
/set-style
|
UTF-8
| 963
| 4.28125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Safely change the style for FVWM-min. This script just changes a symlink and
# restarts FVWM. However, it does so safely. It can also give help on which
# styles are available, usage information, etc. This should work on any POSIX
# compliant shell.

my_dir=$(dirname "$0")
my_name=$(basename "$0")

# Print usage and the available styles (listed as 'family/name' relative to
# the styles directory) to stderr.
print_usage ()
{
echo "Usage: $my_name STYLE" 1>&2
echo
echo 'Styles:'
echo
cd "${my_dir}/styles" || return 1
ls */* | cat
echo
return 0
}

main ()
{
mystyle_path="${my_dir}/mystyle"

if [ "$#" != 1 ]; then
print_usage
return 1
fi

# Refuse to clobber a regular file: 'mystyle' must only ever be a symlink.
if [ -f "$mystyle_path" ] && [ ! -h "$mystyle_path" ]; then
printf 'Error: "%s" is not a symlink!' "$mystyle_path" 1>&2
return 1
fi

# Resolve the requested style name to the first matching style file (sorted).
match=$(find "${my_dir}/styles" -name "$1" -type f | sort | head -n 1)
if [ ! -f "$match" ]; then
print_usage
return 1
fi

# Atomically-ish repoint the symlink at the chosen style.
rm -f "$mystyle_path" || return 1
ln -sf "$match" "$mystyle_path" || return 1

# Ask a running FVWM to restart; errors are ignored if none is running.
FvwmCommand Restart 2>/dev/null
return 0
}

main "$@"
| true
|
a2fbdb7355cace6555246ccd3ba2f66c2a1b7f33
|
Shell
|
mczwier/westpa_py3
|
/lib/examples/analysis_mdanalysis/westpa_scripts/runseg.sh
|
UTF-8
| 3,692
| 3.71875
| 4
|
[
"GPL-3.0-only",
"MIT"
] |
permissive
|
#!/bin/bash
#
# runseg.sh
#
# WESTPA runs this script for each trajectory segment. WESTPA supplies
# environment variables that are unique to each segment, such as:
#
#   WEST_CURRENT_SEG_DATA_REF: A path to where the current trajectory segment's
#       data will be stored. This will become "WEST_PARENT_DATA_REF" for any
#       child segments that spawn from this segment
#   WEST_PARENT_DATA_REF: A path to a file or directory containing data for the
#       parent segment.
#   WEST_CURRENT_SEG_INITPOINT_TYPE: Specifies whether this segment is starting
#       anew, or if this segment continues from where another segment left off.
#   WEST_RAND16: A random integer
#
# This script has the following three jobs:
#  1. Create a directory for the current trajectory segment, and set up the
#     directory for running pmemd/sander
#  2. Run the dynamics
#  3. Calculate the progress coordinates and return data to WESTPA

# If we are running in debug mode, then output a lot of extra information.
if [ -n "$SEG_DEBUG" ] ; then
  set -x
  env | sort
fi

######################## Set up for running the dynamics #######################

# Set up the directory where data for this segment will be stored.
cd $WEST_SIM_ROOT
mkdir -pv $WEST_CURRENT_SEG_DATA_REF
cd $WEST_CURRENT_SEG_DATA_REF

# Make a symbolic link to the topology file. This is not unique to each segment.
ln -sv $WEST_SIM_ROOT/common_files/P53.MDM2.prmtop .

# Either continue an existing tractory, or start a new trajectory. In the
# latter case, we need to do a couple things differently, such as generating
# velocities.
#
# First, take care of the case that this segment is a continuation of another
# segment.  WESTPA provides the environment variable
# $WEST_CURRENT_SEG_INITPOINT_TYPE, and we check its value.
if [ "$WEST_CURRENT_SEG_INITPOINT_TYPE" = "SEG_INITPOINT_CONTINUES" ]; then
  # The weighted ensemble algorithm requires that dynamics are stochastic.
  # We'll use the "sed" command to replace the string "RAND" with a randomly
  # generated seed.
  sed "s/RAND/$WEST_RAND16/g" $WEST_SIM_ROOT/common_files/md.in > md.in

  # This trajectory segment will start off where its parent segment left off.
  # The "ln" command makes symbolic links to the parent segment's rst file.
  # This is preferable to copying the files, since it doesn't
  # require writing all the data again.
  ln -sv $WEST_PARENT_DATA_REF/seg.rst ./parent.rst

# Now take care of the case that the trajectory is starting anew.
elif [ "$WEST_CURRENT_SEG_INITPOINT_TYPE" = "SEG_INITPOINT_NEWTRAJ" ]; then
  # Again, we'll use the "sed" command to replace the string "RAND" with a
  # randomly generated seed.
  sed "s/RAND/$WEST_RAND16/g" $WEST_SIM_ROOT/common_files/md.in > md.in

  # For a new segment, we only need to make a symbolic link to the .rst file.
  ln -sv $WEST_PARENT_DATA_REF ./parent.rst
fi

############################## Run the dynamics ################################
# Propagate segment using pmemd (or sander)
pmemd -O -i md.in   -p P53.MDM2.prmtop  -c parent.rst \
          -r seg.rst -x seg.nc      -o seg.log    -inf seg.nfo

# Set the arguments for rmsd.py and call the script to calculate progress
# coordinate(s) for the current trajectory segment.
# Arguments:
#   ref: path to initial state coordinate file.
#   top: path to topology file.
#   mob: path to trajectory file.
#   for: we are evaluating a trajectory segment, so for = 'NCDF'
$WEST_SIM_ROOT/rmsd.py \
  --ref $WEST_SIM_ROOT/bstates/P53.MDM2.rst \
  --top $WEST_SIM_ROOT/amber_config/P53.MDM2.prmtop \
  --mob $WEST_CURRENT_SEG_DATA_REF/seg.nc \
  --for NCDF

# FIX: the '--for NCDF' line previously ended with a trailing '\', which
# spliced the 'cat' command (and its redirect) into the rmsd.py invocation --
# the pcoord file got rmsd.py's stdout and rmsd.dat was never copied.
cat rmsd.dat > $WEST_PCOORD_RETURN

# Clean up
# NOTE(review): $TEMP is never set in this script, so it expands to nothing.
rm -f $TEMP md.in parent.rst seg.nfo seg.pdb
| true
|
feb3b911ce2d8b7820f480f360979e7e2db41a08
|
Shell
|
yoko17k/my_launcher
|
/l.sh
|
UTF-8
| 1,508
| 3.765625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
launcher()
{
if [ $# -lt 1 ]; then
launcher help
return
fi
case "$1" in
"w" )
cd ~/vagrant/wordpress45/ &&\
vagrant up &&\
vagrant ssh -- docker-compose up
;;
"w_s" )
cd ~/vagrant/wordpress45/ &&\
vagrant ssh
;;
"w_h" )
cd ~/vagrant/wordpress45/ &&\
vagrant halt
;;
"gulp" )
cd ~/Tools/Scripts/gulp/ &&\
npm start
;;
"mv" )
# TODO : launcherの中に入れる実行ファイルはchmod 700をやってから。
mv $2 ~/Tools/Scripts/launcher/
;;
"l" )
cd $HOME/Tools/Scripts/launcher/
;;
"d" )
cd $HOME/Desktop
;;
"v" )
cd $HOME/vagrant/
;;
'help' )
cat <<-EOF
This is command launcher!
you can choose next commands.
1) ヘルプ起動
l help
2) gulp起動
l gulp
3) wordpressを起動
l w
4) wordpressサーバーにssh
l w_s
5) wordpressサーバーをshutdown
l w_h
6) launcherの中に現在のパスにあるファイルを移動。
l mv "<filename>"
7) launcherフォルダに移動。
l l
8) Desktopに移動。
l d
EOF
;;
* )
# launcherフォルダの中にあるshファイルの名前を入力すると、
# そのシェルが実行される。
#
# 例:
# l add => launcher/add.shが実行される。
if [ -e ~/Tools/Scripts/launcher/$1.sh ]; then
cd ~/Tools/Scripts/launcher/&&
./$1.sh
else
launcher help
fi
;;
esac
}
launcher $@
| true
|
fd8971f27fa963def2e5da5077e7bcb09670e99b
|
Shell
|
Kraysent/covid-statistics
|
/update_info.sh
|
UTF-8
| 535
| 3.28125
| 3
|
[] |
no_license
|
# Ensure ./data exists and holds up-to-date clones of both upstream COVID-19
# data repositories: clone on the first run, pull on subsequent runs.
if ! [ -d data ]; then
    mkdir data
fi

# Abort if we cannot enter a directory: previously cd failures were ignored,
# so a failed clone meant 'git pull' ran against the wrong repository/CWD.
cd data/ || exit 1

if ! [ -d COVID-19 ]; then
    echo "COVID-19 folder does not exists. Cloning..."
    git clone https://github.com/CSSEGISandData/COVID-19.git
fi

if ! [ -d covid19-russia-data ]; then
    echo "covid19-russia-data folder does not exists. Cloning..."
    git clone https://github.com/k0ka/covid19-russia-data.git
fi

cd COVID-19/ || exit 1
echo "Scanning for changes in COVID-19 folder: "
git pull

cd ../covid19-russia-data || exit 1
echo "Scanning for changes in covid19-russia-data folder: "
git pull
| true
|
59622913c07e66a77fee1e13f6972047c407f5e5
|
Shell
|
rfischer01/ShedRom
|
/tools/scripts/system/bootsd/su.d/11sd
|
UTF-8
| 11,916
| 2.96875
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Boot-time tuning script (Android, busybox sh): remounts rw, applies block
# device read-ahead tweaks, then logs progress to /data/tweakslog/.
BB=/system/bin/busybox

# Mounting #
$BB mount -o remount,rw /
$BB mount -o remount,rw / /
$BB mount -o remount,rw rootfs
$BB mount -o remount,rw /system
$BB mount -o remount,rw /system /system

# SD R/W Tweaks #
SD=/data/tweakslog/11SDT.log
# Kernel Tweaks #
KR=/data/tweakslog/13KER.log
# System Tweaks #
SYS=/data/tweakslog/09SYS.log

# Start each boot with fresh log files.
if [ -e $SD ]; then
rm $SD;
fi;
if [ -e $SYS ]; then
rm $SYS;
fi;
if [ -e $KR ]; then
rm $KR;
fi;

$BB echo "" | $BB tee -a $SD;
$BB echo "SD Tweaks started on : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $SD;

# Fixed read-ahead for the well-known eMMC/SD backing-device nodes.
if [ -e /sys/devices/virtual/bdi/179:0/read_ahead_kb ]; then
$BB echo "1024" > /sys/devices/virtual/bdi/179:0/read_ahead_kb;
fi;
if [ -e /sys/devices/virtual/bdi/179:8/read_ahead_kb ]; then
$BB echo "1024" > /sys/devices/virtual/bdi/179:8/read_ahead_kb;
fi;
if [ -e /sys/devices/virtual/bdi/179:28/read_ahead_kb ]; then
$BB echo "1024" > /sys/devices/virtual/bdi/179:28/read_ahead_kb;
fi;
if [ -e /sys/devices/virtual/bdi/179:32/read_ahead_kb ]; then
$BB echo "1024" > /sys/devices/virtual/bdi/179:32/read_ahead_kb;
fi;
if [ -e /sys/devices/virtual/bdi/default/read_ahead_kb ]; then
$BB echo "256" > /sys/devices/virtual/bdi/default/read_ahead_kb;
fi;

# Per-mmc-device queue read-ahead and entropy contribution.
L=1024
MMC="/sys/block/mmc*"
for S in $MMC
do
if [ -e "$S/queue/read_ahead_kb" ]; then
$BB echo "$L" > "$S/queue/read_ahead_kb";
fi
if [ -e "$S/bdi/read_ahead_kb" ]; then
$BB echo "$L" > "$S/bdi/read_ahead_kb";
fi
if [ -e "$S/queue/add_random" ]; then
$BB echo "0" > "$S/queue/add_random";
fi
done;

BDI=`ls -d /sys/devices/virtual/bdi/*`
# FIX: 'mem' was never defined, so the arithmetic below always collapsed to
# 128. Read total RAM (in kB) from /proc/meminfo so the read-ahead size
# actually scales with installed memory as the formula intends.
mem=$($BB awk '/^MemTotal:/ {print $2}' /proc/meminfo)
SDReadTweak=$((((mem/1024)/64+1)*128))
for i in $BDI
do
if [ -e $i/read_ahead_kb ]; then
$BB echo "$SDReadTweak" > $i/read_ahead_kb;
fi
done;

$BB echo "SD Tweaks completed at : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $SD;
$BB echo "=>Modding Low Memory Killer" | $BB tee -a $SYS;
if [ -e /sys/module/lowmemorykiller/parameters/cost ]; then
$BB echo "48" > /sys/module/lowmemorykiller/parameters/cost;
fi
if [ -e /sys/module/lowmemorykiller/parameters/debug_level ]; then
$BB echo "0" > /sys/module/lowmemorykiller/parameters/debug_level;
fi
$BB echo "LMK mod completed at : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $SYS;
$BB echo "" | $BB tee -a $SYS;

$BB echo "=>Modding I/O" | $BB tee -a $SYS;
# Queue and scheduler settings for every block device.
for S in /sys/block/*
do
if [ -f "$S/queue/rq_affinity" ]; then
$BB echo "1" > $S/queue/rq_affinity;
fi
if [ -f "$S/queue/rotational" ]; then
$BB echo "0" > $S/queue/rotational;
fi
if [ -f "$S/queue/iostats" ]; then
$BB echo "0" > $S/queue/iostats;
fi
if [ -f "$S/queue/nomerges" ]; then
$BB echo "1" > $S/queue/nomerges;
fi
# FIX: the scheduler tunables below were addressed via a stale '$i' (left over
# from an earlier loop) and, for slice_idle/quantum, written without the
# 'iosched/' path component -- they were silent no-ops. They now target the
# current device's scheduler directory.
if [ -e "$S/queue/iosched/low_latency" ]; then
$BB echo "1" > $S/queue/iosched/low_latency;
fi
if [ -e "$S/queue/iosched/back_seek_penalty" ]; then
$BB echo "1" > $S/queue/iosched/back_seek_penalty;
fi
if [ -e "$S/queue/iosched/back_seek_max" ]; then
$BB echo "1000000000" > $S/queue/iosched/back_seek_max;
fi
if [ -e "$S/queue/iosched/slice_idle" ]; then
$BB echo "0" > $S/queue/iosched/slice_idle;
fi
if [ -e "$S/queue/iosched/quantum" ]; then
$BB echo "16" > $S/queue/iosched/quantum;
fi
# NOTE(review): these two still use the leftover '$i' path and conflict with
# the $S/queue values written above (nomerges 0 vs 1); they were dead in the
# original and are intentionally left untouched pending confirmation.
if [ -e "$i/iostats" ]; then
$BB echo "0" > $i/iostats;
fi
if [ -e "$i/nomerges" ]; then
$BB echo "0" > $i/nomerges;
fi
done;

$BB echo "=>Searching value to change" | $BB tee -a $SYS;
MMC="/sys/block/mmc*"
for S in $MMC
do
if [ -e "$S/queue/nr_requests" ]; then
$BB echo "512" > $S/queue/nr_requests;
fi
done;

for i in /sys/block/*/queue
do
if [ -e "$i/rotational" ]; then
$BB echo "0" > $i/rotational;
fi
if [ -e "$i/nr_requests" ]; then
$BB echo "10240" > $i/nr_requests;
fi
if [ -e "$i/iosched/fifo_batch" ]; then
$BB echo "1" > $i/iosched/fifo_batch;
fi
if [ -e "$i/iosched/writes_starved" ]; then
$BB echo "1" > $i/iosched/writes_starved;
fi
if [ -e "$i/read_ahead_kb" ]; then
$BB echo "1024" > $i/read_ahead_kb;
fi
done;
$BB echo "I/O mod completed at : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $SYS;
$BB echo "" | $BB tee -a $SYS;
$BB echo "System Swap mod started on : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $SYS;
# VM / fs tuning, applied only when swap support is present.
if [ -e /proc/swaps ]; then
$BB echo "10" > /proc/sys/fs/lease-break-time;
# FIX: the inotify sysctls live in a subdirectory; the original used dots
# ('fs/inotify.max_queued_events'), which is not a valid /proc path, so these
# three writes always failed.
$BB echo "32000" > /proc/sys/fs/inotify/max_queued_events;
$BB echo "256" > /proc/sys/fs/inotify/max_user_instances;
$BB echo "10240" > /proc/sys/fs/inotify/max_user_watches;
$BB echo "524288" > /proc/sys/fs/file-max;
$BB echo "0" > /proc/sys/vm/laptop_mode;
$BB echo "4" > /proc/sys/vm/min_free_order_shift;
$BB echo "40" > /proc/sys/vm/swappiness;
$BB echo "500" > /proc/sys/vm/dirty_expire_centisecs;
$BB echo "1000" > /proc/sys/vm/dirty_writeback_centisecs;
$BB echo "10" > /proc/sys/vm/dirty_background_ratio;
$BB echo "20" > /proc/sys/vm/dirty_ratio;
$BB echo "2" > /proc/sys/vm/page-cluster;
fi
$BB echo "System Swap mod completed at : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $SYS;
$BB echo "" | $BB tee -a $SYS;

$BB echo "System Cache mod started on : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $SYS;
$BB echo "70" > /proc/sys/vm/vfs_cache_pressure;
$BB echo "1" > /proc/sys/vm/overcommit_memory;
$BB echo "75" > /proc/sys/vm/overcommit_ratio;
$BB echo "System Cache mod completed at : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $SYS;
$BB echo "" | $BB tee -a $SYS;

$BB echo "CPU mod started on : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $SYS;
# cgroup CPU shares / RT budgets for foreground vs background tasks.
if [ -e /dev/cpuctl/cpu.rt_runtime_us ]; then
$BB echo "800000" > /dev/cpuctl/cpu.rt_runtime_us;
fi
if [ -e /dev/cpuctl/cpu.rt_period_us ]; then
$BB echo "1000000" > /dev/cpuctl/cpu.rt_period_us;
fi
if [ -e /dev/cpuctl/bg_non_interactive/cpu.shares ]; then
$BB echo "62" > /dev/cpuctl/bg_non_interactive/cpu.shares;
fi
if [ -e /dev/cpuctl/bg_non_interactive/cpu.rt_runtime_us ]; then
$BB echo "700000" > /dev/cpuctl/bg_non_interactive/cpu.rt_runtime_us;
fi
if [ -e /dev/cpuctl/bg_non_interactive/cpu.rt_period_us ]; then
$BB echo "1000000" > /dev/cpuctl/bg_non_interactive/cpu.rt_period_us;
fi
MCPS="/sys/devices/system/cpu/sched_mc_power_savings"
if [ -e "$MCPS" ]; then
echo "2" > "$MCPS";
fi
$BB echo "CPU mod completed at : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $SYS;
$BB echo "" | $BB tee -a $SYS;

$BB echo "OOM mod started on : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $SYS;
$BB echo "1" > /proc/sys/vm/oom_dump_tasks;
$BB echo "1" > /proc/sys/vm/oom_kill_allocating_task;
$BB echo "OOM mod completed at : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $SYS;
$BB echo "Kernel Tweaks started on : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $KR;
$BB echo "" | $BB tee -a $KR;

# Helper: write $2 into the sysctl/sysfs node $1 only when the node exists.
# Replaces the long runs of identical if/echo/fi blocks below.
write_if_exists() {
if [ -e "$1" ]; then
$BB echo "$2" > "$1";
fi
}

$BB echo "Seeder mod started on : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $KR;
write_if_exists /proc/sys/kernel/random/read_wakeup_threshold "1366"
write_if_exists /proc/sys/kernel/random/write_wakeup_threshold "2048"
$BB echo "Seeder mod completed at : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $KR;
$BB echo "" | $BB tee -a $KR;

$BB echo "Kernel Sleeper mod started on : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $KR;
# Each write toggles a single scheduler feature flag; order preserved from the
# original. FIX: the NO_LAST_BUDDY write went to '/sys/kernel/debu/' and the
# NO_WAKEUP_OVERLAP / NO_WAKEUP_SYNC writes went to 'sched_feature' (missing
# 's') -- all three were silently lost to nonexistent paths.
if [ -e /sys/kernel/debug/sched_features ]; then
for FEATURE in \
NO_AFFINE_WAKEUPS NO_CACHE_HOT_BUDDY NO_DOUBLE_TICK NO_FORCE_SD_OVERLAP \
NO_GENTLE_FAIR_SLEEPERS NO_HRTICK NO_LAST_BUDDY NO_LB_BIAS NO_LB_MIN \
NO_NEW_FAIR_SLEEPERS NO_NEXT_BUDDY NO_NORMALIZED_SLEEPERS NO_OWNER_SPIN \
NO_SYNC_WAKEUPS NO_RT_RUNTIME_SHARE NO_START_DEBIT NO_WAKEUP_OVERLAP \
NO_WAKEUP_SYNC WAKEUP_PREEMPT NO_NONTASK_POWER NO_ARCH_POWER NO_TTWU_QUEUE
do
$BB echo "$FEATURE" > /sys/kernel/debug/sched_features;
done
fi
$BB echo "Kernel Sleeper mod completed at : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $KR;
$BB echo "" | $BB tee -a $KR;

$BB echo "Kernel Panic started on : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $KR;
# Disable all panic-on-error behaviour (reboot instead of hanging).
write_if_exists /proc/sys/vm/panic_on_oom "0"
write_if_exists /proc/sys/kernel/panic_on_oops "0"
write_if_exists /proc/sys/kernel/softlockup_panic "0"
write_if_exists /proc/sys/kernel/panic "0"
$BB echo "Kernel Panic mod completed at : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $KR;
$BB echo "" | $BB tee -a $KR;

$BB echo "Fast Charge mod started on : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $KR;
FAST_CHARGE="/sys/kernel/fast_charge/force_fast_charge"
write_if_exists "$FAST_CHARGE" "1"
$BB echo "Fast Charge mod completed at : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $KR;
$BB echo "" | $BB tee -a $KR;

$BB echo "Kernel Various mod started on : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $KR;
write_if_exists /proc/sys/kernel/hung_task_timeout_secs "25"
write_if_exists /proc/sys/kernel/msgmni "2048"
write_if_exists /proc/sys/kernel/msgmax "65536"
write_if_exists /proc/sys/kernel/shmmni "4096"
write_if_exists /proc/sys/kernel/shmall "2097152"
write_if_exists /proc/sys/kernel/shmmax "268435456"
write_if_exists /proc/sys/kernel/sem "500 512000 64 2048"
write_if_exists /proc/sys/kernel/sched_features "24189"
write_if_exists /proc/sys/kernel/sched_latency_ns "18000000"
write_if_exists /proc/sys/kernel/sched_min_granularity_ns "1500000"
write_if_exists /proc/sys/kernel/sched_wakeup_granularity_ns "3000000"
write_if_exists /proc/sys/kernel/threads-max "524288"
write_if_exists /proc/sys/kernel/sched_shares_ratelimit "256000"
write_if_exists /proc/sys/kernel/sched_child_runs_first "0"
write_if_exists /proc/sys/kernel/sched_compat_yield "1"
$BB echo "Kernel Various mod completed at : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $KR;
$BB echo "" | $BB tee -a $KR;
$BB echo "Kernel Tweaks activated at : $( date +"%m-%d-%Y %H:%M:%S" ) " | $BB tee -a $KR;

# Set Permissions
chown 0:0 -R /data/tweakslog/11SDT.log
chmod 755 -R /data/tweakslog/11SDT.log
chcon u:object_r:system_data_file:s0 /data/tweakslog/11SDT.log
chown 0:0 -R /data/tweakslog/13KER.log
chmod 755 -R /data/tweakslog/13KER.log
chcon u:object_r:system_data_file:s0 /data/tweakslog/13KER.log
chown 0:0 -R /data/tweakslog/09SYS.log
chmod 755 -R /data/tweakslog/09SYS.log
chcon u:object_r:system_data_file:s0 /data/tweakslog/09SYS.log

# Remount read-only
$BB mount -o remount,ro /
$BB mount -o remount,ro / /
$BB mount -o remount,ro rootfs
$BB mount -o remount,ro /system
$BB mount -o remount,ro /system /system
| true
|
184582041438950f56e0479ca63ed52ee087a589
|
Shell
|
gshang2017/docker
|
/aria2/root/etc/cont-init.d/aria2c.sh
|
UTF-8
| 5,218
| 3.109375
| 3
|
[] |
no_license
|
#! /usr/bin/with-contenv bash

# Container init: prepare aria2's config/session/DHT files, sync the RPC
# secret and ports between env vars, aria2.conf and the AriaNg web UI,
# install the tracker-update cron job, then fix timezone, UID/GID and
# ownership before aria2c starts.

# Ensure the config file exists: seed from the bundled defaults on first run
# and keep only the comment set for the language in ARIA2_CONF_LANGUAGE
# (zh_Hans is the default).
if [ ! -f /config/aria2.conf ]; then
cp /usr/local/aria2/defaults/aria2.conf /config/aria2.conf
if [ "$ARIA2_CONF_LANGUAGE" == "zh_Hant" ]; then
sed -i 's/\#zh_Hant//g' /config/aria2.conf
sed -i '/\#zh_Hans\#/d' /config/aria2.conf
sed -i '/\#en\#/d' /config/aria2.conf
elif [ "$ARIA2_CONF_LANGUAGE" == "en" ]; then
sed -i 's/\#en//g' /config/aria2.conf
sed -i '/\#zh_Han/d' /config/aria2.conf
else
sed -i 's/\#zh_Hans//g' /config/aria2.conf
sed -i '/\#zh_Hant\#/d' /config/aria2.conf
sed -i '/\#en\#/d' /config/aria2.conf
fi
fi

# Ensure the session file exists (aria2 resumes downloads from it).
if [ ! -f /config/aria2.session ]; then
touch /config/aria2.session
fi

# Ensure the DHT routing-table files exist (IPv4 and IPv6).
if [ ! -f /config/dht.dat ]; then
touch /config/dht.dat
fi
if [ ! -f /config/dht6.dat ]; then
touch /config/dht6.dat
fi

# Sync rpc-secret: when ARIA2_RPC_SECRET is set, rewrite (or append) the
# config entry; when unset, comment out any empty rpc-secret line.
ARIA2_RPC_SECRET_CONF_VAL=$(grep ^rpc-secret= /config/aria2.conf | sed 's/\(.*\)=//g' | sed ":a;N;s/\n//g;ta")
if [ -n "$ARIA2_RPC_SECRET" ]; then
if [ "$ARIA2_RPC_SECRET_CONF_VAL" != "$ARIA2_RPC_SECRET" ]; then
if [ `grep ^rpc-secret= /config/aria2.conf | wc -l` -gt 0 ]; then
sed -i 's/^rpc-secret='"$ARIA2_RPC_SECRET_CONF_VAL"'/rpc-secret='"$ARIA2_RPC_SECRET"'/g' /config/aria2.conf
else
sed -i '$arpc-secret='"$ARIA2_RPC_SECRET"'' /config/aria2.conf
fi
fi
else
if [ ! -n "$ARIA2_RPC_SECRET_CONF_VAL" ] && [ `grep ^rpc-secret= /config/aria2.conf | wc -l` -gt 0 ]; then
sed -i 's/^rpc-secret=/\#rpc-secret=/g' /config/aria2.conf
fi
fi

# Sync ports between env vars and the config file.
#listen-port and dht-listen-port
ARIA2_LISTEN_PORT_CONF_VAL=$(grep ^listen-port= /config/aria2.conf | sed 's/\(.*\)=//g' | sed ":a;N;s/\n//g;ta")
ARIA2_DHT_LISTEN_PORT_CONF_VAL=$(grep ^dht-listen-port= /config/aria2.conf | sed 's/\(.*\)=//g' | sed ":a;N;s/\n//g;ta")
if [ -n "$ARIA2_LISTEN_PORT" ] && ([ "$ARIA2_LISTEN_PORT_CONF_VAL" != "$ARIA2_LISTEN_PORT" ] || [ "$ARIA2_DHT_LISTEN_PORT_CONF_VAL" != "$ARIA2_LISTEN_PORT" ]); then
if [ `grep ^listen-port= /config/aria2.conf | wc -l` -gt 0 ]; then
sed -i 's/^listen-port='"$ARIA2_LISTEN_PORT_CONF_VAL"'/listen-port='"$ARIA2_LISTEN_PORT"'/g' /config/aria2.conf
else
sed -i '$alisten-port='"$ARIA2_LISTEN_PORT"'' /config/aria2.conf
fi
if [ `grep ^dht-listen-port= /config/aria2.conf | wc -l` -gt 0 ]; then
sed -i 's/^dht-listen-port='"$ARIA2_DHT_LISTEN_PORT_CONF_VAL"'/dht-listen-port='"$ARIA2_LISTEN_PORT"'/g' /config/aria2.conf
else
sed -i '$adht-listen-port='"$ARIA2_LISTEN_PORT"'' /config/aria2.conf
fi
fi
#rpc-listen-port
ARIA2_RPC_LISTEN_PORT_CONF_VAL=$(grep ^rpc-listen-port= /config/aria2.conf | sed 's/\(.*\)=//g' | sed ":a;N;s/\n//g;ta")
if [ -n "$ARIA2_RPC_LISTEN_PORT" ] && [ "$ARIA2_RPC_LISTEN_PORT_CONF_VAL" != "$ARIA2_RPC_LISTEN_PORT" ]; then
if [ `grep ^rpc-listen-port= /config/aria2.conf | wc -l` -gt 0 ]; then
sed -i 's/^rpc-listen-port='"$ARIA2_RPC_LISTEN_PORT_CONF_VAL"'/rpc-listen-port='"$ARIA2_RPC_LISTEN_PORT"'/g' /config/aria2.conf
else
sed -i '$arpc-listen-port='"$ARIA2_RPC_LISTEN_PORT"'' /config/aria2.conf
fi
fi

# Patch the AriaNg JS bundles so the configured token (base64-encoded, as
# AriaNg stores it) and rpcPort become the UI defaults. Always start from the
# pristine copies in defaultsjs/ so reruns don't double-patch.
cp /usr/local/aria2/AriaNg/js/defaultsjs/aria-ng* /usr/local/aria2/AriaNg/js/
if [ "$ARIANG_RPC_SECRET_AUTO" == "true" ]; then
if [ -n "$ARIA2_RPC_SECRET" ]; then
ARIA2_RPC_SECRET_ARIANg_VAL=`echo -n $ARIA2_RPC_SECRET|base64`
sed -i 's/secret:""/secret:"'"$ARIA2_RPC_SECRET_ARIANg_VAL"'"/g' /usr/local/aria2/AriaNg/js/aria-ng*
else
ARIA2_RPC_SECRET_CONF_VAL=$(grep ^rpc-secret= /config/aria2.conf | sed 's/\(.*\)=//g' | sed ":a;N;s/\n//g;ta")
if [ -n "$ARIA2_RPC_SECRET_CONF_VAL" ]; then
ARIA2_RPC_SECRET_ARIANg_VAL=`echo -n $ARIA2_RPC_SECRET_CONF_VAL|base64`
sed -i 's/secret:""/secret:"'"$ARIA2_RPC_SECRET_ARIANg_VAL"'"/g' /usr/local/aria2/AriaNg/js/aria-ng*
fi
fi
fi
if [ "$ARIANG_RPC_LISTEN_PORT_AUTO" == "true" ]; then
if [ -n "$ARIA2_RPC_LISTEN_PORT" ]; then
if [ "$ARIA2_RPC_LISTEN_PORT" != "6800" ]; then
sed -i 's/rpcPort:"6800"/rpcPort:"'"$ARIA2_RPC_LISTEN_PORT"'"/g' /usr/local/aria2/AriaNg/js/aria-ng*
fi
else
ARIA2_RPC_LISTEN_PORT_CONF_VAL=$(grep ^rpc-listen-port= /config/aria2.conf | sed 's/\(.*\)=//g' | sed ":a;N;s/\n//g;ta")
if [ -n "$ARIA2_RPC_LISTEN_PORT_CONF_VAL" ] && [ "$ARIA2_RPC_LISTEN_PORT_CONF_VAL" != "6800" ]; then
sed -i 's/rpcPort:"6800"/rpcPort:"'"$ARIA2_RPC_LISTEN_PORT_CONF_VAL"'"/g' /usr/local/aria2/AriaNg/js/aria-ng*
fi
fi
fi

# Install the daily tracker-update cron job (idempotent).
# NOTE(review): assumes /var/spool/cron/crontabs/root already exists; if it
# does not, grep -c prints nothing and this test misbehaves -- confirm the
# base image creates it.
if [ `grep -c updatetrackers.sh /var/spool/cron/crontabs/root` -eq 0 ]; then
echo "0 0 * * * /usr/local/aria2/updatetrackers.sh" >> /var/spool/cron/crontabs/root
echo trackers更新任务已设定。
else
echo trackers更新任务已存在。
fi

# Optionally refresh the tracker list right now.
if [ "$ARIA2_TRACKERS_UPDATE_AUTO" == "true" ]; then
/usr/local/aria2/updatetrackers.sh
fi

# Set the timezone.
ln -sf /usr/share/zoneinfo/$TZ /etc/localtime
echo $TZ > /etc/timezone

# Align the aria2 user's UID/GID with the host-provided values.
groupmod -o -g "$GID" aria2
usermod -o -u "$UID" aria2

# Fix ownership of everything aria2 touches.
chown -R aria2:aria2 /config
chown -R aria2:aria2 /Downloads
chown -R aria2:aria2 /usr/local/aria2
| true
|
70a690b6a6a2625d5cfb2cb76e1ec8b041185bff
|
Shell
|
hitswint/.emacs.d
|
/dotfiles/bin/ss_server.sh
|
UTF-8
| 872
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/sh
# Start a shadowsocks server inside a GNU screen session; optionally set up
# a reverse SSH tunnel through a relay host for NAT traversal.
#
# $1 (optional): relay host name, looked up in the encrypted ~/.authinfo.gpg.
#
# Re-exec under screen when not already inside one ($STY is set by screen).
# (screen -dm would ignore the shell environment and allow boot-time start;
# -S xxx would name the session.)
if [ -z "$STY" ]; then exec screen /bin/zsh $0 $1; fi
port="3344"
pass="r408hxkjopzZA"
# NOTE(review): hard-coded port/password above — consider a secrets file.
if [ x$1 != x ]; then
# Optional: use $1 as a relay for intranet penetration (reverse tunnel).
# Fields of the matching ~/.authinfo.gpg line (keyed on field 2 == host):
# field 6 = ssh login (user@host), field 4 = ssh port of the relay.
server_sshR=$1
login_sshR=$(gpg2 -q --for-your-eyes-only --no-tty -d ~/.authinfo.gpg | awk '$2==server_sshR {print $6}' server_sshR="$server_sshR")
port_sshR=$(gpg2 -q --for-your-eyes-only --no-tty -d ~/.authinfo.gpg | awk '$2==server_sshR {print $4}' server_sshR="$server_sshR")
# Publish local $port on the relay as remote $port, in the background.
autossh -f -N -R $port:127.0.0.1:$port -p $port_sshR $login_sshR
fi
# Earlier bash-based invocation, kept for reference:
# /bin/bash -c "source ~/.virtualenvs/shadowsocks/bin/activate; ssserver -s 0.0.0.0 -p \"$port\" -k \"$pass\" -m aes-256-cfb -t 600; exec /bin/bash -i"
zsh -is eval "source ~/.virtualenvs/shadowsocks/bin/activate; ssserver -s 0.0.0.0 -p \"$port\" -k \"$pass\" -m aes-256-cfb -t 600;"
| true
|
32d1e93d5bca5471d13b4b42940301e62fb1486e
|
Shell
|
cha63506/servlet-safety
|
/src/test/resources/fakesendmail.sh
|
UTF-8
| 550
| 3.5
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This is a totally preposterous script which, if called like sendmail (i.e.,
# with the -t command line flag, echos everything from stdin to a file) to a
# file named 'sent-mail.txt' in the current directory. This allows us to test
# sending error reports via the sendmail command without actually sending mail.
#
# To wit:
#   echo "YAY FOR ME I AM HAPPY" | fakesendmail.sh
#   cat sent-mail.txt
#
# This script is called from SendmailErrorReporterTest.

# "$1" is quoted so the test stays well-formed when the script is invoked
# with no arguments (unquoted $1 made '[' fail: "unary operator expected").
if [ "$1" == "-t" ]; then
  # Note: the command substitution collapses runs of whitespace/newlines
  # into single spaces, so the message body is written as one line.
  echo `cat /dev/stdin` > sent-mail.txt
  exit 0
fi
| true
|
996a46ee4019395676b079b238ea0255c9c1ab69
|
Shell
|
kiegroup/kogito-pipelines
|
/tools/update-build-gradle-regex-line.sh
|
UTF-8
| 339
| 3.625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Rewrite every Gradle property line matching REGEX in all build.gradle
# files beneath the current directory, setting it to: <REGEX> "<VALUE>".
#
# Usage: update-build-gradle-regex-line.sh <regex> <value>
set -eo pipefail

REGEX=$1
VALUE=$2

# Abort on missing arguments. The original used 'echo 1' here, which only
# printed "1" and let the script fall through and run sed with an empty
# pattern; 'exit 1' actually stops the script with a failure status.
if [ -z "${REGEX}" ]; then
  echo "Please provide which a regex to update"
  exit 1
fi

if [ -z "${VALUE}" ]; then
  echo "Please provide new value for the property as second argument"
  exit 1
fi

find . -name build.gradle -exec sed -i "s|${REGEX}.*|${REGEX} \"${VALUE}\"|g" {} \;
| true
|
3153efc7363935e4db61eee6e348363f9e1be548
|
Shell
|
openbsd/xenocara
|
/app/xlockmore/etc/showmodes.sh
|
UTF-8
| 1,553
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/ksh
# or /bin/bash
# Simple script to look all xlock modes supported.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation.
#
# This file is provided AS IS with no warranties of any kind. The author
# shall have no liability with respect to the infringement of copyrights,
# trade secrets or any patents by this file or any part thereof. In no
# event will the author be liable for any lost revenue or profits or
# other special, indirect and consequential damages.
#
# xlock-show-modes Copyright (C) 1998 Andrea Arcangeli
# by Andrea Arcangeli <arcangeli@mbox.queen.it>
#
# Revision History:
# 00-Jan-23 erase-modename threw it off, an extra space did the trick
# Also works on Sun now. David Bagley
# awk fails on Solaris but nawk is OK
# Solaris' default awk is ancient; use nawk there, plain awk elsewhere.
if [ `uname` == "SunOS" ] ; then
AWK="nawk"
else
AWK="awk"
fi
# gsub(/.*\] \[-mode/, ""); gsub(/\| /, ""); gsub("^ +", ""); \
# --help is a deliberate mistype...
# (an unknown option forces xlock to print its usage text on stderr, from
# which the list of mode names is scraped below).
function listmodes
{
# Parse xlock's usage output: once a line containing "-mode " is seen,
# strip everything up to the mode list and the "| " separators, print the
# names, and keep consuming continuation lines (those containing "|").
# 'true' is an awk state flag: 0 before the -mode line, 1 afterwards.
xlock --help 2>&1 | $AWK '{ \
if (!true && match ($0,"-mode ")) { \
gsub(/.*-mode /, ""); gsub(/\| /, ""); gsub("^ +", ""); \
printf("%s ", $0); true = 1 \
} else { if (true && /\|/) { \
gsub(/\| /, ""); gsub("^ +", ""); gsub("\]$", ""); \
printf("%s ", $0) } \
} \
}'
}
# Run every advertised mode once, windowed (-nolock) so the screen is not locked.
for i in `listmodes`; do echo Trying mode $i; xlock -nolock -mode $i; done
| true
|
ca308f8631d01198bdb9ba2a9f341f691efc207b
|
Shell
|
s910611s/debian-server-tools
|
/debian-image-normalize.sh
|
UTF-8
| 6,300
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Normalize Debian OS: jessie 8.x netinst (essential, required, important) and standard packages
#
# VERSION :1.0.0
# DEPENDS :apt-get install apt aptitude debian-archive-keyring
# Generated lists
#
# - missing.pkgs
# - extra.pkgs
# - removed.pkgs
# - integrity.log
# - cruft.log
# - installed-size.pkgs
# @TODO
# What to do on critical errors?
# Where to log? stdout, stderr, *file
# Regex of standard-or-below packages that must never stay installed
# (mail stack, locate, NFS helpers, docs, CPU microcode).
STANDARD_BLACKLIST="exim.*|procmail|mutt|bsd-mailx|ftp|mlocate|nfs-common|rpcbind\
|texinfo|info|install-info|debian-faq|doc-debian\
|intel-microcode|amd64-microcode"
# ??? isc-dhcp-client Priority: important
# Don't ever remove these
BOOT_PACKAGES="grub-pc|linux-image-amd64|initramfs-tools|firmware-.*|usbutils|mdadm|lvm2|xfsprogs\
|task-ssh-server|task-english|ssh|openssh-server|isc-dhcp-client|pppoeconf|ifenslave|ethtool|vlan\
|open-vm-tools|open-vm-tools-dkms|dkms|sudo|cloud-init|cloud-initramfs-growroot\
|sysvinit|sysvinit-core|sysvinit-utils|insserv|discover\
|systemd|libpam-systemd|systemd-sysv|dbus\
|extlinux|syslinux-common|elasticstack-container|waagent|scx|omi"
# Packages whose version string legitimately contains a tilde
# (excluded from the "tilde version" report at the bottom).
TILDE_VERSION="cloud-init|grub-common|grub-pc|grub-pc-bin|grub2-common|libgraphite2-3:amd64|intel-microcode"
# Trace every command and abort on the first error.
set -x -e
export LC_ALL="C"
export DEBIAN_FRONTEND="noninteractive"
export APT_LISTCHANGES_FRONTEND="none"
mkdir ${HOME}/os-normalize; cd ${HOME}/os-normalize/
# List what boot packages are installed
aptitude --disable-columns search '?and(?installed, ?not(?automatic))' -F"%p" \
| grep -Ex "$BOOT_PACKAGES" | sed 's/$/ # boot/' | tee boot.pkgs
# APT status
# Remove no longer needed packages
apt-get autoremove --purge -y
# Purge packages that were removed but not purged
apt-get purge -y $(aptitude --disable-columns search '?config-files' -F"%p")
# Clean package cache
apt-get clean
rm -rf /var/lib/apt/lists/*
apt-get clean
apt-get autoremove --purge -y
apt-get update -qq
# Reinstall tasks
debconf-show tasksel
#tasksel --list-tasks
apt-get purge -qq -y $(aptitude --disable-columns search '?and(?installed, ?or(?name(^task-), ?name(^tasksel)))' -F"%p") #'
echo "tasksel tasksel/first select" | debconf-set-selections -v
echo "tasksel tasksel/desktop multiselect" | debconf-set-selections -v
echo "tasksel tasksel/first multiselect ssh-server, standard" | debconf-set-selections -v
echo "tasksel tasksel/tasks multiselect ssh-server" | debconf-set-selections -v
apt-get install -qq -y tasksel
# May take a while
tasksel --new-install
# Mark dependencies of standard packages as automatic
# (so a later autoremove can drop them when nothing depends on them anymore)
set +x
for DEP in $(aptitude --disable-columns search \
'?and(?installed, ?not(?automatic), ?not(?essential), ?not(?priority(required)), ?not(?priority(important)), ?not(?priority(standard)))' -F"%p"); do
# Escape every non-alphanumeric character of the name for the grep below.
REGEXP="$(sed -e 's;\([^a-z0-9]\);[\1];g' <<< "$DEP")"
if aptitude why "$DEP" 2>&1 | grep -Eq "^i.. \S+\s+(Pre)?Depends( | .* )${REGEXP}( |$)"; then
apt-mark auto "$DEP" || echo "[ERROR] Marking package ${DEP} failed." 1>&2
fi
done
set -x
# Install standard packages
STANDARD_PACKAGES="$(aptitude --disable-columns search \
'?and(?not(?obsolete), ?or(?essential, ?priority(required), ?priority(important), ?priority(standard)))' -F"%p" \
| grep -Evx "$STANDARD_BLACKLIST")"
#STANDARD_PACKAGES="$(aptitude --disable-columns search \
# '?and(?architecture(native), ?or(?essential, ?priority(required), ?priority(important), ?priority(standard)))' -F"%p" \
# | grep -Evx "$STANDARD_BLACKLIST")"
apt-get -qq -y install ${STANDARD_PACKAGES}
# Install missing recommended packages
MISSING_RECOMMENDS="$(aptitude --disable-columns search '?and(?reverse-recommends(?installed), ?version(TARGET), ?not(?installed))' -F"%p" \
| grep -Evx "$STANDARD_BLACKLIST" || true)"
apt-get -qq -y install ${MISSING_RECOMMENDS}
# Remove non-standard packages
MANUALLY_INSTALLED="$(aptitude --disable-columns search \
'?and(?installed, ?not(?automatic), ?not(?essential), ?not(?priority(required)), ?not(?priority(important)), ?not(?priority(standard)))' -F"%p" \
| grep -Evx "$BOOT_PACKAGES" | tee removed.pkgs || true)"
apt-get purge -qq -y ${MANUALLY_INSTALLED}
# Remove packages on standard-blacklist
apt-get purge -qq -y $(aptitude --disable-columns search '?installed' -F"%p" | grep -Ex "$STANDARD_BLACKLIST" || true)
# Exim bug
# (removing exim can leave its system user behind; clean it up)
getent passwd Debian-exim &> /dev/null && deluser --force --remove-home Debian-exim
# Do dist-upgrade finally
apt-get dist-upgrade -qq -y
apt-get autoremove -qq --purge -y
# Check package integrity and cruft
apt-get install -qq -y debsums cruft > /dev/null
# Should be empty
debsums --all --changed 2>&1 | tee integrity.log | sed 's/$/ # integrity/'
cruft > cruft.log 2>&1
# From here on: reporting only, don't abort on non-zero exit codes.
set +e +x
# Check for missing packages
{
aptitude --disable-columns search '?and(?essential, ?not(?installed))' -F"%p"
aptitude --disable-columns search '?and(?priority(required), ?not(?installed))' -F"%p"
aptitude --disable-columns search '?and(?priority(important), ?not(?installed))' -F"%p"
aptitude --disable-columns search '?and(?priority(standard), ?not(?installed))' -F"%p" | grep -Evx "$STANDARD_BLACKLIST"
} 2>&1 | tee missing.pkgs | grep "." && echo "Missing packages" 1>&2
# Check for extra packages
{
aptitude --disable-columns search '?garbage' -F"%p" | sed 's/$/ # garbage/'
aptitude --disable-columns search '?broken' -F"%p" | sed 's/$/ # broken/'
aptitude --disable-columns search '?obsolete' -F"%p" | sed 's/$/ # obsolete/'
aptitude --disable-columns search \
'?and(?installed, ?or(?version(~~squeeze), ?version(\+deb6), ?version(python2\.6), ?version(~~wheezy), ?version(\+deb7)))' -F"%p" \
| sed 's/$/ # old/'
aptitude --disable-columns search '?and(?installed, ?not(?origin(Debian)))' -F"%p" | sed 's/$/ # non-Debian/'
#Ubuntu: aptitude --disable-columns search '?and(?installed, ?not(?origin(Ubuntu)))' -F"%p" | sed 's/$/ # non-Ubuntu/'
dpkg -l | grep "~[a-z]\+" | grep -Ev "^ii (${TILDE_VERSION})\s" | cut -c 1-55 | sed 's/$/ # tilde version/'
# "-dev" versioned packages
aptitude --disable-columns search '?and(?installed, ?name(-dev))' -F"%p" | sed 's/$/ # development/'
} 2>&1 | tee extra.pkgs | grep "." && echo "Extra packages" 1>&2
# List packages by size
dpkg-query -f '${Installed-size}\t${Package}\n' --show | sort -k 1 -n > installed-size.pkgs
exit 0
| true
|
03594df50a2953cd774a374483e6e3b04f23e639
|
Shell
|
estebanpw/docker-geckomgv
|
/src/media/scripts/gecko/workflowGalaxy.sh
|
UTF-8
| 8,512
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# GECKO pairwise genome comparison workflow for Galaxy.
# Compares seqX against seqY (forward strand and reverse complement),
# combines and p-value-filters the resulting fragments, and writes a CSV
# report to the path given as $7. All heavy lifting is done by helper
# binaries living next to this script ($BINDIR).
FL=1000 # frequency limit
MG=0
# Require the 7 mandatory positional arguments.
if [ $# -lt 7 ]; then
echo " ==== ERROR ... you called this script inappropriately."
echo ""
echo " usage: $0 seqXName seqYName lenght similarity WL fixedL CSVOutput"
echo ""
exit -1
fi
if [ $# == 8 ]; then
MG=$8
fi
# Everything below runs silenced; see the "&> /dev/null" on the closing brace.
{
BINDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Random suffix so concurrent runs get separate work directories.
MYRAND=$(( ( RANDOM % 10000000 ) + 1 ))
# Split each input path into directory / base name / extension.
dirNameX=$(${BINDIR}/readlink.sh $1 | xargs dirname)
seqXName=$(basename "$1")
extensionX="${seqXName##*.}"
seqXName="${seqXName%.*}"
dirNameY=$(${BINDIR}/readlink.sh $2 | xargs dirname)
seqYName=$(basename "$2")
extensionY="${seqYName##*.}"
seqYName="${seqYName%.*}"
outputFile=$7
#seqXName=`basename $1 .fasta`
#seqYName=`basename $2 .fasta`
length=${3}
similarity=${4}
WL=${5} # wordSize
fixedL=${6}
# Per-run scratch directory names.
REALINTER=intermediateFiles_${MYRAND}
REALSULTS=results_${MYRAND}
REALCSV=csv_${MYRAND}
REALCSB=csb_${MYRAND}
REALCOMP=comparaciones_${MYRAND}
REALHIST=hist_${MYRAND}
#mkdir intermediateFiles
mkdir $REALINTER
#mkdir intermediateFiles/${seqXName}-${seqYName}
mkdir $REALINTER/${seqXName}-${seqYName}
#mkdir results
mkdir $REALSULTS
#mkdir intermediateFiles/dictionaries
#mkdir intermediateFiles/hits
mkdir $REALINTER/dictionaries
mkdir $REALINTER/hits
#mkdir csv
#mkdir csb
#mkdir comparaciones
#mkdir hist
mkdir $REALCSV
mkdir $REALCSB
mkdir $REALCOMP
mkdir $REALHIST
# Link the fasta inputs into the work directory
#ln -s ${dirNameX}/${seqXName}.${extensionX} intermediateFiles/${seqXName}-${seqYName}
#ln -s ${dirNameY}/${seqYName}.${extensionY} intermediateFiles/${seqXName}-${seqYName}
ln -s ${dirNameX}/${seqXName}.${extensionX} $REALINTER/${seqXName}-${seqYName}
ln -s ${dirNameY}/${seqYName}.${extensionY} $REALINTER/${seqXName}-${seqYName}
#cd intermediateFiles/${seqXName}-${seqYName}
cd $REALINTER/${seqXName}-${seqYName}
mkdir GRIMM
cd GRIMM
mkdir anchor
cd ..
###############
# Build reverse complements so the reverse strand can be compared too.
echo "${BINDIR}/reverseComplement ${seqYName}.${extensionX} ${seqYName}-revercomp.${extensionY}"
${BINDIR}/reverseComplement ${seqYName}.${extensionX} ${seqYName}-revercomp.${extensionY}
echo "${BINDIR}/reverseComplement ${seqXName}.${extensionX} ${seqXName}-revercomp.${extensionX}"
${BINDIR}/reverseComplement ${seqXName}.${extensionX} ${seqXName}-revercomp.${extensionX}
# Build the k-mer dictionaries in parallel, unless already cached.
if [[ ! -f ../dictionaries/${seqXName}.d2hP ]]; then
echo "${BINDIR}/dictionary.sh ${seqXName}.${extensionX} 8 &"
${BINDIR}/dictionary.sh ${seqXName}.${extensionX} 8 &
fi
if [[ ! -f ../dictionaries/${seqYName}.d2hP ]]; then
echo "${BINDIR}/dictionary.sh ${seqYName}.${extensionY} 8 &"
${BINDIR}/dictionary.sh ${seqYName}.${extensionY} 8 &
fi
if [[ ! -f ../dictionaries/${seqYName}-revercomp.d2hP ]]; then
echo "${BINDIR}/dictionary.sh ${seqYName}-revercomp.${extensionY} 8 &"
${BINDIR}/dictionary.sh ${seqYName}-revercomp.${extensionY} 8 &
fi
echo "Waiting for the calculation of the dictionaries"
for job in `jobs -p`
do
#echo $job
wait $job
done
mv ${seqXName}.d2hP ../dictionaries/
mv ${seqXName}.d2hW ../dictionaries/
mv ${seqYName}.d2hP ../dictionaries/
mv ${seqYName}.d2hW ../dictionaries/
mv ${seqYName}-revercomp.d2hP ../dictionaries/
mv ${seqYName}-revercomp.d2hW ../dictionaries/
# Create symbolic links to the cached dictionaries
ln -s ../dictionaries/${seqXName}.d2hP .
ln -s ../dictionaries/${seqXName}.d2hW .
ln -s ../dictionaries/${seqYName}.d2hP .
ln -s ../dictionaries/${seqYName}.d2hW .
ln -s ../dictionaries/${seqYName}-revercomp.d2hP .
ln -s ../dictionaries/${seqYName}-revercomp.d2hW .
# Run forward (f) and reverse (r) comparisons in parallel.
echo "${BINDIR}/comparison.sh ${seqXName}.${extensionX} ${seqYName}.${extensionY} ${length} ${similarity} ${WL} ${fixedL} f &"
${BINDIR}/comparison.sh ${seqXName}.${extensionX} ${seqYName}.${extensionY} ${length} ${similarity} ${WL} ${fixedL} f &
echo "${BINDIR}/comparison.sh ${seqXName}.${extensionX} ${seqYName}-revercomp.${extensionY} ${length} ${similarity} ${WL} ${fixedL} r &"
${BINDIR}/comparison.sh ${seqXName}.${extensionX} ${seqYName}-revercomp.${extensionY} ${length} ${similarity} ${WL} ${fixedL} r &
echo "Waiting for the comparisons"
for job in `jobs -p`
do
#echo $job
wait $job
done
#echo "rm ${seqYName}-revercomp.${extensionY}"
#rm ${seqYName}-revercomp.${extensionY}
# Merge forward and reverse fragment files into one.
echo "${BINDIR}/combineFrags ${seqXName}-${seqYName}-sf.frags ${seqXName}-${seqYName}-revercomp-sr.frags ${seqXName}-${seqYName}.frags"
${BINDIR}/combineFrags ${seqXName}-${seqYName}-sf.frags ${seqXName}-${seqYName}-revercomp-sr.frags ${seqXName}-${seqYName}.frags
#echo "${BINDIR}/newFragToBalazsVersion ${seqXName}-${seqYName}.frags ${seqXName}-${seqYName}.old.frags"
#${BINDIR}/newFragToBalazsVersion ${seqXName}-${seqYName}.frags ${seqXName}-${seqYName}.old.frags
#echo "${BINDIR}/af2pngrev ${seqXName}-${seqYName}.frags ${seqXName}-${seqYName}.png ${seqXName} ${seqYName}"
#${BINDIR}/af2pngrev ${seqXName}-${seqYName}.frags ${seqXName}-${seqYName}.png ${seqXName} ${seqYName}
# Calc ACGT frequencies
echo "${BINDIR}/getFreqFasta ${seqXName}.${extensionX} ${seqXName}.freq"
${BINDIR}/getFreqFasta ${seqXName}.${extensionX} ${seqXName}.freq
#Calc karlin parameters
echo "${BINDIR}/kar2test ${seqXName}.freq ${BINDIR}/matrix.mat 1 ${seqXName}.karpar"
${BINDIR}/kar2test ${seqXName}.freq ${BINDIR}/matrix.mat 1 ${seqXName}.karpar
#rm -rf ${seqXName}.freq
echo "----------- p-value filter --------------"
## Filter fragments by p-value using the Karlin parameters computed above
echo "${BINDIR}/pvalueFilter ${seqXName}-${seqYName}.frags ${seqXName}.karpar ${seqXName}-${seqYName}.fil.frags ${seqXName}-${seqYName}.trash.frags "
${BINDIR}/pvalueFilter ${seqXName}-${seqYName}.frags ${seqXName}.karpar ${seqXName}-${seqYName}.fil.frags ${seqXName}-${seqYName}.trash.frags 1
echo "-------"
echo ${BINDIR}
echo "-------"
# Convert the filtered fragments into master/CSV form.
${BINDIR}/fragstoMaster ${seqXName}-${seqYName}.fil.frags ${seqXName}-${seqYName}.original.master ${seqXName}.${extensionX} ${seqYName}.${extensionY}
echo "${BINDIR}/csb2csv ${seqXName}-${seqYName}.original.master ${seqXName}-${seqYName}.original.master 0 > ${seqXName}-${seqYName}.original.csv.tmp"
${BINDIR}/csb2csv ${seqXName}-${seqYName}.original.master ${seqXName}-${seqYName}.original.master ${seqXName}.${extensionX} ${seqXName} ${seqYName}.${extensionY} ${seqYName} ${seqXName}-${seqYName}.csv
#fragstoMaster frags/NC_014448.1-NC_019552.1.fil.frags master fastas/NC_014448.1.fasta fastas/NC_019552.1.fasta
#csb2csv master master fastas/NC_014448.1.fasta NC_014448.1 fastas/NC_019552.1.fasta NC_019552.1 master.csv
#cat ${seqXName}-${seqYName}.csb.frags.INF ${seqXName}-${seqYName}.original.csv.tmp > ${seqXName}-${seqYName}.original.csv
# compute hit histograms as txt (currently disabled)
#${BINDIR}/getHistogramFromHits ${seqXName}-${seqYName}-revercomp-K${WL}.hits.sorted ${seqXName}-${seqYName}-K${WL}.histXrever.txt ${seqXName}-${seqYName}-K${WL}.histYrever.txt r 0
#${BINDIR}/getHistogramFromHits ${seqXName}-${seqYName}-K${WL}.hits.sorted ${seqXName}-${seqYName}-K${WL}.histX.txt ${seqXName}-${seqYName}-K${WL}.histY.txt f 0
# Delete everything except the frags and the dictionaries
#cat ${seqXName}-${seqYName}.frags.INF ${seqXName}-${seqYName}.original.csv > ${seqXName}-${seqYName}.csv
#MYRAND=$(( ( RANDOM % 1000000 ) + 1))
echo "Estoy en $PWD"
# Deliver the CSV to the Galaxy output path and keep the frag files.
mv ${seqXName}-${seqYName}.csv ${outputFile}
#cp $outputFile ../../
#mv /home/sergiodiazdp/galaxycsv/${MYRAND}.csv $outputFile
mv ${seqXName}-${seqYName}.frags ../../results
mv ${seqXName}-${seqYName}.frags.INF ../../results
mv ${seqXName}-${seqYName}.frags.MAT ../../results
#mv ${seqXName}-${seqYName}-K${WL}.histXrever.txt ../../hist
#mv ${seqXName}-${seqYName}-K${WL}.histYrever.txt ../../hist
#mv ${seqXName}-${seqYName}-K${WL}.histX.txt ../../hist
#mv ${seqXName}-${seqYName}-K${WL}.histY.txt ../../hist
echo "Deleting the tmp folder: ${seqXName}-${seqYName}"
cd ..
#rm -rf ${seqXName}-${seqYName}
#rm -r ../intermediateFiles
#rm -r ../intermediateFiles/${seqXName}-${seqYName}.v3.frags
#rm -r ../intermediateFiles/${seqXName}-${seqYName}.joined
#rm -r ../intermediateFiles/${seqXName}-${seqYName}.evol.frag2
#rm -r ../intermediateFiles/${seqXName}-${seqYName}.evol.csb2
rm -r ../$REALINTER/${seqXName}-${seqYName}.v3.frags
rm -r ../$REALINTER/${seqXName}-${seqYName}.joined
rm -r ../$REALINTER/${seqXName}-${seqYName}.evol.frag2
rm -r ../$REALINTER/${seqXName}-${seqYName}.evol.csb2
} &> /dev/null
#rm -r ../intermediateFiles/${seqXName}-${seqYName}.v3.frags
#rm -r ../intermediateFiles/${seqXName}-${seqYName}.joined
#rm -r ../intermediateFiles/${seqXName}-${seqYName}.evol.frag2
#rm -r ../intermediateFiles/${seqXName}-${seqYName}.evol.csb2
| true
|
38cecbc7e37b76a49be7bcbb523937ae3160fe26
|
Shell
|
svpv/girar
|
/bin/girar-task-change-state
|
UTF-8
| 539
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh -efu
# Change task $1 to state $2, then refresh the task's web artifacts and
# move it between queues when the state actually changed.
. girar-sh-functions
PROG='girar-task-change-state'
id="$1"; shift
new_state="$1"; shift
cd "$TASKS_DIR/$id"
enable -f /usr/lib/bash/lockf lockf
# obtain an exclusive lock on the tasks state file
builtin lockf -v task/state
old_state=$(cat task/state)
# Record the new state only if it differs from the current one.
if [ "$old_state" != "$new_state" ]; then
	echo "$new_state" > task/state
fi
# Notify the web API of the transition (best effort).
if [ "$old_state" != "$new_state" ]; then
	girar-webapi-task state "$id" || :
fi
# The index page is regenerated unconditionally (best effort).
girar-task-make-index-html "$id" || :
# Finally move the task between queues; failures here do abort the script.
if [ "$old_state" != "$new_state" ]; then
	girar-task-update-queues "$id" "$old_state" "$new_state"
fi
| true
|
7355852ad7832200f9b133011db61bc40f14a58f
|
Shell
|
wycomco/macos-printer-reset
|
/print_env_reset.sh
|
UTF-8
| 1,786
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# This script will reset the printing system.
# It was inspired by http://www.cnet.com/news/what-does-the-reset-print-system-routine-in-os-x-do/
# This script should work for OS X 10.10 Yosemite and later. Please be aware that it has not been
# tested on macOS versions below 10.14 "Mojave".
#
# For macOS 10.15 "Catalina" and later the integrated printtool reset command will be used.
# Get macOS Version
majorVersion=$(sw_vers -productVersion | awk -F. '{ print $1; }')
minorVersion=$(sw_vers -productVersion | awk -F. '{ print $2; }')
# Use the appropriate reset method.
# major >= 11 covers macOS 11+; minor >= 15 covers 10.15 "Catalina".
if [[ $majorVersion -ge 11 || $minorVersion -ge 15 ]]; then
echo "Catalina or higher, using integrated command"
/System/Library/Frameworks/ApplicationServices.framework/Frameworks/PrintCore.framework/Versions/A/printtool --reset -f
else
echo "Mojave or lower, scripted reset"
# Stop CUPS
/bin/launchctl stop org.cups.cupsd
# Backup Installed Printers Property List
if [[ -e "/Library/Printers/InstalledPrinters.plist" ]]; then
/bin/mv /Library/Printers/InstalledPrinters.plist /Library/Printers/InstalledPrinters.plist.bak
fi
# Backup the CUPS config file
# (order matters: the config is moved away first so the default can be
# restored by the next step)
if [[ -e "/etc/cups/cupsd.conf" ]]; then
/bin/mv /etc/cups/cupsd.conf /etc/cups/cupsd.conf.bak
fi
# Restore the default config by copying it
if [[ ! -e "/etc/cups/cupsd.conf" ]]; then
/bin/cp /etc/cups/cupsd.conf.default /etc/cups/cupsd.conf
fi
# Backup the printers config file
if [[ -e "/etc/cups/printers.conf" ]]; then
/bin/mv /etc/cups/printers.conf /etc/cups/printers.conf.bak
fi
# Start CUPS
/bin/launchctl start org.cups.cupsd
# Remove all printers
# (lpstat -p lists "printer NAME ..."; field 2 is the queue name)
/usr/bin/lpstat -p | /usr/bin/cut -d' ' -f2 | /usr/bin/xargs -I {} /usr/sbin/lpadmin -x {}
fi
| true
|
e47bc3a3edccc16280b0e22fed920d99f26a83ea
|
Shell
|
mzoka/Bashshell_project_dbms
|
/fzenity.sh
|
UTF-8
| 2,781
| 3.703125
| 4
|
[] |
no_license
|
# Zenity-driven helper: make sure the Databases/ store exists, then
# interactively create a new database directory and register it as a
# "name:ID:" line in Databases/metadata. Recurses after bootstrapping the
# store or after creating a database.
exist_and_create()
{
# Next ID = ID column (field 2) of the last metadata line; 0 when absent.
ID=$(tail -1 Databases/metadata 2>/dev/null|cut -f2 -d:)
if [[ -z $ID ]]; then
ID=0
fi
# Does the Databases store exist yet?
texistDefultDB=$(find . -name Databases 2>/dev/null)
sleep 3 | zenity --progress --no-cancel --title="Databases" --width=300 --height=200 --text="Searching for Databases" --pulsate --auto-close
if [ -z "$texistDefultDB" ]
then {
# Bootstrap the store, then recurse to do the actual creation.
mkdir Databases
touch "Databases/metadata"
echo "Databases:base" >>"Databases/metadata"
#echo "database created"
zenity --info --text="database created"
exist_and_create $@
}
else
{
# Ask the user for the database name to create.
file=$(zenity --title="database" --entry --text="Enter database name" --entry-text="")
f=1
databaseFind=$(find . -name $file 2>/dev/null | grep ^./Databases/$file |cut -f3 -d'/')
sleep 2 |zenity --progress --no-cancel --title="Databases" --width=300 --height=200 --text="Searching for database name" --pulsate --auto-close
if [ -z "$databaseFind" ] && [ -n "$file" ]
then
{
# Name is new and non-empty: create and register it. f=0 marks success.
let 'ID++'
f=0
mkdir "Databases/$file">/dev/null
database=$(find . -name $file 2>/dev/null | grep ^./Databases/$file |cut -f3 -d'/')
echo -e "$database:$ID:" >> "Databases/metadata"
#echo "$file has been created :)"
zenity --info --text="$file has been created"
exist_and_create $@
}
fi
# f still 1 -> the requested name already exists; offer to pick another.
if [ $f != 0 ]
then
{
let 'ID++'
# echo "the database created aready named $file ^_^"
zenity --warning --text="the database created aready named $file ^_^" --icon-name=Create-Database --ellipsize
# read -p " do you want to creat new db:)>" req
req=$(zenity --question --text="do you want to creat new db?")
if [[ $? -eq 1 ]]
then
req=$(zenity --question --text="are you sure exit?")
fi
# || [[ "$req" = "Y" ]]
if [[ $? -eq 0 ]]
then
# Keep prompting until a fresh, non-empty name is given.
dcount=0
existd=:
while [[ "$dcount" == 0 ]]
do
# read -p "database name $existd=:>" newDatabase
newDatabase=$(zenity --entry --text="database name $existd" --entry-text="")
databaseExist=$(find . -name $newDatabase 2>/dev/null|grep ^./Databases/$newDatabase |cut -f3 -d'/')
sleep 2 |zenity --progress --no-cancel --title="Searching.." --width=300 --height=200 --text="Searching for database name exists" --pulsate --auto-close
# wc -c counts the trailing newline, so an empty answer yields 1.
count=$(echo $newDatabase |wc -c)
if [[ $databaseFind = $newDatabase ]] || [[ $newDatabase = $databaseExist ]]
then
existd="other than $databaseFind or $databaseExist"
fi
if [[ $count != 1 ]] && [[ $databaseFind != $newDatabase ]] && [[ $newDatabase != $databaseExist ]]
then
mkdir "Databases/$newDatabase" 2>/dev/null
echo -e "$newDatabase:$ID:" >> "Databases/metadata"
dcount=1
# echo "created with name $newDatabase '$_$'"
zenity --info --text="created with name $newDatabase $_$"
fi
done
fi
}
fi
}
fi
}
# Entry point; stderr noise from zenity/find is suppressed.
exist_and_create $@ 2>/dev/null
| true
|
ccc93832d021b5cfd60f7db8b35523bc53eea5c0
|
Shell
|
suraj9741/Daily_Progress
|
/day5/weekdayif.sh
|
UTF-8
| 400
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Read a single digit (1-7) from stdin and print the matching weekday
# name, 1 = Sunday ... 7 = Saturday; any other input is rejected.
echo "Enter the single digit number : "
read n
# "$n" is quoted so '[' receives a well-formed expression even when the
# input is empty (unquoted $n caused "[: -eq: unary operator expected").
if [ "$n" -eq 1 ]
then
  echo "Sunday"
elif [ "$n" -eq 2 ]
then
  echo "Monday"
elif [ "$n" -eq 3 ]
then
  # fixed typo: was "Tuseday"
  echo "Tuesday"
elif [ "$n" -eq 4 ]
then
  echo "Wednesday"
elif [ "$n" -eq 5 ]
then
  echo "Thursday"
elif [ "$n" -eq 6 ]
then
  echo "Friday"
elif [ "$n" -eq 7 ]
then
  echo "Saturday"
else
  echo "This is not week day."
fi
| true
|
c44b6b8af26ee021c1befa4373359c5fb590be91
|
Shell
|
dev9318/bash
|
/bash/operation.sh
|
UTF-8
| 593
| 3.421875
| 3
|
[] |
no_license
|
#! /bin/bash
# Scratch pad for shell operators: the commented-out sections demonstrate
# logical tests (&& / ||) and integer arithmetic (expr vs $(( ))); the live
# lines at the bottom show floating-point math — which bash cannot do
# natively — delegated to the bc calculator.
#age=25
#if [ "$age" -gt 18 ] && [ "$age" -lt 30 ]
#then
# echo "yay age"
#else
# echo "boomer"
#fi
#if [ "$age" -gt 60 ] || [ "$age" -lt 10 ]
#then
# echo "vaccine"
#else
# echo "bye bye"
#fi
#num1=20
#num2=5
#echo $(expr $num1 + $num2 )
#echo $(expr $num1 - $num2 )
# *, / and %
# (Or)
#echo $(( $num1 + $num2 ))
#echo $(( $num1 - $num2 ))
#echo $(expr $num1 \* $num2 )
num1=20.5
num2=5
#echo "20.5+5" | bc
#echo "scale=2;20.5/5" | bc #scale implies that many decimal places
# echo "$num1-$num2" | bc
# bc -l loads the math library (sqrt, etc.); scale=3 keeps 3 decimal places.
echo "scale=3;sqrt($num1)" | bc -l
echo "3^3" | bc -l
| true
|
53f92459814cfd72a764dd3943ab6e14866e79a6
|
Shell
|
kbalist/xbmc-games
|
/psx/epsxe.sh
|
UTF-8
| 473
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
####################################
# PSX LAUNCHER
####################################
# Boots a PlayStation disc image in ePSXe; intended to be called by XBMC
# with the image path as $1.
# (Credits: http://www.gwenael.org/xbmc/index.php?title=PCSX-Reloaded#Linux)
#
#   launcher  : pcsx
#   manual    : man pcsx
#   romext    : mdf|iso|bin|img
#   savegame  : yes
#
# ePSXe command line options:
#   -nogui    : Don't load the GUI
#   -loadiso  : run specified file (missing from man)
####################################
emulator=/media/data/games/psx/epsxe/epsxe
"$emulator" -nogui -loadiso "$1"
| true
|
82e1f8b821c16d8906698a29df23ac1ae2d438d4
|
Shell
|
petronny/aur3-mirror
|
/perroquet-bzr/PKGBUILD
|
UTF-8
| 997
| 2.625
| 3
|
[] |
no_license
|
# Contributor: wido <widomaker2k7@gmail.com>
# PKGBUILD for an Arch Linux package built from the Perroquet bzr trunk.
# makepkg sources this file and drives the build() function below.
pkgname=perroquet-bzr
pkgver=83
pkgrel=1
pkgdesc="Perroquet is a educational program to improve playfully your listening in a foreign language"
arch=('i686' 'x86_64')
url="http://perroquet.b219.org"
license=('GPL3')
depends=('python' 'gtk2' 'intltool>=0.40.0' 'pygtk' 'gstreamer0.10-good' 'gstreamer0.10-python')
optdepends=('gstreamer0.10-plugins')
makedepends=('gcc' 'bzr')
conflicts=('perroquet')
provides=('perroquet')
install=perroquet.install
# No source tarballs: the code is fetched from bzr in build(), so there
# is nothing to checksum.
md5sums=()
# Branch location and local checkout directory name.
_bzrtrunk="lp:perroquet"
_bzrmod="trunk"
build() {
cd ${srcdir}
msg "Connecting to the server...."
# First run: check out the revision pinned by $pkgver; later runs just
# update the existing checkout in place.
if [ ! -d ./${_bzrmod} ]; then
bzr co ${_bzrtrunk} ${_bzrmod} -r ${pkgver}
else
bzr up ${_bzrmod}
fi
# Build from a throwaway copy so the pristine checkout is never dirtied.
[ -d ./${_bzrmod}-build ] && rm -rf ./${_bzrmod}-build
cp -r ./${_bzrmod} ./${_bzrmod}-build
cd ./${_bzrmod}-build
# `|| return 1` propagates build failures to makepkg.
./setup.py build || return 1
./setup.py --without-icon-cache --without-mime-database --without-desktop-database install --root=${pkgdir}/ || return 1
}
| true
|
90a31c6f96ae0b445826324452972c385af06812
|
Shell
|
zeborrego/opensource.apple.com
|
/src/bind9/bind9-42/bind9/bin/tests/system/pending/tests.sh
|
UTF-8
| 6,266
| 3.375
| 3
|
[
"ISC"
] |
permissive
|
#!/bin/sh
#
# Copyright (C) 2009, 2010 Internet Systems Consortium, Inc. ("ISC")
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# $Id: tests.sh,v 1.3.16.4 2010-01-18 19:18:35 each Exp $
# Shared BIND system-test harness; conf.sh defines $DIG, $NSUPDATE and
# the other tool variables used throughout this script.
SYSTEMTESTTOP=..
. $SYSTEMTESTTOP/conf.sh
# replace_data dname RR old_data new_data
# Swap one resource record on the authoritative server (10.53.0.2:5300)
# via dynamic update: delete the old rdata, add the new rdata in the
# same transaction.  nsupdate's debug output is appended to
# nsupdate.out.test.  Returns 0 on success, 1 on bad arguments or a
# failed update.
replace_data()
{
if [ $# -ne 4 ]; then
echo I:unexpected input for replace_data
return 1
fi
_dname=$1
_rr=$2
_olddata=$3
_newdata=$4
_ret=0
# The heredoc is the nsupdate command script; stdout+stderr are logged.
$NSUPDATE -d <<END>> nsupdate.out.test 2>&1 || _ret=1
server 10.53.0.2 5300
update delete ${_dname} 30 ${_rr} ${_olddata}
update add ${_dname} 30 ${_rr} ${_newdata}
send
END
if [ $_ret != 0 ]; then
echo I:failed to update the test data
return 1
fi
return 0
}
# Every check below follows the same pattern: query the validating
# resolver (10.53.0.4) with dig, compare the +short answer against
# $expect, print a diagnostic on mismatch, and fold the per-check
# result ($ret) into the overall $status.  Legacy backtick/`expr`
# idioms are kept for old-/bin/sh portability.
status=0
n=0
# Common dig options; DIGOPTS_CD additionally sets +cd (checking
# disabled), i.e. asks the resolver to skip DNSSEC validation.
DIGOPTS="+short +tcp -p 5300"
DIGOPTS_CD="$DIGOPTS +cd"
echo I:Priming cache.
ret=0
expect="10 mail.example."
ans=`$DIG $DIGOPTS_CD @10.53.0.4 hostile MX` || ret=1
test "$ans" = "$expect" || ret=1
test $ret = 0 || echo I:failed, got "'""$ans""'", expected "'""$expect""'"
status=`expr $status + $ret`
echo I:Checking that bogus additional is not returned with +CD.
ret=0
expect="10.0.0.2"
ans=`$DIG $DIGOPTS_CD @10.53.0.4 mail.example A` || ret=1
test "$ans" = "$expect" || ret=1
test $ret = 0 || echo I:failed, got "'""$ans""'", expected "'""$expect""'"
status=`expr $status + $ret`
#
# Prime cache with pending additional records. These should not be promoted
# to answer.
#
echo "I:Priming cache (pending additional A and AAAA)"
ret=0
expect="10 mail.example.com."
ans=`$DIG $DIGOPTS @10.53.0.4 example.com MX` || ret=1
test "$ans" = "$expect" || ret=1
test $ret = 0 || echo I:failed, got "'""$ans""'", expected "'""$expect""'"
status=`expr $status + $ret`
echo "I:Replacing pending A"
ret=0
replace_data mail.example.com. A 192.0.2.2 192.0.2.3 || ret=1
status=`expr $status + $ret`
echo "I:Replacing pending AAAA"
ret=0
replace_data mail.example.com. AAAA 2001:db8::2 2001:db8::3 || ret=1
status=`expr $status + $ret`
# The pending additional data must not have been promoted: queries see
# the replaced (authoritative) values, with and without +cd.
echo "I:Checking updated data to be returned (without CD)"
ret=0
expect="192.0.2.3"
ans=`$DIG $DIGOPTS @10.53.0.4 mail.example.com A` || ret=1
test "$ans" = "$expect" || ret=1
test $ret = 0 || echo I:failed, got "'""$ans""'", expected "'""$expect""'"
status=`expr $status + $ret`
echo "I:Checking updated data to be returned (with CD)"
ret=0
expect="2001:db8::3"
ans=`$DIG $DIGOPTS_CD @10.53.0.4 mail.example.com AAAA` || ret=1
test "$ans" = "$expect" || ret=1
test $ret = 0 || echo I:failed, got "'""$ans""'", expected "'""$expect""'"
status=`expr $status + $ret`
#
# Prime cache with a pending answer record. It can be returned (without
# validation) with +CD.
#
echo "I:Priming cache (pending answer)"
ret=0
expect="192.0.2.2"
ans=`$DIG $DIGOPTS_CD @10.53.0.4 pending-ok.example.com A` || ret=1
test "$ans" = "$expect" || ret=1
test $ret = 0 || echo I:failed, got "'""$ans""'", expected "'""$expect""'"
status=`expr $status + $ret`
echo I:Replacing pending data
ret=0
replace_data pending-ok.example.com. A 192.0.2.2 192.0.2.3 || ret=1
status=`expr $status + $ret`
echo I:Confirming cached pending data to be returned with CD
ret=0
expect="192.0.2.2"
ans=`$DIG $DIGOPTS_CD @10.53.0.4 pending-ok.example.com A` || ret=1
test "$ans" = "$expect" || ret=1
test $ret = 0 || echo I:failed, got "'""$ans""'", expected "'""$expect""'"
status=`expr $status + $ret`
#
# Prime cache with a pending answer record. It should not be returned
# to no-DNSSEC clients.
#
echo "I:Priming cache (pending answer)"
ret=0
expect="192.0.2.102"
ans=`$DIG $DIGOPTS_CD @10.53.0.4 pending-ng.example.com A` || ret=1
test "$ans" = "$expect" || ret=1
test $ret = 0 || echo I:failed, got "'""$ans""'", expected "'""$expect""'"
status=`expr $status + $ret`
echo I:Replacing pending data
ret=0
replace_data pending-ng.example.com. A 192.0.2.102 192.0.2.103 || ret=1
status=`expr $status + $ret`
echo I:Confirming updated data returned, not the cached one, without CD
ret=0
expect="192.0.2.103"
ans=`$DIG $DIGOPTS @10.53.0.4 pending-ng.example.com A` || ret=1
test "$ans" = "$expect" || ret=1
test $ret = 0 || echo I:failed, got "'""$ans""'", expected "'""$expect""'"
status=`expr $status + $ret`
#
# Try to fool the resolver with an out-of-bailiwick CNAME
#
echo I:Trying to Prime out-of-bailiwick pending answer with CD
ret=0
expect="10.10.10.10"
ans=`$DIG $DIGOPTS_CD @10.53.0.4 bad.example. A` || ret=1
ans=`echo $ans | awk '{print $NF}'`
test "$ans" = "$expect" || ret=1
test $ret = 0 || echo I:failed, got "'""$ans""'", expected "'""$expect""'"
status=`expr $status + $ret`
echo I:Confirming the out-of-bailiwick answer is not cached or reused with CD
ret=0
expect="10.10.10.10"
ans=`$DIG $DIGOPTS_CD @10.53.0.4 nice.good. A` || ret=1
ans=`echo $ans | awk '{print $NF}'`
test "$ans" = "$expect" || ret=1
test $ret = 0 || echo I:failed, got "'""$ans""'", expected "'""$expect""'"
status=`expr $status + $ret`
#
# Make sure the resolver doesn't cache bogus NXDOMAIN
#
echo I:Trying to Prime bogus NXDOMAIN
ret=0
expect="SERVFAIL"
# Full (non +short) dig output here; the sed extracts the status code.
ans=`$DIG +tcp -p 5300 @10.53.0.4 removed.example.com. A` || ret=1
ans=`echo $ans | sed 's/^.*status: \([A-Z][A-Z]*\).*$/\1/'`
test "$ans" = "$expect" || ret=1
test $ret = 0 || echo I:failed, got "'""$ans""'", expected "'""$expect""'"
status=`expr $status + $ret`
echo I:Confirming the bogus NXDOMAIN was not cached
ret=0
expect="SERVFAIL"
ans=`$DIG +tcp -p 5300 @10.53.0.4 removed.example.com. A` || ret=1
ans=`echo $ans | sed 's/^.*status: \([A-Z][A-Z]*\).*$/\1/'`
test "$ans" = "$expect" || ret=1
test $ret = 0 || echo I:failed, got "'""$ans""'", expected "'""$expect""'"
status=`expr $status + $ret`
# Non-zero exit = number of failed checks; the harness keys off this.
echo "I:exit status: $status"
exit $status
| true
|
cdf662c470373c3b14d31bc1f5044ed9d2c52ad2
|
Shell
|
kuperiu/mb-test
|
/clean.sh
|
UTF-8
| 178
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Tear down a deployed project: delete its Kubernetes resources, then
# destroy the Terraform-managed infrastructure under infra/.
#
# Usage: clean.sh <project name>
set -e

# Require exactly one argument (the old `[ $# -eq 0 ]` check silently
# ignored extra arguments).
[ $# -eq 1 ] || { echo "Usage: $0 [project name]"; exit 1; }
PROJECT=$1

kubectl delete -f k8s/

# Run terraform from infra/ in a subshell so this script's working
# directory is untouched (replaces pushd/popd and their stack chatter).
(
  cd infra
  terraform destroy --var="project_name=$PROJECT"
)
| true
|
e01c7e2cf8cf9432e974615f83b31ad62aef2eb9
|
Shell
|
cjgd/scripts
|
/admin/sendmail.restart
|
UTF-8
| 364
| 3.203125
| 3
|
[] |
no_license
|
#! /bin/sh
# sendmail.restart -- restarts sendmail ...
# $Id$
# Carlos Duarte <cgd@mail.teleweb.pt>, 981223
#
# How it works: the pid file is fed through sh with "kill " prepended to
# its first line (the pid), so the daemon is terminated; any further
# lines in the pid file are executed verbatim by sh as well.
# NOTE(review): the "restart" presumably relies on those extra pid-file
# lines containing the daemon's start command -- confirm per platform
# before relying on this behaviour.
#
# Fixes: error paths now exit with status 1 (they previously exited 0),
# and $PID is quoted where used.

# Per-platform pid file location; unknown platforms leave PID unset.
case `uname | tr A-Z a-z` in
linux*) PID=/var/run/sendmail.pid ;;
irix*) PID=/etc/sendmail.pid ;;
esac
if test "$PID" = ""; then
	echo "What is this system?"
	exit 1
fi
if test -f "$PID"; then : ; else
	echo "Where is sendmail.pid?"
	exit 1
fi
# Keep the original pipeline -- see the header comment for why the whole
# file (not just the pid line) goes through sh.
sed '1s/^/kill /' "$PID" | sh
| true
|
47d07a2e96cdd0f1a04d5c39e8fe257e3ad97943
|
Shell
|
SerhatTeker/dotfiles
|
/bin/time_utc
|
UTF-8
| 533
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# -*- coding: utf-8 -*-
# vim: set ft=sh et ts=4 sw=4 sts=4:

# Print the current UTC time in ISO-8601 style, e.g.
#   2022-02-13T18:43:51+0000
# https://en.wikipedia.org/wiki/ISO_8601
#
# Other ISO-8601 shapes, for reference:
#   Date                2022-02-13
#   Week                2022-W06
#   Week with weekday   2022-W06-7
#   Date without year   --02-13
#   Ordinal date        2022-044
#
# `date -Iseconds` would be shorter, but it is not available on macOS,
# hence the explicit format string.
readonly iso_fmt='+%Y-%m-%dT%H:%M:%S%z'
date -u "$iso_fmt"
| true
|
09e4933dc1037fd8270024863fff362fd184f38d
|
Shell
|
connectthefuture/dotfiles-32
|
/scripts/bin/switch_to_workspace
|
UTF-8
| 823
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Switch the window manager to another workspace via wmctrl.
#   $1 = workspace name, or 'last' for the previously recorded one;
#   no argument = pick a workspace interactively through dmenu.
# The workspace that was current before each switch is written to
# /tmp/switch-to-workspace-last so 'last' can jump back to it.

# handle_selection NAME -- record the current workspace, then switch to
# the workspace whose wmctrl listing matches " NAME".  Used only for the
# dmenu path below.
handle_selection() {
  if [[ $1 ]]; then
    # `wmctrl -d` marks the current workspace with '*'; save its index.
    echo $( wmctrl -d | grep '*' | awk '{ print $1 }' ) > /tmp/switch-to-workspace-last
    target=$1
    wmctrl -s $(wmctrl -d | grep " $target" | awk '{ print $1 }')
  fi
}

if [[ $1 ]]; then
  if [[ $1 == 'last' ]]; then
    target=$( cat /tmp/switch-to-workspace-last )
  # elif [[ $1 == 'prev' ]]; then
  #  target=$( wmctrl -d | grep '*' | awk '{ print $1 }' )
  # elif [[ $1 == 'next' ]]; then
  #  target=$( cat /tmp/switch-to-workspace-last )
  else
    target=$1
  fi
  if [[ $target ]]; then
    # Record the workspace we are leaving, then switch by index/name.
    echo $( wmctrl -d | grep '\*' | awk '{ print $1 }' ) > /tmp/switch-to-workspace-last
    wmctrl -s "$target"
  fi
else
  # No argument: offer the workspace names (field 10 of `wmctrl -d`)
  # in dmenu and switch to whichever the user picks.
  handle_selection "$( wmctrl -d | awk '{ print $10 }' | dmenu -i -l 10 -b -nb '#242424' -nf white -sb '#2e557e' -fn 'Ubuntu Mono-12' )"
fi
| true
|
57266112fad9ca6beb6d0bff46206c92ecbb9782
|
Shell
|
hxin/OntoSuite-Miner
|
/lib/ensembl-api/ensembl-functgenomics/scripts/run_update_DB_for_release.sh.example
|
UTF-8
| 2,489
| 3.03125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Copyright [1999-2013] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Example driver: submits one LSF (bsub) job per funcgen database that
# runs update_DB_for_release.pl against it.  Edit $dbs and the host
# settings below, then remove the safety `exit;` to actually run.
#$* would be list of builds if none specifed will use DEFAULT build for the update DB
#GetOpts here with
#host
#no_farm
#dbname
#user
#pass
#regex?
USER=$1
shift
PASS=$1
shift
if [ ! $PASS ] || [ ! $USER ]
then echo "Must provide at a user and password argument"; exit; fi
#WARNING: This loop only works for standard DB names (i.e. only dev prefix allowed)
# Deliberate safety valve: this example never runs past here until the
# user has customised the settings and deleted the next two lines.
echo "Please check edit the script before running, by adding your dbnames and checking the host parameters"
exit;
dnadb_host='your_mysql_host'
host=$dnadb_host
port=3306
dbs="ornithorhynchus_anatinus_funcgen_71_1 oryctolagus_cuniculus_funcgen_71_3 pan_troglodytes_funcgen_71_214 rattus_norvegicus_funcgen_71_5 saccharomyces_cerevisiae_funcgen_71_4 sus_scrofa_funcgen_71_102 xenopus_tropicalis_funcgen_71_42"
dnadb_port=3306
dnadb_user=$USER
dnadb_pass=$PASS
for db in $dbs; do
    echo -e "\n\n::\tUpdating ${host}:${db}"
    #Put this in the log
    # Derive the species name and schema/data version from the db name,
    # e.g. rattus_norvegicus_funcgen_71_5 -> rattus_norvegicus / 71_5.
    latin=$(echo $db | sed 's/_funcgen_.*//')
    latin=$(echo $latin | sed 's/dev_//')
    data_version=$(echo $db | sed 's/.*funcgen_//')
    # LSF submission wrapper: per-db log files, 2GB memory request.
    bsub_cmd="bsub -o $HOME/logs/update_DB_for_release.${latin}_${data_version}.out -e $HOME/logs/update_DB_for_release.${latin}_${data_version}.err -J update_DB_for_release.${latin}_${data_version} -q long -R\"select[mem>2000] rusage[mem=2000]\" -M 2000000"
    job_cmd="perl -w $EFG_SRC/scripts/release/update_DB_for_release.pl\
 -species $latin\
 -port $port \
 -host $host\
 -user $USER \
 -data_version $data_version\
 -dbname $db\
 -dnadb_host $dnadb_host\
 -dnadb_user $dnadb_user\
 -dnadb_pass $dnadb_pass\
 -dnadb_port $dnadb_port\
 -check_displayable \
 -pass $PASS -no_log $*"
	#-skip_meta_coord
    echo -e "$bsub_cmd $job_cmd"
    #omit -no_log if running locally
    #echo through bash to avoid LSF -R anomalies VOODOO!
    echo "$bsub_cmd $job_cmd" | bash
done
| true
|
f6caffe59b6813d6ff984bc581b5a51100ede6da
|
Shell
|
nievergeltlab/PolygenicPTSD
|
/get_prs.sh
|
UTF-8
| 4,602
| 2.90625
| 3
|
[] |
no_license
|
# get_prs.sh -- runbook for building blood-pressure polygenic risk
# scores (SBP, DBP, hypertension) with PRSice from UKBB summary stats.
# Fix: the PRSice URL on the next line was previously NOT commented, so
# running the script tried to execute it as a command and failed.
#Download and install PRSice
#https://choishingwan.github.io/PRSice/
#Get the variant rs-id names and stuff from UKBB
wget https://www.dropbox.com/s/puxks683vb0omeg/variants.tsv.bgz?dl=0 -O variants.tsv.bgz
#Split the variant file into just the rs-ids, position, chr, ref and coded variants
zcat variants.tsv.bgz | awk '{print $1,$6,$2,$3,$4,$5}' > variants_short.tsv
#Get SBP
wget https://www.dropbox.com/s/cs6sm1a8wnle966/4080_raw.gwas.imputed_v3.both_sexes.tsv.bgz?dl=0 -O 4080_raw.gwas.imputed_v3.both_sexes.tsv.bgz
#Get DBP
wget https://www.dropbox.com/s/xq7i6wdvw4ov6c8/4079_raw.gwas.imputed_v3.both_sexes.tsv.bgz?dl=0 -O 4079_raw.gwas.imputed_v3.both_sexes.tsv.bgz
#Get HT
wget https://www.dropbox.com/s/m8qlfp0cjnn4ka7/20002_1065.gwas.imputed_v3.both_sexes.tsv.bgz?dl=0 -O 20002_1065.gwas.imputed_v3.both_sexes.tsv.bgz
#Only take bi-allelic non-indel variants with MAF > 5%
#Note: Alt allele is the A1 allele. In other words, caution: the minor allele column in the file is not always the coded allele!!
# The joins below: attach rsid/chr/pos/alleles to each summary-stat row,
# drop NaNs and indels, recode chromosome X as 23, and filter on MAF.
join variants_short.tsv <(zcat 4080_raw.gwas.imputed_v3.both_sexes.tsv.bgz) | grep -v NaN | awk '{ if (NR == 1 || (length($5) == 1 && length($6) == 1)) print $2,$3,$4,$6,$5,$8,$10,$13,$14,$16}' | sed 's/^X/23/g' | awk '{if (NR == 1 || ($6 > 0.05 && $6 <0.95)) print}' > SBP.tsv
join variants_short.tsv <(zcat 4079_raw.gwas.imputed_v3.both_sexes.tsv.bgz) | grep -v NaN | awk '{ if (NR == 1 || (length($5) == 1 && length($6) == 1)) print $2,$3,$4,$6,$5,$8,$10,$13,$14,$16}' | sed 's/^X/23/g' | awk '{if (NR == 1 || ($6 > 0.05 && $6 <0.95)) print}' > DBP.tsv
#Note, since this is a case/control trait, shift some columns over by 1
join variants_short.tsv <(zcat 20002_1065.gwas.imputed_v3.both_sexes.tsv.bgz) | grep -v NaN | awk '{ if (NR == 1 || (length($5) == 1 && length($6) == 1)) print $2,$3,$4,$6,$5,$8,$11,$14,$15,$17}' | sed 's/^X/23/g' | awk '{if (NR == 1 || ($6 > 0.05 && $6 <0.95)) print}' > HT.tsv
#fix kripke code to note this..
#This is going to run PRSIce. Mainly, it's to get the scores themselves. But also, if you supply valid phenotyping, it will give a basic association analysis.
#This is useful for making sure that the scores are predictive in the correct direction in the data.
study=mrsc
phenofile=mrs1_bp.pheno.txt #write name of phenotype file. Should be formatted like a PLINK phenotype file with FID IID then phenotype columns
phenoname=SBP #Write column name of phenotype to be analyzed
mkdir output # I put the outputs in a directory called output.
# SBP: continuous trait (--binary-target F), all p-value thresholds.
Rscript PRSice.R \
 --prsice PRSice_linux \
 --base SBP.tsv  \
 --target genotypes/"$study"_bgn_eur_chr"#" \
 --thread 16 \
 --full \
 --nonfounders \
 --snp rsid --stat beta --se se --A1 alt --A2 ref --pvalue pval \
 --pheno-file $phenofile --pheno-col $phenoname  \
 --binary-target F \
 --upper 1 \
 --interval 0.01 \
 --all-score \
 --out output/"$study"_sbpr
#There is an "$study"_sbpr.all.score file that has each subject's PRS at each risk score threshold.
#We'll change the column names from just p-value thresholds to also include the type of PRS.
#This will let the data read into the R script more easily..
head -n1 output/"$study"_sbpr.all.score | sed 's/\<[0-9]\>/sbp_prs_&/g' > sbp_header.txt
cat sbp_header.txt <(tail -n+2 output/"$study"_sbpr.all.score) > output/"$study"_sbpr.all.score_prs
###Now just do the same thing for the other two blood pressure traits...
#DBP
phenoname=DBP
Rscript PRSice.R \
 --prsice PRSice_linux \
 --base DBP.tsv  \
 --target genotypes/"$study"_bgn_eur_chr"#" \
 --thread 16 \
 --full \
 --nonfounders \
 --snp rsid --stat beta --se se --A1 alt --A2 ref --pvalue pval \
 --pheno-file $phenofile --pheno-col $phenoname  \
 --binary-target F \
 --upper 1 \
 --interval 0.01 \
 --all-score \
 --out output/"$study"_dbpr
head -n1 output/"$study"_dbpr.all.score | sed 's/\<[0-9]\>/dbp_prs_&/g' > dbp_header.txt
cat dbp_header.txt <(tail -n+2 output/"$study"_dbpr.all.score) > output/"$study"_dbpr.all.score_prs
#Hypertension
# HT: case/control trait, hence --binary-target T.
phenoname=HT
Rscript PRSice.R \
 --prsice PRSice_linux \
 --base HT.tsv  \
 --target genotypes/"$study"_bgn_eur_chr"#" \
 --thread 16 \
 --full \
 --nonfounders \
 --snp rsid --stat beta --se se --A1 alt --A2 ref --pvalue pval \
 --pheno-file $phenofile --pheno-col $phenoname  \
 --binary-target T \
 --upper 1 \
 --interval 0.01 \
 --all-score \
 --out output/"$study"_htnr
head -n1 output/"$study"_htnr.all.score | sed 's/\<[0-9]\>/htn_prs_&/g' > htn_header.txt
cat htn_header.txt <(tail -n+2 output/"$study"_htnr.all.score) > output/"$study"_htnr.all.score_prs
##That's it. You now have PRS that you can use in the pipeline
| true
|
287c8331ff312f61202af8cfe020a57b5d65c7ca
|
Shell
|
joshWretlind/CSCI442-OperatingSystems
|
/project1/alamode-fetch.sh
|
UTF-8
| 4,287
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# alamode-fetch.sh
# this is the main logic for the program
# Most of what this script does is validate things. The actual data we
# get is in the alamode-generate.sh script

# getInfo basically is the wrapper for the SSH command itself. This
# contains a little bit of validation, but most of that is done
# before this is ever called.
# getInfo HOST [OUTDIR]
#   Runs /tmp/alamode-generate.sh on HOST via ssh, has HOST scp its
#   report file (/tmp/HOST) back to this machine's $outdir, and cleans
#   up the remote temp files.  With one argument the destination is
#   $outdir_base/HOST; with two it is OUTDIR (created if missing).
# NOTE(review): $1/$2/$outdir are expanded unquoted throughout, so
# host names / paths containing whitespace would break -- confirm
# whether that input can occur before tightening.
getInfo () {
    if [ -z $1 ]
    then
        echo "Must call getInfo with a machine to get the info from"
        exit 1
    fi
    if [ ! $# -eq 1 ] && [ ! $# -eq 2 ]
    then
        echo "Too many arguments to getInfo"
        exit 1
    fi
    if [ $# -eq 1 ]
    then
        export outdir="$outdir_base/$1"
    else
        # Explicit OUTDIR: make sure it exists, is a directory, and is
        # writable (the last two checks only warn; they do not abort).
        if [ ! -e $2 ]
        then
            mkdir $2
        fi
        if [ ! -d $2 ]
        then
            echo "A file already exsists where this directiory is meant to be"
        fi
        if [ ! -w $2 ]
        then
            echo "We do not have write permissions to this directory"
        fi
        export outdir=$2
    fi
    # The remote side scp's its result back to us, so it needs our name.
    master_host=$(hostname)
    # Commands executed on the remote host: generate the report, remove
    # the pushed script, ship the report home, remove the remote copy.
    command="sh /tmp/alamode-generate.sh;
             rm /tmp/alamode-generate.sh;
             scp /tmp/$1 $master_host:$outdir;
             rm /tmp/$1"
    ssh -q $1 $command
}
# What pushCommand does is that it copies over the commands that need to
# ran to the host machine we're running things on, reporting it to their
# respective locaiton. This is it's own file.
# pushCommand HOST
#   scp alamode-generate.sh into HOST's /tmp so getInfo can execute it.
#   Exits 1 on a missing or extra argument.
# Fixes: $1 is now quoted (the old `[ -z $1 ]` only worked by accident),
# and `[ ! $# -eq 1 ]` is written as the direct `-ne` test.
pushCommand () {
    if [ -z "$1" ]
    then
        echo "Must call pushCommand with a machine to get the info from"
        exit 1
    fi
    if [ "$#" -ne 1 ]
    then
        echo "Too many arguments to pushCommand"
        exit 1
    fi
    scp -q ./alamode-generate.sh "$1":/tmp/alamode-generate.sh
}
# Option parsing:
#   -d DIR   optional output directory (default: a fresh mktemp dir)
#   -n HOST  fetch from a single host (mutually exclusive with -f)
#   -f FILE  fetch from every host/IP listed in FILE (one per line)
while getopts "d:n:f:" OPTIONS
do
    case "$OPTIONS" in
        d) if [ ! -z $DIRECTORY ]
           then
               echo "Cannot set two directory options"
               exit 1
           else
               if [ ! -z $OPTARG ]
               then
                   DIRECTORY=$OPTARG
               else
                   echo "You must set a directpry after the -d argument"
                   exit 1
               fi
           fi;;
        n) isN="foo"
           # isN/isF are sentinel flags used to reject combining -n/-f.
           if [ ! -z $isF ]
           then
               echo "Cannot set both n and f options -n"
               exit 1
           else
               if [ ! -z $OPTARG ]
               then
                   REMOTE_MACHINE=$OPTARG
               else
                   echo "-n options must be given a host"
                   exit 1
               fi
           fi;;
        f) isF="foo"
           if [ ! -z $isN ]
           then
               echo "Cannot set both n and f options -f"
               exit 1
           fi
           if [ ! -z $READ_FILE ] # Check if we've parsed this flag already
           then
               echo "$0: does not support nultiple -f's"
               exit 1
           else
               if [ ! -z $OPTARG ] # Check to make sure a token was given with this flag
               then
                   READ_FILE=$OPTARG
               else
                   echo "$0: Must specify a file with hostnames/IPs with -f"
                   exit 1
               fi
           fi;;
        \?) echo "usage: $0 [-d directory (OPTIONAL)] [-n IP or Name of computer] or [-f File containing names or IPs]"
            exit 1;;
    esac
done
######## Begin input validation for -f flag ############
# NOTE(review): these two tests pass vacuously when -f was not given
# ([ ! -f ] / [ ! -r ] with an empty operand evaluate false) -- that is
# what lets the -n path reach this point; confirm before refactoring.
if [ ! -f $READ_FILE ] #Check if file exists
then
    echo "$0: The file \"$READ_FILE\" does not exist."
    exit 1
fi
if [ ! -r $READ_FILE ] #Check if we have read permissions
then
    echo "$0: Cannot read \"$READ_FILE\"; check permissions."
    exit 1
fi
########################################################
# Default collection area; getInfo appends one subpath per host.
export outdir_base="$(mktemp -d)"
# Tell the user where the results will land.
if [ -z $DIRECTORY ]
then
    echo $outdir_base
else
    echo $DIRECTORY
fi
if [ ! -z $READ_FILE ] #We have a file to do
then
    #push the commands to all the hosts first
    for host in $(cat $READ_FILE)
    do
        pushCommand $host
    done
    #run the commands and clean up
    for host in $(cat $READ_FILE)
    do
        getInfo $host $DIRECTORY
    done
fi
if [ ! -z $REMOTE_MACHINE ] # we only have a single machine to get info on
then
    echo "trying to push to $REMOTE_MACHINE"
    pushCommand $REMOTE_MACHINE
    getInfo $REMOTE_MACHINE $DIRECTORY
fi
| true
|
d2bcdfa2b6da046448f390eaddd8580f11ca5a32
|
Shell
|
krystof-k/my-macos-setup
|
/setup.sh
|
UTF-8
| 10,136
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
# Personal macOS machine bootstrap.  Runs top-to-bottom; any failing
# command aborts the whole setup.
set -e # exit on any error

# ↓ copy-pasted ./utiliies/message.sh
# ANSI colour codes used by message().
BOLD='\033[1;30m'
WHITE='\033[0;37m'
YELLOW='\033[0;33m'
RED='\033[0;31m'
OFF='\033[0m'
# message TEXT [LEVEL] [KIND]
#   LEVEL: 'step' / 'substep' control indentation; anything else is a
#          bold top-level heading.
#   KIND:  'prompt' / 'info' / 'to-do' pick the colour prefix; 'to-do'
#          additionally appends the line to ./to-do.txt for follow-up.
message () {
  if [ "$2" == 'step' ]; then
    MESSAGE="  $1"
  elif [ "$2" == 'substep' ]; then
    MESSAGE="    $1"
  else
    MESSAGE="${BOLD}$1${OFF}"
  fi
  if [ "$3" == 'prompt' ]; then
    echo -e "${RED}${OFF} ${MESSAGE}"
  elif [ "$3" == 'info' ]; then
    echo -e "${WHITE}${OFF} ${MESSAGE}"
  elif [ "$3" == 'to-do' ]; then
    echo -e "${YELLOW}${OFF} ${MESSAGE} (added to to-do.txt)"
    echo -e "  ${MESSAGE}" >> ./to-do.txt
  else
    echo -e "  ${MESSAGE}"
  fi
}
# ↑ copy-pasted ./utiliies/message.sh
message 'Prepare for setup'
# Full disk access is needed for later steps; abort (and open the right
# System Preferences pane) if the user has not granted it yet.
message 'Have you enabled full disk access for terminal? [Y/n]' 'step' 'prompt'
read -n 1 -s -r REPLY
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
  message 'Please enable it, restart terminal and rerun the setup script.' 'substep' 'prompt'
  sleep 2
  open 'x-apple.systempreferences:com.apple.preference.security?Privacy_AllFiles'
  exit 1
fi
# Snapshot first so the pre-setup state can be restored with Time Machine.
message 'Create a clean APFS snapshot' 'step'
tmutil localsnapshot
message 'Please sign into the App Store manually and press any key to continue' 'step' 'prompt'
sleep 2
open -a "App Store"
read -n 1 -s -r
# Install the user's SSH private key and wire it into agent + config.
message 'Add private key' 'step'
message 'Enter absolute path to your private key' 'substep' 'prompt'
read PRIVATE_KEY_PATH
PRIVATE_KEY_FILENAME=`basename $PRIVATE_KEY_PATH`
message 'Move private key to `~/.ssh`' 'substep'
mkdir -p ~/.ssh
mv $PRIVATE_KEY_PATH ~/.ssh/$PRIVATE_KEY_FILENAME
chmod 600 ~/.ssh/$PRIVATE_KEY_FILENAME
message 'Add it to SSH agent' 'substep'
# -K option is deprecated in favor of --apple-use-keychain since macOS Monterey
ssh-add --apple-use-keychain ~/.ssh/$PRIVATE_KEY_FILENAME
message 'Add it to `~/.ssh/config`' 'substep'
tee -a ~/.ssh/config << END > /dev/null
Host *
  UseKeychain yes
  AddKeysToAgent yes
  IdentityFile ~/.ssh/$PRIVATE_KEY_FILENAME
END
message "Export public key to \`~/.ssh/${PRIVATE_KEY_FILENAME%.pem}.pub\`" 'substep'
ssh-keygen -y -f ~/.ssh/$PRIVATE_KEY_FILENAME > ~/.ssh/${PRIVATE_KEY_FILENAME%.pem}.pub
message 'Install Homebrew' 'step'
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
message 'Add Homebrew to PATH' 'substep'
echo 'eval "$(/opt/homebrew/bin/brew shellenv)"' >> ~/.zprofile
message 'Set number of Homebrew CURL retries to 10' 'substep'
echo 'HOMEBREW_CURL_RETRIES=10' >> ~/.zprofile
source ~/.zprofile
# Replace Apple's bundled git with Homebrew's; the before/after version
# messages show which binary is being picked up from PATH.
message 'Install Git from Homebrew' 'step'
message "Currently using Git (`git --version`) at \``which git`\`" 'substep' 'info'
message 'Install Git' 'substep'
brew install git
message 'Reload .zprofile' 'substep'
source ~/.zprofile
message "Currently using Git (`git --version`) at \``which git`\`" 'substep' 'info'
# The rest of the setup runs from inside this repository's checkout.
message 'Clone the repository into `~/Git/krystof-k/my-macos-setup`' 'step'
mkdir -p ~/Git/krystof-k
cd ~/Git/krystof-k
git clone git@github.com:krystof-k/my-macos-setup.git
cd ./my-macos-setup
message 'Install apps'
# Rosetta lets Intel-only apps run on Apple Silicon.
message 'Install Rosetta' 'step'
sudo softwareupdate --install-rosetta
message 'Install apps from Brewfile' 'step'
brew bundle
message 'Reload .zprofile' 'substep'
source ~/.zprofile
# nvm needs its directory plus shell hooks sourced from .zprofile.
message 'Set up Node version manager' 'step'
mkdir ~/.nvm
echo 'export NVM_DIR=~/.nvm' >> ~/.zprofile
echo 'source $(brew --prefix nvm)/nvm.sh' >> ~/.zprofile
echo 'source $(brew --prefix nvm)/etc/bash_completion.d/nvm' >> ~/.zprofile
source ~/.zprofile
message 'Install Node.js 18' 'substep'
nvm install 18
message 'Set up Ruby version manager' 'step'
echo 'eval "$(rbenv init -)"' >> ~/.zprofile
source ~/.zprofile
rbenv install 3.2.2
rbenv global 3.2.2
message 'Set global preferences'
bash ./preferences/global.sh
# Each script under preferences/apps configures one application.
message 'Set macOS apps preferences'
for script in ./preferences/apps/*.sh; do
  bash "$script"
done
message 'Configure apps'
message 'Configure night shift' 'step'
message 'Enable from sunset to sunrise' 'substep'
nightlight schedule start
message 'Set the temperature to the warmest' 'substep'
nightlight temp 100
message 'Configure Git' 'step'
message 'Set Git defaults' 'substep'
git config --global user.name 'Kryštof Korb'
git config --global user.email 'krystof@korb.cz'
message 'Add global .gitignore' 'substep'
cp .gitignore.global ~/.gitignore
git config --global core.excludesfile ~/.gitignore
message 'Use origin for branches without tracking upstream' 'substep'
git config --global push.autoSetupRemote true
message 'Use main as default branch' 'substep'
git config --global init.defaultBranch main
message 'Use nano as default editor' 'substep'
git config --global core.editor nano
# Espanso's config lives in this repo; symlink it into place.
message 'Configure Espanso' 'step'
message 'Link the config folder to the Git repository' 'substep'
rm -rf ~/Library/Application\ Support/espanso
ln -s ~/Git/krystof-k/my-macos-setup/apps/espanso ~/Library/Application\ Support/espanso
message 'Configure Visual Studio Code' 'step'
message 'Install extensions' 'substep'
bash ./apps/visual-studio-code/install-extensions.sh
message 'Configure Tower' 'step'
message 'Checkout branch after creating' 'substep'
defaults write com.fournova.Tower3 GTUserDefaultsDialogueOptionCheckoutCreatedBranch -bool true
message 'Show icons and text' 'substep'
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Display Mode" integer 1' ~/Library/Preferences/com.fournova.Tower3.plist
# The toolbar layout is an ordered array of item identifiers; build it
# entry by entry with PlistBuddy.
message 'Show create branch button' 'substep'
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers" array' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":0 string NSToolbarFlexibleSpaceItem' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":1 string GTToolbarItemGroupIdentifierDashboard' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":2 string NSToolbarSidebarTrackingSeparatorItemIdentifier' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":3 string GTToolbarItemIdentifierQuickOpen' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":4 string GTToolbarItemGroupIdentifierNavigation' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":5 string NSToolbarFlexibleSpaceItem' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":6 string GTToolbarItemIdentifierQuickActions' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":7 string GTToolbarItemIdentifierCreateBranch' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":8 string GTToolbarItemGroupIdentifierSync' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":9 string GTToolbarItemGroupIdentifierMerge' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":10 string GTToolbarItemGroupIdentifierStashes' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":11 string GTToolbarItemIdentifierGitFlow' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":12 string GTToolbarItemIdentifierRefresh' ~/Library/Preferences/com.fournova.Tower3.plist
/usr/libexec/PlistBuddy -c 'Add :"NSToolbar Configuration MainWindowToolbar.BigSur":"TB Item Identifiers":13 string GTToolbarItemIdentifierSearch' ~/Library/Preferences/com.fournova.Tower3.plist
defaults read com.fournova.Tower3 > /dev/null # reload plist file
/System/Library/PrivateFrameworks/SystemAdministration.framework/Resources/activateSettings -u # apply preferences
message 'Configure OneNote' 'step'
message 'Disable spellcheck' 'substep'
defaults write ~/Library/Containers/com.microsoft.onenote.mac/Data/Library/Preferences/com.microsoft.onenote.mac.plist ONEnableSpelling -bool false
message 'Disable autocorrect' 'substep'
defaults write ~/Library/Containers/com.microsoft.onenote.mac/Data/Library/Preferences/com.microsoft.onenote.mac.plist ONEnableAutoCorrect -bool false
message 'Manually disable auto capitalization in OneNote settings' 'substep' 'to-do'
message 'Install Yarn' 'step'
npm install --global yarn
message 'Set up GitHub Packages authentication' 'step'
# Docker must be running before `docker login` below will work.
message 'Open Docker, wait for the daemon to start and press any key to continue' 'step' 'prompt'
sleep 2
open -a Docker
read -n 1 -s -r
message 'Add token to ~/.zshenv' 'substep'
message 'Enter GitHub Packages token' 'substep' 'prompt'
read GITHUB_PACKAGES_TOKEN
message 'Enter GitHub Packages token username' 'substep' 'prompt'
read GITHUB_PACKAGES_TOKEN_USERNAME
tee -a ~/.zshenv << END
# GitHub Packages read-only token
export GITHUB_PACKAGES_TOKEN=${GITHUB_PACKAGES_TOKEN}
export GITHUB_PACKAGES_TOKEN_USERNAME=${GITHUB_PACKAGES_TOKEN_USERNAME}
END
source ~/.zshenv
message 'Login to Docker container registry' 'substep'
# --password-stdin keeps the token out of the process argument list.
echo $GITHUB_PACKAGES_TOKEN | docker login ghcr.io -u $GITHUB_PACKAGES_TOKEN_USERNAME --password-stdin
message 'Add token to ~/.npmrc' 'substep'
# Single quotes are deliberate: npm expands ${GITHUB_PACKAGES_TOKEN}
# itself at runtime, so the literal placeholder is written to .npmrc.
echo '//npm.pkg.github.com/:_authToken=${GITHUB_PACKAGES_TOKEN}' > ~/.npmrc
message 'Finishing'
message 'Clear cache' 'step'
killall cfprefsd
message 'Setup complete, please reboot' 'substep' 'info'
| true
|
e8950235cfb1fd12beb6bb6a829a021dd1b350b6
|
Shell
|
dmwm/CMSKubernetes
|
/kubernetes/cmsweb/scripts/deploy-cm.sh
|
UTF-8
| 2,148
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# helper script to deploy given service with given tag to k8s infrastructure
# Rebuilds the <service>-config ConfigMap in a namespace from a local
# configuration directory, decrypting any *.encrypted files with sops
# using the age key stored in the namespace's <ns>-keys-secrets secret.
if [ $# -ne 3 ]; then
    echo "Usage: deploy-cm.sh <namespace> <service-name> <path_to_configuration>"
    exit 1
fi
cluster_name=`kubectl config get-clusters | grep -v NAME`
ns=$1
srv=$2
conf=$3
# Scratch area for the decryption key; recreated fresh on every run and
# removed at the end.
tmpDir=/tmp/$USER/sops
if [ -d $tmpDir ]; then
    rm -rf $tmpDir
fi
mkdir -p $tmpDir
cd $tmpDir
# Bootstrap sops into $HOME/bin if it is not already on PATH.
if [ -z "`command -v sops`" ]; then
    # download soap in tmp area
    wget -O sops https://github.com/mozilla/sops/releases/download/v3.7.2/sops-v3.7.2.linux.amd64
    chmod u+x sops
    mkdir -p $HOME/bin
    echo "Download and install sops under $HOME/bin"
    cp ./sops $HOME/bin
fi
# cmsweb configuration area
echo "+++ cluster name: $cluster_name"
echo "+++ configuration: $conf"
echo "+++ cms service : $srv"
echo "+++ namespaces  : $ns"
if [ ! -d $conf/$srv ]; then
    echo "Unable to locate $conf/$srv, please provide proper directory structure like <configuration>/<service>/<files>"
    exit 1
fi
# Fetch the namespace's age key from its k8s secret and point sops at
# it; the caller's SOPS_AGE_KEY_FILE is saved and restored afterwards.
sopskey=$SOPS_AGE_KEY_FILE
kubectl get secrets $ns-keys-secrets -n $ns --template="{{index .data \"$ns-keys.txt\" | base64decode}}" > "$tmpDir/$ns-keys.txt"
export SOPS_AGE_KEY_FILE="$tmpDir/$ns-keys.txt"
echo "Key file: $SOPS_AGE_KEY_FILE"
cmdir=$conf/$srv
osrv=$srv
# ConfigMap names cannot carry underscores; strip them from the service
# name (the original name is kept in $osrv).
srv=`echo $srv | sed -e "s,_,,g"`
files=""
if [ -d $cmdir ] && [ -n "`ls $cmdir`" ]; then
    for fname in $cmdir/*; do
        # Decrypt *.encrypted files in place (dropping the suffix) and
        # include the decrypted file instead of the encrypted one.
        if [[ $fname == *.encrypted ]]; then
            sops -d $fname > $cmdir/$(basename $fname .encrypted)
            fname=$cmdir/$(basename $fname .encrypted)
            echo "Decrypted file $fname"
        fi
        # Accumulate unique --from-file arguments for kubectl below.
        if [[ ! $files == *$fname* ]]; then
            files="$files --from-file=$fname"
        fi
    done
fi
echo $files
# Recreate the ConfigMap from scratch with the collected files.
kubectl delete cm ${srv}-config -n $ns
kubectl create configmap ${srv}-config $files -n $ns
export SOPS_AGE_KEY_FILE=$sopskey
echo
echo "+++ list configmaps"
kubectl get cm -n $ns
rm -rf $tmpDir
| true
|
a4aed7132c9729d0b378d797612381fc8a52e074
|
Shell
|
bacek/asterisk-k8s-demo
|
/live-demo/kamailio/entrypoint.sh
|
UTF-8
| 1,625
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Set default settings, pull repository, build
# app, etc., _if_ we are not given a different
# command. If so, execute that command instead.
set -e
set -x
# Default values
: ${PID_FILE:="/var/run/kamailio.pid"}
: ${KAMAILIO_ARGS:="-DD -E -f /etc/kamailio/kamailio.cfg -P ${PID_FILE}"}
# confd requires that these variables actually be exported
export PID_FILE
# Make dispatcher.list exists
mkdir -p /data/kamailio
touch /data/kamailio/dispatcher.list
: ${CLOUD=""} # One of aws, azure, do, gcp, or empty
if [ "$CLOUD" != "" ]; then
PROVIDER="-provider ${CLOUD}"
fi
# Discover our addresses via netdiscover unless they were provided via env
: ${PRIVATE_IPV4:="$(netdiscover -field privatev4 ${PROVIDER})"}
: ${PUBLIC_IPV4:="$(netdiscover -field publicv4 ${PROVIDER})"}
: ${PUBLIC_HOSTNAME:="$(netdiscover -field hostname ${PROVIDER})"}
: ${PUBLIC_PORT:=5060}
: ${PRIVATE_PORT:=5080}
# Build local configuration
# The heredoc below generates kamailio #!define/#!subst preprocessor lines
# plus listen/alias directives advertising the public address.
cat <<ENDHERE >/data/kamailio/local.k
#!define PUBLIC_IP "${PUBLIC_IPV4}"
#!subst "/PUBLIC_IP/${PUBLIC_IPV4}/"
#!define PRIVATE_IP "${PRIVATE_IPV4}"
#!subst "/PRIVATE_IP/${PRIVATE_IPV4}/"
#!define PUBLIC_PORT "${PUBLIC_PORT}"
#!subst "/PUBLIC_PORT/${PUBLIC_PORT}/"
#!define PRIVATE_PORT "${PRIVATE_PORT}"
#!subst "/PRIVATE_PORT/${PRIVATE_PORT}/"
alias=${PUBLIC_IPV4} ${PUBLIC_HOSTNAME} ${SIP_HOSTNAME}
listen=udp:${PRIVATE_IPV4}:${PUBLIC_PORT} advertise ${PUBLIC_IPV4}:${PUBLIC_PORT}
listen=tcp:${PRIVATE_IPV4}:${PUBLIC_PORT} advertise ${PUBLIC_IPV4}:${PUBLIC_PORT}
listen=udp:${PRIVATE_IPV4}:${PRIVATE_PORT}
listen=tcp:${PRIVATE_IPV4}:${PRIVATE_PORT}
ENDHERE
# Runs kamaillio, while shipping stderr/stdout to logstash
# exec replaces this shell so kamailio becomes PID 1 and receives signals
exec /usr/sbin/kamailio $KAMAILIO_ARGS # $*
| true
|
68e43595356c22c2d9f360d14295b09a3345ad5f
|
Shell
|
hiddely/imovies
|
/virtual-machines/webservice/scripts/backup.sh
|
UTF-8
| 1,855
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Back up logs, issued certificates and the MySQL database, encrypt the
# archive with a hybrid scheme (random AES key, RSA-wrapped), and upload
# both the ciphertext and the wrapped key to the backup server via scp.
echo "=== BACKING UP ===";
BACKUP_DIR=/tmp/backup
DATESTRING=$(date +'%Y.%m.%d')
BACKUP_PKEY=/home/imovies-backup/.ssh/bak_rsa.pub.pem
#BACKUP_PKEY=/Users/hidde/IdeaProjects/iMovies/virtual-machines/webservice/keys/bak_rsa.pub.pem # local, to test
# start from an empty staging area on every run
rm -rf $BACKUP_DIR
mkdir $BACKUP_DIR
mkdir $BACKUP_DIR/original
cd $BACKUP_DIR/original
mkdir identities
# all files to be backed up
cp /var/log/syslog $BACKUP_DIR/original
cp /var/log/auth.log $BACKUP_DIR/original
cp /var/log/kern.log $BACKUP_DIR/original
cp /var/log/faillog $BACKUP_DIR/original
cp /var/log/lastlog $BACKUP_DIR/original
cp /var/log/spring.log $BACKUP_DIR/original
cp /home/imovies-admin/imovies/src/main/resources/crypto/certificates/* $BACKUP_DIR/original/identities
echo "=== DUMPING DATABASE ===";
mysqldump -P 3306 -h 127.0.0.1 -u webservice -pwebservice imovies > $BACKUP_DIR/original/database.sql
echo "=== END DATABASE DUMP ===";
#echo "HELLO BACKUP" > $BACKUP_DIR/original/log.txt # test log
zip -r $BACKUP_DIR/original.zip $BACKUP_DIR/original
echo "=== ENCRYPTING ===";
# fresh random symmetric key per backup; the archive is AES-256-CBC encrypted
KEY="$(openssl rand -base64 32)";
openssl aes-256-cbc -a -salt -k $KEY -in $BACKUP_DIR/original.zip -out $BACKUP_DIR/digest.$DATESTRING.zip.enc
# encrypt our encryption key with our public key and store
echo $KEY > $BACKUP_DIR/key.pem
openssl rsautl -encrypt -inkey $BACKUP_PKEY -pubin -in $BACKUP_DIR/key.pem -out $BACKUP_DIR/key.$DATESTRING.bin.enc
# remove the plaintext key as soon as it has been RSA-wrapped
rm -f $BACKUP_DIR/key.pem
echo "=== ENCRYPTED ===";
echo "=== UPLOADING TO SERVER ===";
scp -P 8022 $BACKUP_DIR/key.$DATESTRING.bin.enc vagrant@172.16.0.2:~/backups/
scp -P 8022 $BACKUP_DIR/digest.$DATESTRING.zip.enc vagrant@172.16.0.2:~/backups/
# Local, to test
#cp $BACKUP_DIR/key.bin.enc ~/Desktop/key.bin.enc
#cp $BACKUP_DIR/digest.zip.enc ~/Desktop/digest.zip.enc
echo "=== END UPLOAD ===";
echo "=== Finished BACKUP ===";
| true
|
3d64c1f3c9e5cf219c6c77d7a90471f6395f0a27
|
Shell
|
guowei-he/dalma-parallel-job-array
|
/mt_slurm_parallel_ja_submit.sh
|
UTF-8
| 2,942
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# It generates a job script of parallel job array,
# which will paralllely execute all commands in the file.
#
# Usage:
# slurm-parallel-ja-submit-multi-threads.sh <inputfile> -c <Number of threads per process, default 1> -t <max_time (in hours, default 8)>
#
main() {
# Validate and parse input
if [[ "$#" -eq 0 ]]
then
echo "Usage: slurm-parallel-ja_submit-multi-threads.sh <inputfile> -c <Number of threads per process, default 1> -t <max_time (in h, default 8)>"
exit 1
fi
local inputfile="$1"
if [[ ! -f "${inputfile}" ]]
then
echo "${inputfile} does not exist"
exit 1
fi
shift
echo "Input: ${inputfile}"
local nthreads=1
local max_time=1
while getopts ":c:t:" opt; do
case $opt in
c)
echo "Entered #threads per proc: $OPTARG" >&2
nthreads="$OPTARG"
;;
t)
echo "Entered #time: $OPTARG hour(s)" >&2
max_time="$OPTARG"
;;
\?)
echo "Invalid option: -$OPTARG" >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument." >&2
exit 1
;;
esac
done
# absolute directory + basename of the command file
DN=$(cd $(dirname $inputfile);pwd)
FN=$(basename $inputfile)
if [[ ! -e $DN/$FN ]]; then
echo "command file does not exist"
exit 1
fi
#
# Find how many jobs are to run
#
NJ=$(cat $DN/$FN | wc -l)
if (( $NJ <= 0 )); then
echo "invalid command file"
exit 1
fi
#
# Set number of job clusters (number of actual iterations as seen by SLURM)
#
# NCORES is the per-node core count; tasks-per-node must divide it evenly
NCORES=28
let "ntasks_per_node=NCORES/nthreads"
let "effective_procs_per_node=ntasks_per_node*nthreads"
if [[ "${effective_procs_per_node}" -ne "${NCORES}" ]]; then
echo "Wrong threads per process"
exit 1
fi
# number of array elements: enough nodes to cover all NJ commands
let "NN=(NJ+ntasks_per_node-1)/ntasks_per_node"
#
# Limit number of nodes that can be used at once
#
if (( $NN > 8 )); then
NN=8
fi
###############################################################
#
# BUILD THE SCRIPT TO RUN
#
###############################################################
# NB: unescaped $vars below are expanded NOW; \$-escaped ones are left for
# the generated job script to expand at run time on the compute node.
cat << EOF > job.$$.sh
#!/bin/bash
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=${ntasks_per_node}
#SBATCH --cpus-per-task=${nthreads}
#SBATCH --time=${max_time}:00:00
#SBATCH --output=output.log
execute_job() {
#
# retrieve line and execute
#
LIN=\$(awk "NR == \$MY_TASK_ID {print;exit}" $DN/$FN)
echo \$LIN > /tmp/\$\$-\$SLURM_JOBID-\$MY_TASK_ID-cmd.tmp
source /tmp/\$\$-\$SLURM_JOBID-\$MY_TASK_ID-cmd.tmp
rm -f /tmp/\$\$-\$SLURM_JOBID-\$MY_TASK_ID-cmd.tmp >& /dev/null
}
source ./mt_slurm_parallel_ja_core.sh
start_ja $NJ $NN ${ntasks_per_node} ${NCORES}
# To resubmit this job, run:
# sbatch --array=1-$NN job.$$.sh
EOF
#
# Make dynamically generated script executable
#
chmod 755 job.$$.sh
#
# Submit job array ('-n' to have exclusive use of node)
#
echo "Run this command. Change walltime in job script if necessary"
echo "sbatch --array=1-$NN job.$$.sh"
}
main "$@"
| true
|
55d283eecf97400b6652381647dd39a8f3dcfdca
|
Shell
|
dzyasseron/amielke-overlay
|
/media-video/vdrconvert/files/0.2.1/grab.sh
|
UTF-8
| 720
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/media-video/vdrconvert/files/0.2.1/grab.sh,v 1.1 2006/03/02 22:41:36 hd_brummy Exp $
#
# Grab a still frame from VDR via SVDRP.
#   $1 - image file name
#   $2 - destination directory
# Newer VDRs (VDRVERSNUM > 10337) support "GRAB <file> <quality> <w> <h>"
# directly; older ones need the legacy pnm form built with printf/xargs.
. /etc/conf.d/vdrconvert
# Read the numeric VDRVERSNUM (e.g. 10338) out of vdr's config.h
getvdrversnum()
{
vdrversnum=$(awk '/VDRVERSNUM/ { gsub("\"","",$3); print $3 }' /usr/include/vdr/config.h)
}
getvdrversnum
# Frame size follows the configured TV norm (DVDNORM from vdrconvert conf)
[ "$DVDNORM" = "pal" ] && RES="720 576" || RES="704 480"
# fix: the original used `[[ ${vdrversnum} > "10337" ]]`, a *lexicographic*
# string comparison (and a bashism under #!/bin/sh); compare numerically,
# defaulting to 0 when config.h could not be read
if [ "${vdrversnum:-0}" -gt 10337 ] ; then
(
$SVDRPSEND GRAB $1 100 $RES
cp -v /tmp/$1 "$2"
logger -t ${0##*/} -f /tmp/vdrgrab
) >/tmp/vdrgrab 2>&1 &
else
printf "GRAB '%s/%s' %s %d %d %d" "${2}" "${1}" "pnm" 100 $RES | xargs "$SVDRPSEND" >/tmp/vdrgrab 2>&1 &
logger "`cat /tmp/vdrgrab`"
fi
| true
|
d7d604c5e73ea25f7289a4e26f6464c8896195a3
|
Shell
|
luotao717/arsdk
|
/rootfs/db12x-clean/etc/rc.d/rcS
|
UTF-8
| 1,993
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# This script runs when init it run during the boot process.
# Boot-time setup for the AR9xxx/db12x board: mount filesystems, load the
# ethernet/USB/I2S kernel modules, bring up networking, and tune the VM
# page-cache knobs before optionally starting the wireless AP.
# Mounts everything in the fstab
mount -a
mount -o remount +w /
#
# Mount the RAM filesystem to /tmp
#
mount -t ramfs -n none /tmp
export PATH=$PATH:/etc/ath
# for profile-based-optimization
# debugfs can only be mounted once sysfs is available, hence the nesting
grep -iq "debugfs" /proc/filesystems
if [ $? -eq 0 ]
then
grep -iq "sysfs" /proc/filesystems
if [ $? -eq 0 ]
then
if [ ! -d /sys ]
then
mkdir /sys >/dev/null 2>&1
fi
mount -t sysfs none /sys >/dev/null 2>&1
if [ $? -eq 0 ]
then
mount -t debugfs none /sys/kernel/debug >/dev/null 2>&1
if [ $? -eq 0 ]
then
echo "** sysfs & debugfs mounted successfully **"
else
echo "****** debugfs mount failure ******"
fi
else
echo "****** sysfs mount failure ******"
fi
fi
fi
insmod /lib/modules/2.6.31/net/athrs_gmac.ko
##
## Put the names of the interfaces in the environmental variables
## (They can be board unique)
##
export WAN_IF=eth0
export LAN_IF=eth1
ifconfig $WAN_IF up
ifconfig $LAN_IF up
/etc/rc.d/rc.network
/etc/rc.d/rc.bridge
. /etc/ath/apcfg
#
# Enable USB
#
insmod /lib/modules/2.6.31/usb/usbcore.ko
insmod /lib/modules/2.6.31/usb/ehci-hcd.ko
insmod /lib/modules/2.6.31/usb/usb-storage.ko
insmod /lib/modules/2.6.31/usb/usbnet.ko
insmod /lib/modules/2.6.31/usb/cdc_ether.ko
#
# Enable I2S
#
insmod /lib/modules/2.6.31/i2s/ath_i2s.ko
#
# Untar the debug tools into /tmp/tools
#
mkdir /tmp/tools
cd /tmp/tools
tar -xzvf /sbin/debug.tgz
/usr/sbin/telnetd
# /usr/sbin/httpd -h /usr/www/
/bin/factoryreset /dev/freset
# start the page cache/kmem cache cleanup timer in the kernel
echo 1 > /proc/sys/vm/drop_caches
# when processes uses page-cache more than 30% of system memory,
# lets force them to write
echo 20 > /proc/sys/vm/dirty_ratio
# when the dirty pages cross more than 5% of sys memory,
# kick in the pdflush
echo 5 > /proc/sys/vm/dirty_background_ratio
##
## Check for Auto AP Start
##
# WLAN_ON_BOOT comes from /etc/ath/apcfg sourced above
if [ "${WLAN_ON_BOOT}" = "y" ]; then
/etc/ath/apup
fi
| true
|
0544b716f01fbc1e97a3dc61f551222ab444ffe5
|
Shell
|
Sunkek/abandoned-sunbot-layered
|
/sunbot-bot/entrypoint.sh
|
UTF-8
| 200
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: block until the API service accepts TCP connections
# on $API_HOST:$API_PORT, then exec the container's main command ("$@").
apt-get update && apt-get install -y netcat
echo "Waiting for API..."
# poll every 100ms with netcat's zero-I/O port scan until the API is up
while ! nc -z $API_HOST $API_PORT; do
sleep 0.1
done
echo "API started"
# replace this shell with the requested command so signals reach it directly
exec "$@"
#chmod +x /entrypoint.sh
| true
|
75ada940dbacf3af25ee4a086507a566e396e6cb
|
Shell
|
PhillBenoit/uofa-pbs-scripts
|
/ocelote_gpu.pbs
|
UTF-8
| 1,784
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# PBS job template for the Ocelote GPU nodes; replace the bracketed
# placeholders (group, queue, walltime) before submitting with qsub.
### This will be the researcher who supported your use of the HPC.
#PBS -W group_list=[your researcher here]
### Researchers can buy in for priority queueing. However, the amount of time
### they can use for this is limited. Everyone has access to unlimited
### windfall. However, any priority jobs will go first. The queues on Ocelote
### GPU nodes are windfall and standard
#PBS -q [your queue here]
### GPU nodes have 28 cores of standard processors. When requesting a GPU node,
### it is important to request the whole node. Each GPU node request will have
### the configuration below. If you need more nodes, simply increase the
### "select" number. This shows how to request 1 GPU. Keep in mind that there
### is only 1 GPU per node on the system. If you want more than 1 GPU, you
### will be requesting multiple nodes. There are 45 nodes but requesting that
### many is not recommended because they will not all be available at once and
### your job is likely to get stuck in the queue. With GPU requests, it's
### important to also configure your software to recognize the GPU resources.
### This will also inform how many you request. If you need help, please reach
### out to the HPC consult team.
#PBS -l select=1:ncpus=28:mem=250gb:pcmem=8gb:np100s=1
### This is the amount of time you think your job will take to run.
### Not sure how long it will take? Request the max of 10 days.
### (240:00:00) #PBS -l walltime=240:00:00
### This request shows 5 minutes #PBS -l walltime=00:05:00
#PBS -l walltime=[your time here]
### This will show which node your job is running on.
echo 'This script is running on:'
hostname
### GPU test program
nvidia-smi
### If you have any trouble or questions, please contact:
### hpc-consult@list.arizona.edu
| true
|
52c375898dee4ef61de226648ab44d53539cbf00
|
Shell
|
mijime/sham
|
/src/plug/clean.sh
|
UTF-8
| 195
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Clean up a plugin's directory. When the status variable __v__stat is 1 or 3,
# the directory named by __v__dir is removed (if it exists) and the status is
# advanced to 5; any other status leaves everything untouched.
__sham__plug__clean() {
  if [[ "${__v__stat}" == 1 || "${__v__stat}" == 3 ]]; then
    if [[ -d "${__v__dir}" ]]; then
      rm -r "${__v__dir}"
    fi
    __v__stat=5
  fi
}
| true
|
87f38c686cdd06f294953842ca060b6ae62421ba
|
Shell
|
zhangsunsuochang/sams
|
/pro.sh
|
UTF-8
| 396
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Preprocess a word-segmentation dataset.
#
# Usage: pro.sh <dataset-file> <corpus-type>
#
# For the "cityu" and "as" corpora the file is first converted to simplified
# characters with opencc (hk2s / tw2s configs); the (possibly converted) file
# is then fed to preprocess.py.
data=$1
type=$2
dataset=$data
# fix: quote the expansions — the original `[ $type = cityu ]` broke with a
# "unary operator expected" error whenever $2 was omitted or contained spaces
if [ "$type" = cityu ]; then
    opencc -i "${data}" -o "${data}_s" -c hk2s
    dataset=${data}_s
fi
if [ "$type" = as ]; then
    opencc -i "${data}" -o "${data}_s" -c tw2s
    dataset=${data}_s
fi
python preprocess.py --type produces \
    --dataset "${dataset}" \
    --to "${data}_repo" \
    --engs "${data}_engs" \
    --nums "${data}_nums" \
    --lines "${data}_repo2"
| true
|
5c9fa899131e183ad1def0e7d6247e03d5d07411
|
Shell
|
Christabelawah/Technologies
|
/fl2.sh
|
UTF-8
| 61
| 2.8125
| 3
|
[] |
no_license
|
# Count down from 5 to 1, printing each number followed by a blank line.
i=5
while test "$i" != 0
do
echo "$i"
echo " "
# fix: the original `i=expr $i - 1` never invokes expr — it runs "$i - 1" as
# a command with i temporarily set to "expr" — so the loop never terminated;
# the closing `done` was also missing. Use POSIX arithmetic expansion.
i=$((i - 1))
done
| true
|
211dac85189f1f4e0e7c3304d4e7245461e260ea
|
Shell
|
casperbrike/git-delete-merged
|
/git-delete-merged
|
UTF-8
| 335
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Delete every local branch that is already merged into master, skipping the
# long-lived branches, then return to the branch we started on.
# remember the currently checked-out branch (the line marked with "*")
current_branch=$(git branch | egrep "^\*" | awk '{split($0, a, " "); print a[2]}')
git checkout master
# merged branches, excluding the protected ones
merged_branches=$(git branch --merged | egrep -v "(master|staging|beta|production)")
if [ -n "$merged_branches" ]
then
git branch -d $merged_branches
else
echo 'No merged branches'
fi
git checkout $current_branch
| true
|
a084c9d6607feff10ea9b9a570a5fdc9379d3414
|
Shell
|
saga1015/scripts
|
/vps-scripts/nginx-install
|
UTF-8
| 1,430
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
###############################
#
# Nginx 1.2.7
# http://nginx.org/
#
# Configuration files
# vi /etc/nginx/*
#
# Builds nginx 1.2.7 from a PGP-verified source tarball on an RPM-based
# system, installs an init script and enables the service at boot.
#
###############################
# Dependencies
yum install gcc make autoconf automake zlib-devel openssl-devel pcre pcre-devel;
# Get public key to verify PGP keys
gpg --recv-keys --keyserver hkp://wwwkeys.pgp.net 0xA1C052F8;
# Download, compile and install
cd /usr/local/src;
wget http://nginx.org/download/nginx-1.2.7.tar.gz;
wget http://nginx.org/download/nginx-1.2.7.tar.gz.asc;
gpg --verify nginx-1.2.7.tar.gz.asc;
tar -xvzf nginx-1.2.7.tar.gz && cd nginx-1.2.7;
# paths follow FHS conventions; mail proxy modules are disabled
./configure --prefix=/usr/local/nginx \
--conf-path=/etc/nginx/nginx.conf \
--pid-path=/var/run/nginx.pid \
--http-log-path=/var/log/nginx/access.log \
--error-log-path=/var/log/nginx/error.log \
--sbin-path=/usr/local/sbin/nginx \
--user=nginx --group=nginx \
--with-http_ssl_module \
--without-mail_pop3_module \
--without-mail_imap_module \
--without-mail_smtp_module;
make && make install;
# Setup nginx user
/usr/sbin/useradd -s /sbin/nologin -r nginx;
# Init script
wget https://raw.github.com/sbarakat/scripts/master/vps-scripts/nginx-init.d -O /etc/init.d/nginx;
chown root:root /etc/init.d/nginx;
chmod 755 /etc/init.d/nginx;
# Start on start up
/sbin/chkconfig --add nginx;
/sbin/chkconfig --level 2345 nginx on;
service nginx start;
ln -s /usr/local/sbin/nginx /usr/sbin/nginx;
|
2f6ba42f65987576620650a483b2516cf72b91b4
|
Shell
|
petemoore/try-server-hook
|
/test/platform_files_test_markup/grab.sh
|
UTF-8
| 937
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Download a set of test fixtures (directory listings plus their HTTP
# headers) from Mozilla's FTP/hg servers for use as test markup.
# 1.4 for a version that should have files
# 1.3t for a version that shouldn't have files
# aurora for the fallback case
# central for the master case
# /releases for mapping info
for i in \
"https://ftp.mozilla.org/pub/mozilla.org/b2g/tinderbox-builds/mozilla-b2g30_v1_4-linux64_gecko/" \
"https://ftp.mozilla.org/pub/mozilla.org/b2g/tinderbox-builds/mozilla-b2g28_v1_3t-linux32_gecko/" \
"https://ftp.mozilla.org/pub/mozilla.org/b2g/tinderbox-builds/mozilla-aurora-linux32_gecko/" \
"https://ftp.mozilla.org/pub/mozilla.org/b2g/tinderbox-builds/mozilla-central-linux32_gecko/" \
"https://hg.mozilla.org/releases" \
"https://hg.mozilla.org/releases/mozilla-b2g30_v1_4/raw-file/eb690ed47c24/browser/config/version.txt"
do
# local file name = last path component (trailing slash stripped first)
filename="$(echo "$i" | sed -e 's,/$,,' | xargs basename)"
# -I fetches headers only; the second curl fetches the body
curl -L -I -o "$filename.headers" "$i"
curl -L -o "$filename" "$i"
echo Saved file to $filename you should add to git
done
| true
|
76b244a990bf40262a90ae05993fa1660a214846
|
Shell
|
leandropincini/dotfiles
|
/home/files/bash_profile
|
UTF-8
| 508
| 3.265625
| 3
|
[
"Unlicense",
"LicenseRef-scancode-public-domain"
] |
permissive
|
# loading everything
# ~/.extra, ~/.bash_prompt, ~/.exports, ~/.aliases and ~/.functions
# Shared dotfiles are sourced first, then OS-specific variants depending on
# `uname`; unreadable or missing files are silently skipped via the -r test.
for file in ~/.{aliases,exports,functions,extra}; do
[ -r "$file" ] && source "$file"
done
unset file
if [ $(uname) = "Darwin" ]; then
# macOS-only dotfiles
for file in ~/.{aliases_mac,exports_mac,extra_mac}; do
[ -r "$file" ] && source "$file"
done
unset file
fi
if [ $(uname) = "Linux" ]; then
# Linux-only dotfiles
for file in ~/.{aliases_linux,exports_linux}; do
[ -r "$file" ] && source "$file"
done
unset file
fi
| true
|
996422666b45f3d7ffcdd1bfab4fd569c8b674be
|
Shell
|
rsertelon/rsertelon-plans
|
/baikal/plan.sh
|
UTF-8
| 726
| 2.703125
| 3
|
[
"Unlicense"
] |
permissive
|
# Habitat plan for Baikal: the upstream release zip is already built, so
# do_build is a no-op and do_install just copies the tree into the package.
pkg_name=baikal
pkg_origin=rsertelon
pkg_version="0.9.3"
pkg_maintainer="Romain Sertelon <romain@sertelon.fr>"
pkg_license=("GPL-3.0")
pkg_description="Baïkal is a Cal and CardDAV server, based on sabre/dav, that includes an administrative interface for easy management."
pkg_upstream_url="http://sabre.io/baikal/"
pkg_source="https://github.com/sabre-io/Baikal/releases/download/${pkg_version}/baikal-${pkg_version}.zip"
pkg_shasum="d36955ce2e60a03875cf33ad793ddcecfae52096af39de1f2bf709de9f16cb5e"
pkg_dirname="baikal"
pkg_svc_user=root
pkg_svc_group=$pkg_svc_user
pkg_deps=(
core/nginx
)
# service binds: address/port exported by a php service
pkg_binds=(
[php]="address port"
)
# nothing to compile; the release archive ships ready-to-serve files
do_build(){
return 0
}
do_install() {
mkdir -p "${pkg_prefix}/baikal/"
cp -r * "${pkg_prefix}/baikal/"
}
| true
|
73876adeace12d29d37df64c556b1175af3489cd
|
Shell
|
zhixingfeng/shell
|
/hdf5tobam
|
UTF-8
| 581
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Convert a PacBio run folder (as produced by unziphdf5) to BAM: checks that
# Analysis_Results contains the expected three *.bax.h5 files and feeds them
# to bax2bam, writing "<outdir>/raw".
if [ $# -ne 2 ] ; then
echo "hdf5tobam indir(unzipped folder output by unziphdf5) outdir"
exit 1
fi
if [ "$(ls ./$1 | egrep Analysis_Results | wc -l)" == 0 ] ; then
echo "fail to find Analysis_Results in $1"
exit 1
fi
# a complete run is expected to provide exactly three .bax.h5 movie parts
if [ "$(ls ./$1/Analysis_Results/ | egrep .bax.h5 | wc -l)" != 3 ] ; then
echo "fail to find enough .bax.h5 files in ./$1/Analysis_Results/"
exit 1
fi
mkdir -p $2
# awk builds a single "bax2bam -o <outdir>/raw <f1> <f2> <f3>" command line
# from the listed files and executes it
ls ./$1/Analysis_Results/*.bax.h5 | awk -v indir=$1 -v outdir=$2 'BEGIN{cmd="bax2bam -o "outdir"/raw"} {cmd=cmd" "$0}END{print cmd; system(cmd)}'
| true
|
11d18b60652bb4d1715421d8500f07ee3e31183e
|
Shell
|
msys2/MINGW-packages
|
/mingw-w64-go/PKGBUILD
|
UTF-8
| 3,254
| 3.171875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Maintainer: Martell Malone < martell malone at g mail dot com >
# Contributor: Ray Donnelly <mingw.android@gmail.com>
# PKGBUILD for the Go toolchain on MSYS2/mingw-w64. Bootstraps with the
# previously packaged Go, builds via Windows make.bat, smoke-tests the new
# compiler, then packages the tree under $MINGW_PREFIX/lib/go.
_realname=go
pkgbase=mingw-w64-${_realname}
pkgname=("${MINGW_PACKAGE_PREFIX}-${_realname}")
pkgver=1.21.0
pkgrel=2
pkgdesc="Go Lang (mingw-w64)"
arch=('any')
mingw_arch=('mingw32' 'mingw64' 'ucrt64' 'clang64' 'clang32' 'clangarm64')
license=('spdx:BSD-3-Clause')
url="https://www.golang.org/"
depends=()
makedepends=("${MINGW_PACKAGE_PREFIX}-${_realname}"
"${MINGW_PACKAGE_PREFIX}-cc")
options=('!strip')
source=("https://go.dev/dl/go${pkgver}.src.tar.gz"{,.asc})
sha256sums=('818d46ede85682dd551ad378ef37a4d247006f12ec59b5b755601d2ce114369a'
'SKIP')
validpgpkeys=('EB4C1BFD4F042F6DDDCCEC917721F63BD38B4796')
noextract=(go${pkgver}.src.tar.gz)
# Helper macros to help make tasks easier #
apply_patch_with_msg() {
for _patch in "$@"
do
msg2 "Applying $_patch"
patch -Nbp1 -i "${srcdir}/$_patch"
done
}
# =========================================== #
prepare() {
# extract manually (noextract above); `|| true` tolerates tar warnings
tar -xzf ${srcdir}/go${pkgver}.src.tar.gz -C ${srcdir} || true
}
build() {
cd "${srcdir}"/${_realname}/src
# bootstrap with the already-installed Go package
export GOROOT_BOOTSTRAP=${MINGW_PREFIX}/lib/go
export GOROOT_FINAL=${MINGW_PREFIX}/lib/go
export GO_CFLAGS="-D__USE_MINGW_ANSI_STDIO=1"
export CFLAGS="-D__USE_MINGW_ANSI_STDIO=1"
export GO_BUILD_VERBOSE=1
cmd //c make.bat
}
check() {
cd "${_realname}"
# smoke test: gofmt + compile + run a hello-world and compare its output
test_text="Hello MSYS2!"
rm -f /tmp/test_main.go
echo "package main" > /tmp/test_main.go
echo "import \"fmt\"" >> /tmp/test_main.go
echo "func main() {" >> /tmp/test_main.go
echo "fmt.Println(\"${test_text}\")" >> /tmp/test_main.go
echo "}" >> /tmp/test_main.go
./bin/gofmt.exe -w /tmp/test_main.go
./bin/go.exe build -o /tmp/test_result.exe /tmp/test_main.go
output=$(/tmp/test_result.exe)
if [[ "$output" != "$test_text" ]]; then
echo "Output \"${output}\" does not match test-text \"${test_text}\""
exit 1
fi
rm -f /tmp/test_main.go /tmp/test_result.exe
}
package() {
cd "${_realname}"
export GOROOT="${srcdir}/${_realname}"
export GOBIN="${GOROOT}/bin"
mkdir -p "${pkgdir}${MINGW_PREFIX}/"{bin,lib/go,lib/go/doc,lib/go/src,lib/go/site/src,share/licenses/go,share/go}
mkdir -p "${pkgdir}${MINGW_PREFIX}/share/licenses/go"
install -Dm644 "${srcdir}"/${_realname}/LICENSE \
"${pkgdir}${MINGW_PREFIX}/share/licenses/go/LICENSE"
cp -rf bin "${pkgdir}${MINGW_PREFIX}"
cp -rf bin pkg src lib misc api test "${pkgdir}${MINGW_PREFIX}/lib/go"
cp -r doc/* "${pkgdir}${MINGW_PREFIX}/lib/go/doc/"
install -Dm644 VERSION "${pkgdir}${MINGW_PREFIX}/lib/go/VERSION"
rm -rf "${pkgdir}${MINGW_PREFIX}/lib/go/pkg/bootstrap" "${pkgdir}${MINGW_PREFIX}/lib/go/pkg/tool/*/api"
# TODO: Figure out if really needed
rm -rf "${pkgdir}${MINGW_PREFIX}"/lib/go/pkg/obj/go-build/*
# https://github.com/golang/go/issues/57179
install -Dm644 go.env "${pkgdir}${MINGW_PREFIX}/lib/go/go.env"
install -Dm644 LICENSE "${pkgdir}${MINGW_PREFIX}/share/licenses/go/LICENSE"
# install profile script
mkdir -p "${pkgdir}${MINGW_PREFIX}"/etc/profile.d
echo "export GOROOT=${MINGW_PREFIX}/lib/go" > "${pkgdir}${MINGW_PREFIX}"/etc/profile.d/go.sh
cp "${pkgdir}${MINGW_PREFIX}"/etc/profile.d/go.{sh,zsh}
}
| true
|
3aade59822a65ef83b74d15cd9be9fa5f6d495f3
|
Shell
|
Fintan-contents/collaborage
|
/src/aws/script/cron/stop-app-before-creating-snapshot-of-ebs-data-volume.sh
|
UTF-8
| 187
| 2.546875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Cron hook: stop the application containers before an EBS data-volume
# snapshot is created, so the snapshot captures a quiesced state.
set -euo pipefail
# ~/.bash_profile may reference unset variables; relax -u only while sourcing
set +u
source ~/.bash_profile
set -u
export PATH="/usr/local/bin:$PATH"
# stop docker-compose
# NOP_APP_ID is expected to come from the sourced profile
cd /home/centos/nop/docker/${NOP_APP_ID}
docker-compose stop
| true
|
228284dfe33c914547c6b48bb24adae5859d846e
|
Shell
|
rzarref/embratel_parallels_vdn
|
/pgms/scripts/apo/startvpn.sh
|
UTF-8
| 880
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# (Re)start the APS demo OpenVPN tunnel, wait for it to come up, prepend the
# VPN DNS server to /etc/resolv.conf and verify name resolution works.
#if pgrep -lf openvpn && [ $(dig +noall +answer host1.apo.apsdemo.org | wc -l) -gt 0 ]; then
# dig +noall +answer host1.apo.apsdemo.org
# echo 'VPN já está Configurada'
# exit 0
#fi
# re-exec ourselves under sudo when not already root
if [ $(id -u) != 0 ]; then
echo "must be root to run this script."
sudo /home/fastlane/Embratel/pgms/scripts/apo/startvpn.sh
exit 1
fi
cd /home/fastlane/Embratel/pgms/scripts/apo
DNS=10.112.0.11;
pkill openvpn
echo "Iniciando openvpn"
openvpn --verb 9 --config aps.ovpn > /var/log/openvpn_apo.log &
# poll the log for up to 30s for OpenVPN's success banner
DATE_LIMIT=$(( $(date +%s) + 30 ))
while [ $(date +%s) -lt ${DATE_LIMIT} ]; do
grep 'Initialization Sequence Completed' /var/log/openvpn_apo.log && break
sleep 1
done
# prepend the VPN DNS server unless it is already configured
grep -q $DNS /etc/resolv.conf || {
echo nameserver $DNS > /tmp/resolv.conf;
cat /etc/resolv.conf >> /tmp/resolv.conf;
mv /tmp/resolv.conf /etc/resolv.conf;
}
dig +noall +answer host1.apo.apsdemo.org
| true
|
872ec2d91f514a77c37a96116c0ad5120e6bfab2
|
Shell
|
gavioto/org.openscada.deploy
|
/org.openscada.deploy.package/modules/org.openscada.p2.bin/src/bin/p2.create
|
UTF-8
| 1,660
| 3.421875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision a fresh Equinox/p2 runtime into <targetDir>: installs a base set
# of OSGi bundles (plus optional extra units) via the p2 director, then drops
# in a launcher symlink and a generated configuration/config.ini.
# check arguments
if [ -z "$1" ]; then
echo "p2.create <targetDir>"
exit 2
fi
# load functions
# provides p2director and expects P2_REPOS to be set — see p2.functions
. p2.functions || exit 1
PROFILE="SDKProfile"
FLAVOR="tooling"
TARGET="$1"
ARG_UNITS="$2"
# check if a launcher already exists
if [ -e "$TARGET" ]; then
echo "$TARGET already exists"
exit 1
fi
# base set of installable units for a headless Equinox runtime
UNITS="org.eclipse.equinox.launcher"
UNITS="$UNITS,org.eclipse.osgi"
UNITS="$UNITS,org.eclipse.equinox.common"
UNITS="$UNITS,org.eclipse.update.configurator"
UNITS="$UNITS,org.eclipse.equinox.ds"
UNITS="$UNITS,org.eclipse.equinox.p2.console"
UNITS="$UNITS,org.eclipse.equinox.simpleconfigurator"
UNITS="$UNITS,org.openscada.utils.osgi.autostart"
# add additional units
if [ ! -z "$ARG_UNITS" ]; then
UNITS="$UNITS,$ARG_UNITS"
fi
if p2director -p2.nl "$LANG" -p2.ws gtk -p2.arch x86_64 -p2.os linux -r "$P2_REPOS" -roaming -bundlepool "$TARGET" -flavor "$FLAVOR" -profile "$PROFILE" -destination "$TARGET" -profileProperties org.eclipse.update.install.features=true -i "$UNITS"; then
ln -s "`dirname $0`/p2.launcher" "$TARGET/launcher"
mkdir "$TARGET/configuration"
FULL_TARGET="`readlink -f $TARGET`"
# write config.ini
cat <<__EOF__ > "$TARGET/configuration/config.ini"
osgi.bundles=org.eclipse.equinox.common@1:start, org.eclipse.update.configurator@2:start, org.openscada.utils.osgi.autostart@3:start
eclipse.ignoreApp=true
osgi.noShutdown=true
equinox.use.ds=true
eclipse.p2.data.area=@config.dir/../p2/
eclipse.p2.profile=SDKProfile
org.openscada.utils.osgi.autostart.file=$FULL_TARGET/startLevels.properties
org.eclipse.equinox.simpleconfigurator.configUrl=file\:org.eclipse.equinox.simpleconfigurator/bundles.info
__EOF__
fi
| true
|
cf1b29ae9a43741bcc3f3df4a6fee8b767e4459a
|
Shell
|
chakralinux/desktop
|
/powertop/PKGBUILD
|
UTF-8
| 900
| 2.9375
| 3
|
[] |
no_license
|
# PKGBUILD for powertop on Chakra: standard autotools build, with a sed in
# prepare() to stamp the version string (normally produced by a git hook).
pkgname=powertop
pkgver=2.9
pkgrel=1
pkgdesc="Tool that finds the software that makes your laptop use more power than necessary"
arch=('x86_64')
url="https://01.org/powertop"
license=('GPL2')
depends=('gcc-libs' 'libnl' 'ncurses' 'pciutils')
categories=('utils')
screenshot=('https://01.org/powertop/sites/default/files/resize/users/u8/powertop20_overview-640x381.png')
source=($pkgname-$pkgver.tar.gz::https://github.com/fenrus75/powertop/archive/v$pkgver.tar.gz)
sha256sums=('5daf009271a028c55f0d5da7e5cf85ce08f832831957828f0379faf721f5bad1')
prepare() {
cd "${srcdir}"/$pkgname-$pkgver
# version 2.9 info fix
sed -i 's:RUN-VERSION-SCRIPT-IN-GIT-REPOSITORY-ONLY:v2.9:' scripts/version
}
build() {
cd ${srcdir}/${pkgname}-${pkgver}
./autogen.sh
./configure --prefix=/usr --sbindir=/usr/bin
make
}
package() {
cd ${srcdir}/${pkgname}-${pkgver}
make DESTDIR=${pkgdir} install
}
| true
|
8f91ec3a24c748a57186ec472ce878fd97b3ed2e
|
Shell
|
OpenInkpot-archive/iplinux-fontconfig
|
/debian/fontconfig.postinst
|
UTF-8
| 791
| 3.609375
| 4
|
[
"HPND-sell-variant",
"LicenseRef-scancode-mit-old-style",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
#!/bin/sh
# Debian postinst for fontconfig: on "configure", clean up caches that older
# packages accidentally installed under /usr/var and stale files in
# /var/cache/fontconfig, then regenerate the system font cache.
set -e
#DEBHELPER#
if [ "$1" = configure ]; then
# Moving old caches accidentally installed to /usr/var/cache
if [ -d /usr/var/cache/fontconfig ]; then
# fix: the original line ended with a stray ":" argument (a typo for
# "|| :"), which made rm additionally try to remove a file named ":"
rm -rf /usr/var/cache/fontconfig || :
fi
if [ -d /usr/var/cache ]; then
rmdir /usr/var/cache
fi
if [ -d /usr/var ]; then
rmdir /usr/var
fi
# Removing accidentally installed Makefile* from /var/cache/fontconfig
if [ -f /var/cache/fontconfig/Makefile ]; then
rm -f /var/cache/fontconfig/Makefile* 2>/dev/null || :
fi
# Removing obsolete cache files
rm -f /var/cache/fontconfig/*.cache-2 || :
printf "Regenerating fonts cache... "
# failure must not leave set -e to abort silently: report and exit 1
fc-cache -s -f -v 1>/var/log/fontconfig.log 2>&1 || (printf "failed.\nSee /var/log/fontconfig.log for more information.\n"; exit 1)
printf "done.\n"
fi
| true
|
356a315f9c64578deee3d9dbf3430c1227395f78
|
Shell
|
jqhan/devops-demo
|
/.githooks/pre-commit
|
UTF-8
| 1,845
| 3.53125
| 4
|
[] |
permissive
|
#!/bin/bash
# Git pre-commit hook: for every Gradle module touched by the staged files,
# run static analysis (SpotBugs, PMD, checkstyle) and — when the
# PRECOMMITALLTESTS env var is truthy — unit and mutation tests, then upload
# the HTML reports to S3.
# every known module name, derived from `gradlew projects` output
all_modules=($(./gradlew projects | grep -oh "':.*" | tr -d \'\:'\r'))
touched_added_files=($(git diff --name-only --cached))
touched_modules=()
# map each staged path's top-level directory onto a known module
for path in ${touched_added_files[@]}; do
splitPath=(${path//\// }) # Splits on /
module=${splitPath[0]}
if [[ " ${all_modules[@]} " =~ " ${module} " ]]; then
# if module exists in all_modules
touched_modules+=(${module})
fi
done
# Remove duplicates
touched_modules=($(for v in "${touched_modules[@]}"; do echo "$v"; done | sort | uniq | xargs))
if [[ -z "${PRECOMMITALLTESTS}" ]]; then
# environment var is not set
PERFORM_ALL_TESTS=false
else
PERFORM_ALL_TESTS="${PRECOMMITALLTESTS}"
fi
for touched_module in ${touched_modules[@]}; do
echo "--------------------------------------"
echo "Running SpotBugs on module: ${touched_module}"
./gradlew ${touched_module}:spotbugsMain
echo "--------------------------------------"
echo "Running PMD on module: ${touched_module}"
./gradlew ${touched_module}:pmdMain
echo "--------------------------------------"
echo "Running checkstyle on module: ${touched_module}"
./gradlew ${touched_module}:checkstyleMain
if [ ${PERFORM_ALL_TESTS} == true ]; then
echo "--------------------------------------"
echo "Running unit tests on module: ${touched_module}"
./gradlew ${touched_module}:test
echo "--------------------------------------"
echo "Running mutation tests on module: ${touched_module}"
./gradlew ${touched_module}:pitest
fi
done
# only publish reports when at least one module was analysed
if [ ! ${#touched_modules[@]} -eq 0 ]; then
echo "Uploading HTML reports to AWS.."
./generate-html.sh &> /dev/null
echo "--------------------------------------"
echo "Link to test reports: http://demo-reports-devops-demo.s3-website-eu-west-1.amazonaws.com/"
echo "--------------------------------------"
fi
lolcommits --capture
| true
|
067d26006a41b61caeb8fb6e85f4d9cf4551e100
|
Shell
|
ravivcohen/dotfiles
|
/init/10_osx.sh
|
UTF-8
| 1,203
| 3.359375
| 3
|
[] |
no_license
|
# Dotfiles init step for macOS: install Homebrew if needed, update/upgrade
# formulae, tap required repositories and apply the global OSX defaults.
# Helpers like is_osx/e_header/setdiff/path_remove come from the dotfiles
# framework that sources this file.
is_osx || return 1
if [[ ! "$(type -P brew)" ]]; then
e_header "Installing Homebrew"
true | /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
e_header "Installing Homebrew pks on first run"
fi
# Exit if, for some reason, Homebrew is not installed.
[[ ! "$(type -P brew)" ]] && e_error "Homebrew failed to install." && return 1
# Just incase we set the path again over here.
# APPLE, Y U PUT /usr/bin B4 /usr/local/bin?!
PATH=/usr/local/bin:$(path_remove /usr/local/bin)
PATH=/usr/local/sbin:$(path_remove /usr/local/sbin)
export PATH
e_header "Brew DR"
brew doctor
e_header "Brew update"
# Make sure we’re using the latest Homebrew
brew update
e_header "Brew upgrade"
#Upgrade any already-installed formulae
brew upgrade
# Tap needed repo's
taps=("homebrew/cask-versions" "homebrew/cask-fonts" "universal-ctags/universal-ctags")
# setdiff keeps only the taps that are not installed yet
taps=($(setdiff "${taps[*]}" "$(brew tap)"))
if (( ${#taps[@]} > 0 )); then
for a_tap in "${taps[@]}"; do
e_header "Tapping Homebrew: $a_tap"
brew tap $a_tap
done
fi
e_header "Running OSX Global Config"
# OSX Config. Can safely be run everytime.
source $DOTFILES_HOME/conf/osx/conf_osx_global.sh
| true
|
d6104a0cad54d1a74b749d5bf9ad62e3a5c3b221
|
Shell
|
tqvarnst/idc-quarkus-labs
|
/src/main/scripts/oc-clean.sh
|
UTF-8
| 1,789
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
script_dir=$(dirname $0)
############
## Settings
############
container_runtime=podman
postgresql_pod_name=postgresql
spring_project=spring
spring_local_image_name=spring/todo
spring_pod_name=spring-boot
spring_pod_image=${spring_project}/todo
quarkus_jvm_project=quarkus-jvm
quarkus_jvm_local_image_name=quarkus-jvm/todo
quarkus_jvm_pod_name=quarkus-jvm
quarkus_jvm_pod_image=${quarkus_jvm_project}/todo
quarkus_native_project=quarkus-native
quarkus_native_local_image_name=quarkus-native/todo
quarkus_native_pod_name=quarkus-native
quarkus_native_pod_image=${quarkus_native_project}/todo
go_project=go
project_cpu_limit=8
project_mem_limit=2Gi
pod_cpu_limit=100m
spring_pod_memory_limit=256M
quarkus_jvm_pod_memory_limit=128M
quarkus_native_pod_memory_limit=50M
psql_db_name=todo-db
psql_db_host=localhost
psql_db_user=todo
psql_db_password=todo
if [ "$(uname)" == "Darwin" ]; then
container_runtime=docker
fi
function verify_oc_cli {
    # Abort unless the OpenShift client is reachable.
    # 'command -v' is the POSIX/builtin replacement for the external
    # (and less portable) 'which'.
    if ! command -v oc > /dev/null 2>&1; then
        echo "You need to have the oc cli on the path"
        exit 1
    fi
}
function check_authenticated {
    # Succeed silently when 'oc whoami' works; otherwise report the
    # missing login and stop the script with status 2.
    oc whoami > /dev/null 2>&1 && return 0
    echo "You need to be authenticated to a cluster"
    exit 2
}
function check_admin {
    # Only cluster-admin identities may run the cleanup.
    # BUG FIX: the original wrote
    #   if ! [ "$whoami" = "opentlc-mgr" ] || [ "$whoami" = "kube:admin" ]
    # which parses as '(not A) or B' and therefore *rejected* kube:admin
    # while accepting everyone who is not opentlc-mgr never short-circuited
    # correctly. The intended condition is "neither admin account".
    local whoami
    whoami=$(oc whoami)
    if [ "$whoami" != "opentlc-mgr" ] && [ "$whoami" != "kube:admin" ]; then
        echo "You need to be authenticated as an admin"
        exit 3
    fi
}
function delete_project {
    # Best-effort deletion: ignore failures (e.g. project already gone)
    # so the cleanup continues under 'set -e'.
    # FIX: quote "$1" so the project name is passed as a single argument.
    oc delete project "$1" > /dev/null 2>&1 || true
}
# Preconditions: oc present, logged in, and an admin identity.
verify_oc_cli
check_authenticated
check_admin
# Remove all lab projects; each call is best-effort.
delete_project ${spring_project}
delete_project ${quarkus_jvm_project}
delete_project ${quarkus_native_project}
delete_project ${go_project}
# Expose the registry
#oc policy add-role-to-user registry-editor user0
#sleep 2
#
| true
|
6864663a9830bd8abed1569f785a1ae5da883ed7
|
Shell
|
strigazi/athena
|
/InnerDetector/InDetCalibAlgs/PixelCalibAlgs/share/DeadMapBuilder.sh
|
UTF-8
| 2,737
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/zsh
# Builds ATLAS pixel dead-map conditions data for a given run:
# updates the dead map ROOT file, validates it against the reference DB
# and generates/runs a writer script that uploads the result.
# Usage: DeadMapBuilder.sh <IOV_RMin> <filename>
# NOTE(review): relies on AtlasVersion/AtlasProject and updateDeadMaps.exe
# from the sourced Athena environment — not defined here.
#CMT_SETUP_PATH="/afs/cern.ch/user/i/idcalib/w0/Pixel_Calibration/cmthome"
#CMT_SETUP_TAG="$AtlasVersion,$AtlasProject,32"
CMT_SETUP_TAG="$AtlasVersion,$AtlasProject,here"
#PACKAGE_SETUP_PATH="/afs/cern.ch/user/i/idcalib/w0/Pixel_Calibration/ATHENA/$AtlasProject-$AtlasVersion/cmt"
PACKAGE_SETUP_PATH="/afs/cern.ch/work/a/atlpixdq/Pixel_Calibration/ATHENA/$AtlasProject-$AtlasVersion"
#maintaglong="PixMapLong-RUN2-DATA-RUN1-UPD1-00"
#maintagshor="PixMapShort-RUN2-DATA-RUN1-UPD1-00"
#maintaglong="PixMapLong-DATA-RUN2-000-00"
#maintagshor="PixMapShort-DATA-RUN2-000-00"
#upd4taglong="PixMapLong-BLKP-UPD4-000-02"
#upd4tagshor="PixMapShort-BLKP-UPD4-000-03"
# used in BLK
maintaglong="PixMapLong-RUN2-DATA-RUN1-UPD1-00"
maintagshor="PixMapShort-RUN2-DATA-RUN1-UPD1-00"
# used in ES1
# NOTE: these assignments override the BLK tags above — the ES1 tags win.
maintaglong="PixMapLong-RUN2-ES1C-UPD1-000-06"
maintagshor="PixMapShort-RUN2-ES1C-UPD1-000-06"
workdir=`pwd`
if [[ $# -lt 2 ]]; then
echo 1>&2 "Usage: $0 <IOV_RMin> <filename>"
exit 127
fi;
# Produce the dead-map ROOT file for run $1 from input $2.
updateDeadMaps.exe ${2} DeadMap_DeadMapWriter_run${1}.root
echo;
echo "Running validation"
echo;
# Generate the Athena validation job options (unquoted EOF: shell
# variables below are expanded into the generated python file).
cat > $workdir/DeadMap_sqlite_validation_run${1}.py << EOF
PixMapFilename = 'DeadMap_DeadMapWriter_run${1}.root'
PixelStatus = 2049
ListSpecialPixels = False
ReferenceDB = "default" #"oracle", "sqlite"
#ReferenceDB = "sqlite"
ReferenceRun = int("${1}")
#ReferenceTag = 'PixMapShort-000-00'
#ReferenceLongTag = 'PixMapLong-000-00'
ReferenceTag = '${maintagshor}'
ReferenceLongTag = '${maintaglong}'
include( "PixelCalibAlgs/PixMapValidation.py" )
EOF
athena.py $workdir/DeadMap_sqlite_validation_run${1}.py
# Generate a standalone writer script: it re-sources the Athena setup,
# writes its own job options (inner EOG heredoc stays literal text here)
# and runs the DB writer.
cat > $workdir/DeadMapDBWriter_run${1}.sh << EOF
#!/bin/zsh
#cd $CMT_SETUP_PATH
#source setup.sh -tag=$CMT_SETUP_TAG
#cd $PACKAGE_SETUP_PATH
#source setup.sh
export STAGE_SVCCLASS="t0atlas"
export AtlasSetup=/afs/cern.ch/atlas/software/dist/AtlasSetup
alias asetup='source \$AtlasSetup/scripts/asetup.sh'
cd $PACKAGE_SETUP_PATH
asetup $CMT_SETUP_TAG
cd $workdir
cat > $workdir/DeadMapDBWriter_run${1}.py << EOG
PixMapFilename = 'DeadMap_DeadMapWriter_run${1}.root'
PixelStatus = 2049
ListSpecialPixels = False
OutputRun = int("${1}")
OutputLB = 1
doValidate = False
#OutputTag = 'PixMapShort-000-00'
#OutputLongTag = 'PixMapLong-000-00'
OutputTag = '${maintagshor}'
OutputLongTag = '${maintaglong}'
#include( "PixelCalibAlgs/PixMapDBWriter.py" )
include( "PixelCalibAlgs/DeadMapDBWriter.py" )
PixMapDBWriter.CalculateOccupancy = False
EOG
athena $workdir/DeadMapDBWriter_run${1}.py
rm $workdir/DeadMapDBWriter_run${1}.py
EOF
chmod u+x $workdir/DeadMapDBWriter_run${1}.sh
echo;
echo "$workdir/DeadMapDBWriter_run${1}.sh"
echo;
# Execute the generated writer.
$workdir/DeadMapDBWriter_run${1}.sh
# NOTE(review): STAGE_SVCCLASS_TMP is never set in this script — presumably
# exported by the calling environment; verify before relying on it.
export STAGE_SVCCLASS=$STAGE_SVCCLASS_TMP
echo "Job finished"
| true
|
22e57b652c10ffad45f0bc4c64423fcbdbb5bca2
|
Shell
|
RunestoneInteractive/RunestoneServer
|
/makeRelease.sh
|
UTF-8
| 629
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
set -e

# makeRelease.sh <release no>
# Tags the current commit and publishes a GitHub release, after asking
# the operator to confirm the pre-release checklist.

# Ask a yes/no question; loop until an answer is given.
# Returns 0 on yes; exits the script on no (as the original did).
confirm() {
  local prompt=$1 yn
  while true; do
    read -p "$prompt" yn
    case $yn in
      [Yy]* ) return 0;;
      [Nn]* ) exit;;
      * ) echo "Please answer yes or no.";;
    esac
  done
}

if [ $# -eq 0 ]; then
  echo "Usage: makeRelease <release no>"
  # FIX: missing argument is an error; the original exited with status 0.
  exit 1
fi

# Pre-release checklist (the two prompts were duplicated loops before).
confirm "Did you update/commit the version in pyproject.toml "
confirm "Did you update motd and VERSION? "

echo "tagging this release and pushing to github"
git tag -a "$1" -m 'tag new version'
git push --follow-tags
gh release create "v$1" --generate-notes
| true
|
8d67ccd6e5bd6de0ac91840c6c0e199525ddbd56
|
Shell
|
Boy-ming/Alic_env
|
/localhost/lnmp.sh
|
UTF-8
| 657
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Control the LNMP stack (nginx, MySQL, php7.0-fpm) as a unit.
# Usage: lnmp.sh (start|stop|restart) — must be run as root.
if [ ${UID} -eq 0 ]; then
	# Forward one action to each of the three init scripts.
	run_all() {
		/etc/init.d/nginx "$1" #nginx
		/etc/init.d/mysql "$1" #mysql
		/etc/init.d/php7.0-fpm "$1" #php
	}
	case $1 in
	start|stop|restart)
		run_all "$1"
		;;
	*)
		echo "Usage: $0 (start|stop|restart)"
		;;
	esac
	exit 0
else
	echo "没有权限(是不是少了sudo呢)~~~"
fi
| true
|
ab19c2e7986defa8ca249c0c8e991f1a28c96593
|
Shell
|
saifulriza/3-1
|
/3-1.sh
|
UTF-8
| 1,050
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# FIX: shebang was '#!bin/bash' (missing leading slash).
# Deploys a Pub/Sub-triggered "helloWorld" Cloud Function using the
# current gcloud project, then invokes it and reads its logs.
export PROJECT_ID=$(gcloud config get-value project)
export BUCKET_NAME=$(gcloud config get-value project)-bucket
mkdir -p gcf_hello_world
cd gcf_hello_world || exit 1
# FIX: the heredoc delimiter must be quoted. Unquoted, the shell expanded
# ${name} to an empty string and executed the JS template literal's
# backticks (`My Cloud Function: ...`) as a command substitution,
# corrupting the generated index.js.
cat <<'EOF' > index.js
/**
* Background Cloud Function to be triggered by Pub/Sub.
* This function is exported by index.js, and executed when
* the trigger topic receives a message.
*
* @param {object} data The event payload.
* @param {object} context The event metadata.
*/
exports.helloWorld = (data, context) => {
const pubSubMessage = data;
const name = pubSubMessage.data
? Buffer.from(pubSubMessage.data, 'base64').toString() : "Hello World";
console.log(`My Cloud Function: ${name}`);
};
EOF
gsutil mb -p $PROJECT_ID gs://$BUCKET_NAME
gcloud functions deploy helloWorld \
--stage-bucket $BUCKET_NAME \
--trigger-topic hello_world \
--runtime nodejs8
gcloud functions describe helloWorld
DATA=$(printf 'Hello World!'|base64) && gcloud functions call helloWorld --data '{"data":"'$DATA'"}'
gcloud functions logs read helloWorld
| true
|
e263ddde6eb67fee86a0ea5ca5f2f91c46add65c
|
Shell
|
Hadryan/manythings
|
/scripts/deploy.sh
|
UTF-8
| 929
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
set -euo pipefail

# Build and push a docker image for a manythings service.
# Required env: DOCKER_USERNAME, DOCKER_PASSWORD (set -u enforces them
# at first use).

function help() {
  echo "Please specify which service you would like to deploy"
  # FIX: 'exit -1' is a non-standard operand (becomes 255 in bash).
  exit 1
}

# FIX: the original called enterToContinue without ever defining it,
# so the 'deploy' path died with "command not found" under set -e.
function enterToContinue() {
  read -r -p "Press ENTER to continue (Ctrl-C to abort) "
}

IMAGE_NAME=""
IMAGE_PATH=""
VERSION="latest"
SERVICE="manythings-webapp"

# Compose local properties with flag options
while [ "$#" -ne 0 ] ; do
  case "$1" in
    deploy)
      IMAGE_NAME="${DOCKER_USERNAME}/${2:-$SERVICE}:${3:-$VERSION}"
      IMAGE_PATH="./services/webapp"
      echo ""
      echo "Image to deploy: ${IMAGE_NAME} ?"
      echo ""
      enterToContinue
      shift
      ;;
    -h|--help)
      help
      ;;
    *)
      shift
      ;;
  esac
done

# Robustness: without a 'deploy' argument there is nothing to build.
[ -n "$IMAGE_NAME" ] || help

echo "Deploying image ${IMAGE_NAME}..."
docker build -t "$IMAGE_NAME" "$IMAGE_PATH"
# SECURITY NOTE: --password exposes the secret in the process list;
# prefer 'docker login --password-stdin' fed from a file or pipe.
docker login --username "$DOCKER_USERNAME" --password "$DOCKER_PASSWORD"
docker push "$IMAGE_NAME"
echo "Successfully push image to docker"
| true
|
72a7a2d9a39854cee510c94c2899aea85f521501
|
Shell
|
doumbia-alt/commonvoice-fr
|
/DeepSpeech/import_m-ailabs.sh
|
UTF-8
| 566
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Import the M-AILABS speech dataset for DeepSpeech training.
# Skips the import when the train CSV already exists.
# Env: M_AILABS_LANG (required), M_AILABS_SKIP, ENGLISH_COMPATIBLE,
#      IMPORTERS_VALIDATE_LOCALE — presumably set by the calling
#      pipeline; verify against the surrounding Docker setup.
set -xe
pushd $HOME/ds/
# When targeting an English-compatible alphabet, normalize transcripts.
if [ "${ENGLISH_COMPATIBLE}" = "1" ]; then
IMPORT_AS_ENGLISH="--normalize"
fi;
# Only import once: the train CSV marks a completed import.
if [ ! -f "/mnt/extracted/data/M-AILABS/${M_AILABS_LANG}/${M_AILABS_LANG}_train.csv" ]; then
if [ ! -z "${M_AILABS_SKIP}" ]; then
SKIPLIST="--skiplist ${M_AILABS_SKIP}"
fi;
python bin/import_m-ailabs.py ${IMPORT_AS_ENGLISH} \
${SKIPLIST} \
--language ${M_AILABS_LANG} \
${IMPORTERS_VALIDATE_LOCALE} \
/mnt/extracted/data/M-AILABS/
fi;
popd
| true
|
816ee2aa21a20fa2d55a55ab0d4202793f50f071
|
Shell
|
IslamAlam/docker-xubuntu
|
/config/skel/.xsessionrc
|
UTF-8
| 538
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# X session environment for the containerized Xubuntu desktop:
# sets the XDG base-directory and session variables for XFCE.
export XDG_CACHE_HOME=${HOME:?}/.cache
export XDG_CONFIG_DIRS=/etc/xdg/xdg-xubuntu:/etc/xdg
export XDG_CONFIG_HOME=${HOME:?}/.config
export XDG_CURRENT_DESKTOP=XFCE
export XDG_DATA_DIRS=/usr/share/xubuntu:/usr/share/xfce4:/usr/local/share:/usr/share
export XDG_DATA_HOME=${HOME:?}/.local/share
export XDG_MENU_PREFIX=xfce-
# ${VAR:?} aborts the session with an error if the variable is unset.
export XDG_RUNTIME_DIR=/run/user/${UNPRIVILEGED_USER_UID:?}
export XDG_SESSION_DESKTOP=xubuntu
export XDG_SESSION_TYPE=x11
# When not using the dummy X server, point VirtualGL at the real display.
if [ "${ENABLE_XDUMMY:?}" != 'true' ]; then
export VGL_DISPLAY=${DISPLAY:?}
fi
| true
|
9818a97cbfc2e697a7a2ec789399cc847d940651
|
Shell
|
josephworks/ultimatelinux.sh
|
/makedeb.sh
|
UTF-8
| 502
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
# Build the ultimatelinux .deb package from the local scripts.
echo "Getting BuildTools"
sudo apt install tree
# FIX: build the staging tree with one 'mkdir -p' instead of a fragile
# mkdir/cd chain that failed when any directory already existed.
mkdir -p build/DEBIAN build/usr/local/bin
# Move files over for building
cp ./uls.sh ./build/usr/local/bin
cp ./ulsrepeat.sh ./build/usr/local/bin
cp ./ultimatelinux.sh ./build/usr/local/bin
cp ./control ./build/DEBIAN/
# BUILDING ARTIFACT
dpkg-deb --build build
mv build.deb ./ultimatelinux.deb
# Map all build files
ls
tree
# Delete Temporary Build Workspace
rm -rf build
| true
|
0ead3eb4e134458f48530884b30e935b581da357
|
Shell
|
xkszltl/Roaster
|
/pkgs/utils/fpm/install.sh
|
UTF-8
| 4,043
| 4.03125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# ================================================================
# Install Locally-built Package
# ================================================================
# Installs the package produced by the local build (rpm/deb depending on
# distro) and, when a publish directory exists, copies it into the repo.
# Env (assumed set by the build harness — TODO confirm):
#   INSTALL_ABS, INSTALL_ROOT, DISTRO_ID, DISTRO_VERSION_ID
set -e
# ----------------------------------------------------------------
# Clean up install directory
# ----------------------------------------------------------------
rm -rf "$INSTALL_ABS"
# ----------------------------------------------------------------
# Identify package path
# ----------------------------------------------------------------
# Default package name: roaster-<current directory, lowercased>.
[ "$PKG_NAME" ] || export PKG_NAME="roaster-$(basename $(pwd) | tr '[:upper:]' '[:lower:]')"
case "$DISTRO_ID" in
"centos" | "fedora" | "rhel" | 'scientific')
export PKG_TYPE='rpm'
;;
"debian" | "ubuntu")
export PKG_TYPE='deb'
;;
*)
export PKG_TYPE='sh'
;;
esac
# Locate exactly one matching package file next to the install root.
export PKG_PATH="$(find "$INSTALL_ROOT/.." -maxdepth 1 -type f -name "$PKG_NAME[\\-_]*.$PKG_TYPE" | xargs realpath -e)"
if [ ! "$PKG_PATH" ]; then
echo "[ERROR] No package file found for \"$PKG_NAME\"."
echo "[ERROR] Might have error occured during packaging."
exit 1
elif [ $(wc -l <<<"$PKG_NAME") -gt 1 ]; then
echo "[ERROR] Multiple candidates detected:"
sed 's/^/[ERROR] /' <<<"$PKG_PATH"
echo "[ERROR] Please update the search condition to narrow it down."
exit 1
fi
echo '----------------------------------------------------------------'
echo " Package Summary"
echo '----------------------------------------------------------------'
case "$PKG_TYPE" in
"rpm")
rpm -qlp "$PKG_PATH" | sed 's/^/ /'
;;
"deb")
dpkg -c "$PKG_PATH" | sed 's/^/ /'
;;
esac
echo '----------------------------------------------------------------'
ls -lh "$PKG_PATH" | sed 's/^/ /'
echo '----------------------------------------------------------------'
# ----------------------------------------------------------------
# Install
# ----------------------------------------------------------------
# Note:
# - yum/dnf skip install with exit code 0 when package with the same version exist.
# - apt-get install regardless in this case.
# - apt-get does not have reinstall command.
case "$PKG_TYPE" in
"rpm")
# Try each verb in turn until one succeeds; the trailing '_' sentinel
# turns exhaustion into a failed '[ "$i" != _ ]' and aborts under set -e.
PKG_YUM_SEQ="reinstall install downgrade update"
rpm -q "$PKG_NAME" || PKG_YUM_SEQ="install"
PKG_YUM_CMD="$(which dnf >/dev/null 2>&1 && echo 'dnf' || echo 'yum')"
# Remove legacy.
sudo "$PKG_YUM_CMD" remove -y "$(sed 's/^[^\-]*\-/codingcafe\-/' <<< "$PKG_NAME")" || true
for i in $PKG_YUM_SEQ _; do
[ "$i" != '_' ]
echo "[INFO] Trying with \"$PKG_YUM_CMD $i\"."
sudo "$PKG_YUM_CMD" "$i" -y "$PKG_PATH" && break
echo "[INFO] Does not succeed with \"$PKG_YUM_CMD $i\"."
done
;;
"deb")
PKG_APT_SEQ="install reinstall upgrade"
# Remove legacy.
sudo DEBIAN_FRONTEND=noninteractive apt-get -o 'DPkg::Lock::Timeout=3600' remove -y "$(sed 's/^[^\-]\-/codingcafe\-/' <<< "$PKG_NAME")" || true
for i in $PKG_APT_SEQ _; do
[ "$i" != '_' ]
echo "[INFO] Trying with \"apt-get $i\"."
if [ "$i" = "reinstall" ]; then
sudo DEBIAN_FRONTEND=noninteractive apt-get -o 'DPkg::Lock::Timeout=3600' remove -y "$PKG_NAME" && sudo apt-get -o 'DPkg::Lock::Timeout=3600' install -y "$PKG_PATH" && break
else
sudo DEBIAN_FRONTEND=noninteractive apt-get -o 'DPkg::Lock::Timeout=3600' "$i" -y "$PKG_PATH" && break
fi
echo "[INFO] Does not succeed with \"apt-get $i\"."
done
sudo DEBIAN_FRONTEND=noninteractive apt-get -o 'DPkg::Lock::Timeout=3600' install -fy
;;
esac
# ----------------------------------------------------------------
# Publish
# ----------------------------------------------------------------
# Copy the package into the local yum repo layout when it exists.
export RPM_PUB_DIR='/var/www/repos/codingcafe'
if [ -d "$RPM_PUB_DIR" ]; then
pushd "$RPM_PUB_DIR"
sudo mkdir -p "rhel$DISTRO_VERSION_ID/$(uname -m)"
pushd "$_"
find . -maxdepth 1 -name "$PKG_NAME-*" -type f | xargs sudo rm -f
sudo install -m664 -t . "$PKG_PATH"
popd
popd
fi
| true
|
d69063cc06d56d0f81e36b421cb63277fd18d6be
|
Shell
|
krunalpatel5456/krunal
|
/mathscript.sh
|
UTF-8
| 601
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# This script asks the user for 2 numbers, then does the
# 5 basic arithmetic operations on those numbers.
# 'read -r' keeps backslashes in the input literal.
read -r -p "Enter a number " firstnum
read -r -p " Enter a second number " secondnum
total=$((firstnum + secondnum))
echo "$firstnum + $secondnum equals $total"
total=$((firstnum - secondnum))
echo "$firstnum - $secondnum equals $total"
total=$((firstnum * secondnum))
echo "$firstnum * $secondnum equals $total"
# FIX: guard against division by zero, which previously crashed the
# script with an arithmetic error.
if [ "$secondnum" -eq 0 ]; then
    echo "Cannot divide by zero" >&2
else
    total=$((firstnum / secondnum))
    echo "$firstnum / $secondnum equals $total"
    remainder=$((firstnum % secondnum))
    echo "$firstnum % $secondnum equals $remainder"
fi
| true
|
e310128615334e38037209c571c442b4e6e5c436
|
Shell
|
7u83/actube
|
/ssl/mkrootca.sh
|
UTF-8
| 2,502
| 3
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Creates self-signed root CAs plus signed intermediate CAs for the
# actube test environment (generic, Cisco AC and Cisco AP identities).
echo Creating Root CAs
KEYSIZE=2048
CONFIG=openssl.cnf
ROOT_CA_DIR=./root-ca
INT_CA_DIR=./intermediate-ca
DAYS=6000
# Initialize the CA databases (serial + index) on first run.
if [ ! -e $ROOT_CA_DIR ]
then
echo "Initializing root-ca"
mkdir $ROOT_CA_DIR
echo '1000' > $ROOT_CA_DIR/serial
touch $ROOT_CA_DIR/index.txt
fi
if [ ! -e $INT_CA_DIR ]
then
echo "Initializing intermediate-ca"
mkdir $INT_CA_DIR
echo '1000' > $INT_CA_DIR/serial
touch $INT_CA_DIR/index.txt
fi
# mkrootca ROOT_SUBJ INT_SUBJ [NAME]
#   Creates <NAME->root-ca key/cert, an intermediate key/CSR, and signs
#   the intermediate with the root. NAME (optional) prefixes the files.
mkrootca()
{
ROOT_SUBJ=$1
INT_SUBJ=$2
NAME=$3
if [ ! -z $NAME ]
then
PREF="$NAME-"
fi
# Create a self-signed root CA
openssl req -nodes -new -x509 \
-sha1 \
-days ${DAYS} \
-extensions v3_ca \
-newkey rsa:${KEYSIZE} \
-keyout $ROOT_CA_DIR/${PREF}root-ca.key -out $ROOT_CA_DIR/${PREF}root-ca.crt \
-config ${CONFIG} \
-x509 \
-subj "$ROOT_SUBJ"
# Create a key for intermediate CA
openssl genrsa -out $INT_CA_DIR/${PREF}int-ca.key $KEYSIZE
# Create req for intermediate CA
openssl req -sha1 -new -key $INT_CA_DIR/${PREF}int-ca.key -out $INT_CA_DIR/${PREF}int-ca.csr \
-subj "$INT_SUBJ"
# Sign intermediate CA cert using previously created root CA
openssl ca -config ${CONFIG} -batch -keyfile $ROOT_CA_DIR/${PREF}root-ca.key \
-cert $ROOT_CA_DIR/${PREF}root-ca.crt \
-extensions v3_ca -notext -md sha1 -in $INT_CA_DIR/${PREF}int-ca.csr \
-out $INT_CA_DIR/${PREF}int-ca.crt
}
# Default (unprefixed) CA pair.
ROOT_SUBJ="/C=DE/ST=Berlin/L=Berlin/O=Cauwersin/CN=7u83.cauwersin.com/emailAddress=7u83@mail.ru"
INT_SUBJ="$ROOT_SUBJ"
mkrootca "$ROOT_SUBJ" "$INT_SUBJ"
# Cisco vWLC (access controller) identity.
ROOT_SUBJ="/C=US/ST=California/L=San Jose/O=Cisco Virtual Wireless LAN Controller/CN=CA-vWLC-AIR-CTVM-K9-080027949DE0/emailAddress=support@vwlc.com"
INT_SUBJ="$ROOT_SUBJ"
mkrootca "$ROOT_SUBJ" "$INT_SUBJ" cisco-ac
#ROOT_SUBJ="/C=US/ST=California/L=San Jose/O=airespace Inc/CN=C1130-f866f2a342fc/emailAddress=support@airespace.com"
#ROOT_SUBJ="/C=US/ST=California/L=San Jose/O=Cisco Systems/CN=C1130-f866f2a342fc/emailAddress=support@cisco.com"
#ROOT_SUBJ="/ST=California/L=San Jose/C=US/O=Cisco Systems/CN=C1130-f866f2a342fc/emailAddress=support@cisco.com"
#ROOT_SUBJ="/C=US/ST=California/L=San Jose/O=Cisco Systems/CN=C1200-c80aa9cd7fa4/emailAddress=support@cisco.com"
#ROOT_SUBJ="/C=US/ST=California/L=San Jose/O=Cisco Systems/CN=Cisrot/emailAddress=support@cisco.com"
#ROOT_SUBJ="/C=US/ST=California/L=San Jose/O=Cisco Systems/CN=C1130-c80aa9cd7fa4/emailAddress=support@cisco.com"
# Cisco access-point identity.
ROOT_SUBJ="/C=US/ST=California/L=San Jose/O=Cisco Systems/CN=C1130-0019dbe09327/emailAddress=support@cisco.com"
INT_SUBJ="$ROOT_SUBJ"
mkrootca "$ROOT_SUBJ" "$INT_SUBJ" cisco-ap
| true
|
db89e0e80d38749fa67496a9947a3acea5b311cc
|
Shell
|
Murd4/DEVSCOLA
|
/tree.sh
|
UTF-8
| 785
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive exercise: build a nested folder tree from user input,
# then optionally display it or search for a file.
echo "
**********************
Command-Line Assesment:
1. Type 'tree' to create a folder tree.
2. Once created, type 'find' to find a file inside it.
"
read -p "Introduce your option: " opt1
if [[ $opt1 == "tree" ]]; then
  read -p "Introduce the number of folders" folder_num
  a=0
  b=1
  while [ $a -lt $folder_num ]
  do
    read -p "Introduce the name of folder ${b}" folder[$a]
    ((a+=1))
    ((b+=1))
  done
  # Join the folder names with '/' to form the nested path.
  IFS=/
  # BUG FIX: the original used "${folder[*]})" — the stray ')' was part
  # of the quoted string, so the last directory (and the touched path)
  # were created with a literal ')' in their names.
  mkdir -p "${folder[*]}"
  touch "${folder[*]}/foo"
  echo -e " -> Folder tree created"
  read -p "Do you want to display the tree? [y/n] (It might not work if 'tree' is not installed.)" opt2
  if [[ $opt2 == "y" ]]; then
    tree ${folder[0]}
  fi
elif [[ $opt1 == "find" ]]; then
  read -p "Introduce a search term (i.e foo) : " word1
  find . -name $word1
fi
| true
|
e818d7ecb4dcd6b83308b85e605362444dcf4855
|
Shell
|
gregory1506/Assignment3
|
/Assignment3/installserver.sh
|
UTF-8
| 1,474
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Installs the Python app into /srv/app, installs dependencies, and
# registers 'worker' and 'server' as systemd services.
app_path=/srv/app
mkdir $app_path
cp *.py $app_path
# install python3 stuff
apt -y update
apt -y install python3-bottle
apt -y install python3-pip
pip3 install azure-storage
pip3 install azure-servicebus
pip3 install aiohttp
# create worker service
# The unit file is assembled line-by-line with printf appends.
touch /etc/systemd/system/worker.service
printf '[Unit]\nDescription=worker Service\nAfter=rc-local.service\n' >> /etc/systemd/system/worker.service
printf '[Service]\nWorkingDirectory=%s\n' $app_path >> /etc/systemd/system/worker.service
printf 'ExecStart=/usr/bin/python3 %s/worker.py\n' $app_path >> /etc/systemd/system/worker.service
printf 'ExecReload=/bin/kill -HUP $MAINPID\nKillMode=process\nRestart=on-failure\n' >> /etc/systemd/system/worker.service
printf '[Install]\nWantedBy=multi-user.target\nAlias=worker.service' >> /etc/systemd/system/worker.service
# create server service
touch /etc/systemd/system/server.service
printf '[Unit]\nDescription=server Service\nAfter=rc-local.service\n' >> /etc/systemd/system/server.service
printf '[Service]\nWorkingDirectory=%s\n' $app_path >> /etc/systemd/system/server.service
printf 'ExecStart=/usr/bin/python3 %s/server.py\n' $app_path >> /etc/systemd/system/server.service
printf 'ExecReload=/bin/kill -HUP $MAINPID\nKillMode=process\nRestart=on-failure\n' >> /etc/systemd/system/server.service
printf '[Install]\nWantedBy=multi-user.target\nAlias=server.service' >> /etc/systemd/system/server.service
# Start both services (not enabled at boot — intentional? verify).
systemctl start worker
systemctl start server
| true
|
06bb1fbce4b062a70f309432e206e73c52da4aea
|
Shell
|
fabric8-analytics/fabric8-analytics-common
|
/vscode-visual-tests/runtest.sh
|
UTF-8
| 333
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -ex
# Runs the VS Code visual tests with behave, optionally inside a
# freshly created virtualenv (skipped when NOVENV=1).
function prepare_venv() {
virtualenv -p python3 venv && source venv/bin/activate && python3 `which pip3` install -r requirements.txt
}
# Create the venv unless the caller opted out; abort if creation fails.
[ "$NOVENV" == "1" ] || prepare_venv || exit 1
export VS_CODE_VERSION=1.32
# Extra CLI args ($@) are forwarded to behave.
PYTHONDONTWRITEBYTECODE=1 python3 `which behave` --tags=-skip -D dump_errors=true @feature_list.txt $@
| true
|
04ab67ba435e11d2f71752a5e941f1ddf6c3611e
|
Shell
|
Klebert-Engineering/deep-spell-9
|
/publish.sh
|
UTF-8
| 630
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Package the deep-spell demo: copy selected modules/models into
# ../<README first line>-demo and zip the result.

# Recreate "$1.zip" from directory "$1", excluding macOS metadata files.
function zipdir {
    # FIX: -f so the first run (no previous zip) does not print an error.
    rm -f "$1.zip"
    zip -r "$1.zip" "$1" -x "*.DS_Store"
}

# FIX: quote paths throughout — the export name comes from README's
# first line, which may contain spaces.
exportdir="$(head -n1 README.md)-demo"
exportpath="../$exportdir"
rm -rf "$exportpath"
find . -name __pycache__ | xargs -n1 rm -rf
mkdir "$exportpath"
mkdir "$exportpath/modules"
mkdir "$exportpath/models"
cp -r modules/deepspell "$exportpath/modules"
cp -r modules/deepspell_service "$exportpath/modules"
cp models/deepsp_extra-v2_na_lr003_dec50_bat3192_128-128-128.* "$exportpath/models"
cp models/deepsp_discr-v3_na-lower_lr003_dec50_bat3072_fw128-128_bw128.* "$exportpath/models"
cp service.* "$exportpath"
cp README.md "$exportpath"
cd ..
zipdir "$exportdir"
| true
|
b1a11eebf74b9b71d6d9e6f901997153c1fffb4c
|
Shell
|
shahed3184/Audio-Album-Shell-Script
|
/albumScripts/Balam-Prem_Shikari.sh
|
UTF-8
| 1,694
| 2.9375
| 3
|
[] |
no_license
|
# ------------- SCRIPT ------------- #
#!/bin/bash
# Downloads the "Balam - Prem Shikari" album into a nested directory
# derived from this script's own filename (Artist-Album → files/Artist/Album).
scriptDirectory="${0##*/}"
# Remove .sh from file name
#fileDirectory=${scriptDirectory:: - 3}
#fileDirectory${scriptDirectory::${#scriptDirectory}-3}
fileDirectory=${scriptDirectory%???}
cd ..
mkdir files
cd files
# Split "Artist-Album" on '-' and descend one directory per component.
IFS='-' read -ra array <<< "$fileDirectory"
for element in "${array[@]}"
do
echo "creating dir $element"
mkdir $element
cd $element
done
# Fetch each track; -N skips files already downloaded and up to date.
wget -N "http://download.music.com.bd/Music/B/Balam/Prem%20Shikari/02%20-%20Julie%20-%20Meghla%20Bikel%20(music.com.bd).mp3"
wget -N "http://download.music.com.bd/Music/B/Balam/Prem%20Shikari/09%20-%20Ovi%20-%20Khacha%20(music.com.bd).mp3"
wget -N "http://download.music.com.bd/Music/B/Balam/Prem%20Shikari/01%20-%20Balam%20-%20Prem%20Shikari%20(music.com.bd).mp3"
wget -N "http://download.music.com.bd/Music/B/Balam/Prem%20Shikari/04%20-%20Zahid%20Pintu%20-%20Bhalo%20Bhalo%20Lage%20Na%20(music.com.bd).mp3"
wget -N "http://download.music.com.bd/Music/B/Balam/Prem%20Shikari/07%20-%20Balam%20-%20Bhoboghure%20(music.com.bd).mp3"
wget -N "http://download.music.com.bd/Music/B/Balam/Prem%20Shikari/10%20-%20Zahid%20Pintu%20-%20Bhurre%20Bhuriya%20Kuri%20(music.com.bd).mp3"
wget -N "http://download.music.com.bd/Music/B/Balam/Prem%20Shikari/08%20-%20Ovi%20-%20Rang%20Dila%20(music.com.bd).mp3"
wget -N "http://download.music.com.bd/Music/B/Balam/Prem%20Shikari/06%20-%20Zahid%20Pintu%20-%20Mon%20Majhi%20(music.com.bd).mp3"
wget -N "http://download.music.com.bd/Music/B/Balam/Prem%20Shikari/03%20-%20Ovi%20-%20Doyal%20(music.com.bd).mp3"
wget -N "http://download.music.com.bd/Music/B/Balam/Prem%20Shikari/05%20-%20Julie%20-%20Golper%20Dinga%20(music.com.bd).mp3"
| true
|
f44e247d5a4153c7af4c716733ab05beef15d96a
|
Shell
|
yosoyfunes/warp-engine
|
/.warp/setup/redis/redis.sh
|
UTF-8
| 5,898
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash +x
# Interactive wizard: optionally adds Redis Cache / Session / FPC
# services to the generated docker-compose and .env sample files.
# NOTE(review): warp_message_*, warp_question_ask_default and the
# *FILESAMPLE variables come from the surrounding warp framework.
warp_message ""
warp_message_info "Configuring the Redis Service"
PATH_CONFIG_REDIS='./.warp/docker/config/redis'
MSJ_REDIS_VERSION_HUB=1 # True
# --- Redis Cache ------------------------------------------------------
# Loop until the user answers Y/y/N/n.
while : ; do
respuesta_redis_cache=$( warp_question_ask_default "Do you want to add a service for Redis Cache? $(warp_message_info [Y/n]) " "Y" )
if [ "$respuesta_redis_cache" = "Y" ] || [ "$respuesta_redis_cache" = "y" ] || [ "$respuesta_redis_cache" = "N" ] || [ "$respuesta_redis_cache" = "n" ] ; then
break
else
warp_message_warn "wrong answer, you must select between two options: $(warp_message_info [Y/n]) "
fi
done
if [ "$respuesta_redis_cache" = "Y" ] || [ "$respuesta_redis_cache" = "y" ]
then
# Show the Docker Hub hint only once across the three sections.
if [ $MSJ_REDIS_VERSION_HUB = 1 ] ; then
warp_message_info2 "You can check the Redis versions available here: $(warp_message_info '[ https://hub.docker.com/_/redis/ ]')"
MSJ_REDIS_VERSION_HUB=0 # False
echo "#Config Redis" >> $ENVIRONMENTVARIABLESFILESAMPLE
fi
resp_version_cache=$( warp_question_ask_default "What version of Redis cache do you want to use? $(warp_message_info [5.0]) " "5.0" )
warp_message_info2 "Selected Redis Cache version: $resp_version_cache, in the internal port 6379 $(warp_message_bold 'redis-cache:6379')"
cache_config_file_cache=$( warp_question_ask_default "Set Redis configuration file: $(warp_message_info [./.warp/docker/config/redis/redis.conf]) " "./.warp/docker/config/redis/redis.conf" )
warp_message_info2 "Selected configuration file: $cache_config_file_cache"
# Append the compose fragment and environment variables.
cat $PROJECTPATH/.warp/setup/redis/tpl/redis_cache.yml >> $DOCKERCOMPOSEFILESAMPLE
echo "REDIS_CACHE_VERSION=$resp_version_cache" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "REDIS_CACHE_CONF=$cache_config_file_cache" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "" >> $ENVIRONMENTVARIABLESFILESAMPLE
# Control will enter here if $PATH_CONFIG_REDIS doesn't exist.
if [ ! -d "$PATH_CONFIG_REDIS" ]; then
cp -R ./.warp/setup/redis/config/redis $PATH_CONFIG_REDIS
fi
warp_message ""
fi;
# --- Redis Session ----------------------------------------------------
while : ; do
respuesta_redis_session=$( warp_question_ask_default "Do you want to add a service for Redis Session? $(warp_message_info [Y/n]) " "Y" )
if [ "$respuesta_redis_session" = "Y" ] || [ "$respuesta_redis_session" = "y" ] || [ "$respuesta_redis_session" = "N" ] || [ "$respuesta_redis_session" = "n" ] ; then
break
else
warp_message_warn "wrong answer, you must select between two options: $(warp_message_info [Y/n]) "
fi
done
if [ "$respuesta_redis_session" = "Y" ] || [ "$respuesta_redis_session" = "y" ]
then
if [ $MSJ_REDIS_VERSION_HUB = 1 ] ; then
warp_message_info2 "You can check the Redis versions available here: $(warp_message_info '[ https://hub.docker.com/_/redis/ ]')"
MSJ_REDIS_VERSION_HUB=0 # False
echo "#Config Redis" >> $ENVIRONMENTVARIABLESFILESAMPLE
fi
resp_version_session=$( warp_question_ask_default "What version of Redis Session do you want to use? $(warp_message_info [5.0]) " "5.0" )
warp_message_info2 "Selected version of Redis Session: $resp_version_session, in the internal port 6379 $(warp_message_bold 'redis-session:6379')"
cache_config_file_session=$( warp_question_ask_default "Set Redis configuration file: $(warp_message_info [./.warp/docker/config/redis/redis.conf]) " "./.warp/docker/config/redis/redis.conf" )
warp_message_info2 "Selected configuration file: $cache_config_file_session"
cat $PROJECTPATH/.warp/setup/redis/tpl/redis_session.yml >> $DOCKERCOMPOSEFILESAMPLE
echo "REDIS_SESSION_VERSION=$resp_version_session" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "REDIS_SESSION_CONF=$cache_config_file_session" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "" >> $ENVIRONMENTVARIABLESFILESAMPLE
# Control will enter here if $PATH_CONFIG_REDIS doesn't exist.
if [ ! -d "$PATH_CONFIG_REDIS" ]; then
cp -R ./.warp/setup/redis/config/redis $PATH_CONFIG_REDIS
fi
warp_message ""
fi;
# --- Redis FPC (full-page cache) -------------------------------------
while : ; do
respuesta_redis_fpc=$( warp_question_ask_default "Do you want to add a service for Redis FPC? $(warp_message_info [Y/n]) " "Y" )
if [ "$respuesta_redis_fpc" = "Y" ] || [ "$respuesta_redis_fpc" = "y" ] || [ "$respuesta_redis_fpc" = "N" ] || [ "$respuesta_redis_fpc" = "n" ] ; then
break
else
warp_message_warn "wrong answer, you must select between two options: $(warp_message_info [Y/n]) "
fi
done
if [ "$respuesta_redis_fpc" = "Y" ] || [ "$respuesta_redis_fpc" = "y" ]
then
if [ $MSJ_REDIS_VERSION_HUB = 1 ] ; then
warp_message_info2 "You can check the Redis versions available here: $(warp_message_info '[ https://hub.docker.com/_/redis/ ]')"
MSJ_REDIS_VERSION_HUB=0 # False
#echo "#Config Redis" >> $ENVIRONMENTVARIABLESFILESAMPLE
fi
resp_version_fpc=$( warp_question_ask_default "What version of Redis FPC do you want to use? $(warp_message_info [5.0]) " "5.0" )
warp_message_info2 "Selected Redis FPC version: $resp_version_fpc, in the internal port 6379 $(warp_message_bold 'redis-fpc:6379')"
cache_config_file_fpc=$( warp_question_ask_default "Set Redis configuration file: $(warp_message_info [./.warp/docker/config/redis/redis.conf]) " "./.warp/docker/config/redis/redis.conf" )
warp_message_info2 "Selected configuration file: $cache_config_file_fpc"
cat $PROJECTPATH/.warp/setup/redis/tpl/redis_fpc.yml >> $DOCKERCOMPOSEFILESAMPLE
echo "REDIS_FPC_VERSION=$resp_version_fpc" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "REDIS_FPC_CONF=$cache_config_file_fpc" >> $ENVIRONMENTVARIABLESFILESAMPLE
echo "" >> $ENVIRONMENTVARIABLESFILESAMPLE
# Control will enter here if $PATH_CONFIG_REDIS doesn't exist.
if [ ! -d "$PATH_CONFIG_REDIS" ]; then
cp -R ./.warp/setup/redis/config/redis $PATH_CONFIG_REDIS
fi
warp_message ""
fi;
| true
|
08528a5f4c35b43368098c3810d3e2aa54a3acce
|
Shell
|
wework/ray
|
/packages/core/tools/build_chromatic.sh
|
UTF-8
| 203
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# On master, every change has already been approved through review,
# so auto-accept snapshots; on any other branch run a normal test.
if [ "${CIRCLE_BRANCH}" = "master" ];
then
  npx chromatic test --auto-accept-changes "${@}"
else
  npx chromatic test "${@}"
fi
| true
|
0651c170550efa0687a7bf159711d49b795548f5
|
Shell
|
kitzy/rtrouton_scripts
|
/rtrouton_scripts/enable_external_network_adapter/enable_external_network_adapter.sh
|
UTF-8
| 1,826
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Checks to see if the Mac is either a MacBook Pro Retina or MacBook Air
# If it's either of these machines, the script will then check for External Network Adapters
# If either adapter is present, it will add the adapter to network services
# Resolves an issue with USB & Thunderbolt Ethernet adapters with DeployStudio 1.6.3

# Register whichever external adapters are currently attached.
# (Extracted: this block was duplicated verbatim for each model family.)
add_adapters() {
  if [ "$usbAdapter" != "" ]; then
    /usr/sbin/networksetup -createnetworkservice USB\ Ethernet 'USB Ethernet'
    echo "USB Ethernet added to Network Services"
  else
    echo "No USB Adapter connected"
  fi
  if [ "$tbAdapter" != "" ]; then
    /usr/sbin/networksetup -createnetworkservice Thunderbolt\ Ethernet 'Thunderbolt Ethernet'
    echo "Thunderbolt Ethernet added to Network Services"
  else
    echo "No Thunderbolt Adapter connected"
  fi
}

mbpr=`system_profiler SPHardwareDataType | grep "Model Identifier" | awk '{print $3}' | cut -f1 -d ","`
mba=`system_profiler SPHardwareDataType | grep "Model Identifier" | awk '{print $3}' | cut -c-10`
usbAdapter=`/usr/sbin/networksetup -listallhardwareports | grep "Hardware Port: USB Ethernet"`
tbAdapter=`/usr/sbin/networksetup -listallhardwareports | grep "Hardware Port: Thunderbolt Ethernet"`
/usr/sbin/networksetup -detectnewhardware
# FIX: quote "$mbpr"/"$mba" (empty values made '[' fail) and replace the
# deprecated, ambiguous '-o' test operator with an explicit '||'.
if [ "$mbpr" = "MacBookPro10" ] || [ "$mbpr" = "MacBookPro11" ]; then
  add_adapters
elif [ "$mba" = "MacBookAir" ]; then
  add_adapters
else
  echo "This machine does not use external network adapters"
fi
exit 0
| true
|
dd96e7e69379ddb7270ed0683e566b0a6236dfb6
|
Shell
|
citusdata/packaging
|
/fetch_build_files
|
UTF-8
| 2,007
| 4.46875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Downloads the packaging files for a Citus project/format from GitHub
# and expands them into a target directory.
# make bash behave
set -euo pipefail
IFS=$'\n\t'
# constants
stdout=1
stderr=2
success=0
failure=1
badusage=64
noinput=66
packagingurl='https://github.com/citusdata/packaging'
download=$(mktemp)
# outputs usage message on specified device before exiting with provided status
usage() {
cat << 'E_O_USAGE' >&"$1"
usage: fetch_build_files project format target_directory
project : 'citus', 'enterprise', 'hll', or 'rebalancer'
format : 'deb', 'rpm', or 'pgxn'
fetch_build_files fetches files needed to package a specified Citus project for
a specified software packaging system.
E_O_USAGE
exit "${2}";
}
# -h alone prints help to stdout and succeeds.
if [ "$#" -eq 1 ] && [ "${1}" = '-h' ]; then
usage $stdout $success
fi
if [ "$#" -ne 3 ]; then
usage $stderr $badusage
fi
case "${1}" in
citus|enterprise|hll|rebalancer)
project=${1}
;;
*)
echo "$0: unknown project -- ${1}" >&2
usage $stderr $badusage
;;
esac
# Map the CLI format name to the branch-name component used upstream.
case "${2}" in
deb)
format='debian'
;;
rpm)
format='redhat'
;;
pgxn)
format='pgxn'
;;
*)
echo "$0: unknown format -- ${2}" >&2
usage $stderr $badusage
;;
esac
targetdir=$3
# validate inputs
if ! [ -d "${targetdir}" ]; then
echo "$0: ${targetdir}: Is not a directory" >&2
exit $noinput
elif [ ! -e "${targetdir}" ]; then
echo "$0: ${targetdir}: No such file" >&2
exit $noinput
elif [ ! -r "${targetdir}" ]; then
echo "$0: ${targetdir}: Permission denied" >&2
exit $noinput
fi
downloadurl="${packagingurl}/archive/${format}-${project}.tar.gz"
# download a tarball of the build files
httpcode=$(curl -sL "${downloadurl}" -w "%{http_code}" -o "${download}")
if [ "${httpcode}" -ne 200 ]; then
echo "$0: could not fetch build tarball from ${downloadurl}" >&2
echo "$0: HTTP code was: ${httpcode}" >&2
exit $failure
fi
# expand them directly into the target directory
tar xf "${download}" -C "${targetdir}" --strip-components 1
| true
|
c537d535fcca3104555acd6fe834324c6ce0cf2d
|
Shell
|
hshimamoto/debootstrapper
|
/scripts/build-perl.sh
|
UTF-8
| 358
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Builds a statically-linked miniperl from the Perl source tarball.
# NOTE(review): PushD/PopD/Make and the DL_DIR/WORK_DIR/PERL*
# variables come from 'scripts/default' — verify there.
. scripts/default
# Download the tarball once.
if [ ! -e $DL_DIR/$PERLTAR ]; then
PushD $DL_DIR
wget $PERLURL
PopD
fi
# Skip the build when miniperl already exists.
if [ -e $WORK_DIR/$PERL/miniperl ]; then
echo "Already exist"
exit 0
fi
PushD $WORK_DIR
tar zxf $DL_DIR/$PERLTAR
cd $PERL
# make static miniperl
./Configure -de
cat >> config.sh <<'EOF'
ldflags=" -static $ldflags"
EOF
./Configure -de
Make
PopD
| true
|
8a53323331e388567af83ef52033eb9ecda6ea03
|
Shell
|
newleal/bashScripting
|
/ejemplo06.sh
|
UTF-8
| 846
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
# Greets each user given on the command line, but only if every one of
# them is currently logged in (checked via 'who').

# Help function: prints usage and exit codes.
function ayuda(){
cat <<DESCRIPCION_AYUDA
SYNOPSIS
$0 NOMBRE_1 [NOMBRE_2]...[NOMBRE_N]
DESCRIPCION
Muestra "Hola NOMBRE_1, NOMBRE_2,...NOMBRE_N!" por pantalla.
CODIGOS DE RETORONO
1 Si el numero de parametros es menos que 1
2 Si el usuario no esta conectado
DESCRIPCION_AYUDA
}
# At least one argument is required.
if [ $# -le 0 ];then
echo "Hay que introducir al menos un argumento."
ayuda
exit 1
fi
MENSAJE="Hola"
PRIMERO=1
# While there are parameters left...
while [ -n "$1" ];do
ESTA_CONECTADO=`who | grep $1`
if [ -z "$ESTA_CONECTADO" ];then
echo "El usuario $1 no esta conectado"
ayuda
exit 2
fi
if [ $PRIMERO -eq 1 ];then
MENSAJE="$MENSAJE $1"
PRIMERO=0
else
MENSAJE="$MENSAJE, $1"
fi
# Move on to the next parameter.
shift
done
# Print the greeting.
# BUG FIX: the original line was 'echo${MENSAJE}"!"' (no space after
# echo), which tried to execute a command named "echoHola ..." instead
# of printing the message.
echo "${MENSAJE}!"
| true
|
c5003db849715d484d24d85219987df282cac0a0
|
Shell
|
Najmshosan/runc
|
/tests/integration/update.bats
|
UTF-8
| 9,336
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bats
load helpers
# Remove all per-test state: the temporary resource-update JSON and any
# containers/bundle left behind by a previous (possibly failed) run.
function teardown() {
  rm -f $BATS_TMPDIR/runc-cgroups-integration-test.json
  teardown_running_container test_update
  teardown_running_container test_update_rt
  teardown_busybox
}
# Prepare a busybox bundle whose config.json carries known initial cgroup
# resource limits, so each test can verify both the starting values and the
# effect of `runc update`.
function setup() {
  teardown
  setup_busybox
  set_cgroups_path "$BUSYBOX_BUNDLE"

  # Set some initial known values
  DATA=$(cat <<EOF
"memory": {
    "limit": 33554432,
    "reservation": 25165824
},
"cpu": {
    "shares": 100,
    "quota": 500000,
    "period": 1000000,
    "cpus": "0"
},
"pids": {
    "limit": 20
}
EOF
  )
  # NOTE(review): command substitution + echo already collapse the heredoc
  # onto one line, so the sed 's/\n/\\n/g' looks like a no-op on a single
  # line — confirm intent before touching it.
  DATA=$(echo ${DATA} | sed 's/\n/\\n/g')
  # Inject the limits into config.json: extend an existing "resources"
  # object if present, otherwise create one inside "linux".
  if grep -qw \"resources\" ${BUSYBOX_BUNDLE}/config.json; then
    sed -i "s/\(\"resources\": {\)/\1\n${DATA},/" ${BUSYBOX_BUNDLE}/config.json
  else
    sed -i "s/\(\"linux\": {\)/\1\n\"resources\": {${DATA}},/" ${BUSYBOX_BUNDLE}/config.json
  fi
}
# Tests whatever limits are (more or less) common between cgroup
# v1 and v2: memory/swap, pids, and cpuset.
@test "update cgroup v1/v2 common limits" {
  [[ "$ROOTLESS" -ne 0 && -z "$RUNC_USE_SYSTEMD" ]] && requires rootless_cgroup
  # Rootless + systemd: all three controllers must be delegated to the
  # user's systemd slice, otherwise the test cannot exercise them.
  if [[ "$ROOTLESS" -ne 0 && -n "$RUNC_USE_SYSTEMD" ]]; then
    file="/sys/fs/cgroup/user.slice/user-$(id -u).slice/user@$(id -u).service/cgroup.controllers"
    # NOTE: delegation of cpuset requires systemd >= 244 (Fedora >= 32, Ubuntu >= 20.04).
    for f in memory pids cpuset; do
      if grep -qwv $f $file; then
        skip "$f is not enabled in $file"
      fi
    done
  fi
  init_cgroup_paths

  # run a few busyboxes detached
  runc run -d --console-socket $CONSOLE_SOCKET test_update
  [ "$status" -eq 0 ]

  # Set a few variables to make the code below work for both v1 and v2
  case $CGROUP_UNIFIED in
  no)
    MEM_LIMIT="memory.limit_in_bytes"
    MEM_RESERVE="memory.soft_limit_in_bytes"
    MEM_SWAP="memory.memsw.limit_in_bytes"
    SYSTEM_MEM=$(cat "${CGROUP_MEMORY_BASE_PATH}/${MEM_LIMIT}")
    SYSTEM_MEM_SWAP=$(cat "${CGROUP_MEMORY_BASE_PATH}/$MEM_SWAP")
    ;;
  yes)
    MEM_LIMIT="memory.max"
    MEM_RESERVE="memory.low"
    MEM_SWAP="memory.swap.max"
    SYSTEM_MEM="max"
    SYSTEM_MEM_SWAP="max"
    # checking swap is currently disabled for v2
    #CGROUP_MEMORY=$CGROUP_PATH
    ;;
  esac

  # check that initial values were properly set
  check_cgroup_value "cpuset.cpus" 0
  if [[ "$CGROUP_UNIFIED" = "yes" ]] && ! grep -qw memory "$CGROUP_PATH/cgroup.controllers"; then
    # This happen on containerized environment because "echo +memory > /sys/fs/cgroup/cgroup.subtree_control" fails with EINVAL
    skip "memory controller not available"
  fi
  check_cgroup_value $MEM_LIMIT 33554432
  check_cgroup_value $MEM_RESERVE 25165824
  check_cgroup_value "pids.max" 20

  # update cpuset if supported (i.e. we're running on a multicore cpu)
  cpu_count=$(grep -c '^processor' /proc/cpuinfo)
  if [ $cpu_count -gt 1 ]; then
    runc update test_update --cpuset-cpus "1"
    [ "$status" -eq 0 ]
    check_cgroup_value "cpuset.cpus" 1
  fi

  # update memory limit
  runc update test_update --memory 67108864
  [ "$status" -eq 0 ]
  check_cgroup_value $MEM_LIMIT 67108864
  # Under systemd the same limit must also be visible through the unit
  # property (name differs between cgroup v1 and v2).
  if [[ -n "${RUNC_USE_SYSTEMD}" ]] ; then
    if [ "$CGROUP_UNIFIED" != "yes" ]; then
      check_systemd_value "runc-cgroups-integration-test.scope" "MemoryLimit=" "MemoryLimit=67108864"
    else
      check_systemd_value "runc-cgroups-integration-test.scope" "MemoryMax=" "MemoryMax=67108864"
    fi
  fi

  # Human-readable size suffixes ("50M") must be accepted too.
  runc update test_update --memory 50M
  [ "$status" -eq 0 ]
  check_cgroup_value $MEM_LIMIT 52428800
  if [[ -n "${RUNC_USE_SYSTEMD}" ]] ; then
    if [ "$CGROUP_UNIFIED" != "yes" ]; then
      check_systemd_value "runc-cgroups-integration-test.scope" "MemoryLimit=" "MemoryLimit=52428800"
    else
      check_systemd_value "runc-cgroups-integration-test.scope" "MemoryMax=" "MemoryMax=52428800"
    fi
  fi

  # update memory soft limit
  runc update test_update --memory-reservation 33554432
  [ "$status" -eq 0 ]
  check_cgroup_value "$MEM_RESERVE" 33554432

  # Run swap memory tests if swap is available
  if [ -f "$CGROUP_MEMORY/$MEM_SWAP" ]; then
    # try to remove memory swap limit
    runc update test_update --memory-swap -1
    [ "$status" -eq 0 ]
    check_cgroup_value "$MEM_SWAP" $SYSTEM_MEM_SWAP

    # update memory swap
    runc update test_update --memory-swap 96468992
    [ "$status" -eq 0 ]
    check_cgroup_value "$MEM_SWAP" 96468992
  fi

  # try to remove memory limit
  runc update test_update --memory -1
  [ "$status" -eq 0 ]

  # check memory limit is gone
  check_cgroup_value $MEM_LIMIT $SYSTEM_MEM

  # check swap memory limited is gone
  # NOTE(review): this compares against $SYSTEM_MEM rather than
  # $SYSTEM_MEM_SWAP (used above); the two usually coincide on the root
  # cgroup, but confirm this is intentional.
  if [ -f "$CGROUP_MEMORY/$MEM_SWAP" ]; then
    check_cgroup_value $MEM_SWAP $SYSTEM_MEM
  fi

  # update pids limit
  runc update test_update --pids-limit 10
  [ "$status" -eq 0 ]
  check_cgroup_value "pids.max" 10
  if [[ -n "${RUNC_USE_SYSTEMD}" ]] ; then
    check_systemd_value "runc-cgroups-integration-test.scope" "TasksMax=" "TasksMax=10"
  fi

  # Revert to the test initial value via json on stdin
  runc update -r - test_update <<EOF
{
  "memory": {
    "limit": 33554432,
    "reservation": 25165824
  },
  "cpu": {
    "shares": 100,
    "quota": 500000,
    "period": 1000000,
    "cpus": "0"
  },
  "pids": {
    "limit": 20
  }
}
EOF
  [ "$status" -eq 0 ]
  check_cgroup_value "cpuset.cpus" 0
  check_cgroup_value $MEM_LIMIT 33554432
  check_cgroup_value $MEM_RESERVE 25165824
  check_cgroup_value "pids.max" 20

  # redo all the changes at once
  runc update test_update \
    --cpu-period 900000 --cpu-quota 600000 --cpu-share 200 \
    --memory 67108864 --memory-reservation 33554432 \
    --pids-limit 10
  [ "$status" -eq 0 ]
  check_cgroup_value $MEM_LIMIT 67108864
  check_cgroup_value $MEM_RESERVE 33554432
  check_cgroup_value "pids.max" 10

  # reset to initial test value via json file
  cat << EOF > $BATS_TMPDIR/runc-cgroups-integration-test.json
{
  "memory": {
    "limit": 33554432,
    "reservation": 25165824
  },
  "cpu": {
    "shares": 100,
    "quota": 500000,
    "period": 1000000,
    "cpus": "0"
  },
  "pids": {
    "limit": 20
  }
}
EOF
  runc update -r $BATS_TMPDIR/runc-cgroups-integration-test.json test_update
  [ "$status" -eq 0 ]
  check_cgroup_value "cpuset.cpus" 0
  check_cgroup_value $MEM_LIMIT 33554432
  check_cgroup_value $MEM_RESERVE 25165824
  check_cgroup_value "pids.max" 20
}
# cgroup v1-specific CPU bandwidth knobs: cfs period/quota and shares,
# updated via flags, via JSON on stdin, and via a JSON file.
@test "update cgroup v1 cpu limits" {
  [[ "$ROOTLESS" -ne 0 ]] && requires rootless_cgroup
  requires cgroups_v1

  # run a few busyboxes detached
  runc run -d --console-socket $CONSOLE_SOCKET test_update
  [ "$status" -eq 0 ]

  # check that initial values were properly set
  check_cgroup_value "cpu.cfs_period_us" 1000000
  check_cgroup_value "cpu.cfs_quota_us" 500000
  check_cgroup_value "cpu.shares" 100

  # update cpu-period
  runc update test_update --cpu-period 900000
  [ "$status" -eq 0 ]
  check_cgroup_value "cpu.cfs_period_us" 900000

  # update cpu-quota
  runc update test_update --cpu-quota 600000
  [ "$status" -eq 0 ]
  check_cgroup_value "cpu.cfs_quota_us" 600000

  # update cpu-shares
  runc update test_update --cpu-share 200
  [ "$status" -eq 0 ]
  check_cgroup_value "cpu.shares" 200

  # Revert to the test initial value via json on stdin
  runc update -r - test_update <<EOF
{
  "cpu": {
    "shares": 100,
    "quota": 500000,
    "period": 1000000
  }
}
EOF
  [ "$status" -eq 0 ]
  check_cgroup_value "cpu.cfs_period_us" 1000000
  check_cgroup_value "cpu.cfs_quota_us" 500000
  check_cgroup_value "cpu.shares" 100

  # redo all the changes at once
  runc update test_update \
    --cpu-period 900000 --cpu-quota 600000 --cpu-share 200
  [ "$status" -eq 0 ]
  check_cgroup_value "cpu.cfs_period_us" 900000
  check_cgroup_value "cpu.cfs_quota_us" 600000
  check_cgroup_value "cpu.shares" 200

  # reset to initial test value via json file
  cat << EOF > $BATS_TMPDIR/runc-cgroups-integration-test.json
{
  "cpu": {
    "shares": 100,
    "quota": 500000,
    "period": 1000000
  }
}
EOF
  runc update -r $BATS_TMPDIR/runc-cgroups-integration-test.json test_update
  [ "$status" -eq 0 ]
  check_cgroup_value "cpu.cfs_period_us" 1000000
  check_cgroup_value "cpu.cfs_quota_us" 500000
  check_cgroup_value "cpu.shares" 100
}
# Realtime scheduler limits (cpu.rt_period_us / cpu.rt_runtime_us), set
# both via JSON on stdin and via command-line flags.
# NOTE(review): unlike the other tests, the `runc update` exit statuses
# here are not asserted with [ "$status" -eq 0 ] — confirm whether the
# omission is intentional.
@test "update rt period and runtime" {
  [[ "$ROOTLESS" -ne 0 ]] && requires rootless_cgroup
  requires cgroups_rt

  # run a detached busybox
  runc run -d --console-socket $CONSOLE_SOCKET test_update_rt
  [ "$status" -eq 0 ]

  runc update -r - test_update_rt <<EOF
{
  "cpu": {
    "realtimePeriod": 800001,
    "realtimeRuntime": 500001
  }
}
EOF
  check_cgroup_value "cpu.rt_period_us" 800001
  check_cgroup_value "cpu.rt_runtime_us" 500001

  runc update test_update_rt --cpu-rt-period 900001 --cpu-rt-runtime 600001
  check_cgroup_value "cpu.rt_period_us" 900001
  check_cgroup_value "cpu.rt_runtime_us" 600001
}
| true
|
bd819b24c6cee845119dd7f00230ff7a7742516a
|
Shell
|
bbhunter/Recon-3
|
/create_words.sh
|
UTF-8
| 1,982
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate the wordlists used during the recon process (DNS bruteforce,
# content discovery, subdomain-takeover provider data, validated DNS
# resolvers) and remove intermediate data.
# Usage: ./create_words.sh <domain>

domain=$1
SECONDS=0

rm -rf wordlists        # -f added: do not fail when the directory is absent
mkdir -p wordlists

echo -e "\e[32m[*] Fetching DNS bruteforce wordlists ... \e[0m"
cd wordlists || exit 1
git clone -q https://gist.github.com/jhaddix/86a06c5dc309d08580a018c66354a056
# NOTE(review): this is the GitHub HTML blob page, not the raw file — the
# intended URL is probably raw.githubusercontent.com/.../subdomains.txt;
# confirm before changing.
# Bug fix: the download is no longer backgrounded with '&' — the very next
# line consumes subdomains.txt, so backgrounding raced against it.
wget https://github.com/assetnote/commonspeak2-wordlists/blob/master/subdomains/subdomains.txt > /dev/null 2>&1
# sort -u already deduplicates; the original's trailing `uniq` was redundant
cat 86a06c5dc309d08580a018c66354a056/all.txt subdomains.txt | sort -u > /root/Desktop/Recon/massdns/lists/brutesub.txt
cp /root/Desktop/Recon/massdns/lists/brutesub.txt /root/Desktop/Recon/wordlists/
rm -rf 86a06c5dc309d08580a018c66354a056 subdomains.txt

echo -e "\e[32m[*] Fetching content discovery wordlists ... \e[0m"
git clone -q https://gist.github.com/jhaddix/b80ea67d85c13206125806f0828f4d10
mv /root/Desktop/Recon/wordlists/b80ea67d85c13206125806f0828f4d10/content_discovery_all.txt /root/Desktop/Recon/wordlists/
rm -r /root/Desktop/Recon/wordlists/b80ea67d85c13206125806f0828f4d10

echo -e "\e[32m[*] Fetching providers data for ST ... \e[0m"
cp /root/go/src/github.com/anshumanbh/tko-subs/providers-data.csv /root/Desktop/Recon/wordlists

echo -e "\e[32m[*] Fetching DNS resolver lists ... \e[0m"
git clone -q https://github.com/Abss0x7tbh/bass.git
cd bass #&& pip3 install -r requirements.txt
python3 bass.py -d "$1" -o "$1_bass.txt" >/dev/null
mv "$1_bass.txt" /root/Desktop/Recon/wordlists/ && cd ..
rm -rf bass

git clone -q https://github.com/vortexau/dnsvalidator
cd dnsvalidator || exit 1
# Bug fix: install is no longer backgrounded — dnsvalidator is invoked on
# the very next line and needs the install to have completed.
python3 setup.py install > /dev/null 2>&1
dnsvalidator -tL https://public-dns.info/nameservers.txt -threads 20 -o resolvers.txt #>/dev/null
mv resolvers.txt /root/Desktop/Recon/wordlists/ && cd ..
rm -rf dnsvalidator

# Bug fix: both files were moved into the wordlists directory above, so the
# original `cat $1_bass.txt resolvers.txt` read two nonexistent paths in the
# current directory; reference them where they actually live.
cat "/root/Desktop/Recon/wordlists/$1_bass.txt" /root/Desktop/Recon/wordlists/resolvers.txt > /root/Desktop/Recon/massdns/lists/validated.txt

duration=$SECONDS
echo -e "\e[31m[*] $(($duration / 60)) minutes and $(($duration % 60)) seconds elapsed.\e[0m"
| true
|
0c7fc687903c0fbcb53bb3b6e2a7625b000ac420
|
Shell
|
Pingva74/mysql_sc
|
/mysql.split.sh
|
UTF-8
| 342
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
####
# Split MySQL dump SQL file into one file per table
####

# Require exactly one argument (the dump file). Bug fix: the original only
# printed the usage message and kept running, so csplit then operated on an
# empty argument.
if [ $# -ne 1 ] ; then
    echo "USAGE $0 DUMP_FILE"
    exit 1
fi

# Split on every table header. csplit leaves everything before the first
# table (the dump preamble) in table00, which is reused as a common header.
csplit -s -ftable "$1" "/-- Table structure for table/" {*}

mv table00 head
# Glob instead of parsing `ls` output; the table name is the identifier
# between backticks (0x60) on the first line of each chunk.
for FILE in table*; do
    NAME=$(head -n1 "$FILE" | cut -d$'\x60' -f2)
    cat head "$FILE" > "$NAME.sql"
done
rm head table*
| true
|
7096f9fe13301bfaa87a4064d396ae6237f290b1
|
Shell
|
teepark/puffin
|
/bench
|
UTF-8
| 302
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Benchmark bench_server.py with weighttp. With -p, toggle profiling in the
# server via SIGTTIN/SIGTTOU around the measured run (the server is expected
# to handle those signals).

if [ "$1" = "-p" ]; then
    PROF=1
    shift
fi

python bench_server.py "$1" &
# Bug/idiom fix: $! already holds the background PID; the original's
# SERVER=$(echo $!) forked a subshell just to echo it back.
SERVER=$!
sleep 1

# Tell the server to start profiling before the load is applied.
if [ -n "$PROF" ]; then
    kill -TTIN "$SERVER"
fi

weighttp -n100000 -c100 127.0.0.1:8000/

# Stop profiling and give the server a moment to flush results.
if [ -n "$PROF" ]; then
    kill -TTOU "$SERVER"
    sleep 1
fi

kill -HUP "$SERVER"
wait "$SERVER"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.