blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
fee8a1e5240e33dc5a5a26f61ccc27c4bef8acec | Shell | audioscience/doxygen-action | /entrypoint.sh | UTF-8 | 1,061 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Doxygen-publish action entry point.
# Arguments: $1 doxyfile-path  $2 working-dir  $3 output-dir  $4 hpi-dir
#            $5 product name (remote dir component)  $6 FTP user  $7 FTP password
# Validate the four directory arguments up front; previously a missing path only
# printed a warning and the script carried on with broken inputs.
if [ ! -d "$2" ]; then
  echo "Path $2 (working-dir) could not be found!"
  exit 1
fi
if [ ! -d "$1" ]; then
  echo "Path $1 (doxyfile-path) could not be found!"
  exit 1
fi
if [ ! -d "$3" ]; then
  echo "Path $3 (output-dir) could not be found!"
  exit 1
fi
if [ ! -d "$4" ]; then
  echo "Path $4 (hpi-dir) could not be found!"
  exit 1
fi
# Query the dotted HPI version string.
# NOTE(review): {"dotted"} expands to the literal {dotted} in bash -- presumably
# hpi_version.py expects exactly that token; confirm before changing.
hpiversion=$(python "$4/hpi_version.py" {"dotted"})
# Inject the version and output directory into the Doxyfile template.
sed "s!PROJECT_NUMBER.*!PROJECT_NUMBER = Version_$hpiversion!g;s!OUTPUT_DIRECTORY.*!OUTPUT_DIRECTORY = $3/dox!g" "$1/doxyfile.dox" > "$3/d.dox"
cd "$2" || exit 1
doxygen "$3/d.dox" > "$3/doxygen.log"
cp stylesheet.css "$3/dox/html"
# Rename the "Modules" tab to "Functions" in the generated HTML.
sed -i "s/Modules/Functions/g" "$3"/dox/html/*.html
hpi_version_minor=$(python "$4/hpi_version.py" {"minor"})
# Even minor versions publish to html (release), odd ones to beta_html.
if (( ${hpi_version_minor} % 2 == 0 ))
then
  DOCSUBDIR=html
else
  DOCSUBDIR=beta_html
fi
LOCAL_DIR=$3/dox/html
REMOTE_DIR=internet/download/sdk/$5_usermanual_html/${DOCSUBDIR}
HOST=ftp.audioscience.com
# Upload the generated manual; $6/$7 are the FTP user and password.
ftp -n $HOST <<END_SCRIPT
quote USER $6
quote PASS $7
passive
cd $REMOTE_DIR
lcd $LOCAL_DIR
prompt
mput *.html
mput *.png
mput *.css
quit
END_SCRIPT
| true |
439ac78da6e2bcdcb3cb8ed3cda7220afde6fa84 | Shell | Exist2Resist/nginx-dev-api-docker | /install.sh | UTF-8 | 3,647 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Provision a CentOS 7 container image: timezone, Python 2/3 toolchain, nginx,
# a patched nginx systemd unit, MS ODBC driver, and a startup script/unit.
rm -rf /etc/localtime
ln -s /usr/share/zoneinfo/$TZ /etc/localtime
yum install -y epel-release
yum update -y
yum install -y yum-utils gcc python-pip python-devel wget nginx tmux curl unzip
yum -y groupinstall development
pip install --upgrade pip
# FIX: was "pip install vitualenv" (typo) -- that package name does not exist
# (or worse, could be a typosquat); install the real virtualenv.
pip install virtualenv
##Install Python 3.x dev env tools
yum -y install https://centos7.iuscommunity.org/ius-release.rpm
yum -y install python36u
yum -y install python36u-pip
yum -y install python36u-devel
##Make env folder
mkdir /nginx /environments
##Change folder ownership and permission.
chown -R nobody:users /nginx /environments
chmod -R 755 /nginx /environments
##Append the nginx service file, copy new configuration file.
##Shouldn't be changing the /usr/lib/systemd file, should be working in dopr in file /etc/systemd/system
##but the systemd python replacement script does not support 'systemctl edit nginx.service' drop in files.
FILE="/usr/lib/systemd/system/nginx.service"
DEST="/etc/systemd/system/nginx.service"
LINE=$(grep -n "ExecStart=" "$FILE" | cut -d: -f1)
APPEND=$(grep "ExecStart=" "$FILE")
CONFIG=" -c /nginx/nginx.conf"
# Rewrite ExecStart to point nginx at the config in the /nginx volume.
awk -v "LINE=$LINE" -v "APPEND=$APPEND" -v "CONFIG=$CONFIG" \
    'NR==LINE{gsub(APPEND, APPEND""CONFIG)};1' \
    $FILE > $FILE"tmp" && mv -f $FILE"tmp" $DEST
#Add ExecPreStartup=/usr/local/bin/start.sh to line 11 of the nginx unit file
sed -i '11 i ExecStartPre=/usr/local/bin/start.sh' /etc/systemd/system/nginx.service
# FIX: the two continuation lines of the awk command below were left
# uncommented (a backslash at the end of a comment does NOT continue the
# comment), so they executed '...' as a command at provision time.
#LINE_U=$(cat $FILE | grep -n "After=" | cut -d: -f1)
#APPEND_U=$(cat $FILE | grep "After=")
#AFTER=" startup.service"
#awk -v "LINE_U=$LINE_U" -v "APPEND_U=$APPEND_U" -v "AFTER=$AFTER" \
#    'NR==LINE_U{gsub(APPEND_U, APPEND_U""AFTER)};1' \
#    $DEST > $DEST"tmp" && mv -f $DEST"tmp" $DEST
#sed -i '1s/^/[Unit]\nDescription=Custom Nginx Service Unit.\nAfter=startup.service\n\n/' /etc/systemd/system/nginx.service
##Install ODBC driver for RedHat7
curl https://packages.microsoft.com/config/rhel/7/prod.repo > /etc/yum.repos.d/mssql-release.repo
yum remove -y unixODBC-utf16 unixODBC-utf16-devel #to avoid conflicts
ACCEPT_EULA=Y yum install -y msodbcsql17
echo 'export PATH="$PATH:/opt/mssql-tools/bin"' >> ~/.bash_profile
echo 'export PATH="$PATH:/opt/mssql-tools/bin"' >> ~/.bashrc
source ~/.bashrc
yum install -y unixODBC-devel
## Startup Script (quoted 'EOF': written verbatim, no expansion here)
cat <<'EOF' > /usr/local/bin/start.sh
#!/bin/bash
#Sets the timezone in the container and sets up dev environment.
TIMEZONE=${TZ:-America/Edmonton}
rm -rf /etc/localtime
ln -s /usr/share/zoneinfo/$TIMEZONE /etc/localtime
#Set UID and GID and change folder permission
USERID=${PUID:-99}
GROUPID=${GUID:-100}
groupmod -g $GROUPID users
usermod -u $USERID nobody
usermod -g $USERID nobody
usermod -d /home nobody
chown -R nobody:users /nginx /environments
chmod -R 755 /nginx /environments
#Check if nginx config file exists if not copy it
if [[ ! -f /nginx/nginx.conf ]];then
echo "Creating Nginx configuration file in volume"
cp /etc/nginx/nginx.conf /nginx
fi
#Create new pythong dev env
cd /environments
python3.6 -m venv $DEV_ENV
#Activate environment
source $DEV_ENV/bin/activate
EOF
chmod 755 /usr/local/bin/start.sh
##Create Startup service for the above script
cat <<'EOF' > /etc/systemd/system/startup.service
[Unit]
Description=Startup Script, sets TZ, user/group, config location, and dev environment.
Before=nginx.service
[Service]
Type=simple
ExecStart=/usr/local/bin/start.sh
TimeoutStartSec=0
[Install]
WantedBy=default.target
EOF
yum clean all
#systemctl enable startup.service
#service fails added ExecPreStartup to nginx.servce
systemctl enable nginx.service
acf4f4d41aad61cf7b1d0dc4459634c78858d7ca | Shell | Major-Lee/my-first-company | /msip-bhu-deploy/testecho.sh | UTF-8 | 550 | 3.15625 | 3 | [] | no_license | #echo -n "please confirm your operation:(yes/no)"
# Demo of interactive confirmation: read a single keypress and branch on it.
# The commented-out section is an earlier variant using a 5-second timed read.
#read name
#echo "hello $name, welcome to IT website"
#if read -t 5 -p "please input your name:"
#then
# echo "hello $REPLY, welcome to come back here"
#else
# echo "sorry , you are too slow "
# exit
#fi
# read one keypress; with no variable name the answer lands in $REPLY
read -n1 -p "do you want to continue [Y/N]?"
case $REPLY in
Y | y) echo
echo "fine ,continue on ..";;
N | n) echo
echo "OK, goodbye..."
exit
;;
# any other key: reject and exit
*) echo
echo "only accept Y,y,N,n"
exit
;;
#exit
esac
echo "starting"
f32ee55596e890ed5465361be1a1cf1e2623210d | Shell | myminseok/pcf-apptx-pipelines | /tasks/test-smoke/projectType/pipeline-gradle.sh | UTF-8 | 569 | 3.109375 | 3 | [] | no_license | #!/bin/bash
set -e
# Run the Gradle "smoke" task against a deployed app and stubrunner instance.
# Reads APPLICATION_URL, STUBRUNNER_URL, PIPELINE_VERSION and CI from the
# environment; exported with `export -f` so child shells can call it.
function runSmokeTests() {
    local applicationHost="${APPLICATION_URL}"
    local stubrunnerHost="${STUBRUNNER_URL}"
    echo "Running smoke tests"
    if [[ "${CI}" == "CONCOURSE" ]]; then
        # NOTE(review): printTestResults is defined elsewhere (sourced pipeline
        # library); the `( ... && return 1 )` runs in a subshell, so the
        # `return 1` only fails the subshell, which under `set -e` aborts the
        # caller -- confirm that is the intended failure path.
        ./gradlew smoke -PnewVersion=${PIPELINE_VERSION} -Dapplication.url="${applicationHost}" -Dstubrunner.url="${stubrunnerHost}" || ( echo "$( printTestResults )" && return 1)
    else
        ./gradlew smoke -PnewVersion=${PIPELINE_VERSION} -Dapplication.url="${applicationHost}" -Dstubrunner.url="${stubrunnerHost}"
    fi
}
export -f runSmokeTests
41125f640fac3366de4c0c0b8dee63261c486201 | Shell | bruno-fs/dstserver | /dstserver | UTF-8 | 1,486 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Location of the steamcmd installation used to fetch/update the server.
steamcmd_dir="$HOME/.steam/steamcmd"
# Don't Starve Together dedicated-server install target inside steamcmd.
install_dir="$steamcmd_dir/dstserver"
# Cluster to launch; first CLI argument, defaulting to "MyDediServer".
cluster_name="${1:-MyDediServer}"
# Klei configuration root holding the cluster definitions.
dontstarve_dir="$HOME/.klei/DoNotStarveTogether"
# Print an error message ("Error: <args>") to stderr and terminate the whole
# script with exit status 1.
fail() {
  printf '%s\n' "Error: $*" >&2
  exit 1
}
# Abort via fail() unless the given path exists (file or directory).
check_for_file() {
  [ -e "$1" ] || fail "Missing file: $1"
}
# Collect every "workshop-<id>" mod referenced by any cluster's
# modoverrides.lua and append matching ServerModSetup("<id>") lines to
# steamcmd's dedicated_server_mods_setup.lua so the mods get downloaded.
function install_mods()
{
	# get all mods from your current clusters
	# NOTE(review): ">>" appends on every run, so duplicate ServerModSetup
	# lines accumulate across restarts -- presumably harmless; confirm.
	grep workshop- $dontstarve_dir/*/*/modoverrides.lua | \
		perl -pe 's/.*-(\w+)\".*/ServerModSetup("$1")/' | \
		sort -Vu >> "$install_dir/mods/dedicated_server_mods_setup.lua"
}
# Validate configuration, update the DST server via steamcmd, register mods,
# then launch the Caves and Master shards (Caves in the background).
function run()
{
	echo "Starting: $cluster_name"
	echo "!!! !!!"
	cd "$steamcmd_dir" || fail "Missing $steamcmd_dir directory!"
	check_for_file "steamcmd.sh"
	check_for_file "$dontstarve_dir/$cluster_name/cluster.ini"
	check_for_file "$dontstarve_dir/$cluster_name/cluster_token.txt"
	check_for_file "$dontstarve_dir/$cluster_name/Master/server.ini"
	check_for_file "$dontstarve_dir/$cluster_name/Caves/server.ini"
	# Fetch/update app 343050 (DST dedicated server) into $install_dir.
	steamcmd +force_install_dir "$install_dir" +login anonymous +app_update 343050 validate +quit
	check_for_file "$install_dir/bin"
	cd "$install_dir/bin" || fail
	install_mods
	# Shared command line for both shards; -monitor_parent_process $$ makes
	# the shards exit when this script dies.
	run_shared=(./dontstarve_dedicated_server_nullrenderer)
	run_shared+=(-cluster "$cluster_name")
	run_shared+=(-monitor_parent_process $$)
	# Prefix each shard's output so the interleaved logs stay readable.
	"${run_shared[@]}" -shard Caves | sed 's/^/Caves: /' &
	"${run_shared[@]}" -shard Master | sed 's/^/Master: /'
}
run
| true |
9aa39c14195a2a58ceaab250e3083f1d8bf1a9a3 | Shell | flugsio/dotfiles | /bin/rs | UTF-8 | 1,612 | 3.65625 | 4 | [] | no_license | #!/bin/bash
#set -e
# wip
# Build and run one rspec/rake command per file argument, collecting failures.
# Argument string is split at the first " -": everything before it is the file
# list, everything after (re-prefixed with "-") is treated as options.
function process
{
  args="$@"
  files=${args%% -*}
  opts=-${args#* -}
  # no " -" present: the whole string was files, so there are no opts
  if [ "$opts" = "-$files" ]; then
    opts=""
  fi
  files=${files:-.}
  if [ "$opts" = "-d" ]; then
    opts="-fd --order defined"
  elif [ "$opts" = "-f" ] || grep -qe "\(fit\|fdescribe\|fcontext\)" $files; then
    # focused specs present: run only the focus-tagged examples
    opts="-fd --order defined -t focus"
  fi
  declare -a errors
  declare -a commands
  for file in $files; do
    # Rewrite the path into a "cd <project> && rspec/rake ..." command line.
    command=`echo $file | sed -r "s/((spec\/|test\/|release\/)|(\w+|)\/?)(.*)/cd .\/\3 \&\& if [ -d spec ]; then bundle exec rspec --order defined --no-color \2\4 $opts; else bundle exec rake test \2\4 $opts; fi/"`
    #command=`echo $file | sed -r "s/(\w+)\/(.*)/cd \1 \&\& bundle exec rspec \2 spec\/promote\/promote\/copy_spec.rb/"`
    #command=`echo $file | sed -r "s/(\w+)\/(\w+)\/(.*)/bundle exec rake konacha:run SPEC=\3/"`
    if [ -n "$VAGRANT_MACHINE" ]; then
      command="ssh -tt $VAGRANT_MACHINE \"bash -O huponexit -lc 'cd $VAGRANT_DIR; $command'\""
    elif [[ $PWD = *promote-docker* ]]; then
      command="docker-compose run --rm promote bash -O huponexit -lc '$command'"
      #command="docker-compose run --rm fetch bash -O huponexit -lc '$command'"
    else
      # NOTE(review): this runs $opts as a command with command=... in its
      # environment; presumably `command="$command $opts"` was intended
      # (opts are already embedded by the sed above) -- confirm.
      command=$command $opts
    fi
    commands+=("$command")
    echo "$command"
    if ! echo "$command" | sh; then
      errors+=("$file")
    fi
  done
  if [ ${#errors[*]} -ne 0 ]; then
    echo "Errors: ${errors[*]}/${commands[@]}"
    exit 1
  else
    echo Success: ${#commands[@]}/${#commands[@]}
    exit 0
  fi
}
# With a single "-" argument, read the file list from stdin;
# otherwise pass CLI arguments straight through to process().
if [ "$1" = "-" ]
then
  process "$(</dev/stdin)"
else
  process "$@"
fi
| true |
a18d281784fc5fcc99ad45e216c0895f21ce97c8 | Shell | 2aven/edv | /backup-db.sh | UTF-8 | 202 | 2.53125 | 3 | [] | no_license | #!/bin/bash
# Nightly gzip'd mysqldump of the "edv" database from the edv-db container.
backups_dir="./db_backup/"
# FIX: $db_name was used below but never defined, so backups were written as
# "--<datetime>.sql.gz"; name it after the database being dumped.
db_name="edv"
datetime=$(date +'%Y-%m-%dT%H:%M:%S')
docker exec edv-db /usr/bin/mysqldump -u root --password=contrasenya "$db_name" | gzip -9 > "${backups_dir}${db_name}--${datetime}.sql.gz"
| true |
1b09dcd786b87984ea601a748e9b905207dfd9a7 | Shell | echoheqian/whi-diet-response | /scripts/archive/gwas_pipeline_gen2_int.sh | UTF-8 | 513 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# Usage: gwas_pipeline DIET PHENO DIR
# Submits per-cohort GWAS jobs, a delayed meta-analysis, and (disabled)
# score calculation to Slurm.
DIET=$1
PHENO=$2
DIR=$3
mkdir -p $DIR
# Run GWAS
sbatch --mem 60G -t 5:00:00 run_gwas_int/run_gwas_fhs.sh $DIET $PHENO $DIR
sbatch --mem 80G -t 2:00:00 run_gwas_int/run_gwas_whi.sh white $DIET $PHENO $DIR
sbatch --mem 50G -t 2:00:00 run_gwas_int/run_gwas_mesa.sh white $DIET $PHENO $DIR
# Meta-analysis
# (--begin=now+5hours assumes the GWAS jobs finish within their 5 h wallclock)
sbatch --mem 30G -t 40:00 --begin=now+5hours --wrap="./meta_analysis_white_int.sh $DIR"
## Calculate scores in WHI
#sbatch --mem 50G -t 40:00 --begin=now+90minutes --wrap="./pt_model.sh $DIR"
37426b81f256567281b1b4340108e0a7ce4bb0d7 | Shell | mrihtar/Docker-helper-scripts | /dkrmi | UTF-8 | 1,044 | 4.09375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Remove one or more Docker images by id/name. "-a"/"all" removes every image,
# "-d" removes dangling images only. The image table is produced by dki.awk
# (id in column 1, name in column 2) from `docker images`.
TMPFILE=`mktemp /tmp/${0##*/}.XXXXX`
trap 'rm -f $TMPFILE' 0
HWD=`cd "${0%/*}" >/dev/null 2>&1; echo $PWD`
#
ALL=""; LIST=""
while [ $# -gt 0 ]; do
  case "$1" in
    -a | all ) ALL="all"; shift ;;
    -d ) ALL="dangling"; shift ;;
    -? ) echo "Usage: ${0##*/} [-a|-d] [id/name ...]" 1>&2; exit 1 ;;
    * ) [ -n "$1" ] && LIST+=" $1"; shift ;;
  esac
done
[ -z "$LIST" -a -n "$ALL" ] && LIST=all
#
case "$ALL" in
# all ) docker images -a | awk -f "$HWD/dki.awk" > $TMPFILE
  dangling ) docker images -a -f dangling=true | awk -f "$HWD/dki.awk" > $TMPFILE ;;
  * ) docker images -a | awk -f "$HWD/dki.awk" > $TMPFILE ;;
esac
#
typeset -i RC=0
for dkn in $LIST; do
  # FIX: was `while IFS= read -a A` -- clearing IFS disables word splitting,
  # so the whole line landed in A[0] and ${A[1]} was always empty.
  while read -r -a A; do
    id=${A[0]}
    name=${A[1]}
    # unnamed images (or an id match) are addressed by their id
    [ "$name" == "<none>" -o "$dkn" == "$id" ] && name=$id
    if [ "$dkn" == "$name" -o -n "$ALL" ]; then
      [ $RC -gt 0 ] && echo ""
      echo "Removing $name:"
      docker rmi -f $id
      [ $? -eq 0 ] && let RC=RC+1
    fi
  done < $TMPFILE
done
[ $RC -gt 0 ] && echo ""
echo "$RC images removed"
# Lambda custom-runtime handler: echoes the incoming event ($1) plus some
# environment diagnostics back as the response.
function handler () {
  EVENT_DATA=$1
  # Log the raw event to stderr (ends up in CloudWatch logs).
  echo "$EVENT_DATA" 1>&2;
  RESPONSE=$(cat <<EOS
Echoing request: '$EVENT_DATA'
uname -a: $(uname -a)
awscli version: $(aws --version 2>&1)
jq version: $(jq --version)
EOS
)
  # FIX: quote the expansion -- the unquoted `echo $RESPONSE` collapsed the
  # multi-line response onto a single line.
  echo "$RESPONSE"
}
| true |
ad3826cd961296781d58ab05eac9da57308bb765 | Shell | bcdev/wv-cci-toolbox | /src/main/bin/cems_shared/wvcci-inst/bin/wvcci-l2-tcwv-modis-bash_5.sh | UTF-8 | 9,201 | 3.453125 | 3 | [] | no_license | #!/bin/bash
set -x
set -e
####################################################################
# Script for collocation of EraInterim auxdata product with MODIS L1b
# and subsequent TCWV retrieval. Follows original scripts
# - era-interim-<sensor>-prepare
# - era-interim-<sensor>-process
# built on Calvalus by MB
####################################################################
###############################
# PREPARE step (was on Calvalus)
###############################
# Positional arguments:
#   $1 l1bPath       - full path of the MODIS L1b product
#   $2 l1bName       - file name, e.g. MOD021KM.A2008016.1540.006.2012066182519.hdf
#   $3 cloudMaskPath - MOD35_L2 cloud product, e.g. MOD35_L2.A2008016.1540...hdf
#   $4 sensor
#   $5/$6/$7 year/month/day (month/day are re-derived from the file name below)
#   $8 wvcciRootDir
l1bPath=$1
l1bName=$2
cloudMaskPath=$3
sensor=$4
year=$5
month=$6
day=$7
wvcciRootDir=$8
# Skip night-time granules. The helper prints "true"/"false"; the $( ) runs
# that output as a command, so its exit status drives the if (this is why
# there are no [ ] brackets here).
if ! $(python "$WVCCI_INST/bin/check_if_modis_daily_product.py" "$l1bPath"); then
    echo "SKIP nightly product $l1bPath ..."
    exit 0
else
    echo "DAILY product: $l1bPath ..."
    echo "START processing - wallclock time is: $(date)"
fi
tmpdir=$wvcciRootDir/tmp
mkdir -p "$tmpdir"
modisstem=${l1bName%.hdf}
# Acquisition time is encoded in the file name: MOD021KM.AYYYYDDD.HHMM...
hour=${l1bName:18:2}
minute=${l1bName:20:2}
# FIX: $doy was used below but never set anywhere, which silently broke the
# date arithmetic; extract the day-of-year from the file name like hour/minute.
doy=${l1bName:14:3}
# "1$doy - 1000" strips leading zeros (e.g. 1016 - 1000 = 16).
date_in_seconds=$(($(date +%s -u -d "$year-01-01 $hour:$minute:00") + ( 1$doy - 1000 ) * 86400 - 86400))
# Re-derive month/day from the acquisition date (overrides arguments $6/$7).
month=$(date --date "@$date_in_seconds" +%m)
day=$(date --date "@$date_in_seconds" +%d)
acquisitionTime=$year$month$day
# Derive sibling output directories by substituting the 'L1b' path component.
l1bDir=$(dirname "$l1bPath")
current="L1b"
replace="L1bEraInterim"
l1bEraInterimDir=${l1bDir/$current/$replace}
mkdir -p "$l1bEraInterimDir"
replace="Tcwv"
tcwvDir=${l1bDir/$current/$replace}
mkdir -p "$tcwvDir"
eramodis=${tmpdir}/${modisstem}_era-interim.nc
echo "eramodis: $eramodis"
###############################
# PROCESS step (was on Calvalus)
###############################
auxroot=$wvcciRootDir/auxiliary
let day_before_in_seconds="date_in_seconds - 86400"
let day_after_in_seconds="date_in_seconds + 86400"
date_before=$(date +%Y-%m-%d -u -d @$day_before_in_seconds)
date_after=$(date +%Y-%m-%d -u -d @$day_after_in_seconds)
# Extract geo information in SCRIP format for the CDO remapping step.
echo "$(date +%Y-%m-%dT%H:%M:%S -u) extracting geo information of $l1bName ..."
scripfile=$tmpdir/${modisstem}-scrip.nc
if [ ! -e "$scripfile" ]; then
    echo "START gpt Write SCRIP - wallclock time is: $(date)"
    if ! "$SNAP_HOME/bin/gpt" Write -q 1 -PformatName=SCRIP -Pfile="$scripfile" "$l1bPath"
    then
        cat gpt.out
        exit 1
    fi
    echo "END gpt Write SCRIP - wallclock time is: $(date)"
fi
# Build an ERA-Interim time stack bracketing the acquisition date. On month
# boundaries the neighbouring month is merged in so temporal interpolation
# has data on both sides (1979-01 / 2015-09 are the dataset edges).
if [ "$day" = "01" -a "$year$month" != "197901" ]
then
    echo "$(date +%Y-%m-%dT%H:%M:%S -u) merge era stack with previous month ..."
    eradaybefore=$auxroot/era-interim-t2m-mslp-tcwv-u10-v10/${date_before:0:4}/era-interim-t2m-mslp-tcwv-u10-v10-${date_before:0:4}-${date_before:5:2}.nc
    erathisday=$auxroot/era-interim-t2m-mslp-tcwv-u10-v10/$year/era-interim-t2m-mslp-tcwv-u10-v10-$year-$month.nc
    # FIX: merged stacks were previously written into the current working
    # directory (and, in the end-of-month branch, under an inconsistent
    # name); write them into $tmpdir with a uniform name instead.
    eratimestack=$tmpdir/era-interim-t2m-mslp-tcwv-u10-v10-${date_before:0:4}-${date_before:5:2}-$year-$month.nc
    echo "START cdo mergetime - wallclock time is: $(date)"
    cdo -b 32 mergetime "$eradaybefore" "$erathisday" "$eratimestack"
    echo "END cdo mergetime - wallclock time is: $(date)"
elif [ "$day" = "31" -a "$year$month" != "201509" ]
then
    echo "$(date +%Y-%m-%dT%H:%M:%S -u) merge era stack with next month ..."
    erathisday=$auxroot/era-interim-t2m-mslp-tcwv-u10-v10/$year/era-interim-t2m-mslp-tcwv-u10-v10-$year-$month.nc
    eradayafter=$auxroot/era-interim-t2m-mslp-tcwv-u10-v10/${date_after:0:4}/era-interim-t2m-mslp-tcwv-u10-v10-${date_after:0:4}-${date_after:5:2}.nc
    eratimestack=$tmpdir/era-interim-t2m-mslp-tcwv-u10-v10-$year-$month-${date_after:0:4}-${date_after:5:2}.nc
    echo "START cdo mergetime - wallclock time is: $(date)"
    cdo -b 32 mergetime "$erathisday" "$eradayafter" "$eratimestack"
    echo "END cdo mergetime - wallclock time is: $(date)"
else
    eratimestack=$auxroot/era-interim-t2m-mslp-tcwv-u10-v10/$year/era-interim-t2m-mslp-tcwv-u10-v10-$year-$month.nc
fi
# Interpolate temporally to the acquisition time.
echo "$(date +%Y-%m-%dT%H:%M:%S -u) interpolate temporally ..."
eramodistimeslice=$tmpdir/era-interim-t2m-mslp-tcwv-u10-v10-$year$month$day$hour$minute.nc
# FIX: log label said "cdo mergetime" around the inttime call.
echo "START cdo inttime - wallclock time is: $(date)"
cdo inttime,$year-$month-$day,$hour:$minute:01 "$eratimestack" "$eramodistimeslice"
echo "END cdo inttime - wallclock time is: $(date)"
# Interpolate spatially onto the MODIS grid.
echo "$(date +%Y-%m-%dT%H:%M:%S -u) interpolate spatially ..."
eramodisspatial=$tmpdir/${modisstem}_era-interim_spatial.nc
echo "START cdo remapbil - wallclock time is: $(date)"
cdo -L -f nc4c remapbil,"$scripfile" "$eramodistimeslice" "$eramodisspatial"
echo "END cdo remapbil - wallclock time is: $(date)"
# Remove cdo input (the sleeps guard against filesystem latency on the cluster).
sleep 10
rm -f "$scripfile"
rm -f "$eramodistimeslice"
# Band subset: we need t2m, msl, tcwv, u10, v10.
echo "START gpt band Subset - wallclock time is: $(date)"
"$SNAP_HOME/bin/gpt" Subset -q 1 -Ssource="$eramodisspatial" -PbandNames=t2m,msl,tcwv,u10,v10 -f Netcdf4-BEAM -t "$eramodis"
echo "END gpt band Subset - wallclock time is: $(date)"
sleep 10
rm -f "$eramodisspatial"
# Merge L1b with the ERA-Interim band subset.
l1bEraInterimMerge=$l1bEraInterimDir/${modisstem}_l1b-era-interim.nc
echo "START gpt Merge - wallclock time is: $(date)"
"$SNAP_HOME/bin/gpt" ESACCI.MergeModisL1bEraInterim -q 1 -Sl1bProduct="$l1bPath" -SeraInterimProduct="$eramodis" -PeraInterimBandsToCopy=t2m,msl,tcwv,u10,v10 -f Netcdf4-BEAM -t "$l1bEraInterimMerge"
echo "END gpt Merge - wallclock time is: $(date)"
sleep 10
rm -f "$eramodis"
## TCWV retrieval
status=1
if [ -f "$l1bEraInterimMerge" ]; then
    auxdataPath=/home/users/odanne/.snap/auxdata/wvcci
    tcwv=$tcwvDir/${modisstem}_tcwv.nc
    # If existing, add the MOD35_L2 product as optional source product.
    # Write TCWV with NetCDF4-WVCCI in order NOT to write lat,lon.
    echo "START gpt Tcwv - wallclock time is: $(date)"
    status=0
    if [ -f "$cloudMaskPath" ]; then
        echo "${SNAP_HOME}/bin/gpt ESACCI.Tcwv -q 1 -e -SsourceProduct=$l1bEraInterimMerge -Smod35Product=$cloudMaskPath -PauxdataPath=$auxdataPath -Psensor=$sensor -PprocessOcean=true -f NetCDF4-WVCCI -t $tcwv"
        # FIX: capture gpt's own exit status; previously $? was read after an
        # unrelated echo/rm and was therefore always 0.
        "${SNAP_HOME}/bin/gpt" ESACCI.Tcwv -q 1 -e -SsourceProduct="$l1bEraInterimMerge" -Smod35Product="$cloudMaskPath" -PauxdataPath="$auxdataPath" -Psensor="$sensor" -PprocessOcean=true -f NetCDF4-WVCCI -t "$tcwv" || status=$?
    fi
    echo "END gpt Tcwv - wallclock time is: $(date)"
    echo "Status: $status"
fi
# Cleanup: remove the large L1b/ERA-Interim merge product (TCWV input),
# keeping Jan 15th and Jul 15th of each year as examples.
echo "START cleanup - wallclock time is: $(date)"
sleep 10
if [ -f "$l1bEraInterimMerge" ]; then
    if [ "$day" != "15" ] || ([ "$month" != "01" ] && [ "$month" != "07" ]); then
        echo "DELETING L1bEraInterim merge product : $l1bEraInterimMerge"
        rm -f "$l1bEraInterimMerge"
    fi
fi
if [ $status = 0 ] && [ -e "$tcwv" ]
then
    echo "TCWV product created."
    echo "Status: $status"
else
    echo "TCWV product NOT successfully created (corrupt or MODIS L1b is not a Day product)."
    echo "Status: $status"
    if [ -e "$tcwv" ]
    then
        # delete the unwanted/corrupt file
        echo "rm -f $tcwv"
        rm -f "$tcwv"
    fi
fi
echo "END cleanup - wallclock time is: $(date)"
echo "FINISHED job wvcci-tcwv-l2-modis - wallclock time is: $(date)"
f3df1be2f35899151ac8756a5da8c89778cecc1e | Shell | ras-martin/nginx-phpfpm | /hooks/build | UTF-8 | 1,047 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Docker Hub build hook: build one image per (php version, purpose, arch)
# combination. The arrays/maps (php_versions, build_purposes,
# build_architectures, docker_qemu_arch_map, base_image_prefix_map) and
# DOCKER_REPO come from hooks/.config.
source hooks/.config
set -e
for phpversion in ${php_versions[@]}; do
    for purpose in ${build_purposes[@]}; do
        for arch in ${build_architectures[@]}; do
            # only build combinations that have a Dockerfile directory
            if [ -d "purpose/${phpversion}/${purpose}" ]; then
                echo ">>> Build ${phpversion} ${purpose} for ${arch}, qemu architecture ${docker_qemu_arch_map[${arch}]}"
                BASE_IMAGE_PREFIX="${base_image_prefix_map[${arch}]}"
                docker build \
                    --no-cache \
                    --build-arg BASE_IMAGE_PREFIX=${BASE_IMAGE_PREFIX} \
                    --build-arg ARCH=${docker_qemu_arch_map[${arch}]} \
                    --build-arg PURPOSE=${purpose} \
                    --build-arg PHPVERSION=${phpversion} \
                    --build-arg BUILD_DATE="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \
                    --tag "${DOCKER_REPO}:${phpversion}-${purpose}-${arch}" \
                    -f purpose/${phpversion}/${purpose}/Dockerfile \
                    .
            fi
        done
    done
done
| true |
591610966c3f6ab5a610403c885ffc9a475a26d8 | Shell | bbendick/feedly | /feedly.sh | UTF-8 | 365 | 2.6875 | 3 | [] | no_license | # Export opml from feedly
# Convert from opml to json (at https://codebeautify.org/opmlviewer)
# Render feedly.json as Markdown: one "### <category>" heading per outline
# group, followed by "- [title](url)" bullets for each feed in that group.
cat feedly.json | jq -rc '.opml.body.outline[]' | while IFS='' read stack; do
  ccc=$(echo $stack | jq -r ._text);
  echo "### ${ccc}";
  jq --arg gg "$ccc" -r '.opml.body.outline[] | select(._text == $gg) | .outline[] | "- [\(._title)](\(._htmlUrl))"' feedly.json;
done
| true |
4ffce133871097f0069dcca35ae4c651e86e3d18 | Shell | jotapepinheiro/projeto-saas | /docker/nginx/startup.sh | UTF-8 | 724 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Container entry script: optionally (disabled) generate a self-signed TLS
# certificate, then start cron in the background and nginx in the foreground
# as PID 1. _CONTAINER_DOMAIN_ is a placeholder substituted at build time.
# if [ ! -f /etc/nginx/ssl/_CONTAINER_DOMAIN_.crt ]; then
#     openssl genrsa \
#     -out "/etc/nginx/ssl/_CONTAINER_DOMAIN_.key" 2048
#     openssl req -new -key "/etc/nginx/ssl/_CONTAINER_DOMAIN_.key" \
#     -out "/etc/nginx/ssl/_CONTAINER_DOMAIN_.csr" \
#     -subj "/CN=_CONTAINER_DOMAIN_/O=_CONTAINER_DOMAIN_/C=BR" \
#     -addext "subjectAltName=DNS:_CONTAINER_DOMAIN_" \
#     -addext "certificatePolicies=1.2.3.4"
#     openssl x509 -req -days 365 \
#     -in "/etc/nginx/ssl/_CONTAINER_DOMAIN_.csr" \
#     -signkey "/etc/nginx/ssl/_CONTAINER_DOMAIN_.key" \
#     -out "/etc/nginx/ssl/_CONTAINER_DOMAIN_.crt"
# fi
# Start crond in background
crond -l 2 -b
# Start nginx in foreground
nginx
| true |
55424e54547342cb7259030aa60101d396bd43fb | Shell | mrosata/new | /bin/destroy-new | UTF-8 | 1,495 | 4.28125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
################################################################################
####
#### bin/destroy-new.sh
#### @package: "New"
#### @author: mrosata
#### @date: 10-2016
#### @desc: This file is sourced from the blueprints package. It should
####        not be used as a stand alone executable. It will completely
####        remove a "new" blueprints templates folder from system.
####
#### Relies on $blueprint, $blueprint_folder, $running_mode, exit codes and
#### the helpers exit_with_code/add_verbose_msg from the sourcing script.
EDITOR=${EDITOR:-vi}
PAGER=${PAGER:-less}
# NOTE(review): NEW_TEMPLATES_DIR appears without a '$', so the fallback path
# is the literal string "NEW_TEMPLATES_DIR/<blueprint>" -- presumably a
# placeholder substituted at install time; confirm.
new_destroy_folder="${blueprint_folder:-NEW_TEMPLATES_DIR/$blueprint}"
if [ -z "$blueprint" ];then
  exit_with_code $E_MISS
fi
if [ -d $new_destroy_folder ];then
  # PROMPT THE USER IF THEY WANT TO OVERWRITE/EDIT/EXIT
  echo -e "\e[1m About to completely remove $running_mode template $blueprint!\e[21m"
  echo -n " - Are you sure? -press y/n then [ENTER]"
  read ans
  # anything other than "y"/"yes" (or a failed read) aborts
  if [ $? != 0 ] || [[ ! $ans =~ ^y(es)?$ ]];then
    add_verbose_msg "fewww, that was close, exiting instead of destroying folder"
    exit_with_code $EXIT_USER
  fi
  # Remove
  rm -r $new_destroy_folder
  # Make sure the directory no longer exists
  if [ -d $new_destroy_folder ];then
    add_verbose_msg "Unable to remove folder, try 'rm -rf $new_destroy_folder'"
  else
    add_verbose_msg "Removed template $blueprint"
  fi
else
  # The directory to destroy doesn't exist
  echo " Unable to destroy blueprint \"$blueprint\" because it can't be found!"
  echo " - hint: running \`new list\` should display local templates."
fi
| true |
57236989f1e817e6c6c639dfb6c777ad9e93324c | Shell | ohrite/snowflake | /spec/lib/package/yum_package_spec.sh | UTF-8 | 333 | 3.1875 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | . spec/spec_helper.sh
# Spec for the yum_package module. describe_module / it_* conventions come
# from spec_helper.sh (sourced by the runner).
describe_module "package/yum_package"
# Stub out yum when it is not installed so the spec can run anywhere.
before () {
  if [ -z "`which yum`" ]
  then
    . spec/helpers/stub_yum.sh
  fi
}
# `yum_package has` must fail for a package that is not installed.
it_determines_if_a_package_is_installed () {
  if yum_package has nonexistent
  then
    return 1
  fi
}
# Installing a package makes `yum_package has` succeed for it.
it_installs_a_package () {
  yum_package install nginx
  yum_package has nginx
}
| true |
427f8f4b302b649c1ee0a6327744f258be1abf9c | Shell | arakawatomonori/covid19-surveyor | /test/env_test.sh | UTF-8 | 1,908 | 3.375 | 3 | [
"WTFPL"
] | permissive | #!/bin/bash
# Tests for lib/env.sh: .env file loading, required/optional parameter
# validation, and default values. Uses assert_equal from test-helper.sh.
. ./lib/test-helper.sh
# Check that a missing .env file produces an error.
echo "test .env: file non-existing error case"
set +e
rm -f .env-test # deliberately remove it
msg=`ENV_FILENAME=.env-test source ./lib/env.sh 2>&1`
assert_equal "ENV ERROR: .env-test is required. See .env.sample, README, or Wiki of Repository." "$msg"
set -e
# Check that a missing 'slack_channel' parameter in .env produces an error.
echo "test .env: param 'slack_channel' non-existing error case"
set +e
cat << EOF > .env-test
environment=test
slack_token=aaaa
slack__channel=bbbb # slack_channel パラメーター名をわざと間違える
slack_channel_develop=cccc
EOF
msg=`ENV_FILENAME=.env-test source ./lib/env.sh 2>&1`
assert_equal "ENV ERROR: .env-test parameter 'slack_channel' is required. See .env.sample, README, or Wiki of Repository." "$msg"
set -e
# Check that omitting the OPTIONAL 'slack_channel_develop' parameter succeeds.
echo "test .env: OPTIONAL param 'slack_channel_develop' non-existing success case"
cat << EOF > .env-test
environment=test
slack_token=aaaa
slack_channel=bbbb
# slack_channel_develop パラメーターをわざと省略する(このパラメーターは Optional なので省略可)
EOF
msg=`ENV_FILENAME=.env-test source ./lib/env.sh 2>&1`
assert_equal "" "$msg"
ENV_FILENAME=.env-test source ./lib/env.sh
assert_equal "covid19-surveyor-dev" "$slack_channel_develop" # when omitted, the default value is used
# Check that all .env parameters are read correctly in the success case.
echo "test .env: success case"
cat << EOF > .env-test
environment=test
slack_token=aaaa
slack_channel=bbbb
slack_channel_develop=cccc
EOF
msg=`ENV_FILENAME=.env-test source ./lib/env.sh 2>&1`
assert_equal "" "$msg"
ENV_FILENAME=.env-test source ./lib/env.sh
assert_equal "test" "$environment"
assert_equal "aaaa" "$slack_token"
assert_equal "bbbb" "$slack_channel"
assert_equal "cccc" "$slack_channel_develop"
| true |
a93676a73f504139c0ec1aa2e0945e1690ff3e1e | Shell | multi-io/utils | /cpmydir.sh | UTF-8 | 126 | 2.9375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copy files matching "$SRCDIR/$1" into some_other_dir.
SRCDIR=mydir
#FILES=`eval echo "$SRCDIR/$1"`;
FILES="$SRCDIR/$1";
echo FILES=$FILES;
# NOTE(review): $FILES is deliberately unquoted so that a glob pattern passed
# as $1 expands here (the commented eval above was an earlier attempt) --
# confirm callers rely on this before quoting it.
cp $FILES some_other_dir;
| true |
679234f9e8bede6f215aaf0d83a3c8d5bc974fc5 | Shell | kwatraAkshay1995/webCredit123 | /ec2-vpc-alb/terraform/apache.sh | UTF-8 | 503 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# EC2 user-data: wait for cloud-init, install/enable Apache, then partition,
# format and mount the attached EBS volume (/dev/xvdb) on /web.
# sleep until instance is ready
until [[ -f /var/lib/cloud/instance/boot-finished ]]; do
  sleep 1
done
# install apache
yum update
yum install -y httpd
# make sure apache webserver is started and enabled
systemctl start httpd
systemctl enable httpd
# Partition the volume and mount it.
# FIX: "partled" was a typo for "parted", and the original listed parted's
# interactive sub-commands (mklabel/mkpart/quit) as standalone shell commands,
# which cannot work; use parted's scripted mode (-s) against /dev/xvdb instead.
parted -l
parted -s /dev/xvdb mklabel msdos
parted -s /dev/xvdb mkpart primary ext4 0% 1GB
mkfs.ext4 /dev/xvdb1
mkdir -p /web
mount /dev/xvdb1 /web
# persist the mount across reboots
echo "/dev/xvdb1 /web ext4 defaults 1 1" >> /etc/fstab
| true |
90e1a2a5427b7173509af73a1e076a41563dc51e | Shell | GPT-RL/impala | /sweep.sh | UTF-8 | 245 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env bash
# Block until a GPU index is available on the shared Redis "device-queue",
# then run the sweep pinned to that device.
device=$(redis-cli -h redis RPOP device-queue)
# FIX: the in-loop poll was missing "-h redis" and so queried localhost,
# never seeing the shared queue. Also treat an empty reply (redis-cli's
# non-interactive representation of a nil reply) as "no device yet".
while [ -z "$device" ] || [ "$device" == "(nil)" ]
do
    sleep 1
    echo "Waiting for device-queue"
    device=$(redis-cli -h redis RPOP device-queue)
done
CUDA_VISIBLE_DEVICES="$device" python src/sweep.py "$@"
03f3a29eecc6d6c825b4a63d783d9a35f68fe65e | Shell | UndarkAido/IconManager | /bin/prepare.sh | UTF-8 | 3,926 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Require two arguments: $1 source image path, $2 destination directory.
# (${1+x} is empty only when $1 is unset, so an empty-but-set arg is accepted.)
if [ -z ${1+x} ]; then echo "No path was provided."; exit 1; else srcfilein=$1; fi
if [ -z ${2+x} ]; then echo "No destination was provided."; exit 1; else dest=$2; fi
# Return 0 (success) when the two images are pixel-identical, 1 otherwise.
# Uses ImageMagick's RMSE metric; "0 (0)" means zero difference.
function sameimg(){
if [ -z ${1+x} ]; then echo "No file 1 was provided."; exit 1; else file1=$1; fi
# echo $file1
if [ -z ${2+x} ]; then echo "No file 2 was provided."; exit 1; else file2=$2; fi
# echo $file2
# NOTE(review): the diff-image target ":/dev/null" is unusual -- ImageMagick
# normally takes "null:" or a plain path here; confirm it is intentional.
compareresult=$(compare -metric RMSE "$file1" "$file2" :/dev/null 2>&1)
if [ "$compareresult" == "0 (0)" ]; then
return 0
fi
return 1
}
# Normalize the input to "<dest>/<name>[Source].png": rasterize SVG at 512px,
# convert WEBP onto a 310x310 transparent canvas, or copy a PNG as-is.
srcfile=$(realpath $srcfilein)
srcname=$(basename $srcfile)
srcext=".${srcname##*.}"
srcname="${srcname%.*}"
srcdir=$(dirname $srcfile)
if [ "$srcext" == ".svg" ]; then
if [ -f "$dest/$srcname[Source].png" ]; then
rm "$dest/$srcname[Source].png"
fi
inkscape --export-png "$dest/$srcname[Source].png" -w 512 "$srcdir/$srcname.svg" > /dev/null 2>&1
elif [ "$srcext" == ".webp" ]; then
if [ -f "$dest/$srcname[Source].png" ]; then
rm "$dest/$srcname[Source].png"
fi
convert "$srcdir/$srcname.webp" -background transparent -gravity center -resize 310x310 -extent 310x310 "PNG32:$dest/$srcname[Source].png"
else
cp "$srcdir/$srcname.png" "$dest/$srcname[Source].png"
fi
##########
# Generate the Windows-tile size variants (Small/Medium/Wide/Large) from the
# normalized source image, trimming transparent borders first when possible.
filedir=$dest
filename=$srcname
# NOTE(review): namemod keeps literal backslashes inside double quotes, so the
# paths below contain "\[Source\]" -- verify this matches the file name
# actually written in the step above.
namemod="\[Source\]"
##########
# Trim transparent borders; keep the trimmed copy only if it differs.
convert "$filedir/$filename$namemod.png" -trim "PNG32:$filedir/$filename[Magick].png"
if sameimg "$filedir/$filename$namemod.png" "$filedir/$filename[Magick].png"; then
rm "$filedir/$filename[Magick].png"
else
namemod="\[Magick\]"
fi
# size = longer edge; used to decide which variants can be downscaled vs padded
width=$(identify -format '%w' "$filedir/$filename$namemod.png")
height=$(identify -format '%h' "$filedir/$filename$namemod.png")
size=$(($width>$height?$width:$height))
if [ $height -eq $width ]; then
# echo "Already square"
squaremod=$namemod
else
# echo "Not square"
squaremod="[Square]"
dimensions=$size"x"$size
convert "$filedir/$filename$namemod.png" -background transparent -gravity center -resize $dimensions -extent $dimensions "PNG32:$filedir/$filename$squaremod.png"
fi
# Emit each tile: downscale when the source is larger than the target,
# otherwise just pad onto a transparent canvas of the target size.
if [ $size -gt 70 ]; then
convert "$filedir/$filename$namemod.png" -background transparent -gravity center -resize 70x70 -extent 70x70 "PNG32:$filedir/$filename[Small].png"
if [ $size -gt 150 ]; then
convert "$filedir/$filename$namemod.png" -background transparent -gravity center -resize 150x150 -extent 150x150 "PNG32:$filedir/$filename[Medium].png"
convert "$filedir/$filename$namemod.png" -background transparent -gravity center -resize 310x150 -extent 310x150 "PNG32:$filedir/$filename[Wide].png"
if [ $size -gt 310 ]; then
convert "$filedir/$filename$namemod.png" -background transparent -gravity center -resize 310x310 -extent 310x310 "PNG32:$filedir/$filename[Large].png"
# Wide2 resizes by width only; drop it if identical to Wide.
convert "$filedir/$filename$namemod.png" -background transparent -gravity center -resize 310 -extent 310x150 "PNG32:$filedir/$filename[Wide2].png"
if sameimg "$filedir/$filename[Wide].png" "$filedir/$filename[Wide2].png"; then
rm "$filedir/$filename[Wide2].png"
fi
else
convert "$filedir/$filename$namemod.png" -background transparent -gravity center -extent 310x310 "PNG32:$filedir/$filename[Large].png"
fi
else
convert "$filedir/$filename$namemod.png" -background transparent -gravity center -extent 150x150 "PNG32:$filedir/$filename[Medium].png"
fi
else
convert "$filedir/$filename$namemod.png" -background transparent -gravity center -extent 70x70 "PNG32:$filedir/$filename[Small].png"
fi
#convert "$filedir/$filename$namemod.png" -background transparent -gravity center -resize 256x256 -extent 256x256 "PNG32:$filedir/$filename[Icon256].png"
#convert "$filedir/$filename[Icon256].png" -define icon:auto-resize=256,192,128,96,64,48,32,24,16 "$filedir/$filename.ico"
#rm "$filedir/$filename[Icon256].png"
exit 0
| true |
739047ef49bb264b3d50035d4cd2a343c0b9db8c | Shell | MrDreamBot/arm64-docker-binaries | /install.sh | UTF-8 | 1,266 | 3.984375 | 4 | [] | no_license | #!/bin/bash
if [ `whoami` != 'root' ]; then
echo You have to be root to run this script.
exit 1
fi
if [ $# -gt 0 ]; then
if [ $1 != "-s" ]; then
echo Invalid command line option: $1
echo Usage: $0 [-s]
exit 2
fi
echo Entering simulation mode...
ECHO=echo
else
echo Starting installation...
fi
echo Copying docker binaries...
$ECHO cp ./bin/docker /usr/bin/
$ECHO cp ./bin/docker-containerd /usr/bin/
$ECHO cp ./bin/docker-containerd-ctr /usr/bin/
$ECHO cp ./bin/docker-containerd-shim /usr/bin/
$ECHO cp ./bin/dockerd /usr/bin/
$ECHO cp ./bin/docker-proxy /usr/bin/
$ECHO cp ./bin/docker-runc /usr/bin/
echo Checking existence of docker group
if [ ! `grep docker /etc/group` ]; then
echo Adding docker group and adding root to the docker group...
$ECHO groupadd docker
$ECHO usermod -aG docker root
echo Setting up systemd
$ECHO cp docker.service /lib/systemd/system/
$ECHO ln -s /lib/systemd/system/docker.service /etc/systemd/system/multi-user.target.wants/docker.service
$ECHO cp docker /etc/init.d/
else
echo docker group already exists, skipping set up of systemd docker.service
fi
echo When you are ready, reboot the system.
docker.service can be controlled using the command:
echo sudo systemctl start|sop docker.service
| true |
24cbadd904601a58637a9f23292a411721bb6a49 | Shell | csebesta/dotfiles | /scripts/.bin/voice | UTF-8 | 267 | 2.53125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# To run: voice.sh "Hello world!"
# To read file contents: voice.sh "$(cat <filename)"
# Valid languages: en-US, en-GB, de-DE, es-ES, fr-FR, it-IT
pico2wave -l=en-GB -w=/tmp/voice.wav "$1"
paplay /tmp/voice.wav 2> /dev/null
rm /tmp/voice.wav 1> /dev/null
| true |
72670ae5fbd108779a27d58186faf46cc39ca06d | Shell | embarktrucks/sour | /run | UTF-8 | 788 | 3.765625 | 4 | [] | no_license | #!/usr/bin/env bash
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
IMAGE="sour:dev"
color_blue() {
echo -e "\e[34m"$1"\e[0m"
}
image_exists() {
docker image ls \
--format "{{ .Repository}}:{{ .Tag }}" $IMAGE \
| grep $IMAGE 2>&1 > /dev/null
}
if ! image_exists; then
make dev -C "$SCRIPT_DIR"
else
time=$(docker image inspect $IMAGE -f "{{ .Metadata.LastTagTime }}")
out="Using $IMAGE. Last built at $time.\n"
echo -e $(color_blue "$out")
echo $(color_blue "Run 'make dev' to rebuild.")
fi
if [ "$#" -eq 0 ]; then
EXEC="/bin/bash"
else
EXEC="$@"
fi
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
docker run --rm \
--network=sour_default \
--init \
-v "$(pwd)":/app/sour \
-w "/app/sour" \
-it sour:dev \
$EXEC
| true |
9b20b6cf2fe7def9583d8af0a55d938cbd237a1b | Shell | jfgreen/renoise-tools | /tasks | UTF-8 | 909 | 3.703125 | 4 | [] | no_license | #!/bin/bash
set -Eeo pipefail
lua_version=5.1
if [ "$#" -ne 2 ]; then
echo "Usage: $0 TASK TOOL-DIR"
exit 1
fi
function set_lua_path {
eval $(luarocks path --lua-version ${lua_version})
}
function get_package_id {
xmllint --xpath "string(//RenoiseScriptingTool//Id)" $1/src/manifest.xml
}
task="$1"
tool_dir="$2"
case "${task}" in
lint)
set_lua_path
luacheck --std lua51+busted+renoise $tool_dir
;;
test)
set_lua_path
busted \
--directory=$tool_dir \
-p test_ \
-m "./src/?.lua" \
-m "./tests/asserts/?.lua" \
tests
;;
package)
archive_name="$(get_package_id $tool_dir).xrnx"
echo "Packaging ${tool_dir} into ${archive_name}"
zip -jr $archive_name $tool_dir/src
;;
*)
echo "Invalid task: '${task}'"
exit 1
;;
esac
set +e
| true |
0bbf5286643ae2d6121e25a6c6a880a3aa06b10d | Shell | testthedocs/rakpart | /ttd-ts/dockerfiles/entrypoint.sh | UTF-8 | 436 | 3.125 | 3 | [
"MIT",
"GPL-2.0-only"
] | permissive | #!/bin/bash
set -eo pipefail
# Check if we have a index.rst, if not exit
file="/build/docs/index.rst"
if [ -f "$file" ]
then
:
else
echo "Can't detect a index.rst, please make sure you have one!"
echo "Stopping now..."
exit 1
fi
#exec su-exec $@
# Re-set permission to the `ttd` user
# This avoids permission denied if the data volume is mounted by root
chown -R ttd /build
exec su-exec ttd /usr/local/bin/testts.sh
| true |
58ad5ac3ae9891f9279467cfad37fddeb2c9f044 | Shell | ordenador/ordevutils | /shells/cambio_horario/cambia_hora_linux.sh | UTF-8 | 1,759 | 3.796875 | 4 | [] | no_license | #!/usr/bin/ksh
#Vars
cuenta=0
CMD_SSH="ssh -n -o PasswordAuthentication=no"
LOGS="salida_cambio_hora.txt"
zona_chile_ver='CLST'
zona_chile_inv='CLT'
FECHA=`date +"%d%m%y"`
NODOS_LINUX="nodos_linux.txt"
echo "La fecha es: ${FECHA}"
cuenta=`cat ${NODOS_LINUX} | wc -l`
echo "Son: "$cuenta" Maquinas"
cuenta=$(( cuenta - 0 ))
while [ $cuenta -ge 1 ]; do
line=`cat ${NODOS_LINUX} | tail -${cuenta} |head -1`
echo $line
os=`${CMD_SSH} ${line} uname -s`
if [ $os = "Linux" ]; then
zona_maquina=`${CMD_SSH} ${line} date +"%Z"`
if [ $zona_chile_ver = $zona_maquina ] || [ $zona_chile_inv == $zona_maquina ]; then
echo "La maquina ${line} Tiene zona horaria ${zona_maquina}, SI se hace cambio" >> $LOGS
echo $line
echo $line >> $LOGS
$CMD_SSH $line cp /usr/share/zoneinfo/America/Santiago /usr/share/zoneinfo/America/Santiago.ori.$FECHA
$CMD_SSH $line ls -ltr /usr/share/zoneinfo/America/Santiago.ori*
scp hora_2013.txt $line:/root
$CMD_SSH $line zic /root/hora_2013.txt
$CMD_SSH $line mv /etc/localtime /etc/localtime.ori.$FECHA
$CMD_SSH $line "ls -ltr /etc/localtime.ori.$FECHA" >> $LOGS
$CMD_SSH $line ln -s /usr/share/zoneinfo/America/Santiago /etc/localtime
$CMD_SSH $line ls -ltr /etc/localtime >> $LOGS
$CMD_SSH $line zdump -v /etc/localtime | grep 2013 >> $LOGS
else
echo "La máquina ${line} NO tiene zona horaria CLST o CLT, NO se hace cambio" >> $LOGS
echo "Tiene Zona Horaria: ${zona_maquina}" >> $LOGS
fi
else
echo "La maquina ${line} no es Linux o no es posible conectarse, NO se hace cambio" >> $LOGS
fi
echo Fin >> $LOGS
cuenta=$(( cuenta - 1 ))
done
| true |
c466a1d65bc6f3fda6181b2fb482afd66557fc41 | Shell | lncdBot/ANTISTATELONG | /VoxelwiseHLM/HLMimages/makeMovie.sh | UTF-8 | 1,290 | 3.625 | 4 | [] | no_license | #!/usr/bin/env bash
################################
# rename output images and make movie
# suma continues to number from last file so male-top ends at 9 and female-top starts at 10, ffmpeg doesn't like this
# list all jpgs from suma, get extenion, and mv to sequentually named
# ffmpeg likes sequntial series starting at 1
#
echo "$1"
cd movieStills
#ls *jpg | cut -f1 -d. | uniq | while read ext; do i=1; for f in $ext*jpg; do echo mv $f $ext-$i.jpg; let i++ ; done; done;
#ls *00*.jpg | cut -f1 -d. | uniq | while read ext; do
## suddenlty things appear to be numbered correctly
#ffmpeg -y image2 -i "$ext-%d.jpg" -o $ext.avi
# thres/sex/hemisphere
for dir in */*/*/; do
pushd $dir
#
# rename
ls *00*.jpg | cut -f1 -d. | uniq | while read ext; do
echo $ext
i=1;
for f in $ext*jpg; do
mv $f $ext-$i.jpg;
let i++ ;
done;
done
ls *-*.jpg | cut -f1 -d- | uniq | while read ext; do
echo -e "\n\n\n**** $ext ($dir)"
#[[ -r $ext.avi && "$1"!="-f" ]] && continue # only build new movies **** not always desired
echo ffmpeg -r 1 -i "'$ext-%d.jpg'" "'$ext.avi'"
echo
echo
# overwrite, one second interval, input is jpegs, output is *avi
ffmpeg -y -r 1 -i "$ext-%d.jpg" -vcodec mjpeg "$ext.avi"
done
popd
done
| true |
e50f1d47037faf5c4c92b2931c767206c24fb16b | Shell | kelsin/dotfiles | /zsh/.zsh/prompt.zsh | UTF-8 | 3,495 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env zsh
git_symbol=`echo -e '\ue0a0'`
node_symbol=`echo -e '\ue718'`
ruby_symbol=`echo -e '\ue791'`
python_symbol=`echo -e '\ue73c'`
lambda_symbol=`echo -e '\u03bb'`
k8s_symbol=`echo -e '\u2388'`
git_arrows() {
prompt_git_arrows=
command git rev-parse --abbrev-ref @'{u}' &>/dev/null || return
local arrow_status
# check git left and right arrow_status
arrow_status="$(command git rev-list --left-right --count HEAD...@'{u}' 2>/dev/null)"
# exit if the command failed
(( !$? )) || return
# left and right are tab-separated, split on tab and store as array
arrow_status=(${(ps:\t:)arrow_status})
local arrows left=${arrow_status[1]} right=${arrow_status[2]}
(( ${right:-0} > 0 )) && arrows+="%F{green}⇣%f"
(( ${left:-0} > 0 )) && arrows+="%F{yellow}⇡%f"
[[ -n $arrows ]] && prompt_git_arrows="${arrows}"
}
precmd_functions+=(git_arrows)
autoload -Uz vcs_info
zstyle ':vcs_info:*' enable git svn
zstyle ':vcs_info:git*' formats "%b"
zstyle ':vcs_info:git*' actionformats "%b|%a"
precmd_functions+=(vcs_info)
git_branch() {
prompt_git_branch=
if git rev-parse --is-inside-work-tree &>/dev/null; then
if git diff-index --cached --quiet HEAD &>/dev/null; then
if git diff --no-ext-diff --quiet --exit-code &>/dev/null; then
if [ -n "$(git ls-files --others --exclude-standard)" ]; then
branch_symbol="%F{yellow}`echo -e '\u2605'`%f"
else
branch_symbol="%F{green}`echo -e '\u2713'`%f"
fi
else
branch_symbol="%F{blue}`echo -e '\u271a'`%f"
fi
else
branch_symbol="%F{red}`echo -e '\u2731'`%f"
fi
prompt_git_branch=" $vcs_info_msg_0_%f $branch_symbol $prompt_git_arrows "
fi
}
precmd_functions+=(git_branch)
[[ "$SSH_CONNECTION" != '' ]] && prompt_username='%F{green}%n%f@%F{yellow}%m%f '
[[ $UID -eq 0 ]] && prompt_username='%F{red}%n%f '
prompt_status() {
print -P "%F{blue}%~%f\$prompt_git_branch"
}
precmd_functions+=(prompt_status)
k8s_context() {
kubectl config current-context 2>/dev/null
}
k8s_namespace() {
kubectl config view --minify --output 'jsonpath={..namespace}' 2>/dev/null
}
k8s_prompt() {
local context="${$(k8s_context):-none}"
[[ "$context" != 'none' ]] && echo " %F{blue}$k8s_symbol%f ${context}:${$(k8s_namespace):-default}"
}
python_prompt() {
local conda_name="${CONDA_DEFAULT_ENV:-base}"
if [[ "$conda_name" != 'base' ]]; then
echo " %F{blue}$python_symbol%f conda:${conda_name}"
return
fi
local venv_name="${$(basename "${VIRTUAL_ENV}"):-system}"
if [[ "$venv_name" != 'system' ]]; then
echo " %F{blue}$python_symbol%f ${venv_name}"
return
fi
if [[ $+commands["pyenv"] ]]; then
local pyenv_name="${$(pyenv version-name):-system}"
[[ "$pyenv_name" != 'system' ]] && echo " %F{blue}$python_symbol%f ${pyenv_name}"
fi
}
ruby_prompt() {
local rbenv_name="${$(rbenv version-name):-system}"
[[ "$rbenv_name" != 'system' ]] && echo " %F{red}$ruby_symbol%f ${rbenv_name}"
}
node_prompt() {
local nodenv_name="${$(nodenv version-name):-system}"
[[ "$nodenv_name" != 'system' ]] && echo " %F{green}$node_symbol%f ${nodenv_name}"
}
setopt prompt_subst
export PROMPT="$prompt_username%(?.%F{magenta}.%F{red})${lambda_symbol}%f "
export PROMPT2="%F{cyan}%_❯%f "
export PROMPT3="%F{cyan}?❯%f "
export PROMPT4="%F{red}+%N:%i❯%f "
export RPROMPT="\${\$(python_prompt)}\${\$(ruby_prompt)}\${\$(node_prompt)}\${\$(k8s_prompt)}"
export PROMPT_EOL_MARK="%F{red}↵%f"
| true |
07a72decac406deb5741de150c8091604d4d526f | Shell | rongfengliang/hashids.js | /add-extension.sh | UTF-8 | 205 | 2.5625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
declare file="esm/hashids.js"
declare file_contents
file_contents=$(<$file)
declare replacement="from './util.js'"
echo "${file_contents//from \'.\/util\'/${replacement}}" >"$file"
| true |
e48c8260b77c1c874d244227bba7421160c9d0e7 | Shell | heartshare/my-cap | /bin/libxml2.sh | UTF-8 | 211 | 2.640625 | 3 | [
"X11-distribute-modifications-variant"
] | permissive | #!/bin/bash
export VERSION=2.7.8
. `dirname $0`/functions.sh
setup /usr/local/include/libxml2/libxml/parser.h "echo $VERSION"
download ftp://xmlsoft.org/libxml2/libxml2-$VERSION.tar.gz
build libxml2-$VERSION
| true |
46d96beff04ceedc090595884f214c87d5e3129f | Shell | UVA-PCL/hadoop-cloud-rebalance-scripts | /scripts/setup-local | UTF-8 | 1,102 | 3.234375 | 3 | [] | no_license | #!/bin/bash
set -ex
. $(dirname $0)/config.sh
for i in "${EXPERIMENT_IDS[@]}"; do
mkdir -p ${EXPERIMENT_DIRS[$i]}
mkdir -p ${EXPERIMENT_DIRS[$i]}/log
tar -C ${EXPERIMENT_DIRS[$i]} -xf $EXPERIMENT_HADOOP_TGZ
rm -f ${EXPERIMENT_DIRS[$i]}/hadoop
ln -s $EXPERIMENT_HADOOP_TGZ_DIR ${EXPERIMENT_DIRS[$i]}/hadoop
ln -s $EXPERIMENT_HADOOP_TGZ_DIR ${EXPERIMENT_DIRS[$i]}/namenode
rm -rf ${EXPERIMENT_DIRS[$i]}/scripts
rm -rf ${EXPERIMENT_DIRS[$i]}/hadoop_tmp
cp -r $EXPERIMENT_DIR/scripts ${EXPERIMENT_DIRS[$i]}/scripts
for file in $(ls $EXPERIMENT_DIR/hadoop-configs-single-template); do
sed \
-e "s!@EXPERIMENT_DIR@!${EXPERIMENT_DIRS[$i]}!g" \
-e "s/@DATANODE_PORT@/$(($i + 40000))/g" \
-e "s/@DATANODE_HTTP_PORT@/$(($i + 41000))/g" \
-e "s/@DATANODE_IPC_PORT@/$(($i + 42000))/g" \
< $EXPERIMENT_DIR/hadoop-configs-single-template/$file \
> ${EXPERIMENT_DIRS[$i]}/namenode/etc/hadoop/$file
done
done
for i in 0; do
${EXPERIMENT_DIRS[$i]}/namenode/bin/hdfs namenode -format
done
| true |
1a5ba8359c0d6eddb02a88c8392f42d8bb7aa542 | Shell | k3nno88/forcrawler | /parse_reserved.sh | UTF-8 | 1,666 | 3.359375 | 3 | [] | no_license | #!/bin/zsh
# This script goes through all the pages contain houses that have reserved status
export PATH=/home/uh/anaconda3/bin:$PATH
source ~/anaconda3/etc/profile.d/conda.sh
#conda init zsh
conda activate base
cd ~/reserved
~/refresh_vpn.sh
for i in {$1..$2}
do
echo "Parsing page $i"
line=$(timeout 10 lynx -connect_timeout=10 --source https://www.funda.nl/koop/heel-nederland/in-onderhandeling/sorteer-datum-af/p$i > ~/house/some_random_house.html | wc -l)
#echo "$line results"
while [[ $line -lt 2000 ]] ; do
echo "Retrying"
~/refresh_vpn.sh
sleep 0.5
echo "Parsing page $i"
line=$(timeout 10 lynx -connect_timeout=10 --source https://www.funda.nl/koop/heel-nederland/in-onderhandeling/sorteer-datum-af/p$i > ~/house/some_random_house.html | wc -l)
#echo "$line results"
done
scrapy crawl reserved > /dev/null 2>&1 # Scraping content and save its in reserved table
nordvpn_refresh=$i%3
if [[ $nordvpn_refresh -eq 0 ]]
then
echo "Refreshing vpn connection"
~/refresh_vpn.sh
sleep 0.5
else
sleep 0.5
fi
done
echo "Update house"
mysql --login-path=server -s -N scraping_data -e "UPDATE funda f INNER JOIN reserved r ON f.url = r.url SET f.status=r.status, f.VerkochtOnderVoorbehoud = IF(ISNULL(f.VerkochtOnderVoorbehoud), r.VerkochtOnderVoorbehoud, f.VerkochtOnderVoorbehoud), f.Onderbod = IF(ISNULL(f.Onderbod), r.Onderbod, f.Onderbod);"
echo "Truncate table, scraping finished"
mysql --login-path=server -s -N scraping_data -e "TRUNCATE TABLE reserved"
| true |
9350e1996517f2b321a77d6a8026cadc1d922adf | Shell | rdmenezes/exodusdb | /swig/share/perl/clean.sh | UTF-8 | 434 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
export SWIG_SYNTAX="Syntax is ./clean.sh"
export SWIG_TARGET=$1
source config.sh
if [ "$SWIG_TARGET" == "all" ]; then
for SWIG_TARGET in $SWIG_ALL_TARGETS; do
./clean.sh $SWIG_TARGET
done
echo "all clean"
exit 0
fi
echo
echo ------------------
echo $SWIG_TARGET clean
echo ------------------
test -d $SWIG_TARGET || exit 0
echo -ne "Removing: `pwd`/$SWIG_TARGET: "
rm -rf $SWIG_TARGET && echo "removed."
| true |
93fd120f1888080613f34aaed34fa6efc70e5e2a | Shell | rasa/dotfiles-3 | /.config/zsh/plugins/devel/brew.plugin.zsh | UTF-8 | 296 | 3.25 | 3 | [] | permissive | #!/bin/zsh
case $OSTYPE:l in
linux*)
# linuxbrew (for git builds)
function () {
local brew_path="$HOME/.linuxbrew"
[[ -d "$brew_path" ]] || return
add-suffix-paths "$brew_path"
# always compile from source, using distro libc
export HOMEBREW_BUILD_FROM_SOURCE=1
}
;;
esac
| true |
fc8c2b1ae524e94854c16280372e425df6c4b7cc | Shell | bennapp/mavencraft | /scripts/backup.sh | UTF-8 | 594 | 3.09375 | 3 | [] | no_license | #!/bin/sh
# makes backups and invokes overviewer
set -e
#TODO: make backup dir argument
DATE=`date +%m-%d-%Y-%H-%M-%Z`
BACKUP=/opt/backup/minecraft-world-backup-$DATE
screen -r minecraft -x -p 0 -X stuff "say $DATE\n"
sleep 1
screen -r minecraft -x -p 0 -X stuff "save-off\n"
sleep 1
screen -r minecraft -x -p 0 -X stuff "save-all\n"
sleep 2
cp -p -r /opt/minecraft/world $BACKUP
screen -r minecraft -x -p 0 -X stuff "say $DATE\n"
sleep 1
screen -r minecraft -x -p 0 -X stuff "save-on\n"
sh ~/mavencraft/scripts/overviewer.sh
find /opt/backup -maxdepth 1 -type d -ctime +2 | xargs rm -rf
| true |
046fde2783b31db9eff7e7cc25fc6c9b52f30f1c | Shell | yyfeisky/uenc_src | /tools/gen_proto_python.sh | UTF-8 | 357 | 3.328125 | 3 | [] | no_license | #!/bin/bash
SHDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROTODIR=${SHDIR}/../proto/src
PROTOC=${SHDIR}/protoc/protoc
OUTDIR=${SHDIR}/../proto/python
for proto in `ls $PROTODIR`
do
echo "processing--->"${proto}
$PROTOC -I=$PROTODIR --python_out=$OUTDIR ${proto}
done
# -------------
# for proto in `find $PROTODIR -name *.proto`
| true |
c937c2687786a46d1ee39b4b7d13b99055ee8412 | Shell | Robert512/xmind- | /Shell/变量高级用法/minlingtihuan.sh | UTF-8 | 412 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# 命令替换
# `command`
# $(command)
index=1
for user in `cat ./users.txt | cut -d ":" -f 1`
do
echo "This is $index user:" $user
index=$(($index+1))
done
# $(())算术运算
date +%Y
echo "This is $(($(date +%Y)+1)) year"
date +%j
echo "This year have passed $(date +%j) days"
echo "This year have passed $(($(date +%j) / 7)) weeks"
echo "This year $(((365-$(date +%j)) / 7)) weeks left." | true |
6b5c0d29d52dc777c59d9390150cf24add2b844b | Shell | bigzhu/bash_tools | /install.sh | UTF-8 | 979 | 3.84375 | 4 | [] | no_license | #! /bin/bash
force=false
help(){
echo "Usage: install.sh file_name [-f]"
echo ""
echo "Flags:"
echo " -f"
echo " force install if File exists(will repalce!)"
echo " -h"
echo " show help info"
echo ""
echo "Example:"
echo " one file:"
echo " install.sh ssh.py"
echo " multiple file:"
echo " install.sh *.py"
echo " force install:"
echo " install.sh *.py -f"
}
# check is need force install
for param in "$@"
do
if [ $param = "-f" ]; then
#echo "$param force is true"
force=true;
elif [ $param = "-h" ]; then
help;
fi
done
if [ $1 ];then
for param in "$@"
do
if [ $param != "-f" ] && [ $param != "-h" ]; then
if [ $force = true ];then
echo "force install $param to /usr/local/bin/";
sudo ln -sf ${PWD}/$param /usr/local/bin/
else
echo "install $param to /usr/local/bin/";
sudo ln -s ${PWD}/$param /usr/local/bin/
fi
fi
done
else
help;
fi
| true |
a00c906fa89a434cc554156d69adf4966104cbac | Shell | dereckrx/workstation-setup | /files/bash_profile.template.bash | UTF-8 | 924 | 3.125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#### DONT EDIT THIS FILE ####
## Thu Aug 9 2018 8:06 PM
# This file overwrites the default bash-it .bash_profile template
#To show the available aliases/completions/plugins, type one of the following:
# bash-it show aliases
# bash-it show completions
# bash-it show plugins
# Path to the bash it configuration
export BASH_IT="{{BASH_IT}}"
# Lock and Load a custom theme file
# location /.bash_it/themes/
export BASH_IT_THEME='bobby'
# Set this to false to turn off version control status checking within the prompt for all themes
export SCM_CHECK=true
# Load Bash It
source "$BASH_IT"/bash_it.sh
## Sun May 6 2018 9:47 PM Stuff from original bash_profie
# Don't add to this file, instead add to either .profile or .bashrc
# Load .profile, containing login, non-bash related initializations.
source ~/.profile
# Load .bashrc, containing non-login related bash initializations.
source ~/.bashrc | true |
078528708220fff085ee22a50a634bbcd90f0c00 | Shell | tox2ik/local-bin | /balmora-snapshots | UTF-8 | 5,343 | 3.5 | 4 | [] | no_license | #!/bin/bash
#
# Inspiration / etc
#
# - Mark Merlin (script)
# http://marc.merlins.org/perso/btrfs/
# post_2014-03-22_Btrfs-Tips_-Doing-Fast-
# Incremental-Backups-With-Btrfs-Send-and-Receive.html
#
# - Martin Wilck
# clone everything to fresh btrf: https://github.com/mwilck/btrfs-clone
#
# check out
# - https://github.com/mwilck/btrfs-clone
# - https://github.com/Thomas-Tsai/partclone
#
# maybe read
# - https://helmundwalter.de/en/blog/next-gen-backup-with-btrfs-snapshots-for-root-fs-and-databases/#
# - https://github.com/csirac2/snazzer/#snazzer
#
# Dependencies
#
# - sys-apps/util-linux (/bin/mountpoint)
if [[ $USER != root ]]; then
exec sudo $0 "$@"
fi
mountpoint -q /ss || mount /ss || exit 124
mountpoint -q /B/rr || mount /B/rr || exit 124
if [[ $1 = ls ]]; then
for i in /ss /B/rr; do sudo btrfs subvolume list -at --sort=path $i | awk '{ print $(NF) }' ; done |
grep -e rr -e ss | grep -v FS_T
exit 0
fi
if [[ $UID -ne 0 ]]; then
echo sudo $0
exit 1
fi
DEBUG=${DEBUG:-}
# set -x
set -o nounset
set -o errexit
set -o pipefail
die () {
set +x
msg=$*
trap '' ERR
echo >&2
echo "$msg" >&2
# This is a fancy shell core dumper
if echo $msg | grep -q 'Error line .* with status'; then
line=`echo $msg | sed 's/.*Error line \(.*\) with status.*/\1/'`
nl -ba $0 | grep -3 "\b$line\b" >&2
fi
touch ~/__________________CHECK_BACKUPS__.$(date +%Y-%m-%d).balmora-snapshots
if [ -o errexit ]
then
exit 55
fi
}
trap 'die "Error line $LINENO with status $?"' ERR
ymd=$(date +%Y-%m-%d)
mkdir -p /ss/$ymd /ss/last /B/rr/$ymd
btrfs filesystem show /ss > /dev/null || exit 124
btrfs filesystem show /B/rr > /dev/null || exit 124
declare -A mp=(
[/T]=T
['/']=slash
[/home]=home
[/srv]=srv
)
##
## Create daily snapshots to avoid losing data
##
for mount in ${!mp[@]}; do
snap=${mp[$mount]}
if [[ ! -e /ss/$ymd/$snap ]]; then
[[ -n $DEBUG ]] && echo b s snap-r $mount /ss/$ymd/$snap
btrfs subvolume snapshot -r $mount /ss/$ymd/$snap &&
ln -snf ../$ymd/$snap /ss/last/$snap;
else
[[ -n $DEBUG ]] && echo have /ss/$ymd/$snap
fi
done
sync
##
## Send snapshots to a secondary volume for redundancy
##
for mount in ${!mp[@]}; do
snap=${mp[$mount]}
#prev_local=$(ls -dv1 /ss/*/$snap | grep $(realpath /ss/last/$snap) -B1 | sed 1q)
prev_local=$( printf "%s\n" /ss/*/$snap | sort | grep $(realpath /ss/last/$snap) -B1 | sed 1q)
last_remote=$( { ls -dv1 /B/rr/*/$snap | tail -n1; } || echo seed)
if [[ -n $DEBUG ]]; then
printf "prev_local..:%30s\n" $prev_local
printf "last_remote.:%30s\n" $last_remote
fi
if [[ ! -e /B/rr/$ymd/$snap ]]; then
if [[ $last_remote == seed ]]; then
#set -x
btrfs send /ss/$ymd/$snap | btrfs receive /B/rr/$ymd/
else
rrs=/tmp/backups.err.$ymd.send.$$
rrr=/tmp/backups.err.$ymd.recv.$$
echo errors in $rrs $rrr
declare -a ps=()
function incremental {
#set -x
echo ":incr: btrfs send -v -p $prev_local /ss/$ymd/$snap | btrfs receive -v /B/rr/$ymd " | tee $rrr $rrs
btrfs send -v -p $prev_local /ss/$ymd/$snap 2>$rrs | btrfs receive -v /B/rr/$ymd 2>$rrr
ps=( "${PIPESTATUS[@]}" )
set +x
}
#function reseed {
# reseed should find the latest possible snapshot and use that as the parent
# this is useful if backups fail 4 days in a row or something.
#
# so on the 20th we should use 16th as the partent if 17-18-19 are missing.
#
# an exception like this we need to re-transmit from even earlier
##
## receiving snapshot home uuid=776adb95-9f15-8748-be6a-55a4255808c3, ctransid=1408097 parent_uuid=3a19a0e4-ded6-7441-988d-c786c4abd697, parent_ctransid=1405312
## ERROR: cannot find parent subvolume
##
## This means that the remote end hoes not have the parent volume you have specified on the source.
##
## eg.
# btrfs send -v -p /ss/2023-04-15/home /ss/2023-04-20/home | btrfs receive -v /B/rr/2023-04-20
# btrfs send -v -p /ss/2023-04-15/srv /ss/2023-04-20/srv | btrfs receive -v /B/rr/2023-04-20
# btrfs send -v -p /ss/2023-04-15/T /ss/2023-04-20/T | btrfs receive /B/rr/2023-04-20
# btrfs send -v -p /ss/2023-04-15/slash /ss/2023-04-20/slash | btrfs receive /B/rr/2023-04-20
function reseed {
# $ errno `imath 141-128`
# EACCES 13 Permission denied
if [[ ${ps[0]} -ne 0 ]]; then
export DEBUG=1
echo -e "\e[33;01m"
echo -e "remote end failed ${ps[0]}"
echo -e "\e[0m"
eval $(grep -oE '[a-z_]+=[0-9a-z-]+' $rrr)
#... if ${ps[0]} == 141
parent=$(btrfs subvolume list -sru /ss \
| awk -v u=$parent_uuid '$(NF-2) == u { print $NF }')
#set -x
parent_date=${parent%%/*}
echo ":reseed: btrfs send /ss/$parent | btrfs receive /B/rr/$parent_date/"
mkdir -p /B/rr/$parent_date
btrfs send /ss/$parent | btrfs receive /B/rr/$parent_date/
set +x
incremental
exit ${ps[0]}
fi
}
set +o errexit
incremental
reseed
set -o errexit
fi
set +x
else
if [[ -n $DEBUG ]]; then
date '+present.....: %Y-%m-%d (snapshot exists)'
fi
fi
if [[ -n $DEBUG ]]; then
echo
fi
done
umount /ss /B/rr || true
| true |
aac05b8a2101c22cd88c816f9dc3043a02a64850 | Shell | BackupTheBerlios/clc-svn | /x86/tags/CRUX-2_1/contrib/ipsec-tools/setkey | UTF-8 | 349 | 3.5625 | 4 | [] | no_license | #!/bin/sh
#
# /etc/rc.d/setkey: setup IPsec SA/SP database
#
SETKEY=/usr/sbin/setkey
CONF=/etc/racoon/setkey.conf
case $1 in
start)
test -e $CONF && $SETKEY -f $CONF
;;
stop)
$SETKEY -F
$SETKEY -F -P
;;
restart)
$0 stop
$0 start
;;
dump)
$SETKEY -D
$SETKEY -D -P
;;
*)
echo "usage: $0 start|stop|restart|dump"
;;
esac
# End of file
| true |
46950c8a163aaac7d23a4e24d804e413fcac8182 | Shell | BlissfulBlue/linux_cprogramming | /terminate.sh | UTF-8 | 386 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# echo this is the first parameter: $1
# echo this is the second parameter: $2
# echo this is the third parameter: $3
# create variable function to cat out instance id
idinstance=$(cat ipad.json | jq -r '.Instances[].InstanceId')
# print instance id
echo $idinstance
# use variable (instance id) terminate instance
aws ec2 terminate-instances --instance-ids $idinstance
| true |
d380649453413e9eec3ad788cf2fa3a73594aaed | Shell | zbigg/axl | /misc/bkupdchangelog | UTF-8 | 719 | 3.53125 | 4 | [] | no_license | #!/bin/sh
############################################################
#
# Update static ChangeLog from bk changes !
#
# Id: $Id: bkupdchangelog 1.1 04/01/28 21:18:12+01:00 zbigg@nowy.dom $
#
# Author: Zbigniew Zagorski <zbig@pentagon.eu.org>
#
############################################################
FILE=${1-ChangeLog}
[ -f $FILE ] || { echo "$0: $FILE don't exists" >&2 ; exit 1 ; }
LV=`cat $FILE | grep ChangeSet@ |sed -e 's/.*ChangeSet@\([0-9.][0-9.]*\).*/\1/' | head -n 1`
echo "Last changeset :$LV" >&2
LN=`bk changes -r$LV | wc -l`
bk changes -r$LV.. > TMP
XN=`cat TMP | wc -l`
HN=$[ $XN - $LN - 1 ]
cat TMP | sed -e 's/^\(.\)/ \1/'| head -n $HN > $FILE.new
rm -f TMP
echo >> $FILE.new
cat $FILE >> $FILE.new
mv $FILE.new $FILE
| true |
45d1605c9b7e9fbdacb43740e864a543de711613 | Shell | anviar/scripts | /magento_clone/shadowcopy_cut.sh | UTF-8 | 1,899 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# Perferences
WEBROOT=~/public_html/domain.com
WORKDIR=~/shadowcopy
SHOST=someftphost.com
ftpuser=someftpuser
ftppass=somepassword
db=somedatabase
dbuser=somedbuser
dbpass=somedbpass
sqlfile=domain.sql.bz2
datafile=domain.tar.bz2
SRCDOMAIN=src.domain.com
DSTDOMAIN=dst.domain.com
# ==== Let's begin ====
# Download last backup
echo "$(date|tr -d "\n"): Downloading..."
cd ${WORKDIR}
rm -f ${sqlfile}
rm -f ${datafile}
wget --quiet --user="$ftpuser" --password="$ftppass" ftp://$SHOST/backup/*.bz2
if [[ $? -ne 0 ]] ; then
echo "$(date|tr -d "\n"): Error: Can not download files!"
exit 1
fi
echo "$(date|tr -d "\n"): Clean database..."
# Cleaning database
CLEAN_SQL="SET FOREIGN_KEY_CHECKS=0;"
for table in $(mysql -u${dbuser} -p${dbpass} $db <<< 'SHOW TABLES;'|grep -v Tables_in_shop)
do
CLEAN_SQL="${CLEAN_SQL}DROP TABLE $table;"
done
CLEAN_SQL="${CLEAN_SQL}SET FOREIGN_KEY_CHECKS=1;"
mysql -u${dbuser} -p${dbpass} $db -e "$CLEAN_SQL" >>/dev/null
if [[ $? -ne 0 ]] ; then
echo "$(date|tr -d "\n"): Error: DB cannot be cleared!"
exit 1
fi
echo "$(date|tr -d "\n"): Restoring database..."
# Restoring DB from last backup
bunzip2 < ${WORKDIR}/${sqlfile} | mysql -u${dbuser} -p${dbpass} $db
mysql -u$dbuser -p$dbpass $db -e "UPDATE core_config_data SET value=REPLACE(value, \"${SRCDOMAIN}\", \"${DSTDOMAIN}\") WHERE path=\"web/secure/base_url\" OR path=\"web/unsecure/base_url\";"
echo "$(date|tr -d "\n"): Clean files..."
# Cleaning site root
if [[ ! -d ${WEBROOT} ]]
then
echo "$(date|tr -d "\n"): Warning: Direcrory ${WEBROOT} does not exist!"
mkdir -p ${WEBROOT}
else
rm -rf ${WEBROOT}/*
fi
echo "$(date|tr -d "\n"): Restoring files..."
# Unpack last backup of files
cd ${WEBROOT}
tar -jxf ${WORKDIR}/${datafile}
# Clean cache and sessions
rm -rf ${WEBROOT}/var/cache/mage--*
rm -f ${WEBROOT}/var/session/sess_*
echo "$(date|tr -d "\n"): Completed."
| true |
b31f9897a0776c1d215ad3811e9168647b9c16e0 | Shell | openlibrarysociety/authorprofile | /bash.d/vertical.sh | UTF-8 | 308 | 3.1875 | 3 | [] | no_license | #!/bin/bash
if [[ ! $2 ]] || [[ ! -f $1 ]] ; then
echo "Usage: $0 [ACIS PROFILE PATH] [MAXIMUM DEPTH]"
exit 1
fi
AUTHOR_NAME=${1//\/*\//}
AUTHOR_NAME=${AUTHOR_NAME%*.*.*}
nohup nice -n20 $HOME/ap/perl/bin/vertical/vertical.pl --maxd=$2 $1 >$HOME/var/log/vertical/`date +%s`.$AUTHOR_NAME.log 2>&1 & | true |
81e8811814ad1348315fcc8ab194948677c093d5 | Shell | lil5/smallergallery | /bin/smallergallery | UTF-8 | 1,463 | 4.125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# License MIT (c) Lucian I. Last
# https://github.com/lil5/smallergallery/blob/master/LICENSE
function run_version() {
function requirements() {
command -v $1 >/dev/null 2>&1 || { echo >&2 "Requires $1 but it's not installed. Aborting."; exit 1; }
}
echo 'smallergallery 1.0.0'
requirements convert
requirements find
requirements pushd
requirements realpath
requirements mkdir
requirements read
requirements dirname
requirements rsync
}
while [[ $# -gt 0 ]]; do
case "$1" in
-i|--in)
IN_PATH="$2"
shift
shift
;;
-o|--out)
OUT_PATH="$2"
shift
shift
;;
-w|--width)
WIDTH="$2"
shift
shift
;;
-v|--version)
run_version
exit 0
;;
esac
done
if [[ -z "$WIDTH" ]]; then
WIDTH=80
fi
if [[ -z "$IN_PATH" ]]; then
IN_PATH=original/
elif [[ "${IN_PATH: -1}" != '/' ]]; then
IN_PATH="${IN_PATH}/"
fi
if [[ -z "$OUT_PATH" ]]; then
OUT_PATH="gallery/"
fi
OUT_PATH="$(realpath $OUT_PATH)"
pushd $IN_PATH
IMAGE_EXTS=( 'jpg' 'jpeg' 'png' )
for i in ${IMAGE_EXTS[@]}; do
while IFS= read -rd '' f; do
DIR_PATH=$(dirname "${OUT_PATH}/${f}")
mkdir "$DIR_PATH" &> /dev/null
if ! [[ -f "${OUT_PATH}/${f%.${i}}.${i}" ]]; then
convert ./"$f" -resize ${WIDTH}x\> "${OUT_PATH}/${f%.${i}}.${i}"
fi
done < <(find -L . -type f -iname "*.${i}" -print0)
done
rsync --ignore-existing --delete -ruvhL . $OUT_PATH
popd
| true |
4200cd204c490318e85c79b6684aefa6b4042453 | Shell | RodrigoBalest/php-7-debian | /mcrypt_install.sh | UTF-8 | 524 | 3.03125 | 3 | [] | no_license | #!/bin/bash
apt-get install -y php-pear libmcrypt-dev
export PHP_PEAR_PHP_BIN=/usr/local/php7/bin/php
echo -e "\e[33m------\e[0m"
echo -e "\e[33mATENÇÃO: tecle ENTER para a pergunta 'libmcrypt prefix? [autodetect]'\e[0m"
echo -e "\e[33m------\e[0m"
pecl install mcrypt-1.0.1
echo "extension=mcrypt.so" > /usr/local/php7/etc/conf.d/mcrypt.ini
# Remove os programas usados para instalar o mcrypt no PHP.
x="$(dpkg --list | grep php | awk '/^ii/{ print $2}')"
apt-get --purge -y remove $x
/etc/init.d/php7-fpm restart
| true |
6caed7b708923303ecd8fb6bcba8a45a0eb3d9f6 | Shell | itmm/superfood | /expand.sh | UTF-8 | 504 | 3.3125 | 3 | [] | no_license | #!/bin/bash
src=$1
YEAR=`date +%Y`
TITLE=`grep '<h2>' $src | ./escape.sh | cut -b 5-`
len=`echo "$TITLE" | wc -c`
TITLE=`echo "$TITLE" | cut -b -$((len - 10))`
CONTENT=`grep -v '<h2>' $src | ./escape.sh`
NAVIGATION=`./escape.sh <navigation.html`
while read line; do
line=`echo $line | sed -e "s/\\${TITLE}/$TITLE/"`
line=`echo $line | sed -e "s/\\${CONTENT}/$CONTENT/"`
line=`echo $line | sed -e "s/\\${NAVIGATION}/$NAVIGATION/"`
line=`echo $line | sed -e "s/\\${YEAR}/$YEAR/"`
echo -e $line
done
| true |
c11f04689ea31528932d7619255965ebe91b13b4 | Shell | FCSadoyama/colchonet | /scripts/validate_service | UTF-8 | 314 | 3.0625 | 3 | [] | no_license | #!/bin/bash
function validate {
if $(curl -Is $1 | head -n 1 | grep -q "200 OK"); then
return 0
else
exit 1
fi
}
function add_host {
if (cat /etc/hosts | grep "$1"); then
return 0
else
echo "$1" >> /etc/hosts
fi
}
add_host "127.0.0.1 validate.colchonet"
validate validate.colchonet
| true |
38cedec4d9042ea6d431f13e10dcaab352548dd5 | Shell | brugger/easih-toolbox | /sbin/correct_dir_permissions | UTF-8 | 593 | 3.703125 | 4 | [] | no_license | #!/bin/bash
#
# Checks for group writeable permissions in the /data directories. If wrong permissions they are fixed
#
#
# Kim Brugger (15 Jun 2011), contact: kim.brugger@easih.ac.uk
for basedir in `ls /data/`
do
if [[ "$basedir" =~ [A-Z]$ ]]; then
for range in `ls /data/$basedir/`
do
chmod 775 /data/$basedir/
if [[ "$range" =~ [0-9]_[0-9] ]]; then
# echo "Looking in: /data/$basedir/$range/";
chmod 775 /data/$basedir/$range
for project in `ls /data/$basedir/$range/`
do
chmod -R 775 /data/$basedir/$range/$project
done
fi
done
fi
done
| true |
f38d704ee9e0c764f444255161ac364ceefe879b | Shell | sblatnick/resources | /bash/spy.sh | UTF-8 | 1,642 | 3.609375 | 4 | [] | no_license | #!/bin/bash
#boot them from your computer (look up the pid in whowatch or ps):
kill -9 <pid>
#spy on other users:
#show currently logged on people and what they're doing
whowatch
#show currently logged on people:
who
#run a program in a terminal fullscreen, repeating the output:
watch
#watch with color:
watch -c
#current process for each user:
w
#watch the current process for each user:
watch w
#watch tty's (if set up beforehand) See: (watch login, not over ssh) http://www.linuxhelp.net/guides/ttysnoop/
ttysnoop pty
#watch another command line over ssh even (hard to read, but get pid from whowatch -> user -> bash) (see: http://www.terminalinflection.com/strace-stdin-stdout/):
#1,2 is STDOUT,STDERR, and this gets STDIN too
sudo strace -ff -e trace=write -e write=1,2 -p <pid>
#!/bin/bash
if [ $# -lt 1 ]; then
echo "Usage: ${0##*/} [pid]"
echo " Trails a bash process for keystrokes."
exit
fi
regex=".*\"(.*)\".*)"
sudo strace -ff -e trace=write -e write=1,2 -p $1 2>&1 | \
while read pipe
do
[[ $pipe =~ $regex ]]
key="${BASH_REMATCH[1]}"
#echo "Pipe: \"$pipe\""
echo -e "Key: \"$key\""
done
#see when someone logged in to the server last:
last username
#messaging:
#send message to another user:
write
#send a message to all users:
wall -n "Message"
#echo to their command line ("who" can tell you which pts to use):
echo "hello world" > /dev/pts/4
ls -alrt /dev/pts #find the right terminal
cat > /dev/pts/4
Connection to 127.0.0.1 closed by remote host.
Connection to 127.0.0.1 closed.
| true |
9b636cb076a445141ba4fcfcd43cc36e35bd6de2 | Shell | gianlucoDev/braccino-leap-motion | /arduino/upload.sh | UTF-8 | 951 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# script dependencies: arduino-cli, jq
# board information
port="/dev/ttyACM1"
fqbn="arduino:avr:mega"
platform="arduino:avr"
# sketch
sketch=$(pwd)
# from https://stackoverflow.com/a/32708121
prompt_confirm() {
while true; do
read -r -n 1 -p "${1:-Continue?} [y/n]: " REPLY
case $REPLY in
[yY]) echo ; return 0 ;;
[nN]) echo ; return 1 ;;
*) printf " \033[31m %s \n\033[0m" "invalid input"
esac
done
}
# ask confirmation
echo "selected board: $fqbn at $port"
prompt_confirm "do you want to upload?" || exit 0
printf "\n%s\n\n" "installing libraries"
arduino-cli core install $platform
arduino-cli lib install Servo@1.1.7
arduino-cli lib install PacketSerial@1.4.0
# you also need to intall https://github.com/cgxeiji/CGx-InverseK.git
printf "\n%s\n\n" "compiling and uploading"
arduino-cli compile --fqbn $fqbn $sketch
arduino-cli upload -p $port --fqbn $fqbn $sketch
printf "\n%s\n\n" "done."
| true |
25839ce85705d42e20200381da3741aaf0d9427a | Shell | wingsofovnia/seba-webappeng-team24 | /dev.sh | UTF-8 | 754 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
while getopts ":a:pdkh" opt; do
case $opt in
a) auth_enabled="$OPTARG"; export auth=$auth_enabled
;;
p) docker-compose --file deploy/prod/docker-compose.yml up --build --remove-orphans
;;
d) docker-compose --file deploy/dev/docker-compose.yml up --build --remove-orphans
;;
k) docker-compose down
;;
h) echo " Usage: `basename $0`
-a true | false -> enables jwt authentication
-d runs docker-compose up with dev config
-p runs docker-compose up with prod config
-k kills docker-compose services
-h shows this "
exit 1
;;
\?) echo "Invalid option -$OPTARG" >&2
exit 1
;;
:) echo "Option -$OPTARG requires an argument." >&2
exit 1
;;
esac
done
| true |
92a45749995124190d0b98c7109746f93c5c9f34 | Shell | ArjunBEG/DOWNLOADS-ARCHIVE | /gists/c160d6376d7b543b16fe02612fb4f763/get-ssh-fingerprints.sh | UTF-8 | 1,055 | 3.953125 | 4 | [] | no_license | #!/bin/bash
#
# Reference: http://superuser.com/questions/929566/sha256-ssh-fingerprint-given-by-the-client-but-only-md5-fingerprint-known-for-se
#
# standard sshd config path
SSHD_CONFIG=/etc/ssh/sshd_config
# helper functions
function tablize {
awk '{printf("| %-7s | %-7s | %-47s |\n", $1, $2, $3)}'
}
LINE="+---------+---------+-------------------------------------------------+"
# header
echo $LINE
echo "Cipher" "Algo" "Fingerprint" | tablize
echo $LINE
# fingerprints
for host_key in $(awk '/^HostKey/ {sub(/^HostKey\s+/,"");print $0".pub"};' $SSHD_CONFIG); do
cipher=$(echo $host_key | sed -r 's/^.*ssh_host_([^_]+)_key\.pub$/\1/'| tr '[a-z]' '[A-Z]')
if [[ -f "$host_key" ]]; then
md5=$(ssh-keygen -l -f $host_key | awk '{print $2}')
sha256=$(awk '{print $2}' $host_key | base64 -d | sha256sum -b | awk '{print $1}' | xxd -r -p | base64)
echo $cipher MD5 $md5 | tablize
echo $cipher SHA-256 $sha256 | tablize
echo $LINE
fi
done
| true |
ceca3e8921444a77a2195f329b48cbed5543bff3 | Shell | mrupert/MTR | /.show_host | UTF-8 | 498 | 3.0625 | 3 | [] | no_license | #!/bin/sh
# SHOW THE NAME OF THE HOST I'M IN
DATE=`date`
# Save cursor pos
tput sc
# Change scroll region to exclude first line
tput csr 1 $((`tput lines` - 1))
# Move to upper-left corner
tput cup 0 0
# Clear to the end of the line
tput el
# Create a header row
tput setaf 2; tput rev;tput smso;tput bold;printf "%*s" $(tput cols) "$HOSTNAME "
# Restore cursor position
tput rc
# CLEAR THE HEADER WHEN I LOG OUT
trap ~/.logout EXIT
alias clearheader="tput sc;tput cup 0 0;tput el;tput rc"
| true |
63083d600cc14ee3dcfaa424350db14564e41775 | Shell | eth-sri/Spire | /scripts/run_tests.sh | UTF-8 | 779 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
PSI_PATH=""
SYNTHESIZER="./../Spire/bin/x64/Release/Spire.exe"
NUM_PROCESSES=16
NUM_REPEATS=10
Z3_TIMEOUT=3600000
PSI_TIMEOUT=3600000
find . -type f -name "*program.psi" | sed 's/.\///' | sed 's/_program.psi//g' | while read PREFIX; do
for run_id in $(seq 1 $NUM_REPEATS); do
OUTPUT="${PREFIX}_run${run_id}"
echo --psi-path="$PSI_PATH" --prior="${PREFIX}_prior.psi" --program="${PREFIX}_program.psi" --policy="${PREFIX}_policy.psi" --log="${OUTPUT}.log" --csv="${OUTPUT}.csv" --tmp-prefix="${OUTPUT}_" --z3timeout=$Z3_TIMEOUT --psitimeout=$PSI_TIMEOUT --opt-goal=singletons --smt-lib-log="{$OUTPUT}.smtlib" --iteration-log="${OUTPUT}_iteration_log.csv" ">" $OUTPUT.stdout "2>&1"
done
done | xargs -n 9 -I{} -P $NUM_PROCESSES sh -c "mono \"$SYNTHESIZER\" {}"
| true |
0673885b43039d963064344edbdfaefc47801003 | Shell | IBPA/FODMAPsAndGutMicrobiome | /preprocessing/shared/fetch.sh | UTF-8 | 1,190 | 2.8125 | 3 | [
"CC0-1.0"
] | permissive | #!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
pushd $DIR
echo "Log| Fetching ./data/shared/"
wget http://www2.decipher.codes/Classification/TrainingSets/SILVA_SSU_r132_March2018.RData
wget https://gg-sg-web.s3-us-west-2.amazonaws.com/downloads/greengenes_database/gg_13_5/gg_13_5.fasta.gz
wget https://gg-sg-web.s3-us-west-2.amazonaws.com/downloads/greengenes_database/gg_13_5/gg_13_5_otus.tar.gz
tar xzf gg_13_5_otus.tar.gz
rm -rf ./gg_13_5_otus/rep_set_aligned/ # big directory which is not needed now.
## Activate Qiime2 envirnment
. ${DIR}/../../settings.txt
. $CONDA_ENV
conda activate $QIIME_ENV
qiime tools import \
--type 'FeatureData[Sequence]' \
--input-path $DIR/gg_13_5_otus/rep_set/97_otus.fasta \
--output-path $DIR/gg_97_otus.qza
qiime tools import \
--type 'FeatureData[Sequence]' \
--input-path $DIR/gg_13_5_otus/rep_set/94_otus.fasta \
--output-path $DIR/gg_94_otus.qza
qiime tools import \
--type 'FeatureData[Sequence]' \
--input-path $DIR/gg_13_5_otus/rep_set/91_otus.fasta \
--output-path $DIR/gg_91_otus.qza
# After picrust installaion:
download_picrust_files.py
popd
conda deactivate
| true |
a45f6f0e099adcf0a6e17bee0e625d05f642052d | Shell | Roditolus/hello-world | /bashScripts/bashTutorial.sh | UTF-8 | 2,670 | 4.125 | 4 | [] | no_license | #!/bin/bash
massage="hello world"
echo "$massage\n"
# variablen abtrennen
echo "${massage}!\n"
# call other commands
ls .. | grep Bash
# ~~~~~~~~~~~~~~~~~~~~Date~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
date # deutsch
LC_ALL=C date # englisch
# ~~~~~~~~~~~~~~~~~~~~Ausgaben~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
befehl=ls # unsicher
pfad="/home/rodito/*"
# liste=$($befehl $pfad);
# echo " Gefundene Elemente:"
# echo "$liste"
# besser:
for file in $pfad; do
echo "die Datei: $file"
fname=$(basename "$file")
echo "hat den Namen: $fname"
fdir=$(dirname "$file")
echo "im Verzeichnis: $fdir"
done
echo -e "\n~~~~~~~~~~~~~~~~~~ Abschneiden von Mustern ~~~~~~~~~\n\n"
pfad="/var/www/index.html"
echo "Ursprung: $pfad"
echo "${pfad%/*}"
echo "${pfad%%/*}"
echo "${pfad#*/}"
echo "${pfad##*/}"
echo -e "\n~~~~~~~~~~~~~~~~~~ Rechnen ~~~~~~~~~\n\n"
add1=100
add2=3
echo "$add1 / $add2 = $(($add1/$add2))"
add3=4
echo "$add1 + $add2 * $add3 = $(($add1 + $add2 * $add3))"
echo -e "\n~~~~~~~~~~~~~~~~~~Arrays~~~~~~~~~\n\n"
myArray=(Das sind vier Elemente)
myArray+=(und dahinter noch sechs weitere Elemente)
# print whole Array
echo "${myArray[*]}"
echo "number of elements in Arrray: ${#myArray[*]}"
echo -e "\n~~~~~~~~~~~~~~~~~~ Interaktion mit User ~~~~~~~~~\n\n"
# Begrüßung
read -p "Enter your name: " name
echo "Hello $name"
read -p "start Programm A (a) or Programm B (b)?" kommando
if [ $kommando == 'a' ];
then
echo "starte Programm A ... done";
elif [ $kommando == 'b' ]
then
echo "starte Programm B ... done";
else
echo "Abbruch";
fi
echo -e "\n~~~~~~~~~~~~~~~~~~ case and loops ~~~~~~~~~\n\n"
case "$name" in
Sascha) echo "Hello Admin"
echo "You have all rights"
;;
Klaus) echo "Hello second Admin"
echo "You have some rights"
;;
*) echo "I dont no you $name"
echo "you have no rights"
esac
# loops
myArray=("Das" "ist" "das" "Haus vom" "Nikolaus")
i=0;
for var in ${myArray[*]}
do
echo "array[$i] = $var"
((i++))
done
echo
# bildbearbeitung
cdir=$(pwd)
inputdir="${cdir}/buntbild"
outputdir="${cdir}/graubild"
# echo "debug: input = $inputdir | out: $outputdir"
for pic in "$inputdir"/*png ;
do
picname=$(basename "$pic")
echo "Bearbeite Bild: $picname"
convert "$pic" -colorspace Gray "${outputdir}/${picname}"
done
echo -e "\n~~~~~~~~~~~~~~~~~~ Funktionen ~~~~~~~~~\n\n"
#example function
howManyFiles() {
local path="$1"
local anzahl=0
for file in "$path"/* ; do
(( anzahl++ ))
done
echo "Es befinden sich $anzahl Dateien im Verzeichnis $path"
}
# Aufruf der Funktion
path="$(pwd)/buntbild"
howManyFiles $(pwd)
howManyFiles $path
| true |
866f61a9d4466385709338159980546452528b38 | Shell | ibragonza/igmonplugins | /src/check_mysql_galera | UTF-8 | 1,334 | 3.59375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
help_message="\n
This tool connects to mysql with user -u and password -p\n
gets global status of wsrep cluster and checks, if cluster in a valid state\n
USAGE: \n\t$0 -u username -p password\n
AUTHOR: \n\thttps://github.com/leoleovich"
while getopts ":u:p:" OPTION; do
case "$OPTION" in
u) user="$OPTARG" ;;
p) pass="$OPTARG" ;;
*) echo -e $help_message && exit 1 ;;
esac
done
if [ -z $user ]; then
mysqlAuthSection=''
elif [ -z $pass ]; then
mysqlAuthSection="-u ${user}"
else
mysqlAuthSection="-u ${user} -p${pass}"
fi
wsrep_local_state=$(mysql $mysqlAuthSection -Bse "SHOW GLOBAL STATUS LIKE 'wsrep_local_state'" 2>/dev/null | awk '{print $2}')
wsrep_cluster_status=$(mysql $mysqlAuthSection -Bse "SHOW GLOBAL STATUS LIKE 'wsrep_cluster_status'" 2>/dev/null | awk '{print $2}')
wsrep_incoming_addresses=$(mysql $mysqlAuthSection -Bse "SHOW GLOBAL STATUS LIKE 'wsrep_incoming_addresses'" 2>/dev/null | awk '{print $2}')
wsrep_local_state_comment=$(mysql $mysqlAuthSection -Bse "SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment'" 2>/dev/null | awk '{print $2}')
if [ "$wsrep_local_state" = "4" ] && [ "$wsrep_cluster_status" = "Primary" ]; then
echo "Node is a part of cluster $wsrep_incoming_addresses"
else
echo "Something is wrong with this node. Status is $wsrep_local_state_comment"
exit 2
fi
| true |
445602e106a5f7409f12596170348ae386b6e0e1 | Shell | Deeksha-Purushothama/gittest | /movingtoDirectories.sh | UTF-8 | 1,326 | 3.484375 | 3 | [] | no_license | #!/bin/bash -x
#title
echo -e "\n-----SHELL SCRIPT STORE A SET OF FILES IN A DIRECTORY AND MOVING THOSE FILES TO-----"
echo "THEIR RESPECTIVE FOLDERS BY CREATING THE DIRECTORIES WITH THE SAME NAME AS THE FILES"
echo "-----------------------------------------------------------------------------------"
echo -e "\nCreating a directory called 'dir'......"
mkdir dir
echo -e "\nListing the contents of present working directory"
ls
echo -e "\nChanging directory to 'dir'............"
cd dir
echo -e "\nCreating files in 'dir'....."
touch abc.txt
touch def.txt
touch ghi.txt
touch jkl.txt
touch mno.txt
echo -e "\nListing out files created in 'dir' directory"
ls
echo -e "\nChanging directory to parent dircetory......"
cd ..
echo -e "\nCreating Directories same as file name......"
mkdir abc
mkdir def
mkdir ghi
mkdir jkl
mkdir mno
echo -e "\nListing out Contents of Parent Directories......"
ls
echo -e "\nMoving files from 'dir' directory to their respective directories......"
mv dir/abc.txt abc/abc.txt
mv dir/def.txt def/def.txt
mv dir/ghi.txt ghi/ghi.txt
mv dir/jkl.txt jkl/jkl.txt
mv dir/mno.txt mno/mno.txt
echo -e "\nListing out abc directory"
ls abc
echo "Listing out def directory"
ls def
echo "Listing out ghi directory"
ls ghi
echo "Listing out jkl directory"
ls jkl
echo "Listing out mno directory"
ls mno
| true |
21fdf3f14cf9ee6ba98befeb3b5ba3f364c972d5 | Shell | floquation/optoFluids | /Codes/PreProcessing/OptoFluids/moveParticles.sh | UTF-8 | 1,645 | 3.53125 | 4 | [] | no_license | #! /usr/bin/env bash
# Quick script that calls moveParticles.py
# Usage example:
# mainDir=".."
# source "$mainDir/input_names" || exit 1 # Source name conventions
#
# fluidsInFN="./input_flow"
# optoFluidsInFN="$mainDir/input_time"
#
# ICfile="$(./makeInitParticles.sh "$fluidsInFN")" # Generate IC
# ./moveParticles.sh "$fluidsInFN" "$optoFluidsInFN" "$ICfile" "$mainDir/$particlesDN" "$@" # Evolve particles
# rm "$ICfile" # No need to keep the IC file
#
# -- Kevin van As, 04/02/2019
# Preamble
scriptDir="$(dirname "$0")"
inputFluidFN="$1" # File that holds input parameters for the flow; to be sourced.
inputOptofluidFN="$2" # File that holds input parameters for what the optics code needs: camera integration time and related parameters; to be sourced.
partPosFN="$3" # Initial particle positions
outDN="$4" # Output directory name; sanity check is done by moveParticles.py
shift 4;
if [ -f "$inputFluidFN" ]; then
source "$inputFluidFN"
else
>&2 echo "Input file \"$inputFluidFN\" does not exist."
exit
fi
if [ -f "$inputOptofluidFN" ]; then
source "$inputOptofluidFN"
else
>&2 echo "Input file \"$inputOptofluidFN\" does not exist."
exit
fi
if [ ! -f "$partPosFN" ]; then
>&2 echo "ParticlePositions file \"$partPosFN\" does not exist."
exit
fi
if [ "$outDN" == "" ]; then
outDN="particles"
fi
moveParticles.py -i "$partPosFN" -o "$outDN" \
-u "$Umean" --flow="$profile" \
--mod="$modulation" \
--modargs "$modargs" \
-L "$cyl_L" -R "$cyl_R" \
-T "$sim_T" -n "$sim_n_T" \
--t_int="$cam_t_int" --n_int="$cam_n_int" \
--dt="$dt" \
--origin="$geom_origin" \
"$@"
#-T $(mathPy "480e-6*20") -n $(mathPy "24*20")
| true |
573ee98d843ab42491d276198e1cb42cc85a36a3 | Shell | Jokeren/GPA | /bin/install.sh | UTF-8 | 1,213 | 3.6875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
SOURCE_DIR=$(pwd)
DIR=""
SPACK_DIR=""
if [ $# -eq 0 ]; then
DIR=$(pwd)/gpa
else
if [ $# -eq 1 ]; then
DIR=$1
else
if [ $# -eq 2 ]; then
DIR=$1
SPACK_DIR=$2
fi
fi
fi
if [ -z "$DIR" ]; then
echo $DIR
echo $SPACK_DIR
echo "Wrong prefix"
exit
fi
mkdir $DIR
cd $DIR
# Install spack
if [ -z $SPACK_DIR ]; then
git clone https://github.com/spack/spack.git
export SPACK_ROOT=$(pwd)/spack
export PATH=${SPACK_ROOT}/bin:${PATH}
source ${SPACK_ROOT}/share/spack/setup-env.sh
# Install hpctoolkit dependencies
spack install --only dependencies hpctoolkit ^dyninst@master ^binutils@2.34+libiberty~nls
spack install libmonitor@master+dlopen+hpctoolkit
spack install mbedtls gotcha
# Find spack dir
B=$(spack find --path boost | tail -n 1 | cut -d ' ' -f 3)
SPACK_DIR=${B%/*}
fi
CUDA_PATH=/usr/local/cuda/
CUPTI_PATH=$CUDA_PATH/extras/CUPTI/
# install hpctoolkit
cd $SOURCE_DIR
cd hpctoolkit
mkdir build
cd build
../configure --prefix=$DIR/hpctoolkit --with-cuda=$CUDA_PATH \
--with-cupti=$CUPTI_PATH --with-spack=$SPACK_DIR
make install -j8
echo "Install in "$DIR"/hpctoolkit"
cd $SOURCE_DIR
cp -rf ./bin $DIR
export PATH=$DIR/bin:${PATH}
| true |
f34693216515a81cd191c21860f44ffbd49b7878 | Shell | chiKaRau/Linux | /Code/sleep.sh | UTF-8 | 93 | 2.515625 | 3 | [] | no_license | #!/bin/sh
echo "Enter a sentece: \c"
read str
for word in $str
do
echo $word
sleep 2
done
| true |
a6be2b4c3f71bd45703a80aeef8a3be3e682865b | Shell | tdashroy/dev | /linux/debian/setup/zsh.sh | UTF-8 | 12,394 | 3.828125 | 4 | [] | no_license | #!/bin/bash
git_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../../" >/dev/null 2>&1 && pwd )"
source "$git_dir/linux/debian/setup/common.sh"
source "$git_dir/linux/debian/setup/args.sh"
# install packages needed for zsh
zsh_install() {
local packages="zsh"
local setup_type="$g_setup_type"
local ask="$g_ask"
local overwrite=false
local input="$g_input"
local input_required=false
local install_string='install zsh'
local overwrite_string=
local uninstall_string='uninstall zsh'
exists_cmd() { which zsh &> /dev/null; }
install_cmd() { echo "$packages" | xargs sudo apt-get -y install ; }
uninstall_cmd() { echo "$packages" | xargs sudo apt-get --auto-remove -y purge; }
run_setup_task "$setup_type" "$ask" "$overwrite" "$input" "$input_required" "$install_string" "$overwrite_string" "$uninstall_string" 'exists_cmd' 'install_cmd' 'uninstall_cmd'
local ret=$?
unset -f exists_cmd
unset -f install_cmd
unset -f uninstall_cmd
return $ret
}
# oh-my-zsh install
omz_install() {
local omz_dir="$HOME/.oh-my-zsh"
local setup_type="$g_setup_type"
local ask="$g_ask"
local overwrite=false
local input="$g_input"
local input_required=false
local install_string='install oh-my-zsh'
local overwrite_string=
local uninstall_string='uninstall oh-my-zsh'
exists_cmd() { [[ -d "$omz_dir" ]]; }
install_cmd() { sh -c "$(wget -O- https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh) --unattended"; }
uninstall_cmd() {
if [[ ! -x "$omz_dir/tools/uninstall.sh" ]] && ! chmod +x "$omz_dir/tools/uninstall.sh" ; then
return 1
fi
"$omz_dir/tools/uninstall.sh"
}
run_setup_task "$setup_type" "$ask" "$overwrite" "$input" "$input_required" "$install_string" "$overwrite_string" "$uninstall_string" 'exists_cmd' 'install_cmd' 'uninstall_cmd'
local ret=$?
unset -f exists_cmd
unset -f install_cmd
unset -f uninstall_cmd
return $ret
}
# install powerlevel10k
p10k_install() {
local omz_dir="$HOME/.oh-my-zsh"
local p10k_dir="$omz_dir/custom/themes/powerlevel10k"
local setup_type="$g_setup_type"
local ask="$g_ask"
local overwrite=false
local input="$g_input"
local input_required=false
local install_string='install powerlevel10k'
local overwrite_string=
local uninstall_string='uninstall powerlevel10k'
exists_cmd() { [[ -d "$p10k_dir" ]]; }
install_cmd() { git clone https://github.com/romkatv/powerlevel10k.git "$p10k_dir"; }
uninstall_cmd() { rm -rf "$p10k_dir"; }
run_setup_task "$setup_type" "$ask" "$overwrite" "$input" "$input_required" "$install_string" "$overwrite_string" "$uninstall_string" 'exists_cmd' 'install_cmd' 'uninstall_cmd'
local ret=$?
unset -f exists_cmd
unset -f install_cmd
unset -f uninstall_cmd
return $ret
}
# set powerlevel10k as the ZSH_THEME
p10k_theme() {
local omz_theme="$(sed -n $'s/^ZSH_THEME=[\'"]\\(.\\+\\)[\'"]$/\\1/p' "$HOME/.zshrc")"
local setup_type="$g_setup_type"
local ask="$g_ask"
local overwrite="$g_overwrite"
local input="$g_input"
local input_required=false
local install_string='set zsh theme to powerlevel10k/powerlevel10k'
local overwrite_string="overwrite zsh theme from $omz_theme to powerlevel10k/powerlevel10k"
local uninstall_string='set zsh theme back to the default theme'
# todo: could have another command that is passed in that's something like "skip_condition".
# it would be just like it sounds, a condition that would say whether or not to skip
# the setup task. in this particular case, the exists command would be checking
# the existence of 'ZSH_THEME=', while the skip condition would be to skip if
# ZSH_THEME="powerlevel10k/powerlevel10k"
exists_cmd() { [[ "$omz_theme" == 'powerlevel10k/powerlevel10k' ]]; }
install_cmd() {
if [[ -n "$omz_theme" ]] ; then
sed -i 's/\(^ZSH_THEME=\).\+$/\1"powerlevel10k\/powerlevel10k"/' "$HOME/.zshrc"
else
echo 'ZSH_THEME="powerlevel10k/powerlevel10k"' >> "$HOME/.zshrc"
fi
}
uninstall_cmd() { sed -i 's/\(^ZSH_THEME=\).\+$/\1"robbyrussell"/' "$HOME/.zshrc"; }
run_setup_task "$setup_type" "$ask" "$overwrite" "$input" "$input_required" "$install_string" "$overwrite_string" "$uninstall_string" 'exists_cmd' 'install_cmd' 'uninstall_cmd'
local ret=$?
unset -f exists_cmd
unset -f install_cmd
unset -f uninstall_cmd
return $ret
}
# install custom powerlevel10k profile
p10k_profile() {
local git_p10k_profile="$git_dir/linux/debian/p10k.zsh"
local setup_type="$g_setup_type"
local ask="$g_ask"
local overwrite="$g_overwrite"
local input="$g_input"
local input_required=false
local install_string="install git repo's powerlevel10k profile"
local overwrite_string='backup and overwrite current powerlevel10k profile'
local uninstall_string="uninstall git repo's powerlevel10k profile"
exists_cmd() { [[ -f "$HOME/.p10k.zsh" ]] ; }
install_cmd() {
p10k_profile_lines=('#!'"${zsh_shell}")
if exists_cmd ; then
p10k_backup="$HOME/.p10k_$(date +%Y%m%d%H%M%S).zsh"
cp "$HOME/.p10k.zsh" "$p10k_backup"
# copy failed, don't overwrite
if [[ "$?" != 0 ]] ; then
false; return
fi
p10k_profile_lines+=("# previous .p10k.zsh profile backed up to ${p10k_backup}")
fi
p10k_profile_lines+=("source '$git_p10k_profile'")
printf "%s\n" "${p10k_profile_lines[@]}" > "$HOME/.p10k.zsh"
}
uninstall_cmd() {
# todo: restore old p10k profile
rm -f
}
run_setup_task "$setup_type" "$ask" "$overwrite" "$input" "$input_required" "$install_string" "$overwrite_string" "$uninstall_string" 'exists_cmd' 'install_cmd' 'uninstall_cmd'
local ret=$?
unset -f exists_cmd
unset -f install_cmd
unset -f uninstall_cmd
return $ret
}
# set up powerlevel10k config
p10k_config() {
local setup_type="$g_setup_type"
local ask="$g_ask"
local overwrite=false
local input="$g_input"
local input_required=false
local install_string='use installed powerlevel10k config'
local overwrite_string=
local uninstall_string='stop using installed powerlevel10k config'
exists_cmd() { grep '\[\[ ! -f \(\$HOME\|~\)\/\.p10k\.zsh \]\] || source \(\$HOME\|~\)\/\.p10k\.zsh' "$HOME/.zshrc" &> /dev/null ; }
install_cmd() {
echo '' >> "$HOME/.zshrc"
echo '# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.' >> "$HOME/.zshrc"
echo '[[ ! -f $HOME/.p10k.zsh ]] || source $HOME/.p10k.zsh' >> "$HOME/.zshrc"
}
uninstall_cmd() {
# fine if this first command fails
sed -i '/# To customize prompt, run `p10k configure` or edit ~\/.p10k.zsh./d' "$HOME/.zshrc"
sed -i '/\[\[ ! -f \(\$HOME\|~\)\/\.p10k\.zsh \]\] || source \(\$HOME\|~\)\/\.p10k\.zsh/d' "$HOME/.zshrc"
}
run_setup_task "$setup_type" "$ask" "$overwrite" "$input" "$input_required" "$install_string" "$overwrite_string" "$uninstall_string" 'exists_cmd' 'install_cmd' 'uninstall_cmd'
local ret=$?
unset -f exists_cmd
unset -f install_cmd
unset -f uninstall_cmd
return $ret
}
# add custom zsh profile
zsh_profile() {
local git_zsh_profile="$git_dir/linux/debian/profile.zsh"
local setup_type="$g_setup_type"
local ask="$g_ask"
local overwrite=false
local input="$g_input"
local input_required=false
local install_string="use git repo's zsh profile"
local overwrite_string=
local uninstall_string="stop using git repo's zsh profile"
exists_cmd() { grep "source \"$git_zsh_profile\"" "$HOME/.zshrc" &> /dev/null; }
install_cmd() { echo "source \"$git_zsh_profile\"" >> "$HOME/.zshrc"; }
uninstall_cmd() { sed -i "\=source \"$git_zsh_profile\"=d" "$HOME/.zshrc"; }
run_setup_task "$setup_type" "$ask" "$overwrite" "$input" "$input_required" "$install_string" "$overwrite_string" "$uninstall_string" 'exists_cmd' 'install_cmd' 'uninstall_cmd'
local ret=$?
unset -f exists_cmd
unset -f install_cmd
unset -f uninstall_cmd
return $ret
}
# set default shell to zsh
zsh_shell() {
local zsh_shell="$(which zsh)"
# todo: restore previous shell instead of defaulting to bash for uninstall.
local bash_shell="$(which bash)"
local setup_type="$g_setup_type"
local ask="$g_ask"
local overwrite="$g_overwrite"
local input="$g_input"
local input_required=false
local install_string="change shell from $SHELL to $zsh_shell"
local overwrite_string="change shell from $SHELL to $zsh_shell"
local uninstall_string="change shell from $SHELL to $bash_shell"
exists_cmd() { [[ "$SHELL" == "$zsh_shell" ]]; }
install_cmd() { chsh -s "$zsh_shell"; }
uninstall_cmd() { chsh -s "$bash_shell"; }
run_setup_task "$setup_type" "$ask" "$overwrite" "$input" "$input_required" "$install_string" "$overwrite_string" "$uninstall_string" 'exists_cmd' 'install_cmd' 'uninstall_cmd'
local ret=$?
unset -f exists_cmd
unset -f install_cmd
unset -f uninstall_cmd
return $ret
}
# sets
# restart_required - true if at least one install requires a restart to take effect.
# false otherwise
# returns
# 0 - all success
# 1 - at least one failure
install() {
local last=
zsh_install
last=$?
if [[ $last == 1 ]] ; then
echo "Skipping the rest of the zsh setup."
return 1
fi
omz_install
last=$?
if [[ $last == 1 ]] ; then
echo "Skipping the rest of the zsh setup."
return 1
fi
p10k_install
last=$?
if [[ $last == 1 ]] ; then
echo "Skipping the rest of the zsh setup."
return 1
fi
p10k_theme
last=$?
if [[ $last == 1 ]] ; then
echo "Skipping the rest of the zsh setup."
return 1
fi
p10k_profile
last=$?
if [[ $last == 1 ]] ; then
echo "Skipping the rest of the zsh setup."
return 1
elif [[ $last == 0 ]] ; then
restart_required=true
fi
p10k_config
last=$?
if [[ $last == 1 ]] ; then
echo "Skipping the rest of the zsh setup."
return 1
elif [[ $last == 0 ]] ; then
restart_required=true
fi
zsh_profile
last=$?
if [[ $last == 1 ]] ; then
echo "Skipping the rest of the zsh setup."
return 1
elif [[ $last == 0 ]] ; then
restart_required=true
fi
# intentionally last, only want to use zsh if everything was set up properly
zsh_shell
last=$?
if [[ $last == 1 ]] ; then
echo "Skipping the rest of the zsh setup."
return 1
elif [[ $last == 0 ]] ; then
restart_required=true
fi
return 0
}
# sets
# restart_required - true if at least one uninstall requires a restart to take effect.
# false otherwise
# returns
# 0 - all success
# 1 - at least one failure
uninstall() {
local last=
local ret=0
zsh_shell
last=$?
if [[ $last == 1 ]] ; then
ret=1
elif [[ $last == 0 ]] ; then
restart_required=true
fi
zsh_profile
last=$?
if [[ $last == 1 ]] ; then
ret=1
elif [[ $last == 0 ]] ; then
restart_required=true
fi
p10k_config
last=$?
if [[ $last == 1 ]] ; then
ret=1
elif [[ $last == 0 ]] ; then
restart_required=true
fi
p10k_profile
last=$?
if [[ $last == 1 ]] ; then
ret=1
elif [[ $last == 0 ]] ; then
restart_required=true
fi
p10k_theme
last=$?
if [[ $last == 1 ]] ; then
ret=1
elif [[ $last == 0 ]] ; then
restart_required=true
fi
p10k_install
last=$?
if [[ $last == 1 ]] ; then
ret=1
fi
omz_install
last=$?
if [[ $last == 1 ]] ; then
ret=1
fi
zsh_install
last=$?
if [[ $last == 1 ]] ; then
ret=1
fi
}
restart_required=false
eval "$g_setup_type"
if [[ "$restart_required" == true ]] ; then
echo "***** NOTE: Restart shell when the script is done running *****"
fi | true |
3cb77751b1e48b70d037c9ca6d0f12a2a8254d07 | Shell | filecoin-project/filecoin-ffi | /rust/scripts/publish-release.sh | UTF-8 | 2,356 | 3.953125 | 4 | [
"Apache-2.0",
"MIT"
] | permissive | #!/usr/bin/env bash
set -Exeuo pipefail
main() {
if [[ -z "$1" ]]
then
(>&2 echo '[publish-release/main] Error: script requires a release (gzipped) tarball path, e.g. "/tmp/filecoin-ffi-Darwin-standard.tar.tz"')
exit 1
fi
if [[ -z "$2" ]]
then
(>&2 echo '[publish-release/main] Error: script requires a release name, e.g. "filecoin-ffi-Darwin-standard" or "filecoin-ffi-Linux-standard"')
exit 1
fi
local __release_file=$1
local __release_name=$2
local __release_tag="${CIRCLE_SHA1:0:16}"
# make sure we have a token set, api requests won't work otherwise
if [ -z $GITHUB_TOKEN ]; then
(>&2 echo "[publish-release/main] \$GITHUB_TOKEN not set, publish failed")
exit 1
fi
# see if the release already exists by tag
local __release_response=`
curl \
--header "Authorization: token $GITHUB_TOKEN" \
"https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/releases/tags/$__release_tag"
`
local __release_id=`echo $__release_response | jq '.id'`
if [ "$__release_id" = "null" ]; then
(>&2 echo '[publish-release/main] creating release')
RELEASE_DATA="{
\"tag_name\": \"$__release_tag\",
\"target_commitish\": \"$CIRCLE_SHA1\",
\"name\": \"$__release_tag\",
\"body\": \"\"
}"
# create it if it doesn't exist yet
#
__release_response=`
curl \
--request POST \
--header "Authorization: token $GITHUB_TOKEN" \
--header "Content-Type: application/json" \
--data "$RELEASE_DATA" \
"https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/releases"
`
else
(>&2 echo '[publish-release/main] release already exists')
fi
__release_upload_url=`echo $__release_response | jq -r '.upload_url' | cut -d'{' -f1`
curl \
--request POST \
--header "Authorization: token $GITHUB_TOKEN" \
--header "Content-Type: application/octet-stream" \
--data-binary "@$__release_file" \
"$__release_upload_url?name=$(basename $__release_file)"
(>&2 echo '[publish-release/main] release file uploaded')
}
main "$@"; exit
| true |
e3c7ae3b3737d96e7649beb92589e1b3d451eac8 | Shell | nc-lot/dotfiles | /bin/run-on-branch.sh | UTF-8 | 1,769 | 4.4375 | 4 | [] | no_license | #!/usr/bin/env bash
# Run a command against all the files changed in a branch, optionally matching a
# pattern.
#
# Synopsis:
# run-on-branch [--base <branch>] [--path <path>] <command>
# [<args-for-comand> ...] [-- <more-args> ...]
#
# Options:
# <comand>
# The command to run. Should accept any number of files as arguments.
# --base <branch>
# Compare the current working tree agains the given branch or commit.
# Defaults to master.
# --path <path-or-glob>
# Pass the given path or pattern to git diff, to restrict the compared
# files. May be given multiple times.
# --
# All further options are passed directly to <command>.
BASE=master
PATHS=()
COMMAND=
COMMAND_ARGS=()
while [[ $# -gt 0 ]]; do
case "$1" in
--base)
BASE="$2"
shift 2
;;
--path)
PATHS+=("$2")
shift 2
;;
--)
shift
break
;;
*)
if [[ -z $COMMAND ]]; then
COMMAND="$1"
else
COMMAND_ARGS+=("$1")
fi
shift
;;
esac
done
# After --, send remaining args to command
if [[ $# -gt 0 ]]; then
COMMAND_ARGS+=("$@")
fi
# Read file names in $files array. We need the subshell redirection to read from
# stdin but $files in the current scope. Previously this used xargs, but it had
# issues with foregrounding and stdin for interactive commands.
mapfile -t files < <(
git diff --name-status "$BASE" "${PATHS[@]}" | awk '/^(A|M)/ { print $2 }'
)
if [[ ${#files} == 0 ]]; then
echo "Warning: no files on the branch."
fi
# Put it all together
exec "$COMMAND" "${COMMAND_ARGS[@]}" "${files[@]}"
| true |
583f8b5fbfa77800dad14a2664e92bda20895c9f | Shell | kita12/project | /guessinggame.sh | UTF-8 | 345 | 3.65625 | 4 | [] | no_license | function guess {
n=$(ls -l | grep "^-" | wc -l)
while true
do
echo "Enter your guess:"
read nuser
if [[ $nuser -gt $n ]]
then
echo "Guess too big"
elif [[ $nuser -lt $n ]]
then
echo "Guess too small"
else
echo "Congratulations"
break
fi
done
}
echo "How many files are there in the current directory?"
guess
| true |
e07eb1846837e722f95bdcbdb75d4b56b745c2d7 | Shell | rafadevsantana/Trybe-Exercises | /Exercises_Shell_Script/exercise1.3.sh | UTF-8 | 102 | 2.84375 | 3 | [] | no_license | #!/bin/bash
DIA=$(date +%F)
for FILE in `ls *.png`
do
mv $FILE ${DIA}-${FILE}
done
| true |
dd14a7a18fff903376104d218404b3a7ebb91ada | Shell | ancaudaniel/first-try-at-bash | /ceva.sh | UTF-8 | 873 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Terminate the whole script immediately with a success status.
quit() {
  exit 0
}
# Print the current working directory, the login user ($USER) and the
# home directory ($HOME), one value per line.
details() {
  printf '%s\n' "$(pwd)" "$USER" "$HOME"
}
# Show who is currently logged in (thin wrapper around `who`).
function identitate {
who
}
# List the valid login shells installed on the system (/etc/shells).
function shell {
cat /etc/shells
}
# For the first five entries of /etc/passwd, print the login name, the
# GECOS ("defined") name and the home directory on one line each.
# Fix: the original glued a stray "-P" onto the awk program text
# ('{...}'-P), which made awk abort with a syntax error so the function
# printed nothing; the "-P" has been removed and the useless `cat` pipe
# replaced by passing the file to awk directly.
function home {
awk -F : '{print "LoginUser: " $1 "; DefinedUser: " $5 "; HomePath: " $6 }' /etc/passwd | head -5
}
# Main interactive loop: print the (Romanian) menu, read the user's
# selection and dispatch to the matching helper. Option 1 terminates the
# script via quit(); any unrecognized input just redisplays the menu.
while true
do
echo "Salutari !
1.Apasati 1 pentru a iesi din script!
2.Apasati 2 pentru a afla detali !
3.Apasati 3 pentru a afla cum sunteti logat!
4.Apasati 4 pentru a afla ce shelluri sunt instalate pe sistem!
5.Apasati 5 pentru a afla numele de login numele definit si a directorului home pentru userii logati pe sistem
"
# Read the menu choice from stdin.
read variabila
# Each option is tested independently; -eq compares arithmetically.
if [[ $variabila -eq 1 ]]; then
quit
fi
if [[ $variabila -eq 2 ]]; then
details
fi
if [[ $variabila -eq 3 ]]; then
identitate
fi
if [[ $variabila -eq 4 ]]; then
shell
fi
if [[ $variabila -eq 5 ]]; then
home
fi
done
| true |
06803b62efb03f5d0228a3ce61e7130a2a92a506 | Shell | pbhetwal/CSCI3308 | /Bhetwal_Eldar_HW1/RegexAnswers.sh | UTF-8 | 2,079 | 3.90625 | 4 | [] | no_license | #!/bin/bash
#Pari Bhetwal
#Omer Eldar
# Answer eight regex counting questions about the data file passed as $1.
#Terminate if RegexAnswers.sh is not followed by a filename (the intended use)
#If not exactly 1 argument, display usage and quit
if [[ $# -ne 1 ]]
then
echo "Usage: $0 filename"
exit 1
fi
# `grep -E` replaces the deprecated `egrep` alias (modern GNU grep emits
# a warning for egrep); behaviour is identical. "$1" is quoted so
# filenames containing spaces work.
#"$" means ends with
echo "1. How many lines end with a number?"
grep -E '[0-9]$' "$1" | wc -l
#"^" means starts with
echo "2. How many lines start with a vowel?"
grep -E '^[aeiouAEIOU]' "$1" | wc -l
#[A-Za-z] means the whole alphabet; {9} limits the match to nine letters
# Fix: the original class [Aa-Zz] contains the reversed range a-Z, which
# GNU grep rejects ("Invalid range end"); [A-Za-z] is the intended class.
echo "3. How many 9 letter (alphabet only) lines?"
grep -E '^[A-Za-z]{9}$' "$1" | wc -l
#This question was from lab 2
#We weant any number (3 digits), any number(3 digits), any number(4 digits)
echo "4. How many phone numbers are in the dataset (format: ‘_ _ _-_ _ _-_ _ _ _’)?"
grep -E '^[0-9]{3}-[0-9]{3}-[0-9]{4}' "$1" | wc -l
#This question was from lab 2 but starting with only 303
#Similar to question 4. except 303 is static, not looking for range
echo "5. How many city of Boulder phone numbers (starting with 303)?"
grep -E '^303-[0-9]{3}-[0-9]{4}' "$1" | wc -l
#Start with any number, .+ means anything afterwards, then has to end with vowel
echo "6. How many lines begin with a number and end with a vowel?"
grep -E '^[0-9].+[aeiouAEIOU]$' "$1" | wc -l
#STarts with anything, ends with UC Denver address
echo "7. How many email addresses are from UC Denver? (Eg: end with UCDenver.edu)?"
grep -E '.+@UCDenver.edu$' "$1" | wc -l
#^ matches position just before the first character of the string
#First, we find lines that start in the (n-z) range that are upper or lower case
#Begin with n-z range, followed by any alphabet character, number, or hyphen, then literal dot, then any alphabet, number or hyphen then @ and then any other characters
echo "8. How many email addresses are in ‘first.last’ name format and involve someone whose first name starts with a letter in the second half of the alphabet (n-z)?"
grep -E '^[n-zN-Z][a-zA-Z]*\.[a-zA-Z]*@.+$' "$1" | wc -l
cc0d009c157c0dcbf92bb4d4415fc95f4c6d737f | Shell | libmingw-w64/libboost-mingw-w64 | /build.debian.sh | UTF-8 | 551 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Tienipia <tienipia@gmail.com>
# https://github.com/libmingw-w64/mingw-w64-boost.git
# Cross-compile Boost for 64-bit Windows with the mingw-w64 toolchain and
# install the result next to this script under libboost-mingw-w64/.
# Resolve the directory this script lives in (readlink -f follows symlinks).
SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
BOOST_ROOT=$SCRIPT_DIR/boost
# Write the Boost.Build toolchain definition. The unquoted EOF delimiter
# is irrelevant here (no expansions inside); no lines may be added inside
# the here-document -- they would become jam-file content.
cat > $BOOST_ROOT/user-config.jam << EOF
using gcc : mingw64 : x86_64-w64-mingw32-g++
:
<rc>x86_64-w64-mingw32-windres
<archiver>x86_64-w64-mingw32-ar
;
EOF
# Stage the install prefix, then bootstrap and build/install Boost.
mkdir -p $SCRIPT_DIR/libboost-mingw-w64
cd $BOOST_ROOT
./bootstrap.sh
./b2 --user-config=./user-config.jam --prefix=$SCRIPT_DIR/libboost-mingw-w64 target-os=windows address-model=64 variant=release install
| true |
98c7598e893d97d7ec7c11c10f0f2d8d3d290e2c | Shell | ditesh/shell-tools | /mysql-dropuser.sh | UTF-8 | 586 | 4.15625 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Drop a MySQL user via the mysql command-line client.
# First param: mysql username
# Second param: mysql password (optional)
# Third param: mysql user to drop
# NOTE(review): $1/$2 are forwarded verbatim to mysql below, so the
# caller apparently must pass "-u" and the name as two arguments --
# confirm against the Usage text.
MYSQL_PATH=`which mysql`
if [ "${MYSQL_PATH}" = "" ]; then
echo "Couldn't find mysql binary"
echo
exit 1
fi
# Accept 3 to 6 positional arguments; anything else is a usage error.
if [ $# -gt 6 ] || [ $# -lt 3 ]; then
echo "Usage:"
echo "	./dropmyuser.sh -u mysqlusername (-p mysqlpassword) usertodrop@host"
echo
exit 2
fi
# With "-p password" present, the user to drop shifts from $3 to $5.
# NOTE(review): mysql expects the password glued to -p (-pSECRET or
# --password=SECRET); "-p ${4}" passes them as two separate words, so
# mysql will likely prompt and treat ${4} as a database name -- verify.
if [ "${3}" = "-p" ]; then
MYSQL_PASSWD="-p ${4}"
USER_TO_DROP=${5}
else
USER_TO_DROP=${3}
fi
# NOTE(review): the SQL statement is built by direct interpolation of the
# argument; do not feed untrusted input to this script.
echo "DROP USER ${USER_TO_DROP}" | ${MYSQL_PATH} ${1} ${2} ${MYSQL_PASSWD}
| true |
ce508fab70adda96ac3223caaa9f47ff24a096af | Shell | linlinas/ansible_cloudformation | /install.sh | UTF-8 | 1,163 | 3.828125 | 4 | [] | no_license | #!/usr/bin/env bash
# override base_dir by setting $BASE_DIR env variable
base_dir=${BASE_DIR:-/usr/local/ansible_cloudformation}
# Write a fresh ~/.ansible.cfg that points ansible at the inventory and
# plugin directories this installer copies under ${base_dir}. The
# unquoted EOF delimiter is deliberate: ${base_dir} is expanded when the
# file is written. No lines may be added inside the here-document --
# everything between "cat << EOF" and "EOF" becomes file content.
function create_config() {
cat << EOF > ~/.ansible.cfg
[defaults]
transport = local
gathering = explicit
host_key_checking = False
retry_files_enabled = False
inventory = ${base_dir}/hosts
filter_plugins = ${base_dir}/filter_plugins
lookup_plugins = ${base_dir}/lookup_plugins
library = ${base_dir}/library
EOF
}
# -f forces overwriting an existing ~/.ansible.cfg (after backing it up).
if [[ $1 == '-f' ]];then
force=1
else
force=0
fi
echo "Installing ansible and deps using pip..."
# NOTE(review): assumes the script is run from the repository root so
# that requirements.txt and the plugin directories resolve -- confirm.
pip install -r requirements.txt
echo "Installing filters/lookups under ${base_dir} ..."
# Errors (including "already exists") from mkdir are silenced.
mkdir $base_dir 2>/dev/null
echo 'localhost' > ${base_dir}/hosts
cp -r library $base_dir
cp -r filter_plugins $base_dir
cp -r lookup_plugins $base_dir
cp -r custom_utils $base_dir
echo "Creating ~/.ansible.cfg ..."
if ! [[ -f ~/.ansible.cfg ]];then
create_config
else
if [[ $force == 1 ]];then
echo "Moving ~/.ansible.cfg to ~/.ansible.cfg.orig.$$"
# Brace expansion renames the file to ~/.ansible.cfg.orig.<pid>.
mv ~/.ansible.cfg{,.orig.$$}
create_config
else
echo "Warning: ~/.ansible.cfg already exists. not going to overwrite it unless -f flag is set."
fi
fi
| true |
b3cb2837962594740f050a0f7b2608aed3c90d8a | Shell | softwarevamp/damosproject | /System/persistent-cpu-overclock.sh | UTF-8 | 1,568 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Author: Daniel Puckowski
# Date (last edit): 04/04/2014
# Purpose: Overclock the Exynos 4412 Prime SoC in the oDroid U3 from 1.7 GHz to
# 1.92 GHz at boot.
# Script version: 0.0.0.4
## Color map for script text.
txtblk='\e[0;30m' # Black - Regular
txtred='\e[0;31m' # Red
echo -e "${txtred}Enabling persistent CPU overclock now..." ; tput sgr0
if [ ! -f /etc/rc.local ]; then
touch /etc/rc.local
echo "Created file /etc/rc.local"
fi
## Uncomment the next line to create a backup of the /etc/rc.local file when this script is run.
# cp /etc/rc.local /etc/rc.local.bak
stringCheck=$(grep -R "echo 1920000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq" /etc/rc.local)
if [ ! -n "$stringCheck" ]; then
echo "Overclock statement already exists in /etc/rc.local."
echo "Done."
exit 0
fi
echo -e "Script will remove \"exit 0\" statement before appending to file /etc/rc.local..."
read -p "Would you like to more /etc/rc.local to ensure the last line of file is exit 0? " -n 1 -r
if [[ $REPLY =~ ^[Yy]$ ]]
then
echo
more /etc/rc.local
echo
read -p 'Abort script now? ' -n 1 -r
if [[ $REPLY =~ ^[Yy]$ ]]
then
echo
echo "Done."
exit 0
fi
fi
echo
if [ -f /etc/rc.local ]; then
sed -i '$ d' /etc/rc.local
fi
echo "Appending overclock statement to file /etc/rc.local..."
echo "echo 1920000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq" >> /etc/rc.local
echo "exit 0" >> /etc/rc.local
echo -e "${txtred}Done." ; tput sgr0
exit 0
| true |
5cb01f6cf54984adff1fba245a71a04fa8e6cd0f | Shell | sznajder/hepaccelerate-cms | /tests/hmm/run.sh | UTF-8 | 1,778 | 2.890625 | 3 | [] | no_license | #!/bin/bash
#Abort the script if any step fails
set -e
#Use this many threads, should be around 1-4
export NTHREADS=4
#Set to -1 to run on all files, 1 for debugging/testing
export MAXCHUNKS=1
#This is where the intermediate analysis files will be saved and loaded from
#As long as one person produces it, other people can run the analysis on this
#Currently, use the cache provided by Joosep
#export CACHE_PATH=/storage/user/jpata/hmm/cache
#export CACHE_PATH=mycache
export CACHE_PATH=/storage/user/nlu/hmm/cache2
export SINGULARITY_IMAGE=/storage/user/jpata/gpuservers/singularity/images/cupy.simg
export PYTHONPATH=coffea:hepaccelerate:.
export NUMBA_THREADING_LAYER=tbb
export NUMBA_ENABLE_AVX=1
export NUMBA_CPU_FEATURES=+sse,+sse2,+avx,+avx2
export NUMBA_NUM_THREADS=$NTHREADS
export OMP_NUM_THREADS=$NTHREADS
export HEPACCELERATE_CUDA=0
export KERAS_BACKEND=tensorflow
#This is the location of the input NanoAOD and generally does not need to be changed
export INPUTDATAPATH=/storage/user/jpata
## Step 1: cache ROOT data (need to repeat only when list of files or branches changes)
## This can take a few hours currently for the whole run (using maxchunks -1 and --nthreads 24)
#singularity exec --nv -B /storage $SINGULARITY_IMAGE python3 tests/hmm/analysis_hmumu.py \
# --action cache --maxchunks $MAXCHUNKS --chunksize 1 \
# --nthreads 1 --cache-location $CACHE_PATH \
# --datapath $INPUTDATAPATH
## Step 2: Run the physics analysis
singularity exec --nv -B /storage $SINGULARITY_IMAGE python3 tests/hmm/analysis_hmumu.py \
--action analyze --action merge --maxchunks $MAXCHUNKS \
--cache-location $CACHE_PATH \
--nthreads $NTHREADS \
--out ./out --do-factorized-jec \
--datasets ggh_amcPS --eras 2016 \
--datapath $INPUTDATAPATH
| true |
6b1efa83048e9b86cf7d469d17b64405f63933da | Shell | savadev/SystemAdministration-1 | /find_and_replace.sh | UTF-8 | 348 | 3.734375 | 4 | [] | no_license | #START SCRIPT
#!/bin/bash
set -eo pipefail
# Print an error message and abort the script with status 1.
# Fix: the message used to be interpolated into printf's *format* string,
# so '%' sequences in $1 were misinterpreted (and a message starting with
# '-' could be taken as an option); it is now passed as a %s argument.
# Diagnostics also go to stderr instead of stdout.
error() {
  printf 'Error: %s\n' "$1" >&2
  exit 1
}
# Positional arguments: input file, output file, search string, replacement.
infile=$1
outfile=$2
findstr=$3
replacestr=$4
# Validate before doing any work; error() aborts the script.
if [[ $# -ne 4 ]]; then
error "Requires exactly 4 arguments"
elif [[ ! -f "$infile" ]]; then
error "$infile must be valid file"
fi
# Replace every occurrence of $findstr with $replacestr and write to $outfile.
# NOTE(review): $findstr/$replacestr are spliced into the sed expression,
# so a "/" or regex metacharacter in either will break or change the
# substitution; $infile/$outfile are unquoted and will word-split -- verify.
sed s/"$findstr"/"$replacestr"/g $infile > $outfile
cat $outfile
#END SCRIPT
| true |
e0533fec657d05b065e1dd335be9b4adcd8cb116 | Shell | iot-alex/rocks | /admin/maintenance/removeBinariesGithub/removeFileFromHistoryKnownFiles.sh | UTF-8 | 1,417 | 3.859375 | 4 | [] | no_license | #!/bin/bash
#
#
# Clem
# this script removes from the current HEAD all the way back
# all the references to the files listed in the file passed
# as a first argument.
# Only file not currently present in the working direcotry
# are deleted
#
# This script can be used to reduce the size of a git repo
#
# ATT: this script will rewrite all the histrory of the repo
# aka it is dangerous: BACKUP YOU REPO
#
# If you have unmerged branches this script will not work properly,
# it will leave the unmerged branched untouched and they will
# keep on referencing the data.
#
# Check arugments
if [ -f "$1" ];then
inputFile=$1;
else
echo "Error no arguments specified"
echo "Usage: "
echo " $0 fileList "
exit -1;
fi
#
# check we are at the top directory
#
if [ ! -d ".git" ]; then
echo "Error this command should be run from the top level directory of your git repo"
exit -1
fi
fileList=""
for i in `awk '{print $1}' $inputFile`;do
fileList="$fileList $i"
done
echo git count-objects -v -- before files deletion
git count-objects -v
echo "Running filter branch"
git filter-branch --index-filter "git rm --cached --ignore-unmatch $fileList" --tag-name-filter cat -- --all
#
# now we really delete old unused stuff
#
#rm -rf .git/refs/original/
#git reflog expire --all --expire='0 days'
#git repack -A -d
#git prune
echo 'not clone this repo to reduce its size'
| true |
32750df2aba5569ab90372ebe2b2cd256ffc5b56 | Shell | chlandin/shell_scripts | /validAlphaNum | UTF-8 | 832 | 4.3125 | 4 | [] | no_license | #!/bin/sh
# validAlphaNum - Ensures that input consists only of alphabetical
# and numeric characters.
validAlphaNum() {
  # Validate arg: returns 0 if all upper + lower + digits, else 1.
  # Fix: the comparison used the caller's global $input instead of the
  # function's own argument, so the check only worked when the caller
  # happened to store the value in a variable named "input". The value
  # is also now passed via quoted printf (the old unquoted `echo $1`
  # word-split it and could misparse leading dashes).
  compressed="$(printf '%s' "$1" | sed -e 's/[^[:alnum:]]//g')"
  # Variants for other validations:
  # Force uppercase and allow space, comma and period:
  #   sed 's/[^[:upper:] ,.]//g'
  # Phone numbers: integers, spaces, parentheses and dashes:
  #   sed 's/[^[:digit:]\(\)- ]//g'
  if [ "$compressed" != "$1" ] ; then
    return 1
  else
    return 0
  fi
}
# Sample usage of this function in a script
# Reads one line from stdin and reports whether it is purely alphanumeric.
echo "Enter input: "
read input
if ! validAlphaNum "$input" ; then
echo "Your input must consist of only letters and numbers." >&2
exit 1
else
echo "Input is valid."
fi
exit 0
| true |
2db4d4ae6df8e3048041a4937fd11697e3cfc037 | Shell | hm1365166/opencsw | /csw/mgar/pkg/mod_wsgi/trunk/files/postinstall | UTF-8 | 561 | 3 | 3 | [] | no_license | #!/bin/sh
# Post-install hook: register mod_wsgi with the CSW Apache 2 install.
# Paths are computed relative to the (optional) package install root.
CSW_PREFIX=${PKG_INSTALL_ROOT}/opt/csw
AP2_PREFIX=$CSW_PREFIX/apache2
AP2_BINDIR=$AP2_PREFIX/sbin
AP2_LIBEXEC=$AP2_PREFIX/libexec
AP2_CONFDIR=$AP2_PREFIX/etc
# NOTE(review): AP2_EXTRADIR and AP2_CONFIG are defined but unused here;
# presumably kept for template consistency with sibling hooks -- confirm.
AP2_EXTRADIR=$AP2_CONFDIR/extra
AP2_CONFIG=$AP2_CONFDIR/httpd.conf
# Enable the wsgi module
# Default the root to "/" and run apxs inside it: -e edits httpd.conf,
# -a activates the module, -n names it "wsgi".
PKG_INSTALL_ROOT=${PKG_INSTALL_ROOT:-'/'}
chroot $PKG_INSTALL_ROOT \
  $AP2_BINDIR/apxs -S LIBEXECDIR=$AP2_LIBEXEC -e -a -n wsgi mod_wsgi.so
# Finito
cat <<END
NOTICE: mod_wsgi is enabled in httpd.conf but the server was not restarted.
Please configure mod_wsgi and restart apache.
END
exit 0
| true |
c83b5e2fce5da1f53611d0f04cbb3f3d37b91763 | Shell | quantomb/MEM | /cold_atom/MEM_xzx/readmc_tdmx/gobetts | UTF-8 | 2,428 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# Driver: generate the input parameter files for, and run, the rddcams
# binning step and the readmc maximum-entropy preprocessing step.
# parameter:
#   $1: do random swap data sequence or not (1/0)
#   $2: index of momentum k
#   $3: number of binned data
#   $4: number of skip in imaginary time direction
#   $5: coarse grain size for input data
#   $6: number of Matsubara frequency used in MEM
#----------------------------------------------------
#path and filename
#----------------------------------------------------
# ../path_betts presumably exports dat_name, anneal_file, flatpath and
# tmp_path, all used below -- TODO confirm.
source ../path_betts $1 $2
data=$dat_name
adex=.dat
adex1=_bak
#echo "my working path=" $my_path
#echo "mc data path=" $mcdat_path
#echo "mc data name=" $data$index$adex
#-----------------------------------------------------
# default file and initial images
#-----------------------------------------------------
# Without a 5th argument, seed the default model and initial image from
# the annealing result; otherwise use the flat default model.
if [ -z $5 ]
then
cp $anneal_file ../tmp_data/deflt.dat
cp $anneal_file ../tmp_data/f0.dat
echo "Using annealing method"
else
#for annealing algorithm
#echo "default model is:" $anneal_file/$index$adex1$adex
cp $flatpath/model_dos ../tmp_data/deflt.dat
cp $flatpath/model_dos ../tmp_data/f0.dat
echo "Using default model"
fi
#-----------------------------------------------------
#create parameter file for rdbin.f
# (one answer per line, in the order the Fortran program prompts)
#-----------------------------------------------------
#remove old one
rm -f rdinput.txt
#create new one by add a line for output file name
rdoutfile=$tmp_path/rdbinout.dat
echo $rdoutfile > rdinput.txt
#Which quantity to be calculated? G(0),Cha(1),XDD(8)
echo 0 >> rdinput.txt
#number of input file
echo 1 >> rdinput.txt
#number of bins
echo $3 >> rdinput.txt
#Pmin=?
echo 1e-3 >> rdinput.txt
#coarse grain size
echo $4 >> rdinput.txt
#print Histogram or not?
echo y >> rdinput.txt
#input file name
echo $data >> rdinput.txt
#Ick=?
echo 0 >> rdinput.txt
#run rddcams
./rddcams < rdinput.txt
#-------------------------------------------------------
#create parameter file for readmc.f
#-------------------------------------------------------
#remove the old one
rm -f mcinput.txt
#input file name, i.e. output file of rdbins.f
echo $rdoutfile > mcinput.txt
#output file name
rdmcoutfile=$tmp_path/mc.dat
echo $rdmcoutfile >> mcinput.txt
#default file name
dftfile=$tmp_path/deflt.dat
echo $dftfile >> mcinput.txt
#eigenvalue file name
eigfile=$tmp_path/eig.dat
echo $eigfile >> mcinput.txt
#value of range, machine precision
echo 1.12E-20 >> mcinput.txt
#type of kernel
echo 2 >> mcinput.txt
#run readme
./readmc_linuxms < mcinput.txt
#./rdmc.out < mcinput.txt
| true |
37641752dbcfd4cce7624ff753cdc177db85bad2 | Shell | pombredanne/rpm-py-installer | /scripts/lint_bash.sh | UTF-8 | 925 | 3.703125 | 4 | [
"MIT"
] | permissive | #!/bin/bash -v
set -e
# Lint every *.sh file under ci/, scripts/ and tests/ (relative to the
# repository root, one directory above this script) with `bash -n` and
# bashate; exit non-zero if any file fails a check.
# shellcheck disable=SC2039
pushd "$(dirname "${0}")/.." > /dev/null
ROOT_DIR=$(pwd)
popd > /dev/null
# `command -v` is the POSIX-specified way to test for an installed tool
# ("which" is an external, non-standardized command).
if ! command -v bashate > /dev/null; then
    echo "ERROR: Install bashate." 1>&2
    exit 1
fi
# if ! command -v shellcheck > /dev/null; then
#     echo "ERROR: Install shellcheck." 1>&2
#     exit 1
# fi
FILES="$(find "${ROOT_DIR}/ci" "${ROOT_DIR}/scripts" "${ROOT_DIR}/tests" \
    -name "*.sh")"
STATUS=0
# Fix: iterate line by line instead of expanding ${FILES} unquoted,
# which word-split any path containing spaces into garbage arguments.
while IFS= read -r FILE; do
    [ -n "${FILE}" ] || continue
    if ! bash -n "${FILE}"; then
        STATUS=1
        echo "${FILE}: NG at sh -n"
        continue
    fi
    if ! BASHATE_RESULT=$(bashate --ignore E006 "${FILE}") || \
        [ "${BASHATE_RESULT}" != "" ]; then
        STATUS=1
        echo "${FILE}: NG at bashate"
        continue
    fi
    # if ! shellcheck "${FILE}"; then
    #     STATUS=1
    #     echo "${FILE}: NG at shellcheck"
    #     continue
    # fi
    echo "${FILE}: OK"
done <<< "${FILES}"
exit "${STATUS}"
| true |
47c3fcf248c73e91f5a838d345cf7c66f49c9631 | Shell | SebastianSB/dotfiles | /git/set-config.sh | UTF-8 | 610 | 2.609375 | 3 | [] | no_license | # Let git know who you are
git config --global user.name "$GIT_AUTHOR_NAME"
git config --global user.email "$GIT_AUTHOR_EMAIL"
# only push the current branch
git config --global push.default simple
# global ignore list
git config --global core.excludesfile ~/.gitignore_global
# tell git to use our nice repo template
git config --global init.templatedir '~/.git_template'
git config --global alias.ctags '!~/.git_template/hooks/ctags'
# use diff-so-fancy when diffing
git config --global pager.diff "diff-so-fancy | less --tabs=4 -RFX"
git config --global pager.show "diff-so-fancy | less --tabs=4 -RFX"
| true |
2df0f8a755befe0d18dc54f4d1119978acbaabb3 | Shell | ian505c5/dotfiles | /.brew | UTF-8 | 1,523 | 2.828125 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/usr/bin/env bash
# Bootstrap a macOS machine with Homebrew: update brew, install CLI
# formulae, then install GUI applications via brew-cask.
# Make sure we’re using the latest Homebrew
brew update
# Upgrade any already-installed formulae
brew upgrade
# Install GNU core utilities (those that come with OS X are outdated)
brew install coreutils
echo "Don’t forget to add $(brew --prefix coreutils)/libexec/gnubin to \$PATH."
# Install GNU `find`, `locate`, `updatedb`, and `xargs`, g-prefixed
brew install findutils
# Install Bash 4
brew install bash
# Install wget with IRI support
brew install wget --enable-iri
# Install more recent versions of some OS X tools
brew tap homebrew/dupes
brew install homebrew/dupes/grep
# These two formulae didn’t work well last time I tried them:
#brew install homebrew/dupes/vim
#brew install homebrew/dupes/screen
# Install other useful binaries
brew install ack
#brew install exiv2
brew install git
brew install imagemagick
# brew install lynx
brew install node
# brew install pigz
# brew install rename
# brew install rhino
# brew install tree
brew install webkit2png
# brew install zopfli
brew install trash
brew install thefuck
brew install rbenv ruby-build
brew tap homebrew/versions
# Install native apps
brew install caskroom/cask/brew-cask
brew tap caskroom/versions
brew cask install atom
brew cask install google-chrome
brew cask install charles
brew cask install encryptr
brew cask install evernote
brew cask install firefox
brew cask install gitter
brew cask install slack
brew cask install hipchat
brew cask install flux
# Remove outdated versions from the cellar
brew cleanup
| true |
231b3f7d8d4ee858da1ed894b76e95a33e623adf | Shell | sspiderd/scripts | /sshf_bash | UTF-8 | 333 | 3.5625 | 4 | [] | no_license | #!/bin/bash
#Replaces ssh calls with ip instead of dns for fast access
#Author Ilan G
# Rewrites every user@host argument into user@IP (resolved once via dig)
# before handing all arguments to ssh.
# Fix: arguments are collected in an array and every expansion is quoted,
# so values containing whitespace survive intact (the old scalar RUN plus
# unquoted $@/$RUN word-split them); $(...) replaces legacy backticks.
RUN=()
for ARG in "$@"
do
	if [[ $ARG =~ (.*)@(.*) ]]
	then
		# 5th field of the second "IN" record in dig's output --
		# presumably the resolved A-record address; TODO confirm
		# behaviour when the name resolves through a CNAME chain.
		REPLACED=$(dig "${BASH_REMATCH[2]}" | grep IN | head -2 | tail -1 | sed 's/\s\+/ /g' | cut -d' ' -f5)
		RUN+=("${BASH_REMATCH[1]}@$REPLACED")
	else
		RUN+=("$ARG")
	fi
done
ssh "${RUN[@]}"
8286a3ea87033b3da78abcf196232db8fdb49094 | Shell | raul338/main | /gamin/PKGBUILD | UTF-8 | 883 | 2.71875 | 3 | [] | no_license |
# PKGBUILD metadata for the gamin package (FAM-compatible file monitor).
pkgname=gamin
pkgver=0.1.10
pkgrel=4
pkgdesc='File and directory monitoring system defined to be a subset of the FAM (File Alteration Monitor).'
url='http://www.gnome.org/~veillard/gamin'
license=('GPL2')
arch=('x86_64')
depends=('glib2' 'python2')
# Upstream tarball plus two local patches (checksums below, same order).
source=("https://people.gnome.org/~veillard/gamin/sources/${pkgname}-${pkgver}.tar.gz"
        'const.patch'
        'gam_server.patch')
md5sums=('b4ec549e57da470c04edd5ec2876a028'
         'f679aeb48fe9dd376c8828cc9b6941ab'
         '4784359a3206bfa3c0dce1c23468f87f')
# Apply the local patches, configure (threads on, debug/static off,
# helpers under /usr/lib/gamin) and compile inside the extracted source.
build() {
  cd ${pkgname}-${pkgver}
  patch -p1 -i ${srcdir}/gam_server.patch
  patch -p1 -i ${srcdir}/const.patch
  ./configure \
    --disable-debug \
    --disable-debug-api \
    --disable-static \
    --libexecdir=/usr/lib/gamin \
    --prefix=/usr \
    --with-threads
  make
}
# Install the compiled tree into the packaging root ($pkgdir).
package() {
  cd "${pkgname}-${pkgver}"
  make DESTDIR="${pkgdir}" install
}
| true |
ff0d24b6fc037f94fe593ec70ef959ddaaddc0ea | Shell | pacificclimate/prism-netcdf-prep | /convert_8110_outformats.sh | UTF-8 | 3,234 | 3.21875 | 3 | [] | no_license | #!/bin/bash
#Want to convert the output PRISM ASCII grids to a georeferenced format/netcdfs
i=0;
waitevery=8;
for infile in /home/data/projects/PRISM/bc_climate/bc_8110_maps/grids/bc_*8110.*
do
(
gdal_translate -of netCDF -a_srs '+proj=longlat +ellps=GRS80 +datum=NAD83 +no_defs' $infile $infile.nc;
mv $infile.nc /home/data/projects/PRISM/bc_climate/bc_8110_maps/grids/netcdfs/
) & ((i++%waitevery==0)) && wait;
done >/dev/null 2>&1
wait;
#Precip prep and metadata
indir=/home/data/projects/PRISM/bc_climate/bc_8110_maps/grids/netcdfs/
F=${indir}bc_ppt_8110.nc.prep
ncecat -O --netcdf4 -u time $indir*ppt*.nc $F
ncrename -O -v Band1,pr $F
ncatted -O -a long_name,pr,m,c,"Precipitation Climatology" \
-a long_description,pr,a,c,"Climatological mean of monthly total precipitation" \
-a standard_name,pr,a,c,"lwe_thickness_of_precipitation_amount" \
-a units,pr,a,c,"mm" \
-a cell_methods,pr,a,c,"time: sum within months time: mean over years" $F
ncatted -O -a axis,lat,c,c,Y $F
ncatted -O -a axis,lon,c,c,X $F
ncap2 -O -s 'defdim("bnds",2)' $F $F
#make an r code file to run the time addition then execute it
#Tmax prep and metadata
F=${indir}bc_tmax_8110.nc.prep
ncecat -O --netcdf4 -u time $indir*tmax*.nc $F
ncrename -O -v Band1,tmax $F
ncatted -O -a long_name,tmax,m,c,"Temperature Climatology (Max.)" \
-a long_description,tmax,a,c,"Climatological mean of monthly mean maximum daily temperature" \
-a standard_name,tmax,a,c,air_temperature \
-a units,tmax,a,c,"celsius" \
-a cell_methods,tmax,a,c,"time: maximum within days time: mean within months time: mean over years" $F
ncatted -O -a axis,lat,c,c,Y $F
ncatted -O -a axis,lon,c,c,X $F
ncap2 -O -s 'defdim("bnds",2)' $F $F
#Tmin prep and metadata
F=${indir}bc_tmin_8110.nc.prep
ncecat -O --netcdf4 -u time $indir*tmin*.nc $F
ncrename -O -v Band1,tmin $F
ncatted -O -a long_name,tmin,m,c,"Temperature Climatology (Min.)" \
-a long_description,tmin,a,c,"Climatological mean of monthly mean minimum daily temperature" \
-a standard_name,tmin,a,c,air_temperature \
-a units,tmin,a,c,"celsius" \
-a cell_methods,tmin,a,c,"time: minimum within days time: mean within months time: mean over years" $F
ncatted -O -a axis,lat,c,c,Y $F
ncatted -O -a axis,lon,c,c,X $F
ncap2 -O -s 'defdim("bnds",2)' $F $F
Rscript add_time_dim.r /home/data/projects/PRISM/bc_climate/bc_8110_maps/grids/netcdfs/bc_ppt_8110.nc.prep 1981 2010
Rscript add_time_dim.r /home/data/projects/PRISM/bc_climate/bc_8110_maps/grids/netcdfs/bc_tmin_8110.nc.prep 1981 2010
Rscript add_time_dim.r /home/data/projects/PRISM/bc_climate/bc_8110_maps/grids/netcdfs/bc_tmax_8110.nc.prep 1981 2010
for F in $(ls $indir*.nc.prep)
do
ncatted -O -a long_name,time,c,c,time -a calendar,time,c,c,gregorian $F
ncatted -O -a climatology,time,c,c,"climatology_bounds" $F
done
ncap2 -O -s 'tmax=tmax/100;' ${indir}bc_tmax_8110.nc.prep ${indir}bc_tmax_8110.nc.prep
ncap2 -O -s 'tmin=tmin/100;' ${indir}bc_tmin_8110.nc.prep ${indir}bc_tmin_8110.nc.prep
mv ${indir}bc_ppt_8110.nc.prep pr_monClim_PRISM_historical_run1_198101-201012.nc
mv ${indir}bc_tmax_8110.nc.prep tmax_monClim_PRISM_historical_run1_198101-201012.nc
mv ${indir}bc_tmin_8110.nc.prep tmin_monClim_PRISM_historical_run1_198101-201012.nc | true |
33a8e91244bf100718fbbf7122a7dad5e091a0fe | Shell | christaotaoz/shkd-work | /work/panabit_plugin/pa_plugin/cfy/Setup/src/policy_getgrp | GB18030 | 11,171 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
#This script is created by ssparser automatically. The parser first created by MaoShouyan
printf "Content-type: text/html;charset=gb2312
Cache-Control: no-cache
"
echo -n "";
. ../common/common.sh
if [ "${CGI_policy}" = "" ]; then
for policy in `${FLOWEYE} policy listgrp | awk '{print $1}'`
do
CGI_policy="${policy}"
break
done
fi
myself="/cgi-bin/Setup/`basename $0`"
echo -n "
<script type=\"text/javascript\" src=\"/img/common.js\"></script>
<script languate=javascript>
function onAddPolicy()
{
window.location.href = \"/cgi-bin/Setup/policy_addgrp\";
}
function onClonePolicy(policy)
{
window.location.href = \"/cgi-bin/Setup/policy_clonegrp?fromgrp=\" + policy;
}
function onAddRule(policy)
{
window.location.href = \"/cgi-bin/Setup/policy_addrule?policy=\" + policy;
}
function onDynaRate()
{
var url = \"/cgi-bin/Setup/policy_listlink\";
ShowWindow(url, \"\", 780, 620);
}
function modifyRule(group, polno)
{
var url;
url = \"/cgi-bin/Setup/policy_setrule?group=\" + group + \"&polno=\" + polno;
window.location.href = url;
}
function onSelectPolicy(obj)
{
window.location.href = \"${myself}?policy=\" + obj.value;
}
function onDSCPChanged(obj)
{
var url;
url = \"${myself}?action=tos&tos=\" + obj.value;
window.location.href = url;
}
function deleteRule(group, rule)
{
if (confirm(\"ȷҪɾù?\")) {
window.location.href = \"${myself}?action=rmvrule\" +
\"&policy=\" + group + \"&ruleid=\" + rule;
}
}
function onDeletePolicy(policy)
{
if (confirm(\"ȷҪɾ˲?\"))
window.location.href = \"${myself}?action=deletegroup\" + \"&policy=\" + policy;
}
function enablePolicy(grp, polno)
{
var url;
url = \"${myself}?action=enable&link=${CGI_link}\" + \"&group=\" + grp + \"&policyid=\" + polno;
window.location.href = url;
}
function disablePolicy(grp, polno)
{
var url;
url = \"${myself}?action=disable&link=${CGI_link}\" + \"&group=\" + grp + \"&policyid=\" + polno;
window.location.href = url;
}
function onloaddoc()
{
if (document.body.clientWidth > 1300) {
document.getElementById(\"tbl1\").style.width=\"100%\";
document.getElementById(\"tbl2\").style.width=\"100%\";
}
else {
document.getElementById(\"tbl1\").style.width=\"1300\";
document.getElementById(\"tbl2\").style.width=\"1300\";
}
}
</script>
";
if [ "${CGI_action}" = "deletegroup" ]; then
operator_check "${myself}?policy=${CGI_policy}"
errmsg=`${FLOWEYE} policy rmvgrp ${CGI_policy}`
if [ "$?" != "0" ]; then
afm_dialog_msg "ִ:${errmsg}"
else
WEB_LOGGER "ɾ" "group=${CGI_policy}"
afm_dialog_msg "ɹ!"
afm_load_page 0 "${myself}"
exit 0
fi
fi
if [ "${CGI_action}" = "rmvrule" ]; then
operator_check "${myself}?policy=${CGI_policy}"
errmsg=`${FLOWEYE} policy rmvrule ${CGI_policy} ${CGI_ruleid}`
if [ "$?" != "0" ]; then
afm_dialog_msg "ִ:${errmsg}"
else
WEB_LOGGER "ɾ" "group=${CGI_policy},id=${CGI_ruleid}"
afm_dialog_msg "ɹ!"
fi
fi
if [ "${CGI_action}" = "enable" ]; then
operator_check "${myself}"
errmsg=`${FLOWEYE} policy disable group=${CGI_group} id=${CGI_policyid} disable=0`
if [ "$?" != "0" ]; then
afm_dialog_msg "ʧ:${errmsg}"
else
afm_dialog_msg "ɹ!"
fi
fi
if [ "${CGI_action}" = "disable" ]; then
operator_check "${myself}"
errmsg=`${FLOWEYE} policy disable group=${CGI_group} id=${CGI_policyid} disable=1`
if [ "$?" != "0" ]; then
afm_dialog_msg "ʧ:${errmsg}"
else
afm_dialog_msg "ɹ!"
fi
fi
if [ "${CGI_action}" = "tos" ]; then
operator_check "${myself}"
errmsg=`${FLOWEYE} policy config force_tos=${CGI_tos}`
if [ "$?" != "0" ]; then
afm_dialog_msg "ִ: ${errmsg}"
fi
fi
policylist=`${FLOWEYE} policy listgrp`
for nameval in `${FLOWEYE} policy stat`
do
tag=`echo ${nameval} | cut -d'=' -f1`
val=`echo ${nameval} | cut -d'=' -f2-`
export ${tag}=${val}
done
CGI_tos="${force_tos}"
echo -n "
<body onload=\"onloaddoc()\">
"; cgi_show_title "->"
echo -n "
<table id=tbl1 style=\"width:100%;font-bold:true;font-size:14px;\">
<tr>
<td align=left>
<select name=policy value=\"${CGI_policy}\" style=\"width:120;height:19\" onchange=\"onSelectPolicy(this)\">
";
${FLOWEYE} policy listgrp | while read policy policyname
do
if [ "${CGI_policy}" = "${policy}" ]; then
echo "<option value=${policy} selected>${policyname}</option>"
else
echo "<option value=${policy}>${policyname}</option>"
fi
done
if [ "${policylist}" = "" ]; then
echo "<option value=0>(ûжκβ)</option>"
fi
echo -n "
</select></td>
<td width=* align=right>DSCP
<select name=tos value=${CGI_tos} style=\"width:80\" onchange=\"onDSCPChanged(this)\">
";
if [ ${CGI_tos} -eq 0 ]; then
echo "<option value=1>ģʽ</option>"
echo "<option value=0 selected>ģʽ</option>"
else
echo "<option value=1 selected>ģʽ</option>"
echo "<option value=0>ģʽ</option>"
fi
echo -n "
</select>
<a style=\"color:#0000ff;font-size:14px;\" href=\"javascript:onDynaRate()\"><b>̬...</b></a>
"; if [ "${CGI_policy}" != "" ]; then
echo -n "
<a style=\"color:#0000ff;font-size:14px;\" href=\"javascript:onDeletePolicy('${CGI_policy}')\"><b>ɾ</b></a>
<a style=\"color:#0000ff;font-size:14px;\" href=\"javascript:onClonePolicy('${CGI_policy}')\"><b>Ʋ>></b></a>
"; fi
echo -n "
<a style=\"color:#0000ff;font-size:14px;\" href=\"javascript:onAddPolicy()\"><b>>></b></a>
</td>
</tr>
</table>
";
if [ "${CGI_policy}" != "" ]; then
rowno=1
echo -n "
<table id=tbl2 width=100% border=0 cellspacing=1 cellpadding=1>
<tr id=tblhdr height=22>
<td width=40 align=center></td>
<td width=80 align=right>·</td>
<td width=40 align=right></td>
<td width=50 align=right>VLAN</td>
<td width=50 align=right>TTL</td>
<td width=120 align=right>ַ</td>
<td width=120 align=right>ַ</td>
<td width=40 align=right>Э</td>
<td width=100 align=right>Ӧ</td>
<td width=150 align=right>û</td>
<td width=80 align=right></td>
<td width=45 align=right>IP</td>
<td width=30 align=right>DSCP</td>
<td width=40 align=right>ȼ</td>
<td width=40 align=right>ƥ</td>
<td width=* align=right>ע</td>
<td width=75 align=right><a style=\"color:#0000ff;font-size:14px\" href=\"javascript:onAddRule('${CGI_policy}')\">Ӳ</a> </td>
</tr>
";
idname="row1"
${FLOWEYE} policy getgrp group=${CGI_policy} | \
while read polno bridge dir appid appname proto from intype inip inport to outtype outip outport \
action nextstep iprate prclevel bps1 bps2 tos pri natip linkid disabled hasms qqcnt vlan ttl desc theothers
do
[ "${ttl}" = "0-255" ] && ttl=""
[ "${desc}" = "NULL" ] && desc=""
echo -n "
"; if [ ${disabled} -ne 0 ]; then
echo -n "
<tr id=row4>
"; else
echo -n "
<tr id=${idname}>
"; fi
echo -n "
<td align=center>${polno}</td>
";
bdgname="${bridge#_wg.}"
[ "${bdgname}" = "0" ] && bdgname="any"
echo -n "
<td align=right>${bdgname}</td>
"; if [ "${dir}" = "both" ]; then
echo -n "
<td align=right>any</td>
"; elif [ "${dir}" = "in" ]; then
echo -n "
<td align=right></td>
"; else
echo -n "
<td align=right></td>
"; fi
echo -n "
"; [ "${vlan}" = "0" ] && vlan=""
echo -n "
<td align=right>${vlan}</td>
<td align=right>${ttl}</td>
"; if [ "${inport}" = "any" -o "${inport}" = "0" ]; then
echo -n "
<td align=right>${inip}</td>
"; else
echo -n "
<td align=right>${inip}:${inport}</td>
"; fi
echo -n "
"; if [ "${outport}" = "any" ]; then
echo -n "
<td align=right>${outip}</td>
"; else
echo -n "
<td align=right>${outip}:${outport}</td>
"; fi
echo -n "
<td align=right>${proto}</td>
"; if [ "${appid}" = "any" ]; then
echo -n "
<td align=right>any</td>
"; else
echo -n "
<td align=right>${appname}</td>
"; fi
echo -n "
";
userattrs=""
if [ "${natip}" != "0" ]; then
userattrs=">=${natip}"
fi
if [ "${hasms}" != "0" ]; then
if [ "${userattrs}" != "" ]; then
userattrs="${userattrs};ƶն>=${hasms}"
else
userattrs="ƶն>=${hasms}"
fi
fi
if [ "${qqcnt}" != "0" -a "${qqcnt}" != "" ]; then
if [ "${userattrs}" != "" ]; then
userattrs="${userattrs};QQû>=${qqcnt}"
else
userattrs="QQû>=${qqcnt}"
fi
fi
echo "<td align=right>${userattrs}</td>"
echo -n "
";
case "${action}" in
"deny")
echo "<td align=right><img src=\"/img/no.png\" style=\"margin-right:5px;vertical-align:middle;\" /></td>"
;;
"permit")
echo "<td align=right><img src=\"/img/yes.png\" style=\"margin-right:5px;vertical-align:middle;\" /></td>"
;;
ROUTE*)
pxy=`echo ${action} | cut -d'-' -f2-`
echo "<td align=right>ת->${pxy}</td>"
;;
PROXY*)
echo "<td align=right>${action}</td>"
;;
DUP*)
pxy=`echo ${action} | cut -d'-' -f2`
echo "<td align=right>ľ->${pxy}</td>"
;;
*)
echo "<td align=right>${action}</td>"
;;
esac
echo -n "
"; if [ "${iprate}" = "0" ]; then
echo -n "
<td></td>
"; else
echo -n "
<td align=right>${iprate}</td>
"; fi
echo -n "
<td align=right>${tos}</td>
<td align=right>${pri}</td>
"; if [ "${nextstep}" = "continue" ]; then
echo -n "
<td align=right></td>
"; else
echo -n "
<td align=right>ֹͣ</td>
"; fi
echo -n "
<td align=right>${desc}</td>
<td align=right>
<img src=\"/img/edit.png\" style=\"margin-top: 3px\" onclick=\"modifyRule('${CGI_policy}', '${polno}')\" title=\"IJ\" />
<img src=\"/img/delete.png\" style=\"margin-top: 3px\" onclick=\"deleteRule('${CGI_policy}', '${polno}', '${linkid}')\" title=\"ɾ\" />
"; if [ ${disabled} -eq 0 ]; then
echo -n "
<img src=\"/img/disable.png\" style=\"margin-top: 3px\" onclick=\"disablePolicy('${CGI_policy}', '${polno}')\" title=\"ò\" />
"; else
echo -n "
<img src=\"/img/enable.png\" style=\"margin-top: 3px\" onclick=\"enablePolicy('${CGI_policy}', '${polno}')\" title=\"ò\" />
"; fi
echo -n "
</td>
</tr>
";
if [ "${idname}" = "row1" ]; then
idname="row2"
else
idname="row1"
fi
done
fi
echo -n "
</body>
</html>
"; | true |
72f7d9383eae72c692810bee17472e965ef2d835 | Shell | nirandaperera/wrf-scripts | /run/run-wrf.bash | UTF-8 | 3,719 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Wall-clock start of the whole pipeline (used for the final elapsed-time report).
tot_start=$(date +%s)
# Installation layout: shared libraries, the WRF tree, static geography data,
# the daily GFS forcing downloads, and helper-script / log / output locations.
lib_path="/opt/lib"
wrf_home="/mnt/disks/wrf-mod"
geog_home="$wrf_home/DATA/geog/"
gfs_home="$wrf_home/DATA/GFS/"
src_home="$wrf_home/wrf-scripts/src"
run_home="$wrf_home/wrf-scripts/run"
ncl_home="$wrf_home/wrf-scripts/ncl"
log_home="$wrf_home/logs"
log_file="wrf.run."$(date +"%Y-%m-%d_%H%M")".log"
wrf_output="$wrf_home/OUTPUT"
# From here on, all stdout of this script is captured in a timestamped log file.
echo "Redirecting logs to $log_home/$log_file"
mkdir -p $log_home
exec > "$log_home/$log_file"
# Runtime environment for the WRF/WPS binaries (NetCDF, MPICH, grib2).
export NETCDF="$lib_path"/netcdf
export LD_LIBRARY_PATH="$lib_path"/mpich/lib:"$lib_path"/grib2/lib:$LD_LIBRARY_PATH
export LD_INCLUDE_PATH="$lib_path"/mpich/include:/usr/include:"$lib_path"/grib2/include:$LD_INCLUDE_PATH
export PATH=$PATH:"$lib_path"/mpich/bin/
echo "WRF run start"
# Simulation window start: yesterday's date (the GFS 00Z cycle being used)...
rundate=$(date '+%Y%m%d' --date="1 days ago")
year1=${rundate:0:4}
month1=${rundate:4:2}
date1=${rundate:6:2}
# ...and end: two days from now.  The Y/M/D pieces are substituted into the
# WPS and WRF namelist templates further below.
rundate2=$(date '+%Y%m%d' --date " 2 days")
year2=${rundate2:0:4}
month2=${rundate2:4:2}
date2=${rundate2:6:2}
cd $gfs_home || exit
# The final forecast-hour file (f075) of yesterday's 00Z cycle doubles as a
# marker that the whole GFS download has completed.
find1="${rundate}.gfs.t00z.pgrb2.0p50.f075"
if [ -f "${find1}" ]; then
	find2=$(find ./ -size 0 | grep "${rundate}")
	# NOTE(review): this treats "no zero-byte file for ${rundate}" as success —
	# `-e ""` is false, so an empty ${find2} means the download is complete.
	# If find ever returns more than one path the test receives multiple
	# words; presumably a single partial file is the expected case — verify.
	if [ ! -e "${find2}" ];
	then
		echo "${rundate} Data available";
	else
		echo "Data not yet available";
		exit;
	fi
else
	echo "Data not yet available";
	exit;
fi
# print_elapsed_time LABEL SECONDS
# Print "LABEL - Time elapsed Hh:Mm:Ss" for an elapsed duration in seconds.
# Fix: quote "$1" so a label containing spaces stays a single printf argument
# instead of being word-split and re-applied against the format string.
function print_elapsed_time {
	printf '%s - Time elapsed %dh:%dm:%ds\n' "$1" $(($2/3600)) $(($2%3600/60)) $(($2%60))
}
cd $wrf_home || exit
# A simple lock file prevents two simultaneous runs of the pipeline.
lockfile="wrflock.txt"
if [ -f ${lockfile} ]
then
	echo "Simulation has already started";
	exit;
else
	echo "start simulation ${rundate}";
	touch wrflock.txt
fi
# WRF keeps large automatic arrays on the stack; remove the stack limit.
ulimit -s unlimited
# Start the MPD daemon ring required by the MPICH mpirun calls below.
mpdboot
# --- WPS preprocessing: ungrib -> geogrid -> metgrid ---
cd $wrf_home/WPS || exit
# Fill the simulation dates and geography path into the WPS namelist template.
sed -e 's@YY1@'$year1'@g;s@MM1@'$month1'@g;s@DD1@'$date1'@g;s@YY2@'$year2'@g;s@MM2@'$month2'@g;s@DD2@'$date2'@g;s@GEOG@'$geog_home'@g' $src_home/namelist.wps2 > namelist.wps
# Remove intermediates left over from the previous day's run.
rm -f FILE:*
rm -f PFILE:*
rm -f met_em*
# Variable table describing the fields in the GFS GRIB input.
ln -sf ungrib/Variable_Tables/Vtable.NAM Vtable
# Link today's GFS GRIB files under the names ungrib expects.
./link_grib.csh $gfs_home/"$rundate"
start=$(date +%s)
./ungrib.exe
end=$(date +%s)
secs=$((end-start))
print_elapsed_time "Ungrib" $secs
start=$(date +%s)
./geogrid.exe
end=$(date +%s)
secs=$((end-start))
print_elapsed_time "Geogrid" $secs
start=$(date +%s)
./metgrid.exe
end=$(date +%s)
secs=$((end-start))
print_elapsed_time "Metgrid" $secs
# --- WRF itself: real.exe (initial/boundary conditions) then wrf.exe ---
cd $wrf_home/WRFV3/test/em_real/ || exit
# Fill the simulation dates into the WRF namelist template.
sed -e 's@YY1@'$year1'@g;s@MM1@'$month1'@g;s@DD1@'$date1'@g;s@YY2@'$year2'@g;s@MM2@'$month2'@g;s@DD2@'$date2'@g' $src_home/namelist.input2 > namelist.input
# Clear previous met_em links and rsl.* MPI log files.
rm -f met_em*
rm -f rsl*
ln -sf $wrf_home/WPS/met_em.d0* .
start=$(date +%s)
mpirun -np 4 ./real.exe
end=$(date +%s)
secs=$((end-start))
print_elapsed_time "Real.exe" $secs
start=$(date +%s)
mpirun -np 4 ./wrf.exe
end=$(date +%s)
secs=$((end-start))
print_elapsed_time "wrf.exe" $secs
echo "WRF run completed"
# --- Post-processing: collect output, render images, extract data ---
echo "Move WRF Output"
mkdir -p $wrf_output
mv wrfout_d0* $wrf_output/
echo "Move WRF Output completed"
echo "Running NCL scripts"
cd $run_home || exit
./create-images.bash
echo "Extracting data"
cd $run_home || exit
#./extract-data.bash
python read_wrf_output.py
# /opt/Python/anaconda3/bin/python3.5 Plot_Rainfall_predicted_observed_dailyinputs.py
# /opt/Python/anaconda3/bin/python3.5 Plot_Rainfall_Daraniyagala.py
# /opt/Python/anaconda3/bin/python3.5 Plot_Rainfall_GlencourseF.py
# /opt/Python/anaconda3/bin/python3.5 Plot_Rainfall_Hanwella.py
# /opt/Python/anaconda3/bin/python3.5 Plot_Rainfall_Holombuwa.py
# /opt/Python/anaconda3/bin/python3.5 Plot_Rainfall_Kitulgala.py
# /opt/Python/anaconda3/bin/python3.5 Plot_Rainfall_Norwood.py
# cp *.pdf /var/www/html/slg/
# mv *.pdf /run/media/sherath/_wrfout/SriLanka/Graphs/
# Release the lock so tomorrow's run can start, then report total runtime.
rm -f $wrf_home/wrflock.txt
end=$(date +%s)
secs=$((end-tot_start))
print_elapsed_time "completed!" $secs
exit;
| true |
23473362818072f57e78f77529e9d3b19f24acb9 | Shell | ZhangXiao96/BiasAndVariance | /run.sh | UTF-8 | 738 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Launch the full grid of label-noise training runs in the background,
# round-robining jobs across 8 GPUs.  Each (dataset, model, seed) triple
# gets its own log file named <dataset-index>_<gpu>_<seed>.log.
# Label-noise level injected into the training targets.
noise=0.1
# Per-dataset architecture pairs: model_list[2*i] and model_list[2*i+1]
# are trained on data_list[i].
data_list=("cifar10" "cifar100" "svhn")
model_list=("vgg13" "resnet18" "vgg16" "resnet34" "vgg11" "resnet18")
opt="adam"
lr=0.0001
gpu_id=0

for loop in 0 1 2
do
	data=${data_list[$loop]}
	for in_loop in 0 1
	do
		# Pick the architecture paired with this dataset; shell arithmetic
		# replaces the slower, quoting-prone `expr` invocations.
		model_index=$(( 2 * loop + in_loop ))
		model=${model_list[$model_index]}
		for run_id in 0 1 2 3 4
		do
			# Echo the exact command for the record, then launch it detached.
			echo "CUDA_VISIBLE_DEVICES=${gpu_id} nohup python3 -u train_epoch_wise_noise.py $data $model $noise True $opt $lr $run_id > ${loop}_${gpu_id}_${run_id}.log &"
			CUDA_VISIBLE_DEVICES=${gpu_id} nohup python3 -u train_epoch_wise_noise.py $data $model $noise True $opt $lr $run_id > ${loop}_${gpu_id}_${run_id}.log &
			# Advance to the next GPU, wrapping around after device 7.
			gpu_id=$(( (gpu_id + 1) % 8 ))
		done
	done
done
| true |
c9ddef29299f10177397be2f4b5103584e65ad9c | Shell | jack-dolan/resume | /update.sh | UTF-8 | 308 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# Run this each time resume.tex gets updated to generate new resume.pdf and README.md with new date & resume
pdflatex resume.tex
# printf interprets the \n escapes; plain bash `echo` (without -e) would have
# written the literal characters "\n\n" into the README instead of blank lines.
printf '# Jack Dolan - Résumé\n\n' > './README.md'
date '+%Y-%m-%d' >> './README.md'
printf '\n\n' >> 'README.md'
pandoc resume.tex -t markdown_github >> 'README.md'
| true |
97de48de1dc54e0ef4547f2e8aedd33347287741 | Shell | evenator/dotfiles | /.bash_aliases | UTF-8 | 2,546 | 3.5 | 4 | [] | no_license | # Bash aliases file
# Author: Ed Venator (evenator@gmail.com)
# Start from a clean slate: drop any aliases inherited from other rc files.
unalias -a
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
    test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
    alias ls='ls --color=auto'
    #alias dir='dir --color=auto'
    #alias vdir='vdir --color=auto'
    alias grep='grep --color=auto'
    alias fgrep='fgrep --color=auto'
    alias egrep='egrep --color=auto'
fi
# some more ls aliases
alias ll='ls -halF'
alias la='ls -A'
alias l='ls -CF'
# Cuz why not
alias quit='exit'
# ROS helpers: recursive greps restricted to launch / C++ / message files.
alias launchgrep="grep -r --include='*.launch' --include='*.xml' --exclude='manifest.xml' --exclude='package.xml'"
alias cppgrep="grep -r --include='*.cpp' --include='*.h' --include='*.hpp'"
alias msggrep="grep -r --include='*.msg' --include='*.srv'"
# find rooted at the ROS workspace ($ROS_WORKSPACE expands at use time).
alias rosfind='find $ROS_WORKSPACE'
# List commands of running ROS processes, sorted.
# NOTE(review): the \" sequences are literal inside single quotes, so awk is
# handed a quoted program string as its argument — this looks pasted from a
# double-quoted context; verify the alias actually runs as intended.
alias lsnodes='ps aux | grep "ros" | grep -v grep | awk -F" " \"/python/{print $12; next}{print $11}\" | sort'
# Pin rosdep to the Ubuntu Trusty package mappings.
alias rosdep='rosdep --os="ubuntu:trusty"'
alias bashrc='source ~/.bashrc'
# Less with color and line numbers
alias less='less -NR'
# Add an "open" command to open a file using the file browser.
# Apparently the OSX terminal can do this. It's handy.
# Usage:
#  open          # Opens the current directory
#  open file...  # Open each file or directory in the browser
open(){
	if [ $# -lt 1 ]; then
		gnome-open . 1>/dev/null 2>/dev/null
	else
		# Quote "$@": the previous unquoted $@ word-split any filename
		# containing spaces into multiple bogus arguments.
		for FILE in "$@"; do
			gnome-open "$FILE" 1>/dev/null 2>/dev/null
		done
	fi
}
export -f open
# Add an "alert" alias for long running commands. Use like so:
#   sleep 10; alert
# Pops a desktop notification whose icon reflects the previous command's exit
# status and whose text is that command, recovered from history with the
# leading history number and the trailing "; alert" stripped by sed.
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# swri_rostopic: drop-in wrapper around ROS's `rostopic` that adds extra
# subcommands (table, nopub, nosub); anything else falls through to the
# real rostopic unchanged.
swri_rostopic() {
  case "$1" in
    table)
      # Pretty-print `rostopic list --verbose` as a sorted, columnized table.
      shift
      pub_text='Published topics:'
      sub_text='Subscribed topics:'
      # awk action: print topic, type, count and the first character of the
      # publisher/subscriber marker field.
      print_cmd='print $2 " " $3 " " $4 " " substr($5, 0, 1)'
      # Drop the two section-header lines, then sort by the second column.
      rostopic list "$@" --verbose | \
        awk "!/(${pub_text})|(${sub_text})/{${print_cmd}}" | \
        sort --key=2 --human-numeric-sort | \
        column -t -c 120
      ;;
    nopub)
      # subscribed, but not published
      comm -13 --nocheck-order <(rostopic list -p) <(rostopic list -s)
      ;;
    nosub)
      # published, but not subscribed
      comm -23 --nocheck-order <(rostopic list -p) <(rostopic list -s)
      ;;
    *)
      rostopic "$@"
      ;;
  esac
}
export -f swri_rostopic
# Shadow rostopic itself with the wrapper for interactive use.
alias rostopic='swri_rostopic'
| true |
a38af7a0bc7da321027919a1edf965d6c7c5dd3d | Shell | AthiraLekshmiCV/Operating-Systems-Lab | /name.sh | UTF-8 | 228 | 3.421875 | 3 | [] | no_license | #! /bin/bash
: <<'COMMENT'
Write a script that will take a person's name as a parameter to the program name.
The script should greet that person, as "Good Day name."
COMMENT
# Honor the exercise statement: use the first positional parameter when one is
# given; otherwise keep the old behavior of prompting on stdin.
if [ $# -ge 1 ]; then
	name=$1
else
	echo "Enter your name"
	# -r keeps any backslashes in the typed name literal.
	read -r name
fi
echo "Good Day $name"
| true |
be219c29c9e021d30f2223fec800722cb66fac4a | Shell | bjha96/public | /rsync-backup.sh | UTF-8 | 1,430 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Back up ${HOME} to an rsync daemon share, keyed by host and user.
# Pass "1" as the first argument for verbose output.
#set -x
#print date
echo "***************STARTING**************"
date
DEST_HOST=192.168.0.5
DEST_SHARE=NetBackup
SRC_DIR="${HOME}/"
RSYNC_USER=${USER}
#Embed the current hostname and username in the destination folder
DEST_LOC="${DEST_HOST}::${DEST_SHARE}/$(hostname)-$(uname)/${USER}"
# Build the rsync arguments as an array: the previous flat string was expanded
# unquoted, so any option value containing spaces (e.g. a ${HOME} with spaces
# in --include-from) was word-split, and the unquoted --exclude=* was exposed
# to pathname expansion.
RSYNC_OPTS=(-azrmuPhF --delete --delete-excluded -e ssh)
#Check debug is enabled, set verbose flag
if [ "a1" == "a$1" ]; then
	RSYNC_OPTS=(-v "${RSYNC_OPTS[@]}")
fi
#Prepare a list of files to be included, by extension
RSYNC_INC_LIST="${SRC_DIR}/rsync.include"
if [[ ! -e ${RSYNC_INC_LIST} ]]; then
	mkdir -p "$(dirname "${RSYNC_INC_LIST}")"
	# The '>' redirection creates the file, so no separate touch is needed.
	echo "#List of files to be included in rsync" > "${RSYNC_INC_LIST}"
	echo "*/" >> "${RSYNC_INC_LIST}"
	echo "*.*" >> "${RSYNC_INC_LIST}"
fi
RSYNC_OPTS+=("--include-from=${RSYNC_INC_LIST}" "--exclude=*")
#Src: https://gist.github.com/StefanHamminga/2b1734240025f5ee916a
RSYNC_SKIP_COMPRESS="3g2/3gp/3gpp/7z/aac/ace/amr/apk/appx/appxbundle/arc/arj/asf/avi/bz2/cab/crypt5/crypt7/crypt8/deb/dmg/drc/ear/gz/flac/flv/gpg/iso/jar/jp2/jpg/jpeg/lz/lzma/lzo/m4a/m4p/m4v/mkv/msi/mov/mp3/mp4/mpeg/mpg/mpv/oga/ogg/ogv/opus/pack/png/qt/rar/rpm/rzip/s7z/sfx/svgz/tbz/tgz/tlz/txz/vob/wim/wma/wmv/xz/z/zip/zst"
rsync "${RSYNC_OPTS[@]}" --skip-compress="${RSYNC_SKIP_COMPRESS}" "${SRC_DIR}" "${RSYNC_USER}@${DEST_LOC}"
| true |
1029b4105a2117a22a53802fb78b12f58f19caf0 | Shell | redfive/dotfiles | /bin/ihr | UTF-8 | 177 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env bash
# ihr — shortcut for "info-sb hostsroles".
# if no arg passed in, get hostsroles on localhost; with one arg, query that
# host.  (More than one argument is ignored, as before.)
if [ $# -eq 0 ]; then
    info-sb hostsroles "$(hostname)"
elif [ $# -eq 1 ]; then
    # Quote "$1" so a hostname is passed through as a single argument.
    info-sb hostsroles "$1"
fi
| true |
aaa2a49b32b8561dccc9b8f92dd88eb790820667 | Shell | LinuxOnCloud/Docker_Sample | /VM_Scripts/App42_Mysql_Conf/postgresql-proxyha1/app42RDS/sbin/check_db.sh | UTF-8 | 655 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Watchdog: restart PostgreSQL if nothing is listening on TCP 5432 and mail
# the outcome (with the service log attached as the body) to the admin.
EMAIL="abc@example.com"
# Setup name = first dash-separated token of the hostname, used in subjects.
setup_name=$(hostname | cut -d"-" -f1)
#d=`netstat -npl|grep mysqld|grep 3306|rev|awk '{print $1}'|rev|cut -d"/" -f1`
# Process name of whatever is listening on 5432 (IPv4 only; tcp6 excluded).
d=$(netstat -tunpl | grep postgres | grep 5432 | grep -v tcp6 | rev | awk '{print $1}' | rev | cut -d"/" -f1)
/bin/echo "d=$d"
if [ -z "$d" ]; then
	# Bug fix: the recipient variable was mistyped as $Email (unset), so the
	# notification mails were being sent to no one.
	if /etc/init.d/postgresql restart; then
		mail -s "$setup_name : PostgreSQL Service Running Successfully : PostgreSQLHA2" "$EMAIL" < /var/log/postgresql/postgresql-9.6-main.log
	else
		mail -s "$setup_name : PostgreSQL Service Starting Failed : PostgreSQLHA2" "$EMAIL" < /var/log/postgresql/postgresql-9.6-main.log
	fi
else
	/bin/echo "Process $1 Is Running"
fi
| true |
8a6c78a8de73722488192d4a6065b369cb5cb885 | Shell | Runsheng/bioinformatics_scripts | /blasz/RunLastzChain_sh | UTF-8 | 4,304 | 3.34375 | 3 | [] | no_license | #!/bin/sh
# NOTE: this is a template — it prints instructions and exits (see `exit 255`
# below).  Remove that line after adapting the paths/parameters to your site.
echo "example script to run lastz and chaining on two genomes in 2bit files"
echo "adjust this script for your local example, you have a couple of choices"
echo "for parameter sets. You will need a parasol cluster computer system"
echo "to run the large number of lastz instances."
echo "requires companion script constructLiftFile.pl and"
echo "partitionSequence.pl"
echo
echo "The point is to illustrate the steps of:"
echo "1. partitioning the two genomes into:"
echo " a. 10,000,000 overlapping 10,000 chunks for the target sequence"
echo " b. 20,000,000 no overlap chunks for the query sequence"
echo "2. setup cluster run target.list query.list lastz run script"
echo "3. chaining the psl results from the lastz procedure"
exit 255
# typical axtChain and lastz parameter sets:
export chainNear="-minScore=5000 -linearGap=medium"
export chainMedium="-minScore=3000 -linearGap=medium"
export chainFar="-minScore=5000 -linearGap=loose"
export lastzNear="B=0 C=0 E=150 H=0 K=4500 L=3000 M=254 O=600 Q=/scratch/data/blastz/human_chimp.v2.q T=2 Y=15000"
export lastzMedium="B=0 C=0 E=30 H=0 K=3000 L=3000 M=50 O=400 T=1 Y=9400"
export lastzFar="B=0 C=0 E=30 H=2000 K=2200 L=6000 M=50 O=400 Q=/scratch/data/blastz/HoxD55.q T=2 Y=3400"
# select one of three different parameter sets
# Near == genomes close to each other
# Medium == genomes at middle distance from each other
# Far == genomes distant from each other
export chainParams="$chainNear"
export lastzParams="$lastzNear"
# WRKDIR is where your 2bit files are and where you want this to work
export WRKDIR="/full/path/to/testLastz"
cd ${WRKDIR}
# Target (TNAME) and query (QNAME) assemblies, as UCSC 2bit files in WRKDIR.
export TNAME=ce9
export QNAME=cb3
export TARGET=${WRKDIR}/${TNAME}.2bit
export QUERY=${WRKDIR}/${QNAME}.2bit
ls -ld $TARGET $QUERY
# Step 1a: chromosome sizes + partition lists for the target (rebuilt only
# when the .chrom.sizes file is missing or empty).
if [ ! -s ${TNAME}.chrom.sizes ]; then
    twoBitInfo ${TARGET} stdout | sort -k2nr > ${TNAME}.chrom.sizes
    rm -fr ${TNAME}PartList ${TNAME}.part.list
    mkdir ${TNAME}PartList
fi
# Step 1b: same for the query.
if [ ! -s ${QNAME}.chrom.sizes ]; then
    twoBitInfo ${QUERY} stdout | sort -k2nr > ${QNAME}.chrom.sizes
    rm -fr ${QNAME}PartList ${QNAME}.part.list
    mkdir ${QNAME}PartList
fi
# Target: 10 Mb chunks with 10 kb overlap; query: 20 Mb chunks, no overlap.
if [ ! -s ${TNAME}.part.list ]; then
    partitionSequence.pl 10000000 10000 ${TARGET} ${TNAME}.chrom.sizes 1 \
	-lstDir ${TNAME}PartList > ${TNAME}.part.list
fi
if [ ! -s ${QNAME}.part.list ]; then
    partitionSequence.pl 20000000 0 ${QUERY} ${QNAME}.chrom.sizes 1 \
	-lstDir ${QNAME}PartList > ${QNAME}.part.list
fi
# Flatten the partition output into single target/query chunk lists.
grep -v PartList ${TNAME}.part.list > target.list
for F in ${TNAME}PartList/*.lst
do
    cat ${F}
done >> target.list
grep -v PartList ${QNAME}.part.list > query.list
for F in ${QNAME}PartList/*.lst
do
    cat ${F}
done >> query.list
# Lift files map chunk coordinates back to whole-chromosome coordinates.
./constructLiftFile.pl ${TNAME}.chrom.sizes target.list > target.lift
./constructLiftFile.pl ${QNAME}.chrom.sizes query.list > query.lift
# Step 2: gensub2 template — one runLastz job per (target, query) chunk pair.
echo "#LOOP" > template
echo 'runLastz $(path1) $(path2) $(file1) $(file2) {check out exists+ psl/$(file1).$(file2).psl.gz}' >> template
echo "#ENDLOOP" >> template
# Generate the per-job csh worker.  Inside the here-doc, \$ defers expansion
# to job runtime while the unescaped ${lastzParams} is baked in now.
cat <<_EOF_ > runLastz
#!/bin/csh -fe
set T = \$1
set Q = \$2
set FT = \$3
set FQ = \$4
set tmpDir = /scratch/tmp/\${FT}
mkdir -p raw psl \${tmpDir}
twoBitToFa \${T} \${tmpDir}/\${FT}.fa
twoBitToFa \${Q} \${tmpDir}/\${FQ}.fa
/cluster/bin/penn/lastz-distrib-1.02.00/bin/lastz \${tmpDir}/\${FT}.fa \
    \${tmpDir}/\${FQ}.fa \
    ${lastzParams} \
    > raw/\${FT}.\${FQ}.lav
lavToPsl raw/\${FT}.\${FQ}.lav stdout \
    | liftUp -type=.psl stdout target.lift error stdin \
    | liftUp -nohead -pslQ -type=.psl stdout query.lift error stdin \
    | gzip -c > psl/\${FT}.\${FQ}.psl.gz
rm -f \${tmpDir}/\${FT}.fa \${tmpDir}/\${FQ}.fa
rmdir --ignore-fail-on-non-empty \${tmpDir}
_EOF_
echo "ready to run lastz kluster job:"
echo "gensub2 target.list query.list template jobList"
echo "para make jobList"
echo "when finished, run the commands in chainJobs.csh to perform the chaining"
# Step 3: emit chainJobs.csh — one axtChain pipeline per target chunk,
# followed by a final chainMergeSort over all per-chunk chains.
mkdir -p chain
echo "#!/bin/csh -fe" > chainJobs.csh
for T in `cat target.list | sed -e "s#${WRKDIR}/##"`
do
    echo "zcat psl/${T}.*.psl.gz \\"
    echo " | axtChain -psl -verbose=0 ${chainParams} \\"
    echo -e "\tstdin ${TARGET} ${QUERY} stdout \\"
    echo " | chainAntiRepeat ${TARGET} ${QUERY} stdin chain/${T}.chain"
done >> chainJobs.csh
echo "find ./chain -name \"*.chain\" | chainMergeSort -inputList=stdin | gzip -c > ${TNAME}.${QNAME}.all.chain.gz" >> chainJobs.csh
| true |
b991ab3e9294b1d0132c4b8811caf4cf0bb64ffe | Shell | wisonzhu/devops | /clone.sh | UTF-8 | 2,523 | 3.421875 | 3 | [] | no_license | #!/bin/bash
# ghost must delete /etc/udev/rules.d/70-persistent-net.rules
#
# Clone and boot a KVM guest from a golden qcow2 image:
#   $1 hostname   $2 vlan id   $3 ip   $4 netmask   $5 gateway
#   $6 memory GiB (default 7.5)   $7 cpus (default 4)
#   $8 disk size 80|160 (default 80)   $9 serial number
PATH=/usr/lib64/qt-3.3/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin
set -x
if [ $# -gt 4 ]
then
    VM=$1
    ip=$3
    id=$2
    br="br$id"
    netmask=$4
    gateway=$5
    #mem=$6
    #cpu=$7
    #disk=$8
    vm_sn=$9
    # Deterministic identity derived from the IP: MAC = f0:00 + the four
    # octets in hex, UUID = md5(ip) formatted as 8-4-4-4-12.
    mac=$(echo "$ip" | awk -F'.' '{printf "f0:00:%.2x:%.2x:%.2x:%.2x",$1,$2,$3,$4}')
    uuid=$(echo "$ip" | md5sum | awk '{print $1}' | sed 's/\(.\{8\}\)\(.\{4\}\)\(.\{4\}\)\(.\{4\}\)\(.\{12\}\)/\1-\2-\3-\4-\5/')
    # Memory in KiB for libvirt; default 15*1024*512 KiB = 7.5 GiB, matching
    # the 7864320 placeholder in the XML template below.
    if [ -z "$6" ]
    then
        memory=$((15 * 1024 * 512 ))
    else
        # GiB -> KiB via bc (keeps the fraction), then round to nearest int.
        memory=$(echo "$6 * 1024 * 1024" | bc)
        memory=$(echo "$memory" | awk '{printf("%d\n",$1 + 0.5)}')
    fi
    if [ -z "$7" ]
    then
        cpu=4
    else
        cpu=$7
    fi
    if [ -z "$8" ]
    then
        disk=80
    else
        disk=$8
    fi
    # Copy the golden image matching the requested disk size.
    cp /export/image/image$disk.qcow2 /export/kvm/$VM.qcow2
    # Create the VLAN bridge configuration on first use of this VLAN.
    if [ -e /etc/sysconfig/network-scripts/ifcfg-$br ]
    then
        echo
    else
        bash /export/clone/mk_vlancfg.sh bond0 $id
        vms=$(virsh list --name | grep -v '^$' | wc -l)
        # Only safe to bounce the whole network stack while no guest runs.
        if [ "$vms" -eq 0 ];then
            service network restart
        fi
    fi
    if [ -e /etc/sysconfig/network-scripts/ifcfg-bond0.$id ]
    then
        ifup bond0.$id
    fi
    # Instantiate the libvirt domain XML from the template.
    # Bug fix: the vcpu substitution used $7 directly, so when the CPU count
    # was omitted the "4" placeholder was replaced with an empty string
    # instead of the $cpu default computed above.
    sed -e "/name/ s/test/$VM/" -e "/uuid/ s/13ef9ecc-3f87-43a7-349a-61c74cdcfa61/$uuid/" -e "/memory/ s/7864320/$memory/" -e "/currentMemory/ s/7864320/$memory/" -e "/vcpu/ s/4/$cpu/" -e "/mac address/ s/52:54:00:93:58:73/$mac/" -e "/source bridge/ s/br0/$br/" -e "/source file/ s/test.qcow2/$VM.qcow2/" -e "s/11111111/$vm_sn/" /export/clone/gho.xml > /etc/libvirt/qemu/$VM.xml
    virsh define /etc/libvirt/qemu/$VM.xml
    # Render the first-boot network configuration script for this guest.
    sed -e "s/IP_ADDRESS_GOES_HERE/$ip/g" -e "s/VM_NAME_GOES_HERE/$VM/g" -e "s/NETMASK_GOES_HERE/$netmask/g" -e "s/GATEWAY_GOES_HERE/$gateway/g"< /export/clone/configure.sh > /export/clone/logs/configure.sh.$VM
    chmod a+x /export/clone/logs/configure.sh.$VM
    # Reset machine identity inside the image and inject the config script.
    virt-sysprep -d $VM \
    --enable udev-persistent-net,script,bash-history,hostname,logfiles,utmp,script \
    --hostname $VM \
    --script /export/clone/logs/configure.sh.$VM > /dev/null
    virsh start $VM
    virsh autostart $VM
else
    echo "Usage: $0 <Hostname> <vlanID> <IP> <Netmask> <Gateway> <Memory Size> <N CPUs> <Disk size> <vm_sn>"
    echo " Notice: Hostname must be full qualitified DNS name (FQDN). If vlanID is not supported by network"
    echo " environment, use 0 (zero) as vlanID. Disk size only have choice of 80G or 160G. Please carefully"
    echo " calculate the CPU and memory usage of the host. "
    echo " Recommend configuration are (calculate for 64G memory on host, 8 guests per host):"
    echo " Memory Size: 7.5"
    echo " N CPUs: 4"
    echo " Disk Size: 80 or 160"
    exit 0
fi
| true |
d901bf7d8fc20ca64c675ba6c3250cc2c7774b8f | Shell | vyrp/Mjollnir | /tools/install-subtree.sh | UTF-8 | 198 | 2.796875 | 3 | [] | no_license | #!/bin/bash -ex
# Build git-subtree from the upstream git sources and install it system-wide.
# Pass "rm" as the first argument to delete the source checkout afterwards.
# Fail-fast/trace explicitly here: options on the shebang line alone are lost
# when the script is invoked as `bash install-subtree.sh`.
set -ex
cd "$HOME"
git clone https://github.com/git/git.git
cd git/contrib/subtree/
make
sudo install -m 755 git-subtree /usr/lib/git-core
if [ "$1" = "rm" ]; then
  # Return to $HOME by absolute path (equivalent to the old `cd ../../..`
  # but not dependent on the current depth) before removing the checkout.
  cd "$HOME"
  rm -rf -- git
fi
| true |
b3af85abaa3d14eb59a3f7af5182a26de6308fbd | Shell | MichalMaruska-TomTom/mmc-shell | /bin/cron-mail | UTF-8 | 1,173 | 3.796875 | 4 | [] | no_license | #! /usr/bin/zsh -feu
# Print the help text to stdout.
usage()
{
    cat <<EOF
Usage: $0 [-s subject] cmd args
Invoke "CMD args", redirecting its output (also stderr!) into a file, and
email it at the end.
If the exit status is 0, the email subject starts with "OK [subject]",
otherwise "FAIL [subject]".
EOF
}
# fallback (if empty): recipient and envelope sender for the report mail.
: ${MAILTO:=Michal.Maruska@tomtom.com}
# or SENDER
: ${MAILFROM:=Michal.Maruska@tomtom.com}
# (mails are tagged with an X-sender: cron-daemon header below)
debug=n
# -d keep the log file and print its name; -s set the subject; -h help.
while getopts :dhs: OPT; do
    case $OPT in
	h|+h)
	    usage
	    exit 0
	    ;;
	d)
	    debug=y
	    ;;
	s)
	    subject=$OPTARG
	    ;;
	*)
	    usage >&2
	    exit 2
    esac
done
# zsh evaluates OPTIND-1 arithmetically here; drop the parsed options.
shift OPTIND-1
OPTIND=1
CMD=$1; shift
# Capture file for the command's combined stdout+stderr.
# NOTE(review): this template has text after the X's — GNU mktemp normally
# requires the X run at the end (or --suffix); verify it works on the target
# host's mktemp.
TEMP=$(mktemp $CMD.XXXX.log)
# how to suppress the exit-on-failure here?  (shebang has -e; the `if`
# context below keeps a failing $CMD from aborting the script)
if $CMD "$@" &> $TEMP;
then
    result=0
else
    result=$?
    # echo "this way $result"
fi
# default/fall-back value: use the command name when -s was not given.
: ${subject=$CMD}
if [ $result = 0 ];
then
    subject="OK $subject"
else
    subject="FAIL $subject"
fi
# stderr during this command is not controlled:
# and it might invoke gnome-keyring.
# (zsh does not word-split unquoted parameters, so the spaces in $subject
# are safe here — do not "fix" this as if it were bash.)
mail -r "$MAILFROM" -s $subject -a "X-sender: cron-daemon" $MAILTO < $TEMP
# need to find out why no mail arrived.
# -d mode: keep the captured log and print its path; otherwise clean up.
if [ $debug = y ]; then
    echo $TEMP
else
    rm $TEMP
fi
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.