blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
686391267ee9ce9dedd911fd67fe7c47a29a92ba | Shell | delkyd/alfheim_linux-PKGBUILDS | /git-lfs-test-server-git/PKGBUILD | UTF-8 | 1,098 | 2.9375 | 3 | [] | no_license | pkgname=git-lfs-test-server-git
# AUR-style VCS PKGBUILD: builds lfs-test-server from the latest git master.
# The pkgver below is only a cached value; pkgver() regenerates it at build time.
pkgver=v0.3.0.r0.ge76e479
pkgrel=2
pkgdesc="Standalone Git LFS server"
url="https://github.com/github/lfs-test-server"
license=('custom')
arch=('x86_64' 'i686')
makedepends=('mercurial' 'git' 'go')
conflicts=('git-lfs-test-server' 'git-lfs-test-server-bin')
provides=('git-lfs-test-server')
# 'SKIP' because the source is a moving git branch, not a fixed tarball.
md5sums=('SKIP')
options=('!strip' '!emptydirs')
source=('git://github.com/github/lfs-test-server')
# Derive the version from git: latest tag, commit count since the tag, and
# the abbreviated hash (e.g. v0.3.0.r0.ge76e479).
pkgver() {
cd lfs-test-server
git describe --long --tags | sed 's/\([^-]*-g\)/r\1/;s/-/./g'
}
# Compile with the Go toolchain into a private GOPATH/GOBIN under $srcdir.
build() {
mkdir -p "$srcdir/bin"
GOBIN="$srcdir/bin" GOPATH="$srcdir" go get -v -x ./lfs-test-server/
}
# Install the binaries plus the Go src/pkg trees into the package root.
package() {
mkdir -p "$pkgdir/usr/bin"
install -p -m755 "$srcdir/bin/"* "$pkgdir/usr/bin"
mkdir -p "$pkgdir/usr/lib/go"
cp -Rv --preserve=timestamps "$srcdir/"{src,pkg} "$pkgdir/usr/lib/go"
# Package license (if available)
# NOTE(review): $_gourl is never defined in this PKGBUILD, so the -e test
# below most likely never succeeds and no license file is installed — verify.
for f in LICENSE COPYING LICENSE.* COPYING.*; do
if [ -e "$srcdir/src/$_gourl/$f" ]; then
install -Dm644 "$srcdir/src/$_gourl/$f" \
"$pkgdir/usr/share/licenses/$pkgname/$f"
fi
done
}
| true |
610b8e14aab46312156342fd2c78ee352dee1976 | Shell | pinda-kaas/saai | /docker/docker-global.sh | UTF-8 | 973 | 3.90625 | 4 | [] | no_license | #!/bin/bash
# Default identifiers for the EOC app image; DOCKER_TAG_NAME is normally
# replaced by tag_name_check() below before any image is built or pushed.
DOCKER_APP_NAME="eoc"
DOCKER_IMAGE_NAME="art.auiag.corp/eoc"
DOCKER_IMAGE_BASE="art.auiag.corp/node"
DOCKER_TAG_NAME="master"
#######################################
# Map a branch/tag argument onto the Docker tag to publish under.
# An empty argument or "master" maps to "latest"; anything else is used as-is.
# Globals:   DOCKER_TAG_NAME (written)
# Arguments: $1 - branch or tag name (may be empty)
#######################################
tag_name_check() {
  PARAM1=$1
  # Bug fix: the original used two independent `if` statements, so an empty
  # argument first set "latest" and was then overwritten with the empty
  # string by the else-branch of the second test.  One chain handles both.
  if [ -z "$PARAM1" ] || [ "$PARAM1" = "master" ]; then
    DOCKER_TAG_NAME="latest"
  else
    DOCKER_TAG_NAME="$PARAM1"
  fi
}
#######################################
# Print one message line framed by " *** " markers, padded to a fixed width.
# Globals:   MSG1 (written, as in the original)
# Arguments: $1 - message text
#######################################
banner_msg() {
  MSG1=$1
  local padded
  printf -v padded '%-50s' "$MSG1"
  printf ' *** %s *** \n' "$padded"
}
#######################################
# Print the fixed horizontal-rule line used around banner messages.
#######################################
banner_hr() {
  printf '%s\n' " ********************************************************** "
}
# Print the start-up log of a container.
# $1 - container id; pause 2 (from this file) waits briefly so the freshly
# started container has had time to emit some output.
show_log() {
cid=$1
echo "---------------------------------------------------"
printf "Start-up logs: "
pause 2
docker logs $cid
}
#######################################
# Wait for $1 seconds, printing one dot per second as progress, then a newline.
# Arguments: $1 - number of seconds to wait
#######################################
pause() {
  LOOP_COUNT=$1
  local ticks=0
  while [ "$ticks" -lt "$LOOP_COUNT" ]; do
    printf '.'
    sleep 1
    ticks=$((ticks + 1))
  done
  printf '\n'
}
# Report id, IP address and start-up logs of a freshly started container.
# $1 - container id. Uses banner_msg and show_log from this file.
startup_stats() {
CID=$1
# Display ID
banner_msg "ID: $CID"
# Display IP
CIP=$( docker inspect --format='{{.NetworkSettings.IPAddress}}' $1 )
banner_msg "IP: $CIP"
# Display logs
show_log $CID
}
| true |
f39c8a5337989704001f24eafa0ec811464bc353 | Shell | konnorve/genome-closing-workflow | /shell_scripts/alt_canu_assem.sbatch | UTF-8 | 1,179 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#
#SBATCH --job-name=assem_alt
#
# Specifies using a centos7 node
#SBATCH -C centos7
#
# wall clock limit:
#SBATCH --time 12:00:00
#
# Partition to use:
#SBATCH --partition sched_mit_chisholm
#
# Number of tasks/cores for job
#SBATCH -n 10
#
#SBATCH --comment="fly assembly"
#
# emails all notifications
#SBATCH --mail-type=ALL
#SBATCH --mail-user=kve@mit.edu
#
# Request nodes:
#SBATCH --nodes=1
#
#SBATCH --mem 250000
#
#SBATCH --array=1012,1016,1017,1018,1019,1020,1021,1022
#
#SBATCH -o logs/alt_assem_canu/%j_%a_slurm_output.txt
#SBATCH -e logs/alt_assem_canu/%j_%a_slurm_error.txt
# Input (lima-demultiplexed fastq) and output (canu assembly) locations.
lima_dir="/nobackup1/kve/2021_PacBioGenomeClosing/batch1/analysis/alternative/lima_demultiplexed"
canu_dir="/nobackup1c/users/kve/2021_PacBioGenomeClosing/batch1/analysis/alternative/canu_assembly"
# "bc1012" "bc1016" "bc1017" "bc1018" "bc1019" "bc1020" "bc1021" "bc1022"
# The barcode is derived from the SLURM array index (see --array above).
barcode="bc${SLURM_ARRAY_TASK_ID}"
echo ${barcode}
fastq_path=${lima_dir}/"lima_demultiplexed.${barcode}.fastq"
canu_bc_dir=${canu_dir}/${barcode}
# NOTE(review): mkdir without -p fails (and the job continues) when the
# directory already exists from a previous run — confirm that is acceptable.
mkdir ${canu_bc_dir}
echo ${fastq_path}
echo ${canu_bc_dir}
# Assemble this barcode's reads with canu; expected genome size 2.5 Mb.
canu -p ${barcode} -d ${canu_bc_dir} \
genomeSize=2.5m \
-pacbio ${fastq_path}
| true |
111f3d4ac93e3ab71c24987d0c998c170d18d3a9 | Shell | cryptix/randomcode | /sh/cryptoRundown.sh | UTF-8 | 1,607 | 3.25 | 3 | [] | no_license | $!/usr/bin/bash
# OpenSSL walkthrough: RSA key generation, raw RSA encrypt/decrypt, SHA1
# sign/verify, and AES-256-CBC encryption with a random session key.
# Bug fix: plain `echo '\n...'` in bash prints a literal backslash-n, so all
# of the original banners were garbled; printf renders them correctly.
## Generate Keys
printf '\n\nGenerating new RSA key\n'
# Generate a RSA key named 'RSAKey.private.pem'
openssl genrsa -out RSAKey.private.pem 2048
# Encrypt that key so only you can use it
#openssl rsa -in RSAKey.private.pem -des3 -out RSAKey.secured.pem
# Extract the public part
openssl rsa -in RSAKey.private.pem -pubout -out RSAKey.public.pem

## En/Decryption - RSA LowLevel
# since rsa is a block cipher the plaintext needs to be smaller than keysize/8
printf '\n\nGenerating small random data for ENC/DEC\n'
openssl rand -out small.bin 240
# Encrypt
openssl rsautl -encrypt -pubin -inkey RSAKey.public.pem -in small.bin -out small.ciphered
# Decrypt
openssl rsautl -decrypt -inkey RSAKey.private.pem -in small.ciphered -out small2.bin
# test: both digests must match
printf '\n\nsmall.bin == small2.bin?\n'
openssl dgst -sha1 small.bin small2.bin

## Signing/Verifying
printf '\n\nGenerating large random data for Sign/Verify\n'
openssl rand -out test1.bin 4096
# Sign the content of 'test1.bin' with our Key
openssl dgst -sha1 -sign RSAKey.private.pem -out test.bin.signature test1.bin
# Verify that signature (with the public key)
openssl dgst -sha1 -verify RSAKey.public.pem -signature test.bin.signature test1.bin

## En/Decryption - AES with keyfile
printf '\n\nGenerating random session key for AES ENC/DEC\n'
openssl rand -out skey.bin 1024
# encrypt
openssl enc -aes-256-cbc -e -salt -in test1.bin -out test1.ciphered -kfile skey.bin
# decrypt
openssl enc -aes-256-cbc -d -salt -in test1.ciphered -out test2.bin -kfile skey.bin
# test (the original banner read '\n\test1.bin' — a stray "\t" typo)
printf '\n\ntest1.bin == test2.bin?\n'
openssl dgst -sha1 test1.bin test2.bin | true |
eb3c231e12765379605d5bf68b3719c666cd05d2 | Shell | w4-pwr/studia | /semestr-vi/so-jacek/lab2/zad1 | UTF-8 | 328 | 3.515625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Two arguments: answer whether both objects (or symlinks) ultimately point
# at the same filesystem object.  (Comment translated from Polish; the
# user-facing messages are kept as-is.)
if [ "$#" -ne 2 ]
then
    echo "za malo argumentow"
    exit 1
fi

# readlink -f canonicalizes each path, following every symlink.
# Quoting fixes breakage on paths containing spaces; `$(...)` replaces the
# legacy backticks; portable `=` replaces the bash-only `==` inside `[`.
path1=$(readlink -f "$1")
path2=$(readlink -f "$2")

if [ "$path1" = "$path2" ]
then
    echo "wskazuja na te same pliki"
else
    echo "nie wskazuja"
fi | true |
18960a2bc62f4cf2a0a3f9758b3072484f4ff3f4 | Shell | gestos/keuch | /get_song | UTF-8 | 567 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Collect metadata of the track currently playing in MOC (mocp) and append
# it as a comma-prefixed JSON-ish object to the file given as $1.
file_url=$(mocp -Q %file)
title=$(mocp -Q %title)
artist=$(mocp -Q %artist)
song=$(mocp -Q %song)
# NOTE(review): datum is computed but never used below — confirm intent.
datum=$(date '+%d-%m-%y %H:%M')
# Stable per-track id: md5 over path + tags.
songhash=$(echo -e "$file_url$title$artist$song" | md5sum | awk '{print $1}')
# Empty fields fall back to the string "null" via ${var:-null}.
printf ',\n{\n"hash":"%s",\n"filename":"%s",\n"title":"%s",\n"artist":"%s",\n"songname":"%s"\n}' "${songhash:-null}" "${file_url:-null}" "${title:-null}" "${artist:-null}" "${song:-null}" >> $1
#echo -e $json_obj >> $1
# One-second on-screen confirmation; errors (e.g. no X display) are dropped.
xmessage -center -buttons "" -timeout 1 -fg blue -bg yellow "$(mocp -Q %title) added to $1" 2>/dev/null
#echo -e $json_obj
| true |
c911e5f6cf25aa37d936ebc73ccfa622cd7307d8 | Shell | closescreen/geek | /28_URL_CAT | UTF-8 | 624 | 2.75 | 3 | [] | no_license | #!/usr/bin/env bash
#>
# Builds the "url_cat" dataset for one job/day from the 30-day URL dump.
# Relies on in-house tools: href (dictionary lookup), chk (file sanity
# check) and washing (templated pipeline runner).  Everything, stdout and
# stderr alike, is appended to "$0.log".
(
set -u
set +x
set -o pipefail
cd `dirname $0`
job=${1:? Job! }
day=${2:? Day! }
src_base_name="urls30days.gz"
# Map the job name onto a numeric table id via the href dictionary.
tn=$(
href -dict="
net => '0',
google => 3,
mail => 3,
ssp => 3,
" -val="$job"
)
src_f=`echo ../RESULT/10/$day/$job/$tn/$src_base_name`
# Abort early (exit 2) when the source file is missing or empty.
chk $src_f "source file for URL_CAT" "-s" "exists-and-noempty" nop || exit 2
# washing renames the output to url_cat.gz and runs the embedded
# zcat | grep | 28_process | sort pipeline, gzip-compressing the result.
echo "$src_f" |
washing -res="s|$src_base_name|url_cat.gz|" -v_flag="
zcat %s |
grep -v -P 'anonymous\.google' |
./28_process |
LANG=POSIX sort -t\\* -S 333M -k1,1 --compress-program=gzip" -comp=gzip -time=00_all.time.log
)>>"$0.log" 2>&1
| true |
02dc0d3aa397243ad217c33b86285c9e6aaf9e26 | Shell | AnnaSkachkauskaite/project-2016 | /find.sh | UTF-8 | 112 | 2.75 | 3 | [] | no_license | #!/bin/bash
# echoerr: print all arguments on stderr (progress output that does not
# pollute the stdout data stream).
echoerr() { printf '%s\n' "$*" 1>&2; }
# Run try.py over every CSV in the current directory, echoing each file
# name to stderr as a progress marker.
# NOTE(review): ${file} is unquoted, and when no .csv exists the glob stays
# literal, so try.py would receive the string "./*.csv" — verify intent.
for file in ./*.csv
do
python3 try.py ${file}
echoerr ${file}
done | true |
06402793646f5766ab86aa4c21a852e9f1c3fda8 | Shell | BaobabHealthTrust/migration_imports | /migration_fixes_scripts/migration_fixes.sh | UTF-8 | 2,080 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Print invocation help plus the list of available site data directories.
usage() {
  printf '%s\n' "Usage:" "" "ENVIRONMENT should be: bart2" "Available SITES:"
  ls -1 db/data
}
set -x # turns on stacktrace mode which gives useful debug information
# NOTE(review): -x tests for *executable*; -e or -f (exists) was probably
# intended for a YAML config file — verify.
if [ ! -x config/database.yml ] ; then
cp config/database.yml.example config/database.yml
fi
# Pull the bart2 connection settings out of database.yml via ruby/YAML.
USERNAME=`ruby -ryaml -e "puts YAML::load_file('config/database.yml')['bart2']['username']"`
PASSWORD=`ruby -ryaml -e "puts YAML::load_file('config/database.yml')['bart2']['password']"`
DATABASE=`ruby -ryaml -e "puts YAML::load_file('config/database.yml')['bart2']['database']"`
HOST=`ruby -ryaml -e "puts YAML::load_file('config/database.yml')['bart2']['host']"`
now=$(date +"%T")
echo "start time : $now"
echo "creating dispensation, appointment and exit from HIV care encounters....."
mysql --user=$USERNAME --password=$PASSWORD --host=$HOST $DATABASE<<EOFMYSQL
CALL proc_import_from_temp;
EOFMYSQL
echo "calculating adherence................................"
mysql --user=$USERNAME --password=$PASSWORD --host=$HOST $DATABASE<<EOFMYSQL
CALL proc_update_obs_order_id;
EOFMYSQL
# The remaining clean-ups are Rails runner scripts executed in sequence.
echo "fixing retired drugs"
script/runner script/all_after_migration_scripts/fix_program_locations.rb
echo "fixing equivalent daily dose"
script/runner script/all_after_migration_scripts/fix_for_equivalent_daily_dose.rb
echo "adding the hanging pills"
script/runner script/all_after_migration_scripts/include_hanging_pills_to_drug_orders.rb
echo "recalculating adherence"
script/runner script/all_after_migration_scripts/recalculate_adherence.rb
echo "creating OPD program"
script/runner script/all_after_migration_scripts/creating_patient_opd_program.rb
echo "fixing earliest_start_date"
script/runner script/all_after_migration_scripts/fix_earliest_start_date.rb
echo "fixing arv numbers"
script/runner script/arv_format_fix.rb
echo "deleting temp_encounter and temp_obs tables..........."
# NOTE(review): unlike every call above, this one omits --host=$HOST and so
# connects to the default host — confirm whether that is intentional.
mysql --user=$USERNAME --password=$PASSWORD $DATABASE<<EOFMYSQL
DROP table temp_encounter;
DROP table temp_obs;
EOFMYSQL
later=$(date +"%T")
echo "start time : $now"
echo "end time : $later"
| true |
e6d9ed8ec90115aa9b8ea1a01a34e73838ac010b | Shell | mouhyadin-raguer/tp_l2_info | /s3/outil_dev/tp4/tatin2.sh | UTF-8 | 354 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Print every line of a file with its words in reverse order.
# The file name comes from $1, or is read from stdin when absent.
if [ $# -eq 1 ] ; then
    nomfic=$1
else
    read nomfic
fi

while read ligne ; do
    # number of words on this line
    nbm=$(echo $ligne | wc -w)
    phrase=''
    # walk the word ranks from last to first, appending each one
    for i in $(seq $nbm -1 1); do
        # $ligne is deliberately unquoted here: the shell collapses runs of
        # blanks, which keeps cut's field numbering in step with wc -w
        mot=$(echo $ligne | cut -d' ' -f$i)
        # glue the word onto the end of the output sentence
        phrase="$phrase $mot"
    done
    echo $phrase
# Bug fix: the redirection target is now quoted so file names containing
# spaces work.
done < "$nomfic"
| true |
ae829d3c08d845035bdca240af838371ec419f8d | Shell | angad777/vscode | /scripts/check-update.sh | UTF-8 | 1,454 | 3.625 | 4 | [] | no_license | #!/bin/sh
# Fail on any error and on any unset variable (so $1 is mandatory below).
set -eu

# For local development, in production, the environment will be set though GH actions and GH secrets
if [ -f ".envrc" ]; then
echo "Loading .envrc"
# shellcheck disable=SC1091
. .envrc
else
echo "No .envrc"
fi

# Release channel selects which cached Prisma version file to compare against.
RELEASE_CHANNEL=$1
echo "RELEASE_CHANNEL: $RELEASE_CHANNEL"

if [ "$RELEASE_CHANNEL" = "dev" ]; then
CURRENT_VERSION=$(cat scripts/prisma_version_insider)
else
CURRENT_VERSION=$(cat scripts/prisma_version_stable)
fi
echo "CURRENT_VERSION: $CURRENT_VERSION"

# Latest published versions, resolved by helper scripts.
NPM_VERSION=$(sh scripts/prisma-version.sh "$RELEASE_CHANNEL")
echo "NPM_VERSION: $NPM_VERSION"

EXTENSION_VERSION=$(sh scripts/extension-version.sh "$RELEASE_CHANNEL" "")
echo "EXTENSION_VERSION: $EXTENSION_VERSION"

# Setup the repo with GH_TOKEN to avoid running jobs when CI commits
if [ "$ENVIRONMENT" = "PRODUCTION" ]; then
git config --global user.email "prismabots@gmail.com"
git config --global user.name "Prismo"
git remote add github "https://$GITHUB_ACTOR:$GH_TOKEN@github.com/$GITHUB_REPOSITORY.git" || true
else
echo "Not setting up repo because ENVIRONMENT is not set"
fi

# Signal GitHub Actions (via ::set-output) only when an update is pending.
if [ "$CURRENT_VERSION" != "$NPM_VERSION" ]; then
NEXT_EXTENSION_VERSION=$(node scripts/extension-version.js "$NPM_VERSION" "$EXTENSION_VERSION")
echo "NEXT_EXTENSION_VERSION: $NEXT_EXTENSION_VERSION"
echo "::set-output name=new_updates::true"
else
echo "CURRENT_VERSION ($CURRENT_VERSION) and NPM_VERSION ($NPM_VERSION) are same"
fi | true |
2f26b6572d8058273635494d59201b45e2e618a5 | Shell | LlorandoLau/share-code | /Newone-code/mywork/ch01/first | UTF-8 | 83 | 2.578125 | 3 | [] | no_license | #!/bin/sh
# List every regular file in the current directory whose contents mention
# the word "bill".
for file in *
do
    # Restrict to regular files and quote the name: the original ran grep on
    # directories (printing errors) and broke on names containing spaces.
    if [ -f "$file" ] && grep -q bill "$file"
    then
        echo "$file"
    fi
done
exit 0
| true |
5ae486fe743fe97ac4a528fcc92350e505964872 | Shell | elgatosf/jonah | /jonah/support_files/jonah/spinup.sh | UTF-8 | 424 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# This script initializes the Django project. It will be executed (from
# supervisord) every time the Docker image is run.
cd /code/ddp/
# Initialize Django project
python /code/ddp/manage.py collectstatic --noinput --clear
python /code/ddp/manage.py migrate --noinput
# (re)compile Translations
python /code/ddp/manage.py compilemessages || echo "Did not find messages to compile (or other error occurred)"
| true |
48e5add76bc6d145b7f1730b82b6549270038e90 | Shell | smhuang426/performous | /osx-utils/performous-app-build.sh | UTF-8 | 1,490 | 3.0625 | 3 | [] | no_license | # the very first step is to check that dylibbundler exists,
# without it the bundle would be broken
if which dylibbundler &> /dev/null; then
echo "dylibbundler found!"
else
echo "dylibbundler not found! you need to install it before creating the bundle."
exit
fi
# first compile performous, build dir shouldn't exist at this stage
mkdir build
cd build
cmake ../../ -DCMAKE_INSTALL_PREFIX=./out/Performous.app/Contents -DENABLE_TOOLS=OFF
make install
# then create the rest of the app bundle
mkdir out/Performous.app/Contents/MacOS
mkdir out/Performous.app/Contents/Resources
mkdir out/Performous.app/Contents/Frameworks
mv out/Performous.app/Contents/bin/* out/Performous.app/Contents/MacOS/
cp ../resources/performous-launcher out/Performous.app/Contents/MacOS/
cp ../resources/performous.icns out/Performous.app/Contents/Resources
cp ../resources/Info.plist out/Performous.app/Contents/
cp -R ../resources/etc out/Performous.app/Contents/Resources
cp -R /Library/Frameworks/SDL.framework out/Performous.app/Contents/Frameworks/SDL.framework
dylibbundler -od -b -x ./out/Performous.app/Contents/MacOS/performous -d ./out/Performous.app/Contents/libs/
# then build the disk image
ln -sf /Applications out/Applications
/usr/bin/hdiutil create -srcfolder out -volname Performous -fs HFS+ -fsargs "-c c=64,a=16,e=16" -format UDRW RWPerformous.dmg
/usr/bin/hdiutil convert RWPerformous.dmg -format UDZO -imagekey zlib-level=9 -o Performous.dmg
rm -f RWPerformous.dmg
cd ..
| true |
8fe299079fce1276a3d3cc7f9a0cb1e2e0f0e6b3 | Shell | wisehead/shell_scripts | /mysql/mon_mem.sh | UTF-8 | 388 | 2.5625 | 3 | [] | no_license | #############################################################
# File Name: mon_mem.sh
# Author: Hui Chen (c) 2017
# Mail: chenhui13@baidu.com
# Create Time: 2017/09/25-19:33:56
#############################################################
#!/bin/sh
# Poll used memory (third column of `free`'s Mem: row) once a second,
# forever.
# NOTE(review): `[ true ]` only tests that the string "true" is non-empty,
# so the loop is unconditionally infinite; SLEEP_TIME is captured but the
# only line that would use it is commented out — confirm intent.
SLEEP_TIME=$1
while [ true ]
do
#iostat -xkt 1 1
free|grep -i mem|awk '{print$3}'
#sleep $((SLEEP_TIME-1))
sleep 1
done
| true |
306eba687db51fa88f4b5bdea2198faa53a776de | Shell | AenBleidd/tagir-vengerberg-linux-scripts | /alac2mp3 | UTF-8 | 2,528 | 3.21875 | 3 | [] | no_license | #!/usr/bin/sh
# convert all files from m4a to mp3
# requires:
#
# alac (alac_decoder)
# lame (lame)
# mp4info (mp4v2)
# php (php5)
# eyeD3 (python-eyeD3)
# python (python)
#
for file in *.m4a; do
# Decode ALAC to stdout and re-encode as 320 kbit/s CBR 44.1 kHz MP3.
# NOTE(review): the surrounding $( ... ) runs the pipeline and then tries to
# execute its (normally empty) output as a command — the wrapper looks
# unintended; verify.
$(alac "${file}" | lame --cbr -b 320 --resample 44.1 - "${file%.m4a}.mp3");
# Dump the MP4 metadata once, then scrape individual fields out of it with
# PHP regexes (the dump is smuggled in via the fake query string tag=...).
tag=`mp4info "${file}"`
tagName=`php -r 'parse_str(implode("&", array_slice($argv, 1)), $_GET); if(preg_match("/Name:\s([\p{L}[:print:]]+)/u", $_GET["tag"], $out)) {echo "{$out[1]}";} else {echo "";}' tag="$tag"`
tagArtist=`php -r 'parse_str(implode("&", array_slice($argv, 1)), $_GET); if(preg_match("/Artist:\s([\p{L}[:print:]]+)/u", $_GET["tag"], $out)) {echo "{$out[1]}";} else {echo "";}' tag="$tag"`
tagAlbum=`php -r 'parse_str(implode("&", array_slice($argv, 1)), $_GET); if(preg_match("/Album:\s([\p{L}[:print:]]+)/u", $_GET["tag"], $out)) {echo "{$out[1]}";} else {echo "";}' tag="$tag"`
tagGenre=`php -r 'parse_str(implode("&", array_slice($argv, 1)), $_GET); if(preg_match("/Genre:\s([\p{L}[:print:]]+)/u", $_GET["tag"], $out)) {echo "{$out[1]}";} else {echo "";}' tag="$tag"`
tagYear=`php -r 'parse_str(implode("&", array_slice($argv, 1)), $_GET); if(preg_match("/Release Date:\s([[:digit:]]{4})/", $_GET["tag"], $out)) {echo "{$out[1]}";} else {echo "";}' tag="$tag"`
tagTrackNumber=`php -r 'parse_str(implode("&", array_slice($argv, 1)), $_GET); if(preg_match("/Track:\s([[:digit:]]{1,2})/", $_GET["tag"], $out)) {echo "{$out[1]}";} else {echo "";}' tag="$tag"`
tagTrackTotal=`php -r 'parse_str(implode("&", array_slice($argv, 1)), $_GET); if(preg_match("/Track:\s[[:digit:]]{1,2}\sof\s([[:digit:]]{1,2})/", $_GET["tag"], $out)) {echo "{$out[1]}";} else {echo "";}' tag="$tag"`
tagCover=`php -r 'parse_str(implode("&", array_slice($argv, 1)), $_GET); if(preg_match("/Cover Art pieces:\s([[:digit:]]{1,2})/", $_GET["tag"], $out)) {echo "{$out[1]}";} else {echo "";}' tag="$tag"`
# Write the scraped tags into the new MP3's ID3 fields.
eyeD3 --set-encoding=utf8 --title="$tagName" --artist="$tagArtist" --album="$tagAlbum" --set-text-frame="TCON:$tagGenre" --year="$tagYear" --set-text-frame="TDRC:$tagYear" --track="$tagTrackNumber" --track-total="$tagTrackTotal" "${file%.m4a}.mp3"
# If the MP4 carried cover art, extract it and attach it as FRONT_COVER.
if [ $tagCover -gt 0 ]
then
coverPath=`mp4art --extract "${file}"`
coverName=`php -r 'parse_str(implode("&", array_slice($argv, 1)), $_GET); if(preg_match("/->\s([\p{L}[:print:]]+)/u", $_GET["cover"], $out)) {echo "{$out[1]}";} else {echo "";}' cover="$coverPath"`
eyeD3 --set-encoding=utf8 --add-image "$coverName:FRONT_COVER" "${file%.m4a}.mp3"
rm "$coverName"
fi
done
| true |
c52a3fee279b4a8a457a607515d8bf699c4911ab | Shell | pappu8871/PracticeAssignment | /day5pdf2/day5pdf2-Sel-3.sh | UTF-8 | 2,088 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Read a number (0-9999 intended) and spell out each decimal digit group:
# thousands, hundreds, tens and units.
#
# Fixes over the original:
#  * `echo -value` is not an echo option, so "-value" was printed literally
#    on every line; `echo -n` is used for the prompt, plain echo elsewhere.
#  * every word was wrapped in `for ((i=0; i<=digit; i++))`, printing it
#    digit+1 times; each word is now printed exactly once.
#  * the trailing `'expr ...'` was single-quoted, so the sum was printed as
#    a literal string instead of being evaluated; shell arithmetic is used.
echo -n "Give the number: "
read price

# Split the number into its decimal digit groups.
thousands=$((price/1000))
hundreds=$((price%1000/100))
teens=$((price%100/10))
units=$((price%10))

# Word tables, indexed by digit value (0 maps to "Zero" as in the original).
thousands_words=(Zero onethousands twothousands threethousands fourthousands fivethousands sixthousands seventhousands eightthousands ninethousands tenthousands)
hundreds_words=(Zero onehundreds twohundreds threehundreds fourhundreds fivehundreds sixhundreds sevenhundreds eighthundreds ninehundreds tenhundreds)
digit_words=(Zero one two three four five six seven eight nine ten)

echo "${thousands_words[$thousands]}"
echo "${hundreds_words[$hundreds]}"
echo "${digit_words[$teens]}"
echo "${digit_words[$units]}"

echo "The price is: $((thousands + hundreds + teens + units))"
| true |
0d2cc8c0cd4b9b79f037c7112a22e305c475cb0d | Shell | jheffer/Physics | /beam-profile/run.sh | UTF-8 | 1,933 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# ------------------------------------------------------------------
# [Joe Heffer] Beam profiling 2.0
# Analyse BMP files into a beam profile
# ------------------------------------------------------------------
# Options
FILES=*.bmp # list of bitmaps to analyse
PLOTDIR=plots
DATADIR=data
# Clear old data
# radii.txt collects one row of fitted sigma values per bitmap.
rm radii.txt
echo -e "# file\tsig_x\tsig_x_err\tsig_y\tsig_y_err" > radii.txt
rm -r data
rm -r $PLOTDIR
mkdir data
mkdir $PLOTDIR
for file in $FILES # loop through bitmaps
do
echo "Analysing $file"
./bmptotxt $file # BMP -> TXT
mv $file.txt $DATADIR/$file.txt
# rowcol.py splits the text dump into per-axis data files (*-x.txt/*-y.txt).
python rowcol.py $DATADIR/$file.txt
# Naive fit properties
# These seed the gnuplot Gaussian fit below.
MAXINTX="$(python max.py $DATADIR/$file-x.txt)"
MAXINTY="$(python max.py $DATADIR/$file-y.txt)"
PEAKPOSX="$(python peakpos.py $DATADIR/$file-x.txt)"
PEAKPOSY="$(python peakpos.py $DATADIR/$file-y.txt)"
SIGMAX="$(python sigma.py $DATADIR/$file-x.txt)"
SIGMAY="$(python sigma.py $DATADIR/$file-y.txt)"
gnuplot -e "filename='$file';psname='$PLOTDIR/$file-x.ps';title='$file x-axis';datafile='$DATADIR/$file-x.txt';maxint=$MAXINTX;peakpos=$PEAKPOSX;sigma=$SIGMAX;" plot.gnu
mv fit.log $PLOTDIR/$file-x.log
gnuplot -e "filename='$file';psname='$PLOTDIR/$file-y.ps';title='$file y-axis';datafile='$DATADIR/$file-y.txt';maxint=$MAXINTY;peakpos=$PEAKPOSY;sigma=$SIGMAY;" plot.gnu
mv fit.log $PLOTDIR/$file-y.log
# echo -e "$file\t"$(grep "+/-" $PLOTDIR/$file-x.log | grep "sigma")"\t"$(grep "+/-" $PLOTDIR/$file-y.log | grep "sigma") >> radii.txt
# Scrape "sigma = value +/- err" from each gnuplot fit log.
resultx=$(grep "+/-" $PLOTDIR/$file-x.log | grep "sigma")
stringarrayx=($resultx)
sig_x=${stringarrayx[2]}
sig_x_err=${stringarrayx[4]}
resulty=$(grep "+/-" $PLOTDIR/$file-y.log | grep "sigma")
stringarrayy=($resulty)
sig_y=${stringarrayy[2]}
sig_y_err=${stringarrayy[4]}
echo -e "$file\t$sig_x\t$sig_x_err\t$sig_y\t$sig_y_err" >> radii.txt
done
# Final profile plots over the collected radii.
gnuplot profile.gnu
gnuplot profile_lin.gnu
| true |
a11a01373d9429fe6a5e466c2e6375da1aea7084 | Shell | jesgogu27/holberton-system_engineering-devops | /0x05-processes_and_signals/3-show_your_bash_pid_made_easy | UTF-8 | 161 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env bash
# Write a Bash script that displays the PID,
# along with the process name,
# of processes whose name contain the word bash.
pgrep -l bash | true |
260915e0fe8bbd5575826772f00b552ec666d47f | Shell | wiremoore/import-ova | /import-ova.sh | UTF-8 | 1,611 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Import an OVA and auto provision a VM
# Extracts the OVA given as $1, interactively collects VM settings, creates
# the VM with Proxmox's qm, converts/attaches each contained .vmdk disk, and
# finally removes the temporary extraction directory.
OVA_TEMPLATE=$1
EXTRACT_PATH=".import_ova_tmp"

echo -e "Extracting OVA file........."
mkdir "$EXTRACT_PATH"
echo "$OVA_TEMPLATE"
tar -xvf "${OVA_TEMPLATE}" -C "$EXTRACT_PATH"

echo -e "\nSetting up VM: "
read -p "VM ID: " VM_ID
read -p "VM Name: " VM_NAME
read -p "RAM (MB)[2048]: " VM_RAM
VM_RAM=${VM_RAM:-2048}
read -p "Sockets [1]: " VM_SOCK
VM_SOCK=${VM_SOCK:-1}
read -p "Cores [1]: " VM_CORES
VM_CORES=${VM_CORES:-1}
read -p "Autostart enable [0]: " VM_AUTO_START
VM_AUTO_START=${VM_AUTO_START:-0}
read -p "KVM Virtualization enable [0]: " VM_KVM
VM_KVM=${VM_KVM:-0}
read -p "Bridge for network interface [vmbr0]: " VM_BRIDGE
VM_BRIDGE=${VM_BRIDGE:-vmbr0}

qm create ${VM_ID} --autostart ${VM_AUTO_START} --cores ${VM_CORES} --kvm ${VM_KVM} --memory ${VM_RAM} --name ${VM_NAME} --sockets ${VM_SOCK} --scsihw virtio-scsi-pci --net0 virtio,bridge=${VM_BRIDGE},firewall=0

echo -e "\nThe following disk will be convert: "
# Bug fix: abort if the cd fails — otherwise the trailing `cd ..` and
# `rm -r` pair would delete a same-named directory in the wrong parent.
cd "$EXTRACT_PATH" || exit 1
ls -1 *.vmdk
read -p "Do you want to proceed [n]: " CONVERT_CONFIRM
# Bug fix: default to "n" and quote the test; the original unquoted
# `[ $CONVERT_CONFIRM = "y" ]` errored out when the user just pressed Enter.
CONVERT_CONFIRM=${CONVERT_CONFIRM:-n}
if [ "$CONVERT_CONFIRM" = "y" ]
then
    echo -e "\nConverting disk......"
    disk_nb=0
    for disk in *.vmdk; do
        read -p "Select Controller for ${disk} [ide/sata/scsi]: " CONTROLLER
        #qemu-img convert -f vmdk -O qcow2 "${disk}" image-${disk_nb}.qcow2
        qm importdisk ${VM_ID} "${disk}" local-lvm -format qcow2
        echo "Attach disk number ${disk_nb} to the VM......"
        qm set ${VM_ID} --${CONTROLLER}${disk_nb} local-lvm:vm-${VM_ID}-disk-${disk_nb}
        disk_nb=$((disk_nb+1))
    done
else
    echo "Operations cancelled, exiting....."
fi
cd ..
rm -r "$EXTRACT_PATH"
| true |
8849405690f2ac10f79e154a8c960035acd708b6 | Shell | Kolyavolkov/trainingcenter | /Nikolai_Volkov_Lection1-2_V2.sh | UTF-8 | 2,174 | 4.46875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# Shows info about RAM, environment, disk usage and directory statistic.
# Exit immediately if any command fails.
set -e
LOGS=./full_env_info.log #variable to a log file
timestamp() {
  # Current wall-clock time as HH:MM:SS (equivalent to date +%T).
  date '+%H:%M:%S'
}
###########################################
# Shows RAM and basic environment variables on the terminal, and writes a
# fuller report into $LOGS in the current working directory.
# Globals:
#   LOGS - log file path (truncated here, then appended to)
# Arguments:
#   None
###########################################
function basic() {
timestamp
echo -e "\nInformation about RAM"
echo -e "\nINFORMATION ABOUT RAM\n" > $LOGS
free -h | tee -a $LOGS
cat /proc/meminfo >> $LOGS
echo -e "\nHere is some basic environment information"
printenv | grep ^SHELL | tee -a $LOGS
printenv | grep ^TERM | tee -a $LOGS
printenv | grep ^USERNAME | tee -a $LOGS
printenv | grep ^PWD | tee -a $LOGS
printenv | grep ^PATH | tee -a $LOGS
printenv | grep ^LANG= | tee -a $LOGS
# The full environment goes into the log only, not to the terminal.
echo -e "\nENVIRONMENT VARIABLES INFORMATION\n" >> $LOGS
printenv >> $LOGS
echo -e "\n$LOGS file created"
}
###########################################
# Shows total disk usage, most heavy files in current directory,
# all executables and stat.
# Arguments:
#   $1 - number of entries to show from the size-sorted listing
###########################################
function printstat() {
  # Bug fix: use the function's own argument. The original read the global
  # $OPTARG, which only worked because the caller happened to run inside a
  # getopts loop; `tail -N` is also replaced by the supported `tail -n N`.
  local n=$1
  timestamp
  echo -e "\nSummary of total disk usage by $(pwd)"
  du -sh "$(pwd)"
  echo -e "\n$n most heavy files in $(pwd)"
  ls -lSr | tail -n "$n"
  echo -e "\nAll executable files in here"
  find "$(pwd)" -perm /a=x
  echo -e "\nStat command"
  stat "$(pwd)"
}
# Multi-line help text; the embedded \n sequences are rendered later by
# `echo -e` in the -h branch.
help="$0 [-b] [-s n] -- this script shows information about environment and
\ndirectory statistics. Where: -h --help shows this manual.
\n-b --basic shows basic environment information, writes it to fullenvinfo.log
\nfile and appends output of printenv command.
\n-s --stat shows summary of total disk usage, most heavy files
\nand stat command. [n] -argument for tailing output"

# Warn when invoked with no arguments at all.
if [ -z "$1" ]; then #checking for empty argument list
echo "No arguments, try [-h] for help"
fi

# Short-option parsing: -s takes a numeric argument, -b and -h are flags.
while getopts "s:bh" opt; do #parsing arguments
case $opt in
s ) printstat $OPTARG ;;
b ) basic ;;
h ) echo -e $help ;;
\?) echo "Use : cmd [-s n] [-b] [-h]";;
esac
done
| true |
8ddf3658c6236dcd771af75f1b925401aaffa3bb | Shell | marian-nmt/marian-regression-tests | /tests/training/features/data-weighting/test_validation.sh | UTF-8 | 1,183 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/bin/bash -x
#####################################################################
# SUMMARY:
# TAGS: dataweights
#####################################################################
# Exit on error
set -e
# Test code goes here
# Fresh working directory for validation artifacts.
rm -rf valid valid_script.temp
mkdir -p valid
# Short deterministic training run with sentence-level data weighting and
# two validation metrics (cross-entropy and an external script).
$MRT_MARIAN/marian \
--seed 4444 --no-shuffle --maxi-batch 1 --maxi-batch-sort none --dim-rnn 64 --dim-emb 32 --cost-type ce-mean \
-m valid/model.npz -t train.1k.{de,en} -v vocab.{de,en}.yml \
--disp-freq 5 --valid-freq 15 --after-batches 50 \
--data-weighting train.1k.weights.txt --data-weighting-type sentence \
--valid-metrics cross-entropy valid-script --valid-script-path ./valid_script.sh \
--valid-sets $MRT_DATA/europarl.de-en/toy.bpe.{en,de} \
--valid-log valid/valid.log --log valid/train.log

# The run must have produced a model and both logs.
test -e valid/model.npz
test -e valid/valid.log
test -e valid/train.log

# Normalize the logs and compare them numerically against the expectations.
$MRT_TOOLS/strip-timestamps.sh < valid/valid.log > valid.out
$MRT_TOOLS/extract-costs.sh < valid/train.log > train.out

$MRT_TOOLS/diff-nums.py valid.out valid.expected -p 1.99 -o valid.diff
$MRT_TOOLS/diff-nums.py train.out train.expected -p 1.99 -o train.diff

# Exit with success code
exit 0
| true |
baa34b3e13714f7eb7cccbc011e0326d8eafa2e2 | Shell | regro-cf-autotick-bot/xalan-c-feedstock | /recipe/build.sh | UTF-8 | 272 | 2.71875 | 3 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Point the Xalan build at the conda-provided Xerces and at the source tree.
export XERCESCROOT=${PREFIX}
export XALANCROOT=${SRC_DIR}
# Pick the runConfigure platform; on Linux the C preprocessor is also
# pinned to the conda toolchain via CXXCPP.
if [[ ${target_platform} == osx-64 ]]; then
platform=macosx
else
platform=linux
export CXXCPP=${CPP}
fi
# 64-bit build installed into the conda prefix.
./runConfigure -p ${platform} -c $CC -x $CXX -b 64 -P ${PREFIX}
make
make install
| true |
6ad9bdb270fd35855cbc57113425106e2c009e6b | Shell | Cli-Appss/Linux-notes-app | /task-remove.sh | UTF-8 | 259 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Delete a note file from the tasks/ directory, prompting for its name.
read -p 'Please enter Filename you want to delete>' file
# Bug fix: abort when the notes directory is missing — otherwise the rm
# below would operate in whatever the current directory happens to be.
cd tasks/ || exit 1
if [ -f "$file" ]
then
    # `--` protects against file names starting with a dash.
    rm -- "$file"
    echo 'removed note successfully'
else
    # The original `elif [ ! -f "$file" ]` was simply the negation of the
    # if-condition, i.e. a plain else.
    echo "file was not found in $file please try again with .txt format"
fi
| true |
3bd6e412f3ec48f8d92bcfda9ff88521899ceb96 | Shell | falconray0704/golang-armbuilds | /build-all.sh | UTF-8 | 876 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# --- build all version of Go1.4
# iterate over all available Go1.4 versions
# NOTE(review): the second assignment overrides the first, narrowing the
# build to 1.4.3 only — presumably a deliberate shortcut; verify.
go14_list=( 1.4 1.4.1 1.4.2 1.4.3 )
go14_list=( 1.4.3 )
for go14 in "${go14_list[@]}"
do
# iterate over all available GOARM versions
goarm_list=( 5 6 7 )
for goarm in "${goarm_list[@]}"
do
export GOARM=${goarm}
export GO_VERSION=${go14}
time ./make-tarball-go1.4.sh | tee "make-tarball-go${GO_VERSION}-armv${GOARM}.log"
done
done
# --- build all version of Go1.5
# iterate over all available Go1.5 versions
# NOTE(review): as above, the second list narrows the set to 1.5.1/1.5.2.
go15_list=( 1.5 1.5.1 1.5.2 )
go15_list=( 1.5.1 1.5.2 )
for go15 in "${go15_list[@]}"
do
# iterate over all available GOARM versions
goarm_list=( 5 6 7 )
for goarm in "${goarm_list[@]}"
do
export GOARM=${goarm}
export GO_VERSION=${go15}
time ./make-tarball-go1.5.sh | tee "make-tarball-go${GO_VERSION}-armv${GOARM}.log"
done
done
| true |
edd88b02a679fc91e0cdec1698549725d8177618 | Shell | jpentland/rcfiles | /scripts/bspstatus | UTF-8 | 5,002 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Status-bar driver for bspwm: background collectors write "<module> <data>"
# lines into a fifo, which the reader loop at the bottom folds into dzen2.
fifo=/tmp/$USER-statusbar
font_size=12
font="SauceCodePro Nerd Font:antialias=true:autohint=true:pixelsize=$font_size:style=Medium,Regular"
self=$0
mods="$HOME/.local/dzen2"
# Fraction of the screen width given to the left (workspace/title) bar.
left_bar_ratio=0.45
restart_file=/tmp/$USER-statusbar-restart
# Unicode Chars
# NOTE(review): bash does not expand \uXXXX inside double quotes, so these
# are stored as literal backslash sequences — confirm dzen2 gets what is
# intended.
right_triangle="\ue0b0"
left_triangle="\ue0b2"
monocle="\ufbce"
tiled="\ue75a"
# Colors
cbg1="#444455"
# Create fifo
# (recreate the path if it exists but is not a fifo)
if ! [ -p $fifo ]; then
rm -f $fifo
mkfifo $fifo
fi
# -k option to kill statusbar
# (command is delivered through the fifo to the already-running instance)
if [ "$1" == "-k" ]; then
echo "kill 0" >> $fifo
exit 0
fi
# -r option to restart statusbar
if [ "$1" == "-r" ]; then
echo "restart 0" >> $fifo
exit 0
fi
# Formatting functions
# cbg: wrap the arguments in a dzen2 background-color tag.
function cbg {
    color=$1; shift
    local wrapped="^bg($color)$*^bg()"
    printf '%s' "$wrapped"
}
# cfg: wrap the arguments in a dzen2 foreground-color tag.
function cfg {
    color=$1; shift
    local wrapped="^fg($color)$*^fg()"
    printf '%s' "$wrapped"
}
# shorten: truncate the remaining arguments to at most $1 characters,
# appending an ellipsis when truncation happened.
# Bug fix: the original `echo -n "...\u2026"` emitted a literal "\u2026"
# (plain echo does not interpret \u escapes); a real UTF-8 ellipsis is
# printed instead.
function shorten {
    local len=$1; shift
    local var="$*"
    if [ "${#var}" -gt "$len" ]; then
        printf '%s…' "${var:0:$len}"
    else
        printf '%s' "$var"
    fi
}
# Add a space to the right of non-empty strings.
# Blank-only input yields a bare newline, exactly like the original's
# grep-based test.
function padr {
    case "$*" in
    *[!\ ]*) echo "$* " ;;
    *) echo ;;
    esac
}
# Add a space to the left of non-empty strings.
# Blank-only input yields a bare newline, exactly like the original's
# grep-based test.
function padl {
    case "$*" in
    *[!\ ]*) echo " $*" ;;
    *) echo ;;
    esac
}
# Get Screen Width
# (line 2 of xrandr's monitor list is the first monitor; extract its width)
screen_width=$(xrandr --listactivemonitors | sed -n '2s/.* \([0-9]\+\)\/.*/\1/p')
left_bar_width=$(bc <<< "( $screen_width * $left_bar_ratio )" | sed 's/\..*//')
echo "screen width = $screen_width"
echo "left bar = $left_bar_width"
# dzen commands
# Each reads statusbar markup on stdin; uniq suppresses repeated frames.
function dzen_left {
sleep 0.2s
LC_ALL=c stdbuf -oL -eL uniq \
| dzen2 -xs 1 -ta l -dock -fn "$font" -tw $left_bar_width -bg "#000000"
}
function dzen_right {
LC_ALL=c stdbuf -oL -eL uniq \
| dzen2 -xs 1 -ta r -dock -fn "$font" -bg "#000000"
}
# Cleanup on exit
# cleanup: disarm the trap, kill all background children, then exit.
function cleanup {
trap - INT TERM
killjobs
exit 0
}
function killjobs {
kill $(jobs -p)
}
trap cleanup INT TERM
# Periodically run a command and pipe its output to the statusbar,
# prefixing every line with the module name.
# Usage: period <name> <period> <command...>
function period {
    local name=$1; shift
    local slp=$1; shift
    # Bug fix: keep the command as an array instead of flattening it into
    # one string (`cmd=$@` + unquoted `$cmd`), which broke on commands whose
    # path or arguments contain spaces.
    local -a cmd=("$@")
    while true; do
        "${cmd[@]}" 2> /dev/null | sed -u "s/^.*$/$name &/"
        sleep "$slp"
    done
}
# Print " - tabs" when bsptab reports that new windows will be added to the
# focused tab group, an empty line otherwise (consumed by the "tabs" tag).
function check_tabs {
	if [ "$(bsptab check-add-new)" == "true" ]; then
		echo " - tabs"
	else
		echo ""
	fi
}
# --- Status producers ------------------------------------------------------
# Each producer is backgrounded and appends "<tag> <payload>" lines to the
# FIFO; the dispatch loop below switches on <tag>.
period battery 1m "$mods/battery.sh" >> $fifo &
period bitcoin 1m "$mods/bitcoin.sh" >> $fifo &
period bluetooth 10s "$mods/bluetooth.sh" >> $fifo &
period kbmap 10s "$mods/kbmap.sh" >> $fifo &
period mail 1m "$mods/mail.sh" >> $fifo &
period mem 5s "$mods/mem.sh" >> $fifo &
period network 5s "$mods/network.sh" >> $fifo &
period song 5s "$mods/song.sh" >> $fifo &
period volume 5s "$mods/volume.sh" >> $fifo &
period weather 1m "$mods/weather.sh" >> $fifo &
period tabs 5s "check_tabs" >> $fifo &
# Event-driven producers: window title and clock(s).
xtitle -sf 'title %s\n' >> $fifo &
clock -sf "date %a %b %_d" >> $fifo &
clock -sf "time %k:%M" >> $fifo &
# bspwm desktop/layout report, post-processed into "ws"/"layout" lines; the
# tabs indicator is refreshed on every bspwm event as well.
bspc subscribe report | $mods/process_report.py | while read line; do
	echo $line
	echo "tabs $(check_tabs)"
done >> $fifo &
# Caps Lock watcher -- NOTE(review): assumes an external writer feeds
# /tmp/detect-caps one line per key event; confirm that producer exists.
while true; do cat /tmp/detect-caps; done | while read line; do
	caps=$(xset -q | sed -n 's/.*Caps Lock:\s\+\(on\|off\).*/\1/p')
	echo "caps $caps"
done >> $fifo &
# Set padding
# Reserve vertical space for the bar on the primary monitor only.
bspc config top_padding 0
bspc config -m primary top_padding $(bc <<< "$font_size + 6")
# Working Variables
# Last-seen payload for each status tag; both bars are re-rendered from these
# on every FIFO line received.
ws=""
layout=""
title=""
datev=""
battery=""
bitcoin=""
bluetooth=""
kbmap=""
mail=""
mem=""
network=""
song=""
volume=""
weather=""
tabs=""
dorestart=false
# Raise the dzen2 windows above the root window once they have had time to start.
(sleep 5 && xdo above -t $(xdo id -n root) $(xdo id -n dzen2 | tac)) &
# Parse information from fifo and pass to dzen2
# Main dispatch loop: each FIFO line is "<tag> <payload>".  The case updates
# the matching state variable (formatting it for dzen2 where needed), then
# both bars are re-rendered.  fd 5 feeds the left bar, fd 6 the right bar
# (opened on the `done` line via process substitution).
cat $fifo | while read cmd line; do
	case $cmd in
		ws) ws="$(cbg $cbg1 " $line ")$(cfg $cbg1 "$right_triangle")"
			;;
		layout) layout="$(cfg "#22ff22" "$line")"
			;;
		tabs) tabs=$(cfg '#ffff00' "$(padl $line)")
			;;
		title) title="$(shorten 50 $line)"
			;;
		date) datev="$(cfg "#88ff88" $line)"
			;;
		time)
			timev="$line"
			;;
		battery) battery="$line"
			;;
		bitcoin) bitcoin="$line"
			;;
		bluetooth) bluetooth="$line"
			;;
		kbmap) kbmap="$line"
			;;
		mail) mail="$(padr $line)"
			;;
		mem) mem="$line"
			;;
		network) network="$line"
			;;
		song) song=$(padr $(shorten 30 $line))
			;;
		volume) volume="$line"
			;;
		weather) weather="$line"
			;;
		kill) echo "Got kill command, exiting..." 1>&2
			break
			;;
		restart) echo "Got restart command, exiting..." 1>&2
			touch $restart_file
			break
			;;
		caps) if [ "$line" == "off" ]; then
				echo "caps off"
				cbg1="#444455"
			elif [ "$line" == "on" ]; then
				echo "caps on"
				cbg1="#ff0000"
			fi
			;;
	esac
	# Date
	# Combined date + time segment with a powerline-style separator.
	datetime="$datev $(cfg "#000000" $(cbg $cbg1 $right_triangle))$(cbg $cbg1 $timev)"
	# Left Bar
	echo -e "$ws $layout$tabs $(cfg "#000000" $(cbg $cbg1 $right_triangle))$(cbg $cbg1 ' '$title' ')$(cfg $cbg1 $right_triangle)" 1>&5
	# Right Bar
	echo -e "$bitcoin $mail$song$bluetooth$network $kbmap $volume $mem $battery $weather $datetime" 1>&6
done 6> >(dzen_right) 5> >(dzen_left)
killjobs
# A "restart" FIFO command leaves a marker file before breaking the loop;
# honor it by re-executing ourselves in a detached subshell.
if [ -f $restart_file ]; then
	echo "restarting $0..."
	rm $restart_file
	($0 &)
fi
| true |
d909f7f363ab820dac973222cb97e1d0ed6fcae6 | Shell | dory1/My-scripts | /scripts/mysql/partition | UTF-8 | 535 | 2.984375 | 3 | [] | no_license | #!/bin/bash
January=31
February=28 # 29 in Leap Years
March=31
April=30
May=31
June=30
July=31
August=31
September=30
October=31
November=30
December=31
ARRAY=(0 31 28 31 30 31 30 31 31 30 31 30 31)
echo ${ARRAY[09]}
par=$2
month=$1
echo "alter table ips add partition ("
for i in `seq 01 ${ARRAY[$month]}`
do
par=$(($par+1))
if [ $i -lt 10 ]
then
i=0$i
echo "PARTITION p2015$month$i VALUES LESS THAN ($par) ENGINE = InnoDB,"
else
echo "PARTITION p2015$month$i VALUES LESS THAN ($par) ENGINE = InnoDB,"
fi
done
echo ');'
| true |
1df8ba0b1a9d5fbda2935d2c2d65c3f0df7c50c7 | Shell | ngiambla/syn_sugar | /install.sh | UTF-8 | 522 | 2.65625 | 3 | [] | no_license | #!/bin/bash
echo "--> Installing Dependencies."
echo "Upgrading [pip]"
if [ "$EUID" -ne 0 ]
then echo "[ALERT] -> Requires sudo access."
fi
sudo pip install --upgrade pip
sudo pip install numpy
sudo pip install nltk
sudo pip install rouge
sudo pip install pdfminer
sudo pip install bs4
sudo pip install pyteaser
sudo pip install gensim
sudo pip install langdetect
sudo pip install matplotlib
sudo pip install flask
sudo apt-get install python-tk
echo "Downloading NLTK [post packages.]"
python -m nltk.downloader all
| true |
d0055d005acac440b2bf86bd7232daf427627408 | Shell | novas0x2a/virtualenvwrapper | /tests/test_dir_stack.sh | UTF-8 | 919 | 3.046875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-permissive"
] | permissive | #!/bin/sh
test_dir=$(cd $(dirname $0) && pwd)
source "$test_dir/setup.sh"
oneTimeSetUp() {
test_begin_dir=$(pwd)
}
oneTimeTearDown() {
cd "$test_begin_dir"
}
setUp () {
rm -rf "$WORKON_HOME"
mkdir -p "$WORKON_HOME"
source "$test_dir/../virtualenvwrapper.sh"
mkdir "$WORKON_HOME/start_here"
mkdir "$WORKON_HOME/on_the_stack"
echo
}
tearDown() {
if type deactivate >/dev/null 2>&1
then
deactivate
fi
rm -rf "$WORKON_HOME"
}
test_ticket_101 () {
mkvirtualenv some_env
deactivate
cd "$WORKON_HOME/start_here"
pushd "$WORKON_HOME/on_the_stack"
rmvirtualenv some_env
mkvirtualenv some_env >/dev/null 2>&1
#echo "After mkvirtualenv: `pwd`"
deactivate
#echo "After deactivate: `pwd`"
popd
#echo "After popd: `pwd`"
current_dir=$(pwd)
assertSame "$WORKON_HOME/start_here" "$current_dir"
}
. "$test_dir/shunit2"
| true |
4f976662d3040ef6be4d09b8337b5919e7dff84b | Shell | mammawhy9/sru | /core/bin/locale2php.sh | UTF-8 | 356 | 2.984375 | 3 | [] | no_license | #!/bin/bash
if [[ -z $1 ]]; then
function escape() {
echo ${*//\'/\\\'}
}
read line
echo -ne "\t"
echo -n "'$(escape $MSGEXEC_MSGID)' => '$(escape $line)',"
echo -ne "\t"
echo "// $MSGEXEC_LOCATION"
exit;
fi
echo '<?'
echo '// NIE MODYFIKUJ TEGO PLIKU'
echo '$dict = array('
for file in `seq $#`; do
msgexec -i "$1" "$0"
shift
done
echo ');'
| true |
c1430d95108791cf4249fe8e1f83aa4a1598a608 | Shell | alex-liu-sudo/shell-script | /create-shell.sh | UTF-8 | 156 | 2.875 | 3 | [] | no_license | #!/bin/sh
if ! grep "^#!" $1 &>/dev/null;
then
cat >> $1 << EOF
#!/bin/sh
#Author: AlexLiu
#Date & Time: `date +"%F %T"`
#Description:
EOF
fi
vim +5 $1
| true |
522a480aaf890e6feebc45d91f291c99b2922cd8 | Shell | Hedroed/i3-config | /bin/nordlock | UTF-8 | 1,239 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
FORK_OPT="--nofork"
if [ "$1" == "--fork" ]; then
FORK_OPT=""
fi
image="$HOME/Images/wallpapers/mountain-unsplash-5-small.jpg"
if [ ! -f "$HOME/tmp/i3lock.jpg" ]; then
size=$(xdpyinfo | grep dimensions | cut -d\ -f7)
echo "Resize $size"
convert "$image" -resize "$size^" "$HOME/tmp/i3lock.jpg"
fi
notify-send "DUNST_COMMAND_PAUSE"
i3lock $FORK_OPT \
-i "$HOME/tmp/i3lock.jpg" -t -c "2e3440" \
--insidecolor='#2e3440ff' --ringcolor='#eceff4ff' --line-uses-inside \
--keyhlcolor='#5e81acff' --bshlcolor='#5e81acff' --separatorcolor=00000000 \
--insidevercolor='#8fbcbbff' --insidewrongcolor='#ebcb8bff' \
--ringvercolor='#eceff4ff' --ringwrongcolor='#eceff4ff' \
--indpos="x+50+r:y+(h-50-r)" --radius=16 \
--veriftext="" --wrongtext="" --noinputtext="" \
--locktext="" --lockfailedtext="" --modifpos="x-100:y-100" \
--indicator --clock --force-clock \
--timecolor='#d8dee9ff' --datecolor='#eceff4ff' \
--timestr="%H:%M" --datestr="%A %d %b" \
--timepos="x+(w-50):y+(h-80)" --datepos="tx:ty+30" \
--time-align=2 --date-align=2
notify-send "DUNST_COMMAND_RESUME"
# sleep 1 adds a small delay to prevent possible race conditions with suspend
sleep 1
exit 0
| true |
1f6119cb6fbfd7a5354d99d171f7c8582eb3d9da | Shell | CharlottevNoort/Call-Variants-with-UMIs | /ConsVar.sh | UTF-8 | 8,037 | 3.1875 | 3 | [] | no_license | #!/bin/bash
###################################################################################
# #
# **** UMI-based scRNA-seq Consensus Variant Calls **** #
# #
# Calls consensus sequences using fgbio CallMolecularConsensusReads #
# and variants using GATK MuTect2. #
# #
# Takes as input FASTA file (-i) with cell barcode and UMI in read names. #
# #
# All other input files are hard-coded for now: #
# /data/ngs/genomes/Human/hg19/for_gatk/hg19_ucsc.fa #
# /data/ngs/genomes/Human/hg19/for_gatk/STARidx_hg19_ucsc #
# /data/ngs/genomes/Human/hg19/Homo_sapiens_onlyChr.chrM.ERCC.gfp.GRCh37.75.gtf #
# /data/ngs/genomes/Human/hg19/Homo_sapiens_onlyChr.ERCC.GRCh37.75.refFlat #
# /data/share/htp/Charlotte_UMI_SNV/contaminant_list.txt #
# /data/ngs/genomes/Human/hg19/Human_polymorphism/ExAC/release0.3.1/NonTCGA #
# /ExAC_nonTCGA.r0.3.1.sites.vep.chr.vcf #
# #
# Output files have sample name (-s) in file names. #
# #
###################################################################################
# #
# Tools used: #
# #
# UMI-tools 0.5.1 #
# STAR 2.5.3a #
# Samtools 1.1 #
# fgbio 0.3.0 #
# Genome Analysis ToolKit 3.6 #
# #
###################################################################################
### 1) READ ARGUMENTS
# Get options
while getopts s:i: option
do
case "${option}"
in
s) SAMPLE=${OPTARG};;
i) FASTA=${OPTARG};;
esac
done
### 2) MAP READS
# /data/share/htp/Charlotte_UMI_SNV/AML491/SC_McSCRB_ChristianeMouseman/GATK/alignment/get_STARidx.sh must have been run first
# Map reads to reference genome with STAR
STAR --genomeDir /data/ngs/genomes/Human/hg19/for_gatk/STARidx_hg19_ucsc \
--runThreadN 10 \
--readFilesCommand zcat \
--sjdbGTFfile /data/ngs/genomes/Human/hg19/Homo_sapiens_onlyChr.chrM.ERCC.gfp.GRCh37.75.gtf \
--outFileNamePrefix $SAMPLE. \
--outSAMtype BAM SortedByCoordinate \
--outSAMmultNmax 1 \
--outFilterMultimapNmax 50 \
--outSAMunmapped Within \
--sjdbOverhang 49 \
--twopassMode Basic \
--readFilesIn $FASTA
### 3) CONSENSUS
# Filter out reads with mapping quality <30
samtools view -q 30 -b -o $SAMPLE.Aligned.fitered.bam $SAMPLE.Aligned.sortedByCoord.out.bam
# Index mapped reads
samtools index $SAMPLE.Aligned.filtered.bam
# Sort/group by coordinate and UMI (only if they also have same cell barcode)
umi_tools group \
-I $SAMPLE.Aligned.filtered.bam \
--group-out=groups_perCell.tsv \
--output-bam -S $SAMPLE.grouped.bam \
--method=directional \
--edit-distance-threshold=1 \
--per-cell
# Call consensus reads
java -jar /data/share/htp/Charlotte_UMI_SNV/fgbio/fgbio-0.3.0.jar CallMolecularConsensusReads \
-i $SAMPLE.grouped.bam \
-o $SAMPLE.consensus.bam \
-r $SAMPLE.consensus_rejects.bam --tag BX --min-reads 1
### 4) FASTQ
# Convert BAM to FASTQ format
samtools bam2fq $SAMPLE.consensus.bam > $SAMPLE.consensus.fq
# Run fastQC (optional)
mkdir -p fastq_consensus_$SAMPLE
fastqc -o fastq_consensus_$SAMPLE \
-f fastq \
-contaminants /data/share/htp/Charlotte_UMI_SNV/contaminant_list.txt \
$SAMPLE.consensus.fq
### 5) MAP CONSENSUS SEQS
# STAR alignment
STAR --genomeDir /data/ngs/genomes/Human/hg19/for_gatk/STARidx_hg19_ucsc \
--runThreadN 10 --sjdbGTFfile /data/ngs/genomes/Human/hg19/Homo_sapiens_onlyChr.chrM.ERCC.gfp.GRCh37.75.gtf \
--outFileNamePrefix $SAMPLE.consensus. \
--outSAMtype BAM SortedByCoordinate \
--outSAMmultNmax 1 \
--outFilterMultimapNmax 50 \
--outSAMunmapped Within \
--sjdbOverhang 49 \
--twopassMode Basic \
--readFilesIn $SAMPLE.consensus.fq
### 6A) PICARD (optional)
# Get Picard metrics
picard-tools CollectMultipleMetrics I=$SAMPLE.consensus.Aligned.sortedByCoord.out.bam \
O=$SAMPLE.consensus_alignment.multiple_metrics R=/data/ngs/genomes/Human/hg19/for_gatk/hg19_ucsc.fa
picard-tools CollectRnaSeqMetrics I=$SAMPLE.consensus.Aligned.sortedByCoord.out.bam \
O=$SAMPLE.consensus_alingnment.RNAseq_Metrics \
REF_FLAT=/data/ngs/genomes/Human/hg19/Homo_sapiens_onlyChr.ERCC.GRCh37.75.refFlat \
STRAND=FIRST_READ_TRANSCRIPTION_STRAND #RIBOSOMAL_INTERVALS=/data/share/htp/Charlotte_UMI_SNV/hg38.rRNA.interval_list
### 6B) PREPARE FOR VARIANT CALLING
# Add read groups
picard-tools AddOrReplaceReadGroups I=$SAMPLE.consensus.Aligned.sortedByCoord.out.bam \
O=$SAMPLE.consensus.Aligned.RG.bam \
SO=coordinate \
RGLB=McSCRB \
RGPL=illumina \
RGPU=hiseq \
RGSM=AML491
# Index
samtools index $SAMPLE.consensus.Aligned.RG.bam
# Split into exon segments and hard clip parts in intronic regions, reassign mapping quality
java -jar /opt/bin/GenomeAnalysisTK.jar -T SplitNCigarReads \
-R /data/ngs/genomes/Human/hg19/for_gatk/hg19_ucsc.fa \
-I $SAMPLE.consensus.Aligned.RG.bam \
-o $SAMPLE.consensus.Aligned.split.bam \
-rf ReassignOneMappingQuality \
-RMQF 255 -RMQT 60 \
-U ALLOW_N_CIGAR_READS
# Decompress VCF file (GATK tools do not take piped input!)
#gunzip -k /data/ngs/genomes/Human/hg19/Human_polymorphism/ExAC/release0.3.1/NonTCGA/ExAC_nonTCGA.r0.3.1.sites.vep.chr.vcf.gz
## BQSR
# Analyze covariation before recalibration
java -jar /opt/bin/GenomeAnalysisTK.jar -T BaseRecalibrator \
-R /data/ngs/genomes/Human/hg19/for_gatk/hg19_ucsc.fa \
-I $SAMPLE.consensus.Aligned.split.bam \
-knownSites /data/ngs/genomes/Human/hg19/Human_polymorphism/ExAC/release0.3.1/NonTCGA/ExAC_nonTCGA.r0.3.1.sites.vep.chr.vcf \
-o recal_data.table
# Analyze covariation after recalibration
java -jar /opt/bin/GenomeAnalysisTK.jar -T BaseRecalibrator \
-R /data/ngs/genomes/Human/hg19/for_gatk/hg19_ucsc.fa \
-I $SAMPLE.consensus.Aligned.split.bam \
-knownSites /data/ngs/genomes/Human/hg19/Human_polymorphism/ExAC/release0.3.1/NonTCGA/ExAC_nonTCGA.r0.3.1.sites.vep.chr.vcf \
-BQSR recal_data.table \
-o post_recal_data.table
# Plot covariation before and after recalibration
java -jar /opt/bin/GenomeAnalysisTK.jar -T AnalyzeCovariates \
-R /data/ngs/genomes/Human/hg19/for_gatk/hg19_ucsc.fa \
-before recal_data.table \
-after post_recal_data.table \
-plots recalibration_plots.pdf
# Apply recalibration
java -jar /opt/bin/GenomeAnalysisTK.jar -T PrintReads \
-R /data/ngs/genomes/Human/hg19/for_gatk/hg19_ucsc.fa \
-I $SAMPLE.consensus.Aligned.split.bam \
-BQSR recal_data.table \
-o $SAMPLE.consensus.Aligned.recalibrated.bam
# Delete uncompressed VCF file
#rm /data/ngs/genomes/Human/hg19/Human_polymorphism/ExAC/release0.3.1/NonTCGA/ExAC_nonTCGA.r0.3.1.sites.vep.chr.vcf
### 7) CALL VARIANTS
java -jar /opt/bin/GenomeAnalysisTK.jar -T MuTect2 \
-R /data/ngs/genomes/Human/hg19/for_gatk/hg19_ucsc.fa \
-I:tumor $SAMPLE.consensus.Aligned.recalibrated.bam \
-o $SAMPLE.consensus.MuTect.vcf
| true |
b7d6570f27f276e688cbc6230d8c90630cabcc48 | Shell | yhj39300/kcpraw | /build-release.sh | UTF-8 | 2,687 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
MD5='md5sum'
unamestr=`uname`
if [[ "$unamestr" == 'Darwin' ]]; then
MD5='md5'
fi
UPX=false
#if hash upx 2>/dev/null; then
# UPX=true
#fi
VERSION=`date -u +%Y%m%d`
LDFLAGS="-X main.VERSION=$VERSION -s -w"
GCFLAGS=""
OSES=(linux darwin)
ARCHS=(amd64 386)
for os in ${OSES[@]}; do
for arch in ${ARCHS[@]}; do
suffix=""
if [ "$os" == "windows" ]
then
suffix=".exe"
fi
cgo_enabled=1
if [ "$os" == "linux" ]
then
cgo_enabled=0
fi
env CGO_ENABLED=$cgo_enabled GOOS=$os GOARCH=$arch go build -ldflags "$LDFLAGS" -gcflags "$GCFLAGS" -o kcpraw_client_${os}_${arch}${suffix} github.com/ccsexyz/kcpraw/client
env CGO_ENABLED=$cgo_enabled GOOS=$os GOARCH=$arch go build -ldflags "$LDFLAGS" -gcflags "$GCFLAGS" -o kcpraw_server_${os}_${arch}${suffix} github.com/ccsexyz/kcpraw/server
if $UPX; then upx -9 kcpraw_client_${os}_${arch}${suffix} kcpraw_server_${os}_${arch}${suffix};fi
tar -zcf kcpraw-${os}-${arch}-$VERSION.tar.gz kcpraw_client_${os}_${arch}${suffix} kcpraw_server_${os}_${arch}${suffix}
$MD5 kcpraw-${os}-${arch}-$VERSION.tar.gz
done
done
# ARM
ARMS=(5 6 7)
for v in ${ARMS[@]}; do
env CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=$v go build -ldflags "$LDFLAGS" -gcflags "$GCFLAGS" -o kcpraw_client_linux_arm$v github.com/ccsexyz/kcpraw/client
env CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=$v go build -ldflags "$LDFLAGS" -gcflags "$GCFLAGS" -o kcpraw_server_linux_arm$v github.com/ccsexyz/kcpraw/server
done
if $UPX; then upx -9 kcpraw_client_linux_arm* kcpraw_server_linux_arm*;fi
tar -zcf kcpraw-linux-arm-$VERSION.tar.gz kcpraw_client_linux_arm* kcpraw_server_linux_arm*
$MD5 kcpraw-linux-arm-$VERSION.tar.gz
#MIPS32LE
env CGO_ENABLED=0 GOOS=linux GOARCH=mipsle go build -ldflags "$LDFLAGS" -gcflags "$GCFLAGS" -o kcpraw_client_linux_mipsle github.com/ccsexyz/kcpraw/client
env CGO_ENABLED=0 GOOS=linux GOARCH=mipsle go build -ldflags "$LDFLAGS" -gcflags "$GCFLAGS" -o kcpraw_server_linux_mipsle github.com/ccsexyz/kcpraw/server
env CGO_ENABLED=0 GOOS=linux GOARCH=mips go build -ldflags "$LDFLAGS" -gcflags "$GCFLAGS" -o kcpraw_client_linux_mips github.com/ccsexyz/kcpraw/client
env CGO_ENABLED=0 GOOS=linux GOARCH=mips go build -ldflags "$LDFLAGS" -gcflags "$GCFLAGS" -o kcpraw_server_linux_mips github.com/ccsexyz/kcpraw/server
if $UPX; then upx -9 kcpraw_client_linux_mips* kcpraw_server_linux_mips*;fi
tar -zcf kcpraw-linux-mipsle-$VERSION.tar.gz kcpraw_client_linux_mipsle kcpraw_server_linux_mipsle
tar -zcf kcpraw-linux-mips-$VERSION.tar.gz kcpraw_client_linux_mips kcpraw_server_linux_mips
$MD5 kcpraw-linux-mipsle-$VERSION.tar.gz
$MD5 kcpraw-linux-mips-$VERSION.tar.gz
| true |
1dc59395d153adec28ae8e6e0d7085645e17026a | Shell | ClickHouse/ClickHouse | /tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh | UTF-8 | 1,545 | 3.140625 | 3 | [
"Apache-2.0",
"BSL-1.0"
] | permissive | #!/usr/bin/env bash
# Tags: long, no-replicated-database
# Tag no-replicated-database: Fails due to additional replicas or shards
set -e
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
NUM_REPLICAS=6
for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n -q "
DROP TABLE IF EXISTS r$i SYNC;
CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_quorum_many', 'r$i') ORDER BY x;
"
done
function thread {
i=0 retries=300
while [[ $i -lt $retries ]]; do # server can be dead
$CLICKHOUSE_CLIENT --insert_quorum 3 --insert_quorum_parallel 1 --insert_keeper_max_retries=100 --insert_keeper_retry_max_backoff_ms=10 --query "INSERT INTO r$1 SELECT $2" && break
((++i))
sleep 0.1
done
}
for i in $(seq 1 $NUM_REPLICAS); do
for j in {0..4}; do
a=$((($i - 1) * 10 + $j))
# Note: making 30 connections simultaneously is a mini-DoS when server is build with sanitizers and CI environment is overloaded.
# That's why we repeat "socket timeout" errors.
thread $i $a 2>&1 | grep -v -P 'SOCKET_TIMEOUT|NETWORK_ERROR|^$' &
done
done
wait
for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n -q "
SYSTEM SYNC REPLICA r$i;
SELECT count(), min(x), max(x), sum(x) FROM r$i;
"
done
for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS r$i SYNC;"
done
| true |
564421074dff2a5414a7ba126c3eedbe7e046141 | Shell | tomchristie/mkautodoc | /scripts/install | UTF-8 | 263 | 2.859375 | 3 | [] | no_license | #!/bin/sh -ex
if [ "${CONTINUOUS_INTEGRATION}" = "true" ]; then
BIN_PATH=""
else
rm -rf venv
python -m venv venv
BIN_PATH="venv/bin/"
fi
${BIN_PATH}pip install --upgrade pip
${BIN_PATH}pip install -r requirements.txt
${BIN_PATH}pip install -e .
| true |
8bb0b720194f050a3418419124f06b1519556a30 | Shell | ppc64le/build-scripts | /w/watchdog/watchdog_ubi_8.5.sh | UTF-8 | 1,241 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -e
# -----------------------------------------------------------------------------
#
# Package : watchdog
# Version : v2.0.3
# Source repo : https://github.com/gorakhargosh/watchdog
# Tested on : UBI: 8.5
# Language : Python
# Travis-Check : True
# Script License: Apache License, Version 2 or later
# Maintainer : Sunidhi Gaonkar / Vedang Wartikar<Vedang.Wartikar@ibm.com>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=watchdog
PACKAGE_VERSION=${1:-v2.0.3}
PACKAGE_URL=https://github.com/gorakhargosh/watchdog
yum install -y python36 python36-devel git python2 python2-devel python3 python3-devel ncurses git gcc gcc-c++ libffi libffi-devel sqlite sqlite-devel sqlite-libs python3-pytest make cmake
mkdir -p /home/tester
cd /home/tester
git clone $PACKAGE_URL
cd $PACKAGE_NAME
git checkout $PACKAGE_VERSION
python3 -m pip install -e .
python3 -m pip install tox
python3 -m tox -e py
| true |
456b4aa0c2272067a3fc945db9819714f8d15f80 | Shell | nickweinberg/smash-dash | /docker-up.sh | UTF-8 | 435 | 2.65625 | 3 | [] | no_license | #!/bin/sh
# idk why but gotta run these manually lol???
docker run --name smash-db -d \
-e MYSQL_ROOT_PASSWORD=123 \
-e MYSQL_DATABASE=smash -e MYSQL_USER=nick -e MYSQL_PASSWORD=123 \
-p 3306:3306 \
mysql:latest
echo "Waiting for DB to start up..."
docker exec smash-db mysqladmin --silent --wait=30 -unick -p123 ping
echo "Setting up initial data..."
docker exec -i smash-db mysql -unick -p123 smash < setup.sql
| true |
3560e1a651af1d6cc75631bd9d62f31faec22c60 | Shell | deloschang/bash-spy | /count_files.sh | UTF-8 | 1,690 | 4.375 | 4 | [] | no_license | #!/bin/bash
# script name: count_files.sh
#
# Description: Counts the number of files of each filetype in
# the current dir. and its subdirectories, producing a summary
# report.
#
# Command line options: None
#
# Input: None
#
# Output: A summary of the filetype extensions, including those
# without file extensions.
#
# Special considerations: If a file has no '.' in its name or there
# is a '.' that has no characters following it, it is considered to be
# a 'noext' file. Current dir '.' and parent directory '..' are not counted.
# Also, only files are counted.
#
# Pseudocode: First, search for files with the wildcard format '*.*'. Exclude
# files with only a '.' and no characters following (handle it later as a noext). From
# this list, reverse the letters and cut at the '.'. Extract the first slice
# for the file extension. Reverse letters again to restore the original
# filetype name. Sort and use unique to count the unique filetypes.
# To handle noext files, invert the original search but include "*." type
# files and exclude the current directory ".". Use word count to sum up
# such no extension files. Echo this number and the name 'noext' in a
# similarly formatted manner.
# start with finding files with extensions
# exclude the edge case with a . with no characters following
find . -type f -name '*.*' -not -name '*.' | rev \
| cut -d'.' -f1 | rev | sort | uniq -c
# now handle the no extension files
# exclude current directory
# include edge case with a . with no character following
noext_count=`find . -type f ! -name "*.*" -o -name "*." -not -name "." | wc -l`
# format similarly with the above output
echo -e " $noext_count noext"
exit 0
| true |
52a216877f0d780ea5414bf47806177fc61abc17 | Shell | dynverse/competition | /containers/tc-scorer/score.sh | UTF-8 | 392 | 3.03125 | 3 | [] | no_license | #!/bin/bash
rm /outputs/AGGREGATED_SCORE || true
rm /outputs/dataset_scores.csv || true
for filename in /outputs/*/; do
test_case=$(basename $filename)
echo "Scoring" $test_case
/code/main.R --groundtruth /data/ground-truths/$test_case.h5 "$@" \
--output /outputs/$test_case \
--output_scores /outputs/$test_case/scores.json
done
echo "Aggregating"
/code/aggregate-scores.R
| true |
5b1bcba29917dac10db01f72dac1ee83edbc88d9 | Shell | lisadsu/OocyteStudy | /EmbryoProject-master/RNA_seq_analysis/Mouse_Shell_Scripts/CollapseAndCountDuplicates.sh | UTF-8 | 8,466 | 3.078125 | 3 | [] | no_license | #!/bin/bash
#This script first collapses the trimmed and clipped fastq files into fasta files with only unique reads, the number of identical reads for each unique in parentheses:
#Copy the following line as many times as the number of samples you are working with, then
#replace "YOURFILE" with the names of your samples e.g. "1_Hot":
# ========================== NEG1 ====================================== #
FILENAME_IN="neg1"
NUM_FILES_PER_READ=4
for ((i=1; i<=$NUM_FILES_PER_READ; i++))
do
echo $FILENAME_IN"_R1_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R1_00"$i"_clipped.fastq" -o $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R1_00"$i"_collapsed.txt" $FILENAME_IN"_R1_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
echo $FILENAME_IN"_R2_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R2_00"$i"_clipped.fastq" -o $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R2_00"$i"_collapsed.txt" $FILENAME_IN"_R2_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
done
# ========================== POS1 ====================================== #
FILENAME_IN="pos1"
NUM_FILES_PER_READ=5
for ((i=1; i<=$NUM_FILES_PER_READ; i++))
do
echo $FILENAME_IN"_R1_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R1_00"$i"_clipped.fastq" -o $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R1_00"$i"_collapsed.txt" $FILENAME_IN"_R1_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
echo $FILENAME_IN"_R2_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R2_00"$i"_clipped.fastq" -o $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R2_00"$i"_collapsed.txt" $FILENAME_IN"_R2_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
done
# ========================== POS2 ====================================== #
FILENAME_IN="pos2"
NUM_FILES_PER_READ=2
for ((i=1; i<=$NUM_FILES_PER_READ; i++))
do
echo $FILENAME_IN"_R1_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R1_00"$i"_clipped.fastq" -o $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R1_00"$i"_collapsed.txt" $FILENAME_IN"_R1_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
echo $FILENAME_IN"_R2_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R2_00"$i"_clipped.fastq" -o $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R2_00"$i"_collapsed.txt" $FILENAME_IN"_R2_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
done
##### END OF CONTROLS ... SAMPLES NEXT #####
# ========================== BAD1 ====================================== #
FILENAME_IN="bad1"
NUM_FILES_PER_READ=4
for ((i=1; i<=$NUM_FILES_PER_READ; i++))
do
echo $FILENAME_IN"_R1_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R1_00"$i"_clipped.fastq" -o $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R1_00"$i"_collapsed.txt" $FILENAME_IN"_R1_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
echo $FILENAME_IN"_R2_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R2_00"$i"_clipped.fastq" -o $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R2_00"$i"_collapsed.txt" $FILENAME_IN"_R2_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
done
# ========================== BAD2 ====================================== #
FILENAME_IN="bad2"
NUM_FILES_PER_READ=6
for ((i=1; i<=$NUM_FILES_PER_READ; i++))
do
echo $FILENAME_IN"_R1_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R1_00"$i"_clipped.fastq" -o $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R1_00"$i"_collapsed.txt" $FILENAME_IN"_R1_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
echo $FILENAME_IN"_R2_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R2_00"$i"_clipped.fastq" -o $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R2_00"$i"_collapsed.txt" $FILENAME_IN"_R2_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
done
# ========================== GOOD1 ====================================== #
FILENAME_IN="good1"
NUM_FILES_PER_READ=6
for ((i=1; i<=$NUM_FILES_PER_READ; i++))
do
echo $FILENAME_IN"_R1_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R1_00"$i"_clipped.fastq" -o $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R1_00"$i"_collapsed.txt" $FILENAME_IN"_R1_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
echo $FILENAME_IN"_R2_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R2_00"$i"_clipped.fastq" -o $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R2_00"$i"_collapsed.txt" $FILENAME_IN"_R2_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
done
# ========================== GOOD2 ====================================== #
FILENAME_IN="good2"
NUM_FILES_PER_READ=6
for ((i=1; i<=$NUM_FILES_PER_READ; i++))
do
echo $FILENAME_IN"_R1_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R1_00"$i"_clipped.fastq" -o $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R1_00"$i"_collapsed.txt" $FILENAME_IN"_R1_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
echo $FILENAME_IN"_R2_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R2_00"$i"_clipped.fastq" -o $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R2_00"$i"_collapsed.txt" $FILENAME_IN"_R2_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
done
# ========================== MIXED1 ====================================== #
FILENAME_IN="mixed1"
NUM_FILES_PER_READ=6
for ((i=1; i<=$NUM_FILES_PER_READ; i++))
do
echo $FILENAME_IN"_R1_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R1_00"$i"_clipped.fastq" -o $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R1_00"$i"_collapsed.txt" $FILENAME_IN"_R1_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
echo $FILENAME_IN"_R2_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R2_00"$i"_clipped.fastq" -o $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R2_00"$i"_collapsed.txt" $FILENAME_IN"_R2_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
done
# ========================== MIXED2 ====================================== #
FILENAME_IN="mixed2"
NUM_FILES_PER_READ=6
for ((i=1; i<=$NUM_FILES_PER_READ; i++))
do
echo $FILENAME_IN"_R1_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R1_00"$i"_clipped.fastq" -o $FILENAME_IN"_R1_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R1_00"$i"_collapsed.txt" $FILENAME_IN"_R1_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
echo $FILENAME_IN"_R2_00"$i"_clipped.fastq"
echo $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastx_collapser -v -i $FILENAME_IN"_R2_00"$i"_clipped.fastq" -o $FILENAME_IN"_R2_00"$i"_collapsed.txt"
fastqduplicatecounter.py $FILENAME_IN"_R2_00"$i"_collapsed.txt" $FILENAME_IN"_R2_00"$i"_collapsed_headers.txt" >> $FILENAME_IN"_R1_00"$i"_duplicateCount.txt"
done
#This last line removes the dummy headerfiles, which are no longer necessary.
rm *_collapsed_headers.txt
# mkdir ../DuplicateCount
mv *duplicateCount.txt ../DuplicateCount
| true |
7b78d2ce635d7f71cc18bf47e0ce5ed632b5c217 | Shell | knowuh/riteshost | /portal_db.sh | UTF-8 | 1,489 | 3.234375 | 3 | [] | no_license | #!/usr/bin/env bash
BASE_DIR=~/rites_host
cd $BASE_DIR
if [ -e $BASE_DIR/database_properties.sh ]; then
echo "using db properties found in $BASE_DIR/database_properties.sh"
echo "delete this file ($BASE_DIR/database_properties.sh ) to reset"
source $BASE_DIR/database_properties.sh
else
echo -n "Please enter the db USERNAME for ccportal db (eg: portal_admin): "
read PORTAL_USER
echo -n "Please enter the db password for ccportal db (eg: s33kr3t): "
read PORTAL_PASS
echo -n "Please enter the db USERNAME for rails db (eg: rails): "
read RAILS_USER
echo -n "Please enter the db PASSWORD for rails db (eg: s33kr3t): "
read RAILS_PASS
echo "PORTAL_USER=$PORTAL_USER" >> $BASE_DIR/database_properties.sh
echo "PORTAL_PASS=$PORTAL_PASS" >> $BASE_DIR/database_properties.sh
echo "RAILS_USER=$RAILS_USER" >> $BASE_DIR/database_properties.sh
echo "RAILS_PASS=$RAILS_PASS" >> $BASE_DIR/database_properties.sh
fi
###################################################
# CREATE TABLES:
###################################################
mysqladmin -f -u root drop mystery4
mysqladmin -f -u root drop ccportal
mysqladmin -f -u root drop sunflower
mysqladmin -f -u root drop rails
mysqladmin -f -u root create mystery4
mysqladmin -f -u root create ccportal
mysqladmin -f -u root create sunflower
mysqladmin -f -u root create rails
$BASE_DIR/db-user.rb $PORTAL_USER $PORTAL_PASS mystery4 ccportal sunflower
$BASE_DIR/db-user.rb $RAILS_USER $RAILS_PASS rails
| true |
d17801f962016cc96306882d5933939957fc05e7 | Shell | Fed984/AutoMoDeFSMAnalysis | /iraceTools/tune-main-cluster-mpi-script | UTF-8 | 1,539 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# This is a version of tune-main for launching irace using the --mpi
# option in a SGE Cluster.
set -e
set -o pipefail
if [ $# == 0 ]; then
echo "Usage: ./tune-main-cluster-mpi <BINDIR> <EXECDIR> --parallel <NB_SLAVES> --rack <rack_number> --queue <queue> additional_args_to_irace"
exit 1
fi
BINDIR=$1
EXECDIR=$2
shift 2
NB_SLAVES=0
RACK_NUM=1
QUEUE_ARG=long
PARAMS=
while [ $# -gt 0 ]; do
case "$1" in
--parallel) shift; NB_SLAVES="$1"; shift;;
--rack) shift; RACK_NUM="$1"; shift;;
--queue) shift; QUEUE_ARG="$1"; shift;;
*) PARAMS="$PARAMS $1"; shift;;# terminate case
esac
done
if [ $NB_SLAVES -lt 2 ]; then
echo "$0: error: --parallel must be larger than 1"
exit 1
fi
QUEUE=long
case $QUEUE_ARG in
long) QUEUE=long;;
short) QUEUE=short;;
*) ;;
esac
JOBNAME=JOB-$$
MACHINE=opteron2216
case $RACK_NUM in
2) MACHINE=xeon5410;;
3)MACHINE=opteron6128;;
4)MACHINE=opteron6272;;
5)MACHINE=xeon2680;;
6)MACHINE=xeon6138;;
*) ;;
esac
MPIRUN=/opt/openmpi/bin/mpirun
PARALLEL_ENV=mpich_fu
#PARALLEL_ENV=mpich_rr
let NB_PARALLEL_PROCESS=NB_SLAVES+1
exec qsub -v PATH <<EOF
#!/bin/sh
#$ -N $JOBNAME
#$ -l $MACHINE
#$ -l $QUEUE
#$ -binding linear:256
#$ -pe $PARALLEL_ENV $NB_PARALLEL_PROCESS
#$ -m ase
#$ -o $EXECDIR/irace-$$.stdout
#$ -e $EXECDIR/irace-$$.stderr
#$ -cwd
export OMPI_MCA_plm_rsh_disable_qrsh=1
export PATH
$MPIRUN -x OMPI_MCA_plm_rsh_disable_qrsh -x PATH \
-np 1 \
$BINDIR/irace --exec-dir=$EXECDIR --parallel $NB_SLAVES --mpi 1 $PARAMS
EOF
| true |
482b6031fbde657e4872b138e2c8102ab691a6d4 | Shell | DarovskikhAndrei/hpscan2linux | /install/install.sh | UTF-8 | 914 | 3.203125 | 3 | [] | no_license | #!/bin/bash
#params
read -p "Enter printer HostName or Ip:" -e host
read -p "Enter folder for scaned images:" -e scanpath
#coping files and scripts
cp ../target/HPScan2Linux-1.0.1-jar-with-dependencies.jar /usr/bin/hpscan2linux.jar
cp profiles /etc/scanserver/
chmod +x hpscan2linux
cp hpscan2linux /etc/init.d/
cp hpscan2linux.service /etc/systemd/system/
systemct enable hpscan2linux.service
#add user for daemon
useradd -U scanuser
#reload services configs
systemctl daemon-reload
#config folder
mkdir /etc/scanserver
echo "<settings>" > /etc/scanserver/conf.xml
echo " <scanpath>$scanpath</scanpath>" >> /etc/scanserver/conf.xml
echo " <profiles>/etc/scanserver/profiles/</profiles>" >> /etc/scanserver/conf.xml
echo " <printer_addr>$host</printer_addr>" >> /etc/scanserver/conf.xml
echo " <printer_port>8080</printer_port>" >> /etc/scanserver/conf.xml
echo "</settings>" >> /etc/scanserver/conf.xml
| true |
3eb1a11ddb41112f2de5873de8b6f9ead9128d9a | Shell | MarcosBan/fdp | /Repositorios.sh | UTF-8 | 899 | 3.609375 | 4 | [] | no_license | #!/bin/bash
ATPCT(){
AT=$( dialog --stdout --yes-label Sim --no-label Não \
--title 'Atualização de pacotes' \
--yesno ' Deseja realmente atualizar todos os pacotes ?'\
0 0)
AT=$?
if [ $AT = 0 ]; then
apt-get update #> TRASH
fi
}
INPCT(){
PA=$( dialog --stdout \
--title 'Instalação de pacotes' \
--inputbox 'Qual o nome do pacote que deseja instalar? '\
0 0)
apt-get install $PA #> TRASH
}
DNPCT(){
RE=$( dialog --stdout \
--title 'Remoção de pacotes' \
--inputbox 'Qual o nome do pacote que deseja remover? ' \
0 0)
apt-get autoremove $RE --purge #> TRASH
}
MENUS(){
CENTRAL=$( dialog --stdout \
--title "Menu repositorios" \
--menu "Selecione uma opção" \
0 0 0 \
1 'Atualizar pacotes' \
2 'Instalar pacotes' \
3 'Desinstalar pacotes' \
4 'Voltar')
case $CENTRAL in
1) ATPCT ;;
2) INPCT ;;
3) DNPCT ;;
#4)
esac
}
MENUS
| true |
2ec5e0ea8311e4c1d95eb4ecfbd220c0a91febde | Shell | suvit/dot-files | /scripts/odoo/migrate_11_14.sh | UTF-8 | 677 | 3.0625 | 3 | [] | no_license | #!/bin/bash
set -Eeuxo pipefail
script_dir=$(dirname "$BASH_SOURCE")
# Запускать
export PGHOST=localhost
export PGPORT=65432
OU_ROOT=/opt/suvit/odoo/OpenUpgrade
function migrate {
old_ver=$1
new_ver=$2
dropdb --if-exists ${new_ver}_mrp
createdb -O $USER -T ${old_ver}_mrp ${new_ver}_mrp
MIG_ROOT=$OU_ROOT/mrp-${old_ver}-${new_ver}
cd $MIG_ROOT
# export OPENUPGRADE_TARGET_VERSION=14.0
~/dotfiles/scripts/odoo/run_odoo.sh -d ${new_ver}_mrp -u all --stop-after-init # > $MIG_ROOT/migration.log 2> $MIG_ROOT/error.log
}
# Исходная база 11_mrp, мигрируем ее в 14_mrp
migrate 11 12
migrate 12 13
migrate 13 14
| true |
2fb0d66bda5f5eae3a9200863ae2b7b321f146a7 | Shell | iveney/bin | /getid3 | UTF-8 | 487 | 3.25 | 3 | [] | no_license | #!/bin/bash
# put the unspecified tags here...
ARTIST="Lisa Ono"
ALBUM="Cheek To Cheek - Jazz Standards from RIO"
GENRE="Jazz"
YEAR=""
TOTAL=`ls *.mp3 | wc -l`
for i in *.mp3
do
echo "Processing $i"
#set - $(IFS=" ."; echo $i) // split according to IFS
#ARTIST=$1
#TRACK=$3
#NAME=$4
TRACK=`echo "$i" | cut -d ' ' -f 1`
NAME=`echo "$i" | cut -d ' ' -f 2-`
mid3v2 -a "$ARTIST" \
-t "$NAME" \
-A "$ALBUM" \
-g "$GENRE" \
-T "$TRACK/$TOTAL" "$i"
done
| true |
99dfa0564b993ccec2b17c0ef6b650599f8fa6f5 | Shell | mehidou/INEUCE | /bootstrap.sh | UTF-8 | 269 | 2.78125 | 3 | [] | no_license | #!/bin/bash
if [ ! -e "composer.phar" ]; then
curl -sS https://getcomposer.org/installer | php
fi
# if composer.phar is over 30 days old
if test "`find composer.phar -mtime +30`"; then
php composer.phar --ansi self-update
fi
php composer.phar --ansi install
| true |
e19f7982ba365ed3d15e8ade3f3e2f75eded532d | Shell | cyber-dojo-retired/avatars | /sh/build_tagged_images.sh | UTF-8 | 2,160 | 4 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash -Eeu
# - - - - - - - - - - - - - - - - - - - - - -
build_tagged_images()
{
local -r dil=$(docker image ls --format "{{.Repository}}:{{.Tag}}")
remove_all_but_latest "${dil}" "${CYBER_DOJO_AVATARS_IMAGE}"
remove_all_but_latest "${dil}" "${CYBER_DOJO_AVATARS_CLIENT_IMAGE}"
build_images
tag_images_to_latest
check_embedded_env_var
}
# - - - - - - - - - - - - - - - - - - - - - -
remove_all_but_latest()
{
local -r docker_image_ls="${1}"
local -r name="${2}"
for image_name in `echo "${docker_image_ls}" | grep "${name}:"`
do
if [ "${image_name}" != "${name}:latest" ]; then
if [ "${image_name}" != "${name}:<none>" ]; then
docker image rm "${image_name}"
fi
fi
done
docker system prune --force
}
#- - - - - - - - - - - - - - - - - - - - - - - -
build_images()
{
docker-compose \
--file "${ROOT_DIR}/docker-compose.yml" \
build \
--build-arg COMMIT_SHA="$(git_commit_sha)"
}
#- - - - - - - - - - - - - - - - - - - - - - - -
tag_images_to_latest()
{
docker tag $(image_name):$(image_tag) $(image_name):latest
docker tag ${CYBER_DOJO_AVATARS_CLIENT_IMAGE}:$(image_tag) ${CYBER_DOJO_AVATARS_CLIENT_IMAGE}:latest
echo
echo "CYBER_DOJO_AVATARS_TAG=$(image_tag)"
echo "CYBER_DOJO_AVATARS_SHA=$(image_sha)"
echo
}
# - - - - - - - - - - - - - - - - - - - - - -
check_embedded_env_var()
{
if [ "$(git_commit_sha)" != "$(sha_in_image)" ]; then
echo "ERROR: unexpected env-var inside image $(image_name):$(image_tag)"
echo "expected: 'SHA=$(git_commit_sha)'"
echo " actual: 'SHA=$(sha_in_image)'"
exit 42
fi
}
# - - - - - - - - - - - - - - - - - - - - - -
git_commit_sha()
{
echo $(cd "${ROOT_DIR}" && git rev-parse HEAD)
}
# - - - - - - - - - - - - - - - - - - - - - -
image_name()
{
echo "${CYBER_DOJO_AVATARS_IMAGE}"
}
# - - - - - - - - - - - - - - - - - - - - - -
image_tag()
{
echo "${CYBER_DOJO_AVATARS_TAG}"
}
# - - - - - - - - - - - - - - - - - - - - - -
image_sha()
{
echo "${CYBER_DOJO_AVATARS_SHA}"
}
# - - - - - - - - - - - - - - - - - - - - - -
sha_in_image()
{
docker run --rm $(image_name):$(image_tag) sh -c 'echo -n ${SHA}'
}
| true |
eecc2a3b1281b4f8a0436f3b825e9aca76d63330 | Shell | subzik/Zabbix | /04/install_webserver.sh | UTF-8 | 716 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# install packages
yum install -y deltarpm epel-release
yum -y update
yum -y install python-pip wget bzip2 tree man
pip install --upgrade pip
pip install requests
pip install simplejson
pip install pyzabbix
# run zabbix Agent
rpm -Uvh https://repo.zabbix.com/zabbix/4.2/rhel/7/x86_64/zabbix-release-4.2-1.el7.noarch.rpm
yum install -y zabbix-agent zabbix-sender pip
ZABBIX_AGENT_CONF="/etc/zabbix/zabbix_agentd.conf";
ZABBIX_SERVER="192.168.0.50";
echo "ListenPort=10050" >> $ZABBIX_AGENT_CONF
echo "Server=$ZABBIX_SERVER" >> $ZABBIX_AGENT_CONF;
echo "ServerActive=$ZABBIX_SERVER" >> $ZABBIX_AGENT_CONF;
systemctl start zabbix-agent
systemctl enable zabbix-agent
# run python's script
python zabbix.py
| true |
e6959fe82c5935167f4ba6970b67ef71326abdca | Shell | kim3163/kimjoon.github | /selenium_sms/SMS-master/topmon.sh | UTF-8 | 1,338 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# OS
OS=`uname`
PS_LOG_DIR="/home/tacs/SMS-master/logs/PS/"
TOP_LOG_DIR="/home/tacs/SMS-master/logs/TOP/"
SWAP_LOG_DIR="/home/tacs/SMS-master/logs/SWAP/"
TOP="top -d2 -s1 -n 20"
DISPLAY="tail -28"
if [ ${OS} = 'Linux' ]; then
TOP="top -b -n 1"
SWAP="/home/tacs/SMS-master/smem --sort=swap"
DISPLAY="cat"
fi
cd /home/tacs/SMS-master
. /home/tacs/.bash_profile
umask 0 2> /dev/null
mkdir -p ${PS_LOG_DIR} 2> /dev/null
echo ============================================ >> ${PS_LOG_DIR}`date '+%Y%m%d'` 2> /dev/null
echo `date '+%H:%M:%S'` >> ${PS_LOG_DIR}`date '+%Y%m%d'` 2> /dev/null
ps -ef > /tmp/ps.tmp 2> /dev/null
cat /tmp/ps.tmp >> ${PS_LOG_DIR}`date '+%Y%m%d'` 2> /dev/null
mkdir -p ${TOP_LOG_DIR} 2> /dev/null
echo ============================================ >> ${TOP_LOG_DIR}`date '+%Y%m%d'` 2> /dev/null
echo `date '+%H:%M:%S'` >> ${TOP_LOG_DIR}`date '+%Y%m%d'` 2> /dev/null
${TOP} > /tmp/top.tmp 2> /dev/null
${DISPLAY} /tmp/top.tmp >> ${TOP_LOG_DIR}`date '+%Y%m%d'` 2> /dev/null
mkdir -p ${SWAP_LOG_DIR} 2> /dev/null
echo ============================================ >> ${SWAP_LOG_DIR}`date '+%Y%m%d'` 2> /dev/null
echo `date '+%H:%M:%S'` >> ${SWAP_LOG_DIR}`date '+%Y%m%d'` 2> /dev/null
${SWAP} > /tmp/swap.tmp 2> /dev/null
${DISPLAY} /tmp/swap.tmp >> ${SWAP_LOG_DIR}`date '+%Y%m%d'` 2> /dev/null
| true |
728844e858481b1b470673664b77b489c9808338 | Shell | vanhoefm/nordsec-passivescan | /tools/experiments/scan_duration.sh | UTF-8 | 1,476 | 3.703125 | 4 | [] | no_license | #!/bin/bash
set -e
EXPERIMENT="duration2"
#EXPERIMENT="specific_ap"
#EXPERIMENT="coverage"
function scan_and_log
{
for i in $(seq 1 1 5)
do
echo "Measuring $1 run $i"
# Reset Wi-Fi and logcat
adb shell svc wifi disable
sleep 2
adb logcat -c
adb shell svc wifi enable
sleep 1
# Trigger a scan
adb shell am start -a android.intent.action.MAIN -n com.android.settings/.wifi.WifiSettings
adb shell input keyevent KEYCODE_WAKEUP
sleep $2
# Now log the output
adb logcat -d > ../../logs/${EXPERIMENT}/scan_$1_run${i}.log
done
}
# Initialization
adb root
adb remount
adb shell mount -o rw,remount /
adb shell svc wifi disable
sleep 1
# For fair comparison, do active scanning for all strateges
adb push dwell_time_configs/WCNSS_qcom_cfg.orig.ini /system/etc/firmware/wlan/qca_cld/WCNSS_qcom_cfg.ini
for STRATEGY in 0 # 1 2 3 4
do
adb shell settings put global passive_mode_on 0
adb shell settings put global wifi_scan_strategy $STRATEGY
scan_and_log "active_strategy${STRATEGY}" 8
done
# Passive scanning for all the strategies
for STRATEGY in 0 1 2 3 # 0 1 2 3 4
do
adb shell settings put global passive_mode_on 1
adb shell settings put global wifi_scan_strategy $STRATEGY
for DWELLTIME in 20 50 100 120 150
do
adb push dwell_time_configs/WCNSS_qcom_cfg.$DWELLTIME.ini /system/etc/firmware/wlan/qca_cld/WCNSS_qcom_cfg.ini
scan_and_log "passive_strategy${STRATEGY}_dwell${DWELLTIME}" $(expr 100 \* $DWELLTIME / 1000 + 10)
done
done
| true |
7cfd87aa15a82e17c7aff70459a9a7dcea4d3909 | Shell | harrisonlab/Metabarcoding_projects | /Cotton/Cultivars/ion_torrent/preprocessing.sh | UTF-8 | 1,440 | 3.015625 | 3 | [] | no_license | PROJECT_FOLDER=~/projects/Cotton/
mkdir -p $PROJECT_FOLDER
ln -s $MBPL $PROJECT_FOLDER/metabarcoding_pipeline
RUN=cultivar_ion
for s in BAC FUN; do
mkdir -p $PROJECT_FOLDER/data/$RUN/$s/fastq
mkdir $PROJECT_FOLDER/data/$RUN/$s/filtered
mkdir $PROJECT_FOLDER/data/$RUN/$s/unfiltered
mkdir $PROJECT_FOLDER/data/$RUN/$s/fasta
done
# QC
for FILE in $PROJECT_FOLDER/data/$RUN/BAC/fastq/*; do
$PROJECT_FOLDER/metabarcoding_pipeline/scripts/PIPELINE.sh -c qcheck $FILE $PROJECT_FOLDER/data/$RUN/BAC/quality
done
for FILE in $PROJECT_FOLDER/data/$RUN/FUN/fastq/*; do
$PROJECT_FOLDER/metabarcoding_pipeline/scripts/PIPELINE.sh -c qcheck $FILE $PROJECT_FOLDER/data/$RUN/FUN/quality
done
# Length trimming
#cat $PROJECT_FOLDER/$RUN/BAC/fastq/* > bac.cat.fq
#cat $PROJECT_FOLDER/$RUN/FUN/fastq/* > fun.cat.fq
## ion torrent S5 looks like it uses extra phred 33 charcters (L and M - maybe more?) below to check
# awk 'NR % 4 ==0' LM28.D10.fastq|tr -d '\n'|grep -o . |sort -u|paste -s -d '\0'
# cat xaa|tr LM K > xaa1
#bacteria
SSU=BAC
QUAL=0.005
MAX_LEN=400
$PROJECT_FOLDER/metabarcoding_pipeline/scripts/PIPELINE.sh -c ion \
"$PROJECT_FOLDER/data/$RUN/$SSU/fastq/*.fastq" \
$PROJECT_FOLDER/data/$RUN/$SSU \
$QUAL $MAX_LEN
#Fungi
SSU=FUN
QUAL=0.01
MAX_LEN=150
$PROJECT_FOLDER/metabarcoding_pipeline/scripts/PIPELINE.sh -c ion \
"$PROJECT_FOLDER/data/$RUN/$SSU/fastq/*.fastq" \
$PROJECT_FOLDER/data/$RUN/$SSU \
$QUAL $MAX_LEN
| true |
df17bc6af11e332e058b871039af783632d5b9b8 | Shell | b-ggs/pak | /test/pak_brew.bats | UTF-8 | 948 | 3.265625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bats
setup() {
PAK=$BATS_TEST_DIRNAME/../pak
# Create fake brew
echo 'echo "fake $0 $@"' > $BATS_TMPDIR/brew
chmod +x $BATS_TMPDIR/brew
}
teardown() {
rm $BATS_TMPDIR/brew
}
@test "'pak' returns 'brew'" {
run $PAK
[ "$output" = "brew" ]
}
@test "'pak install' returns 'brew install'" {
run $PAK install
[ "$output" = "brew install" ]
}
@test "'pak install zsh' invokes 'brew install zsh'" {
PATH=$BATS_TMPDIR:$PATH run $PAK install zsh
[ "${lines[0]}" = "[pak] Attempting to install 'zsh' via 'brew'..." ]
[ "${lines[1]}" = "[brew]" ]
[ "${lines[2]}" = "fake $BATS_TMPDIR/brew install zsh" ]
}
@test "'pak install zsh vim' invokes 'brew install zsh vim'" {
PATH=$BATS_TMPDIR:$PATH run $PAK install zsh vim
[ "${lines[0]}" = "[pak] Attempting to install 'zsh vim' via 'brew'..." ]
[ "${lines[1]}" = "[brew]" ]
[ "${lines[2]}" = "fake $BATS_TMPDIR/brew install zsh vim" ]
}
# vi:syntax=sh
| true |
531a4f9256eb5f690421df7fc4eda0a558e99296 | Shell | rybacklin/https-git.oschina.net-darcyg-wrtoy | /platforms/ramips/proj_mtall/files/firstuprun/files/etc/uci-defaults/samba | UTF-8 | 887 | 2.75 | 3 | [
"MIT"
] | permissive | #!/bin/sh
. /lib/functions.sh
. /lib/ramips.sh
. /lib/functions/uci-defaults.sh
. /lib/functions/system.sh
[ ! -f /etc/config/samba ] && exit 0
uci set samba.@samba[0].name=mtall-devboard
uci set samba.@samba[0].workgroup=WORKGROUP
uci set samba.@samba[0].description="MTALL-OpenWrt-DevBoard"
uci set samba.@samba[0].homes=1
uci add samba sambashare
if [ -d /mnt/www ]; then
uci set samba.@sambashare[0].name=webroot
uci set samba.@sambashare[0].path=/mnt/www
uci set samba.@sambashare[0].read_only=no
uci set samba.@sambashare[0].guest_ok=yes
fi
if [ -d /mnt/data ]; then
uci set samba.@sambashare[1].name=data
uci set samba.@sambashare[1].path=/mnt/data
uci set samba.@sambashare[1].read_only=no
uci set samba.@sambashare[1].guest_ok=yes
fi
uci commit samba
/etc/init.d/samba enable
/etc/init.d/samba restart
echo "================>samba" >> /var/log/initlog
| true |
03317adacdf21c910bc3fdc8d8cb4ec8e15180a8 | Shell | barebuild/compile | /recipes/perl | UTF-8 | 571 | 2.921875 | 3 | [] | no_license | #!/bin/bash
OUT_PREFIX=$1
set -e
set -o pipefail
VERSION=${VERSION:="5.18.1"}
PERL_VERSION="perl-${VERSION}"
PERLBREW_URL="http://install.perlbrew.pl"
CPANM_URL="http://cpanmin.us"
PERLBREW_ROOT="$OUT_PREFIX"
PERLBREW_INSTALL_OPTIONS="-Duserelocatableinc -n -v"
PERL_ROOT="$PERLBREW_ROOT/perls/$PERL_VERSION"
export PERLBREW_ROOT=$PERLBREW_ROOT
curl -kL $PERLBREW_URL | bash
source $PERLBREW_ROOT/etc/bashrc
perlbrew init
perlbrew install $PERL_VERSION $PERLBREW_INSTALL_OPTIONS
perlbrew use $PERL_VERSION
curl -L $CPANM_URL | perl - --self-upgrade
cpanm local::lib
| true |
015f694de16306eeb9a14240927c1cf45dfc9c3d | Shell | ctemplin/i3_conf | /.i3/bin/i3-move-container-to-new-workspace.sh | UTF-8 | 182 | 2.875 | 3 | [] | no_license | #!/bin/bash
main ()
{
MAX_I3_WS_NUM=$(i3-msg -t get_workspaces | jq '[.[].num] | max + 1')
export MAX_I3_WS_NUM
i3-msg move container to workspace "$MAX_I3_WS_NUM"
}
main "$@" | true |
a7685cfff1bb9781341cb85536798e4a4047d5ac | Shell | wyattfry/wp-utils | /wp-backup.sh | UTF-8 | 1,745 | 4.46875 | 4 | [] | no_license | #!/usr/bin/env bash
set -e
# Utility function to check exit status of last command
checkexit() {
if [[ $? -ne 0 ]]
then
echo "$1 was not successful" >&2
exit 1
fi
}
if [[! -f password ]]
then
echo 'Could not find a file called "password"' >&2
echo 'Create one in the same directory that contains the database password.' >&2
exit 1
fi
# Automatic Wordpress Backup of database and files
DBNAME=${USER}_wp314
DBUSER=${USER}_wp314
DBPASS=$(cat password)
if [[ $# -eq 2 ]]
then
# Assume wp-backup.sh SOURCE_DIR TARGET_DIR
# e.g. wp-backup.sh /home/user/public_html/some_subdir /home/user/wp-backups
if [[ ! -e $1 ]]
then
echo "Could not read source directory '$1'" >&2
exit 1
elif [[ ! -e $2 ]]
then
echo "Could not read target directory '$2'" >&2
exit 1
fi
WP_PATH="$(cd "$(dirname "$1")"; pwd)/"
WP_DIR="$(basename "$1")"
BACKUP_DIR="$(cd "$(dirname "$2")"; pwd)/$(basename "$2")"
else
WP_PATH=/home/${USER}/
WP_DIR=public_html
BACKUP_DIR="/home/${USER}/wp-backups/"
fi
TIME_STAMP="$(date +%F-%N)"
SQL_FILE="${WP_PATH}${WP_DIR}/${DBNAME}-${TIME_STAMP}.sql"
# Export database to .sql file, save to root directory of wp folder
mysqldump -u $DBUSER -p$DBPASS $DBNAME > $SQL_FILE
checkexit "Database ${DBNAME} export"
echo "Database '${DBNAME}' successfully exported to ${SQL_FILE}"
# Archive and compress wp folder and save to backup directory
tar -zcvf "${BACKUP_DIR}/${WP_DIR}-${TIME_STAMP}.tar.gz" -C ${WP_PATH} ${WP_DIR}
checkexit "WordPress directory backup"
echo "WordPress directory backed up at ${BACKUP_DIR}/${WP_DIR}-${TIME_STAMP}.tar.gz"
# Clean up exported sql file
rm ${SQL_FILE}
checkexit "Deletion of ${SQL_FILE}"
echo "Deleted ${SQL_FILE}"
echo "Backup succeeded."
exit 0
| true |
f0f57cf2405318c32c9e750ad72028a99b900c4b | Shell | BenjaminsM/PGx-pipeline | /protocols/Convert_Final_report_to_Plink.sh | UTF-8 | 5,028 | 3.21875 | 3 | [] | no_license | #MOLGENIS walltime=02:00:00 mem=10gb ppn=1
#string ngsUtilsVersion
#string PLINKVersion
#string Sample_ID
#string arrayFinalReport
#string PlinkDir
#string familyList
#string famFile
#string lgenFile
#string arrayTmpMap
#string arrayMapFile
#string Project
#string logsDir
#string PLINKVersion2
#string BEDtoolsVersion
#string HRCFilterBedFile
#string HTSlibVersion
#string BCFtoolsVersion
#list chr
set -e
set -u
#Function to check if array contains value
array_contains () {
local array="$1[@]"
local seeking=$2
local in=1
for element in "${!array-}"; do
if [[ "${element}" == "${seeking}" ]]; then
in=0
break
fi
done
return "${in}"
}
makeTmpDir "${PlinkDir}"
tmpPlinkDir="${MC_tmpFile}"
#Check finalReport on "missing" alleles. Also, see if we can fix missing alleles somewhere in GenomeStudio
awk '{ if ($3 != "-" || $4 != "-") print $0};' "${arrayFinalReport}/${Sample_ID}.txt" \
> "${tmpPlinkDir}/${Sample_ID}_FinalReport.txt.tmp"
#Check finalreport on "D"alleles.
awk '{ if ($3 != "D" || $4 != "D") print $0};' "${tmpPlinkDir}/${Sample_ID}_FinalReport.txt.tmp" \
> "${tmpPlinkDir}/${Sample_ID}_FinalReport_2.txt.tmp"
#Push sample belonging to family "1" into list.txt
sampleValue=$(awk 'FNR == 8 {print$2}' "${arrayFinalReport}/${Sample_ID}.txt")
echo 1 "${sampleValue}" > "${tmpPlinkDir}/${familyList}"
#########################################################################
#########################################################################
module load "${ngsUtilsVersion}"
module load "${PLINKVersion}"
module list
##Create .fam, .lgen and .map file from sample_report.txt
sed -e '1,10d' "${tmpPlinkDir}/${Sample_ID}_FinalReport_2.txt.tmp" | awk '{print "1",$2,"0","0","0","1"}' | uniq > "${tmpPlinkDir}/${famFile}"
sed -e '1,10d' "${tmpPlinkDir}/${Sample_ID}_FinalReport_2.txt.tmp" | awk '{print "1",$2,$1,$3,$4}' | awk -f "${EBROOTNGSMINUTILS}/RecodeFRToZero.awk" > "${tmpPlinkDir}/${lgenFile}"
sed -e '1,10d' "${tmpPlinkDir}/${Sample_ID}_FinalReport_2.txt.tmp" | awk '{print $6,$1,"0",$7}' OFS="\t" | sort -k1n -k4n | uniq > ${tmpPlinkDir}/${arrayTmpMap}
grep -P '^[123456789]' "${tmpPlinkDir}/${arrayTmpMap}" | sort -k1n -k4n > "${tmpPlinkDir}/${arrayMapFile}"
grep -P '^[X]\s' "${tmpPlinkDir}/${arrayTmpMap}" | sort -k4n >> "${tmpPlinkDir}/${arrayMapFile}"
grep -P '^[Y]\s' "${tmpPlinkDir}/${arrayTmpMap}" | sort -k4n >> "${tmpPlinkDir}/${arrayMapFile}"
#####################################
##Create .bed and other files (keep sample from sample_list.txt).
##Create .bed and other files (keep sample from sample_list.txt).
#Create ped and map file
plink \
--lfile "${tmpPlinkDir}/${Sample_ID}" \
--recode \
--noweb \
--out "${tmpPlinkDir}/${Sample_ID}" \
--keep "${tmpPlinkDir}/${familyList}"
#Convert ped and map files to a VCF file
#Use different version from plink to make the VCF file
module unload plink
module load "${PLINKVersion2}"
module list
##Create genotype VCF for sample
plink \
--recode vcf-iid \
--ped "${tmpPlinkDir}/${Sample_ID}.ped" \
--map "${tmpPlinkDir}/${arrayMapFile}" \
--out "${tmpPlinkDir}/${Sample_ID}"
# Filter VCF file with bed file of SNPs from HRC with a MAF value lower than 0.01 .
# we want to exclude those SNP's so we use the -v option from bedtools intersect
module load "${BEDtoolsVersion}"
module load "${HTSlibVersion}"
module list
bedtools intersect -a "${tmpPlinkDir}/${Sample_ID}.vcf" -b "${HRCFilterBedFile}" -v -header > ${tmpPlinkDir}/${Sample_ID}.filteredMAF.vcf
bgzip -c "${tmpPlinkDir}/${Sample_ID}.filteredMAF.vcf" > "${tmpPlinkDir}/${Sample_ID}.filteredMAF.vcf.gz"
tabix -p vcf "${tmpPlinkDir}/${Sample_ID}.filteredMAF.vcf.gz"
# Make an VCF per chromosome
chromsomes=()
for chromosome in "${chr[@]}"
do
array_contains chromosomes "${chromosome}" || chromosomes+=("$chromosome")
done
for chr in ${chromosomes[@]}
do
tabix -h "${tmpPlinkDir}/${Sample_ID}.filteredMAF.vcf.gz" "${chr}" > "${tmpPlinkDir}/chr${chr}_${Sample_ID}.filteredMAF.vcf"
done
#Remove duplicate SNP's from the VCF files
module load "${BCFtoolsVersion}"
for chr in ${chromosomes[@]}
do
bcftools norm -d any "${tmpPlinkDir}/chr${chr}_${Sample_ID}.filteredMAF.vcf" -O v -o "${tmpPlinkDir}/chr${chr}_${Sample_ID}.filteredMAF_duplicatesRemoved.vcf"
done
# Convert per chromosome VCF's to .bed , .bim , .fam format PLINK which can be used as phasing input
for chr in ${chromosomes[@]}
do
plink \
--vcf "${tmpPlinkDir}/chr${chr}_${Sample_ID}.filteredMAF_duplicatesRemoved.vcf" \
--make-bed \
--out "${tmpPlinkDir}/chr${chr}_${Sample_ID}"
done
# Move output to output folder
for chr in ${chromosomes[@]}
do
echo "mv temporaru results from ${tmpPlinkDir} to ${PlinkDir}"
mv "${tmpPlinkDir}/chr${chr}_${Sample_ID}.filteredMAF_duplicatesRemoved.vcf" "${PlinkDir}"
mv "${tmpPlinkDir}/chr${chr}_${Sample_ID}.bed" "${PlinkDir}"
mv "${tmpPlinkDir}/chr${chr}_${Sample_ID}.bim" "${PlinkDir}"
mv "${tmpPlinkDir}/chr${chr}_${Sample_ID}.fam" "${PlinkDir}"
done
| true |
4b8bb43e1482c82607d7753c64bd0a292d81f961 | Shell | geekcamp-ph/installation-scripts | /rails-ubuntu.sh | UTF-8 | 1,466 | 3.546875 | 4 | [] | no_license | #!/bin/bash
set -e
echo "Updates packages. Asks for your password."
sudo apt-get update -y
echo "Installs packages. Give your password when asked."
sudo apt-get install build-essential bison openssl libreadline6 libreadline6-dev curl git-core zlib1g zlib1g-dev libssl-dev libyaml-dev libsqlite3-0 libsqlite3-dev sqlite3 libxml2-dev libxslt-dev autoconf libc6-dev nodejs -y
echo "Installs RVM (Ruby Version Manager) for handling Ruby installation and Ruby"
curl -L https://get.rvm.io | bash -s stable --ruby
source ~/.rvm/scripts/rvm
echo "Installs Bundler for Ruby dependency management"
gem install bundler --no-rdoc --no-ri
echo "Installs Ruby on Rails"
gem install rails --no-rdoc --no-ri
echo "Installs text editor"
sudo apt-get install gedit -y
echo -e "\n- - - - - -\n"
echo -e "Now we are going to print some information to check that everything is done:\n"
echo -n "Should be sqlite 3.7.3 or higher: sqlite "
sqlite3 --version
echo -n "Should be rvm 1.6.32 or higher: "
rvm --version | sed '/^.*$/N;s/\n//g' | cut -c 1-10
echo -n "Should be ruby-2.0.0-p247 or higher: "
ruby -v | cut -d " " -f 2
echo -n "Should be Rails 4.0 or higher: "
rails -v
echo -e "\n- - - - - -\n"
echo "If the versions match, everything is installed correctly. If the versions
don't match or errors are shown, something went wrong with the automated process
and we will help you do the installation the manual way at the event.
Congrats!"
| true |
77e48fb7d902e8a8cb8f94bea93b5394c8189109 | Shell | janwychowaniak/standalone-minitools | /szybkie_testy/generator_skryptow_powloki | UTF-8 | 695 | 3.53125 | 4 | [] | no_license | #!/bin/bash
jak_uzywac() {
cat 1>&2 <<EOF
./`basename $0` nazwa
Skrypt generuje szkielety mniejszych skryptow o zadanej nazwie,
majac za zadanie zapobiec kazdorazowemu nudnemu klepaniu ich od zera.
EOF
}
juz_istnieje() {
echo "*** $1 juz tu istnieje. Wybierz inna nazwe." 1>&2
}
MODUL_TRESCI_WYNIKU="./modele/script_gen_initial_content"
if ! [ -x "$MODUL_TRESCI_WYNIKU" ]; then
echo "***Nie udalo sie znalezc modulu dla tresci wyniku: $MODUL_TRESCI_WYNIKU" 1>&2
exit 1
fi
if [ $# -ne 1 ]; then
jak_uzywac
exit 1
fi
NAZWA=$1.sh
if [ -a $NAZWA ]; then
juz_istnieje $NAZWA
exit 1
fi
echo "#!$SHELL" > $NAZWA
$MODUL_TRESCI_WYNIKU $NAZWA
chmod u+x $NAZWA
geany $NAZWA &
| true |
bab25010e9f5106572469da3c7c1d6616008d5c0 | Shell | Kahuna915/Tech-Journal | /SYS265/linux/centos7/secure-ssh.sh | UTF-8 | 679 | 2.5625 | 3 | [] | no_license | #secure-ssh.sh
#author Kahuna915
#creates a new ssh user using $1 parameter
#adds a public key from the local repo or curled from the remote repo
#removes roots ability to ssh in
echo Create user name?
read USERNAME
sudo useradd -m -d /home/$USERNAME -s /bin/bash $USERNAME
sudo mkdir /home/$USERNAME/.ssh
sudo cp /home/noah/Tech-Journal/SYS265/linux/public-keys/id_rsa.pub /home/$USERNAME/.ssh/authorized_keys
sudo chmod 700 /home/$USERNAME/.ssh
sudo chmod 600 /home/$USERNAME/.ssh/authorized_keys
sudo chown -R $USERNAME:$USERNAME /home/$USERNAME/.ssh
sudo sed -i 's/PermitRootLogin yes/PermitRootLogin no/g' /etc/ssh/sshd_config
sudo systemctl restart sshd.service
| true |
f53d93fc6253593108229e71b2ed938409c710cd | Shell | D-TACQ/ACQ400_HTTPD | /bin/voltsmon | UTF-8 | 208 | 3.0625 | 3 | [] | no_license | #!/bin/sh
monitor() {
if [ -d /dev/hwmon ]; then
cd /dev/hwmon
for file in */*[Vv]*
do
echo $file $(cat $file)
done
fi
}
while [ 1 ]
do
monitor | fs2xml --kvp >/dev/shm/volts.xml
sleep 5
done
| true |
3d301f4191f8eef5d0287aec42c74f9bfd006a2b | Shell | joshhubers/aws-scripts | /ecs-ips | UTF-8 | 1,247 | 3.921875 | 4 | [] | no_license | #!/bin/sh
usage() {
echo "Usage: $0 -c <cluster> [-s <service_name>]"
echo " <cluster> The name of the ECS Cluster you wish to get EC2 IPs for"
echo " <service_name> Filter private ips by service name"
exit 1
}
while getopts "c:s:" o; do
case "${o}" in
c)
CLUSTER=${OPTARG}
;;
s)
SERVICE_NAME=${OPTARG}
;;
*)
usage
;;
esac
done
shift $((OPTIND-1))
if [[ -z "${CLUSTER}" ]] ; then
usage
fi
if [[ -z "${SERVICE_NAME}" ]] ; then
LISTED_TASKS=$(aws ecs list-tasks --cluster $CLUSTER --output text | cut -f2)
else
LISTED_TASKS=$(aws ecs list-tasks --cluster $CLUSTER --service-name $SERVICE_NAME --output text | cut -f2)
if [[ -z "${LISTED_TASKS}" ]] ; then
exit 1
fi
fi
TASK_CONTAINER_INSTANCE_ARNS=$(aws ecs describe-tasks --cluster $CLUSTER --tasks $LISTED_TASKS --query 'tasks[*].[containerInstanceArn]' --output text)
EC2_IDS=$(aws ecs describe-container-instances --cluster $CLUSTER --container-instances $TASK_CONTAINER_INSTANCE_ARNS --query 'containerInstances[*].[ec2InstanceId]' --output text)
aws ec2 describe-instances --instance-ids $EC2_IDS --query 'Reservations[*].Instances[*].NetworkInterfaces[*].PrivateIpAddresses[0].[PrivateIpAddress]' --output text
| true |
166d388f89c7eb40747ff1e4d3d9af57dc74a43c | Shell | crawc/libresbc | /build/ansible/roles/netfilter/templates/netfilter.sh | UTF-8 | 1,380 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
reload(){
basedir="{{rundir}}/platform/netfilter"
/usr/sbin/ipset restore < "$basedir/rtpset"
echo "Restored $basedir/rtpset"
/usr/sbin/ipset restore < "$basedir/sipset"
echo "Restored $basedir/sipset"
/sbin/iptables-restore $basedir/rules
echo "Restored $basedir/rules"
}
clear(){
/sbin/iptables -P INPUT ACCEPT
/sbin/iptables -P FORWARD ACCEPT
/sbin/iptables -P OUTPUT ACCEPT
/sbin/iptables -t nat -F
/sbin/iptables -t mangle -F
/sbin/iptables -F
/sbin/iptables -X
/usr/sbin/ipset flush
echo "NetFilter [iptable & ipset] was cleared"
/sbin/iptables -nvL
echo "-----------------------------------------------------------------------------------------------------"
/usr/sbin/ipset list
}
show(){
/sbin/iptables -nvL
echo "-----------------------------------------------------------------------------------------------------"
/usr/sbin/ipset list
}
dump(){
/sbin/iptables-save > iptables.netfilter
/usr/sbin/ipset save > ipset.netfilter
echo "NetFilter (iptables.netfilter & ipset.netfilter) files was dumped in current directory"
}
case "$1" in
reload)
reload
;;
reset)
reset
;;
clear)
clear
;;
show)
show
;;
dump)
dump
;;
*)
echo "Usage: netfilter {reload|reset|dump|show|clean}"
;;
esac
| true |
52c8bb0fa46883941a0c5ed76b745d1c1ca394a2 | Shell | podcastinator/dotfiles | /setup.sh | UTF-8 | 2,252 | 2.53125 | 3 | [] | no_license |
# NOTE(review): this script has no shebang; it is presumably invoked as
# `bash setup.sh` (the `==` inside `[ ... ]` below is a bashism).
# ---------------------------------------------------------------------
# (Neo)vim
# ---------------------------------------------------------------------
# Install ripgrep for CtrlP
# Install nvim
if [ "$(uname)" == "Darwin" ]; then
  brew install ripgrep
  brew install neovim
else
  sudo apt-get install -y software-properties-common
  sudo add-apt-repository ppa:neovim-ppa/stable
  sudo apt-get update
  sudo apt-get install -y ripgrep
  sudo apt-get install -y neovim curl
fi
# download plugin manager for vim
#curl -fLo ~/.vim/autoload/plug.vim --create-dirs \
#    https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
# download plugin manager for nvim
curl -fLo ~/.config/nvim/autoload/plug.vim --create-dirs \
    https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
#cp vim/vimrc ~/.vimrc
# Install the nvim configuration plus C/C++ filetype settings.
mkdir -p ~/.config/nvim
mkdir -p ~/.config/nvim/after/ftplugin
cp nvim/init.vim ~/.config/nvim/init.vim
cp nvim/c.vim ~/.config/nvim/after/ftplugin/c.vim
cp nvim/c.vim ~/.config/nvim/after/ftplugin/cpp.vim
# ---------------------------------------------------------------------
# Git
# ---------------------------------------------------------------------
cp git/gitconfig ~/.gitconfig
# ---------------------------------------------------------------------
# Tmux
# ---------------------------------------------------------------------
# install tmux
if [ "$(uname)" == "Darwin" ]; then
  brew install tmux
  brew install reattach-to-user-namespace
  cp tmux/tmux-macos.conf ~/.tmux.conf
else
  sudo apt-get install -y tmux
  sudo apt-get install -y xsel
  cp tmux/tmux-linux.conf ~/.tmux.conf
fi
# download plugin manager for tmux
git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm
~/.tmux/plugins/tpm/scripts/install_plugins.sh
# ---------------------------------------------------------------------
# Tex
# ---------------------------------------------------------------------
cp tex/latexmkrc ~/.latexmkrc
# ---------------------------------------------------------------------
# Bash
# ---------------------------------------------------------------------
# NB: appends (>>), so re-running this script duplicates the snippets.
cat bash/bashrc >> ~/.bashrc
cat bash/bash_profile >> ~/.bash_profile
cat bash/inputrc >> ~/.inputrc
| true |
5d385ed129a8cb3f93f4ce1b7a0d3ba06263c24e | Shell | cave-g-f/streaming-graph-partitioning | /containers/interactive/master/scripts/run-driver.sh | UTF-8 | 1,632 | 3.171875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# LDBC SNB interactive driver launcher.
# Usage: run-driver.sh WORKERS THREADS DATASET_DIR {onehop|twohop} RESULT_DIR
# Fixed: the script consumes five positional parameters but only checked
# for four, so a missing result-directory was silently passed as an
# empty -rd argument to the driver.
if [ $# -lt 5 ]
then
    echo "Missing argument, provide 'worker-count (number)', 'thread-count (number)', 'dataset-location (directory)', 'workload (onehop|twohop)', and result-directory (directory)"
    exit 2
fi
worker_count=$1
thread_count=$2
dataset_location=$3
workload=$4
result_dir=$5
# Select the driver configuration and operation count for the workload.
if [ "$workload" == "twohop" ]; then
    echo "2-hop query workload"
    conf_path=/sgp/scripts/conf/ldbc-q12-twohop.properties
    operation_count=100000
else
    echo "1-hop query workload"
    conf_path=/sgp/scripts/conf/ldbc-q11-onehop.properties
    operation_count=1000000
fi
time_compression_ratio=0.0001
# locator should point to remote-objects.yaml
locator=/sgp/scripts/conf/remote-objects.yaml."$worker_count"
# DO NOT CHANGE
parameters_dir=$dataset_location/substitution_parameters
updates_dir=$dataset_location/social_network
# DO NOT CHANGE Database Implementation
db=ca.uwaterloo.cs.ldbc.interactive.gremlin.GremlinDb
# DO NOT CHANGE jar file for the workload implementation
workload_impl=/sgp/scripts/lib/snb-interactive-gremlin-1.0-SNAPSHOT-jar-with-dependencies.jar
# Replace this shell with the Java driver process.
exec java -Djava.util.logging.config.file=logging.properties -cp "/sgp/scripts/lib/jeeves-0.3-SNAPSHOT.jar:src/main/resources:$workload_impl" \
    com.ldbc.driver.Client -w com.ldbc.driver.workloads.ldbc.snb.interactive.LdbcSnbInteractiveWorkload -oc $operation_count -P $conf_path \
    -p "ldbc.snb.interactive.parameters_dir|$parameters_dir" -p "ldbc.snb.interactive.updates_dir|$updates_dir" -p "locator|$locator" -db $db \
    -tc $thread_count -tcr $time_compression_ratio -ignore_scheduled_start_times true -rd $result_dir
| true |
89fecafb260074eb5e06427df925ff73e3380341 | Shell | pong3489/Projet_ARDrone_Track | /ardrone_urbi/bin/umake | UTF-8 | 16,980 | 4 | 4 | [] | no_license | #! /bin/sh
# Re-exec under ksh on old Ultrix systems whose /bin/sh is too broken;
# RUNNING_KSH guards against an exec loop.
test -f /bin/ksh && test -z "$RUNNING_KSH" \
  && { UNAMES=`uname -s`; test "x$UNAMES" = xULTRIX; } 2>/dev/null \
  && { RUNNING_KSH=true; export RUNNING_KSH; exec /bin/ksh $0 ${1+"$@"}; }
unset RUNNING_KSH
# No failure shall remain unpunished.
set -e
# Name and directory of this script, used in diagnostics below.
me=$(basename "$0")
medir=$(dirname "$0")
# We have to initialize IFS to space tab newline since we save and
# restore IFS and apparently POSIX allows stupid/broken behavior with
# empty-but-set IFS.
# http://lists.gnu.org/archive/html/automake-patches/2006-05/msg00008.html
# We need space, tab and new line, in precisely that order. And don't
# leave trailing blanks.
space=' '
tab='	'
newline='
'
IFS="$space$tab$newline"
# Pacify verbose cds.
CDPATH=${ZSH_VERSION+.}$path_sep
# In case someone crazy insists on using grep -E.
: ${EGREP=egrep}
# Global flags driven by command-line options (see get_options).
debug=false
quiet=false # by default let the tools' message be displayed
verb=false # true for verbose mode
## --------------------- ##
## Auxiliary functions. ##
## --------------------- ##
# In case `local' is not supported by the shell.
# Probe in a subshell: define a function using `local'; if the inner
# assignment leaks into the enclosing scope (or `local' is unsupported),
# define a crude fallback that merely performs the assignment --
# without real scoping, which is good enough for this script.
(
  foo=bar
  test_local () {
    local foo="foo"
  }
  test_local
  test $foo = bar
) || local () {
  case $1 in
    *=*) eval "$1";;
  esac
}
# stderr LINE1 LINE2...
# ---------------------
# Print each argument as its own "$me: ..."-prefixed line on the
# standard error stream.
stderr ()
{
  local line
  for line in "$@"
  do
    echo >&2 "$me: $line"
  done
}
# verbose WORD1 WORD2
# -------------------
# Forward the given lines to stderr(), but only when verbose mode
# ($verb) is enabled; otherwise do nothing and succeed.
verbose ()
{
  if ! $verb; then
    return 0
  fi
  stderr "$@"
}
# run COMMAND-LINE
# ----------------
# Run the COMMAND-LINE verbosely, and catching errors as failures.
# In verbose mode, first echo the command: the command word on one
# line, then one aligned line per remaining argument.  The command's
# stdout is sent to fd 5 (the "tools output" channel opened by
# initialize); a non-zero exit status aborts the script via error().
run ()
{
  if $verb; then
    first=true
    for i
    do
      if $first; then
        stderr "Running: $i"
        first=false
      else
        stderr "       : $i"
      fi
    done
  fi
  "$@" 1>&5 ||
    error 1 "$1 failed"
}
# error EXIT_STATUS LINE1 LINE2...
# --------------------------------
# Print the message lines via stderr(), then terminate the whole
# script with the given EXIT_STATUS.
error ()
{
  local status="$1"
  shift
  stderr "$@"
  exit $status
}
# fatal LINE1 LINE2...
# --------------------
# Report an error and exit 1.  (Convenience wrapper around error.)
fatal ()
{
  error 1 "$@"
}
# dirlist_error EXIT_STATUS WHAT WHERE WHICH
# ------------------------------------------
# Report an error and exit with failure if WHICH, of type WHAT
# does not exist in WHERE.
# This function tests only directories.
# On failure, every subdirectory of WHERE is listed as a valid choice
# before exiting with EXIT_STATUS.
dirlist_error ()
{
  local err="$1"
  local type="$2"
  local base="$3"
  local val="$4"
  if test ! -d $base/$val; then
    stderr "no such $type $val, possible choices are :"
    # Enumerate candidate directories to help the user pick one.
    for d in $base/*; do
      if test -d $d; then
        stderr "  - $(basename $d)"
      fi
    done
    exit $err
  fi
}
# exist_error EXIT_STATUS WHAT WHERE OPTION
# -----------------------------------------
# Report an error and exit with failure if WHERE is not found
# or is not of type WHAT ('d' for directory, 'f' for file).
# OPTION indicates which umake option to set for this value.
exist_error ()
{
  local err="$1"
  local type="$2"
  local base="$3"
  local option="$4"
  local longtype="$2"
  # Map the test(1) flag to a human-readable word for the message.
  case $type in
    d) longtype=directory;;
    f) longtype=file;;
  esac
  # For directories, normalize the path (strip a trailing component)
  # so the diagnostic shows a clean name.
  test "$type" = d -a -n "$base" &&
    base=$(dirname $base/.)
  if test ! -$type "$base"; then
    stderr "no such $longtype $base"
    if test -n "$option"; then
      stderr " use option --$option to set to an alternative value."
    fi
    exit $err
  fi
}
# Initialize the common set up. Should be done when $debug and
# $quiet are set (i.e. after get_options).
initialize ()
{
  # File descriptor usage:
  #  0 standard input
  #  1 standard output (--verbose messages)
  #  2 standard error
  #  3 some systems may open it to /dev/tty
  #  4 used on the Kubota Titan
  #  5 tools output (turned off by --quiet)
  #  6 tracing/debugging (set -x output, etc.)
  # Main tools' output (TeX, etc.) that TeX users are used to seeing.
  #
  # If quiet, discard, else redirect to the error flow.
  if $quiet; then
    exec 5>/dev/null
  else
    exec 5>&2
  fi
  # Enable tracing, and auxiliary tools output.
  #
  # Should be used where you'd typically use /dev/null to throw output
  # away. But sometimes it is convenient to see that output (e.g., from
  # a grep) to aid debugging. Especially debugging at distance, via the
  # user.
  if $debug || test x"$VERBOSE" = xx; then
    exec 6>&1
    set -x
  else
    exec 6>/dev/null
  fi
  verbose "$0 running."
}
# append VARIABLE CONTENT [SEPARATOR=' ']
# ---------------------------------------
# Append CONTENT to the value of the shell variable named VARIABLE,
# inserting SEPARATOR (a single space by default) only when VARIABLE
# already holds a non-empty value.
append ()
{
  local name="$1"
  local value="$2"
  local sep
  sep=${3- }
  eval "$name=\$$name\${$name:+$sep}\$value"
}
usage ()
{
  # Print the help text on stdout and exit successfully.  The
  # here-document interpolates current defaults ($core, $host, ...).
  cat <<EOF
Usage: $me [OPTION]... [FILE]...
General options:
  -D, --debug              turn on shell debugging (set -x)
  -h, --help               display this help and exit successfully
  -q, --quiet              no output unless errors
  -V, --version            display version information and exit successfully
  -v, --verbose            report on what is done
Compilation options:
      --deep-clean         remove all building directories
  -c, --clean              clean building directory before compilation
  -j, --jobs=JOBS          specify the numbers of commands to run simultaneously
  -l, --library            produce a library, don't link to a particular core
  -s, --shared             produce a shared library loadable by any core
  -o, --output=output      output file name
  -C, --core=CORE          build type [$core]
  -H, --host=HOST          destination host [$host]
  -m, --disable-automain   do not add the main function
      --package=PACKAGE    use pkg-config to add flags required by PACKAGE
  -I<path>                 pass unchanged to compiler
  -L<path>                 pass unchanged to linker
  -l<lib>                  pass unchanged to linker
Developper options:
  -p, --prefix=DIR         library file location [$prefix]
  -P, --param-mk=FILE      param.mk location [$(param_mk)]
  -k, --kernel=DIR         kernel location [$(kernel)]
Exit codes:
  1  some tool failed
  2  invalid command line option
  3  unknown command line argument
  4  unable to find file or directory
FILE may be C/C++ source files, headers, libraries or directory that
will be searched for such files.
Report bugs to sdk-remote-bugs@gostai.com.
EOF
  exit 0
}
version ()
{
  # Print the version banner and exit successfully.  The quoted
  # delimiter (<<\EOF) disables all expansion in the body.
  cat <<\EOF
umake UNDEFINED (Urbi SDK Remote UNDEFINED)
Copyright (C) 2004-2010, Gostai S.A.S.
EOF
  exit 0
}
# Return the location of param_mk.
# Prefer the user-supplied path (--param-mk); otherwise use the
# fragment shipped with the selected core.  When building a library
# there is no core, so fall back to the 'remote' core, which is
# always installed.
param_mk ()
{
  if test -n "$param_mk"; then
    echo "$param_mk"
    return 0
  fi
  echo "$brandlibdir/${core:-remote}/param.mk"
}
# Return the location of the kernel
kernel ()
{
  # NOTE(review): this prints $prefix when $kernel is set, and nothing
  # otherwise -- one would expect it to print $kernel; confirm intent.
  echo "${kernel+$prefix}"
}
# Clean all build directories.
deep_clean ()
{
  # Remove every "_ubuild*" directory below the current directory,
  # then exit: this is a terminal action triggered by --deep-clean.
  if find . -name "${builddir_pref}*" -a -type d | xargs rm -rf; then
    verbose "all build directories cleaned"
    exit 0
  else
    fatal "cannot clean build directories"
  fi
}
## ---------------------- ##
## Command line parsing.  ##
## ---------------------- ##
# Parse all command-line arguments: sets the global option flags
# ($debug, $verb, $core, $target, ...) and classifies the remaining
# arguments into $sources, $headers, $libs, $makeargs and $vpath.
get_options ()
{
  # Push a token among the arguments that will be used to notice when we
  # ended options/arguments parsing.
  # Use "set dummy ...; shift" rather than 'set - ..." because on
  # Solaris set - turns off set -x (but keeps set -e).
  # Use ${1+"$@"} rather than "$@" because Digital Unix and Ultrix 4.3
  # still expand "$@" to a single argument (the empty string) rather
  # than nothing at all.
  arg_sep="$$--$$"
  set dummy ${1+"$@"} "$arg_sep"; shift
  # Parse command line arguments.
  while test x"$1" != x"$arg_sep"
  do
    # Handle --option=value by splitting apart and putting back on argv.
    case $1 in
      (--*=*)
        opt=`echo "$1" | sed -e 's/=.*//'`
        val=`echo "$1" | sed -e 's/[^=]*=//'`
        shift
        set dummy "$opt" "$val" ${1+"$@"}; shift
        ;;
    esac
    case $1 in
      (-D | --debug ) debug=true;;
      (-v | --verbose) verb=true;;
      (-h | --help ) usage;;
      (-q | --quiet ) quiet=true;;
      (-V | --version) version;;
      (-l | --library) core= ; shared=false ;;
      (-s | --shared | --shared-library)
        core= ; shared=true ; automain=false ;;
      (   --deep-clean) deep_clean ;;
      (-c | --clean) clean=true ;;
      # NOTE(review): this passes "-j $1" as the VARIABLE NAME to
      # append, which looks wrong -- probably meant
      # `append makeargs "-j $1"`; confirm against the core's param.mk.
      (-j | --jobs) shift; append "-j $1" " ";;
      (-C | --core ) shift; core=$1;;
      (-H | --host ) shift; host=$1;;
      (-o | --output) shift; target=$1;;
      (-m | --disable-automain) automain=false;;
      (-p | --prefix) shift; prefix=$1;;
      (-P | --param-mk) shift; param_mk=$1;;
      (-k | --kernel) shift; kernel=$1;;
      (-I*) append EXTRA_CPPFLAGS $1 ;;
      (-L*) append EXTRA_LDFLAGS $1 ;;
      (-l*) append EXTRA_LDFLAGS $1 ;;
      (--package) shift;
        pkg-config --print-errors --exists $1
        append EXTRA_CPPFLAGS "$(pkg-config --cflags $1)"
        append EXTRA_LDFLAGS "$(pkg-config --libs $1)"
        ;;
      (--) # What remains are not options.
        shift
        while test x"$1" != x"$arg_sep"
        do
          set dummy ${1+"$@"} "$1"; shift
          shift
        done
        break
        ;;
      (-*)
        error 2 "Unknown or ambiguous option \`$1'." \
              "Try \`--help' for more information."
        ;;
      (*) set dummy ${1+"$@"} "$1"; shift;;
    esac
    shift
  done
  # Pop the token
  shift
  # Interpret remaining command line args as filenames.
  # NOTE(review): $oname is never assigned anywhere in this script
  # ($target is used for --output), so this multiple-argument guard
  # can never trigger -- confirm.
  case $#:$oname in
    ([01]:* | *:);;
    (*) error 2 "Can't use option \`--output' with more than one argument.";;
  esac
  while test x"$1" != x || test $havearg = false
  do
    if test x"$1" = x && test $havearg = false; then
      set dummy . ${1+"$@"}; shift
      havearg=true
    fi
    # If this is a directory, append a slash.
    case $1$(test -d "$1" && echo '/') in
      (VPATH=*) vpath=$vpath:$(echo "$1" | sed -e 's/^[^=]*=//');;
      (EXTRA_CPPFLAGS=*)
        append EXTRA_CPPFLAGS "$(echo "$1" | sed -e 's/^[^=]*=//')" ;;
      (EXTRA_LDFLAGS=*)
        append EXTRA_LDFLAGS "$(echo "$1" | sed -e 's/^[^=]*=//')" ;;
      (*=*) append makeargs "'$1'";;
      (*.h |*.hh|*.hxx|*.hpp) append headers "'$1'"; havearg=true ;;
      (*.cc|*.cpp|*.c|*.C) append sources "'$1'"; havearg=true ;;
      (*.a|*.o|*.obj) append libs "'$1'"; havearg=true ;;
      (*/)
        # It is a directory: recursively collect every source, header
        # and object file, excluding previous umake build directories.
        files=$(find "$1" \
          -iname '*.h' \
          -or -iname '*.hh' \
          -or -iname '*.hxx' \
          -or -iname '*.hpp' \
          -or -iname '*.c' \
          -or -iname '*.cc' \
          -or -iname '*.cpp' \
          -or -iname '*.o' \
          -or -iname '*.obj' \
          -or -iname '*.a' | grep -Fv "$builddir_pref" ) || true
        havearg=true;
        shift
        set dummy $files ${1+"$@"};;
      (*)
        error 3 "unknown type of file '$1'"
        ;;
    esac
    shift
  done
}
## ------ ##
## Main. ##
## ------ ##
# ---- Defaults, option processing and target-name selection. ----
: ${DLMODEXT='.so'}
: ${EXEEXT=''}
: ${LIBSFX=''}
clean=false
: ${URBI_ENV='remote'}
core=
havearg=false # we have at least one path or file arg
: ${URBI_HOST='i686-pc-linux-gnu'}
host=$URBI_HOST
# Libraries (and flags) to use when linking a module.
LIBADD=
# Whether building a shared lib/module.
shared=true
builddir=
builddir_pref="_ubuild"
# Make the package relocatable: the urbi-root contains the bin
# directory that contains this tool. Yet, make it absolute. For
# instance because libtool does not want relative rpath, and prefix
# contributes to libdir.
prefix=$(cd $(dirname $0)/.. && pwd)
# Keep the variables in that order, they have dependencies. bindir is
# needed at least on Windows, where libdir is defined as $bindir.
: ${PACKAGE_BRAND="gostai"}
: ${exec_prefix="${prefix}"}
: ${bindir="${exec_prefix}/bin"}
: ${libdir="${exec_prefix}/lib"}
: ${brandlibdir="${libdir}/${PACKAGE_BRAND}"}
# Target name.
target=
libs=
sources=
headers=
makeargs=
automain=true
# Produce objects in the current directory, not in the source tree.
objects=
vpath=.
get_options "$@"
initialize
# Use default main.
if $automain; then
  : ${umaindir='${brandsharedir}/umain'}
  append sources "$umaindir/umain.cc"
fi
# Derive one .o name per source file and extend the VPATH accordingly.
for s in $(eval echo "$sources")
do
  append objects "'"$(basename "$s" | sed 's/\.[^.]*$/.o/g')"'"
  append vpath $(dirname "$s") :
done
# The library extension.
libext=$DLMODEXT
$shared || libext=.a
# Select target name if unspecified.
case $target:$core in
  (:) target=uobject$libext;;
  (:*) target=urbiengine-$core$EXEEXT;;
  # The user can provide $target with or without extension. Be sure
  # to give it the proper extension.
  (*:) target=${target%$libext}$libext;;
  (*:*) target=${target%$EXEEXT}$EXEEXT;;
esac
# Remove ourselves from library list in lib generation mode, add OUTBIN
# option otherwise.
case $core in
  ('')
    # Library mode: drop the target itself from the list of libraries
    # to link against, then tell the makefile which kind to produce.
    res=
    for i in $libs
    do
      test "$i" = "$target" ||
        append res "$i"
    done
    libs=$res
    if $shared; then
      append makeargs "OUTSHLIB=$target"
    else
      append makeargs "OUTLIB=$target"
    fi
    ;;
  (*)
    append makeargs "OUTBIN=$target"
    ;;
esac
# When building an UObject as a shared lib, we have to pass
# -no-undefined to libtool, otherwise it refuses to build a DLL. But
# then we have missing symbols: those in libuobject, so we link
# against libuobject.
#
# We don't do that in general (Linux etc.) to avoid keeping an
# explicit rpath dependency on libuobject. It will be found and
# dlopened later by urbi-launch.
if $shared; then
  case $host in
    (*pw32*|*mingw32*)
      # The following does not work:
      #
      # # Pass the true library, not the *.la file, since it contains
      # # hard-coded locations to where the libraries we depend on were
      # # installed. In other words, *.la files are not relocatable.
      # #
      # # This is not an issue in the present case.
      # append LIBADD "\${envdir}/libuobject$LIBSFX.dll"
      #
      # because for some reason libtool then refuses to build a dll.
      # So we will probably have to find a means to fix relocatability
      # elsewhere.
      append LIBADD "-no-undefined \${envdir}/libuobject$LIBSFX.la"
      ;;
  esac
fi
# The tool to link.
# Always honor UMAKE_LINK if defined.
# Then try to find umake-link where it was installed, otherwise in the
# same dir as this tool, or finally, trust the \$PATH.
if test -z "$UMAKE_LINK"; then
  for dir in '/prefix/bin' $(dirname "$0")
  do
    if test -f $dir/umake-link; then
      UMAKE_LINK=$dir/umake-link
      break;
    fi
  done
fi
: ${UMAKE_LINK=umake-link}
# Sanity check: umake-link must at least answer --version.
($UMAKE_LINK --version) >/dev/null 2>&1 ||
  fatal "cannot run umake-link: $UMAKE_LINK"
# Define lib to link against.
if test -z "$LIBNAME"; then
  case $core in
    (''|remote|engine|webots)
      UMAKE_LIBNAME=libuobject${LIBSFX};;
    (fullengine)
      # "fullengine" is engine + the client-side helper libraries.
      UMAKE_LIBNAME=libuobject${LIBSFX}
      UMAKE_EXTRALIB="${libdir}/liburbi$LIBSFX.la ${libdir}/libjpeg4urbi$LIBSFX.la"
      core=engine;;
  esac
else
  UMAKE_LIBNAME=$LIBNAME
fi
# Then pass env.
append makeargs "prefix=$prefix"
verbose \
  "LIBADD='$LIBADD'" \
  "headers='$headers'" \
  "libs='$libs'" \
  "makeargs='$makeargs'" \
  "objects='$objects'" \
  "sources='$sources'" \
  "vpath='$vpath'"
# Set and create build dir for temporary files
builddir="$(dirname $target)/_ubuild-$(basename $target)"
libsdir="$(dirname $target)/.libs"
# Clean target build directory
if $clean; then
  if rm -rf "$builddir"; then
    verbose "build directory cleaned."
  else
    fatal "cannot remove $builddir"
  fi
fi
# Create target build directory
mkdir -p "$builddir"
# Generate object fullnames
# (strip the quoting added in get_options, prefix with the build dir,
# and re-quote).
obj_fullnames=
for o in $objects; do
  obj_fullnames="$obj_fullnames '"${builddir}/$(echo "$o" | tr -d "'")"'"
done
objects=$obj_fullnames
line="\
-------------------------------------------------------------------------------"
bintype="'$core' binary"
if test -z "$core"; then
  bintype="library"
fi
verbose "" "$line" "running to build $bintype." "$line" ""
# Check if base directory exists
exist_error 4 d "$prefix" prefix
# Check param.mk file
exist_error 4 f $(param_mk) param-mk
# Invoke make.
if $verb; then
  echo >&2 "$(param_mk):"
  sed >&2 's/^/> /' $(param_mk)
fi
verbose "invoking make -f $(param_mk) $target"
run eval make -f "$(param_mk)" \
    "$target" \
    UMAKE_BUILD_DIR="$builddir" \
    UMAKE_URBI_ENV="${core:-remote}" \
    UMAKE_LIBNAME="$UMAKE_LIBNAME" \
    UMAKE_EXTRALIB="\"$UMAKE_EXTRALIB\"" \
    UMAKE_LINK="$UMAKE_LINK" \
    EXTRA_CPPFLAGS="'$EXTRA_CPPFLAGS'" \
    EXTRA_LDFLAGS="'$EXTRA_LDFLAGS'" \
    ARGUMENT_LIBS="'$libs'" \
    LIBADD="'$LIBADD'" \
    HEADERS="'$headers'" \
    OBJECTS="'$objects'" \
    VPATH="'$vpath'" \
    "$makeargs"
verbose "done."
exit 0
# Local variables:
# mode: shell-script
# End:
| true |
cdec3bf2accabd2d7dded420d21ad1c722386b1b | Shell | jralmaraz/terraform-validator | /release.sh | UTF-8 | 2,480 | 3.921875 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -e
# Release builder for terraform-validator: builds per-platform binaries,
# bundles third-party license notices, tags the repo and uploads to GCS.
if [ $# -eq 0 ]
then
    # Fixed: the hint used to say VERSION=X.X, but the validation regex
    # below requires a full three-component semantic version.
    echo "No version supplied. Run \`make release VERSION=X.X.X\`"
    exit 1
fi
version=$1
# Require an unprefixed semantic version (e.g. 1.2.3).
if ! echo "$version" | grep -Eq "^[0-9]+\.[0-9]+\.[0-9]+$"
then
    echo "Invalid version: ${version}"
    echo "Please specify a semantic version with no prefix (e.g. X.X.X)."
    exit 1
fi
release_dir="./release/${version}"
rm -rf "$release_dir"
mkdir -p "$release_dir"
echo "Go version: $(go version)"
go get github.com/google/go-licenses
echo "Downloading licenses and source code to bundle..."
# Ignore errors until https://github.com/google/go-licenses/pull/77 is merged
set +e
go-licenses save "github.com/GoogleCloudPlatform/terraform-validator" --save_path="./${release_dir}/THIRD_PARTY_NOTICES"
set -e
echo "Zipping licenses and source code..."
pushd "${release_dir}" > /dev/null
zip -rq9D "THIRD_PARTY_NOTICES.zip" "THIRD_PARTY_NOTICES/"
popd > /dev/null
architectures="amd64 arm64"
platforms="linux windows darwin"
# Space-delimited list of platform/arch pairs we do not ship.
skip_platform_arch_pairs=" windows/arm64 "
tar_gz_name=terraform-validator
ldflags="-X github.com/GoogleCloudPlatform/terraform-validator/tfgcv.buildVersion=v${version}"
release_bucket=terraform-validator
# Build release versions
for platform in ${platforms}; do
    if [[ "$platform" == "windows" ]]; then
        binary_name=terraform-validator.exe
    else
        binary_name=terraform-validator
    fi
    for arch in ${architectures}; do
        if [[ " ${skip_platform_arch_pairs[@]} " =~ " ${platform}/${arch} " ]]; then
            echo "Skipped unsupported platform/arch pair ${platform}/${arch}"
            continue
        fi
        echo "Building ${binary_name} v${version} for platform ${platform} / arch ${arch}..."
        GO111MODULE=on GOOS=${platform} GOARCH=${arch} CGO_ENABLED=0 go build -ldflags "${ldflags}" -o "${release_dir}/${binary_name}" .
        echo "Creating ${release_dir}/${tar_gz_name}_${platform}_${arch}-${version}.tar.gz"
        pushd "${release_dir}" > /dev/null
        tar -czf "${tar_gz_name}_${platform}_${arch}-${version}.tar.gz" "${binary_name}" "THIRD_PARTY_NOTICES.zip"
        popd > /dev/null
    done
done
echo "Creating Github tag ${version}"
git tag "${version}"
git push origin "${version}"
echo "Github tag ${version} created"
# Publish release versions
echo "Pushing releases to Google Storage"
gsutil cp ${release_dir}/*.tar.gz gs://${release_bucket}/releases/v${version}
echo "Releases pushed to Google Storage"
echo "Create a new release by visiting https://github.com/GoogleCloudPlatform/terraform-validator/releases/new?tag=${version}&title=${version}"
| true |
69be960384b3f84a54eac15ceebdb6e5dab8c754 | Shell | plamb/phoenix-dockerfile | /build_sass.sh | UTF-8 | 735 | 2.71875 | 3 | [] | no_license | # Based on https://gist.github.com/edouard-lopez/503d40a5c1a49cf8ae87
# Install dependencies
# these are already included in main dependencies
# apt-get update && apt-get install -y automake libtool build-essential
# Fetch sources
git clone https://github.com/sass/libsass.git
git clone https://github.com/sass/sassc.git libsass/sassc
# Create custom makefiles for **shared library**, for more info read:
# 'Difference between static and shared libraries?' before installing libsass http://stackoverflow.com/q/2649334/802365
cd libsass
autoreconf --force --install
./configure \
    --disable-tests \
    --enable-shared \
    --prefix=/usr
cd ..
# Build and install the library
make -C libsass -j5 install
# cleanup: remove the cloned source tree.
# Fixed: was "rm -rf libasss" (typo), which silently removed nothing
# and left the clone behind.
rm -rf libsass
| true |
93a35045e20b541c1f8b27cd1326e127b1342bb7 | Shell | mehdimu/d43a3 | /postgresql-7.4.13/contrib/reindexdb/reindexdb | UTF-8 | 7,490 | 4.1875 | 4 | [
"MIT",
"PostgreSQL"
] | permissive | #!/bin/sh
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #
# Package : reindexdb Version : $Revision: 1.5 $
# Date : 05/08/2002 Author : Shaun Thomas
# Req : psql, sh, perl, sed Type : Utility
#
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #
# Function Definitions
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #
usage()
{
	# Print the help text on stdout and exit successfully.
	echo "$CMDNAME reindexes a PostgreSQL database."
	echo
	echo "Usage:"
	echo "  $CMDNAME [options] [dbname]"
	echo
	echo "Options:"
	echo "  -h, --host=HOSTNAME             Database server host"
	echo "  -p, --port=PORT                 Database server port"
	echo "  -U, --username=USERNAME         Username to connect as"
	echo "  -W, --password                  Prompt for password"
	echo "  -d, --dbname=DBNAME             Database to reindex"
	echo "  -a, --all                       Reindex all databases"
	echo "  -t, --table=TABLE               Reindex specific table only"
	echo "  -i, --index=INDEX               Reindex specific index only"
	echo "  -e, --echo                      Show the command(s) sent to the backend"
	echo "  -q, --quiet                     Don't write any output"
	echo
	echo "Read the description of the SQL command REINDEX for details."
	echo
	exit 0
}
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #
# Program Body
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #
CMDNAME=`basename "$0"`
PATHNAME=`echo $0 | sed "s,$CMDNAME\$,,"`
# Try valiantly to get the location of psql, since you can't ever
# really know where it has been placed. We'll start by trying the
# path. If that fails, we'll try the directory where this script
# resides. Then on to whereis, and finally locate. Wish us luck.
if x=`psql -V 2>/dev/null | grep psql`; then
    PSQL='psql'
elif [ -f ${PATHNAME}psql ]; then
    PSQL=${PATHNAME}psql;
elif x=`whereis -b psql 2>/dev/null | sed 's/.* //'`; then
    PSQL=$x
elif x=`locate -r bin/psql$ -n 1 2>/dev/null`; then
    PSQL=$x
else
    echo "$CMDNAME: Could not find psql to talk to postgres installation."
    echo "Please make sure psql is in your path, or that this script is in"
    echo "the same directory as psql was installed."
    exit 1
fi
# Now, go through all of our command-line options and get each operation
# we said we'd accept in the usage listing.
# Connection options are accumulated verbatim in $PSQLOPT; script-level
# options set $alldb, $dbname, $table, $index, $ECHOOPT and $quiet.
while [ "$#" -gt 0 ]
do
    # Show help.
    case "$1" in
        --help|-\?)
            usage
            exit 0
            ;;
        # All of the following are postgres options. We can pass them on
        # directly, without interpreting them in any way. We don't care.
        # Anything that allows a space, we'll get the next *two* arguments
        # and make sure to pass those along.
        --host|-h|-p|--port|-U|--username)
            PSQLOPT="$PSQLOPT $1 $2"
            shift
            ;;
        -h*|--host=*|-p*|--port=*|-U*|--username=*|-W|--password)
            PSQLOPT="$PSQLOPT $1"
            ;;
        # From this point on, we're setting options that are required for
        # or only valid in This script. This includes which database(s) to
        # reindex, which tables, or which indexes, and so on.
        # Echoing. We'll *not* use this in queries we use to get lists.
        --echo|-e)
            ECHOOPT="-e"
            ;;
        # Do not echo messages.
        --quiet|-q)
            ECHOOPT="-q"
            quiet=1
            ;;
        # Reindex all databases, all tables, all applicable indexes.
        --all|-a)
            alldb=1
            ;;
        # Database to connect to, if not all of them.
        --dbname|-d)
            dbname="$2"
            shift
            ;;
        -d*)
            # Fixed: the sed expression was 's/^-d/' (unterminated `s'
            # command, missing the closing delimiter), which made sed
            # fail for the -dNAME spelling.
            dbname=`echo "$1" | sed 's/^-d//'`
            ;;
        --dbname=*)
            dbname=`echo "$1" | sed 's/^--dbname=//'`
            ;;
        # Reindex specific Table. Disables index reindexing.
        --table|-t)
            table="$2"
            shift
            ;;
        -t*)
            table=`echo "$1" | sed 's/^-t//'`
            ;;
        --table=*)
            table=`echo "$1" | sed 's/^--table=//'`
            ;;
        # Reindex specific index. Disables table reindexing.
        --index|-i)
            index="$2"
            shift
            ;;
        -i*)
            index=`echo "$1" | sed 's/^-i//'`
            ;;
        --index=*)
            index=`echo "$1" | sed 's/^--index=//'`
            ;;
        # Yeah, no options? Whine, and show usage.
        -*)
            echo "$CMDNAME: invalid option: $1" 1>&2
            usage;
            exit 1
            ;;
        # Finally, it's possible that the database name was just the last
        # unlabeled option. So, let's get that.
        *)
            dbname="$1"
            ;;
    esac
    shift # Shift off each argument as we loop.
done
# Get a list of all databases we'll be using. This first case is if we
# were asked to do all databases.
if [ "$alldb" ]; then
    if [ "$dbname" ] || [ "$index" ] || [ "$table" ]; then
        echo "$CMDNAME: cannot reindex all databases and a specific database," 1>&2
        echo "          table, or index at the same time." 1>&2
        exit 1
    fi
    # Execute a command to pull back all databases the user specified can
    # connect to. That's the list we'll be using. It's also why it's
    # a good idea for this to be run as a super-user.
    sql='SELECT datname FROM pg_database WHERE datallowconn'
    dbname=`$PSQL $PSQLOPT -q -t -A -d template1 -c "$sql"`
# Ok, if it's not all databases, make sure at least one database is
# specified before continuing.
elif [ -z "$dbname" ]; then
    echo "$CMDNAME: missing required argument: database name" 1>&2
    usage;
    exit 1
fi
# No. We can't reindex a specific index and table at the same time.
# Complain about this, and move on.
if [ "$table" ] && [ "$index" ]; then
    echo "$CMDNAME: cannot reindex a specific table and a specific index" 1>&2
    echo "at the same time." 1>&2
    exit 1
fi
# If index was selected, reindex that index.
if [ "$index" ]; then
    $PSQL $PSQLOPT $ECHOOPT -c "REINDEX INDEX \"$index\"" -d "$dbname"
    if [ "$?" -ne 0 ]; then
        echo "$CMDNAME: reindex index \"$index\" failed" 1>&2
        exit 1
    fi
# Ok, no index. Is there a specific table to reindex?
elif [ "$table" ]; then
    $PSQL $PSQLOPT $ECHOOPT -c "REINDEX TABLE \"$table\"" -d "$dbname"
    if [ "$?" -ne 0 ]; then
        echo "$CMDNAME: reindex table \"$table\" failed" 1>&2
        exit 1
    fi
# No specific table, no specific index, either we have a specific database,
# or were told to do all databases. Do it!
else
    # We set IFS to newline only so that the for-loops won't misinterpret
    # spaces in the lists we retrieved via psql. Note also the use of
    # regclass to handle spaces, mixed-case names, and schema awareness.
    sql="SELECT DISTINCT c.oid::pg_catalog.regclass FROM pg_catalog.pg_index x JOIN pg_catalog.pg_class c ON c.oid = x.indrelid JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid WHERE nspname NOT LIKE 'pg\\\\_%'"
    IFS='
'
    for db in $dbname; do
        # Only print which database we're currently reindexing if not in
        # quiet mode, and we're doing more than one database.
        [ "$alldb" ] && [ -z "$quiet" ] && echo "Reindexing $db"
        IFS='
'
        # Get a list of non-system tables that have indexes.
        tables=`$PSQL $PSQLOPT -q -t -A -d "$db" -c "$sql"`
        # Ok, reindex every table in the database.
        IFS='
'
        for tab in $tables; do
            IFS='
'
            $PSQL $PSQLOPT $ECHOOPT -c "REINDEX TABLE $tab" -d "$db"
            if [ "$?" -ne 0 ]; then
                echo "$CMDNAME: reindex table $tab failed" 1>&2
                exit 1
            fi
            IFS='
'
        done
    done
fi
exit 0
| true |
f15796ae7b3120767c714cdd1f6d4774d142d95f | Shell | gnayuy/janeliafarmresearchcampus | /computer/backup/mark/pipelines/reconstruct_v1.sh | UTF-8 | 2,399 | 2.84375 | 3 | [] | no_license | ANTS=/groups/scicomp/jacsData/yuTest/toolkits/ANTS/ANTS
# Section-to-section rigid registration pipeline (ANTS + Vaa3D):
# walks sections 100..1 downward, aligning each section to the
# previously-warped one, then warps all three channels and merges them.
WARP=/groups/scicomp/jacsData/yuTest/toolkits/ANTS/WarpImageMultiTransform
Vaa3D=/groups/scicomp/jacsData/yuTest/toolkits/Vaa3D/vaa3d
WORKDIR=/groups/scicomp/jacsData/yuTest/markReconstruction/MouseBrain1335038/temp
prefix=/groups/scicomp/jacsData/yuTest/markReconstruction/MouseBrain1335038/temp/section
suffix=.tif
MOV0=${prefix}101_ref_c0.nii
for((i=100; i>0; i--))
do
    #time ./ireg -f convertColor2Grayscale -i ${prefix}${i}.tif -o ${prefix}${i}_ref.tif
    nn=$((i+1))
    n=$i;
    # Fixed image = previously warped section; moving = current section.
    FIX=${prefix}${nn}"_ref_c0_warped.nii"
    MOV=${prefix}${n}"_ref_c0.nii"
    OUTPUT=${prefix}${n}"_ref_c0_warped.nii"
    AFFINE=$WORKDIR/affine/mi_$n"_"$nn
    # Rigid (rotation+translation) registration via mutual information.
    time $ANTS 2 -m MI[$FIX,${MOV}, 1, 32 ] -o $AFFINE -i 0 --number-of-affine-iterations 10000x10000x10000x10000 --rigid-affine true
    time $WARP 2 ${MOV} ${OUTPUT} -R $FIX ${AFFINE}"Affine.txt"
    MOV2="$WORKDIR/workdir/section_1327717_"$((i+1))"_yflip_c2.nii"
    OUTPUT2="$WORKDIR/workdir/section_1327717_"$((i+1))"_yflip_c2_warped.nii"
    #time $WARP 2 ${MOV} ${OUTPUT} -R $FIX ${AFFINE}"Affine.txt"
    #time $WARP 2 ${MOV2} ${OUTPUT2} -R $FIX ${AFFINE}"Affine.txt"
    #MOV0="$WORKDIR/workdir/section_1327717_"$((i+1))"_yflip_c0.nii"
    #time $Vaa3D -x NiftiImageConverter -i $MOV0 $OUTPUT $OUTPUT2 -o "$WORKDIR/workdir/section_1327717_"$i"_aligned.v3draw" -p "#b 1 #v 1"
    #time $Vaa3D -x NiftiImageConverter -i $OUTPUT $OUTPUT2 -o "$WORKDIR/workdir/section_1327717_"$i"_aligned.v3draw" -p "#b 1 #v 1"
    OUTTIF1=${OUTPUT%*.nii}.tif
    OUTTIF2=${OUTPUT2%*.nii}.tif
    #./ireg -f imageReadWrite -i $OUTPUT -o $OUTTIF1
    #./ireg -f imageReadWrite -i $OUTPUT2 -o $OUTTIF2
    #$Vaa3D -x ireg -f yflip -i $OUTTIF1 -o ${OUTTIF1%*.tif}.v3draw;
    #$Vaa3D -x ireg -f yflip -i $OUTTIF2 -o ${OUTTIF2%*.tif}.v3draw;
    #time $Vaa3D -x ireg -f mergeColorChannels -i $MOV0 ${OUTTIF1%*.tif}.v3draw ${OUTTIF2%*.tif}.v3draw -o "$WORKDIR/workdir/section_1327717_"$i"_aligned.v3draw"
    # Apply the same affine to all three raw channels of the section.
    I1=${prefix}${n}"_c0.nii"
    I2=${prefix}${n}"_c1.nii"
    I3=${prefix}${n}"_c2.nii"
    I1w=${prefix}${n}"_c0_w.nii"
    I2w=${prefix}${n}"_c1_w.nii"
    I3w=${prefix}${n}"_c2_w.nii"
    AFFINE=$WORKDIR/affine/mi_$n"_"$nn"Affine.txt"
    time $WARP 2 ${I1} ${I1w} -R $I1 ${AFFINE};
    time $WARP 2 ${I2} ${I2w} -R $I1 ${AFFINE};
    time $WARP 2 ${I3} ${I3w} -R $I1 ${AFFINE};
    # Merge the three warped channels into one v3draw volume.
    I=${prefix}${n}"_aligned.v3draw"
    time $Vaa3D -x ireg -f NiftiImageConverter -i $I1w $I2w $I3w -o $I -p "#b 1 #v 1";
done
| true |
08d663828c0c008f6aa888461d4d3c2f65c5492f | Shell | trothr/xmitmsgx | /xmitmivp.sh | UTF-8 | 735 | 3.046875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/sh
#
# Name: xmitmivp.sh (shell script)
# Date: 2023-03-24 (Fri)
# Author: Rick Troth, rogue programmer
#
# This script should be run *after* XMITMSGX is installed.
# It exercises the package header and static library,
# found in the standard Unix/Linux/POSIX locations.
#
# Run from the directory holding this script; fall back to ../src when
# invoked from a checkout.  Quoted $(...) replaces the old unquoted
# backtick form, which broke on paths containing spaces, and we now
# abort if the cd fails instead of compiling in the wrong directory.
cd "$(dirname "$0")" || exit 1
# POSIX-safe test chain ([ -a ] is obsolescent).
if [ ! -f xmitmivp.c ] && [ -f ../src/xmitmivp.c ] ; then cd ../src ; fi
#LANG=en_US.utf8
#LANG=en_US
#LANG=""
#export LANG
# Compile the IVP source against the installed header.
cc -o xmitmivp.o -c xmitmivp.c
RC=$? ; if [ $RC -ne 0 ] ; then exit $RC ; fi
# Link against the installed static library.
cc -o xmitmivp xmitmivp.o -lxmitmsgx
RC=$? ; if [ $RC -ne 0 ] ; then exit $RC ; fi
rm xmitmivp.o
# Run the verification program, then clean up.
./xmitmivp
RC=$? ; if [ $RC -ne 0 ] ; then exit $RC ; fi
rm xmitmivp
exit
| true |
e4bfa840a61d96e713bfa88499216ae3b4afe03f | Shell | vazovn/galaxy-core-deployment | /filesender_setup/deploy_filesender_galaxy_config.sh | UTF-8 | 3,179 | 3.734375 | 4 | [] | no_license | #!/bin/bash
MYDIR="$(dirname "$(realpath "$0")")"
# source settings
. ${MYDIR}/../settings.sh
# Set the correct paths in the cluster partition for filesender. This operation shall be done here
# as galaxy user, because the nfs mount is root squash. Root can not run these commands on nielshenrik.
# To complete the setup, log into nielshenrik and run
# chown -R apache FILESENDER_STORAGE
echo "==== Entering Galaxy setup for Filesender ====="
echo "deploy_filesender_galaxy_config : Making Filesender storage and logs directories on cluster ... "
# FILESENDER_STORAGE is presumably defined by settings.sh — TODO confirm
mkdir -p ${FILESENDER_STORAGE}/log
mkdir -p ${FILESENDER_STORAGE}/tmp
mkdir -p ${FILESENDER_STORAGE}/files
echo "deploy_filesender_galaxy_config : Making Simlesaml log directory and file on cluster ... "
# FILESENDER_SIMPLESAML likewise comes from settings.sh — TODO confirm
mkdir -p ${FILESENDER_SIMPLESAML}
touch ${FILESENDER_SIMPLESAML}/simplesamlphp.log
echo "deploy_filesender_galaxy_config : Editing Galaxy configuration files required by Filesender setup..."
function sed_replace {
    # sed_replace PATTERN REPLACEMENT FILE
    # Replace the line matching extended-regex PATTERN in FILE with
    # REPLACEMENT, using '%' as the sed delimiter.
    # A REPLACEMENT ending in "SKIP" deliberately leaves the line untouched;
    # a missing REPLACEMENT or an unmatched PATTERN aborts the whole script.
    # TODO check if string contains %
    if [ -z "$2" ]; then
        # Diagnostics go to stderr so they are not mixed into captured output.
        echo "Error in replacing of line $1 in $3" >&2
        exit 1
    fi
    if [[ "${2:(-4)}" == "SKIP" ]]; then
        echo "$1 not changed"
    elif grep --quiet "$1" "$3"; then
        # "$3" is quoted: a config path containing spaces no longer breaks
        # the grep/sed invocations.
        sed -i -E "s%$1%$2%" "$3"
        echo "replaced $1 with $2"
    else
        echo "Line matching /$1/ not found in $3" >&2
        exit 1
    fi
}
## edit local_env.sh
# Abort early when the Galaxy tree does not contain the expected file.
if [ -f ${GALAXYTREE}/config/local_env.sh ]; then
    echo "local_env.sh found, OK ..."
else
    echo "local_env.sh not found, please check before deploying maintenance scripts!!"
    exit 1
fi
# Rewrite the Filesender DB connection string only when all four settings are present.
if [[ -n "${FILESENDERUSER}" && -n "${FILESENDERPASSWORD}" && -n "${FILESENDERHOST}" && -n "${FILESENDERDBNAME}" ]]; then
    filesenderdbstring="postgresql://${FILESENDERUSER}:${FILESENDERPASSWORD}@${FILESENDERHOST}/${FILESENDERDBNAME}"
    sed_replace '^export FILESENDERDB=.*' "export FILESENDERDB=${filesenderdbstring}" ${GALAXYTREE}/config/local_env.sh
    echo "replaced filesender db in local_env.sh"
else
    echo "Filesender db settings missing from settings.sh"
fi
if [ -n "${FILESENDER_STORAGE}" ]; then
    sed_replace '^export FILESENDER_STORAGE=.*' "export FILESENDER_STORAGE=${FILESENDER_STORAGE}" ${GALAXYTREE}/config/local_env.sh
fi
## edit galaxy.ini
# Enable the demo webhooks and the FTP upload support Filesender relies on.
sed_replace '^# webhooks_dir = .*' "webhooks_dir = config/plugins/webhooks/demo" ${GALAXYTREE}/config/galaxy.ini
sed_replace '^#ftp_upload_dir = .*' "ftp_upload_dir = ${GALAXYTREE}/database/ftp/user_upload" ${GALAXYTREE}/config/galaxy.ini
sed_replace '^#ftp_upload_site = .*' "ftp_upload_site = Galaxy FTP Upload site for big files" ${GALAXYTREE}/config/galaxy.ini
sed_replace '^#ftp_upload_dir_identifier = .*' "ftp_upload_dir_identifier = email" ${GALAXYTREE}/config/galaxy.ini
sed_replace '^#ftp_upload_dir_template' "ftp_upload_dir_template" ${GALAXYTREE}/config/galaxy.ini
sed_replace '^#ftp_upload_purge = .*' "ftp_upload_purge = False" ${GALAXYTREE}/config/galaxy.ini
# edit filesender webhook file
sed_replace 'FILESENDER_URL' "${FILESENDER_URL}" ${GALAXYTREE}/config/plugins/webhooks/demo/filesender/config/filesender.yaml
echo "=== Galaxy Filesender configuration files ready. === "
| true |
703682a1deb93c95f65d318a8ac2a15564c1eb51 | Shell | jschwel1/DistributedNetworkCameraCrawler | /rmdirs.sh | UTF-8 | 427 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# Remove the working directories that were created for each "name" and
# "server" entry listed in the camera-crawler config file.
CONFIG=configs.cfg
IFS="="   # config lines have the form "key = value"; split them on '='
CAMERA_CLIENT_PY=camera_client.py
while read -r key val
do
    # Trim key and val (xargs strips the surrounding whitespace)
    key=$(echo "$key" | xargs)
    val=$(echo "$val" | xargs)
    # Both kinds of entries are handled identically, so the two former
    # duplicate branches are merged.
    if [ "$key" = "name" ] || [ "$key" = "server" ]
    then
        echo "Removing $val directory recursively"
        # Quoted and guarded: never run "rm -r" with an empty or
        # glob-expanded argument.
        [ -n "$val" ] && rm -r -- "$val"
    fi
done < "$CONFIG"
| true |
aa8faaa9062e40d85478ff1f35dbb879d3c63839 | Shell | parsa/ReadyBuildScripts | /python-2.7.9.sh | UTF-8 | 323 | 3.390625 | 3 | [] | no_license | #/bin/bash
# Download, build and install Python 2.7.9 (with shared libs) into
# $LOCAL_PATH. LOCAL_PATH and LOCAL_MAKEFLAGS are assumed to come from the
# caller's environment — TODO confirm.
if [ ! -d $LOCAL_PATH ]; then
    echo "Install directory doesn't exist"
    exit 1
fi
mkdir -p python/tmp
pushd python/
wget https://www.python.org/ftp/python/2.7.9/Python-2.7.9.tgz
tar xf Python-2.7.9.tgz
pushd tmp
# Out-of-tree build in python/tmp; --enable-shared produces libpython2.7.so.
../Python-2.7.9/configure --enable-shared --prefix=$LOCAL_PATH
make $LOCAL_MAKEFLAGS install
| true |
7645a5fc0cda48113db0c037a305a174af9f2281 | Shell | richardTowers/kh3dev | /scripts/greetRobot | UTF-8 | 205 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Sets up the computer's usb port to have the IP address 192.168.1.1
# Guard-clause form: bail out on failure, report success afterwards.
if ! sudo ifconfig usb0 192.168.1.1; then
    echo "Failed to configure usb0"
    exit 1
fi
echo "usb0s IP set to 192.168.1.1"
| true |
10b62bde7405f7910aec0ab99929aeda4fc11c03 | Shell | sayan2407/Task | /task2.sh | UTF-8 | 422 | 3.75 | 4 | [] | no_license | #! /usr/bin/bash
# isPrime NUM — prints 1 when NUM is prime, 0 otherwise.
function isPrime()
{
    num=$1
    # 0, 1 and negatives are not prime. (Previously the trial-division
    # loop never executed for these values, so they were wrongly
    # reported as prime.)
    if [ "$num" -lt 2 ]
    then
        echo 0
        return
    fi
    b=1
    # Trial division up to sqrt(num) is sufficient (i*i <= num).
    for (( i=2; i*i<=num; i++ ))
    do
        if [ $(( num%i )) -eq 0 ]
        then
            b=0
            break
        fi
    done
    if [ $b -eq 1 ]
    then
        echo 1
    else
        echo 0
    fi
}
# Keep drawing random numbers below 10000 until a prime turns up,
# counting how many attempts were needed. (The former 'valid' flag was
# never cleared, so the loop only ever left via 'break'; 'while :' is
# the equivalent explicit form.)
i=0
while :
do
    (( i++ ))
    num=$(( RANDOM * RANDOM % 10000 ))
    if [ "$( isPrime "$num" )" -eq 1 ]
    then
        break
    fi
done
echo "$num is prime"
echo "Number of times : $i"
| true |
b0c5b79f9042dfa8dd6d3fe7d4590665a0bfd1fd | Shell | GefenOnline/kube-noah | /app/modules/utils.sh | UTF-8 | 238 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# ------
# Utils
# ------
function logPrefix() {
    # Print a "dd/mm/yyyy HH:MM:SS [caller]" log prefix; the caller name
    # is omitted when invoked from a sourced file ('source').
    # %m is the month — the previous %M printed the *minute* in the
    # month position of the date stamp.
    local dateFormat
    dateFormat=$(date +"%d/%m/%Y %T")
    # ${FUNCNAME[1]} is quoted so the test no longer errors when this is
    # called at top level (where FUNCNAME[1] is unset).
    if [ "${FUNCNAME[1]}" == 'source' ]; then
        echo "${dateFormat}"
    else
        echo "${dateFormat} [${FUNCNAME[1]}]"
    fi
}
| true |
3de02c506ec333e7aa2ad4571eb818dda8c549da | Shell | NotKit/archlinux-nemo | /mer-qt5-qtsystems/PKGBUILD | UTF-8 | 697 | 2.515625 | 3 | [] | no_license | # Maintainer: TheKit <nekit1000 at gmail.com>
pkgname=qtsystems-git
pkgver=mer/5.1.0+git9.r2.gfd76cbe
pkgrel=1
pkgdesc="Mer Qt System modules"
arch=('i686' 'x86_64' 'aarch64')
url="https://git.merproject.org/mer-core/qtsystems"
license=('GPL')
depends=('qt5-base')
makedepends=('git' 'cor' 'tut')
provides=("${pkgname%-git}")
conflicts=("${pkgname%-git}")
source=('git+https://git.merproject.org/mer-core/qtsystems.git')
md5sums=('SKIP')
# Derive a VCS-style version from the latest git tag:
# "<tag>.r<commits-since-tag>.g<short-hash>", dashes replaced by dots.
pkgver() {
  cd "$srcdir/${pkgname%-git}"
  git describe --long --tags | sed 's/\([^-]*-g\)/r\1/;s/-/./g'
}
# Configure with qmake and compile inside the nested qtsystems/ checkout.
build() {
  cd "$srcdir/${pkgname%-git}/${pkgname%-git}"
  qmake PREFIX=/usr
  make
}
# Install the build results into the packaging root ($pkgdir).
package() {
  cd "$srcdir/${pkgname%-git}"
  make DESTDIR="$pkgdir/" install
}
| true |
1d312897e9dce3d492ffdb6e0d8db8afbd466d0c | Shell | sgomezsaez/Nefolog_Docker | /init_db.sh | UTF-8 | 620 | 2.9375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
# Set the postgres superuser password. POSTGRES_USER and POSTGRES_PSSW are
# assumed to be provided by the container environment — TODO confirm.
sudo -u postgres psql --username $POSTGRES_USER -d postgres -c "alter user postgres with password '$POSTGRES_PSSW'";
psql -f ./Postgres/databases/createUser.sql --username "$POSTGRES_USER"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL
	CREATE DATABASE PGCloudmigration;
	GRANT ALL PRIVILEGES ON DATABASE PGCloudmigration TO $POSTGRES_USER;
EOSQL
echo "Creating all required databases for Nefolog"
# Load the schema scripts into the freshly created database.
psql -f /Postgres/databases/CreatePGCloudmigration.sql --username "$POSTGRES_USER"
psql -f /Postgres/databases/PGCloudmigrationDBv2.sql --username "$POSTGRES_USER" -d PGCloudmigration
| true |
c3c0919841023d47268c568f6bc87fcb48a8213f | Shell | alexkon/jni-test | /src/main/resources/commands.sh | UTF-8 | 570 | 2.625 | 3 | [] | no_license | #!/bin/bash
# Build and run the JNI demo. The include path below targets the macOS
# JavaVM.framework, and .jnilib/-dynamiclib are Darwin conventions.
# NOTE(review): javah was removed in newer JDKs — confirm the toolchain.
cd ../java
# compile java class
javac JniTest.java
# make header file JniTest.h
javah -jni JniTest
# make implementation of JniTest.h -> JniTest.cpp (already present)
# compile implementation of JniTest.h
g++ "-I/System/Library/Frameworks/JavaVM.framework/Versions/Current/Headers" -c JniTest.cpp
# package compiled cpp class into jni library
g++ -dynamiclib -o ../resources/libJniTest.jnilib JniTest.o
# run the program
java -Djava.library.path=../resources JniTest
# remove classes
rm -rf *.class JniTest.h JniTest.o ../resources/libJniTest.jnilib | true |
50876f8555be69cc42c3ad007e0a3abc8230c227 | Shell | FrankPetrilli/PersonalProjects | /other/vim/autolink.sh | UTF-8 | 141 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Resolve the directory holding this script so the symlinks work no matter
# where the script is invoked from.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# Replace any existing vim configuration with symlinks into this repo.
# -f / -rf make the script idempotent: it no longer fails (and aborts the
# remaining commands) when ~/.vimrc or ~/.vim do not exist yet.
rm -f ~/.vimrc
ln -s "$DIR/vimrc" ~/.vimrc
rm -rf ~/.vim
ln -s "$DIR/vim" ~/.vim
| true |
6fe1619e62dbace6a5389719b1b6e2f04d60324b | Shell | WisdomWolf/dotFiles | /.config/yadm/bootstrap.d/install_pyenv_prereqs.sh | UTF-8 | 613 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env bash
# Install pyenv build prerequisites for the detected platform.
OS=$(uname -s)
if [[ "$OS" = "Linux" ]]; then
    # NOTE(review): ID_LIKE in /etc/os-release may be quoted or contain a
    # list (e.g. ID_LIKE="rhel fedora"); this exact-match would miss those.
    ID=$(cat /etc/os-release | grep ID_LIKE | cut -d\= -f 2)
    if [[ "$ID" = "arch" ]]; then
        sudo pacman -S --needed --noconfirm base-devel openssl zlib xz
    elif [[ "$ID" = "debian" ]]; then
        sudo apt update; sudo apt install -y make build-essential libssl-dev zlib1g-dev \
        libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm \
        libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev
    fi
elif [[ "$OS" = "Darwin" ]]; then
    # macOS: the same build dependencies via Homebrew.
    brew install openssl readline sqlite3 xz zlib
fi
| true |
ac730fca3f5550aa4362e1bdb1bc26f8a73b52b4 | Shell | DavidAllio/L1---Tp-Bash | /TP1/ligpa.sh | UTF-8 | 277 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Prompt for a file name and print only its even-numbered lines.
echo -n "Entrer nom du fichier:"
# -r keeps backslashes in the typed file name literal.
read -r nom
if ! test -r "$nom"
then
    echo "Erreur lecture fichier" > /dev/stderr
    exit 1
fi
# Parity toggles between impaire (odd) and paire (even) for each line read.
parite=impaire
# IFS= and -r preserve leading/trailing whitespace and backslashes in the
# echoed lines (plain `read` mangles both).
while IFS= read -r ligne
do
    if test "$parite" = paire
    then
        echo "$ligne"
        parite=impaire
    else
        parite=paire
    fi
done < "$nom"
| true |
b8453f5404d9c5256700d252e726822b5725a3c2 | Shell | casoe/toolbox | /bin/bcs/monlist-import.sh | UTF-8 | 387 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Name : monlist-import.sh
# Autor: Carsten Söhrens
# Imports a monitoring list into BCS via monapi, appending all output to a
# per-day log file under $WORKDIR/log.
FULLDATE=$(date +%Y-%m-%d)
WORKDIR=/opt/projektron/bcs/monlist
FILE=$1
# Change into the working directory; abort if it is missing instead of
# silently running (and logging) in the wrong place.
cd "$WORKDIR" || exit 1
# Abort when no input file was given.
if [ -z "$FILE" ]
then
    echo Import-File fehlt
    exit
fi
# Run the import, capturing stdout and stderr into the dated log.
monapi import "$FILE" 2>&1 |tee -a "$WORKDIR/log/import_$FULLDATE.log"
| true |
9f6835366c4b0cb390ec1ffb243f5c02694d4f2e | Shell | manojiksula/nabla-servers-bower-sample | /docker-build.sh | UTF-8 | 3,034 | 3.453125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#set -xv
#set -u
set -e
WORKING_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# shellcheck source=/dev/null
source "${WORKING_DIR}/step-0-color.sh"
#mkdir ./target/ || true
#cp ${WORKING_DIR}/target/test.war ./target/
# shellcheck disable=SC2154
echo -e "${magenta} Building TEST runtime image ${NC}"
# shellcheck disable=SC2154
echo -e "${green} pip install docker-compose==1.22.0 ${NC}"
if [ -n "${DOCKER_BUILD_ARGS}" ]; then
# shellcheck disable=SC2154
echo -e "${green} DOCKER_BUILD_ARGS is defined ${happy_smiley} : ${DOCKER_BUILD_ARGS} ${NC}"
else
# shellcheck disable=SC2154
echo -e "${red} ${double_arrow} Undefined build parameter ${head_skull} : DOCKER_BUILD_ARGS, use the default one ${NC}"
export DOCKER_BUILD_ARGS="--pull"
#export DOCKER_BUILD_ARGS="--build-arg --no-cache"
echo -e "${magenta} DOCKER_BUILD_ARGS : ${DOCKER_BUILD_ARGS} ${NC}"
fi
readonly DOCKER_REGISTRY=${DOCKER_REGISTRY:-"https://hub.docker.com/"}
readonly DOCKER_ORGANISATION=${DOCKER_ORGANISATION:-"nabla"}
readonly DOCKER_USERNAME=${DOCKER_USERNAME:-"nabla"}
readonly DOCKER_NAME=${DOCKER_NAME:-"nabla-servers-bower-sample"}
readonly DOCKER_TAG=${DOCKER_TAG:-"latest"}
#docker build --target builder .
#docker build --target builder -t aandrieu/test:latest .
#docker build --target runner .
echo -e "${green} Building docker image ${NC}"
echo -e "${magenta} time docker build ${DOCKER_BUILD_ARGS} -f ${WORKING_DIR}/Dockerfile -t \"${DOCKER_ORGANISATION}/${DOCKER_NAME}\" . --tag \"$DOCKER_TAG\" ${NC}"
time docker build ${DOCKER_BUILD_ARGS} -f "${WORKING_DIR}/Dockerfile" -t "${DOCKER_ORGANISATION}/${DOCKER_NAME}" . --tag "${DOCKER_TAG}"
RC=$?
if [ ${RC} -ne 0 ]; then
echo ""
# shellcheck disable=SC2154
echo -e "${red} ${head_skull} Sorry, build failed. ${NC}"
exit 1
else
echo -e "${green} The build completed successfully. ${NC}"
fi
echo -e ""
echo -e "${green} This image is a trusted docker Image. ${happy_smiley} ${NC}"
echo -e ""
echo -e "To push it"
echo -e " docker login ${DOCKER_REGISTRY} --username ${DOCKER_USERNAME} --password password"
echo -e " docker tag ${DOCKER_ORGANISATION}/${DOCKER_NAME}:latest ${DOCKER_REGISTRY}${DOCKER_ORGANISATION}/${DOCKER_NAME}:${DOCKER_TAG}"
echo -e " docker push ${DOCKER_REGISTRY}${DOCKER_ORGANISATION}/${DOCKER_NAME}"
echo -e ""
echo -e "To pull it"
echo -e " docker pull ${DOCKER_REGISTRY}${DOCKER_ORGANISATION}/${DOCKER_NAME}:${DOCKER_TAG}"
echo -e ""
echo -e "To use this docker:"
echo -e " docker run -d -P ${DOCKER_ORGANISATION}/${DOCKER_NAME}"
echo -e " - to attach your container directly to the host's network interfaces"
echo -e " docker run --net host -d -P ${DOCKER_ORGANISATION}/${DOCKER_NAME}"
echo -e ""
echo -e "To run in interactive mode for debug:"
echo -e " docker run -it --entrypoint /bin/bash ${DOCKER_ORGANISATION}/${DOCKER_NAME}:latest"
echo -e " docker run -it -d --name sandbox ${DOCKER_ORGANISATION}/${DOCKER_NAME}:latest"
echo -e " docker exec -it sandbox /bin/bash"
echo -e ""
exit 0
| true |
07f453d1a43c9e076950fb131485ac68c98e27b5 | Shell | huebn090/camera-trap-data-pipeline | /machine_learning/commands_flatten_ml.sh | UTF-8 | 1,451 | 3.0625 | 3 | [] | no_license | #!/bin/bash
export SITE=SER
export SEASON=SER_S15F
echo "This is where you are in the file system ${pwd}, we need to move to the directory that the python script requires"
PYTHONPATH="${PYTHONPATH}:/panfs/roc/groups/5/packerc/huebn090/camera-trap-data-pipeline"
export PYTHONPATH
cd "/panfs/roc/groups/5/packerc/huebn090/camera-trap-data-pipeline"
echo "Current dir:"
pwd
echo "If you want to install a specific package, you can do it in this file (i.e. python -m pip install <package> --user). Or, you can run (module load singularity), then (singularity run -H $(pwd) camera-trap-classifier-latest-cpu.simg), then (i.e. python -m pip install <package> --user)"
# python -m pip install pillow --user
echo "Running script..."
echo "trying faulty import"
python -c "from r.r.r import r"
echo "now trying actual import"
python -c "from utils.utils import set_file_permission" && echo "import successful"
echo "now trying to run main script"
python3 -m machine_learning.flatten_ml_predictions \
--predictions_empty /home/packerc/shared/zooniverse/MachineLearning/${SITE}/${SEASON}_predictions_empty_or_not.json \
--predictions_species /home/packerc/shared/zooniverse/MachineLearning/${SITE}/${SEASON}_predictions_species.json \
--output_csv /home/packerc/shared/zooniverse/MachineLearning/${SITE}/${SEASON}_ml_preds_flat.csv \
--log_dir /home/packerc/shared/zooniverse/MachineLearning/${SITE}/log_files/ \
--log_filename ${SEASON}_flatten_ml_predictions | true |
1a7d6dcfce80137b1127a7fa7600e3524a71e998 | Shell | SDN-Security/TENNISON | /tools/dev/old-topos/SimpleTopology/components/avgcpu.sh | UTF-8 | 117 | 2.671875 | 3 | [
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | i="0"
while [ $i -lt $3 ];
do (ps -C $1 -o pcpu,pmem | tail -n1 | grep -v CPU ) >> $2;
sleep 1;
i=$((i+1))
done;
| true |
ee17cbea23aff3ba1c0f3027bd1e91980ac744b1 | Shell | kawashima-ryosuke/APE | /setup.sh | UTF-8 | 741 | 3.375 | 3 | [] | no_license | #!/bin/bash
echo "**************************************************************"
echo "This shellscript setups for APE_macro"
echo "**************************************************************"
echo "This file write alias in ~/.bashrc"
read -p "OK? [y/n]: " choice
if [ "$choice" = "y" ]; then
touch ~/.bashrc
echo '# alias setting for APE macro' >> ~/.bashrc
echo 'alias maketmp="source /Users/ryousuke/GitHub/APE/temp/temp.sh"' >> ~/.bashrc
echo 'alias epp_temp="echo /Users/ryousuke/GitHub/APE/temp"' >> ~/.bashrc
echo '' >> ~/.bashrc
echo "**************************************************************"
echo "FINIFHED !"
echo "**************************************************************"
else
echo "SELL SCRIPT IS STOPPED"
fi
| true |
d9b9e542b2927b8fc72718d2510f217ccb6d599a | Shell | erikarvstedt/modules-path-dev | /lib.sh | UTF-8 | 820 | 3.6875 | 4 | [] | no_license | export rootDir=$PWD
export buildDir=$rootDir/build
# Extract glibc source of erikarvstedt/nixpkgs/add-modules-path
# to ./glibc-2.33/
createSrc() {(
set -eo pipefail
unpackPhase
cd "${sourceRoot:-.}"
git init
git add .
git commit -m "init at glibc-2.33" >/dev/null
patchPhase || true
git add .
git commit -m $'apply nixpkgs patches\n\nApply patchPhase of pkgs.glibc'
)}
# Incrementally build glibc to ./build with debugging enabled.
# This takes <10 min on a desktop system
build() {(
set -eo pipefail
sourceRoot=glibc-2.33
if [[ ! -e $sourceRoot ]]; then
createSrc
fi
if [[ ! -e build/ ]]; then
(cd $sourceRoot; configurePhase)
fi
(cd build; buildPhase)
)}
gccCustomGlibc() {
gcc -Wl,--rpath="$buildDir" \
-Wl,--dynamic-linker="$buildDir/elf/ld.so" \
"$@"
}
| true |
ad394bfa3f558c3574d532df90ad1e021b367b8e | Shell | jeageun/AOS_LAB | /AOS1/perf_measure.sh | UTF-8 | 1,466 | 2.5625 | 3 | [] | no_license | for opt_random_access in 0 1
do
for ANON in 0 1
do
for POPULATE in 0 1
do
for SHARED in 1
do
for MSET in 0 1
do
rm perf.o
CFLAGS="-DMMAP_ALLOC -DFORK "
if [ $ANON -eq 1 ]
then
CFLAGS=$CFLAGS"-DANON "
fi
if [ $POPULATE -eq 1 ]
then
CFLAGS=$CFLAGS"-DPOPULATE "
fi
if [ $SHARED -eq 1 ]
then
CFLAGS=$CFLAGS"-DSHARED "
fi
if [ $MSET -eq 1 ]
then
CFLAGS=$CFLAGS"-DMSET "
fi
if [ $opt_random_access -eq 1 ]
then
CFLAGS=$CFLAGS"-Dopt_random_access=1 "
else
CFLAGS=$CFLAGS"-Dopt_random_access=0 "
fi
echo $CFLAGS
gcc $CFLAGS open_read_perf.c -o perf.o
for j in {1..5}
do
echo $j
./perf.o >> output.txt
sleep 2
done
done
done
done
done
done
| true |
ef7f90110bbbaae8c19378c4765cc94ed49ef271 | Shell | Azure-Samples/gaming-lamp | /azurecli/bash/4-create-golden-image.sh | UTF-8 | 1,483 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#############################################################################################
# Ensure you have logged in to Azure with your credentials prior to running this script
# az login
# Ensure that you have the Azure subscription ID, it should show up after you have logged in and it has the format:
# "id": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
# Ensure that you have installed in the virtual machine all you need prior to creating the image
#############################################################################################
#############################################################################################
# General variables used in the different Azure CLI commands run from this script
export YOURSUBSCRIPTIONID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
export RESOURCEGROUPNAME=myResourceGroup
export REGIONNAME=japanwest
# Variables for creating the golden image
export VMNAME=myVirtualMachine
export GOLDENIMAGENAME=myGoldenImage
#############################################################################################
#############################################################################################
# Connect to Azure
az login
# Set the Azure subscription
az account set \
--subscription $YOURSUBSCRIPTIONID
echo Creating the golden image named $GOLDENIMAGENAME using $VMNAME as a source
az image create \
--resource-group $RESOURCEGROUPNAME \
--source $VMNAME \
--name $GOLDENIMAGENAME \
--os-type Linux | true |
6e3c8db9c8a393ecec54e72396b81902d853f2d0 | Shell | wwjiang007/yugabyte-db | /yugabyted-ui/apiserver/scripts/openapi_bundle.sh | UTF-8 | 362 | 3.078125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"OpenSSL"
] | permissive | #!/usr/bin/env bash
export NPM_BIN=`npm bin -g 2>/dev/null`
pushd ../conf/openapi
echo "Processing paths component in openapi ..."
pushd paths
rm -rf _index.yaml
yq eval-all '. as $item ireduce ({}; . * $item )' $(ls -r *.yaml) > _index.yaml
popd
echo "Running bundle on openapi spec ..."
$NPM_BIN/openapi bundle ./openapi.yaml --output ../openapi.yml
popd
| true |
f83f6a913be8d198dce12cb45b8fa767c187a516 | Shell | toastyrye/textbank | /install.sh | UTF-8 | 534 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env bash
cd
mkdir -p textbank
cd textbank
curl -O https://raw.githubusercontent.com/toastyrye/textbank/master/messagessend.py
curl -O https://raw.githubusercontent.com/toastyrye/textbank/master/sendMessage.applescript
curl -O https://raw.githubusercontent.com/toastyrye/textbank/master/body.txt
curl -O https://raw.githubusercontent.com/toastyrye/textbank/master/contacts.csv
if ! command -v textbank &> /dev/null; then
cat <<EOF >> ~/.bash_profile
textbank() {(
cd ~/textbank && python messagessend.py
)}
EOF
fi
| true |
799e40db706c333145db891ea86e3a04e77d0c77 | Shell | twobit/spreadsheet | /docs.sh | UTF-8 | 2,395 | 4.34375 | 4 | [] | no_license | #!/bin/bash
# Documentation generation script. Accepts directory as input, and uses bash tools to pull doc strings and generate a
# markdown file. Resulting file is a little rough, but I prefer this to using a large build system.
#
# Usage: ./doc.sh src/Formulas
SRC_DIRECTORY=$1
CURRENT_BLOCK=""
BLOCK_MARKER="\`\`\`"
CAPTURE_NEXT_FUNCTION_NAME=""
FINALIZED_BLOCK=""
DOCUMENTATION_FILE=DOCS.md
function start_block() {
LINE="$1"
CURRENT_BLOCK=""
add_to_block "$LINE"
}
function add_to_block() {
LINE="$1"
LINE=$(echo "$LINE" | sed 's/\*\**//g' | sed 's/^\s*\/\s*//g' | sed 's/\s*\/\s*$//g' | sed "s/^[ \s]*//")
CURRENT_BLOCK="${CURRENT_BLOCK}\n""$LINE"
}
function process_line() {
LINE=$1
if [[ $LINE =~ \s*\/\*\*$ ]]
then
# If this line matches the opening of a doc comment, start block
start_block "$LINE"
elif [[ $LINE =~ \s*\*\/$ ]]
then
# If this line matches the end of a doc comment, end block
add_to_block "$LINE"
CAPTURE_NEXT_FUNCTION_NAME="TRUE"
elif [[ $LINE =~ \s*\*.*$ ]]
then
# If this line starts with a *, and ends with anything, it's inside a comment.
add_to_block "$LINE"
else
if [[ -n "$CAPTURE_NEXT_FUNCTION_NAME" ]]
then
CAPTURE_NEXT_FUNCTION_NAME=""
# Take the current block, strip line endings where necessary.
CURRENT_BLOCK=$(printf "$CURRENT_BLOCK" | paste -sd " " - | sed -e $'s/@/\\\n@/g')
# Grab the function name
FUNCTION_NAME=$(echo "$LINE" | grep -oE '[A-Z0-9_\$]{1,14}\s')
# Build the finalized block
FINALIZED_BLOCK="\n### ${FUNCTION_NAME}\n${FINALIZED_BLOCK}"
FINALIZED_BLOCK=$FINALIZED_BLOCK$(printf "\n$BLOCK_MARKER\n$CURRENT_BLOCK\n$BLOCK_MARKER")
# Write block
echo "Writing formula documentation for: $FUNCTION_NAME"
printf "$FINALIZED_BLOCK\n" >> $DOCUMENTATION_FILE
FINALIZED_BLOCK=""
fi
fi
}
function parse_file() {
echo "Writing documentation for file: $FILE"
FILE=$1
# Write the file name to the documentation file
FILE_TITLE=$(basename $FILE .ts)
printf "## $FILE_TITLE\n\n" >> $DOCUMENTATION_FILE
# Loop through every line in file.
while IFS= read -r p; do
process_line "$p"
done < "$FILE"
}
# Write the header of the documentation
printf "# Documentation\n\n" > $DOCUMENTATION_FILE
for f in $(ls $SRC_DIRECTORY/*.ts | grep -v AllFormulas)
do
parse_file "$f"
done
| true |
cf31d8f501653b6e5a2a831f4a23ca9cd21451e3 | Shell | ExEiS09/MongoDB_compose | /MONGO/run.sh | UTF-8 | 3,128 | 3.578125 | 4 | [] | no_license | #!/bin/bash
if [[ -z "$CLUSTER" ]]
then
echo "Please enter the cluster node names with comma-separated key-value"
exit 1
fi
if [[ -z "$ADMIN_NAME" || -z "$ADMIN_PASS" ]]
then
echo 'Specify the init administrator DB and password with ADMIN_NAME and ADMIN_PASS'
exit 2
fi
if [[ -z "$RS_NAME" ]]
then
echo 'Specify the replicaset RS_NAME!'
exit 3
fi
if [[ -z "$BASE_NAME" || -z "$BASE_USER" || -z "$BASE_PASSWORD" ]]
then
echo 'Specify the base name, base user and base password with BASE_NAME BASE_USER and BASE_PASSWORD'
exit 4
fi
if [[ -z "$CONTAINER_NAME" ]]
then
echo 'Specify the CONTAINER_NAME as environment variable'
exit 5
fi
pstst=$BASE_PASSWORD
psw=$ADMIN_PASS
sed -i "s/rs0/$RS_NAME/g" /etc/mongo/mongod.conf
#### Create admin user through mongo <
admincreate=$(cat <<EOF
db.createUser(
{
user: "$ADMIN_NAME",
pwd: "$ADMIN_PASS",
roles: [
{ role: "root", db: "admin" }
]
}
)
EOF
)
### Create base user through mongo <
usercreate=$(cat <<EOF
db.createUser(
{
user: "$BASE_USER",
pwd: "$BASE_PASSWORD",
roles: [
{ role: "readWrite", db: "$BASE_NAME" },
{ role: "read", db: "reporting" }
]
}
)
EOF
)
CONTAINER_BASE_DIR="/media/data/mongodb"
if [ -z "$LOG_DIR" ]; then
LOG_DIR=${CONTAINER_BASE_DIR}/logs
fi
if [ -z "$DATA_DIR" ]; then
DATA_DIR=${CONTAINER_BASE_DIR}/mongodb
fi
export LOG_DIR
export DATA_DIR
# Log dir create if not exist
[ ! -d "$LOG_DIR" ] && mkdir -p $LOG_DIR
# Data dir create if not exist
[ ! -d "$DATA_DIR" ] && mkdir -p $DATA_DIR
### Initialize non-secured cluster
/usr/bin/mongod --dbpath $DATA_DIR --logpath $LOG_DIR/mongo.log --pidfilepath /tmp/mongo.pid --bind_ip 0.0.0.0 --fork
rootdb='admin'
userdb=$BASE_NAME
echo $admincreate > admin.js
echo $usercreate > user.js
mongo $rootdb < admin.js
echo 'use $BASE_NAME' > db.js
mongo < db.js
mongo $userdb < user.js
pkill mongod
### We need this, 'cause not all instances can be up and running in 10 seconds :(
sleep 30
### Initialize secured cluster...
/usr/bin/mongod --config /etc/mongo/mongod.conf --fork
sleep 30
### Initialize ReplicaSet
### We need this section because we need to initiate replica set only on ONE node. And we need the environment variable that contain ankor for choosing the only one master.
if [[ $(echo $CONTAINER_NAME) == *'1'* ]] || [[ $(echo $CONTAINER_NAME) == *'master'* ]]
then
mongo admin -u ${ADMIN_NAME} -p ${ADMIN_PASS} --eval "rs.initiate()"
fi
for i in $(echo "${CLUSTER[@]}" | sed "s/,/ /g")
do
mongo admin -u ${ADMIN_NAME} -p ${ADMIN_PASS} --eval "rs.slaveOk()"
mongo admin -u ${ADMIN_NAME} -p ${ADMIN_PASS} --eval "rs.add( { host: \"${i}:27017\" } )"
#, priority: 0, votes: 0
done
### Delete all init JS....
rm -rf user.js admin.js db.js
### Klling instance one more time with sleep...
pkill mongod
sleep 30
### And finally start with initiated cluster and RS, in theory...
sed -i '/processManagement/d' /etc/mongo/mongod.conf
sed -i '/fork/d' /etc/mongo/mongod.conf
sed -i '/pidFilePath/d' /etc/mongo/mongod.conf
/usr/bin/mongod --config /etc/mongo/mongod.conf
| true |
db5731125de88077b73058475786b1f084fa565b | Shell | ajbergh/create_test_data | /create_empty_data.sh | UTF-8 | 2,435 | 4.25 | 4 | [] | no_license | #!/bin/bash
#########################################################################################
# Veeam Create Empty Data Script #
# (c)2019 adam.bergh@veeam.com #
# Usage: create_empty_data.sh <Top Dirs> <Sub Dirs> <Files per Dir> #
# This script requires GNU Parallel in be installed "apt-get install parallel" #
# This script uses touch to create empty data files #
# #
# #
#########################################################################################
if [ -z $1 ] || [ -z $3 ]
then
echo "create_empty_data.sh (c) 2019 Veeam Software adam.bergh@veeam.com"
echo ""
echo "Usage: create_empty_data.sh <Count of Top Directories> <Count of Sub Dirs in Each Top Dir> <Number of Files in Each Dir>"
echo "Example: create_empty_data.sh.sh 5 10 10"
echo "This would create 5 top level directories and 10 sub directories in each top level directory and 10 files in each"
exit
fi
rootdirs=$1
subdirs=$2
numfiles=$3
#filesize=$4
echo "You are about to create $rootdirs top level directories with $subdirs sub-directories in each one."
echo "Each folder will contain $numfiles files - Is this correct?"
read -p "Press [Enter] key to start, otherwise hit ctrl+c..."
echo "Starting data creation at $(date +%y/%m/%d) at $(date +%H:%M:%S). Please Wait...."
#Capture Start Time of Copy
start=`date +%s`
string1=""
####THIS LOOP CREATES NESTED DIRECTORY TREE######
for (( n=1; n<=$rootdirs; n++ ))
do
string1=""
string2=""
string2="folder$n-1"
for (( p=2; p<=$subdirs; p++ ))
do
string1="folder$n-$p"
string2=$string2\/$string1
done
#echo $string2
mkdir -p $string2
done
################################################3
find . -type d > dirs.txt #put directory tree into a text file for use in the below loop
#CREATE EMPTY FILES WITH TOUCH ##################################
while read dir;
do
seq -w 0 $numfiles | parallel touch $dir\/file{}.dat
printf ". "
done <dirs.txt
###########################################################
echo ""
rm dirs.txt
#Capture end time of script
end=`date +%s`
runtime=$((end-start))
echo "#########################################################################################"
echo "Done! Copy finished at $(date +%y/%m/%d) at $(date +%H:%M:%S) - Thanks for using Veeam!"
echo "It took $runtime seconds to complete this job"
echo "#########################################################################################"
| true |
170822ac0a0b594358ba04c91260b6a3eb794d8c | Shell | diegour1/javaprojects | /n3_ParqueEmpresarial/bin/mac/buildTest.sh | UTF-8 | 1,261 | 2.9375 | 3 | [] | no_license | #!/bin/sh
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Universidad de los Andes (Bogot� - Colombia)
# Departamento de Ingenier�a de Sistemas y Computaci�n
# Licenciado bajo el esquema Academic Free License version 2.1
#
# Proyecto Cupi2 (http://cupi2.uniandes.edu.co)
# Ejercicio: n3_parqueEmpresarial
# Autor: Equipo Cupi2 2015
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
stty -echo
# ---------------------------------------------------------
# Asegura la creaci�n de los directorios classes y lib en test
# ---------------------------------------------------------
cd ../../test
mkdir classes
mkdir lib
# ---------------------------------------------------------
# Compila las clases del directotio test/source
# ---------------------------------------------------------
cd source
javac -nowarn -classpath ../../lib/parqueEmpresarial.jar;../lib/junit.jar -d ../classes/ uniandes/cupi2/parqueEmpresarial/test/*.java
# ---------------------------------------------------------
# Crea el archivo jar a partir de los archivos compilados
# ---------------------------------------------------------
cd ../classes
jar cf ../lib/parqueEmpresarialTest.jar uniandes/* -C ../data .
cd ../../bin
stty echo | true |
e3e59d6a263d8f53607395753cc98568d60343a7 | Shell | stuvusIT/dirty_scripts | /test_io.sh | UTF-8 | 2,471 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env bash
[ $USER != "root" ] && { sudo $0 $@; exit $?; }
BASE_DIR="$( dirname "$( realpath -s "${BASH_SOURCE[0]}" )" )/"
cd $BASE_DIR
set -e
set -u
set -o pipefail
function LOGIT {
echo -e -n "\e[1;33m$*\e[0m"
}
function TEST {
local t_start=$(date +%s)
$*
wait
local t_end=$(date +%s)
local t_dur=$((t_end - t_start))
LOGIT "\e[32m$t_dur seconds!\n"
}
function TEST_100M {
dd if=/tmp/test.bin.img of=/io_test/test.bin.img bs=1M count=100 conv=fsync iflag=fullblock,sync oflag=sync &>/dev/null
dd if=/io_test/test.bin.img of=/dev/null bs=1M count=100 iflag=fullblock,sync oflag=sync &>/dev/null
}
function TEST_M {
local tasks=$1
local task_size=$2
local mod=$(( 104857600 / task_size ))
echo -n -e "\r\t\t\tTest processes are starting"
for i in `seq 1 $tasks`; do
(
skip=$(( i % mod ))
# echo -e "\ntasks=$tasks, task_size=$task_size, skip=$skip, i=$i"
dd if=/tmp/test.bin.img of=/io_test/test_${tasks}_${task_size}_${i}.bin.img bs=${task_size} count=1 conv=fsync iflag=fullblock,sync oflag=sync skip=$skip &>/dev/null
dd if=/io_test/test_${tasks}_${task_size}_${i}.bin.img of=/dev/null bs=${task_size} iflag=fullblock,sync oflag=sync &>/dev/null
rm /io_test/test_${tasks}_${task_size}_${i}.bin.img
) &
echo -n -e "\r\t\t\t$i of $tasks processes are started. \r"
done
local num_children=`ps --no-headers -o pid --ppid=$$ | wc -w`
while [ $num_children -gt 1 ]; do
echo -n -e "\r\t\t\tWait for $num_children processes. \r"
sleep 0.5
num_children=`ps --no-headers -o pid --ppid=$$ | wc -w`
done
echo -n -e "\r\t\t\t \r\t\t\t"
wait
sync
}
ulimit -n 65536
ulimit -u 65536
ulimit -a
LOGIT "\nPress return to start IO-Test\n"
read
mkdir -pv /io_test/
LOGIT "Generate binary test image\n"
dd if=/dev/urandom of=/tmp/test.bin.img bs=1M status=progress conv=fsync iflag=fullblock,sync oflag=sync count=100 &>/dev/null
t_start=$(date +%s)
LOGIT "Test 1 task @100M...\t"
TEST TEST_100M
for tasks in 10 100 1000 2000 5000; do
LOGIT "Test $tasks tasks @1K..."
TEST TEST_M $tasks 1024
done
for tasks in 10 100 1000; do
LOGIT "Test $tasks tasks @1M..."
TEST TEST_M $tasks 1048576
done
for tasks in 10 50; do
LOGIT "Test $tasks tasks @10M."
TEST TEST_M $tasks 10485760
done
rm -rfv /io_test/
sync
t_end=$(date +%s)
t_dur=$((t_end - t_start))
LOGIT "\n\n\tOverall it takes about \e[32m$t_dur\e[33m seconds.\n\n\n"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.