blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
da2848983815cd736194c521df081a8ff6691de0 | Shell | 17000-or-bust/reservations | /database/mongoLoader.sh | UTF-8 | 2,350 | 3.5 | 4 | [] | no_license | #!/bin/bash
echo -e "\n# -------------------------------------"
echo "# STARTING MONGO LOADING SEQUENCE"
echo "# -------------------------------------"

# All CSV fixtures live in this directory. Centralizing the path removes the
# repetition of the original script and fixes the bug where the final header
# cleanup used relative paths while every other step used absolute ones.
DATA_DIR=/Users/ecuyle/Documents/HackReactor/sdc/reservations/data

start=$(date +'%T')
SECONDS=0  # bash builtin: counts elapsed seconds from this assignment

echo "Dropping existing reservations db"
mongo reservations --eval "db.dropDatabase()"
echo -e "Success.\n"

# mongoimport's --headerline flag needs a CSV header row, so prepend one to
# each file. (BSD sed syntax: -i '' edits in place without a backup file.)
echo -e "\nAdding the following headers to reservations.csv:"
echo "id,restaurant_id,date,time"
sed -i '' $'1i\\\nid,restaurant_id,date,time\n' "$DATA_DIR/reservations.csv"
echo -e "Success.\n"

echo -e "\nAdding the following headers to restaurants.csv:"
echo "id,max_party_size,max_days_to_book,has_rewards,time_slot_interval,start_hour,end_hour,bookings_today"
sed -i '' $'1i\\\nid,max_party_size,max_days_to_book,has_rewards,time_slot_interval,start_hour,end_hour,bookings_today\n' "$DATA_DIR/restaurants.csv"
echo -e "Success.\n"

echo -e "\nStarting copying process at: $start"
echo "Copying restaurants.csv to restaurants collection..."
mongoimport -d reservations -c restaurants --type csv --file "$DATA_DIR/restaurants.csv" --headerline
echo -e "Success."
echo "Indexing id column..."
mongo reservations --eval "db.restaurants.createIndex({ id: 1 })"
echo -e "Success.\n"

echo "Copying reservations.csv to reservations collection..."
mongoimport -d reservations -c reservations --type csv --file "$DATA_DIR/reservations.csv" --headerline
echo -e "Success.\n"
echo "Indexing id column..."
mongo reservations --eval "db.reservations.createIndex({ id: 1 })"
echo -e "Success.\n"
echo "Indexing restaurant_id column..."
mongo reservations --eval "db.reservations.createIndex({ restaurant_id: 1 })"
echo -e "Success.\n"

echo "Removing CSV headers from reservations.csv && restaurants.csv"
# BUG FIX: the original stripped headers from ./reservations.csv and
# ./restaurants.csv (relative to whatever the CWD was) instead of the files
# it had actually modified under $DATA_DIR.
sed 1d "$DATA_DIR/reservations.csv" > "$DATA_DIR/reservations.csv.bak" && mv "$DATA_DIR/reservations.csv.bak" "$DATA_DIR/reservations.csv"
sed 1d "$DATA_DIR/restaurants.csv" > "$DATA_DIR/restaurants.csv.bak" && mv "$DATA_DIR/restaurants.csv.bak" "$DATA_DIR/restaurants.csv"
echo -e "Success.\n"

end=$(date +'%T')
echo "Finished copying process at: $end"
duration=$SECONDS
echo -e "\nSuccess. Elapsed time: $duration seconds\n"
echo "# -------------------------------------"
echo "# FINISHED MONGO LOADING SEQUENCE"
echo "# -------------------------------------"
| true |
c97657cc7b98c022e87b5d49458f6d481eaef8c2 | Shell | osir/dotfiles | /home/config/i3/scripts/shutdown.sh | UTF-8 | 283 | 3.109375 | 3 | [] | no_license | #!/usr/bin/env bash
# Power menu: offer Shutdown/Reboot/Logout via rofi and run the matching
# systemd / i3 command. Escaping the menu or picking nothing does nothing.
menu="Shutdown\nReboot\nLogout"
picked=$(echo -e "$menu" | rofi -dmenu -p do -i)

if [ "$picked" = "Shutdown" ]; then
    systemctl poweroff
elif [ "$picked" = "Reboot" ]; then
    systemctl reboot
elif [ "$picked" = "Logout" ]; then
    i3-msg exit
fi
| true |
720c99f17a45c58c529c65f1e121e6eec10c2b60 | Shell | mohsen-mehrabani/Bash-Programming | /Projects/RegularExpressions/sed.sh | UTF-8 | 212 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# Strip the "header" of each file given on the command line: delete
# everything up to and including the first blank line, then drop any
# remaining blank lines.
E_BadArgs=65
if [ $# -eq 0 ]
then
  # $(...) replaces the original backticks; quoting "$0" survives spaces.
  echo "Usage: $(basename "$0") file "
  exit $E_BadArgs
else
  # "for i" iterates over "$@" implicitly; make that explicit, and quote
  # "$i" so filenames containing whitespace are passed intact. "--" stops
  # sed from treating a leading-dash filename as an option.
  for i in "$@"
  do
    sed -e '1,/^$/d' -e '/^$/d' -- "$i"
  done
fi | true |
85712819cb71037e05022f3e61e70d59ba373d6d | Shell | petronny/aur3-mirror | /mars-shooter-svn/PKGBUILD | UTF-8 | 1,314 | 3.046875 | 3 | [] | no_license | # Maintainer: Sven-Hendrik Haase <sh@lutzhaase.com>
# Package metadata for the mars shooter SVN build (Arch Linux PKGBUILD).
pkgname=mars-shooter-svn
pkgver=192 # SVN revision pinned by build() below
pkgrel=1
pkgdesc="A ridiculous space shooter with nice graphics"
arch=(i686 x86_64)
url="http://mars-game.sourceforge.net/"
license=('GPL')
depends=('sfml-git' 'taglib' 'fribidi')
makedepends=('cmake' 'subversion')
provides=('mars-shooter')
conflicts=('mars-shooter')
# No static sources/checksums: everything comes from the SVN checkout.
source=()
md5sums=()
_svntrunk=https://mars-game.svn.sourceforge.net/svnroot/mars-game
_svnmod=mars-game # directory name of the local checkout
# makepkg build step: check out (or update) the pinned SVN revision, copy the
# checkout to a separate build directory, then configure with CMake and make.
# $srcdir, $pkgver, $_svnmod and $_svntrunk come from the PKGBUILD scope.
build() {
  cd "$srcdir"
  if [ -d $_svnmod/.svn ]; then
    # Existing checkout: update it to the pinned revision. The subshell
    # keeps the cd from leaking into the rest of the function.
    (cd $_svnmod && svn up -r $pkgver)
  else
    svn co $_svntrunk --config-dir ./ -r $pkgver $_svnmod
  fi
  msg "SVN checkout done or server timeout"
  msg "Starting make..."
  # Build out-of-tree: wipe and recreate a pristine copy of the checkout.
  rm -rf "$srcdir/$_svnmod-build"
  cp -r "$srcdir/$_svnmod" "$srcdir/$_svnmod-build"
  cd "$srcdir/$_svnmod-build"
  cd build && cmake ..
  make
}
# makepkg package step: install the game data, binary, and desktop
# integration files into $pkgdir (the fakeroot staging directory).
package() {
  cd "$srcdir/$_svnmod-build"
  mkdir -p "$pkgdir/usr/share/marsshooter"
  mkdir -p "$pkgdir/usr/bin/"
  cp -r credits.txt data/* license.txt "$pkgdir/usr/share/marsshooter"
  cp mars "$pkgdir/usr/bin/mars-shooter"
  # BUG FIX: quote the -name pattern so the shell cannot expand '*.svn'
  # against the CWD before find sees it; -depth + "-exec rm -rf {} +" also
  # handles whitespace in paths, unlike the original unquoted xargs pipeline.
  find "$pkgdir" -depth -name '*.svn' -exec rm -rf {} +
  # Point the .desktop launcher at the renamed binary.
  sed -i 's/Exec=.*/Exec=mars-shooter/g' resources/mars.desktop
  install -Dm644 resources/mars.desktop "$pkgdir/usr/share/applications/mars.desktop"
  install -Dm644 resources/mars.png "$pkgdir/usr/share/pixmaps/mars.png"
}
| true |
ec77c96fd04f51139564cdd2b4292b619246b486 | Shell | ivy-rew/debianDevSystem | /bin/loc.sh | UTF-8 | 422 | 3.921875 | 4 | [] | no_license | #!/bin/bash
# lines-of-code counter.
# Counts the total number of lines and the number of "code" lines (lines
# containing at least one alphanumeric character) across every file under
# the current directory matching a glob pattern.
# Usage: loc.sh [pattern]    # pattern defaults to *.java
# inspired by https://stackoverflow.com/questions/6924158/eclipse-count-lines-of-code

what="*.java"
if [ -n "$1" ]; then
	what="$1"
fi

count(){
	local where="$1"
	local all lines
	# BUG FIX: quote "$what" so the shell cannot expand the glob against
	# the CWD before find receives it (the original broke whenever files
	# matching the pattern existed in the working directory).
	all=$(find "$where" -name "$what" -exec cat {} \; | wc -l)
	lines=$(find "$where" -name "$what" -exec grep "[a-zA-Z0-9]" {} \; | wc -l)
	echo "lines of ${what} code in ${where}: all=${all} code=${lines}"
}

count "$PWD"
| true |
bf0f3d807330dd1de0a497e8d375b59842ff9e29 | Shell | kanokkorn/RL-bot | /run.sh | UTF-8 | 1,000 | 3.25 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Install Docker CE on a Debian-based ARM system (32- or 64-bit).
# Update & upgrade existing packages first.
sudo apt-get update && sudo apt-get upgrade -y
# Prerequisites for adding an HTTPS apt repository.
sudo apt-get install -y apt-transport-https ca-certificates curl gnupg2 software-properties-common
# Fetch Docker's official PGP key and display its fingerprint for checking.
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add - && sudo apt-key fingerprint 0EBFCD88
# Pick the apt repository matching the CPU architecture reported by dpkg.
# NOTE(review): unsupported architectures (e.g. amd64) fall through silently
# and no repository is added, so the install below would then fail.
arch=$(dpkg --print-architecture)
if [[ $arch == armhf* ]]; then
  echo "ARM x32 Architecture"
  sudo add-apt-repository \
  "deb [arch=armhf] https://download.docker.com/linux/debian \
  $(lsb_release -cs) \
  stable"
elif [[ $arch == arm64* ]]; then
  echo "ARM x64 Architecture"
  sudo add-apt-repository \
  "deb [arch=arm64] https://download.docker.com/linux/debian \
  $(lsb_release -cs) \
  stable"
fi
# Refresh the package index and install Docker CE plus its runtime pieces.
sudo apt-get update -y && sudo apt-get install -y docker-ce docker-ce-cli containerd.io
| true |
2c4aa066118340cc52f6db1a84d346b2b3df4bfa | Shell | ssh352/simple-binary-encoding-mdp3 | /parse_pcaps.sh | UTF-8 | 238 | 3.03125 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Takes the path of a pcap file and creates "<path>.parsed" next to it by
# running the gradle readCMEPcaps task.
fullpath=$1
#if [[ $(find $1 -type f -size +500 >/dev/null) ]] ; then
echo "$fullpath"
# Invoke gradle directly instead of building a command string and eval-ing
# it: eval re-splits and re-expands the path, so any whitespace or shell
# metacharacters in the filename would break (or be executed).
./gradlew readCMEPcaps --args="$fullpath ${fullpath}.parsed"
| true |
30cda151ffb5819cf0f9fb0cb4418b37128083ab | Shell | SarahBeecroft/Linkage | /DOWNLOADS.sh | UTF-8 | 1,000 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Fetch linkdatagen, its annotation/mapping files, samtools, and the hg19
# reference genome, install merlin via conda, then unpack everything.
echo 'Starting downloads. This will take a few minutes'
wget http://bioinf.wehi.edu.au/software/linkdatagen/linkdatagen.pl
wget http://bioinf.wehi.edu.au/software/linkdatagen/vcf2linkdatagen.pl
wget http://bioinf.wehi.edu.au/software/linkdatagen/annotation/affymetrix/mappingfiles_affy_build37.tgz
wget http://bioinf.wehi.edu.au/software/linkdatagen/annotation/annotIllumina.tgz
wget https://sourceforge.net/projects/samtools/files/samtools/0.1.19/samtools-0.1.19.tar.bz2
wget http://hgdownload.cse.ucsc.edu/goldenpath/hg19/bigZips/hg19.fa.gz
wget http://bioinf.wehi.edu.au/software/linkdatagen/annotation/mps/annotHapMap3U.txt.gz
wget http://bioinf.wehi.edu.au/software/linkdatagen/annotation/mps/annotHapMap2U.txt.gz
conda install -c bioconda merlin
echo 'Decompressing files'
tar -xzvf mappingfiles_affy_build37.tgz
tar -xvzf annotIllumina.tgz
# NOTE(review): merlin-1.1.2.tar.gz is extracted here but never downloaded
# above (merlin is installed via conda instead) — this tar call likely
# fails unless the archive is already present. Confirm intent.
tar -xzvf merlin-1.1.2.tar.gz
tar -xf samtools-0.1.19.tar.bz2
gunzip annotHapMap2U.txt.gz
gunzip annotHapMap3U.txt.gz
gunzip hg19.fa.gz
| true |
da9912368e2a7ec9e04ba3059ec5ac7c6c6a5088 | Shell | talhassid/046266 | /wet1/part1_test | UTF-8 | 474 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# Regression tests for the part1 scanner: run it over the sample inputs and
# compare the output against the expected token streams.
EXPECTED_DIR=/home/talhassid/046266/example

#test1 — error-handling sample (stdout only)
./part1 < "$EXPECTED_DIR/example2-err.cmm" > part1-err.res
# diff exits non-zero when the files differ (or cannot be read); checking
# the exit status is more robust than the original backtick capture, which
# stayed silent when diff itself failed (e.g. a missing expected file).
if ! diff part1-err.res "$EXPECTED_DIR/example2-err.tokens" > /dev/null
then
    echo "test 1 fail"
    sdiff part1-err.res "$EXPECTED_DIR/example2-err.tokens"
fi

#test2 — regular sample; >& also captures stderr into the result file
./part1 < example/example.cmm >& part1.res
if ! diff part1.res example/example.tokens > /dev/null
then
    echo "test 2 fail"
    sdiff part1.res example/example.tokens
fi
| true |
15502b2b44e8d4d92805680f690ca67ebd0e0e82 | Shell | KeitaW/dotfiles | /bin/bash/jpg2png | UTF-8 | 219 | 3.4375 | 3 | [] | no_license | #!/bin/sh
#
# jpg2png - convert a JPEG file to a PNG file
# (header previously said "eps2svg - convert eps file to svg"; fixed to
# describe what the script actually does)
#
if [ $# -ne 1 ]; then
	cat<<EOF > /dev/stderr
usage:
	$0 input.[jpeg, jpg]
EOF
	exit 1
fi
# alter_extension is an external helper that swaps the file's extension.
output=$(alter_extension "$1" png)
# BUG FIX: ImageMagick expects "convert <input> [options] <output>". The
# original ran "convert $output ... $1", i.e. it tried to read the
# not-yet-existing PNG and would have overwritten the JPEG input.
convert "$1" -quality 100 "$output"
| true |
a52d1e1655d4f7af5eca183fb81d833fdf4a0fe2 | Shell | gstrtoint/stoq-utils | /git-hooks/pre-commit | UTF-8 | 336 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# git pre-commit hook.
# Step 1: git diff --check exits with status 2 when the staged changes
# contain conflict markers or whitespace errors; reject the commit then.
git diff --check --cached
check_status=$?
if [ "$check_status" -eq 2 ]; then
    echo "Failed git diff --check. Please fix conflict markers and whitespace errors before committing."
    exit 1
fi

set -e

# pwd is already set to the project's root
# Step 2: run the project's source checks over the staged files.
if ! utils/source-tests.sh --staged; then
    echo "Before commiting correct all the source problems.";
    exit 1;
fi
| true |
b55912662c5b3fe4d0e17e869898a00dc408ea55 | Shell | commandless/commandinstall | /vagrant/install.sh | UTF-8 | 2,232 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# install backend
# Copy the repo's vimrc into the parent directory for the provisioned user.
cp vimrc ../.vimrc
# Install Docker CE from Docker's official Ubuntu (disco) apt repository.
function DockerInstall {
  curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
  # Write the apt source as root (the redirect happens inside the sudo'd shell).
  sudo bash -c 'echo "deb [arch=amd64] https://download.docker.com/linux/ubuntu disco stable" > /etc/apt/sources.list.d/docker-ce.list'
  sudo apt-get update
  apt-cache policy docker-ce
  sudo apt-get install -y docker-ce
}
# Install build prerequisites and GraalVM CE (Java 8), register its bin
# directory on PATH (now and in /root/.bashrc), and add native-image.
function BackendPreInstall {
  GraalvmVersion=graalvm-ce-java8-linux-amd64-19.3.1
  sudo apt-get -y install build-essential libz-dev zlib1g-dev axel unzip zip
  if [ ! -f "${GraalvmVersion}.tar.gz" ]; then
    axel -n 10 -a -c "https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-19.3.1/${GraalvmVersion}.tar.gz"
    # BUG FIX: the original ran `tar zxf ${GraalvmVersion}` without the
    # .tar.gz suffix, so the freshly downloaded archive was never found.
    tar zxf "${GraalvmVersion}.tar.gz"
  fi
  FolderName=$( find /root/ -type d -name "graalvm-ce-java8*" )
  echo "$FolderName"
  # The single-quoted part keeps ${PATH} literal inside .bashrc so it is
  # expanded at shell startup rather than now.
  echo 'export PATH=${PATH}'":${FolderName}/bin" >> /root/.bashrc
  export PATH=${PATH}":${FolderName}/bin"
  source /root/.bashrc
  gu install native-image
}
# Clone the backend repo and build a native image via the Maven wrapper.
function BackendInstall {
  cd /root/
  git clone https://github.com/commandless/commandless-backend
  cd commandless-backend
  ./mvnw clean install -Pnative
}
# Launch the native backend binary in the background.
function BackendRun {
  /root/commandless-backend/target/cmdls-0.0.1-SNAPSHOT-runner &
}
# Start the backing PostgreSQL 10.5 container (removed automatically on stop).
function PostgrRun {
  docker run -d --ulimit memlock=-1:-1 -it --rm=true --memory-swappiness=0 \
  --name cmdls_postgres -e POSTGRES_USER=cmdls_user -e POSTGRES_PASSWORD=cmdls_passw0rd \
  -e POSTGRES_DB=cmdls_db -p 5432:5432 postgres:10.5
}
# Install Node.js 13.x from NodeSource (replaces the distro nodejs/npm).
function FrontendPreInstall {
  # apt -y install nodejs npm
  # npm install -g npm@latest
  curl -sL https://deb.nodesource.com/setup_13.x | sudo -E bash -
  sudo apt-get install -y nodejs
}
# Fetch the frontend as a zip of the master branch (no git credentials
# needed) and install its npm dependencies.
function FrontendInstall {
  cd /root/
  # git clone git@github.com:commandless/commandless-frontend.git
  # cd commandless-frontend
  wget 'https://github.com/commandless/commandless-frontend/archive/master.zip'
  unzip master.zip
  # npx degit sveltejs/template svelte-app
  mv commandless-frontend-master commandless-frontend
  cd commandless-frontend
  cd svelte-app
  npm install
}
# Start the frontend dev server in the background.
function FrontendRun {
  # BUG FIX: the checkout created by FrontendInstall lives in
  # /root/commandless-frontend ("fronted" was a typo). Bail out if the cd
  # fails so npm does not run in whatever directory we happened to be in.
  cd /root/commandless-frontend || return 1
  npm run dev &
}
# Provision everything in dependency order: toolchain + backend build,
# Docker + Postgres, backend service, then Node.js + frontend.
BackendPreInstall
BackendInstall
DockerInstall
PostgrRun
BackendRun
FrontendPreInstall
FrontendInstall
FrontendRun
| true |
f994b3079e0bd81eb813fdff138ad3315a4ab1b1 | Shell | robbert-vdh/dotfiles | /bin/fix-realtime-scheduling | UTF-8 | 2,677 | 3.859375 | 4 | [] | no_license | #!/usr/bin/env bash
#
# Some applications like to spawn high priority realtime threads. This can cause
# latency spikes during DAW usage. To prevent this, this script will set those
# threads to `SCHED_OTHER` and revoke their permissions to use realtime
# scheduling. (although they could still use rtkit if that's enabled)
set -euo pipefail
thread_blacklist_re='^(webrtc_audio_mo|InotifyEventThr|AudioIPC Callba)$'
process_blacklist_re='^(/usr/bin/telegram-desktop|/usr/lib/(firefox|signal-|thunderbird))'
# To make it easier to see what's going on and which threads we're going to
# reschedule, we'll print all realtime threads with their thread names and the
# first part of their command, and we'll mark the threads we're going to
# reschedule. Excluding pid 2 and all children of that will hide the kernel
# threads since we don't want to mess with those.
realtime_threads=$(ps Hah --ppid 2 -p 2 --deselect -o rtprio:30,tid:30,user:30,comm:30,command |
# HACK: Since thread names can contain spaces and ps doesn't have a way to
# specify a delimiter, we'll hack around this a bit by making every
# every column fixed width and then manually adding tabs. This would
# have been neater with some proper parsing, but, oh well...
# We'll also truncate the last column for readability's sake.
sed -E 's/^(.{30}) /\1\t/;
s/(\t.{30}) /\1\t/;
s/(\t.{30}) /\1\t/;
s/(\t.{30}) /\1\t/;
s/ +//g;
s/(\t[^ ]+)( [^\t]+)?$/\1/' |
awk -F$'\t' '($1 > 0) {
if ($4 ~ THREAD_RE || $5 ~ PROCESS_RE) { printf "x\t" } else { printf " \t" }
print $1 "\t" $2 "\t" $3 "\t" $4 "\t" $5;
}' THREAD_RE="$thread_blacklist_re" PROCESS_RE="$process_blacklist_re" |
column -ts$'\t')
need_rescheduling=$(echo "$realtime_threads" | awk '$1 == "x" { print $3 }')
echo "$realtime_threads"
# A process could keep spawning new realtime threads using rtkit. If that's the
# case, then disabling rtkit is the only solution. This can be done with:
#
# sudo systemctl mask --now rtkit-daemon.service
if systemctl is-active --quiet rtkit-daemon.service; then
echo -e "\nNotice: rtkit is currently running, consider disabling it if new realtime processes keep popping up"
fi
if [[ -z $need_rescheduling ]]; then
echo -e "\nNothing to do here, modify the blacklists if needed."
else
echo -e "\nSetting all marked threads to SCHED_OTHER with RLIMIT_RTPRIO=0..."
echo "$need_rescheduling" | sudo xargs --no-run-if-empty -n1 prlimit --rtprio=0 -p
echo "$need_rescheduling" | sudo xargs --no-run-if-empty -n1 chrt -po 0
fi
| true |
b40835ab2769d6311786a65b57fc6cf05eafa1cc | Shell | Kimeiga/cs35l-mine | /ass2/sameln2 | UTF-8 | 2,099 | 4.3125 | 4 | [] | no_license | #!/bin/bash
# sameln: scan a directory for regular files with identical contents and
# replace each duplicate with a hard link to the lexicographically earlier
# file (dot-files take precedence over the rest).
dir=$1 #Get the first argument
RESULT=`ls -a $dir | sort` #Sort the files lexicographically based on the locale
IFS=$'\n' #Set the delimiter as new line so files with spaces are recognized
declare -a DOTSARRAY #Array to initially hold files beginning with .
declare -a OTHERARRAY #Array to hold every other regular file
let dotCount=0 #Count for the DOTSARRAY
let count=0 #Count for OTHERARRAY
for FILE in $RESULT #Loop through the output of the ls command
do
	if [ ! -r "$dir/$FILE" ] #If the file has no read permissions, throw error
	then
		echo "You do not have permissions for the file $dir/$FILE"
	fi
	if [[ ! -L "$dir/$FILE" && -r "$dir/$FILE" ]] #Make sure file is not symbolic and readable
	then
		if [ -f "$dir/$FILE" ] #Make sure it is a regular file
		then
			if [ "${FILE:0:1}" == "." ] #If the file begins with a . store in DOTSARRAY
			then
				DOTSARRAY[$dotCount]="$dir/$FILE"
				let dotCount=dotCount+1
			else
				OTHERARRAY[$count]="$dir/$FILE" #Else store in OTHERARRAY
				let count=count+1
			fi
		fi
	fi
done
DOTSARRAY=("${DOTSARRAY[@]}" "${OTHERARRAY[@]}") #Concatenate two arrays so all . files are at the beginning and the others are behind it sorted lexicographically according to locale
let count=${#DOTSARRAY[@]} #Let the overall count be the size of the concatenated array
exec 2>/dev/null
for (( i=0; i<$count; i++ )) #Compare each element with every later element
do
	for (( j=$i+1; j<$count; j++ ))
	do
		if [[ -s "${DOTSARRAY[$i]}" && -s "${DOTSARRAY[$j]}" || ! -s "${DOTSARRAY[$i]}" && ! -s "${DOTSARRAY[$j]}" ]] #If the files are both empty or both not empty
		then
			# BUG FIX: the original captured cmp's stdout ("cmp -s" never
			# prints anything) and then ran `[ ! $compare ]`, which expands
			# to `[ ! ]` — always true — so every pair in the same
			# emptiness class got linked regardless of content. Test cmp's
			# exit status instead: 0 means the files are byte-identical.
			if cmp -s -- "${DOTSARRAY[$i]}" "${DOTSARRAY[$j]}"
			then
				echo "yes"
				ln -f -- "${DOTSARRAY[$i]}" "${DOTSARRAY[$j]}" #Replace the later file with a hardlink to the higher precedence one
			fi
		fi
	done
done
| true |
fa04a230661cd6dc57b02562ffe87864062f0ce5 | Shell | Chen-Jialin/Computational-Physics-Exercises-and-Assignments | /Week-7/Example-1/loop.sh | UTF-8 | 958 | 3.296875 | 3 | [
"MIT"
] | permissive | #! /bin/bash
# Loop over a range of lattice constants, running VASP for each one, to find
# the lattice constant that minimizes the total energy of an fcc crystal.
BIN=$HOME/bin/vasp_std # path to the VASP executable
# Remove the wavefunction file left by any previous run (each calculation
# starts from the crystal structure) and the summary produced by the last
# run of this script.
rm WAVECAR SUMMARY.fcc
for i in 3.5 3.6 3.7 3.8 3.9 4.0 4.1 4.2 4.3 ; do # try each lattice constant in turn
# Write the POSCAR input file for this lattice constant.
cat >POSCAR <<!
fcc:
$i
 0.5 0.5 0.0
 0.0 0.5 0.5
 0.5 0.0 0.5
1
cartesian
0 0 0
!
# BUG FIX: a here-doc delimiter must appear alone on its line; the original
# appended a comment directly after "!", so the here-doc never terminated
# and the rest of the script was swallowed as POSCAR content.
echo "a= $i" ; $BIN # print the lattice constant being used, then run VASP
# Grab the line containing "F=" (the final energy) from OSZICAR and append
# "<lattice constant> <energy line>" to the summary log.
E=`awk '/F=/ {print $0}' OSZICAR` ; echo $i $E >>SUMMARY.fcc
done # end of loop over lattice constants
cat SUMMARY.fcc # print the summary produced by this run
| true |
fa516c2fb37d60f4d82f651517f8b8e71b363437 | Shell | ajitsingh25/weblogic_platform | /core/commands/ant/resources/secondary/secondary.txt_tmp | UTF-8 | 1,437 | 3.1875 | 3 | [] | no_license | #!/bin/sh
PLATFORM_HOME="@@CONFIG_HOME@@"
SCRIPT_HOME="$PLATFORM_HOME/custom/resources/secondary"
SCRIPT_PRECHECK="$SCRIPT_HOME/preCheck.sh"
SCRIPT_JDK="$SCRIPT_HOME/install_jdk.sh"
SCRIPT_WLS="$SCRIPT_HOME/install_WLS.sh"
SCRIPT_PATCH="$SCRIPT_HOME/applyPatch.sh"
SCRIPT_UNPACK="$SCRIPT_HOME/wlsUnpack.sh"
SCRIPT_NM="$SCRIPT_HOME/MS_Update_Nodemanager.sh"
SCRIPT_MONITORING="$SCRIPT_HOME/setupCron.sh"
chmod +x $SCRIPT_HOME/*.sh
#System Pre Check
#sh $SCRIPT_PRECHECK
#Install JDK
sh $SCRIPT_JDK
if [ "$?" -ne "0" ]; then
echo "JDK Installation Failed, please run installation script Manually. Aborting .."
exit
fi
#Install WLS
sh $SCRIPT_WLS
if [ "$?" -ne "0" ]; then
echo "WLS Installation Failed, please run installation script Manually. Aborting .."
exit
fi
#Apply Patch
sh $SCRIPT_PATCH
if [ "$?" -ne "0" ]; then
echo "WLS Patching Failed, please run patching script Manually. Aborting .."
exit
fi
#Domain Unpack
sh $SCRIPT_UNPACK
if [ "$?" -ne "0" ]; then
echo "WLS domain unpack Failed, please run unpack script Manually. Aborting .."
exit
fi
#Configure Node Manager
sh $SCRIPT_NM
if [ "$?" -ne "0" ]; then
echo "WLS NodeManager configuration Failed, please run node manager script Manually. Aborting .."
exit
fi
#Configure Host Monitoring Scripts
sh $SCRIPT_MONITORING
if [ "$?" -ne "0" ]; then
echo "Host Monitoring script configuration failed, please run the script Manually. Aborting .."
exit
fi
| true |
bea6aa70a3ce3e846899b9fdfe6c5b0045f279c1 | Shell | leobbb/shellscript | /auto_run_svn_up.sh | UTF-8 | 1,016 | 4 | 4 | [] | no_license | #!/bin/bash
##########################################################
# Use to update svn directory in present work directory.
# Author: yanzhenxing
# Date: 20151214
# Usage: run directly
##########################################################
HOMEPATH=$PWD
FILENAME=$HOMEPATH"/"update_info_$(date '+%Y-%m-%d_%H-%M')
function func()
{
cd $1
# echo "Now in $PWD" >> $FILENAME
svn info > /dev/null 2>&1
if [ 0 == $? ]
then
echo "$PWD is a svn directory." | tee -a $FILENAME
svn up 2>&1 | tee -a $FILENAME
echo "Result: $PWD had updated." | tee -a $FILENAME
echo "" | tee -a $FILENAME
else
#echo "This is not a svn directory."
for file in `ls $1`
do
if [ -d $1"/"$file ]
then
func $1"/"$file
fi
done
fi
}
echo `date` | tee -a $FILENAME
echo "Start to update..." | tee -a $FILENAME
echo "HOMEPATH = $HOMEPATH" | tee -a $FILENAME
func $HOMEPATH
cd $HOMEPATH
echo `date` | tee -a $FILENAME
echo "Everything is done." | tee -a $FILENAME
| true |
7bc0062d9bb956fdac8d2c704dbc8307f26ccd80 | Shell | jroquelaure/training-ci | /setup_jenkins/bootstrap.sh | UTF-8 | 3,597 | 3.296875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# bootstrap jenkins
# param 1: Artifactory password
# param 2: Jenkins administrator password
permission=conan-ci2
user=conan
password="conan2020"
address="jfrog.local"
artifactory_pass=$1
jenkins_pass=$2
if [[ $# -ne 2 ]] ; then
echo 'Please provide passwords for Artifactory and Jenkins. You will find them in your orbitera e-mail.'
exit 1
fi
echo "------ Artifactory configuration ------"
curl -uadmin:${artifactory_pass} -XPOST http://${address}/artifactory/api/security/groups/readers -d '{"autoJoin":"false"}' -H "Content-Type: application/json"
echo "create repo"
sed "s/<REPO_NAME>/conan-tmp/" templates/create_repo.json | sed "s/<REPO_TYPE>/conan/" | sed "s/<REPO_LAYOUT>/conan-default/" > conan-tmp.json
sed "s/<REPO_NAME>/conan-develop/" templates/create_repo.json | sed "s/<REPO_TYPE>/conan/" | sed "s/<REPO_LAYOUT>/conan-default/" > conan-develop.json
sed "s/<REPO_NAME>/conan-metadata/" templates/create_repo.json | sed "s/<REPO_TYPE>/generic/" | sed "s/<REPO_LAYOUT>/simple-default/" > conan-metadata.json
sed "s/<REPO_NAME>/app-debian-sit-local/" templates/create_repo.json | sed "s/<REPO_TYPE>/debian/" | sed "s/<REPO_LAYOUT>/simple-default/" > debian-sit.json
sed "s/<REPO_NAME>/app-debian-uat-local/" templates/create_repo.json | sed "s/<REPO_TYPE>/debian/" | sed "s/<REPO_LAYOUT>/simple-default/" > debian-uat.json
curl -uadmin:${artifactory_pass} -XPUT http://${address}/artifactory/api/repositories/conan-tmp -T conan-tmp.json -H "Content-Type: application/json"
curl -uadmin:${artifactory_pass} -XPUT http://${address}/artifactory/api/repositories/conan-develop -T conan-develop.json -H "Content-Type: application/json"
curl -uadmin:${artifactory_pass} -XPUT http://${address}/artifactory/api/repositories/conan-metadata -T conan-metadata.json -H "Content-Type: application/json"
curl -uadmin:${artifactory_pass} -XPUT http://${address}/artifactory/api/repositories/app-debian-sit-local -T debian-sit.json -H "Content-Type: application/json"
curl -uadmin:${artifactory_pass} -XPUT http://${address}/artifactory/api/repositories/app-debian-uat-local -T debian-uat.json -H "Content-Type: application/json"
echo "create user"
sed "s/<USER>/${user}/" templates/create_user.json | sed "s/<PASSWORD>/${password}/" > user.json
curl -uadmin:${artifactory_pass} -XPUT http://${address}/artifactory/api/security/users/${user} -T user.json -H "Content-Type: application/json"
echo "create permission"
sed "s/<USER>/${user}/" templates/create_permission.json | sed "s/<NAME>/${permission}/" | sed "s/<REPO1>/conan-tmp/"| sed "s/<REPO2>/conan-develop/" | sed "s/<REPO3>/conan-metadata/"| sed "s/<REPO4>/app-debian-sit-local/" | sed "s/<REPO5>/app-debian-uat-local/" > permission.json
curl -uadmin:${artifactory_pass} -XPUT http://${address}/artifactory/api/v2/security/permissions/${permission} -T permission.json -H "Content-Type: application/json"
echo "------ Conan client configuration ------"
conan config install https://github.com/conan-ci-cd-training/settings.git
conan user -p ${password} -r conan-develop ${user}
conan user -p ${password} -r conan-tmp ${user}
git config --global user.email "you@example.com"
git config --global user.name "Your Name"
echo "------ Set labs scripts permission ------"
find .. -name "*.sh" -exec chmod +x {} \;
echo "------ Jenkins configuration ------"
conan_build_info --v2 start conan-app 1
docker exec -it jenkins /bin/bash -c "curl https://raw.githubusercontent.com/conan-io/training-ci/master/setup_jenkins/init_jenkins.sh -O;chmod +x init_jenkins.sh;./init_jenkins.sh ${artifactory_pass} ${jenkins_pass}"
| true |
f9143b86621313507a0b3f4c9462032cf5236fec | Shell | wangjunjack/deepsight | /kubernetes/cronjob/database_backup.sh | UTF-8 | 624 | 2.875 | 3 | [] | no_license | #!/bin/sh
# 使用外部变量传入关键名称和地址,如果:DB_HOST等
export DUMP_FILE="$DB_NAME-$(date +"%F-%H%M%S").sql"
export MC_HOSTS="$S3_PROTOCOL://$S3_ACCESS_KEY:$S3_SECRET_KEY@$S3_HOST"
echo "mysqldump -h$DB_HOST -P$DB_PORT -u$DB_USER -p$DB_PASSWORD --database $DB_NAME > $DB_NAME-$(date +"%F-%H%M%S").sql.gz"
mysqldump -h$DB_HOST -P$DB_PORT -u$DB_USER -p$DB_PASSWORD --database $DB_NAME > $DUMP_FILE
if [ -n "S3_HOST" ]; then
export MC_HOSTS_store="$S3_PROTOCOL://$S3_ACCESS_KEY:$S3_SECRET_KEY@$S3_HOST"
echo "mc cp $DUMP_FILE store/$S3_BUCKET"
mc cp $DUMP_FILE store/backup/aliyun-database/
fi | true |
e0703a574d656b82b37b40721c137201d1c7e3ee | Shell | annahri/utility | /dual-monitor.sh | UTF-8 | 644 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# If second monitor attached on startup, set it up!
monitor_num="$(xrandr | grep -w connected | wc -l)"
[[ $monitor_num -eq 1 ]] && exit
monitor_prim="$(xrandr | grep -wi primary | awk '{print $1}')"
monitor_sec="$(xrandr | grep -w connected | grep -v primary | awk '{print $1}')"
monitor_sec_best="$(xrandr | grep "$monitor_sec" -A 5 | grep -w "1920x1080" | awk '{print $1}')"
# echo "Secondary monitor : $monitor_sec"
# echo "Best resolution : $monitor_sec_best"
# read -p "Proceed?"
xrandr --output "$monitor_sec" --mode "$monitor_sec_best" --set audio force-dvi --set "Broadcast RGB" "Full" --right-of "$monitor_prim"
| true |
cf77c8f35f4b239486dfdbd48246e8518f39ff4a | Shell | GUI/postgis-docker | /test | UTF-8 | 13,258 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -Eeuo pipefail
exitcode=0
printf "16beta3-postgis-3.4.0: "
output=$(docker run -v $(pwd)/test.sh:/docker-entrypoint-initdb.d/test.sh -e POSTGRES_PASSWORD=password --rm 'localhost:5000/ghcr.io/gui/postgis:build-cache-16beta3-postgis-3.4.0' 2>&1 || true)
if [[ "$output" =~ "PostGIS Tests: Error" ]]; then
exitcode=1
echo "Error"
echo "$output"
echo
elif [[ "$output" =~ "PostGIS Tests: OK" ]]; then
echo "OK"
else
exitcode=1
echo "Unexpected Error"
echo "$output"
echo
fi
printf "16beta3-postgis-3.4.0-pgrouting-3.5.0: "
output=$(docker run -v $(pwd)/test.sh:/docker-entrypoint-initdb.d/test.sh -e POSTGRES_PASSWORD=password --rm 'localhost:5000/ghcr.io/gui/pgrouting:build-cache-16beta3-postgis-3.4.0-pgrouting-3.5.0' 2>&1 || true)
if [[ "$output" =~ "PostGIS Tests: Error" ]]; then
exitcode=1
echo "Error"
echo "$output"
echo
elif [[ "$output" =~ "PostGIS Tests: OK" ]]; then
echo "OK"
else
exitcode=1
echo "Unexpected Error"
echo "$output"
echo
fi
printf "16beta3-bullseye-postgis-3.4.0: "
output=$(docker run -v $(pwd)/test.sh:/docker-entrypoint-initdb.d/test.sh -e POSTGRES_PASSWORD=password --rm 'localhost:5000/ghcr.io/gui/postgis:build-cache-16beta3-bullseye-postgis-3.4.0' 2>&1 || true)
if [[ "$output" =~ "PostGIS Tests: Error" ]]; then
exitcode=1
echo "Error"
echo "$output"
echo
elif [[ "$output" =~ "PostGIS Tests: OK" ]]; then
echo "OK"
else
exitcode=1
echo "Unexpected Error"
echo "$output"
echo
fi
printf "16beta3-bullseye-postgis-3.4.0-pgrouting-3.5.0: "
output=$(docker run -v $(pwd)/test.sh:/docker-entrypoint-initdb.d/test.sh -e POSTGRES_PASSWORD=password --rm 'localhost:5000/ghcr.io/gui/pgrouting:build-cache-16beta3-bullseye-postgis-3.4.0-pgrouting-3.5.0' 2>&1 || true)
if [[ "$output" =~ "PostGIS Tests: Error" ]]; then
exitcode=1
echo "Error"
echo "$output"
echo
elif [[ "$output" =~ "PostGIS Tests: OK" ]]; then
echo "OK"
else
exitcode=1
echo "Unexpected Error"
echo "$output"
echo
fi
printf "15.4-postgis-3.4.0: "
output=$(docker run -v $(pwd)/test.sh:/docker-entrypoint-initdb.d/test.sh -e POSTGRES_PASSWORD=password --rm 'localhost:5000/ghcr.io/gui/postgis:build-cache-15.4-postgis-3.4.0' 2>&1 || true)
if [[ "$output" =~ "PostGIS Tests: Error" ]]; then
exitcode=1
echo "Error"
echo "$output"
echo
elif [[ "$output" =~ "PostGIS Tests: OK" ]]; then
echo "OK"
else
exitcode=1
echo "Unexpected Error"
echo "$output"
echo
fi
printf "15.4-postgis-3.4.0-pgrouting-3.5.0: "
output=$(docker run -v $(pwd)/test.sh:/docker-entrypoint-initdb.d/test.sh -e POSTGRES_PASSWORD=password --rm 'localhost:5000/ghcr.io/gui/pgrouting:build-cache-15.4-postgis-3.4.0-pgrouting-3.5.0' 2>&1 || true)
if [[ "$output" =~ "PostGIS Tests: Error" ]]; then
exitcode=1
echo "Error"
echo "$output"
echo
elif [[ "$output" =~ "PostGIS Tests: OK" ]]; then
echo "OK"
else
exitcode=1
echo "Unexpected Error"
echo "$output"
echo
fi
printf "15.4-bullseye-postgis-3.4.0: "
output=$(docker run -v $(pwd)/test.sh:/docker-entrypoint-initdb.d/test.sh -e POSTGRES_PASSWORD=password --rm 'localhost:5000/ghcr.io/gui/postgis:build-cache-15.4-bullseye-postgis-3.4.0' 2>&1 || true)
if [[ "$output" =~ "PostGIS Tests: Error" ]]; then
exitcode=1
echo "Error"
echo "$output"
echo
elif [[ "$output" =~ "PostGIS Tests: OK" ]]; then
echo "OK"
else
exitcode=1
echo "Unexpected Error"
echo "$output"
echo
fi
printf "15.4-bullseye-postgis-3.4.0-pgrouting-3.5.0: "
output=$(docker run -v $(pwd)/test.sh:/docker-entrypoint-initdb.d/test.sh -e POSTGRES_PASSWORD=password --rm 'localhost:5000/ghcr.io/gui/pgrouting:build-cache-15.4-bullseye-postgis-3.4.0-pgrouting-3.5.0' 2>&1 || true)
if [[ "$output" =~ "PostGIS Tests: Error" ]]; then
exitcode=1
echo "Error"
echo "$output"
echo
elif [[ "$output" =~ "PostGIS Tests: OK" ]]; then
echo "OK"
else
exitcode=1
echo "Unexpected Error"
echo "$output"
echo
fi
printf "14.9-postgis-3.4.0: "
output=$(docker run -v $(pwd)/test.sh:/docker-entrypoint-initdb.d/test.sh -e POSTGRES_PASSWORD=password --rm 'localhost:5000/ghcr.io/gui/postgis:build-cache-14.9-postgis-3.4.0' 2>&1 || true)
if [[ "$output" =~ "PostGIS Tests: Error" ]]; then
exitcode=1
echo "Error"
echo "$output"
echo
elif [[ "$output" =~ "PostGIS Tests: OK" ]]; then
echo "OK"
else
exitcode=1
echo "Unexpected Error"
echo "$output"
echo
fi
printf "14.9-postgis-3.4.0-pgrouting-3.5.0: "
output=$(docker run -v $(pwd)/test.sh:/docker-entrypoint-initdb.d/test.sh -e POSTGRES_PASSWORD=password --rm 'localhost:5000/ghcr.io/gui/pgrouting:build-cache-14.9-postgis-3.4.0-pgrouting-3.5.0' 2>&1 || true)
if [[ "$output" =~ "PostGIS Tests: Error" ]]; then
exitcode=1
echo "Error"
echo "$output"
echo
elif [[ "$output" =~ "PostGIS Tests: OK" ]]; then
echo "OK"
else
exitcode=1
echo "Unexpected Error"
echo "$output"
echo
fi
# Run the PostGIS smoke test (test.sh) inside one candidate image and report
# the outcome on stdout.
# Globals:   sets exitcode=1 on any failure.
# Arguments: $1 - human-readable label printed before the result
#            $2 - fully qualified image reference to run
run_postgis_test() {
  local label=$1 image=$2 output
  printf '%s: ' "$label"
  # test.sh is executed by the postgres entrypoint; capture all output so we
  # can grep for the marker lines it prints.  '|| true' keeps a failing
  # container from aborting the whole script.
  output=$(docker run -v "$(pwd)/test.sh:/docker-entrypoint-initdb.d/test.sh" -e POSTGRES_PASSWORD=password --rm "$image" 2>&1 || true)
  if [[ "$output" =~ "PostGIS Tests: Error" ]]; then
    exitcode=1
    echo "Error"
    echo "$output"
    echo
  elif [[ "$output" =~ "PostGIS Tests: OK" ]]; then
    echo "OK"
  else
    # Neither marker seen: the container crashed before the tests ran.
    exitcode=1
    echo "Unexpected Error"
    echo "$output"
    echo
  fi
}

# Remaining image variants to verify.  The repository is 'pgrouting' for tags
# that bundle pgRouting and 'postgis' otherwise; the build-cache tag is
# always "build-cache-<label>".  This replaces 14 copy-pasted stanzas.
for label in \
  14.9-bullseye-postgis-3.4.0 \
  14.9-bullseye-postgis-3.4.0-pgrouting-3.5.0 \
  13.12-postgis-3.4.0 \
  13.12-postgis-3.4.0-pgrouting-3.5.0 \
  13.12-bullseye-postgis-3.4.0 \
  13.12-bullseye-postgis-3.4.0-pgrouting-3.5.0 \
  12.16-postgis-3.4.0 \
  12.16-postgis-3.4.0-pgrouting-3.5.0 \
  12.16-bullseye-postgis-3.4.0 \
  12.16-bullseye-postgis-3.4.0-pgrouting-3.5.0 \
  11.21-bookworm-postgis-3.3.4 \
  11.21-bookworm-postgis-3.3.4-pgrouting-3.5.0 \
  11.21-bullseye-postgis-3.3.4 \
  11.21-bullseye-postgis-3.3.4-pgrouting-3.5.0; do
  repo=postgis
  [[ "$label" == *pgrouting* ]] && repo=pgrouting
  run_postgis_test "$label" "localhost:5000/ghcr.io/gui/${repo}:build-cache-${label}"
done
# exitcode may have been set by earlier checks above this chunk; default to 0.
exit "${exitcode:-0}"
| true |
5cc88be27a9b26531863c3448f9d6dacc096b778 | Shell | biowdl/cromwell-resource-crawler | /tests/data/call-ConvertDockerTagsFile/execution/script | UTF-8 | 2,265 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Cromwell-generated task script for the ConvertDockerTagsFile call: converts
# the dockerImages.yml task input to dockerImages.json in the execution dir,
# teeing stdout/stderr to files and recording the command's exit code.
cd /cromwell-executions/RNAseq/82f8c9a5-384c-46fd-8ca0-fc2909f8922c/call-ConvertDockerTagsFile/execution
# Create the per-task temp dir and remember its path.
tmpDir=$(mkdir -p "/cromwell-executions/RNAseq/82f8c9a5-384c-46fd-8ca0-fc2909f8922c/call-ConvertDockerTagsFile/tmp.45f999fc" && echo "/cromwell-executions/RNAseq/82f8c9a5-384c-46fd-8ca0-fc2909f8922c/call-ConvertDockerTagsFile/tmp.45f999fc")
chmod 777 "$tmpDir"
export _JAVA_OPTIONS=-Djava.io.tmpdir="$tmpDir"
export TMPDIR="$tmpDir"
export HOME="$HOME"
(
cd /cromwell-executions/RNAseq/82f8c9a5-384c-46fd-8ca0-fc2909f8922c/call-ConvertDockerTagsFile/execution
)
# FIFOs let the task's stdout/stderr be teed into files while still streaming.
out82f8c9a5="${tmpDir}/out.$$" err82f8c9a5="${tmpDir}/err.$$"
mkfifo "$out82f8c9a5" "$err82f8c9a5"
trap 'rm "$out82f8c9a5" "$err82f8c9a5"' EXIT
tee '/cromwell-executions/RNAseq/82f8c9a5-384c-46fd-8ca0-fc2909f8922c/call-ConvertDockerTagsFile/execution/stdout' < "$out82f8c9a5" &
tee '/cromwell-executions/RNAseq/82f8c9a5-384c-46fd-8ca0-fc2909f8922c/call-ConvertDockerTagsFile/execution/stderr' < "$err82f8c9a5" >&2 &
# The actual task command: YAML -> JSON via an inline Python heredoc.
(
cd /cromwell-executions/RNAseq/82f8c9a5-384c-46fd-8ca0-fc2909f8922c/call-ConvertDockerTagsFile/execution
set -e
mkdir -p "$(dirname ./dockerImages.json)"
python <<CODE
import json
import yaml
with open("/cromwell-executions/RNAseq/82f8c9a5-384c-46fd-8ca0-fc2909f8922c/call-ConvertDockerTagsFile/inputs/1599980398/dockerImages.yml", "r") as input_yaml:
content = yaml.load(input_yaml)
with open("./dockerImages.json", "w") as output_json:
json.dump(content, output_json)
CODE
) > "$out82f8c9a5" 2> "$err82f8c9a5"
# Record the task's exit status; Cromwell polls for the final 'rc' file.
echo $? > /cromwell-executions/RNAseq/82f8c9a5-384c-46fd-8ca0-fc2909f8922c/call-ConvertDockerTagsFile/execution/rc.tmp
(
# add a .file in every empty directory to facilitate directory delocalization on the cloud
cd /cromwell-executions/RNAseq/82f8c9a5-384c-46fd-8ca0-fc2909f8922c/call-ConvertDockerTagsFile/execution
find . -type d -exec sh -c '[ -z "$(ls -A '"'"'{}'"'"')" ] && touch '"'"'{}'"'"'/.file' \;
)
(
cd /cromwell-executions/RNAseq/82f8c9a5-384c-46fd-8ca0-fc2909f8922c/call-ConvertDockerTagsFile/execution
sync
)
# Atomic rename signals completion only after everything above finished.
mv /cromwell-executions/RNAseq/82f8c9a5-384c-46fd-8ca0-fc2909f8922c/call-ConvertDockerTagsFile/execution/rc.tmp /cromwell-executions/RNAseq/82f8c9a5-384c-46fd-8ca0-fc2909f8922c/call-ConvertDockerTagsFile/execution/rc
| true |
27b273ed05f0eb1bbe3ae24f585f763eec1c72e2 | Shell | barlesh/workstation | /build_workstation.sh | UTF-8 | 1,439 | 3.984375 | 4 | [] | no_license |
# ANSI colour escape sequences for status output.
CYAN="\033[0;36m"
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Abort the installation when the previous step failed.
# Arguments: $1 - exit code of the step just executed
#            $2 - message to print (in red) before exiting
function errorHandler {
        local errorCode="$1"
        local errorMessage="$2"
        # Quoted expansion so an empty/odd code cannot break the test.
        if [ "$errorCode" -ne 0 ]; then
                echo -e "${RED}${errorMessage}${NC}"
                exit 1
        fi
}
# permissions: the install_* helpers use system package managers, so root is
# required.  (The old comment claimed "must be non root", contradicting the
# check below.)
if [ "$EUID" -ne 0 ]; then
    echo -e "${RED}must be executed with root permissions. exiting${NC}"
    exit 1
fi

# TODO: calculate the required modules/software (currently installs all).

# Check which OS family we are running on.
OS_centos=$(cat /etc/*release | grep centos)
OS_ubuntu=$(cat /etc/*release | grep ubuntu)
OS_mint=$(cat /etc/*release | grep mint)   # NOTE(review): detected but never used below

if [ -n "$OS_centos" ]; then
    OS="centos"
    # Source every Fedora/CentOS helper; they define the install_* functions.
    for f in fedora/install*; do echo "source $f"; source "$f"; done
fi

if [ -n "$OS_ubuntu" ]; then
    OS="ubuntu"
    for f in debian/install*; do source "$f"; done
fi
echo "OS: $OS"

# Packages to install; each entry must have a matching install_<name> helper.
# PACKAGE_LIST=( git docker docker_compose slack google_chrome node postman vpn_client vscode clion virtualbox )
PACKAGE_LIST=( docker docker_compose slack google_chrome postman vpn_client vscode clion virtualbox )

for PACKAGE in "${PACKAGE_LIST[@]}"; do
    echo -e "${CYAN}installing $PACKAGE${NC}"
    "install_$PACKAGE"
    errorHandler $? "failed installing $PACKAGE"
done
4a84e0e28e413eabf2c14d532783fcda7e1f35b2 | Shell | shivergard/portable | /system_preprare.sh | UTF-8 | 780 | 2.59375 | 3 | [] | no_license | #!/bin/sh
# Provision an Ubuntu LAMP + Laravel host: packages, composer, PHP modules,
# Apache config, and a 2 GB swap file.  Run as a user with sudo rights.
sudo apt-get update
# git + Git Details
sudo apt-get -y install git
# LAMP stack
sudo apt-get install -y apache2
sudo apt-get install -y mysql-server libapache2-mod-auth-mysql php-mysql
sudo mysql_install_db
sudo apt-get install -y php7.0 libapache2-mod-php php-mcrypt
# composer for laravel
sudo apt-get install -y curl
sudo sh -c "curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/bin/"
#sudo nano /etc/apache2/mods-enabled/dir.conf
# NOTE(review): 'php-sqlite' may need to be 'php-sqlite3' on current Ubuntu.
sudo apt-get install -y php-sqlite
# for laravel
# NOTE(review): 'php7enmod' looks like a typo for 'phpenmod' -- confirm.
sudo php7enmod mcrypt
sudo a2enmod rewrite
sudo service apache2 restart
# swap file creation (2 GB)
sudo dd if=/dev/zero of=/swapfile bs=1M count=2000
sudo chmod 600 /swapfile
# BUG FIX: mkswap needs root privileges like the other swap commands.
sudo mkswap /swapfile
sudo swapon /swapfile
# BUG FIX: appending to /etc/fstab requires root; the bare '>>' redirect ran
# in the unprivileged shell and failed.
echo "/swapfile none swap sw 0 0" | sudo tee -a /etc/fstab >/dev/null
6af00060cd2faadfac2eec0a400bfe7f3c40e344 | Shell | Burzhun/Big-django-project | /django_dev/entrypoint.sh | UTF-8 | 305 | 2.65625 | 3 | [] | no_license | #!/bin/sh
# Container entrypoint: wait for Postgres, apply Django migrations, then exec
# the command passed to the container.
set -e

# Export the password so child processes such as pg_isready can see it; the
# old bare assignment was never propagated to them.
export PGPASSWORD="$DB_PASSWORD"

until pg_isready -h "$DB_HOST"; do
    echo "Waiting for database start"
    sleep 1
done

python3 manage.py makemigrations --merge --noinput
python3 manage.py migrate
# python3 manage.py loaddata app.json

# Replace the shell with the requested command.  "$@" keeps every argument
# intact (the old cmd="$@" collapsed them into one string).  The former
# 'runserver' line after exec was unreachable dead code and has been removed.
exec "$@"
| true |
eea1411b2f26c4982c3403153a7128753a2bc9c9 | Shell | jxnu-liguobin/transmittable-thread-local | /scripts/run-junit.sh | UTF-8 | 312 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Run the JUnit suite from the repo root using helpers sourced from
# common.sh (runCmd, JAVA_CMD, getClasspath, getJUnitTestCases).
# readlink -f resolves symlinks so relative paths work however invoked.
cd "$(dirname "$(readlink -f "$0")")"
export TTL_CI_TEST_MODE=true
source ./common.sh
# skip unit test for Javassist on command line, because Javassist is repackaged.
# NOTE: the $(getJUnitTestCases ...) substitution is intentionally unquoted
# so each test class name becomes its own argument.
runCmd "${JAVA_CMD[@]}" -cp "$(getClasspath)" \
org.junit.runner.JUnitCore $(getJUnitTestCases | grep -vE '\.JavassistTest$')
| true |
98846bd903a585da7ebb483e27477fed4e9025b1 | Shell | Netronome/virtio-forwarder | /startup/vio4wd-pre-start.sh | UTF-8 | 2,451 | 2.609375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# BSD LICENSE
#
# Copyright(c) 2016-2017 Netronome.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Netronome nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Load service configuration; 'stop' is presumably provided by the init
# framework that sources this pre-start script -- TODO confirm.
. /etc/default/virtioforwarder || { stop; exit 1; }
# Create the dir with the ZMQ endpoints.
@LIBEXECDIR@/create_zmq_dir.py $VIRTIOFWD_ZMQ_CORE_SCHED_EP
@LIBEXECDIR@/create_zmq_dir.py $VIRTIOFWD_ZMQ_STATS_EP
@LIBEXECDIR@/create_zmq_dir.py $VIRTIOFWD_ZMQ_PORT_CONTROL_EP
@LIBEXECDIR@/create_zmq_dir.py $VIRTIOFWD_ZMQ_CONFIG_EP
# Set up the socket dir permissions.
mkdir -vp "$VIRTIOFWD_SOCKET_DIR" || { stop; exit 1; }
# Socket owner/group are optional; apply them only when configured.
test -n "$VIRTIOFWD_SOCKET_OWNER" &&
chown "$VIRTIOFWD_SOCKET_OWNER" "$VIRTIOFWD_SOCKET_DIR"
test -n "$VIRTIOFWD_SOCKET_GROUP" &&
chgrp "$VIRTIOFWD_SOCKET_GROUP" "$VIRTIOFWD_SOCKET_DIR"
# Optionally bind the configured PCI devices to a userspace I/O driver.
test -n "$VIRTIOFWD_BIND_VFIO_PCI" &&
@LIBEXECDIR@//bind_uio_driver.py "$VIRTIOFWD_BIND_VFIO_PCI"
# Clean up any stray PID file that may be lying around.
rm -f "$VIRTIOFWD_PID_DIR"/virtioforwarder.pid
| true |
ccba826da480f29aab28e560238349f67886db40 | Shell | bolivaralejandro/bash_scripting | /pluralsight/bash_nigel/scriptfile.sh | UTF-8 | 86 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# Prompt for the user's name and greet them with this machine's hostname.
read -p "What's your name? " name
echo
# BUG FIX: ${hostname} was an unset variable and always printed empty; use
# bash's built-in $HOSTNAME instead.
echo "Hi ${name}. I'm ${HOSTNAME}"
| true |
27d754c707e5bf9000c5db94d18a4994764edd99 | Shell | shorile/commfirewall | /install.sh | UTF-8 | 426 | 2.5625 | 3 | [
"BSD-3-Clause"
] | permissive | #ipset 创建ip集合
# Create the ipset (hash of net,port pairs) that backs the blacklist.
ipset create in_tcp_dro_sidp hash:net,port timeout 0
# Hook the set into iptables: drop NEW/ESTABLISHED TCP traffic whose
# (remote ip, local port) pair is present in the blacklist set.
iptables -I INPUT -p tcp -m state --state NEW,ESTABLISHED -j DROP -m set --match-set in_tcp_dro_sidp src,dst
# Register commfirewall.sh as a once-a-minute cron job.
SHELL_FOLDER=$(cd "$(dirname "$0")";pwd)
# BUG FIX: the old awk pipeline appended the entry once per existing crontab
# line, and the single quotes kept ${SHELL_FOLDER} from expanding; append
# exactly one correctly-expanded entry instead.
( crontab -l 2>/dev/null; echo "*/1 * * * * bash ${SHELL_FOLDER}/commfirewall.sh" ) | crontab -
| true |
c6bf99d1407a1b24b57e4c9c662b408a4ac363cb | Shell | vpnwall-services/docker-scripts | /pentest/nessus.sh | UTF-8 | 479 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Optional Nessus activation code used by the (commented) register step.
LICENSE=""
# start
# The first invocation creates the container (web UI on port 8834);
# 'docker start' brings it back up on later runs once it already exists.
docker run -it --name nessus -p 8834:8834 localhost:5000/treadie/nessus
docker start nessus
# register
#docker exec -i nessus /opt/nessus/sbin/nessusctl fetch --register $LICENSE
# update plugins
#docker exec -i nessus /opt/nessus/sbin/nessuscli update --plugins-only
# To build
# Clone repo
# Go to repo dir
# docker build -t "treadie/nessus"
# docker history treadie/nessus
# docker build -t "treadie/nessus:latest" -t "treadie/nessus:6.8.1" .
| true |
e7d63325b782cfff724fd81e5a9504534d9dad8f | Shell | zjentohlauedy/MBA | /MBATool/web/templates/import_dos.sh.erb | UTF-8 | 762 | 3.484375 | 3 | [] | no_license | #!/bin/bash
#
# Import EWB data files from the DOS install into the current directory.
# BASE_PATH/DOS_PATH are injected by the ERB template; quote them everywhere
# so paths containing spaces cannot word-split.
BASE_PATH="<%= base_path %>"
DOS_PATH="<%= dos_path %>"

# Refuse to import when the working copies are newer than the DOS files, so
# a stale import cannot clobber fresh local data.
if [[ LeagName.Dat -nt "$DOS_PATH/LEAGNAME.DAT" ||
      parks.dat -nt "$DOS_PATH/PARKS.DAT" ||
      Players.S -nt "$DOS_PATH/PLAYERS.DAT" ]];
then
    echo "WARNING: current files are newer than import files!";
    # NOTE(review): exits 0 on purpose so a skipped import is not treated as
    # a failure; change to 'exit 1' if callers should see an error.
    exit;
fi

echo "Copying files..."
# Convert the DOS player file to the Amiga format; bail out on failure.
"$BASE_PATH/ewbdos2amiga" "$DOS_PATH/PLAYERS.DAT" Players.S || exit
cp "$DOS_PATH/LEAGNAME.DAT" LeagName.Dat
cp "$DOS_PATH/PARKS.DAT" parks.dat

echo "Creating Backups..."
cp Players.S .Players.S.bak
cp LeagName.Dat .LeagName.Dat.bak
cp parks.dat .parks.dat.bak

# Copy boxscore files (D*G*.TXT) when any exist, and make them read-only.
n_boxscores=$(ls -1 "$DOS_PATH"/D*G*.TXT 2>/dev/null | wc -l)
if [[ $n_boxscores -gt 0 ]]
then
    echo "Copying Boxscores..."
    cp "$DOS_PATH"/D*G*.TXT .
    chmod 444 D*G*.TXT
fi
| true |
38b881374452b8be708a7cee14b5127e558ed7da | Shell | wangbiaoshuai/service_scan | /cems-service-trans-epoll/code/test/run_test.sh | UTF-8 | 286 | 3.671875 | 4 | [] | no_license | #!/bin/sh
# Build the tests (when a Makefile is present) and then run every executable
# file in this directory, skipping this script itself.
if [ -e "Makefile" ]; then
    # BUG FIX: the old '[ -e Makefile ] && make; if [ $? != 0 ]' aborted
    # whenever the Makefile was merely absent; only a failed build should.
    make || exit 1
fi

# Compare against our own basename; "$0" may carry a ./ prefix, which made
# the old raw comparison never match.
self=$(basename "$0")
for file in *; do
    if [ "$file" = "$self" ]; then
        continue
    fi
    # file(1) labels both native binaries and scripts as "executable".
    if file "$file" | grep -q "executable"; then
        "./$file"
    fi
done
| true |
743f61523e7c51f117f381dd0d25b416773ba6f1 | Shell | cysun0226/System-Administration | /hw2/2-2/crs.sh | UTF-8 | 9,463 | 3.828125 | 4 | [] | no_license | #!/bin/sh
##### initialize
# create ./data/ (holds the course catalogue, current selection and caches)
if [ ! -d "data" ]; then
mkdir data
fi
# variables
show_classroom=0 # toggle: 0 = show class name, 1 = show classroom
show_extra=0 # toggle: 0 = hide the extra timetable column
time_conflict=0 # set to 1 by check_conflict when selected slots overlap
opt1_str='Show Classroom'
opt2_str='Show Extra Column'
exist_id='' # space-separated course ids already parsed (deduplication)
update=1 # 1 = regenerate the timetable before the next display
total_time='' # accumulator used by get_total_time
##### download_data
# Stream parser for one prepared line of the course JSON.  Lines arrive in
# repeating groups of three (cos_id, cos_time, cos_ename); the global
# counter cnt tracks which field of the current course this line is.  Each
# line looks like  key:"value",  so the value is field 2 of a ':' split with
# the trailing character stripped.  Emits "id#time#name\n" records, skipping
# courses whose id was already seen (tracked in the global exist_id list via
# the rdd flag).
get_json_value()
{
v=$(echo $1 | cut -d':' -f2 | sed 's/.$//')
cnt=$(expr $cnt + 1 )
case $cnt in
1) #id
# check if redundant
rdd=0
for i in $exist_id; do
if [ "$v" = "$i" ]; then
rdd=1
break
fi
done
if [ "$rdd" = "0" ]; then
exist_id="$exist_id $v"
printf '%s#' "$v"
fi
;;
2) # time
if [ "$rdd" = "1" ]; then
return
fi
printf '%s#' "$v"
;;
3) #name
# reset the field counter for the next course record
cnt=0
if [ "$rdd" = "1" ]; then
return
fi
printf '%s\n' "$v"
;;
esac
}
# Run every line of the prepared course file $1 through get_json_value.
# Resetting cnt first makes the shared field counter start at the id field
# of the first course record.
parse_json() {
  cnt=0
  while read json_line; do
    get_json_value "$json_line"
  done < "$1"
}
# Fetch the course list and convert it to "id#time#name" lines in
# ./data/classes.txt.
# $1 - URL to POST to,  $2 - POST payload (note the deliberate leading space)
download_data()
{
raw_file='./data/raw_data.json'
prep_file='./data/pre_classes.txt'
curl "$1" --data " $2" > $raw_file
# insert new line into .json
# Break the single-line JSON apart at braces/commas, keep only the cos_id /
# cos_time / cos_ename lines, then strip the quotes for parse_json.
cat "$raw_file" | sed 's/'{'/{\'$'\n/g' | sed 's/'}'/}\'$'\n/g' | sed 's/'\",'/,\'$'\n/g' | awk '/cos_id/{print $0} /cos_time/{print $0} /cos_ename/{print $0}' | sed 's/"//g' > "$prep_file"
parse_json "$prep_file" > ./data/classes.txt
rm $raw_file
rm $prep_file
}
##### CRS function
# Reset the timetable occupancy map: sets used_<day>_<slot>=0 for every
# weekday (1-7) and every period code (M..L).
clean_use_table() {
  for day in 1 2 3 4 5 6 7; do
    for slot in M N A B C D X E F G H Y I J K L; do
      eval "used_${day}_${slot}=0"
    done
  done
}
# Indirect lookup: print the value of the variable whose name is the
# concatenation of the three arguments, e.g.
#   get_2d_arr used _3 _A   ->   value of $used_3_A
get_2d_arr() {
  varname="$1$2$3"
  eval slot_value="\$$varname"
  printf '%s' "$slot_value"
}
# Concatenate the day+period codes of every course listed in file $1.
# Each line looks like "id#<slot>-<room>[,<slot>-<room>...]#name"; for every
# comma-separated segment the part before '-' (e.g. "3AB") is kept, and the
# segments of all lines are joined into one string echoed on stdout.
get_total_time() {
  total_time=''
  while read course_line; do
    slots=$(echo $course_line | cut -d'#' -f2)
    seg_cnt=$(echo $slots | grep -o '-' | wc -l)
    line_time=''
    for seg in $(seq $seg_cnt); do
      line_time="$line_time$(echo $slots | cut -d',' -f$seg | cut -d'-' -f1)"
    done
    total_time="$total_time$line_time"
  done < "$1"
  echo "$total_time"
}
# Walk a concatenated time string (e.g. "3AB5CD") character by character:
# digits 1-7 switch the current weekday, letters are period codes.  Marks
# every (day, period) pair as occupied in the used_<day>_<period> map and
# sets the global time_conflict=1 when a pair was already taken.
check_conflict()
{
for i in $(seq ${#1}); do
c=$(echo "$1" | cut -c $i-$i)
if_w=$(echo "$c" | grep '[1-7]')
# weekday
if [ "$if_w" != "" ];
then
day=$c
else
# time
t=$c
if_use=$(get_2d_arr used "_$day" "_$t")
if [ "$if_use" != "0" ]; then
time_conflict=1
fi
eval "used_${day}_$t=1"
fi
done
}
# Write to $2 every course from $1 whose time slots are all still free in
# the used_<day>_<period> map (populated beforehand via check_conflict).
# $1 - input course list (lines of "id#time#name")
# $2 - output file receiving the non-conflicting courses
get_free_time_courses()
{
> ./data/available.txt
while read a; do
# field 2 holds slots like "3AB-EC122,5CD-EC115"; keep the part before
# each '-' and join them into e.g. "3AB5CD"
x_time=$(echo $a | cut -d'#' -f2)
time_cnt=$(echo $x_time | grep -o '-' | wc -l)
time=''
for t in $(seq $time_cnt); do
time="$time$(echo $x_time | cut -d',' -f$t | cut -d'-' -f1)"
done
# split into single space-separated characters for the scan below
time_seq=$(echo "$time" | fold -w1 | paste -sd' ' -)
available=1
for c in $time_seq; do
if_w=$(echo "$c" | grep '[1-7]')
# weekday
if [ "$if_w" != "" ];
then
day=$c
else
# time
t=$c
if_use=$(get_2d_arr used "_$day" "_$t")
if [ "$if_use" != "0" ]; then
available=0
fi
fi
done
if [ "$available" = "1" ]; then
printf '%s\n' "$a" >> $2
fi
done < $1
}
# Dispatch an Option-menu selection (op1..op4).  op1/op2 toggle display
# flags, op3 searches courses by substring, op4 finds courses fitting the
# free slots of the current schedule.  Sets update=0 when the timetable does
# not need to be regenerated.
handle_option()
{
ipt=$1
case $ipt in
op1) show_classroom=$(expr 1 - $show_classroom)
if [ $show_classroom = 0 ];
then
opt1_str='Show Classroom'
else
opt1_str='Show Class Name'
fi
;;
op2) show_extra=$(expr 1 - $show_extra)
if [ $show_extra = 0 ];
then
opt2_str='Show Extra Column'
else
opt2_str='Hide Extra Column'
fi
;;
op3) # search courses
> ./data/input.txt
dialog --title "Search courses" --inputbox "Target substring:" 20 100 2>./data/input.txt
# dialog exit status 1 means the user pressed Cancel
if [ "$?" = "1" ]; then
rm ./data/input.txt
update=0
return
fi
ipt=$(cat ./data/input.txt)
add_class s $ipt n
rm ./data/input.txt
;;
op4) # search free time courses
# mark the slots taken by the currently selected classes ...
clean_use_table
check_conflict $(get_total_time ./data/cur_class.txt)
time_conflict=0
printf 'searching...'
# ... then collect every course that fits the remaining free slots
get_free_time_courses ./data/classes.txt ./data/available.txt
add_class n n f
rm ./data/available.txt
# dialog --title "README" --textbox ./data/available.txt 50 100
# dialog --title "Time Available Courses" \
# --msgbox "$(get_free_time_courses ./data/classes.txt)" 50 140
;;
esac
printf "generate table..."
}
# Show a dialog --buildlist so the user can (de)select courses, then verify
# the selection for time conflicts and, on confirmation, save it to
# ./data/cur_class.txt.
# $1 - "all" to offer the whole catalogue, anything else for a subset
# $2 - substring to grep for when $1 != "all" and $3 != "f"
# $3 - "f" to offer the pre-computed free-time list (./data/available.txt)
add_class()
{
# $1 = basic
# $2 = target
# $3 = free time
# create empty temp file
time_conflict=0
USR_IPT="./data/temp.txt"
>$USR_IPT
if [ "$1" = "all" ];
then
# Build quoted "item" "label" on/off triples for dialog --buildlist:
# not-yet-chosen courses start "off", currently chosen ones start "on".
if [ "$(cat ./data/cur_class.txt)" != "" ]; then
off_list_item=$(grep -v -f ./data/cur_class.txt ./data/classes.txt | awk -F# '{printf "%s?%s - %s?off\n",$0,$2,$3}' | sed 's/^/"/' | sed 's/$/"/' | sed 's/?/" "/g')
else
off_list_item=$(cat ./data/classes.txt | awk -F# '{printf "%s?%s - %s?off\n",$0,$2,$3}' | sed 's/^/"/' | sed 's/$/"/' | sed 's/?/" "/g')
fi
on_list_item=$(cat ./data/cur_class.txt | awk -F# '{printf "%s?%s - %s?on\n",$0,$2,$3}' | sed 's/^/"/' | sed 's/$/"/' | sed 's/?/" "/g')
eval dialog --buildlist '"Add a class"' 30 110 20 $off_list_item $on_list_item 2>$USR_IPT
response=$?
else
>./data/tmp2.txt
if [ "$3" != "f" ]; then
# subset mode: only courses matching the $2 substring
off_list_item=$(grep $2 ./data/classes.txt | awk -F# '{printf "%s?%s - %s?off\n",$0,$2,$3}' | sed 's/^/"/' | sed 's/$/"/' | sed 's/?/" "/g')
eval dialog --buildlist '"Courses contain [$2]"' 30 110 20 $off_list_item 2>./data/tmp2.txt
response=$?
else
# free-time mode: offer the list produced by get_free_time_courses
off_list_item=$(cat ./data/available.txt | awk -F# '{printf "%s?%s - %s?off\n",$0,$2,$3}' | sed 's/^/"/' | sed 's/$/"/' | sed 's/?/" "/g')
eval dialog --buildlist '"Course for free time"' 30 110 20 $off_list_item 2>./data/tmp2.txt
response=$?
fi
# merge the fresh selection with the previously chosen classes, re-quoting
# each record so the eval below splits on whole lines
echo "" >> ./data/tmp2.txt
cat ./data/tmp2.txt | sed 's/" /\'$'\n/g' | tr -d '"' > $USR_IPT
cat $USR_IPT > ./data/tmp2.txt
cat ./data/tmp2.txt ./data/cur_class.txt | sed 's/^/"/' | sed 's/$/"/' > $USR_IPT
rm ./data/tmp2.txt
fi
# cancel
if [ "$response" = "1" ]; then
rm ./data/temp.txt
update=0
return
fi
cur_class=$(cat $USR_IPT | sed 's@\\@@g')
eval 'for word in '$cur_class'; do echo $word; done' > ./data/temp.txt
# check conflict
if [ "$cur_class" != "" ]; then
clean_use_table
check_conflict $(get_total_time ./data/temp.txt)
fi
# add_result=$(./print_table.sh ./data/temp.txt $show_classroom $show_extra 1)
# if [ "$add_result" = "pass" ];
if [ "$time_conflict" = "0" ];
then
dialog --title "Add Class" --yes-label 'YES' --no-label 'Cancel' --yesno \
"\n\nSaving?" 10 30
response=$?
case $response in
0) eval 'for word in '$cur_class'; do echo $word; done' > ./data/cur_class.txt
;;
1) update=0
;;
esac
else
dialog --title "Warning" --msgbox "\n\nTime conflict!" 10 30
fi
rm ./data/temp.txt
}
### main ----------------------
# check if courses data exists
if [ ! -f "./data/classes.txt" ]; then
# First run: offer to download the course catalogue.
dialog --title "CRS" \
--defaultno --yesno \
"Welcome to CRS.\n\nNo available courses data.\n\nDownload default courses?\
\n\n * [YES] Download default courses (CS107-fall)\n\n * [NO] Download from input URL"\
20 50
response=$?
# 0 = YES (default catalogue), 1 = NO (custom URL), 255 = ESC
case $response in
0) dialog --title "Download Courses Data" --msgbox "Download CS107-fall." 10 30
download_data 'https://timetable.nctu.edu.tw/?r=main/get_cos_list' \
'm_acy=107&m_sem=1&m_degree=3&m_dep_id=17&m_group=**&m_grade=**&m_class=**&m_option=**&m_crs name=**&m_teaname=**&m_cos_id=**&m_cos_code=**&m_crstime=**&m_crsoutline=**&m_costype=**'
dialog --title "Download Courses Data" --msgbox "Finish Downloading." 10 30
;;
1) dialog --title "Download Courses Data" --inputbox "Please input URL:" 20 100 2>./data/input.txt
url=$(cat ./data/input.txt)
dialog --title "Download Courses Data" --inputbox "Please input data format:" 20 100 2>./data/input.txt
data_format=$(cat ./data/input.txt)
download_data "$url" "$data_format"
;;
255) exit 0;;
esac
fi
# Data Exists
dialog --title "CRS" --msgbox \
"\nWelcome to CRS.\n\nFind available courses data.\n\nPress to start CRS."\
20 50
# Check if current_class exists
if [ ! -f "./data/cur_class.txt" ]; then
> ./data/cur_class.txt
fi
# NOTE(review): this $? reflects the cur_class.txt check/redirect above, not
# the welcome dialog -- confirm whether the dialog status was intended here.
response=$?
case $response in
0) ;;
1) exit 0;;
255) exit 0;;
esac
# display timetable
# Main loop: regenerate the rendered timetable (unless update=0), display
# it, and dispatch on the pressed button (OK = add class, help = exit,
# extra = option menu).
while [ $response != 2 ]; do
if [ $update = 1 ]; then
./print_table.sh ./data/cur_class.txt $show_classroom $show_extra 0 | sed 's/#/\ /g' > ./data/table.txt
fi
timetable=$(cat ./data/table.txt)
update=1
# timetable=$(./print_table.sh ./data/cur_class.txt $show_classroom $show_extra 0 | sed 's/#/\ /g')
dialog --no-collapse --title "Timetable" \
--help-button --help-label "Exit" \
--extra-button --extra-label "Option" \
--ok-label "Add Class" --msgbox "$timetable" 50 140
response=$?
case $response in
0) add_class 'all' 'n' 'n'
# keep re-opening the picker until a conflict-free selection is made
while [ $time_conflict = 1 ]; do
add_class 'all' 'n' 'n'
done
;;
2) exit 0
;;
3) echo "Option"
opt=$(dialog --title "Option" --menu "" 24 70 6 \
op1 "$opt1_str" op2 "$opt2_str" op3 "Search Courses"\
op4 "Search Free Time Courses" --output-fd 1)
handle_option $opt
;;
esac
done
| true |
ba77b138af33001f1dfebebd520d74bfb2a1ca4f | Shell | samuelcstewart/dotfiles | /.zshrc | UTF-8 | 3,818 | 2.984375 | 3 | [] | no_license | # Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
#
# Bail out early for non-interactive shells; nothing below applies to them.
[[ $- != *i* ]] && return
# zplug plugin manager (Homebrew install location).
export ZPLUG_HOME=/usr/local/opt/zplug
source $ZPLUG_HOME/init.zsh
zplug "plugins/command-not-found", from:oh-my-zsh
zplug "plugins/vi-mode", from:oh-my-zsh
zplug "plugins/docker", from:oh-my-zsh
zplug "plugins/docker-compose", from:oh-my-zsh
zplug "plugins/kubectl", from:oh-my-zsh
zplug "mafredri/zsh-async"
zplug "romkatv/powerlevel10k", as:theme, depth:1
zplug "zsh-users/zsh-autosuggestions"
zplug "zsh-users/zsh-completions"
# defer:2 loads these last, after compinit and the other plugins.
zplug "zsh-users/zsh-syntax-highlighting", defer:2
zplug "plugins/history-substring-search", defer:2, from:oh-my-zsh
zplug "johanhaleby/kubetail"
# Install plugins if there are plugins that have not been installed
if ! zplug check --verbose; then
printf "Install? [y/N]: "
if read -q; then
echo; zplug install
fi
fi
zplug load
# HISTORY #
HISTFILE=~/.zsh_history
HISTSIZE=1000000000
SAVEHIST=1000000000
# share history lines across all concurrent zsh sessions
setopt share_history
setopt append_history # sessions append to history file, rather then replace it.
setopt extended_history # saves timestamps to history
setopt hist_expire_dups_first # removes oldest history event with duplicate
setopt hist_ignore_dups # do not enter command into history if duplicates of previous event
setopt hist_ignore_space # remove commands with leading space from history
setopt hist_verify # reload line into editing buffer when using history expansion
setopt inc_append_history_time # history is added incrementally rather than on shell exit
# DIR STACK
setopt auto_pushd # push directories to dir stack
setopt pushd_minus # invert meanings of + and - when working with dir stack
setopt pushd_silent # do not print dir stack of pushd/popd
setopt pushd_to_home
setopt pushd_ignore_dups
DIRSTACKSIZE=20
zstyle ':completion:*' menu select
# Vim keybindings for substring search
bindkey -M vicmd 'k' history-substring-search-up
bindkey -M vicmd 'j' history-substring-search-down
zmodload zsh/terminfo
# bind up/down arrow keys for iterm
bindkey "$terminfo[cuu1]" history-substring-search-up
bindkey "$terminfo[cud1]" history-substring-search-down
# bind up/down arrow keys for nearly everything else
bindkey "$terminfo[kcuu1]" history-substring-search-up
bindkey "$terminfo[kcud1]" history-substring-search-down
# Base16 Shell. Use base16_ to change colorschemes dynamically.
# BASE16_SHELL=$HOME/dotfiles/shell/base16-shell/
# [ -n "$PS1" ] && [ -s $BASE16_SHELL/profile_helper.sh ] && eval "$($BASE16_SHELL/profile_helper.sh)"
# Attach to an existing deattached tmux session, or start a new one
#if [[ -z "$TMUX" ]] ;then
#ID="`tmux ls | grep -vm1 attached | cut -d: -f1`" # get the id of a deattached session
#if [[ -z "$ID" ]] ;then # if not available create a new one
#tmux new-session
#else
#tmux attach-session -t "$ID" # if available attach to it
#fi
#fi
# ~/.extra can be used for other settings you don’t want to commit.
for file in ~/.{bash_prompt,aliases,functions,exports,extra}; do
if [[ -r "$file" ]] && [[ -f "$file" ]]; then
source "$file"
fi
done
unset file
# VI
# vi editing mode with a short ESC delay (KEYTIMEOUT is in 10 ms units).
bindkey -v
export KEYTIMEOUT=1
# aliases
# NOTE(review): ~/.aliases is already sourced by the loop above -- this
# second source looks redundant; confirm before removing.
source $HOME/.aliases
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh
# jenv (Java version manager)
export PATH="$HOME/.jenv/bin:$PATH"
eval "$(jenv init -)"
| true |
1930540ec5121b8224d5b8940e614e5385736d42 | Shell | MonkeyDoug/Forester | /.config/polybar/polybar-scripts/hlwm_layout.sh | UTF-8 | 345 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env bash
# Poll herbstluftwm every 200 ms and print an icon for the focused frame's
# layout algorithm, for polybar's script module to display.
while true; do
layout=$(herbstclient attr tags.focus.tiling.focused_frame.algorithm)
# NOTE(review): the icon values appear empty here -- they are presumably
# private-use font glyphs that do not render in plain text; confirm they
# are still present in the real file before editing these assignments.
if [[ "$layout" == "horizontal" ]]; then
icon=
elif [[ "$layout" == "vertical" ]]; then
icon=
elif [[ "$layout" == "max" ]]; then
icon=
elif [[ "$layout" == "grid" ]]; then
icon=
fi
echo "$icon"
sleep 0.2
done
| true |
be3536b0bc7369377b627b5e1a6e2c471463f5e4 | Shell | LyzV/bash-learn | /test11 | UTF-8 | 144 | 3.078125 | 3 | [] | no_license | #! /bin/bash
# Report which colour was requested via the first argument.
case $1 in
red) echo 'Color is red';;
blue) echo 'Color is blue';;
green) echo 'Color is green';;
# BUG FIX: corrected the misspelled "Uncknown" in the fallback message.
*) echo 'Unknown color';;
esac
| true |
ff485fa45c83eaf916c81530e22bea42223214fd | Shell | Romain-P/alpharite | /releases/previous-modules/utils-rename-script.sh | UTF-8 | 107 | 2.75 | 3 | [] | no_license | regex='(.*)-cleaned.dll'
# Strip the "-cleaned" suffix from every *.dll in the current directory
# (foo-cleaned.dll -> foo.dll).  The pattern is now anchored and the dot
# escaped, so only an exact "-cleaned.dll" suffix matches (the old
# unanchored '(.*)-cleaned.dll' let '.' match any character).
rename_cleaned_dlls() {
  local f
  for f in *.dll; do
    [[ $f =~ ^(.*)-cleaned\.dll$ ]] && mv -- "$f" "${BASH_REMATCH[1]}.dll"
  done
}
rename_cleaned_dlls
5338d1c408a063e93ac4901535fd118ee35819ac | Shell | daehahn/aiva-pi-avs-device-sdk | /automated_install.sh | UTF-8 | 1,763 | 3.5625 | 4 | [] | no_license | #!/usr/bin/env bash
#
# Get Swap Memory Size, if it's zero we will create own 384MB a temp swap file
#
# Reads the current swap size in MiB via free(1); while it is below 384 MiB,
# creates /tmp/swapfile.swp (~384 MiB of zeroes), formats and enables it,
# then re-reads the size.  The file lives on /tmp so it vanishes on reboot.
create_swapfile()
{
SWAPSIZE=`free | awk '/Swap/ { printf "%d", $2/1024 }'`
while [[ "$SWAPSIZE" -lt "384" ]]; do
echo "=============================================================="
echo "Create a temporary SWAP file. It will disappear when reboot."
echo "** Please consier to add a performanant SWAP file/parition. **"
echo "=============================================================="
# 393224 KiB of zeroes (~384 MiB); dd/chmod run as the invoking user since
# /tmp is world-writable, while mkswap/swapon need root.
dd if=/dev/zero of=/tmp/swapfile.swp bs=1024 count=393224 status=progress
chmod 600 /tmp/swapfile.swp
sudo mkswap /tmp/swapfile.swp
sudo swapon /tmp/swapfile.swp
#sudo swapoff /tmp/swapfile.swp # /var/tmp is remained when reboot
SWAPSIZE=`free | awk '/Swap/ { printf "%d", $2/1024}'`
done
free
}
# Create temporary swpafile (384 MiB)
create_swapfile
# Download necessary files prefetch from avs-device-sdk
#wget https://raw.githubusercontent.com/alexa/avs-device-sdk/master/tools/Install/pi.sh
#wget https://raw.githubusercontent.com/alexa/avs-device-sdk/master/tools/Install/setup.sh
wget https://raw.githubusercontent.com/alexa/avs-device-sdk/master/tools/Install/genConfig.sh
echo "=============================================================="
echo " AiVA-96 AVS Device SDK Installation"
echo "=============================================================="
Origin=$(pwd)
Credentials="config.json"
Credentials_Loc=$Origin/$Credentials
# The AVS client credentials file must be supplied by the user beforehand.
if [[ ! -f "$Credentials_Loc" ]]; then
echo " ERROR - 'config.json' file not found."
echo " Place your 'config.json' file to $Origin"
echo " Ex) 'scp config.json linaro@ip_address:$Origin'"
trap - ERR
# NOTE(review): 'exit -1' is non-portable and yields status 255; consider
# plain 'exit 1'.
exit -1
fi
# Run the AVS SDK setup in the current shell with the credentials file.
source ./setup.sh $Credentials
| true |
137ff7a1b9dd35cbab92b0e042b5779274506f64 | Shell | danielguerra69/metar-weather | /bin/docker-cmd.sh | UTF-8 | 639 | 3.5 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Container entrypoint: load the Elasticsearch mapping once, then poll NOAA
# for METAR cycle files every two minutes and feed each report into
# Elasticsearch via metar2elastic.py.
echo "$(date) Starting METAR Weather"
# set elasticsearch mappings
mapping.sh
# go to the metar dir; everything below assumes this working directory
cd /metar || exit 1
# add station information
#wget -O - https://www.aviationweather.gov/docs/metar/stations.txt > stations.txt
# keep looping forever
while :; do
echo "$(date) Fetching new data"
# mirror the latest cycle files from NOAA
lftp -c mirror http://tgftp.nws.noaa.gov/data/observations/metar/cycles
echo "$(date) Processing new data"
# Iterate the glob directly instead of parsing `ls` output (word-splitting
# hazard) and quote every expansion.
for x in cycles/*.TXT; do
echo -n "$(date) $x "
# METAR reports start with a 4-letter station id; dedupe before indexing.
grep -E "^[A-Z]{4} " "$x" | sort -u | metar2elastic.py
done
echo "$(date) Sleeping 120 seconds"
sleep 120
done
| true |
f40a1748f87cf6741ccafdd769695263e0a4c10f | Shell | hn1987126/finance | /doc/shell/php_mysql_nginx/php.sh | UTF-8 | 2,591 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Build a LAMP-style PHP 7 stack from source tarballs.
# Usage: /bin/bash php.sh /usr/local/php/ /root/soft/php/
#   $1 - PHP installation prefix
#   $2 - directory containing the source tarballs (*.tar.gz)
TAR_SRC=${2}
cd "$TAR_SRC" || exit 1
# Create a scratch directory under the source dir and unpack every tarball
# into it.  Iterating the glob directly replaces the old "ls > ls.list"
# round-trip, which broke on unusual filenames and left a stray list file.
mkdir tmp
for TAR in *.tar.gz; do
    tar -xzf "$TAR" -C tmp
done

cd tmp/libxml2-2.6.30
./configure --prefix=/usr/local/libxml2
make && make install

cd ../libmcrypt-2.5.8
./configure --prefix=/usr/local/libmcrypt
make && make install
cd libltdl/
./configure --enable-ltdl-install
make && make install
cd ../

cd ../zlib-1.2.3
./configure
make && make install

cd ../libpng-1.2.31
./configure --prefix=/usr/local/libpng
make && make install

cd ../jpeg-6b/
# BUG FIX: the original ran "mkdir mkdir -p .../man/man1", creating a stray
# "mkdir" directory; one mkdir -p covers the whole tree.
mkdir -p /usr/local/jpeg6/bin /usr/local/jpeg6/lib \
    /usr/local/jpeg6/include /usr/local/jpeg6/man/man1
# BUG FIX: '--enable-statc' was a typo for '--enable-static'.
./configure --prefix=/usr/local/jpeg6 --enable-shared --enable-static
make && make install

cd ../freetype-2.3.5
./configure --prefix=/usr/local/freetype
make && make install

cd ../autoconf-2.61
./configure
make && make install

# Build the GD graphics library against the freshly built jpeg/freetype.
cd ../libgd-2.1.1
./configure --prefix=/usr/local/gd2/ --with-jpeg=/usr/local/jpeg6/ --with-freetype=/usr/local/freetype/
make && make install

# Build and install PHP itself.
PHP_PATH=$1
PHP_SRC=${2}tmp/php-7.0.6
if [ ! -d "$PHP_PATH" ]; then
    mkdir "$PHP_PATH"
fi
cd "${PHP_SRC}"
./configure \
--prefix=/usr/local/php \
--with-libxml-dir=/usr/local/libxml2 \
--with-png-dir=/usr/local/libpng \
--with-freetype-dir=/usr/local/freetype \
--enable-soap \
--enable-mbstring=all \
--enable-sockets \
--enable-fpm \
--enable-zip \
--enable-json \
--with-mysqli=mysqlnd \
--with-pdo-mysql=mysqlnd \
--with-mysql-sock=mysqlnd \
--enable-pdo \
--enable-dom --enable-pcntl
make && make install
cp php.ini-production /usr/local/php/etc/php.ini
cp /usr/local/php/etc/php-fpm.conf.default /usr/local/php/etc/php-fpm.conf
cp /usr/local/php/etc/php-fpm.d/www.conf.default /usr/local/php/etc/php-fpm.d/www.conf
#sed -i "/^user = nobody/c\user = root" /usr/local/php/etc/php-fpm.d/www.conf
#sed -i "/^group = nobody/c\group = root" /usr/local/php/etc/php-fpm.d/www.conf
# Put the PHP binaries on everyone's PATH.
sed -i '$a\export PATH=$PATH:/usr/local/php/bin:/usr/local/php/sbin' /etc/profile
# start php-fpm (left disabled, as in the original)
#nohup /usr/local/php/sbin/php-fpm -c /usr/local/php/etc/php.ini -R > /dev/null 2>&1
# 安装
# ./install_php.sh /usr/local/php/ /root/files/
#查看是否有旧版本
#rpm -qa | grep php
# 卸载php5.3.3
#rpm -e php-5.3.3-46.el6_6.x86_64
#rpm -e php-xml-5.3.3-46.el6_6.x86_64
#rpm -e php-fpm-5.3.3-46.el6_6.x86_64
#rpm -e php-mbstring-5.3.3-46.el6_6.x86_64
#rpm -e php-cli-5.3.3-46.el6_6.x86_64
#rpm -e php-common-5.3.3-46.el6_6.x86_64 | true |
168f6eaabc06f8e78a1fc8e410aa3699d96b2c12 | Shell | shivam-31/x-file | /x-file.sh | UTF-8 | 1,879 | 3.4375 | 3 | [] | no_license | #!/bin/bash
echo ""
echo ""
echo ""
echo " ********************************************************************"
echo " ********************************************************************"
echo " *** ***"
echo " *** xx xx xxxxxxxx xxxxxxxx xx xxxxxxx ***"
echo " *** xx xx xx xx xx xx ***"
echo " *** xx xxx xxxxxxxx xx xx xxxxxxx ***"
echo " *** xx xx xx xx xx xx ***"
echo " *** xx xx xx xxxxxxxx xxxxxxxx xxxxxxx ***"
echo " *** ***"
echo " ********************************************************************"
echo " ********************************************************************"
echo ""
echo ""
echo ""
while true ; do
{
echo "Please Enter Your USER NAME :"
read name
file_name="$name.txt"
if [ −f "$file_name" ] ; then
echo −e "$file_name exist !! "
else
echo −e "Your File doesn't exist ! Do You Want To Create It ? Enter (y)
es or (n)o : "
read answer
fi
if [ "$answer" == "y" ] ; then
touch $file_name;
echo "$name" >> username.list;
echo −e "Your File Is Created ";
fi
echo −e "Choose option :"
echo −e "1 . Edit The File ."
echo −e "2 . Print Present Users."
echo −e "3 . End The Program ."
read answer2
case $answer2 in
1 ) vim $file_name;
;;
2 ) less username.list;
;;
3 ) exit 0;
;;
esac # −−− end of case −−−
echo −e "Do You Want to encrypt it ? Enter (y)es or (n)o :"
read answer3
if [ "$answer3" == "y" ] ; then
echo −e "Please Enter passphrase : " ;
read passphrase ;
mcrypt $file_name −k $passphrase ;
rm $file_name;
fi
echo −e "Do You Want to Decrypt Your File ? Enter (y)es or (n)o :"
read answer4
if [ "$answer4" == "y" ]; then
echo −e "Enter passphrase";
file_name_d="$file_name.nc"
read passphrase_d;
mcrypt −k $passphrase_d −d $file_name_d;
fi
}
done
| true |
b25af7582005969ab374011c91aae301b267c648 | Shell | gmajay/shell | /scripts/test.sh | UTF-8 | 91 | 3.3125 | 3 | [] | no_license | j=1
while [ $j -le 10 ]
do
echo -n "$j "
j=$(( j + 1 )) # increase number by 1
done
| true |
9c67ffb8d67a717feca4efd2971761662c7bf4ea | Shell | cndark/server-1 | /update_confs.sh | UTF-8 | 1,643 | 3.75 | 4 | [] | no_license | #!/bin/bash
read -p "continue to update confs? [Yes/No]" x
[ "$x" != "Yes" ] && exit 1
# -------------------------------------
set -e
source ./env
# retrive code rev
echo "retrieving code version ..."
branch_code=$(git branch | grep \* | awk '{print $2}')
# read branch config
branch_data_var=${branch_code}_branch_data
branch_bat_var=${branch_code}_branch_bat
branch_data=${!branch_data_var}
branch_bat=${!branch_bat_var}
[ ! $branch_data ] && branch_data=$branch_code
[ ! $branch_bat ] && branch_bat=$branch_code
echo " data branch: $branch_data"
echo " bat branch: $branch_bat"
# update gamedata
echo "updating gamedata ..."
rm src/game/app/gamedata/data -rf
rm src/game/app/gamedata/filter -rf
svn export --force "${SVN_GAMEDATA_URL}_${branch_data}/go" src/game/app/gamedata > /dev/null
svn export --force "${SVN_GAMEDATA_URL}_${branch_data}/filter" src/game/app/gamedata/filter > /dev/null
REV_GAMEDATA=$(svn export --force "${SVN_GAMEDATA_URL}_${branch_data}/json" src/game/app/gamedata/data|tail -1)
[[ "$REV_GAMEDATA" != "Exported revision"* ]] && exit 1
echo " gamedata: $REV_GAMEDATA"
# update calcbattle
echo "updating bat code ..."
rm src/bat/calcbattle -rf
REV_CALCBATTLE=$(svn export --force "${SVN_CALCBATTLE_URL}/${branch_bat}/bat/calcbattle" src/bat/calcbattle|tail -1)
[[ "$REV_CALCBATTLE" != "Exported revision"* ]] && exit 1
echo " calcbattle: $REV_CALCBATTLE"
# write version file
echo "preparing version file ..."
cat > .ver/VER_${branch_code} << EOF
gamedata:
$branch_data
$REV_GAMEDATA
calcbattle:
$branch_bat
$REV_CALCBATTLE
EOF
# done
echo -e "\033[32mDone.\033[0m"
| true |
fa3579208bed383edae5274864c3e872a3805b56 | Shell | danealexander/dotfiles | /bin/validate | UTF-8 | 683 | 3.828125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
#
# validate
#
# Validate folder
dir=$1
if [[ ! -f "~/vnu.jar" ]]; then
echo "Missing ~/vnu.jar"
fi
if [[ -n "$dir" ]]; then
cd "$dir"
else
echo "Missing argument for folder to validate"
exit 1
fi
out=validation-report.md
echo $(pwd)/$out
for file in $(find . -type f -name '*.html'); do
# @todo Use $(basename $a) to get filename
# Loop throuh array of exclude files
# htmltest.html
filename=$(basename "$file")
if [ $filename = htmltest.html ] ||
[ $filename = html-test.html ]; then
continue
fi
echo "### $file" >> $out;
java -jar ~/vnu.jar --skip-non-html --errors-only --format text $file 2>> $out
echo "" >> $out;
echo "" >> $out;
done | true |
47e199b0ba18763028ae8423951707e8a087e090 | Shell | Open-Source-Community/HackerRank-Linux-Shell-Solutions | /Concatenate-an-array-with-itself.sh | UTF-8 | 71 | 2.609375 | 3 | [] | no_license | d=""
readarray a
for t in ${a[@]}
do
d+=" $t"
done
echo $d $d $d
| true |
312cb0c59228dac6282cd9a1cc8c039b84f73440 | Shell | ds2/docker-templates | /cicd/jmeter/runTest.sh | UTF-8 | 306 | 2.875 | 3 | [] | no_license | #!/usr/bin/env bash
JM_RESULTS=/jmeter-results
JM_LOGS=/jmeter-logs
JM_SRC=/jmeter-src
# JM_PROPS must be given by ENV
MYCMD="jmeter -n -t ${JM_SRC}/$JMETER_FILENAME -l ${JM_RESULTS}/sample-results.jtl -j ${JM_LOGS}/jmeter.log -e -o ${JM_RESULTS} ${JM_PROPS}"
echo "Will run now: $MYCMD"
eval "$MYCMD"
| true |
6639de1c0b3b8f96c46d7052479aca47bb458133 | Shell | flifloo/TelegramEDT | /update.sh | UTF-8 | 452 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
if [ -f edt.db ]; then
if [ ! -d alembic ]; then
alembic init alembic
sed -i '/sqlalchemy.url/s/= .*/= sqlite:\/\/\/edt.db/' alembic.ini
sed -i "/target_metadata = None/s/target_metadata.*/import os, sys\nsys.path.append(os.getcwd())\nfrom TelegramEDT import Base\ntarget_metadata = Base.metadata/" alembic/env.py
fi
alembic revision --autogenerate -m "Auto upgrade"
alembic upgrade head
else
echo "No database !"
fi
| true |
a8669150f5766f525547c22ebb013876bfb22474 | Shell | lcostea/snyk | /test/smoke/spec/snyk_code_spec.sh | UTF-8 | 1,128 | 3.078125 | 3 | [
"Apache-2.0"
] | permissive | #shellcheck shell=sh
Describe "Snyk Code test command"
Before snyk_login
After snyk_logout
Describe "snyk code test"
run_test_in_subfolder() {
cd ../fixtures/sast/shallow_sast_webgoat || return
snyk code test . --org=snyk-cli-smoke-test-with-snykcode
}
It "finds vulns in a project in the same folder"
When run run_test_in_subfolder
The output should include "Static code analysis"
The output should include "✗ [High] SQL Injection"
The status should be failure
The stderr should equal ""
End
End
Describe "code test with SARIF output"
It "outputs a valid SARIF with vulns"
When run snyk code test ../fixtures/sast/shallow_sast_webgoat --sarif --org=snyk-cli-smoke-test-with-snykcode
The status should be failure # issues found
The output should include '"$schema": "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json"'
The output should include '"name": "SnykCode"'
The stderr should equal ""
The result of function check_valid_json should be success
End
End
End | true |
b77481d85050e13b37fad7d74e55f23f63bc8d06 | Shell | vikcron12/shell_startup | /copy_to_home.sh | UTF-8 | 238 | 3.953125 | 4 | [] | no_license | #!/bin/bash
#Decsription: copy all files to home dir
if [[ $# -lt 1 ]]; then
echo -n "Please provide file names to be coped to home dir .. :"
read file_names
else
file_names=$@
fi
for file in ${file_names}
do
echo ${file}
done
| true |
17401a5ef2dac879665f3fa1ee6765b753b1d955 | Shell | srod/dotfiles | /config/general/.bashrc | UTF-8 | 1,956 | 3.9375 | 4 | [
"MIT"
] | permissive |
#######################################################################
# ~/.bashrc - Custom PS1 Prompt for Bash Shell #
#######################################################################
# I don't much use Bash me, prefer ZSH of Fish... #
# When that's not availible, let's at least make things look half ok #
#######################################################################
# Originated from: https://github.com/Lissy93/minimal-terminal-prompt #
# Licensed under MIT (C) Alicia Sykes 2022 <https://aliciasykes.com> #
#######################################################################
## Define all the colors
COL_USER_HOST='\e[35m' # The color of 'user@host.ext'
COL_CURSOR='\e[35m' # The color of the trailing cursor arrow
COL_CURRENT_PATH='\e[37m' # The color of the current directory full path
COL_GIT_STATUS_CLEAN='\e[93m' # Color of fresh git branch name, with NO changes
COL_GIT_STATUS_CHANGES='\e[92m' # Color of git branch, affter its diverged from remote
## Text Styles
RESET='\e[0m' # What color will comand outputs be in
BOLD='\e[1m' # BOLD
## Config
SHOW_GIT=true
## If this is a valid git repo, echo the current branch name
parse_git_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}
## Echos what color the git branch should be (depending on changes)
check_for_git_changes() {
if [[ "$(parse_git_branch)" ]]; then
if [[ $(git status --porcelain) ]]; then
echo ${COL_GIT_STATUS_CLEAN}
else
echo ${COL_GIT_STATUS_CHANGES}
fi
fi
}
## Build-up what will be the final PS1 string
set_bash_prompt(){
PS1="${RESET}"
PS1+="${BOLD}${COL_USER_HOST}\u @ \h ${RESET}${COL_CURRENT_PATH}\w "
if [ "$SHOW_GIT" = true ] ; then
PS1+="$(check_for_git_changes)"
PS1+="$(parse_git_branch)"
fi
PS1+="\n${COL_CURSOR}└─▶ "
PS1+="${RESET}"
}
## Done, now just set the PS1 prompt :)
PROMPT_COMMAND=set_bash_prompt
| true |
3d3218f743353756cc38eed8ca18dcad4f3219a5 | Shell | FauxFaux/debian-control | /d/dq/dqcache-run_20161210-1_all/postrm | UTF-8 | 184 | 2.578125 | 3 | [] | no_license | #!/bin/sh -e
set -e
if [ x"$1" != "xpurge" ]; then
exit 0
fi
rm -rf /var/log/dqcache /etc/dqcache /var/lib/dqcache
deluser --force dqcache || :
deluser --force dqcachelog || :
| true |
468d861e906667352d9904a0344ceed74a60e842 | Shell | praveen-prakash/mixpanel-unity | /native/build/install_dependencies.sh | UTF-8 | 1,031 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | # Update homebrew
brew doctor
brew update
# Install dependencies
brew install swig
brew install cmake
brew install ant
brew install maven
brew install gradle
# Install casks
brew tap caskroom/cask
brew cask install intel-haxm
brew cask install android-sdk
brew cask install ./android-ndk-unity.rb
brew cask install android-studio
brew cask install unity
# Update Android SDK
android update sdk --no-ui
export ANT_HOME=/usr/local/opt/ant
export MAVEN_HOME=/usr/local/opt/maven
export GRADLE_HOME=/usr/local/opt/gradle
export ANDROID_SDK=/usr/local/share/android-sdk
export ANDROID_NDK=/usr/local/share/android-ndk-unity
echo "Add these environment variables to your .bashrc / .zshrc:"
echo "export ANT_HOME=/usr/local/opt/ant"
echo "export MAVEN_HOME=/usr/local/opt/maven"
echo "export GRADLE_HOME=/usr/local/opt/gradle"
echo "export ANDROID_SDK=/usr/local/share/android-sdk"
echo "export ANDROID_NDK=/usr/local/share/android-ndk-unity"
echo "These environment variables have already been exported to this terminal session."
| true |
42735dd11e73b10253a3a6000defb03f32893da1 | Shell | dsorrentino/director-tools | /functions/undercloud/remote_undercloud_deploy.sh | UTF-8 | 1,046 | 3.5625 | 4 | [] | no_license | #!/bin/bash
SCRIPT_NAME=remote_undercloud_deploy
source ~/common.sh
LOG="${SCRIPT_NAME}.log"
stdout ""
stdout "${SCRIPT_NAME} start"
stdout ""
SUDO=''
if [[ "$(whoami)" != "root" ]]
then
stdout "WARNING: Expected this to be run as root."
if [[ "$(sudo whoami)" != "root" ]]
then
stderr 'Terminating deployment.'
exit 1
else
stdout "Verified user has sudo capabilities. Will use sudo as needed."
SUDO="sudo"
fi
fi
if [[ ! -f ~/undercloud.conf ]]
then
stderr "No undercloud.conf found. Exiting."
exit 500
fi
stdout "Starting deployment."
openstack undercloud install | tee -a ${LOG}
if [[ ! -z "$(grep keystone /etc/passwd)" ]]
then
stdout "Adding keystone cronjob per: https://access.redhat.com/solutions/968883"
${SUDO} crontab -l -u keystone 2>/dev/null; echo '01 * * * * /usr/bin/keystone-manage token_flush >/dev/null 2>&1' | ${SUDO} crontab -u keystone -
if [[ $? -ne 0 ]]
then
stderr "Something went wrong with adding the cronjob."
fi
fi
stdout ""
stdout "${SCRIPT_NAME} end"
stdout ""
| true |
c1aba3be7353f14134f04a11b93bc8c0cb86f216 | Shell | liveplant/liveplant-server | /fmt-check | UTF-8 | 314 | 3.8125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
gofiles=$(ls *.go)
[ -z "$gofiles" ] && exit 0
unformatted=$(gofmt -l $gofiles)
[ -z "$unformatted" ] && exit 0
# Some files are not gofmt'd. Print message and fail.
echo >&2 "Go files must be formatted with gofmt. Please run:"
for fn in $unformatted; do
echo >&2 " gofmt -w $PWD/$fn"
done
exit 1
| true |
d6268293d61f85e3650b630adf22f2ffd3e2d14f | Shell | limyj0708/bankruptcy_prediction | /01_Data_Crawling/News_Crawling/shell script/upload_one_file_to_instances.sh | UTF-8 | 1,076 | 3.046875 | 3 | [] | no_license | # 파일 하나를 gcloud compute instance들에 한 번에 올리는 스크립트.
# 여기서는 크롤링 코드 py파일을 업로드 했다.
#!/bin/sh
start_num=1
last_num=32
((instance_num="${start_num}"))
machine_prefix=" cw-machine-"
scp_upload_prefix="gcloud compute scp "
zone_prefix=" --zone"
name_tosend="SeoulK/SeoulK_crawl_body.py"
remote_dir=":~/SeoulK"
while (("${instance_num}"<=last_num)); do
if (("${instance_num}"<=(start_num+7) )); then
zone=" us-east1-b"
elif (( "${instance_num}">=(start_num+8) && "${instance_num}"<=(start_num+15) )); then
zone=" us-west1-a"
elif (( "${instance_num}">=(start_num+16) && "${instance_num}"<=(start_num+23) )); then
zone=" us-central1-a"
else
zone=" northamerica-northeast1-a"
fi
machine_name=$machine_prefix$instance_num
echo $scp_upload_prefix$name_tosend$zone_prefix$zone$machine_name$remote_dir
$scp_upload_prefix$name_tosend$zone_prefix$zone$machine_name$remote_dir &
((instance_num="${instance_num}"+1))
done
wait
echo "All processes are done!" | true |
4f584f3629aee89dfa117683fffdeac8ac9dd397 | Shell | anjn/vhls-riscv | /vhls-check | UTF-8 | 22,902 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env bash
# This script was generated by bashly (https://github.com/DannyBen/bashly)
# Modifying it manually is not recommended
# :command.root_command
root_command() {
# :src/root_command.sh
#inspect_args
#set -x
# Get arguments and flags
source_dir=${args[source_dir]}
output=${args[--output]}
work_dir=${args[--work-dir]}
csim_only=${args[--csim-only]}
hls_only=${args[--hls-only]}
cosim_only=${args[--cosim-only]}
synthesis=${args[--synthesis]}
verbose=${args[--verbose]}
docker=${args[--docker]}
force=${args[--force]}
log_dir=${args[--log-dir]}
log_limit=${args[--log-limit]}
progress=${args[--progress]}
set -ue
source_dir=$(readlink -f $source_dir)
work_dir=$(readlink -f $work_dir)
self=$(readlink -f $BASH_SOURCE)
self_base=$(basename $self)
self_dir=$(dirname $self)
logout=/dev/null
if [[ $verbose ]] ; then
logout=/dev/stdout
fi
# Argument check
if [[ ! -e $source_dir ]] ; then
red_bold Error: "Source directory '$source_dir' doesn't exist!"
exit 1
fi
if [[ $output ]] ; then
output=$(readlink -f $output)
if [[ $force ]] ; then
echo > $force
elif [[ -e $output ]] ; then
red_bold Error: "Output file '$output' already exists!"
exit 1
fi
fi
if [[ $log_dir ]] ; then
log_dir=$(readlink -f $log_dir)
fi
if [[ ! $force ]] && [[ -e $work_dir ]] ; then
red_bold Error: "Working directory '$work_dir' already exists!"
exit 1
fi
# Docker
if [[ $docker ]] ; then
repo=$self_base
tag=latest
# Build docker
#docker build -t $repo:$tag $self_dir
build_docker_image -t $repo:$tag
# Run docker
run_opts=
if [[ $output ]] ; then
touch $output
run_opts="$run_opts -v $output:/tmp/output.txt:rw"
fi
if [[ $log_dir ]] ; then
mkdir $log_dir
run_opts="$run_opts -v $log_dir:/tmp/log:rw"
fi
cmd_opts=
if [[ $synthesis ]] ; then
cmd_opts="$cmd_opts --synthesis"
fi
if [[ $log_limit ]] ; then
cmd_opts="$cmd_opts --log-limit $log_limit"
fi
if [[ $progress ]] ; then
cmd_opts="$cmd_opts --progress"
fi
docker run \
--rm \
-v /tools/Xilinx:/tools/Xilinx:ro \
-v $self:/tmp/$self_base:ro \
-v $source_dir:/tmp/source:ro \
--cpus=1 \
--memory 16g \
--env LIBRARY_PATH=/usr/lib/x86_64-linux-gnu \
$run_opts \
$repo:$tag \
/tmp/$self_base --force --output /tmp/output.txt --log-dir /tmp/log $cmd_opts /tmp/source
exit
fi
# Source files
regulation=$source_dir/regulation.txt
tb=$source_dir/tb.cpp
kernel_source=$source_dir/kernel.cpp
kernel_header=$source_dir/kernel.hpp
# Argument check
if [ ! -e $regulation ] ; then
red_bold "Error: Regulation file ($regulation) doesn't exist!"
exit 1
fi
if [ ! -e $tb ] ; then
red_bold "Error: Testbench ($tb) doesn't exist!"
exit 1
fi
if [ ! -e $kernel_source ] ; then
red_bold "Error: Kernel source ($kernel_source) doesn't exist!"
exit 1
fi
if [ ! -e $kernel_header ] ; then
red_bold "Error: Kernel header ($kernel_header) doesn't exist!"
exit 1
fi
# Load regulation
target_clock_period_ns=10
flow=vitis
vitis_version=2021.1
csim_timeout=1m
hls_timeout=5m
cosim_timeout=5m
syn_timeout=30m
cxxflags=
ldflags=
extra_files=
source $regulation
if [[ $extra_files ]] ; then
extra_files=$(readlink -f $extra_files)
fi
# Tool setup
source /tools/Xilinx/Vitis/$vitis_version/settings64.sh
# AR# 69355
if [[ -e /usr/lib/x86_64-linux-gnu ]] ; then
if [[ -z ${LIBRARY_PATH+x} ]] ; then
export LIBRARY_PATH=/usr/lib/x86_64-linux-gnu
else
export LIBRARY_PATH=/usr/lib/x86_64-linux-gnu:$LIBRARY_PATH
fi
fi
# Create working directory
mkdir -p $work_dir
# Check
if [[ $csim_only ]] ; then
check_csim
exit
fi
if [[ $hls_only ]] ; then
check_hls
exit
fi
if [[ $cosim_only ]] ; then
check_hls
check_cosim
exit
fi
check_bytes
check_csim
check_hls
check_cosim
if [[ $synthesis ]] ; then
check_syn
fi
get_qor
get_sim_time
output_summary normal_exit=1
}
# :command.version_command
version_command() {
echo "$version"
}
# :command.usage
vhls_check_usage() {
if [[ -n $long_usage ]]; then
printf "vhls-check - Vitis HLS checker\n"
echo
else
printf "vhls-check - Vitis HLS checker\n"
echo
fi
printf "Usage:\n"
printf " vhls-check [SOURCE_DIR] [options]\n"
printf " vhls-check --help | -h\n"
printf " vhls-check --version\n"
echo
if [[ -n $long_usage ]]; then
printf "Options:\n"
# :command.usage_fixed_flags
echo " --help, -h"
printf " Show this help\n"
echo
echo " --version"
printf " Show version number\n"
echo
# :command.usage_flags
# :flag.usage
echo " --output, -o OUTPUT"
printf " Output summary to the specified file\n"
echo
# :flag.usage
echo " --work-dir, -w WORK_DIR"
printf " Working directory\n"
printf " Default: ./work\n"
echo
# :flag.usage
echo " --csim-only"
printf " Run csim\n"
echo
# :flag.usage
echo " --hls-only"
printf " Run hls\n"
echo
# :flag.usage
echo " --cosim-only"
printf " Run hls and cosim\n"
echo
# :flag.usage
echo " --synthesis, -s"
printf " Run synthesis for resource/timing estimation\n"
echo
# :flag.usage
echo " --verbose, -v"
printf " Verbose log output\n"
echo
# :flag.usage
echo " --docker, -d"
printf " Run in docker container environment\n"
echo
# :flag.usage
echo " --force, -f"
printf " Force output even if files or directories already exist\n"
echo
# :flag.usage
echo " --log-dir, -l LOG_DIR"
printf " Log output directory\n"
echo
# :flag.usage
echo " --log-limit LOG_LIMIT"
printf " Limit log output in bytes\n"
echo
# :flag.usage
echo " --progress"
printf " Show progress\n"
echo
# :command.usage_args
printf "Arguments:\n"
# :argument.usage
echo " SOURCE_DIR"
printf " Target source directory\n"
printf " Default: ./\n"
echo
fi
}
# :command.inspect_args
inspect_args() {
readarray -t sorted_keys < <(printf '%s\n' "${!args[@]}" | sort)
if (( ${#args[@]} )); then
echo args:
for k in "${sorted_keys[@]}"; do echo "- \${args[$k]} = ${args[$k]}"; done
else
echo args: none
fi
}
# :command.user_lib
# :src/lib/utils.sh
output_summary() {
if [[ $output ]] ; then
echo "$*" >> $output
fi
}
copy_log() {
if [[ $log_dir ]] ; then
if [[ $log_limit ]] ; then
head -c $log_limit "$1" > $log_dir/$(basename "$1")
else
cp "$1" $log_dir/$(basename "$1")
fi
fi
}
bold_nnl() { printf "\e[1m%b\e[0m" "$*"; }
print_result() {
bold_nnl "$1"; shift
echo "$*"
}
print_pass() {
bold_nnl "$1"; shift
green "$*"
}
print_fail() {
bold_nnl "$1"; shift
red "$*"
}
print_progress() {
if [[ $progress ]] ; then
echo testing_$1
fi
}
check_bytes() {
local bytes
bytes=$(cat $kernel_header $kernel_source | wc -c | awk '{print $1}')
output_summary bytes=$bytes
print_result "Bytes of kernel code: " $bytes
}
# :src/lib/colors.sh
# ---
# Color functions
# This file is a part of Bashly standard library
#
# Usage:
# Use any of the functions below to color or format a portion of a string.
#
# echo "before $(red this is red) after"
# echo "before $(green_bold this is green_bold) after"
#
# ---
red() { printf "\e[31m%b\e[0m\n" "$*"; }
green() { printf "\e[32m%b\e[0m\n" "$*"; }
yellow() { printf "\e[33m%b\e[0m\n" "$*"; }
blue() { printf "\e[34m%b\e[0m\n" "$*"; }
magenta() { printf "\e[35m%b\e[0m\n" "$*"; }
cyan() { printf "\e[36m%b\e[0m\n" "$*"; }
bold() { printf "\e[1m%b\e[0m\n" "$*"; }
underlined() { printf "\e[4m%b\e[0m\n" "$*"; }
red_bold() { printf "\e[1;31m%b\e[0m\n" "$*"; }
green_bold() { printf "\e[1;32m%b\e[0m\n" "$*"; }
yellow_bold() { printf "\e[1;33m%b\e[0m\n" "$*"; }
blue_bold() { printf "\e[1;34m%b\e[0m\n" "$*"; }
magenta_bold() { printf "\e[1;35m%b\e[0m\n" "$*"; }
cyan_bold() { printf "\e[1;36m%b\e[0m\n" "$*"; }
red_underlined() { printf "\e[4;31m%b\e[0m\n" "$*"; }
green_underlined() { printf "\e[4;32m%b\e[0m\n" "$*"; }
yellow_underlined() { printf "\e[4;33m%b\e[0m\n" "$*"; }
blue_underlined() { printf "\e[4;34m%b\e[0m\n" "$*"; }
magenta_underlined() { printf "\e[4;35m%b\e[0m\n" "$*"; }
cyan_underlined() { printf "\e[4;36m%b\e[0m\n" "$*"; }
# :src/lib/docker.sh
build_docker_image() {
local dockerfile=$(mktemp)
cat << 'EOS' > $dockerfile
FROM ubuntu:18.04
SHELL ["/bin/bash", "-c"]
ENV DEBIAN_FRONTEND=noninteractive
# Timezone
ENV TZ=Asia/Tokyo
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
# Install packages
RUN apt-get update -y && apt-get install -y \
build-essential \
bzip2 \
libc6-i386 \
libc6-dev-i386 \
gcc-multilib \
g++-multilib \
git \
libfontconfig1 \
libglib2.0-0 \
sudo \
nano \
locales \
libxext6 \
libxrender1 \
libxtst6 \
libgtk2.0-0 \
build-essential \
unzip \
ruby \
ruby-dev \
pkg-config \
libprotobuf-dev \
protobuf-compiler \
python-protobuf \
python-pip \
bc \
time \
libboost-dev \
&& apt autoclean -y \
&& apt autoremove -y \
&& rm -rf /var/lib/apt/lists/*
# Locale
RUN locale-gen en_US.UTF-8
EOS
docker build $* -f $dockerfile .
rm $dockerfile
}
# :src/lib/get_result.sh
######################################
# Get QoR
######################################
get_qor() {
if [[ $synthesis ]] ; then
local report_xml=$work_dir/prj_hls_cosim/solution/impl/report/verilog/kernel_export.xml
if [[ $vitis_version == "2021.1" ]] ; then
report_xml=$work_dir/prj_hls_cosim/solution/impl/report/verilog/export_syn.xml
fi
# Get resource/timing estimation from synthesis report
ruby << EOS >> $work_dir/qor.txt
require 'rexml/document'
doc = REXML::Document.new(File.new("$report_xml"))
puts "ff=#{doc.elements['profile/AreaReport/Resources/FF'].text}"
puts "lut=#{doc.elements['profile/AreaReport/Resources/LUT'].text}"
puts "dsp=#{doc.elements['profile/AreaReport/Resources/DSP'].text}"
puts "bram=#{doc.elements['profile/AreaReport/Resources/BRAM'].text}"
puts "uram=#{doc.elements['profile/AreaReport/Resources/URAM'].text}"
puts "clock_period=#{doc.elements['profile/TimingReport/AchievedClockPeriod'].text}"
EOS
else
# Get resource/timing estimation from hls report
ruby << EOS >> $work_dir/qor.txt
require 'rexml/document'
doc = REXML::Document.new(File.new("$work_dir/prj_hls_cosim/solution/syn/report/csynth.xml"))
puts "ff=#{doc.elements['profile/AreaEstimates/Resources/FF'].text}"
puts "lut=#{doc.elements['profile/AreaEstimates/Resources/LUT'].text}"
puts "dsp=#{doc.elements['profile/AreaEstimates/Resources/DSP'].text}"
puts "bram=#{doc.elements['profile/AreaEstimates/Resources/BRAM_18K'].text}"
puts "uram=#{doc.elements['profile/AreaEstimates/Resources/URAM'].text}"
puts "clock_period=#{doc.elements['profile/PerformanceEstimates/SummaryOfTimingAnalysis/EstimatedClockPeriod'].text}"
EOS
fi
#eval $(grep clock_period $work_dir/qor.txt)
eval $(cat $work_dir/qor.txt)
if [[ $output ]] ; then
cat $work_dir/qor.txt >> $output
fi
print_result "Resource usage"
print_result " FF : " $ff
print_result " LUT : " $lut
print_result " DSP : " $dsp
print_result " BRAM : " $bram
print_result " URAM : " $uram
print_result "Clock period (ns): " $clock_period
print_result "Clock frequency (MHz): " $(echo "1000/$clock_period" | bc)
}
######################################
# Get simulation time
get_sim_time() {
#sim_start=$(grep -e '^// RTL Simulation .* \[0\.00%\]' $work_dir/vitis_hls.log | awk -F @ '{print $2}' | sed 's/[^0-9]//g')
#sim_end=$(grep -e '^// RTL Simulation .* \[100\.00%\]' $work_dir/vitis_hls.log | awk -F @ '{print $2}' | sed 's/[^0-9]//g')
local sim_start=$(grep --text -e '^// RTL Simulation .* @ "[0-9]*"' $work_dir/cosim.log | head -n 1 | awk -F @ '{print $2}' | sed 's/[^0-9]//g')
local sim_end=$(grep --text -e '^// RTL Simulation .* @ "[0-9]*"' $work_dir/cosim.log | tail -n 1 | awk -F @ '{print $2}' | sed 's/[^0-9]//g')
local sim_time=$(echo "($sim_end-$sim_start)/1000" | bc)
local sim_cycle=$(echo $sim_time/$target_clock_period_ns | bc)
local sim_time=$(echo $sim_cycle*$clock_period | bc)
output_summary sim_cycle=$sim_cycle
output_summary sim_time=$sim_time
print_result "Simulation cycle: " $sim_cycle
print_result "Simulation time (ns): " $sim_time
}
# :src/lib/csim_checker.sh
######################################
# Check csim
######################################
check_csim() {
print_progress csim
pushd $work_dir > /dev/null
local csim_cxxflags="$cxxflags -I$source_dir"
local csim_result=
local csim_fail=1
local csim_timeout_error=0
local csim_compile_error=0
local csim_runtime_error=0
local csim_error=0
local csim_mismatch=0
cat << EOS > csim.tcl
open_project -reset prj_csim
add_files -cflags "$csim_cxxflags" $kernel_source
add_files -cflags "$csim_cxxflags -DCSIM" -tb $tb
EOS
if [[ $extra_files ]] ; then
echo "add_files \"$extra_files\"" >> csim.tcl
fi
cat << EOS >> csim.tcl
set_top kernel
open_solution -flow_target $flow solution
set_part xcu200-fsgd2104-2-e
create_clock -period ${target_clock_period_ns}ns -name default
csim_design -ldflags "$ldflags"
exit
EOS
set +e
timeout $csim_timeout time vitis_hls -f csim.tcl |& tee csim.log > $logout
local exit_code=${PIPESTATUS[0]}
set -e
if [ $exit_code -eq 124 ] ; then
csim_timeout_error=1
csim_result="Timeout ($csim_timeout)"
elif grep --text -e "^ERROR:" $work_dir/vitis_hls.log | grep "compilation error" > /dev/null ; then
csim_compile_error=1
csim_result="Compile error, see log file: $work_dir/csim.log"
elif grep --text -e "^ERROR:" $work_dir/vitis_hls.log | grep "CSim failed with errors" > /dev/null ; then
csim_runtime_error=1
csim_result="Runtime error, see log file: $work_dir/csim.log"
elif grep --text -e "^ERROR:" $work_dir/vitis_hls.log | grep "nonzero return value" > /dev/null ; then
csim_mismatch=1
csim_result="Mismatch"
elif [ $exit_code -ne 0 ] ; then
csim_error=1
csim_result="Unknown error (exit code: $exit_code), see log file: $work_dir/csim.log"
else
csim_fail=0
csim_result="Pass"
fi
output_summary csim_fail=$csim_fail
output_summary csim_timeout=$csim_timeout_error
output_summary csim_compile_error=$csim_compile_error
output_summary csim_runtime_error=$csim_runtime_error
output_summary csim_error=$csim_error
output_summary csim_mismatch=$csim_mismatch
copy_log csim.log
if [ $csim_fail -ne 0 ] ; then
print_fail "CSim: " $csim_result
exit
else
print_pass "CSim: " $csim_result
fi
popd > /dev/null
}
# :src/lib/syn_checker.sh
######################################
# Check syn
######################################
check_syn() {
# Run Vitis HLS RTL synthesis (export_design) on the project created by the
# earlier HLS step and record pass/timeout status in the run summary.
# Fails the whole run (exit) when synthesis did not complete in time.
print_progress syn
pushd $work_dir > /dev/null
local syn_fail=1
local syn_timeout_error=0
# Declared local for consistency with check_csim; the original leaked
# exit_code and syn_result into the global scope.
local syn_result=
local exit_code
cat << EOS > syn.tcl
open_project prj_hls_cosim
open_solution -flow_target $flow solution
export_design -flow syn -rtl verilog -format ip_catalog
exit
EOS
# Temporarily allow failures so the timeout status can be inspected manually.
set +e
timeout $syn_timeout time vitis_hls -f syn.tcl |& tee syn.log > $logout
# PIPESTATUS[0] is the status of "timeout ... vitis_hls", not of tee.
exit_code=${PIPESTATUS[0]}
set -e
# timeout(1) exits with 124 when the time limit was reached.
if [ $exit_code -eq 124 ] ; then
syn_timeout_error=1
syn_result="Timeout ($syn_timeout)"
else
syn_fail=0
syn_result=Pass
fi
output_summary syn_fail=$syn_fail
output_summary syn_timeout=$syn_timeout_error
#copy_log syn.log
if [ $syn_fail -ne 0 ] ; then
print_fail "Syn: " $syn_result
exit
else
print_pass "Syn: " $syn_result
fi
popd > /dev/null
}
# :src/lib/cosim_checker.sh
######################################
# Check cosim
######################################
check_cosim() {
# Run C/RTL co-simulation on the synthesized design and classify the result
# (pass / timeout / mismatch / error) into the run summary. Exits the whole
# script on failure.
print_progress cosim
pushd $work_dir > /dev/null
local cosim_cxxflags="$cxxflags -I$source_dir"
local cosim_result=
local cosim_fail=1
local cosim_timeout_error=0
local cosim_error=0
local cosim_mismatch=0
# Build cosim.tcl in pieces: project + testbench first, optional extra
# files, then the solution/cosim commands.
cat << EOS > cosim.tcl
open_project prj_hls_cosim
add_files -cflags "$cosim_cxxflags -DCOSIM" -tb $tb
EOS
if [[ $extra_files ]] ; then
echo "add_files \"$extra_files\"" >> cosim.tcl
fi
cat << EOS >> cosim.tcl
open_solution -flow_target $flow solution
cosim_design -ldflags "$ldflags"
exit
EOS
# Allow failure so the exit code can be classified below.
set +e
timeout $cosim_timeout time vitis_hls -f cosim.tcl |& tee cosim.log > $logout
# Status of the "timeout ... vitis_hls" stage, not of tee.
exit_code=${PIPESTATUS[0]}
set -e
# 124 = timeout(1) hit the limit; otherwise scan the tool log:
# "nonzero return value" means the testbench reported a mismatch, any other
# ERROR: line is a generic cosim error.
if [ $exit_code -eq 124 ] ; then
cosim_timeout_error=1
cosim_result="Timeout ($cosim_timeout)"
elif grep --text -e "^ERROR:" $work_dir/vitis_hls.log | grep "nonzero return value" > /dev/null ; then
cosim_mismatch=1
cosim_result="Mismatch"
elif grep --text -e "^ERROR:" $work_dir/vitis_hls.log > /dev/null ; then
cosim_error=1
cosim_result="Error, see log file: $work_dir/cosim.log"
elif [ $exit_code -ne 0 ] ; then
cosim_error=1
cosim_result="Unknown error, see log file: $work_dir/cosim.log"
else
cosim_fail=0
cosim_result="Pass"
fi
output_summary cosim_fail=$cosim_fail
output_summary cosim_timeout=$cosim_timeout_error
output_summary cosim_error=$cosim_error
output_summary cosim_mismatch=$cosim_mismatch
copy_log cosim.log
if [ $cosim_fail -ne 0 ] ; then
print_fail "CoSim: " $cosim_result
exit
else
print_pass "CoSim: " $cosim_result
fi
popd > /dev/null
}
# :src/lib/hls_checker.sh
######################################
# Check hls
######################################
check_hls() {
# Run C synthesis (csynth_design) of the kernel into a fresh project,
# classify the outcome, and exit the script on failure. Later steps
# (check_syn / check_cosim) reuse the prj_hls_cosim project created here.
print_progress hls
pushd $work_dir > /dev/null
local hls_cxxflags="$cxxflags -I$source_dir"
local hls_result=
local hls_fail=1
local hls_timeout_error=0
local hls_error=0
cat << EOS > hls.tcl
open_project -reset prj_hls_cosim
add_files -cflags "$hls_cxxflags" $kernel_source
set_top kernel
open_solution -flow_target $flow solution
set_part xcu200-fsgd2104-2-e
create_clock -period ${target_clock_period_ns}ns -name default
csynth_design
exit
EOS
# Allow failure so the exit code can be classified below.
set +e
timeout $hls_timeout time vitis_hls -f hls.tcl |& tee hls.log > $logout
# Status of the "timeout ... vitis_hls" stage, not of tee.
exit_code=${PIPESTATUS[0]}
set -e
# 124 = timeout(1) hit; an ERROR: line or a nonzero status is an HLS error;
# a missing csynth report also counts as failure even if the tool exited 0.
if [ $exit_code -eq 124 ] ; then
hls_timeout_error=1
hls_result="Timeout ($hls_timeout)"
elif grep --text -e "^ERROR:" $work_dir/vitis_hls.log > /dev/null ; then
hls_error=1
hls_result="HLS error, see log file: $work_dir/hls.log"
elif [ $exit_code -ne 0 ] ; then
hls_error=1
hls_result="HLS unknown error, see log file: $work_dir/hls.log"
elif [ ! -e $work_dir/prj_hls_cosim/solution/syn/report/csynth.xml ] ; then
hls_error=1
hls_result="HLS report not found, see log file: $work_dir/hls.log"
else
hls_fail=0
hls_result=Pass
fi
output_summary hls_fail=$hls_fail
output_summary hls_timeout=$hls_timeout_error
output_summary hls_error=$hls_error
copy_log hls.log
if [ $hls_fail -ne 0 ] ; then
print_fail "HLS: " $hls_result
exit
else
print_pass "HLS: " $hls_result
fi
popd > /dev/null
}
# :command.command_functions
# :command.parse_requirements
parse_requirements() {
# Parse all command-line arguments into the global associative array "args"
# (declared by run()). Generated by bashly; the ":command.*" markers below
# are generator annotations. --version/--help short-circuit and exit; one
# positional argument is accepted as args[source_dir].
# :command.fixed_flag_filter
case "$1" in
--version )
version_command
exit
;;
--help | -h )
long_usage=yes
vhls_check_usage
exit 1
;;
esac
# :command.environment_variables_filter
# :command.dependencies_filter
# :command.command_filter
action="root"
# :command.required_args_filter
# :command.required_flags_filter
# :command.parse_requirements_while
while [[ $# -gt 0 ]]; do
key="$1"
case "$key" in
# :flag.case
--output | -o )
if [[ $2 ]]; then
args[--output]="$2"
shift
shift
else
printf "%s\n" "--output requires an argument: --output, -o OUTPUT"
exit 1
fi
;;
# :flag.case
--work-dir | -w )
if [[ $2 ]]; then
args[--work-dir]="$2"
shift
shift
else
printf "%s\n" "--work-dir requires an argument: --work-dir, -w WORK_DIR"
exit 1
fi
;;
# :flag.case
--csim-only )
args[--csim-only]=1
shift
;;
# :flag.case
--hls-only )
args[--hls-only]=1
shift
;;
# :flag.case
--cosim-only )
args[--cosim-only]=1
shift
;;
# :flag.case
--synthesis | -s )
args[--synthesis]=1
shift
;;
# :flag.case
--verbose | -v )
args[--verbose]=1
shift
;;
# :flag.case
--docker | -d )
args[--docker]=1
shift
;;
# :flag.case
--force | -f )
args[--force]=1
shift
;;
# :flag.case
--log-dir | -l )
if [[ $2 ]]; then
args[--log-dir]="$2"
shift
shift
else
printf "%s\n" "--log-dir requires an argument: --log-dir, -l LOG_DIR"
exit 1
fi
;;
# :flag.case
--log-limit )
if [[ $2 ]]; then
args[--log-limit]="$2"
shift
shift
else
printf "%s\n" "--log-limit requires an argument: --log-limit LOG_LIMIT"
exit 1
fi
;;
# :flag.case
--progress )
args[--progress]=1
shift
;;
-* )
printf "invalid option: %s\n" "$key"
exit 1
;;
* )
# :command.parse_requirements_case
# First bare argument is the source directory; a second one is an error.
if [[ ! ${args[source_dir]} ]]; then
args[source_dir]=$1
shift
else
printf "invalid argument: %s\n" "$key"
exit 1
fi
;;
esac
done
# :command.default_assignments
# Defaults when not supplied on the command line.
[[ -n ${args[source_dir]} ]] || args[source_dir]="./"
[[ -n ${args[--work-dir]} ]] || args[--work-dir]="./work"
}
# :command.initialize
initialize() {
# Runs before anything else: establish script metadata and strict mode
# (bashly-generated hook; body comes from src/initialize.sh).
long_usage=''
version="0.1.0"
set -e
# Hook point for early setup, e.g. environment variables such as
# CONFIG_FILE=settings.ini. Deliberately left empty otherwise.
}
# :command.run
run() {
# Entry point: parse the CLI into the global "args" map, then dispatch.
declare -A args
parse_requirements "$@"
if [[ ${args[--version]} ]]; then
version_command
elif [[ ${args[--help]} ]]; then
long_usage=yes
# Fixed: was "vhls-check_usage", which does not match the usage function
# name invoked by parse_requirements (vhls_check_usage) elsewhere in this
# file; the hyphenated spelling would fail with "command not found".
vhls_check_usage
elif [[ $action == "root" ]]; then
root_command
fi
}
initialize
run "$@"
| true |
c7aa0d2d6cc19798be6b2529851c3cae0e3a3303 | Shell | vindalu/vindalu | /scripts/vindalu-ctl | UTF-8 | 1,445 | 3.625 | 4 | [] | no_license | #!/bin/bash
#
# chkconfig: 345 95 5
# description: Vindalu
#
# Service identity and derived install paths.
NAME="vindalu"
APP_HOME="/opt/${NAME}"
CFG_FILE="${APP_HOME}/etc/${NAME}.json"
LOGFILE="${APP_HOME}/log/${NAME}.log"
BIN="${APP_HOME}/bin/${NAME}"
# Options passed to the daemon binary.
OPTS="-c ${CFG_FILE}"
# Command used to find running instances; the trailing \s keeps pgrep from
# matching other binaries whose path merely starts with $BIN.
PGREP="pgrep -f ${BIN}\s"
# Script exit status, updated by start/stop and returned at the bottom.
RETVAL=0
status() {
# Report whether any daemon processes are currently running.
pids=$(${PGREP} | xargs)
if [ -n "$pids" ]; then
echo "${NAME} running... [ ${pids} ]"
else
echo "${NAME}... [ stopped ]"
fi
}
start() {
# Start the daemon unless an instance is already running.
pids=`${PGREP} | xargs`
if [ "$pids" != "" ]; then
echo "${NAME} already running... [ ${pids} ]"
exit 1;
fi
# Detach the daemon from this shell and capture stdout/stderr in the log.
nohup ${BIN} ${OPTS} > ${LOGFILE} 2>&1 &
# Give the process a moment to either settle or crash before re-checking.
sleep 3;
pids=`${PGREP} | xargs`
if [ "$pids" == "" ]; then
echo "${NAME}... [ failed ]"
RETVAL=1
else
echo "${NAME} running... [ ${pids} ]"
RETVAL=0
fi
}
stop() {
# Send SIGTERM to every matching process, then report what is left.
pids=`${PGREP} | xargs`
for pid in ${pids}; do
kill ${pid}
done
# NOTE(review): this re-check runs immediately after the TERM signal, so a
# process that is still shutting down is reported as "running" -- confirm
# whether a short wait is desirable here.
pids=`${PGREP} | xargs`
if [ "$pids" == "" ]; then
echo "${NAME}... [ stopped ]"
RETVAL=0
else
echo "${NAME} running... [ ${pids} ]"
RETVAL=1
fi
}
# Dispatch on the init action; RETVAL is set by the handlers above.
case $1 in
start)
start
;;
stop)
stop
;;
restart)
# Plain stop + start with a short grace period in between.
stop
sleep 2
start
;;
status)
status
;;
*)
echo "$0 [ start | stop | restart | status ]"
RETVAL=1
;;
esac
exit $RETVAL
| true |
a7ef835ade39de03f8c0ea256b93d8573493072e | Shell | sdettmer/config | /bin/multibuild.sh | UTF-8 | 3,858 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# This is multibuild search & exec front end.
# If invoked ANYWHERE below tisc-src/vobs or tisc-src/build, it finds multibuild.sh.
# If invoked with any filter option, it passes all options to multibuild.sh.
# If invoked in a multibuild builddir, it invokes multibuild for this builddir.
# If invoked in directory with multiple builddirs directly below, it invokes it for each.
# Cleanup accidentally created builddirs:
# cd build
# rm */multibuild.info.tmp ; rmdir *
# TODO
# - if started in src without parameters, better build all below build?
# - really good idea to start multibuild multiple times?
# - when started in build/CONFIGNAME with -n "*", it will still use
# -p -t -b and thus not build all names --> using one selector
# removes others? e.g. when -n specified, don't use -t...?
# OR auto-filter only with -n (never use -t... from multibuild.info),
# -n is unique and sufficient anyway
# - per default (no args): "update" modes
# - update all existing ("run for all below build/")
# - update the one that built latest ("rebuilt just the very last)
# - update all that were built with last call ("re-run with last args")
# - at least -d | -D should be "merged" (e.g. with -n) to allow
# things like "rebuild the last, but now with DLU files"
set -e
workingdir=$(pwd)
#echo "in $workingdir"
# If invoked with any filter option, do not try any multibuild.info.
# Note: RTC-232409 ("VERBOSE") is fully handled by multibuild itself.
tryinfo="1"
declare -a args=($@)
for arg in "${args[@]}" ; do
case $arg in
-n|-t|-b|-p) tryinfo="" ;;
esac
done
# Invoke multibuild.sh with multibuild.info as last parameter.
# Sources the info file to pick up $multibuild/$topsrc, then runs it.
useinfo()
{
echo "Using $multibuildinfo"
. "${multibuildinfo}"
# very old tisc-src versions had multibuild.info without $multibuild.
[ "${multibuild}" ] || multibuild="$topsrc/vobs/tisc_ccu-c/load/bld/multibuild.sh"
${multibuild} "$@" "$multibuildinfo"
}
# If invoked in directory with multiple builddirs directly below, it invoke for each.
multimode=""
# Unfortunately, we need absolute path for multibuild.info:
for multibuildinfo in $(pwd)/*/multibuild.info ; do
if [ -e "$multibuildinfo" -a "$tryinfo" ] ; then
multimode=1
useinfo "$@"
fi
done
[ "$multimode" ] && exit 0
# Maybe we already have "vobs" as sibling (special case for tisc-src itself):
[ -d "${workingdir}/vobs" ] && workingdir="${workingdir}/vobs"
# try to find "vobs" directory in current working directory:
# remove last directory until it ends with 'vobs', e.g.
# (the loop condition is non-empty only while $workingdir does NOT end in
# "vobs"; each iteration strips one trailing path component)
while [ ${workingdir##*vobs} ] ; do
# If we have multibuild.info (and no parameters), use its parameters
if [ -e "${workingdir}/multibuild.info" -a "$tryinfo" ] ; then
multibuildinfo="${workingdir}/multibuild.info"
useinfo "$@"
exit 0
fi
# If we have no multibuild.info (and no parameters),
# but multibuild.info.tmp, use its parameters (to avoid
# rebuilding all instead of one if canceled by user)
if [ -e "${workingdir}/multibuild.info.tmp" -a "$tryinfo" ] ; then
multibuildinfo="${workingdir}/multibuild.info.tmp"
useinfo "$@"
exit 0
fi
workingdir=${workingdir%/*}
#echo workingdir=$workingdir
# Maybe we found it as sibling:
[ -d "${workingdir}/vobs" ] && workingdir="${workingdir}/vobs"
done
# No info file found: locate a multibuild.sh below the vobs tree and exec it.
if [ -x "$workingdir/twcs_wcac/load/bld/multibuild.sh" ] ; then
multibuild="$workingdir/twcs_wcac/load/bld/multibuild.sh"
elif [ -x "$workingdir/tisc_ccu-c/load/bld/multibuild.sh" ] ; then
multibuild="$workingdir/tisc_ccu-c/load/bld/multibuild.sh"
else
echo "No multibuild.sh found below $workingdir" >&2
exit 2
fi
#echo multibuild=$multibuild
# exec does the error handling here (gives e.g. "No such file or directory")
exec $multibuild "$@"
echo "Failed to execute multibuild" >&2
exit 2
# vim: et ts=4 sw=4
| true |
b63a07e4a6ffbf4deabd079b7e7032d4c78a568c | Shell | RanMax/SysProg | /Sources/sem1/lab5/02_for_test | UTF-8 | 132 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Print each planet of the (pre-2006) solar system, each followed by a
# separator line and the script's third positional argument.
planets=(Mercury Venus Earth Mars Jupiter Saturn Uranus Neptune Pluto)
for body in "${planets[@]}"
do
printf '%s\n' "$body"
printf '%s\n' " , "
printf '%s\n' "${3}"
done
| true |
27cdf3eba2b6b48a8b70c013320f58607e9c1b78 | Shell | CRImier/pautomount | /pautomount.init | UTF-8 | 1,848 | 3.84375 | 4 | [
"MIT"
] | permissive | #! /bin/sh
### BEGIN INIT INFO
# Provides: pautomount
# Required-Start: $local_fs $syslog $remote_fs
# Required-Stop: $local_fs $syslog $remote_fs
# Should-Start:
# Should-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: start and stop pautomount
# Description: Fully configurable daemon for automount on hotplug
### END INIT INFO
# Fixed PATH for init context plus daemon identity/paths.
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
DAEMON=/usr/bin/pautomount
PIDFILE=/run/pautomount.pid
NAME=pautomount
DESC="Python automount daemon"
# use lsb-base
. /lib/lsb/init-functions
# abort if no executable exists
test -x $DAEMON || exit 0
set -e
# Script exit status; updated by the daemon_* handlers below.
retval=0
daemon_start () {
# Launch the daemon in the background via start-stop-daemon, writing a
# pidfile for later stop/reload. Capture the launcher's exit status so a
# failed start is logged (and reflected in "exit $retval") instead of the
# script aborting under "set -e" before log_end_msg runs.
log_daemon_msg "Starting $DESC" "$NAME"
start-stop-daemon --start -q -b --make-pidfile --pidfile $PIDFILE --exec $DAEMON || retval=$?
log_end_msg $retval;
}
daemon_stop () {
# Stop the daemon by killing the PID recorded in the pidfile, but only if
# a matching process is actually running.
log_daemon_msg "Stopping $DESC" "$NAME"
if ( pidofproc $DAEMON 2>&1 > /dev/null ) ; then
/bin/kill `cat $PIDFILE`
fi
# NOTE(review): retval is not updated in this function, so the logged
# status is whatever it was before the call -- confirm this is intended.
log_end_msg $retval
}
daemon_reload () {
# Ask a running daemon to reload its configuration via SIGHUP; log a
# failure (retval=1) when the daemon is not running.
log_daemon_msg "Reloading $DESC" "$NAME"
if ( pidofproc $DAEMON 2>&1 > /dev/null ) ; then
/bin/kill -SIGHUP $(cat $PIDFILE)
else
log_failure_msg "$NAME not started, so not reloading"
retval=1
fi
log_end_msg $retval
}
daemon_status () {
# Log and report whether the daemon is currently running; sets the global
# retval (0 = running, 1 = not running) consumed by the final "exit $retval".
log_daemon_msg "Checking status of $DESC" "$NAME"
if ( pidofproc $DAEMON 2>&1 > /dev/null ) ; then
retval=0
else
retval=1
fi
log_end_msg $retval
}
# Standard LSB init dispatch; each handler updates the global retval.
case "$1" in
start)
daemon_start
;;
stop)
daemon_stop
;;
reload)
daemon_reload
;;
status)
daemon_status
;;
force-reload|restart)
daemon_stop
daemon_start
;;
*)
# Unknown action: print LSB usage line and exit 2 per convention.
N=/etc/init.d/$NAME
echo "Usage: $N {start|stop|reload|force-reload|restart|status}" >&2
retval=2
;;
esac
exit $retval
| true |
9bfea95d9f43d9fd5d655ac999c7ba668e5f4308 | Shell | ecksun/verbump | /verbump | UTF-8 | 1,954 | 4.1875 | 4 | [] | no_license | #!/bin/bash
set -e
set -u
assert_dependency() {
# Abort the script with an explanatory message unless command $1 exists.
# $2 = why it is needed, $3 = an installation hint shown in parentheses.
local dep=$1
local reason=$2
local hint=$3
hash "$dep" 2>/dev/null && return 0
echo "Dependency $dep missing. $reason ($hint)" >&2
exit 1
}
assert_dependency semver "Required" "npm install -g semver"
is_sbt() {
# True when the working directory holds an sbt build definition.
[[ -f build.sbt ]]
}
bump_sbt() {
# Bump the version in build.sbt by the given semver level, commit and tag.
# $1 = semver increment level (patch/minor/major/...).
local bump_level
local version
local new_version
bump_level="$1"
# Extract the current value of: version := "X.Y.Z"
version=$(grep -oP 'version := "\K([^"]+)(?=")' build.sbt)
new_version=$(semver "$version" -i "$bump_level")
sed -i 's/version := .*/version := "'"$new_version"'"/' build.sbt
git add build.sbt
commit_tag "$new_version"
# Fixed typo in the user-facing message: "verison" -> "version".
echo "[SBT] Bumped $bump_level version: $version -> $new_version"
}
is_npm() {
# True when the working directory holds an npm package manifest.
[[ -f package.json ]]
}
bump_npm() {
# Bump package.json via "npm version" (which also commits and tags when
# inside a git repo). $1 = semver increment level. The cut strips the
# leading "v" from npm's "vX.Y.Z" output.
assert_dependency npm "npm is required to bump package.json version" "sudo apt install npm"
local bump_level="$1"
npm version "$bump_level" | cut -c 2-
}
bump_git() {
# Tag-only bump for plain git repositories (no build file to rewrite).
# $1 = semver increment level.
local bump_level
local version
local new_version
bump_level="$1"
# Latest v-prefixed tag reachable from HEAD, without the leading "v".
version=$(git describe --abbrev=0 --always --match v[0-9]* HEAD | cut -c2-)
new_version=$(semver "$version" -i "$bump_level")
git tag --annotate --message "$new_version" "v${new_version}"
# Fixed typo in the user-facing message: "verison" -> "version".
echo "[GIT] Bumped $bump_level version: $version -> $new_version"
}
commit_tag() {
# Commit whatever is staged with the version as the message, then create
# the matching annotated "vX.Y.Z" tag. $1 = version without the "v" prefix.
local version="$1"
git commit -m "$version"
git tag --annotate --message "$version" "v${version}"
}
get_bump_level() {
# Echo $1 when it is a valid semver increment level; otherwise default to
# "patch". (Equivalent to the previous ";&" fall-through chain, which fell
# through every valid level to the same echo.)
case "$1" in
patch|minor|major|preminor|premajor|prepatch|prerelease)
echo "$1"
;;
*)
echo "patch"
;;
esac
}
bump_all() {
# Dispatch the version bump to whichever project type lives in $PWD:
# sbt build, npm package, or a plain git repository.
local level
level=$(get_bump_level "${1}")
if is_sbt; then
bump_sbt "$level"
return
fi
if is_npm; then
bump_npm "$level"
return
fi
bump_git "$level"
}
bump_all "${1-patch}"
| true |
482de845c261410c24a63c2a6fb4751d156874d6 | Shell | lkfo415579/GOD.util | /BPE_TOOLKIT/unk2_run.sh | UTF-8 | 1,208 | 2.609375 | 3 | [] | no_license | TERM=News
# Path to the corpus directories, relative to this script's CWD.
# NOTE(review): $TERM is set to the corpus name ("News") at the top of this
# script; it shadows the terminal's TERM environment variable.
CORPUS_PATH=..
mkdir -p $TERM
# Apply each direction's learned BPE codes to the raw unk2 text.
python ~/GOD.util/BPE_TOOLKIT/apply_bpe_unk2.py -c $CORPUS_PATH/$TERM/en-zh/en.bpe < 34k.unk2.en-zh.en > $TERM/unk2.en-zh.b.en
python ~/GOD.util/BPE_TOOLKIT/apply_bpe_unk2.py -c $CORPUS_PATH/$TERM/en-zh/zh.bpe < 34k.unk2.en-zh.zh > $TERM/unk2.en-zh.b.zh
python ~/GOD.util/BPE_TOOLKIT/apply_bpe_unk2.py -c $CORPUS_PATH/$TERM/zh-en/en.bpe < 34k.unk2.zh-en.en > $TERM/unk2.zh-en.b.en
python ~/GOD.util/BPE_TOOLKIT/apply_bpe_unk2.py -c $CORPUS_PATH/$TERM/zh-en/zh.bpe < 34k.unk2.zh-en.zh > $TERM/unk2.zh-en.b.zh
echo "EN-zh"
# Append the BPE'd unk2 data to the en-zh training set ...
cat $TERM/unk2.en-zh.b.en >> $CORPUS_PATH/$TERM/en-zh/train.$TERM.en-zh.en
cat $TERM/unk2.en-zh.b.zh >> $CORPUS_PATH/$TERM/en-zh/train.$TERM.en-zh.zh
# ... and its first 200 lines to the validation set.
head -n 200 $TERM/unk2.en-zh.b.en >> $CORPUS_PATH/$TERM/en-zh/valid.$TERM.en-zh.en
head -n 200 $TERM/unk2.en-zh.b.zh >> $CORPUS_PATH/$TERM/en-zh/valid.$TERM.en-zh.zh
echo "zh-EN"
# Same for the zh-en direction.
cat $TERM/unk2.zh-en.b.en >> $CORPUS_PATH/$TERM/zh-en/train.$TERM.zh-en.en
cat $TERM/unk2.zh-en.b.zh >> $CORPUS_PATH/$TERM/zh-en/train.$TERM.zh-en.zh
head -n 200 $TERM/unk2.zh-en.b.en >> $CORPUS_PATH/$TERM/zh-en/valid.$TERM.zh-en.en
head -n 200 $TERM/unk2.zh-en.b.zh >> $CORPUS_PATH/$TERM/zh-en/valid.$TERM.zh-en.zh
| true |
782f7812a0698cf8cf6ffc6bdf0b16093bdd3a85 | Shell | GauthierMichon/Linux_b2 | /TP4/scripts/script_nfs.sh | UTF-8 | 993 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# Configure the hosts file with the three backed-up machines.
echo "192.168.4.11 gitea.tp4.b2 gitea" >> /etc/hosts
echo "192.168.4.12 mariadb.tp4.b2 mariadb" >> /etc/hosts
echo "192.168.4.13 nginx.tp4.b2 nginx" >> /etc/hosts
# Start the NFS server now and enable it at boot.
systemctl start nfs-server rpcbind
systemctl enable nfs-server rpcbind
# One export subdirectory per client machine.
mkdir /nfsbackup/
mkdir /nfsbackup/gitea/
mkdir /nfsbackup/mariadb/
mkdir /nfsbackup/nginx/
# NOTE(review): 777 makes the share tree world-writable -- confirm intended.
chmod 777 /nfsbackup/
# Each client may only mount its own subdirectory (rw, no root squash).
echo -e "/nfsbackup/gitea/ 192.168.4.11(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/nfsbackup/mariadb/ 192.168.4.12(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
/nfsbackup/nginx/ 192.168.4.13(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)" > /etc/exports
# Enable the firewall services required for NFS to work.
firewall-cmd --permanent --add-service mountd
firewall-cmd --permanent --add-service rpc-bind
firewall-cmd --permanent --add-service nfs
firewall-cmd --reload
65527c7ab0344d09b1f46d2c485c1ab20f2450ca | Shell | Loot-IT/scripts | /newuser.sh | UTF-8 | 801 | 3.8125 | 4 | [] | no_license | #!/bin/bash
username="$2"
password="$3"
creds="$username-creds.txt"
addUser()
{
if [ -z "$password" ]; then
password=$(date +%s | sha256sum | base64 | head -c 12 ; echo)
fi
echo "Hello $username, Here are your credentials to log in" > "$creds"
echo "" >> "$creds"
echo "username: $username" >> "$creds"
echo "password: $password" >> "$creds"
sudo useradd -m -s /bin/bash "$username"
echo "$username:$password" | sudo chpasswd
mail -A "$creds" -s "credentials for $username" kuzniarski.michal@gmail.com < /dev/null
rm "$creds"
sudo cp rules.txt /home/$username || exit 1
echo "user $username successfully added"
}
delUser()
{
sudo userdel -r $username
}
if [ "$1" == "add" ]; then
addUser
elif [ "$1" == "remove" ]; then
delUser
else
echo "You need to specify either [add] or [remove]"; exit 1
fi
| true |
a59214bfdd854600e2c4afb0bad53cde7ac85b27 | Shell | shintaro-uchiyama/slack-suite | /deploy.sh | UTF-8 | 240 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/zsh
# Require exactly one argument: the GCP project id.
if [ $# -ne 1 ]; then
echo "please set project_id argument" 1>&2
echo "ex zsh deploy.sh project-id" 1>&2
exit 1
fi
projectID=$1
# Fetch the App Engine secret file from the project's secret bucket, then
# deploy. The bucket URL is quoted as one word (instead of the previous
# partial-quote concatenation) so the project id cannot be word-split or
# glob-expanded by the shell.
gsutil cp "gs://${projectID}-secret/slack-suite/app-engine/secret.yaml" .
gcloud app deploy
| true |
02aa576d7c202250f51555fb890301538103e50c | Shell | Tony-Reed/Witopia-connect | /progBar | UTF-8 | 544 | 3.109375 | 3 | [] | no_license | #!/bin/sh
# cutesie 'animation' while we wait
progBar()
{
# Print one progress character every half second while process $1 is
# still alive. Liveness is tested with "kill -0 <pid>", which matches the
# exact PID; the previous "ps | awk | grep $pid" also matched any PID that
# merely contained $pid as a substring (e.g. 42 matching 420).
local pid=$1
local delay=0.5
local barstr='*'
while kill -0 "$pid" 2>/dev/null; do
printf '%s' "$barstr"
sleep "$delay"
done
}
sleep 30 &
progBar $!
#printf "\n"
spin()
{
# Animate a rotating |/-\ spinner while process $1 is alive; hides the
# cursor for the duration (tput civis/cnorm).
pid=$1
delay=0.05
spinstr='|/-\'
tput civis
# NOTE(review): grep on the PID column matches substrings too (42 vs 420);
# confirm whether an exact-PID test is wanted here, as in progBar.
while [ "$(ps a | awk '{print $1}' | grep $pid)" ]; do
# Rotate the spinner string: move its first character to the end.
local temp=${spinstr#?}
printf " [%c] " "$spinstr"
local spinstr=$temp${spinstr%"$temp"}
sleep $delay
# Backspace over the " [x] " frame just printed.
printf "\b\b\b\b\b\b"
done
# Erase the final frame and restore the cursor.
printf "    \b\b\b\b"
tput cnorm
}
sleep 5 &
spin $!
| true |
df0a5c6992dc34ef32b3b98da0ddb67e8da94460 | Shell | czebotar/examples | /bash/docker_quickstart.sh | UTF-8 | 254 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Print a quick-reference cheat sheet of common Docker commands.
echo "Build a container"
echo " docker build -t tag_name ."
echo ""
echo ""
echo "List built containers"
echo " docker images"
echo ""
echo ""
echo "Run a container interactively to test it:"
echo "docker run -it --rm tag_name /bin/bash"
| true |
dfcb79022d657e11ad79d240281ffe4aee9d8599 | Shell | BabaZibulya/OpenCLInterpolation | /build_win.sh | UTF-8 | 143 | 2.671875 | 3 | [] | no_license | #!/bin/bash
set -e
# Out-of-source CMake configure for Visual Studio 2017 (x64), Debug build.
# mkdir -p creates the build directory only when it does not already exist.
mkdir -p build
cd build
cmake -G 'Visual Studio 15 2017 Win64' -DCMAKE_BUILD_TYPE=Debug ..
| true |
160fb962eda12779d97c123563b7ae4cc708a962 | Shell | mrirecon/myocardial-t1-mapping | /post.sh | UTF-8 | 1,286 | 3.5 | 4 | [] | no_license | #!/bin/bash
set -e
# Sanity-check the BART toolbox location before doing anything else.
if [ ! -e $TOOLBOX_PATH/bart ] ; then
echo "\$TOOLBOX_PATH is not set correctly!" >&2
exit 1
fi
export PATH=$TOOLBOX_PATH:$PATH
export BART_COMPAT_VERSION="v0.5.00"
usage="Usage: $0 <reco> <t1map>"
if [ $# -lt 2 ] ; then
echo "$usage" >&2
exit 1
fi
# Resolve both arguments to absolute paths before we cd into the workdir.
reco=$(readlink -f "$1")
t1map=$(readlink -f "$2")
if [ ! -e $reco ] ; then
echo "Input file does not exist." >&2
echo "$usage" >&2
exit 1
fi
# NOTE(review): this repeats the $TOOLBOX_PATH check from the top of the
# script verbatim -- likely redundant.
if [ ! -e $TOOLBOX_PATH/bart ] ; then
echo "\$TOOLBOX_PATH is not set correctly!" >&2
exit 1
fi
# Decide whether looklocker output needs manual rescaling (depends on the
# installed BART version, as reported by the helper script).
if ./utils/version_check.sh ; then
RESCALE_LL=1
else
RESCALE_LL=0
fi
# Mac: http://unix.stackexchange.com/questions/30091/fix-or-alternative-for-mktemp-in-os-x
WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'`
trap 'rm -rf "$WORKDIR"' EXIT
cd $WORKDIR
if [ $RESCALE_LL -eq 1 ] ; then
# work around scaling in looklocker:
# split the reco into its three maps, rescale M0, and rejoin.
printf "%s\n" "Rescaling looklocker"
bart slice 6 0 $reco tmp_Ms
bart slice 6 1 $reco tmp_M0
bart slice 6 2 $reco tmp_R1s
bart scale 2.0 tmp_M0 tmp_M0 # this scaling used to be bart of bart looklocker
bart join 6 tmp_Ms tmp_M0 tmp_R1s tmp_reco_rescaled
else
bart copy $reco tmp_reco_rescaled
fi
# Compute the final T1 map from the (possibly rescaled) reconstruction.
bart looklocker -t0.2 -D0.0153 tmp_reco_rescaled $t1map
| true |
b53aac6bab49a77d2fca8377704ffd452413cc5f | Shell | Serubin/dotfiles | /packages/desktop/latex/latex.info | UTF-8 | 153 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
# LaTeX Info file
#
# Distributions this package definition supports.
export package_support="arch debian ubuntu"
# Short description shown by the package browser.
echo "LaTeX: A markup language"
# Hook run by the installer before installation; no extra options needed.
PRE_INSTALL_OPTIONS() {
echo ""
}
| true |
7a48a909492eade7fcb47720bf7f2399f90323c3 | Shell | hardc0d3/Ethearnal | /ethnode/build_cdn.sh | UTF-8 | 349 | 2.90625 | 3 | [
"Unlicense"
] | permissive | #!/bin/bash
# todo different build targets
# Remember the starting directory so we can return after packaging.
cwd=$(pwd)
target_package="cdn_linux_amd64.tar.gz"
# clean old build. Use the conventional "rm -rf PATH" argument order; the
# previous "rm dist/cdn -rf" relied on GNU option reordering and breaks on
# non-GNU rm implementations.
rm -rf dist/cdn
pyinstaller ./cdn.py
sleep 3
# Stage the API definition directories into the frozen bundle.
cp -r ./apidef ./dist/cdn/
cp -r ./cdnapidef ./dist/cdn/
mkdir -p ./packages
cd ./dist
# Merge the ert build output into the cdn bundle, then package it.
cp -r ./ert/* ./cdn/
tar zcvf ${target_package} ./cdn
cp ${target_package} ../packages/
cd "${cwd}"
| true |
f3074b279bf89a96b41c029d6dab95865c59fb79 | Shell | gaiaresources/catools | /bin/restore | UTF-8 | 324 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
set -u
## The backup may be older than our current database so we need to drop our current db first
dropDb
# Default to the newest *.sql.gz under backups/ when no file is given.
# NOTE(review): relies on lexicographic reverse sort of filenames -- assumes
# they embed a sortable timestamp; confirm against the backup naming scheme.
FILE=${1:-$(find backups/ -name '*.sql.gz'|sort -r|head -n1)}
echo "Restoring backup $FILE"
# Stream the dump (with a pv progress bar) into MySQL. DB_USER, DB_PASSWORD,
# DB_HOST and DB_NAME must come from the environment (set -u enforces it).
pv $FILE|gunzip| mysql -u${DB_USER} -p${DB_PASSWORD} -h${DB_HOST} ${DB_NAME}
caUtils clear-caches
| true |
ec512c29adde8841a379fc5729d8906b7ee5eaa9 | Shell | fefaleksey/trikRuntime | /deploy.sh | UTF-8 | 247 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | if [ "$1" ]; then
ip=$1
else
echo "Usage: deploy.sh <robot ip>"
exit 1
fi
# Make the GUI launcher non-executable and kill the running GUI so the
# files can be replaced safely while it is down.
ssh root@$ip 'chmod -x /etc/trik/trikGui.sh && killall trikGui'
# Sync the current directory onto the robot.
rsync -avz -e ssh . root@$ip:/home/root/trik
# Restore the launcher and signal init (PID 1) so the GUI is respawned.
ssh root@$ip 'chmod +x /etc/trik/trikGui.sh && kill -hup 1'
| true |
58012c1397b3adb8ec0ebc4279b4fa784f580115 | Shell | github-lvbing/shell-github | /stlinkInstall.sh | UTF-8 | 815 | 2.703125 | 3 | [] | no_license |
# Build and install the open-source ST-Link tools from source.
DIR_SRC=/home/lvbing/toolchain
cd $DIR_SRC
git clone https://github.com/texane/stlink.git
# install cMake
sudo apt-get install cmake
# libusb USB driver
sudo apt-get install libusb-dev
# userspace USB programming library development files
sudo apt-get install libusb-1.0-0-dev
# compression/decompression tools
sudo apt-get install p7zip mingw-w64
cd ./stlink
make
cd $(pwd)/build/Release
sudo make install DESTDIR=_install
# Print a usage summary for the installed tools.
echo "-----------------------------------------------"
echo "$(pwd)/_install/usr/local/bin
st-flash 将二进制文件固化到 STM32 设备
st-info 查询已连接 STLink 的 STM32 设备信息
st-util 运行 GDB 服务与 STM32 设备进行交互
stlink-gui STlink 图形化工具
cmd:lsusb
cmd:st-info --prob
cmd:sudo st-flash write test.bin 0x8000000"
echo "-----------------------------------------------"
| true |
026ac582e6dc0caf43da885f5de9f4d6063e7bc1 | Shell | michael-a-green/gss-labs | /opencart/install.sh | UTF-8 | 1,041 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#Will require sudo execution since certbot creates the
#files with root uid
#Due the sequential execution needed and the lack of pre-installed
#docker-compose on LA this script is used instead of a compose yml
#
# Sequence (all steps chained with && so any failure stops the script):
# 1. create a dedicated docker network,
# 2. obtain a Let's Encrypt certificate for $HOSTNAME with a standalone
#    certbot container (needs ports 80/443 free),
# 3. stage the certificate and nginx config files,
# 4. start the opencart container and an nginx container that terminates
#    TLS in front of it.
docker network create opencart&&
mkdir -p letsencrypt/etc letsencrypt/var/lib &&
docker run -it --name certbot-opencart\
 -p 80:80 -p 443:443\
 -v "$PWD/letsencrypt/etc:/etc/letsencrypt"\
 -v "$PWD/letsencrypt/var/lib:/var/lib/letsencrypt"\
 certbot/certbot certonly --standalone -d $HOSTNAME &&
mkdir -p nginx/cert nginx/conf.d nginx/templates &&
cp ./letsencrypt/etc/live/$HOSTNAME/fullchain.pem ./nginx/cert/&&
cp ./letsencrypt/etc/live/$HOSTNAME/privkey.pem ./nginx/cert/ &&
cp ./opencart.conf.template nginx/templates&&
docker run -d --network opencart --name opencart\
 --restart always\
 ameseguer/opencart&&
docker run --network opencart --name nginx\
 --restart always -p 80:80 -p 443:443\
 -v "$PWD/default.conf:/etc/nginx/conf.d/default.conf:ro"\
 -v "$PWD/nginx/cert:/etc/nginx/cert"\
 nginx
| true |
95ca75104f39c37605d8a0db250c7037a00087f9 | Shell | joliencremers/regression_wrapping | /2 Simulationstudy/Ccodes_and_scripts/run_Group_size.sh | UTF-8 | 1,656 | 2.640625 | 3 | [] | no_license |
#Change to directory with simulated data
cd ../Simulated_data/Group_size
#Run the WN (wrapped normal) sampler on every WN dataset.
for f in *WN*; do
# run regression with k=1 and prior variance=5
../../Ccodes_and_scripts/Regression_WN.exe 1 5 $f
# run analysis with autocorrelations (argument 1) and write report to dat file
../../Ccodes_and_scripts/Analysis.exe 1 output* Analysis_Group_sizeWN.dat
# zip the (large) output data, then remove the per-run files
zip ../../Simulation_output/Group_size_out.zip output*
zip ../../Simulation_output/Group_size_raw.zip raw*
zip ../../Simulation_output/Group_size_analysis.zip analysis*
zip ../../Simulation_output/Group_size_autocor.zip auto*
rm -f output*
rm -f raw*
rm -f analysis*
rm -f auto*
done
# Fixed: the report is written as "Analysis_Group_sizeWN.dat" above; the
# previous "mv Analys_Group_sizeWN.dat" referenced a file that never exists.
mv Analysis_Group_sizeWN.dat ../../Simulation_output/
#Run the WC (wrapped Cauchy) sampler on every WC dataset.
for f in *WC*; do
# run regression with k=1 and prior variance=5
../../Ccodes_and_scripts/Regression_WC.exe 1 5 $f
# run analysis with autocorrelations (argument 1) and write report to dat file
../../Ccodes_and_scripts/Analysis.exe 1 output* Analysis_Group_sizeWC.dat
# zip the (large) output data, then remove the per-run files
zip ../../Simulation_output/Group_size_out.zip output*
zip ../../Simulation_output/Group_size_raw.zip raw*
zip ../../Simulation_output/Group_size_analysis.zip analysis*
zip ../../Simulation_output/Group_size_autocor.zip auto*
rm -f output*
rm -f raw*
rm -f analysis*
rm -f auto*
done
# Fixed filename here as well ("Analys" -> "Analysis").
mv Analysis_Group_sizeWC.dat ../../Simulation_output/
#All output has been written to /Simulation_output/
#the Analysis_Group_sizeWC.dat and Analysis_Group_sizeWN.dat are important. Refer to the R code in \2 Simulation study\Diagnotic_tools_and_Analysis\Analysis.R for further instructions.
| true |
d70511db2330f74fa7882249c67c64ef7467f472 | Shell | lijianqiao/Script | /OpsManage_sh/InstallOpsmange.sh | UTF-8 | 17,787 | 3.1875 | 3 | [] | no_license | #!/bin/bash
#Name InstallOpsManage
#Create by li
#Use environment = centos 7.5
#
#
echo "重要说明"
echo "1.本脚本仅适用于CentOS-7-x86_64-Minimal-1810.iso安装的系统"
echo "2.官方文档:https://github.com/welliamcao/OpsManage"
echo "3.其中很多需要修改的变量用户自行修改,具备详细的注释"
echo "4.因为部分用户在国内因为wget一些软件很慢,所以本人也上传了所需的必要的软件"
echo "5.当然,使用这里的软件,那么对应脚本中的 第201行、第214行、第226行、第305行都需要注释掉,并且需要您这边把该四个文件上传到你放置文件的目录,该目录也是您执行脚本第一个让您输入的 “请输入文件存放位置” 的地方!!"
echo "6.部分用户网卡名和位置未必与本人的一样,可以在 第26行更改网卡配置文件位置(ens33) 、 第38行和第124行改网卡名(ens33)"
echo "7.暂时只是想到这么点,有什么问题,希望大家指正!"
echo " "
echo " "
echo " "
echo "========================================================================="
echo "========= 1.定义变量及相关配置及位置 ======"
echo "========================================================================="
#通用
SELINUX_PATH=/etc/selinux/config
HOST_NAME='/etc/sysconfig/network'
echo "注意:本脚本可能因为下载某些软件导致脚本运行失败,可自行下载完成,修改对应位置之后运行!当然也可以axel来实现yum断点续传"
#定义下载文件放置位置,可创建软件存放文件夹并进入
#mkdir -p /data/tools
read -p "请输入文件存放位置:" TOOLS_PATH
if [ ! -d "$TOOLS_PATH" ]; then
mkdir -p $TOOLS_PATH
fi
#TOOLS_PATH=/root
cd $TOOLS_PATH
#网络相关配置文件及位置
#获取本机IP地址
#IPADDR1=/sbin/ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:"
#IPADDR1=ip a show dev ens33|grep -w inet|awk '{print $2}'|awk -F '/' '{print $1}'
ETHCONF=/etc/sysconfig/network-scripts/ifcfg-ens33
HOSTS=/etc/hosts
HOSTNAME=`hostname`
DIR=$TOOLS_PATH/backup/`date +%Y%m%d`
#增加GitHub快速下载
GITHUB_IP1='199.232.5.194 github.global.ssl.fastly.net'
GITHUB_IP2='140.82.113.4 github.com'
GITHUB_IP3='185.199.108.153 assets-cdn.github.com'
NETMASK=255.255.255.0
DNS_PATH='/etc/resolv.conf'
sed -i 's/ONBOOT\=no/ONBOOT\=yes/g' ${ETHCONF}
systemctl restart network
IPADDR1=`ip a show dev ens33|grep -w inet|awk '{print $2}'|sed 's/\/.*//'`
_GATEWAT_=`cat /etc/resolv.conf | awk '{print $2}'|tail -1`
echo "========================================================================="
echo "========= 2.修改主机名与网络配置及更改源 ======"
echo "========================================================================="
echo "........自动获取的ip是$IPADDR1 ..........."
read -p "Please insert ip address:" _ipaddr_
#创建OpsManage管理员账户与密码
read -p "请输入OpsManage管理员账号:" _user_
read -p "请输入OpsManage管理员密码:" _passwd_
read -p "请输入OpsManage管理员邮箱(可随意填写):" _email_
#创建OpsManage数据库管理员密码(账户默认为root,可自行定义)
read -p "请输入运行OpsManage数据库管理员密码:" _mysql_pwd_
#
#修改主机名(简单粗暴)
#hostnamectl set-hostname Zabbix-Server
#或
# Interactively change the machine's hostname: back up /etc/hosts, ask the
# user, update /etc/sysconfig/network and the live hostname, and append the
# new name plus the GitHub speed-up entries to /etc/hosts.
function Change_hosts(){
if
[ ! -d $DIR ];then
mkdir -p $DIR
fi
# Back up the hosts file before touching it.
cp $HOSTS $DIR
read -p "当前主机名为${HOSTNAME},是否修改(y/n):" yn
if [ "$yn" == "Y" ] || [ "$yn" == "y" ]; then
read -p "请输入主机名:" hdp
# Replace line 2 (HOSTNAME=...) of the network sysconfig file.
sed -i "2c HOSTNAME=${hdp}" ${HOST_NAME}
hostnamectl set-hostname ${hdp}
echo "$_ipaddr_ $hdp">>$HOSTS
echo "$GITHUB_IP1">>$HOSTS
echo "$GITHUB_IP2">>$HOSTS
echo "$GITHUB_IP3">>$HOSTS
# Show the resulting hosts entries for the new name as confirmation.
cat $HOSTS |grep 127.0.0.1 |grep "$hdp"
else
echo "....主机名未修改 .........."
#fi
fi
}
Change_hosts
#
# Switch the interface from DHCP to a static address: back up the config,
# flip BOOTPROTO to static, and append IPADDR/NETMASK/GATEWAY. If a static
# config already exists, ask the user before commenting out the old values
# and appending fresh ones.
function Change_ip(){
# Check whether the backup directory exists; "!" negates the test.
if
[ ! -d $DIR ];then
mkdir -p $DIR
fi
echo "准备开始改变IP,在此之前备份原来配置"
cp $ETHCONF $DIR
grep "dhcp" $ETHCONF
# $? below tests the status of the previous command; 0 means "dhcp" found.
if
[ $? -eq 0 ];then
sed -i 's/dhcp/static/g' $ETHCONF
# awk -F. splits on "." ; the gateway used here ends in .2 (e.g. 192.168.0.2).
echo -e "IPADDR=$_ipaddr_\nNETMASK=$NETMASK\nGATEWAY=$_GATEWAT_" >>$ETHCONF
echo "This IP address Change success !"
else
echo -n "这个$ETHCONF已存在 ,请确保更改吗?(y/n)":
read i
fi
if
[ "$i" == "y" -o "$i" == "yes" ];then
# Split the entered IP into dot-separated fields with awk -F.
count=(`echo $_ipaddr_|awk -F. '{print $1,$2,$3,$4}'`)
# ${#count[@]} is the number of elements in the array.
A=${#count[@]}
# Loop until the address splits into exactly 4 octets (a [0-9] regex
# check would be an alternative validation).
while
[ "$A" -ne "4" ]
do
count=(`echo $_ipaddr_|awk -F. '{print $1,$2,$3,$4}'`)
A=${#count[@]}
done
# sed -e applies several edits in one pass: comment out old settings.
sed -i -e 's/^IPADDR/#IPADDR/g' -e 's/^NETMASK/#NETMASK/g' -e 's/^GATEWAY/#GATEWAY/g' $ETHCONF
# echo -e expands \n so the three settings are appended on separate lines.
echo -e "IPADDR=$_ipaddr_\nNETMASK=$NETMASK\nGATEWAY=$_GATEWAT_" >>$ETHCONF
echo "This IP address Change success !"
else
echo "This $ETHCONF static exist,please exit"
# exit $?
fi
}
Change_ip
systemctl restart network
NEW_IP=`ip a show dev ens33|grep -w inet|awk '{print $2}'|sed 's/\/.*//'`
cat >> $DNS_PATH << EOF
nameserver $_GATEWAT_
EOF
systemctl restart network
yum install -y vim wget psmisc
#更换pip阿里源
mkdir -p ~/.pip/
touch ~/.pip/pip.conf
cat >> ~/.pip/pip.conf << EOF
[global]
index-url = https://mirrors.aliyun.com/pypi/simple/
EOF
#增加Mariadb数据库源(本人是使用的国内源(https://www.centos.bz/2017/12/%E5%AE%89%E8%A3%85mariadb%E9%80%9F%E5%BA%A6%E6%85%A2%E7%9A%84%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95-%E4%BD%BF%E7%94%A8%E5%9B%BD%E5%86%85%E6%BA%90/),可自行修改,地址:https://downloads.mariadb.org/mariadb/repositories/#mirror=acorn)
touch /etc/yum.repos.d/MariaDB.repo
cat >> /etc/yum.repos.d/MariaDB.repo << EOF
# MariaDB 10.3 CentOS repository list - created 2019-10-01 13:19 UTC
# http://downloads.mariadb.org/mariadb/repositories/
[mariadb]
name = MariaDB
#baseurl = http://yum.mariadb.org/10.3/centos7-amd64
baseurl = https://mirrors.ustc.edu.cn/mariadb/yum/10.3/centos7-amd64
gpgkey=https://mirrors.ustc.edu.cn/mariadb/yum/RPM-GPG-KEY-MariaDB
gpgcheck=1
EOF
#更换yum源
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
#更改epel源
mv /etc/yum.repos.d/epel-testing.repo /etc/yum.repos.d/epel-testing.repo.backup
mv /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel.repo.backup
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
wget -O /etc/yum.repos.d/epel-testing.repo http://mirrors.aliyun.com/repo/epel-testing.repo
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
yum clean all
yum makecache
systemctl restart network
yum update -y
#
echo "========================================================================="
echo "========= 3.关闭selinux和防火墙 ======"
echo "========================================================================="
#关闭selinux
#临时关闭
setenforce 0
#永久关闭(重启后生效)
sed -i 's/SELINUX\=enforcing/SELINUX\=disabled/g' $SELINUX_PATH
#关闭防火墙
##临时关闭firewall
systemctl stop firewalld.service
#禁止firewall开机启动
systemctl disable firewalld.service
#
echo "========================================================================="
echo "========= 4.安装通用组件 ======"
echo "========================================================================="
echo "ETHCONF=$ETHCONF"
echo "HOSTS=$HOSTS"
echo "HOSTNAME=$HOSTNAME"
echo "获取本机IP地址=$NEW_IP"
#安装控件
yum install -y net-tools tree zip unzip git expect
#
echo "========================================================================="
echo "========= 5.安装相关组件 ======"
echo "========================================================================="
#
#
yum install zlib zlib-devel readline-devel sqlite-devel bzip2-devel openssl-devel gdbm-devel libdbi-devel ncurses-libs kernel-devel libxslt-devel libffi-devel python-devel zlib-devel openldap-devel sshpass gcc epel-release supervisor -y
#如果升级报错,可使用下面方法
#cd $TOOLS_PATH
#wget -c -t 0 https://files.pythonhosted.org/packages/30/db/9e38760b32e3e7f40cce46dd5fb107b8c73840df38f0046d8e6514e675a1/pip-19.2.3-py2.py3-none-any.whl
#pip install pip-19.2.3-py2.py3-none-any.whl
cd $TOOLS_PATH
yum remove -y MariaDB-common*
yum install -y autoconf
yum install -y http://www.percona.com/downloads/percona-release/redhat/0.1-6/percona-release-0.1-6.noarch.rpm
wget -c -t 0 https://www.percona.com/downloads/Percona-Server-5.6/Percona-Server-5.6.45-86.1/binary/redhat/7/x86_64/Percona-Server-5.6.45-86.1-r5bc37b1-el7-x86_64-bundle.tar
tar -xvf Percona-Server-5.6.45-86.1-r5bc37b1-el7-x86_64-bundle.tar
rpm -ivh Percona-Server-56-debuginfo-5.6.45-rel86.1.el7.x86_64.rpm
rpm -ivh Percona-Server-shared-56-5.6.45-rel86.1.el7.x86_64.rpm
rpm -ivh Percona-Server-client-56-5.6.45-rel86.1.el7.x86_64.rpm
rpm -ivh Percona-Server-server-56-5.6.45-rel86.1.el7.x86_64.rpm
yum update -y
yum install -y mysql-devel
echo "========================================================================="
echo "========= 5.安装相Python ======"
echo "========================================================================="
cd $TOOLS_PATH
PYTHON_PATH=/usr/local/python3
wget -c -t 0 https://www.python.org/ftp/python/3.6.6/Python-3.6.6.tgz
tar -xzvf Python-3.6.6.tgz
cd Python-3.6.6
./configure --prefix=$PYTHON_PATH
make all && make install && make clean && make distclean
ln -s $PYTHON_PATH/bin/pip3 /usr/bin/pip3
pip3 install --upgrade pip
echo "========================================================================="
echo "========= 6.安装相关模块 ======"
echo "========================================================================="
cd /mnt/
#git clone -b v3 https://github.com/welliamcao/OpsManage.git
wget -c -t 0 https://codeload.github.com/welliamcao/OpsManage/zip/v3
unzip v3
mv OpsManage-3 OpsManage
OPSMANAGE_PATH=/mnt/OpsManage
cd $OPSMANAGE_PATH
pip3 install -r requirements.txt
echo "========================================================================="
echo "========= 7.安装redis ======"
echo "========================================================================="
cd $TOOLS_PATH
wget -c -t 0 http://download.redis.io/releases/redis-3.2.8.tar.gz
tar -xzvf redis-3.2.8.tar.gz
cd redis-3.2.8
make && make install
sed -i 's/bind 127\.0\.0\.1/bind 127\.0\.0\.1 $NEW_IP/g' redis.conf
sed -i 's/daemonize no/daemonize yes/g' redis.conf
sed -i 's/loglevel notice/loglevel warning/g' redis.conf
sed -i 's/logfile \"\"/logfile \"\/var\/log\/redis.log\"/g' redis.conf
cd ../
REDIS_PATH=/usr/local/redis
mv redis-3.2.8 $REDIS_PATH
$REDIS_PATH/src/redis-server $REDIS_PATH/redis.conf
echo "========================================================================="
echo "========= 8.配置mysql ======"
echo "========================================================================="
sed -i '/\[mysqld\]/a\character_set_server = utf8' /etc/my.cnf
systemctl restart mysqld
systemctl enable mysqld
mysqladmin -u root password $_mysql_pwd_
echo "flush privileges;" | mysql -uroot -p$_mysql_pwd_
mysqladmin -uroot -p$_mysql_pwd_ password $_mysql_pwd_
echo "flush privileges;" | mysql -uroot -p$_mysql_pwd_
# systemctl stop mysqld
# sed -i '/character_set_server = utf8/a\skip-grant-tables' /etc/my.cnf
# echo "use mysql; \nupdate user set password=PASSWORD('$_mysql_pwd_') where user='root';\nflush privileges;" | mysql -uroot -p
# sed -i 's/skip-grant-tables /#skip-grant-tables/g' /etc/my.cnf
# systemctl restart mysqld
echo "create database opsmanage DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;" | mysql -uroot -p$_mysql_pwd_
echo "grant all privileges on opsmanage.* to root@'%' identified by '$_mysql_pwd_';" | mysql -uroot -p$_mysql_pwd_
echo "flush privileges;" | mysql -uroot -p$_mysql_pwd_
systemctl restart mysqld
#sed -i 's/host \= 192\.168\.1\.\*/host \= 127\.0\.0\.1/g' $OPSMANAGE_PATH/conf
sed -i 's/\<host = 192.168.1.*\>/host = 127.0.0.1/g' $OPSMANAGE_PATH/conf/opsmanage.ini
sed -i 's/\<server = 192.168.1.*\>/server = 127.0.0.1/g' $OPSMANAGE_PATH/conf/opsmanage.ini
sed -i 's/\<backup_host = 192.168.1.*\>/backup_host = 127.0.0.1/g' $OPSMANAGE_PATH/conf/opsmanage.ini
sed -i 's/\<password = welliam\>/password = '$_mysql_pwd_'/g' $OPSMANAGE_PATH/conf/opsmanage.ini
cd $OPSMANAGE_PATH
$PYTHON_PATH/bin/python3 manage.py makemigrations wiki
$PYTHON_PATH/bin/python3 manage.py makemigrations orders
$PYTHON_PATH/bin/python3 manage.py makemigrations filemanage
$PYTHON_PATH/bin/python3 manage.py makemigrations navbar
$PYTHON_PATH/bin/python3 manage.py makemigrations databases
$PYTHON_PATH/bin/python3 manage.py makemigrations asset
$PYTHON_PATH/bin/python3 manage.py makemigrations deploy
$PYTHON_PATH/bin/python3 manage.py makemigrations cicd
$PYTHON_PATH/bin/python3 manage.py makemigrations sched
$PYTHON_PATH/bin/python3 manage.py makemigrations apply
$PYTHON_PATH/bin/python3 manage.py migrate
#
/usr/bin/expect <<-EOF
set timeout 30
spawn $PYTHON_PATH/bin/python3 manage.py createsuperuser
expect {
"Username*" { send "$_user_\n",exp_continue }
"Email*" { send "$_email_\n",exp_continue }
"Password*" { send "$_passwd_\n",exp_continue }
"Bypass password*" { send "y\n" }
}
expect eof;
EOF
# 如果出现错误ImportError: cannot import name 'LDAPError'
# pip3 uninstall python-ldap
# pip3 install --upgrade python-ldap
echo "========================================================================="
echo "========= 9.安装Nginx ======"
echo "========================================================================="
yum install -y pcre pcre-devel gcc-c++ openssl
cd $TOOLS_PATH
wget -c -t 0 http://nginx.org/download/nginx-1.16.1.tar.gz
tar -zxvf nginx-1.16.1*
cd nginx-1.16*
./configure
make && make install
_WHEREIS_NGINX_=`whereis nginx`
NGINX_PATH=`echo $_WHEREIS_NGINX_ |cut -d' ' -f2`
$NGINX_PATH/sbin/nginx
cp $NGINX_PATH/sbin/nginx /etc/init.d/
chmod +x /etc/init.d/nginx
#设置开机启动nginx
cat >> /etc/rc.local << EOF
$NGINX_PATH/sbin/nginx
EOF
chmod 755 /etc/rc.local
$NGINX_PATH/sbin/nginx
echo "========================================================================="
echo "========= 10.启动部署平台 ======"
echo "========================================================================="
SUPER_PATH='/etc/supervisord.conf'
echo_supervisord_conf > $SUPER_PATH
export PYTHONOPTIMIZE=1
cat >> $SUPER_PATH << EOF
[program:celery-worker-default]
command=$PYTHON_PATH/bin/celery -A OpsManage worker --loglevel=info -E -Q default -n worker-default@%%h
directory=$OPSMANAGE_PATH
stdout_logfile=/var/log/celery-worker-default.log
autostart=true
autorestart=true
redirect_stderr=true
stopsignal=QUIT
numprocs=1
[program:celery-worker-ansible]
command=$PYTHON_PATH/bin/celery -A OpsManage worker --loglevel=info -E -Q ansible -n worker-ansible@%%h
directory=$OPSMANAGE_PATH
stdout_logfile=/var/log/celery-worker-ansible.log
autostart=true
autorestart=true
redirect_stderr=true
stopsignal=QUIT
numprocs=1
[program:celery-beat]
command=$PYTHON_PATH/bin/celery -A OpsManage beat --loglevel=info --scheduler django_celery_beat.schedulers:DatabaseScheduler
directory=$OPSMANAGE_PATH
stdout_logfile=/var/log/celery-beat.log
autostart=true
autorestart=true
redirect_stderr=true
stopsignal=QUIT
numprocs=1
[program:opsmanage-web]
command=$PYTHON_PATH/bin/python3 manage.py runserver 0.0.0.0:8000 --http_timeout 1200
directory=$OPSMANAGE_PATH
stdout_logfile=/var/log/opsmanage-web.log
stderr_logfile=/var/log/opsmanage-web-error.log
autostart=true
autorestart=true
redirect_stderr=true
stopsignal=QUIT
EOF
#
#启动celery
supervisord -c $SUPER_PATH
#配置nginx
mkdir -p /var/log/nginx
touch /var/log/nginx/opsmanage_access.log
sed -i 's/\<listen 80;\>/listen 80;/g' $NGINX_PATH/conf/nginx.conf
sed -i '0,/server_name localhost;/a\ access_log \/var\/log\/nginx\/opsmanage_access.log;\n error_log \/var\/log\/nginx\/opsmanage_error.log;' $NGINX_PATH/conf/nginx.conf
sed -i '0,/index index.html index.htm;/a\ proxy_next_upstream off;\n proxy_set_header X-Real-IP $remote_addr;\n proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n proxy_set_header Host $host;\n proxy_http_version 1.1;\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection "upgrade";\n proxy_pass http:\/\/\'$NEW_IP':8000$request_uri;' $NGINX_PATH/conf/nginx.conf
sed -i '/deny all;/a\ location \/static {\n expires 30d;\n autoindex on;\n add_header Cache-Control private;\n alias \/mnt\/OpsManage\/static\/;\n }\n' $NGINX_PATH/conf/nginx.conf
$NGINX_PATH/sbin/nginx -s reload
#
#重启
shutdown -t 30 -r
#reboot
| true |
3d08bf068eaae4aaba09f47d457f38fd2bcb0a50 | Shell | tobiasschulz/scripts | /bin/sparse | UTF-8 | 138 | 2.875 | 3 | [] | no_license | #!/bin/bash
if [ "x$1" = "x" ] || [ "x$2" = "x" ]
then
echo Syntax: $0 FILE SIZE
exit 0
fi
dd if=/dev/zero of=$1 bs=1 count=1 seek=$2
| true |
ccd6dc0cad8cd5f18e2cbac6a717058da3f7ce5e | Shell | talasago/auroraserverless_crud_test | /sh/01_create_aurora_serverless.bash | UTF-8 | 967 | 2.671875 | 3 | [] | no_license | #!/bin/bash
#
# 前提条件
# 作成済みのvpcが存在すること
cd $(dirname $0)
source ./00_config.ini
aws rds create-db-cluster \
--backup-retention-period "1" \
--copy-tags-to-snapshot \
--db-cluster-identifier "${db_cluster_identifier}" \
--db-cluster-parameter-group-name "default.aurora-mysql5.7" \
--db-subnet-group-name "${subnet_group_name}" \
--database-name "${database_name}" \
--no-deletion-protection \
--engine "aurora-mysql" \
--engine-mode "serverless" \
--engine-version "5.7.mysql_aurora.2.07.1" \
--master-user-password "${master_user_password}" \
--master-username "${master_username}" \
--pre-signed-url "" \
--storage-encrypted \
--enable-http-endpoint \
--region ap-northeast-1 \
--profile ${profile} > ./log/create_db_cluster_log.json
| true |
f4ea5e7c3f9bbb28b075dbd06f3cf0bec52b9a02 | Shell | imenmansouri/projet.sh | /fonctions.sh-2 (1).txt | UTF-8 | 1,145 | 3.578125 | 4 | [] | no_license | #!/bin/bash
lister()
{
ls /sys/class/net
}
sauvegarder()
{
echo "entrez l interface"
read $i
if [[ $i="ens33" ]]
then ifconfig ens33 >> interface_ens33
fi
if [[ $i="lo" ]]
then ifconfig lo >> interface_lo
fi
}
desactiver()
{
systemctl stop NetworkManager
systemctl status NetworkManager
}
configman()
{
echo "entrez l interface"
read $i
echo "interface $i"
if [[ $i="ens33" ]]
then
sudo ifconfig ens33 192.0.0.10 netmask 255.255.255.0 broadcast 192.0.0.0
route add default gw 30.0.01
route -n
service networking restart
ifconfig ens33
fi
}
c=$(zenity --entry --title="Commandes" --text="Tapez votre option"
)
case $c in
-l)lister;;
-s)sauvegarder;;
-d)desactiver;;
-c)configman;;
-h)
echo "-l,lister les interfaces reseaux"
echo "-s,sauvegarder configuration d'une interface reseau"
echo "-d,desactiver NetworkManager"
echo "-c,configurer manuellement un reseau"
echo "-d+interface,configurer dynamiquement une interface"
;;
*)
echo "erreur, essayez option -h"
esac | true |
fa8148a4013843b780107a06a3f4baafc930c75b | Shell | josefaidt/dotfiles | /setup.sh | UTF-8 | 1,653 | 3.078125 | 3 | [] | no_license | #!/bin/bash
echo 'Setting up your new device...'
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until `setup.sh` has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# install homebrew
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
# install the basics
brew install \
fish \
node \
yarn \
tmux \
ranger \
tree
# change default shell to /usr/local/bin/fish
# install Fisher and Bass (for nvm, if we need it later)
curl https://git.io/fisher --create-dirs -sLo ~/.config/fish/functions/fisher.fish
fisher add edc/bass
# install GUI applications
brew tap caskroom/cask
brew cask install \
google-chrome \
brave-browser \
firefox \
visual-studio-code \
sublime-text \
spotify \
spotmenu \
docker \
spectacle \
github \
vlc \
postman \
graphql-playground \
anaconda
# change default shell to fish
sudo echo /usr/local/bin/fish >> /etc/shells
chsh -s /usr/local/bin/fish
cp -r ./fish ~/.config
# copy other dotfiles
cp ./tmux/.tmux.conf ~
mkdir ~/.vim
cp -r ./vim/colors ~/.vim
cp ./vim/.vimrc ~
# reload config
# source ~/.config/fish/fish.config
# install ESLint and other npm packages (uses custom fish function)
yarn global add \
eslint babel-eslint eslint-loader \
prettier eslint-config-prettier eslint-plugin-prettier \
eslint-config-standard eslint-plugin-standard \
eslint-plugin-node \
eslint-plugin-jsx-a11y \
eslint-plugin-promise \
eslint-plugin-import \
eslint-plugin-react \
eslint-plugin-react-hooks
cp ./.eslintrc.js ~/.config
| true |
4fee5cda95ae30a9094a83398dd8eea285bfb3b2 | Shell | reflex-frp/reflex-platform | /try-reflex | UTF-8 | 2,757 | 3.921875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
set -euo pipefail
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
. "$DIR/scripts/common-setup.sh"
ROOT_PERMISSION_FIX_SUGGESTION="please use your operating system's \"repair permissions\" feature, if it has one, or manually remove write permissions for your user from the '/' directory."
if [ -e /homeless-shelter ] ; then
echo "It looks like your system has a directory or file at '/homeless-shelter'. This will cause the try-reflex build to fail, because the Nix package manager assumes that /homeless-shelter does not exist and cannot be created."
echo
echo "If you intentionally created /homeless-shelter, please submit an issue at $REPO/issues, and we will try to find a workaround for this situation. However, usually, this directory is created by accident when a Nix script is run by a user who has write permissions to the root directory."
echo
echo "If this is the case, please remove the /homeless-shelter directory, then ensure that your root directory is not writable by the current user. To do this, $ROOT_PERMISSION_FIX_SUGGESTION"
exit 1
elif mkdir /homeless-shelter 2>/dev/null ; then
rmdir /homeless-shelter
echo "It looks like your filesystem's root directory is writable by the current user. This will cause nix to fail building try-reflex, and may also indicate a security vulnerability. Note that you should not run try-reflex as root."
echo
echo "To fix this problem, $ROOT_PERMISSION_FIX_SUGGESTION"
exit 1
fi
echo "Entering the reflex sandbox..."
# This must be built first, so that the ghcjs.reflex-todomvc built below is built in parallel with everything else
prebuild_try_reflex_shell
SUGGESTION_GHC=$(cat <<EOF
To run a simple app:
$ ghc -dynamic -XOverloadedStrings -e 'import Reflex.Dom' -e 'mainWidget $ text \"Hello, world!\"'
Or to see a more complex native binary GUI example (based on the source at https://github.com/reflex-frp/reflex-todomvc/blob/master/src/Main.hs):
$ reflex-todomvc
EOF
)
SUGGESTION_GHCJS=$(cat <<EOF2
To create a simple web GUI:
$ cat >hello.hs <<EOF
{-# LANGUAGE OverloadedStrings #-}
import Reflex.Dom
main = mainWidget $ text \"Hello, world!\"
EOF
$ ghcjs hello.hs
Then navigate your browser to file://$(pwd)/hello.jsexe/index.html
Or to see a more complex GUI example (based on the source at https://github.com/reflex-frp/reflex-todomvc/blob/master/src/Main.hs), navigate your browser to file://$(nix-build $NIXOPTS --no-out-link "$DIR" -A ghcjs.reflex-todomvc)/bin/reflex-todomvc.jsexe/index.html
EOF2
)
INFO=$(cat <<EOF
You are now in a shell with access to the Reflex functional reactive programming engine.
$SUGGESTION_GHC
$SUGGESTION_GHCJS
EOF
)
terminate_logging
try_reflex_shell --command "echo \"$INFO\" ; return" "$@"
| true |
9c4b2267f300d3fc39b038a3f7dc98928bcd89b3 | Shell | IHTSDO/snow-owl | /documentation/src/main/asciidoc/scripts/migration_4.6_to_4.7/snowowl_migration_4.6_to_4.7_phase_1_table_creation.sh | UTF-8 | 4,160 | 3.484375 | 3 | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"EPL-1.0"
] | permissive | #!/usr/bin/env bash
#
# Copyright (c) 2013-2016 B2i Healthcare. All rights reserved.
#
# Creates the new Code System tables for the terminology.
# Usage: ./snowowl_migration_4.6_to_4.7_phase_1_table_creation.sh <password> <terminology>
# E.g. ./snowowl_migration_4.6_to_4.7_phase_1_table_creation.sh admin snomed
PASSWORD=$1
TERMINOLOGY=$2
MYSQL=`which mysql`
USER=root
if [ "x$PASSWORD" = "x" ]; then
echo -e "Please set the mysql password before running this script. Exiting with error."
exit 1
fi
if [ "x$TERMINOLOGY" = "x" ]; then
echo -e "Please set the terminology name (e.g. snomed) before running this script. Exiting with error."
exit 1
fi
COMMAND="USE ${TERMINOLOGY}Store;"
echo -e "Starting Snow Owl migration procedure phase 1 - Table creation."
echo -e "\t1. Creating new Code System tables."
COMMAND="${COMMAND} DROP TABLE IF EXISTS \`terminologymetadata_codesystem\`;"
COMMAND="${COMMAND} CREATE TABLE \`terminologymetadata_codesystem\` (
\`cdo_id\` bigint(20) NOT NULL,
\`cdo_version\` int(11) NOT NULL,
\`cdo_branch\` int(11) NOT NULL,
\`cdo_created\` bigint(20) NOT NULL,
\`cdo_revised\` bigint(20) NOT NULL,
\`cdo_resource\` bigint(20) NOT NULL,
\`cdo_container\` bigint(20) NOT NULL,
\`cdo_feature\` int(11) NOT NULL,
\`shortName\` varchar(2048) DEFAULT NULL,
\`codeSystemOID\` varchar(255) DEFAULT NULL,
\`name\` varchar(2048) DEFAULT NULL,
\`maintainingOrganizationLink\` varchar(2048) DEFAULT NULL,
\`language\` varchar(255) DEFAULT NULL,
\`citation\` varchar(2048) DEFAULT NULL,
\`iconPath\` varchar(2048) DEFAULT NULL,
\`terminologyComponentId\` varchar(2048) DEFAULT NULL,
\`repositoryUuid\` varchar(255) DEFAULT NULL,
\`branchPath\` varchar(2048) DEFAULT NULL,
\`extensionOf\` bigint(20) DEFAULT NULL,
\`codeSystemVersions\` int(11) DEFAULT NULL,
UNIQUE KEY \`terminologymetadata_CodeSystem_idx0\` (\`cdo_id\`,\`cdo_version\`,\`cdo_branch\`),
KEY \`terminologymetadata_CodeSystem_idx1\` (\`cdo_id\`,\`cdo_revised\`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8; "
COMMAND="${COMMAND} DROP TABLE IF EXISTS \`terminologymetadata_codesystem_codesystemversions_list\`;"
COMMAND="${COMMAND} CREATE TABLE \`terminologymetadata_codesystem_codesystemversions_list\` (
\`cdo_source\` bigint(20) DEFAULT NULL,
\`cdo_branch\` int(11) DEFAULT NULL,
\`cdo_version_added\` int(11) DEFAULT NULL,
\`cdo_version_removed\` int(11) DEFAULT NULL,
\`cdo_idx\` int(11) DEFAULT NULL,
\`cdo_value\` bigint(20) DEFAULT NULL,
KEY \`terminologymetadata_CodeSystem_codeSystemVersions_list_idx0\` (\`cdo_source\`),
KEY \`terminologymetadata_CodeSystem_codeSystemVersions_list_idx1\` (\`cdo_branch\`),
KEY \`terminologymetadata_CodeSystem_codeSystemVersions_list_idx2\` (\`cdo_version_added\`),
KEY \`terminologymetadata_CodeSystem_codeSystemVersions_list_idx3\` (\`cdo_version_removed\`),
KEY \`terminologymetadata_CodeSystem_codeSystemVersions_list_idx4\` (\`cdo_idx\`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8; "
COMMAND="${COMMAND} DROP TABLE IF EXISTS \`terminologymetadata_codesystemversion\`;"
COMMAND="${COMMAND} CREATE TABLE \`terminologymetadata_codesystemversion\` (
\`cdo_id\` bigint(20) NOT NULL,
\`cdo_version\` int(11) NOT NULL,
\`cdo_branch\` int(11) NOT NULL,
\`cdo_created\` bigint(20) NOT NULL,
\`cdo_revised\` bigint(20) NOT NULL,
\`cdo_resource\` bigint(20) NOT NULL,
\`cdo_container\` bigint(20) NOT NULL,
\`cdo_feature\` int(11) NOT NULL,
\`versionId\` varchar(2048) DEFAULT NULL,
\`description\` varchar(2048) DEFAULT NULL,
\`parentBranchPath\` varchar(2048) DEFAULT NULL,
\`effectiveDate\` timestamp NULL DEFAULT NULL,
\`importDate\` timestamp NULL DEFAULT NULL,
\`lastUpdateDate\` timestamp NULL DEFAULT NULL,
UNIQUE KEY \`terminologymetadata_CodeSystemVersion_idx0\` (\`cdo_id\`,\`cdo_version\`,\`cdo_branch\`),
KEY \`terminologymetadata_CodeSystemVersion_idx1\` (\`cdo_id\`,\`cdo_revised\`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8; "
${MYSQL} -u${USER} -p${PASSWORD} -e "${COMMAND}"
echo -e "\tCreating new Code System tables process is complete."
echo -e "Snow Owl migration procedure phase 1 - Table creation is complete."
| true |
8753a798a8a42f3f2a09d3510985a22ee585923b | Shell | lucasdc6/Facultad | /4to/2do_Semestre/PDyTR/Practicas/Practica_4/Entrega/punto3/run | UTF-8 | 510 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
if [ $# -eq 0 -o "$1" = "-h" -o "$1" = "--help" ]; then
cat <<-HELP
Usage: $0
$0 read outputFile inputFile
$0 write inputFile outputFile
$0 readwrite outputFile inputFile
$0 list directory
HELP
exit 0
fi
if [ $# -eq 2 ]; then
java -cp ../lib/jade.jar:classes jade.Boot -gui -container -host localhost -agents "mol:AgentePunto3($1,$2)"
else
java -cp ../lib/jade.jar:classes jade.Boot -gui -container -host localhost -agents "mol:AgentePunto3($1,$2,$3)"
fi
| true |
880cc5aa7f92380b2cdd2d599ca8becdd829fd7c | Shell | bluhm/mirror-openbsd | /signify-releases | UTF-8 | 2,403 | 3.734375 | 4 | [
"ISC"
] | permissive | #!/bin/sh
# verify all releases, architectures and packages with signify
set -eu
if [ $# != 0 ]
then
echo usage: signify-releases >&2
exit 2
fi
versions=`jot - 99 55 -1`
debug="${debug:=debug}"
ftp="/data/mirror/openbsd/ftp"
tag="signify-releases[$$]"
logger -p daemon.info -t "$tag" "openbsd start"
for version in $versions
do
release="${version%[0-9]}.${version##[0-9]}"
dir="$ftp/$release"
[ -d "$dir" ] || continue
logger -p daemon.info -t "$tag" "openbsd $version start"
# verify release directory
key="/etc/signify/openbsd-$version-base.pub"
if ! ( cd "$dir" && signify -C -p $key -x SHA256.sig )
then
logger -p daemon.warning -s -t "$tag" "openbsd $dir fail"
fi | logger -p "daemon.$debug" -t signify
# verify each architecture release
for dir in $ftp/$release/*
do
[ -d "$dir" ] || continue
arch="${dir##*/}"
case "$arch" in
Changelogs|packages|tools) continue ;;
esac
logger -p daemon.info -t "$tag" "openbsd $arch start"
if ! ( cd "$dir" && signify -C -p $key -x SHA256.sig )
then
logger -p daemon.warning -s -t "$tag" \
"openbsd $dir fail"
else
logger -p daemon.notice -t "$tag" \
"openbsd $arch success"
fi | logger -p "daemon.$debug" -t signify
done
# verify each architecture package
key="/etc/signify/openbsd-$version-pkg.pub"
for dir in $ftp/$release/packages/*
do
[ -d "$dir" ] || continue
package="${dir##*/}"
logger -p daemon.info -t "$tag" "openbsd $package start"
if ! ( cd "$dir" && signify -C -p $key -x SHA256.sig )
then
logger -p daemon.warning -s -t "$tag" \
"openbsd $dir fail"
else
logger -p daemon.notice -t "$tag" \
"openbsd $package success"
fi | logger -p "daemon.$debug" -t signify
done
# verify each patch
dir="$ftp/patches/$release/common"
[ -d "$dir" ] || continue
key="/etc/signify/openbsd-$version-base.pub"
for file in "$dir"/*.sig
do
[ -f "$file" ] || continue
patch="${file##*/}"
logger -p daemon.info -t "$tag" "openbsd $patch start"
if ! ( cd "$dir" && signify -V -e -p $key -x $patch -m /dev/null )
then
logger -p daemon.warning -s -t "$tag" \
"openbsd $file fail"
else
logger -p daemon.notice -t "$tag" \
"openbsd $patch success"
fi | logger -p "daemon.$debug" -t signify
done
logger -p daemon.notice -t "$tag" "openbsd $version success"
done
logger -p daemon.notice -t "$tag" "openbsd finished"
exit 0
| true |
c15ec040802a238a3c69e93594787cd29f0d0eb9 | Shell | KestrelInstitute/Specware | /Scripts/unix/Verify_Lisp | UTF-8 | 439 | 3.328125 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash --noprofile
PATH=/bin:/usr/bin:/etc:/sbin:/usr/sbin:/usr/local/bin:${PATH}
LISP_MODE=$1
if [ -f $SPECWARE4/Scripts/unix/Verify_${LISP_MODE} ]; then
# bash convention for . means the file is processed inline (as if copied here)
echo $SPECWARE4/Scripts/unix/Verify_${LISP_MODE} $2
. $SPECWARE4/Scripts/unix/Verify_${LISP_MODE} $2
else
echo " Verify_Lisp: Unrecognized lisp mode. LISP_MODE = [$LISP_MODE]"
exit 1
fi
| true |
e24a8f78dfb7109ff228e5e734823e4ec5336cd4 | Shell | zacck-zz/oapth | /scripts/common.sh | UTF-8 | 1,146 | 3 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -euxo pipefail
export RUST_BACKTRACE=1
export RUSTFLAGS='
-D bad_style
-D future_incompatible
-D missing_debug_implementations
-D missing_docs
-D nonstandard_style
-D rust_2018_compatibility
-D rust_2018_idioms
-D trivial_casts
-D unused_qualifications
-D warnings
'
test_package_with_feature() {
local package=$1
local features=$2
/bin/echo -e "\e[0;33m***** Testing ${package} with features '${features}' *****\e[0m\n"
cargo test --manifest-path "${package}"/Cargo.toml --features "${features}" --no-default-features
tools $package "--features ${features}"
}
tools() {
local package=$1
local features=$2
/bin/echo -e "\e[0;33m***** Running Clippy on ${package} | ${features} *****\e[0m\n"
cargo clippy $features --manifest-path "${package}"/Cargo.toml -- \
-D clippy::restriction \
-D warnings \
-A clippy::implicit_return \
-A clippy::missing_docs_in_private_items
/bin/echo -e "\e[0;33m***** Running Rustfmt on ${package} *****\e[0m\n"
cargo fmt --all --manifest-path "${package}"/Cargo.toml
}
| true |
630df4ed1007a3ee55b856835a176403f772e36b | Shell | efournier92/scanz | /_src/utils/time.bash | UTF-8 | 224 | 2.890625 | 3 | [] | no_license | #!/bin/bash
#----------------
# Name :
# Description :
# Author : E Fournier
# Dependencies :
# Arguments :
# Example Usage :
#----------------
get_time_now() {
echo $(date '+%y-%m-%d_%H%M%S')
}
| true |
b71f3bd2b4da0b48ed491e0787a5ed0e1fe082ce | Shell | ligurio/snippets | /mutation-testing/mutate.sh | UTF-8 | 2,096 | 3.421875 | 3 | [] | no_license | #!/bin/bash
set -e
# sources/libs/kos/coresrv/entity/entity_api.c make knentitytests/qtest
# sources/libs/kos/coresrv/sl/sl-static.c make knsltests/qtest
# sources/libs/kos/coresrv/sl/sl_api.c make knsltests/qtest
# sources/libs/kos/coresrv/task/task_api.c make kntasktests/qtest
# sources/libs/kos/coresrv/thread/thread_api.c make knthreadtests/qtest
# sources/libs/kos/coresrv/time/time_api.c make kntimetests/qtest
# sources/libs/kos/coresrv/vlog/vlog_api.c make knvlogtests/qtest
prepare() {
local mutant=$1
local path=$2
echo "copy $mutant to $path"
cp $mutant $path
}
runtest() {
local path=$1
local test_cmd=$2
echo "run tests"
echo "$test_cmd"
python check_status.py "$test_cmd" && echo "SURVIVED ($path)" || true
}
process_file() {
local path=$1
local test_cmd=$2
local filename=$(basename $path)
local name="${filename%.*}"
local pattern="$name.mutant.*.c"
echo "Path: $1"
echo "Command line: $2"
echo "Pattern: $pattern"
local mutated_sources=$(find $name -name "$pattern" -print)
local backup="$path._"
echo "backup source file: $path --> $backup"
cp $path $backup
for m in $mutated_sources; do
echo "current mutant: $m"
prepare $m $path
runtest $m "$test_cmd"
diff -u $path $m || true
done
echo "restore source file: $backup --> $path"
cp $backup $path
echo "============================================="
}
process_file "../sources/libs/kos/coresrv/entity/entity_api.c" "make knentitytests/qtest"
process_file "../sources/libs/kos/coresrv/sl/sl-static.c" "make knsltests/qtest"
process_file "../sources/libs/kos/coresrv/sl/sl_api.c" "make knsltests/qtest"
process_file "../sources/libs/kos/coresrv/task/task_api.c" "make kntasktests/qtest"
process_file "../sources/libs/kos/coresrv/thread/thread_api.c" "make knthreadtests/qtest"
process_file "../sources/libs/kos/coresrv/vlog/vlog_api.c" "make knvlogtests/qtest"
#process_file "../sources/libs/kos/coresrv/time/time_api.c" "make kntimetests/qtest" | true |
3a54cdaf160f9f736b697520dfd509ecd3363336 | Shell | EdmundsEcho/dotfiles | /tmux.conf | UTF-8 | 5,844 | 3.109375 | 3 | [] | no_license | #!/bin/bash
#----------------
# .tmux.conf
#----------------
### 3 ways to set config
# bind-key
# bind-key -n
# bind-key -r
#
# set and set-option are the same
## prefix key to ctrl+s
unbind C-b
set -g prefix C-s
# send the prefix to client inside window (ala nested sessions)
bind-key -r C-s send-prefix
# -r -> repeat
# disable wait after escape, so vim is usable
set -sg escape-time 0
# Name the window when being created
bind-key C new-window \; command-prompt -p "Name for this new window: " "rename-window '%%'"
# don't rename windows automatically
set-option -g allow-rename off
# Window sequence: send the current window to the end of the window list
# tmux escape + <Shift> left or right arrow key
bind-key T swap-window -t 0
bind-key S-Left swap-window -t -1
bind-key S-Right swap-window -t +1
# Set default terminal
# NOTE: do not set in zsh as it will overwrite TERM value
# set TERM value in either Terminal app or iTerm
set -g default-terminal "screen-256color"
set-option -g default-shell /usr/local/bin/zsh
# Fix for Sierra
# set -g default-shell $SHELL
# TESTING
#set -g default-command "reattach-to-user-namespace -l ${SHELL}"
# tmux fix for cursor not changing shape when in vim-insert mode
# TESTING
# set -g -a terminal-overrides ',*:Ss=\E[%p1%d q:Se=\E[2 q'
# Enable the mouse
set -g mouse on
## Split window into panes
bind-key - split-window -v -c '#{pane_current_path}'
bind-key \ split-window -h -c '#{pane_current_path}'
bind-key / split-window -h -c '#{pane_current_path}'
bind-key c new-window -c '#{pane_current_path}'
#bind-key b break-pane -d
#bind-key C-j choose-tree
#bind K kill-session
# switch panes using ctr-hjkl without prefix <ctr-c>
# Includes coordination wiht vim
is_vim='echo "#{pane_current_command}" | grep -iqE "(^|\/)g?(view|n?vim?)(diff)?$"'
bind -n C-h if-shell "$is_vim" "send-keys C-h" "select-pane -L"
bind -n C-j if-shell "$is_vim" "send-keys C-j" "select-pane -D"
bind -n C-k if-shell "$is_vim" "send-keys C-k" "select-pane -U"
bind -n C-l if-shell "$is_vim" "send-keys C-l" "select-pane -R"
#bind -n C-\ if-shell "$is_vim" "send-keys C-\\" "select-pane -l"
# switch panes using Alt-arrow without prefix
bind -n M-Left select-pane -L
bind -n M-Right select-pane -R
bind -n M-Up select-pane -U
bind -n M-Down select-pane -D
## Pane resizing
## Fine adjustment (1 or 2 cursor cells per bump)
## -n does not require prefix
bind -n M-h resize-pane -L 2
bind -n M-l resize-pane -R 2
bind -n M-j resize-pane -D 1
bind -n M-k resize-pane -U 1
# Status Bar
# ====================
# NOTE(review): the *-fg / *-bg / *-attr option forms used below were removed
# in tmux 2.9 in favor of the combined *-style options; this section assumes
# an older tmux -- confirm the target tmux version before upgrading.
set-option -g status on
# control/toggle use of status bar
bind-key q set-option status
set -g status-interval 1
set -g status-justify left
set -g status-bg default
set -g status-fg white
set -g status-left ''
set -g status-right-length 90
# Desktop vs Portable (commands sent to the shell); the second 'if' wins on
# the portable machine since "edmundcape" also matches the first pattern.
if '[[ "$USER" =~ edmund ]]' \
    'set -g status-right "#W | %I:%M %p - %a, %h %d "'
if '[[ "$USER" =~ edmundcape ]]' \
    'set -g status-right "#W | Batt: #{battery_icon} #{battery_percentage} #{battery_remain} | %I:%M %p - %a, %h %d "'
set -g status-bg default
set -g status-fg white
#
## Set window split options
set-option -g pane-active-border-fg green
#set-option -g pane-active-border-bg default
set-option -g pane-border-fg '#696969'
set-option -g pane-border-bg black
#
## Highlighting the active window in status bar
setw -g window-status-current-bg default
setw -g window-status-current-fg default
setw -g window-status-bg default
setw -g window-status-fg white
setw -g window-status-activity-fg default
setw -g window-status-bell-attr blink
setw -g window-status-bell-bg default
setw -g window-status-bell-fg default
setw -g window-status-format '#[fg=white] ● '
setw -g window-status-current-format '#[fg=colour51] ● '
# clock
#set-window-option -g clock-mode-colour colour64 #green
# --------------
# END status bar
# Tmux ability to influence window size of apps running in pane
setw -g aggressive-resize on
## Reload the config file manually
bind-key r source-file ~/.tmux.conf \; display-message "~/.tmux.conf reloaded"
# TESTING
set -g @shell_mode 'vi'
# Vi copypaste mode
setw -g mode-keys vi
# TESTING
setw -g xterm-keys on
# Messages
# NOTE(review): message-*-fg/bg/attr forms were removed in tmux 2.9
# (replaced by message-style); assumes an older tmux.
set -g message-attr bold
set -g message-fg black
set -g message-bg yellow
set -g message-command-fg blue
set -g message-command-bg black
# color of window
set -g window-active-style 'fg=#CCE0B8,bg=#000000' # lowest before black = #010101
set -g window-style 'fg=#CCE0B8,bg=#212121' # move to white to increase contrast
# match with Airline and Vim
# color pane borders
set -g pane-active-border-fg colour51
set -g pane-active-border-bg '#000000'
set -g pane-border-fg '#696969'
set -g pane-border-bg '#000000'
# Capture when a window comes in and out of focus
set -g focus-events on
# Plugin manager and plugins for Tmux
# Run <prefix> + S-I to install plugins
# =====================================
set -g @plugin 'tmux-plugins/tpm'
# Plugins
# continuous saving and restores tmux sessions
# NOTE(review): tpm plugin specs are normally 'user/repo'; the bare
# 'tmux-continuum' (missing 'tmux-plugins/') may fail to resolve -- confirm.
set -g @plugin 'tmux-continuum'
set -g @plugin 'tmux-plugins/tmux-resurrect'
# ... include the neovim sessions
set -g @resurrect-strategy-nvim 'session'
# copy to system clipboard
set -g @plugin 'tmux-plugins/tmux-yank'
# other tmux settings that are "sensible"
set -g @plugin 'tmux-plugins/tmux-sensible'
# NOTE(review): the battery plugin is declared twice -- once conditionally
# for user 'edmundcape' and once unconditionally right after; the
# conditional line appears redundant.
if '[[ "$USER" =~ edmundcape ]]' \
    'set -g @plugin "tmux-plugins/tmux-battery"'
set -g @plugin 'tmux-plugins/tmux-battery'
# Other examples:
# set -g @plugin 'github_username/plugin_name'
# set -g @plugin 'git@github.com/user/plugin'
# set -g @plugin 'git@bitbucket.com/user/plugin'
# Initialize TMUX plugin manager
# Keep this line at the very bottom of tmux.conf
# Note: This is a bash script that zsh needs to pass on
run-shell "$HOME/.tmux/plugins/tpm/tpm"
| true |
805d81c905081c0ca17bcbb60db5ce318f6c9b20 | Shell | HortonworksUniversity/Operations-Labs | /build/security/ambari-bootstrap-master/extras/falcon/create-cluster-dirs.sh | UTF-8 | 1,056 | 3.25 | 3 | [] | no_license | #!/usr/bin/env bash
## Creates the HDFS directories required for adding a Falcon cluster.
## The cluster name is taken from the 'clusterName' env var (default: "clusterName").
clusterName="${clusterName:-clusterName}"
########################################################################
## Set magic variables for current file & dir
__dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
__file="${__dir}/$(basename "${BASH_SOURCE[0]}")"
__base="$(basename "${__file}" .sh)"
## Helper library: defines ambari_configs, ambari_config_get, ambari_cluster, ...
source "${__dir}/../ambari_functions.sh"
ambari_configs
## Kerberos realm from the cluster's kerberos-env config.
## ${ambari_config_get} is intentionally unquoted: it holds a command plus args.
realm=$(${ambari_config_get} kerberos-env | awk -F'"' '$2 == "realm" {print $4}' | head -1)
## On kerberized clusters, authenticate as the hdfs superuser first.
## (Original used an inverted "if ! -f ...; then true; else kinit" no-op branch.)
if [ -f /etc/security/keytabs/hdfs.headless.keytab ]; then
  sudo sudo -u hdfs kinit -kt /etc/security/keytabs/hdfs.headless.keytab "hdfs-${ambari_cluster}@${realm}"
fi
sudo sudo -u hdfs hadoop fs -mkdir -p "/apps/falcon/${clusterName}/staging" "/apps/falcon/${clusterName}/working"
sudo sudo -u hdfs hadoop fs -chmod 777 "/apps/falcon/${clusterName}/staging"
sudo sudo -u hdfs hadoop fs -chmod 755 "/apps/falcon/${clusterName}/working"
sudo sudo -u hdfs hadoop fs -chown -R falcon:hadoop "/apps/falcon/${clusterName}"
| true |
9888f96cd75b156be6c426c585bd27aba58b0aa0 | Shell | christianmesch/arch | /configs/rofi/scripts/displaymenu.sh | UTF-8 | 613 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Rofi menu to toggle external displays: pick a connected, non-primary
# output; if it is active, turn it off, otherwise enable it right of primary.
# Left unquoted where expanded so the option words split as intended.
rofi_command="rofi -p -no-show-icons -theme list-menu.rasi"

# Name of the primary output and all other connected outputs.
primary="$( xrandr | grep primary | cut -d ' ' -f 1 )"
options="$( xrandr | grep ' connected' | grep -v primary | cut -d ' ' -f 1 )"

chosen="$(echo -e "$options" | $rofi_command -dmenu)"
# Menu dismissed: bail out early. (Previously an empty $chosen was passed
# unquoted to grep -w, producing a usage error on stderr.)
[[ -n "$chosen" ]] || exit 0

# Is the chosen output currently active?
is_active="$( xrandr --listactivemonitors | awk '{print $4}' | grep -w -- "$chosen" )"

if [[ -n "$is_active" ]]; then
    notify-send "Disconnecting display $chosen"
    xrandr --output "$chosen" --off
else
    notify-send "Connecting display $chosen right of $primary"
    xrandr --output "$chosen" --auto --right-of "$primary"
fi
| true |
e19b7e8e8b1a5302fcb8391507ac39b9d2d83e40 | Shell | neyfrota/docker-remote-desktop | /devlab/devlab | UTF-8 | 5,699 | 4 | 4 | [] | no_license | #!/bin/bash
# ==========================================
# os specific
# ==========================================
# Linux xargs gets --no-run-if-empty; Darwin falls back to plain xargs and
# resolves the script dir with `pwd -P` instead of `readlink -f`.
# NOTE(review): on Darwin ABSPATH is the *current* directory, not the
# script's directory -- only correct when invoked from inside it. Confirm.
export xargs_command="xargs --no-run-if-empty "
case "$(uname -s)" in
   Darwin)
     export xargs_command="xargs "
     export ABSPATH=`pwd -P`
     ;;
   Linux)
     export ABSPATH=`dirname $(readlink -f $0)`
     ;;
   CYGWIN*|MINGW32*|MSYS*)
     export ABSPATH=`dirname $(readlink -f $0)`
     ;;
   *)
     export ABSPATH=`dirname $(readlink -f $0)`
     ;;
esac
# ==========================================
# check if we are in right place
# ==========================================
# This script must sit next to 'devlab' with the project Dockerfile one
# directory up; otherwise refuse to run.
cd $ABSPATH/
if [[ -f devlab && -f ../Dockerfile ]]
then
	true
else
	echo "Oops! I do not know where i am :/"
	exit;
fi
# ==========================================
# prepare dev data folder
# ==========================================
# /tmp/home is bind-mounted into the container as /home by the 'start' action.
mkdir /tmp/home 2>/dev/null
mkdir /tmp/home/user 2>/dev/null
if [ ! -d /tmp/home/user ]
then
	echo "Oops! Fail create /tmp/home/user :/"
	exit;
fi
# ==========================================
# some functions
# ==========================================
print_usage() {
	# Command reference for this tool; <<- strips the leading tabs.
	cat <<-USAGE
	==========================================
	development lab
	==========================================

	usage: $0 <command>

	commands:
	   status    -> display status
	   start     -> start development instance
	   stop      -> stop development instance
	   log       -> tail instance log
	   rebuild   -> destroy and build development instance
	   destroy   -> destroy development instance
	   console   -> access console at development instance
	USAGE
}
# Look up a *running* docker-remote-desktop container.
# Globals: sets `active_id` to the container id, or empty when none is running.
# Returns 0 when a running instance was found, 1 otherwise.
set_active_instance_id() {
	#
	active_id=;
	# First container id whose image name matches docker-remote-desktop.
	now_id=`docker ps | tail -n +2 | awk '{print $1,$2}' | grep docker-remote-desktop | awk '{print $1}' | head -n 1 `
	#now_id=""
	#echo "now_id=$now_id"
	if [ -z $now_id ]; then
		#echo "no now_id"
		return 1
	fi
	# Double-check the container is actually in the Running state.
	now_status=`docker inspect --format '{{ .State.Running }}' $now_id`
	#echo "now_status=$now_status"
	if [ $now_status == "true" ]; then
		active_id=$now_id;
		return 0
	fi
	#
	#echo "not running now_status"
	return 1
}
# =====================================================================
# actions
# =====================================================================
# Dispatch on $1; every action ends with `exit`, so at most one runs.
#
# empty
if [ -z "$1" ]; then
	print_usage
	echo ""
	exit;
fi
# Report whether a development container is currently running.
if [ $1 == "status" ]; then
	set_active_instance_id
	if [ -z $active_id ]; then
		echo "No running instance"
	else
		echo "We have a running instance id=$active_id"
	fi
	exit;
fi
# Follow the running container's log output.
if [ $1 == "log" ]; then
	set_active_instance_id
	if [ -z $active_id ]; then
		echo "No running instance"
	else
		echo "log instance id=$active_id"
		docker logs -f -t $active_id
	fi
	exit;
fi
# Kill/remove all matching containers and images, then rebuild the image.
if [ $1 == "rebuild" ]; then
	docker ps | tail -n +2 | awk '{print $1,$2}' | grep docker-remote-desktop | awk '{print $1}' | $xargs_command docker kill
	docker ps -a | tail -n +2 | awk '{print $1,$2}' | grep docker-remote-desktop | awk '{print $1}' | $xargs_command docker rm
	docker images | tail -n +2 | grep docker-remote-desktop | awk '{print $3}' | $xargs_command docker rmi -f
	cd ..
	docker build -t docker-remote-desktop .
	exit;
fi
# NOTE(review): 'build' is identical to 'rebuild' above (undocumented in usage).
if [ $1 == "build" ]; then
	docker ps | tail -n +2 | awk '{print $1,$2}' | grep docker-remote-desktop | awk '{print $1}' | $xargs_command docker kill
	docker ps -a | tail -n +2 | awk '{print $1,$2}' | grep docker-remote-desktop | awk '{print $1}' | $xargs_command docker rm
	docker images | tail -n +2 | grep docker-remote-desktop | awk '{print $3}' | $xargs_command docker rmi -f
	cd ..
	docker build -t docker-remote-desktop .
	exit;
fi
# Rebuild under the published name and push to Docker Hub.
if [ $1 == "push" ]; then
	docker ps | tail -n +2 | awk '{print $1,$2}' | grep "neyfrota/docker-remote-desktop" | awk '{print $1}' | $xargs_command docker kill
	docker ps -a | tail -n +2 | awk '{print $1,$2}' | grep "neyfrota/docker-remote-desktop" | awk '{print $1}' | $xargs_command docker rm
	docker images | tail -n +2 | grep "neyfrota/docker-remote-desktop" | awk '{print $3}' | $xargs_command docker rmi -f
	cd ..
	docker build -t neyfrota/docker-remote-desktop .
	docker push neyfrota/docker-remote-desktop
	exit;
fi
# Remove containers and images (the /tmp data wipe is commented out).
if [ $1 == "destroy" ]; then
	echo "We need SU permission to remove /tmp/docker-remote-desktop"
	#sudo rm -Rf /tmp/docker-remote-desktop
	docker ps -a | tail -n +2 | awk '{print $1,$2}' | grep docker-remote-desktop | awk '{print $1}' | $xargs_command docker kill
	docker ps -a | tail -n +2 | awk '{print $1,$2}' | grep docker-remote-desktop | awk '{print $1}' | $xargs_command docker rm
	docker images | tail -n +2 | grep docker-remote-desktop | awk '{print $3}' | $xargs_command docker rmi -f
	exit;
fi
# Start a detached container (ssh on :22, caller's pubkey injected,
# /tmp/home mounted as /home) unless one is already running.
if [ $1 == "start" ]; then
	set_active_instance_id
	if [ -z $active_id ]; then
		echo "No running instance, Lets start"
		cd ..
		docker run -d -e "development=true" -p 22:22 -e "pubkey=$(cat ~/.ssh/id_rsa.pub)" -v /tmp/home:/home neyfrota/docker-remote-desktop
		set_active_instance_id
		if [ -z $active_id ]; then
			echo "Fail start. No running instance"
		else
			echo "New running instance id=$active_id"
		fi
	else
		echo "We have a running instance id=$active_id"
	fi
	exit;
fi
# Kill the running container and verify it is gone.
if [ $1 == "stop" ]; then
	docker ps | tail -n +2 | awk '{print $1,$2}' | grep docker-remote-desktop | awk '{print $1}' | $xargs_command docker kill
	set_active_instance_id
	if [ -z $active_id ]; then
		echo "No running instance"
	else
		echo "Fail stop. We still have a running instance id=$active_id"
	fi
	exit;
fi
# Open an interactive shell inside the running container.
if [ $1 == "console" ]; then
	set_active_instance_id
	if [ -z $active_id ]; then
		echo "No running instance"
	else
		echo "Connecting to instance id=$active_id"
		docker exec -i -t $active_id /bin/bash
	fi
	exit;
fi
#
| true |
061ccd98432f718d6a6d4992e7e7808c38f9219f | Shell | georgemarshall/python-plans | /python2/cleo/plan.sh | UTF-8 | 877 | 2.671875 | 3 | [] | no_license | pkg_name=cleo
pkg_distname=${pkg_name}
pkg_version=0.6.0
pkg_origin=python2
pkg_license=('MIT')
pkg_maintainer="George Marshall <george@georgemarshall.name>"
pkg_description="Cleo allows you to create beautiful and testable \
command-line interfaces."
pkg_upstream_url=https://github.com/sdispater/cleo
pkg_dirname=${pkg_distname}-${pkg_version}
# Source tarball from PyPI, pinned by its sha256 checksum.
pkg_source=https://pypi.org/packages/source/c/cleo/${pkg_dirname}.tar.gz
pkg_shasum=818646a23e6bfc459be3a56651d3831de2f568a5262af04be86902fc18c67144
# Runtime dependencies (sibling plans under python2/).
pkg_deps=(
  python2/python
  python2/backpack
  python2/pastel
  python2/psutil
  python2/pylev
)
# Build-time only.
pkg_build_deps=(
  python2/setuptools
)
# Join PYTHONPATH entries with ':' when composing the environment.
pkg_env_sep=(
  ['PYTHONPATH']=':'
)
# Build callback (Habitat-style plan): compile the Python package in place.
do_build() {
  python setup.py build
}
# Install callback: install into this package's prefix and publish its
# site-packages directory on PYTHONPATH for dependent plans.
do_install() {
  add_path_env 'PYTHONPATH' "$PYTHON_SITE_PACKAGES"
  python setup.py install \
    --prefix="$pkg_prefix" \
    --no-compile \
    --old-and-unmanageable # bypass egg install
}
| true |
47a92c73dc82c0c70b1057076f9862151cb61e2a | Shell | blog-neas/bash_scripts_to_big_data | /4.sh | UTF-8 | 98 | 3.046875 | 3 | [] | no_license |
# Make four numbered copies of output.dat: output1.dat .. output4.dat.
n=1
until [ "$n" -gt 4 ]
do
  cp output.dat "output$n.dat"
  n=$(( n + 1 ))
done
| true |
8677c561a03eddcc65c5483fa8d66fedc2000131 | Shell | anran-world/react-admin-plus | /bash.sh | UTF-8 | 413 | 3.0625 | 3 | [
"MIT"
] | permissive | git add .
# WARNING: stages *everything* and commits with a fixed message, then
# merges master into the current branch and pushes it.
git commit -m "add: map page"
git pull origin master
git merge master
# (zh) "Merge finished, pushing to the remote repository"
echo 代码合并完毕,正在推送到远程仓库
sleep 1s
# Print the currently checked-out branch name.
# `grep "*"` keeps the line git marks with a literal '*'; ${br/* /} then
# strips through the space -- assumes branch names contain no spaces (TODO confirm).
function obtain_git_branch {
  br=`git branch | grep "*"`
  echo ${br/* /}
}
branch=`obtain_git_branch`
# (zh) "Current branch:"
echo 当前分支: $branch
sleep 2s
git push --set-upstream origin $branch
# (zh) "Code pushed to the remote branch"
echo 代码已推送到远程分支
sleep 1s
npm run fast
# (zh) "Build finished"
echo 已构建完成
| true |
3f7783748d3e5b37b50df5a9609dac4ba0cd0e4a | Shell | misc0110/LiveTikZ | /makedeb.sh | UTF-8 | 1,502 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Build the Debian package for LiveTikZ. Must run under fakeroot (or as
# root) so the staged files can be owned root:root for dpkg-deb.
if [ "$(whoami)" != "root" ]; then
	echo "You need to run this target using fakeroot: fakeroot -u make deb"
	exit 1
fi
# Package staging tree.
mkdir -pv livetikz/usr/bin
mkdir -pv livetikz/usr/share/doc/livetikz/
mkdir -pv livetikz/usr/share/man/man1/
mkdir -pv livetikz/usr/share/applications/
mkdir -pv livetikz/usr/share/icons/hicolor/256x256/apps
mkdir -pv livetikz/usr/share/livetikz
# Binary.
cp build/livetikz livetikz/usr/bin
strip livetikz/usr/bin/livetikz
# Debian control file with the current version substituted in.
mkdir -p livetikz/DEBIAN
sed "s/%VERSION%/$(cat VERSION)/" docs/debian-control > livetikz/DEBIAN/control
# Docs, changelog and man page (gzip -n omits the timestamp for
# reproducible output; plain changelog is removed after compression).
echo "initial version" > livetikz/usr/share/doc/livetikz/changelog
echo "Copyright 2020, Michael Schwarz" > livetikz/usr/share/doc/livetikz/copyright
gzip -c -9 -n livetikz/usr/share/doc/livetikz/changelog > livetikz/usr/share/doc/livetikz/changelog.gz
gzip -c -9 -n docs/livetikz.1 > livetikz/usr/share/man/man1/livetikz.1.gz
# Desktop integration and templates.
cp data/livetikz.desktop livetikz/usr/share/applications/
cp data/livetikz.png livetikz/usr/share/icons/hicolor/256x256/apps
rm livetikz/usr/share/doc/livetikz/changelog
cp data/templates/* livetikz/usr/share/livetikz/
# Normalize permissions and ownership before packaging.
chmod -R 0755 livetikz/usr
chmod 0644 livetikz/usr/share/doc/livetikz/*
chmod 0644 livetikz/usr/share/man/man1/livetikz.1.gz
chmod 0644 livetikz/usr/share/icons/hicolor/256x256/apps/*
chmod 0644 livetikz/usr/share/applications/*
chmod 0644 livetikz/usr/share/livetikz/*
chown -R root:root livetikz/
# Build, lint, and move the final package into dist/.
dpkg-deb --build livetikz
rm -rf livetikz
lintian livetikz.deb
mv livetikz.deb dist/livetikz_$(cat VERSION)_amd64.deb
| true |
298602df72c33e4a85d24a39f97ae6bb3e172d4e | Shell | ashu0008/assignment | /prepare-env.sh | UTF-8 | 736 | 3.59375 | 4 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env bash
#
# Creates a base64-encoded 32-byte key and the directory structure used to
# exchange files between the sender and receiver containers.
# (NOTE: `openssl base64` emits standard base64 with '+' and '/', not the
# URL-safe alphabet the original header claimed.)
#
# Run ./prepare-env.sh to create the key and directory structure.
#
# Generated key can be mounted to app-sender and app-receiver containers to allow symmetric encryption and decryption.
# Mount input to /usr/share/app-sender/input of app-sender.
# Mount status-db to /usr/share/app-sender/status-db of app-sender.
# Mount output to /usr/share/app-receiver/output of app-receiver.
#
# Absolute directory containing this script.
# (Bug fix: the original `dirname \"$(readlink "$0")\"` passed literal quote
# characters to dirname, and readlink prints nothing when $0 is not a
# symlink, so BASE_PATH was wrong whenever the script was run directly.)
BASE_PATH=$(cd "$(dirname "$0")" && pwd)
mkdir -p "$BASE_PATH/input" "$BASE_PATH/output" "$BASE_PATH/status-db"
dd if=/dev/urandom bs=32 count=1 2>/dev/null | openssl base64 > "$BASE_PATH/key"
| true |
b90943e375828df02f4ff40848b56867da871b7e | Shell | asyavuz/TF_chipseq_pipeline | /install_dependencies.sh | UTF-8 | 19,551 | 3.6875 | 4 | [] | no_license | ################ local installation ##################
# Install prefix: first CLI argument, or ~/software_bds by default.
if [ "$#" -lt 1 ]
then
	SOFTWARE=$HOME/software_bds
else
	SOFTWARE=$1
fi
echo
echo "Installation Directory: $SOFTWARE"
echo
# DO NOT CHANGE NAMING OF SOFTWARE DIRECTORY!
BASHRC=$HOME/.bashrc
# Flag files under $FLAGDIR mark installation steps that already succeeded.
FLAGDIR=$SOFTWARE/flags
mkdir -p $FLAGDIR
SCRIPTDIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# Build prerequisites expected on Debian-like systems (checked via dpkg).
SOFTWARES_APT_GET=(
build-essential
zlib1g-dev
libncurses5-dev
gfortran
openssl
libssl-dev
libfreetype6-dev
liblapack-dev
pkg-config
poppler-utils
libboost-all-dev
graphviz
libcurl4-openssl-dev
libxp6
libgsl0-dev
)
#libboost-all-dev
# Build prerequisites expected on Fedora/RHEL-like systems (checked via rpm).
SOFTWARES_YUM=(
gcc
gcc-c++
kernel-devel
lapack-devel
libXpm-devel
libXp-devel
libXmu-devel
wget
bc
zlib-devel
ncurses-devel
gcc-gfortran
openssl
openssl-devel
freetype-devel
poppler-utils
boost-devel
graphviz
libcurl-devel
libpng-devel
bzip2
gsl-devel
)
#boost-devel
# Detect the distro family from /etc/*-release.
LINUX_ID_LIKE="non-debian,non-fedora"
if [[ $(cat /etc/*-release | grep 'Red Hat\|fedora' | wc -l) > 0 ]]; then
	LINUX_ID_LIKE=fedora
fi
if [[ $(cat /etc/*-release | grep debian | wc -l) > 0 ]]; then
	LINUX_ID_LIKE=debian
fi
echo
echo Found Linux distribution: ${LINUX_ID_LIKE} based.
echo
echo
echo Checking softwares on your system...
echo
EXIT=0
# --- Prerequisite checks: distro packages, git, and java. Any miss sets
# --- EXIT=1 so the user is prompted before installation proceeds.
if [ ${LINUX_ID_LIKE} == debian ]; then
	for i in "${SOFTWARES_APT_GET[@]}"; do
		if [ $(dpkg -l | awk '{print $2;}' | grep $i | wc -l) != 0 ]; then
			echo
			echo "  * $i found on your system."
		else
			echo
			echo "  * $i not found on your system."
			echo "    Please install $i using the following commmand or ask administrator."
			echo "    ============================================================="
			echo "    sudo apt-get install $i"
			echo "    ============================================================="
			EXIT=1
		fi
	done
elif [ ${LINUX_ID_LIKE} == fedora ]; then
	for i in "${SOFTWARES_YUM[@]}"; do
		if [ $(rpm -q $i | grep -v "is not installed" | wc -l) != 0 ]; then
			echo
			echo "  * $i found on your system."
		else
			echo
			echo "  * $i not found on your system."
			echo "    Please install $i using the following commmand or ask administrator."
			echo "    ============================================================="
			if [ $i == "lapack-devel" ]; then
				echo "    # find yum repo for (server-optional) in /etc/yum.repos.d"
				echo "    grep -rl "server-optional" /etc/yum.repos.d"
				echo
				echo "    # enable the repo (repo name can vary)"
				echo "    vim /etc/yum.repos.d/[ANY_REPO_FOUND]"
				echo
				echo "    [rhui-REGION-rhel-server-optional]"
				echo "    ..."
				echo "    enabled=1"
				echo
				echo "    # install lapack-devel"
				echo "    sudo yum install lapack-devel"
			else
				echo "    sudo yum install $i"
			fi
			echo "    ============================================================="
			EXIT=1
		fi
	done
else
	echo
	echo "Your linux system is not based on Fedora (Red Hat, ...) or Debian (Ubuntu, ...)."
	echo "Installer will fail if you didn't manually install all required softwares."
	echo "List of required softwares: "
	echo "  gcc, gcc-c++, kernel-devel, lapack-devel, libXpm-devel, libXp-devel, libXmu-devel, wget, bc, zlib-devel, ncurses-devel, gcc-gfortran, openssl, openssl-devel, freetype-devel, poppler-utils, boost-devel, graphviz, libcurl-devel, libpng-devel, bzip2"
fi
if [ $(which git | wc -l) == 0 ]; then
	echo
	echo "  * Git not found on your system."
	echo "    Please install git using the following commmand or ask administrator."
	echo "    ============================================================="
	if [ ${LINUX_ID_LIKE} == debian ]; then
		echo "    sudo apt-get install git"
	elif [ ${LINUX_ID_LIKE} == fedora ]; then
		echo "    sudo yum install git"
	else
		echo "    Manually install git on your system"
	fi
	echo "    ============================================================="
	echo "    You can also install git on your local directory."
	echo "    (https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)"
	EXIT=1
else
	echo
	echo "  * Git found on your system."
fi
NEED_JAVA_INSTALL=0
if [ $(which java | wc -l) == 0 ]; then
	echo
	echo "  * Java not found on your system."
	EXIT=1
	NEED_JAVA_INSTALL=1
else
	JAVA_VER=$(java -version 2>&1 | grep "version" | cut -d'"' -f2 | cut -d'.' -f1-2)
	echo
	echo "  * Java found on your system (version: ${JAVA_VER})."
	# NOTE(review): inside [[ ]] the (( )) is just grouping and '<' is a
	# *string* comparison; it happens to work for "1.x" versions but would
	# misjudge versions like "9" or "11". Confirm before relying on it.
	if [[ (( ${JAVA_VER} < 1.7 )) ]]; then
		echo "    Java version is too low. Version needs to be >= 1.7"
		echo "    If you have multiple versions of java installed on your system, choose the latest one."
		echo "    ============================================================="
		echo "    sudo update-alternatives --config java"
		echo "    ============================================================="
		EXIT=1
		NEED_JAVA_INSTALL=1
	fi
fi
if [ ${NEED_JAVA_INSTALL} == 1 ]; then
	echo "    Please install java using the following commmand or ask administrator."
	echo "    ============================================================="
	if [ ${LINUX_ID_LIKE} == debian ]; then
		echo "    sudo apt-get install openjdk-8-jre"
		echo ""
		echo "    or"
		echo ""
		echo "    sudo apt-get install openjdk-7-jre"
	elif [ ${LINUX_ID_LIKE} == fedora ]; then
		echo "    sudo yum install java-1.8.0-openjdk"
	fi
	echo "    ============================================================="
	echo "    You can also install java (jre version >= 1.7) on your local directory."
	echo "    (http://java.com/en/download/manual.jsp?locale=en)"
fi
if [ $EXIT == 1 ]; then
	echo
	echo "WARNING!!"
	echo
	echo "Some of the softwares are not installed on your system."
	echo "We strongly recommend to install all softwares listed above and re-run install_dependencies.sh."
	echo
	echo "However, you can proceed if you have equivalent softwares locally installed on your system."
	echo "Otherwise, compilation of some bio-softwares will fail."
	echo
	echo "If you have trouble with 'sudo apt-get (or yum) install [PACKAGE]', try with 'sudo apt-get (or yum) update' first. "
	echo
	read -p "Are you sure that you want to proceed? [yes/no] " yn
	case $yn in
		yes ) echo "YES";;
		* ) exit;;
	esac
fi
# Make sure the bashrc file exists before add_to_bashrc appends to it.
if [ ! -f $BASHRC ]; then
	echo > $BASHRC
fi
FAILED=0
# Append each line of the global CONTENTS array to $BASHRC, skipping lines
# that are already present (so re-runs stay idempotent).
# Globals:  CONTENTS (read), BASHRC (read; file appended to)
# Outputs:  echoes every line actually added
function add_to_bashrc {
	local line
	echo
	echo "Adding following lines to your $BASHRC ..."
	for line in "${CONTENTS[@]}"; do
		# grep -qF: fixed-string match instead of the original regex +
		# `wc -l` count (PATH entries contain '$' and '/', which are
		# regex-significant); quoting keeps the line intact on echo.
		if ! grep -qF -- "$line" "$BASHRC"; then
			echo "$line"
			echo "$line" >> "$BASHRC"
		fi
	done
}
# Record the outcome of the *previous* command: $? must be inspected before
# anything else runs, so it is captured on the first line of the body.
# $1 - flag file to create on success (marks that step as done for re-runs)
# On failure: prints a banner, waits for [Enter], and sets the global FAILED=1.
function chk_exit_code {
	local status=$?
	if [ "$status" -eq 0 ]; then
		echo "." > "$1"
	else
		echo
		echo =====================================================
		echo Installation has failed due to non-zero exit code!
		echo =====================================================
		echo
		echo "Press [Enter] key to continue..."
		read -p ""
		FAILED=1
	fi
}
# =====================================================================
# Tool installation. Each step is guarded by a flag file in $FLAGDIR
# (written by chk_exit_code on success), so completed steps are skipped
# when the installer is re-run.
# =====================================================================
echo
echo "=============================================================================="
echo "Starting automatic installation for dependencies for ChIP-seq pipeline."
echo "Make sure you have enough disk space (at least 3GB) on your file system."
echo "All dependencies will be installed under $SOFTWARE."
echo "=============================================================================="
echo "Press [Enter] key to continue..."
read -p ""
echo
mkdir -p $SOFTWARE
# BigDataScript (pipeline engine); also puts ~/.bds on PATH via .bashrc.
if [ ! -f $FLAGDIR/BDS ]; then
	cd $SOFTWARE
	git clone https://github.com/pcingola/BigDataScript
	cd BigDataScript
	git checkout tags/v0.9999
	cp distro/bds_Linux.tgz $HOME
	cd $HOME
	tar zxvf bds_Linux.tgz
	chk_exit_code $FLAGDIR/BDS
	cp $SCRIPTDIR/bds.config $HOME/.bds/
	CONTENTS=("export PATH=\$PATH:\$HOME/.bds/")
	add_to_bashrc
	rm -f bds_Linux.tgz
fi
if [ ! -f $FLAGDIR/TABIX ]; then
	# local installation for tabix (0.2.6)
	cd $SOFTWARE
	wget https://sourceforge.net/projects/samtools/files/tabix/tabix-0.2.6.tar.bz2/download -O tabix-0.2.6.tar.bz2 -N --no-check-certificate
	tar jxvf tabix-0.2.6.tar.bz2
	rm -f tabix-0.2.6.tar.bz2
	cd tabix-0.2.6
	make
	chk_exit_code $FLAGDIR/TABIX
fi
if [ ! -f $FLAGDIR/BWA ]; then
	# Local installation for bwa (0.7.3)
	cd $SOFTWARE
	git clone https://github.com/lh3/bwa bwa-0.7.3
	cd bwa-0.7.3
	git checkout tags/0.7.3
	make
	chk_exit_code $FLAGDIR/BWA
fi
if [ ! -f $FLAGDIR/SAMTOOLS ]; then
	# Local installation for samtools (0.1.19)
	cd $SOFTWARE
	git clone https://github.com/samtools/samtools samtools-0.1.19
	cd samtools-0.1.19
	git checkout tags/0.1.19
	make
	chk_exit_code $FLAGDIR/SAMTOOLS
fi
if [ ! -f $FLAGDIR/BEDTOOLS ]; then
	# Local installation for bedtools (2.19.1)
	cd $SOFTWARE
	wget http://pkgs.fedoraproject.org/repo/pkgs/BEDTools/bedtools-2.19.1.tar.gz/58de5377c3fb1bc1ab5a2620cf48f846/bedtools-2.19.1.tar.gz -N
	tar zxvf bedtools-2.19.1.tar.gz
	rm -f bedtools-2.19.1.tar.gz
	cd bedtools2-2.19.1
	make
	chk_exit_code $FLAGDIR/BEDTOOLS
fi
if [ ! -f $FLAGDIR/UCSCTOOLS ]; then
	# Local installation for UCSC tools
	cd $SOFTWARE
	mkdir -p ucsc_tools
	cd ucsc_tools
	wget http://hgdownload.soe.ucsc.edu/admin/exe/linux.x86_64/wigToBigWig -N
	wget http://hgdownload.soe.ucsc.edu/admin/exe/linux.x86_64/bedGraphToBigWig -N
	wget http://hgdownload.soe.ucsc.edu/admin/exe/linux.x86_64/bigWigInfo -N
	wget http://hgdownload.soe.ucsc.edu/admin/exe/linux.x86_64/bedClip -N
	chk_exit_code $FLAGDIR/UCSCTOOLS
	chmod 755 *
fi
if [ ! -f $FLAGDIR/PICARD ]; then
	# Local installation for PICARD tools (1.92)
	cd $SOFTWARE
	wget http://mitra.stanford.edu/kundaje/software/picard-tools-1.92.tar.gz -N
	tar zxvf picard-tools-1.92.tar.gz
	rm -f picard-tools-1.92.tar.gz
	#wget http://sourceforge.net/projects/picard/files/picard-tools/1.92/picard-tools-1.92.zip/download -O picard-tools-1.92.zip -N
	#unzip picard-tools-1.92.zip
	#rm -f picard-tools-1.92.zip
	chk_exit_code $FLAGDIR/PICARD
	cd picard-tools-1.92
	chmod 755 *
fi
if [ ! -f $FLAGDIR/PHANTOMPEAKQUALTOOL ]; then
	# Local installation for run_spp.R (Anshul's phantompeakqualtool)
	# NOTE(review): googlecode.com file hosting was shut down; this URL
	# likely no longer resolves -- confirm and mirror if needed.
	cd $SOFTWARE
	wget https://phantompeakqualtools.googlecode.com/files/ccQualityControl.v.1.1.tar.gz -N
	tar zxvf ccQualityControl.v.1.1.tar.gz
	chk_exit_code $FLAGDIR/PHANTOMPEAKQUALTOOL
	rm -f ccQualityControl.v.1.1.tar.gz
	chmod 755 phantompeakqualtools/*
fi
if [ ! -f $FLAGDIR/R2 ]; then
	# Local installation instruction for R (2.15.1) and relevant packages
	cd $SOFTWARE
	wget http://cran.r-project.org/src/base/R-2/R-2.15.1.tar.gz -N
	tar zxvf R-2.15.1.tar.gz
	rm -f R-2.15.1.tar.gz
	cd R-2.15.1
	./configure --with-readline=no --with-x=no --enable-R-static-lib
	make
	chk_exit_code $FLAGDIR/R2
fi
if [ ! -f $FLAGDIR/R2PACKAGE ]; then
	# R packages (snow, snowfall, bitops, caTools, Rsamtools, spp) installed
	# via a generated tmp.R; warnings are escalated to a non-zero exit.
	cd $SOFTWARE
	wget http://mitra.stanford.edu/kundaje/software/spp_1.13.tar.gz -N
	echo > tmp.R
	echo 'withCallingHandlers(install.packages("snow", repos="http://cran.us.r-project.org"),warning = function(w) quit(save = "no", status = 1, runLast = FALSE))' >> tmp.R
	echo 'withCallingHandlers(install.packages("snowfall", repos="http://cran.us.r-project.org"),warning = function(w) quit(save = "no", status = 1, runLast = FALSE))' >> tmp.R
	echo 'withCallingHandlers(install.packages("bitops", repos="http://cran.us.r-project.org"),warning = function(w) quit(save = "no", status = 1, runLast = FALSE))' >> tmp.R
	echo 'withCallingHandlers(install.packages("caTools", repos="http://cran.us.r-project.org"),warning = function(w) quit(save = "no", status = 1, runLast = FALSE))' >> tmp.R
	echo 'source("http://bioconductor.org/biocLite.R")' >> tmp.R
	echo 'biocLite("Rsamtools",suppressUpdates=TRUE)' >> tmp.R
	echo 'withCallingHandlers(install.packages("./spp_1.13.tar.gz"),warning = function(w) quit(save = "no", status = 1, runLast = FALSE))' >> tmp.R
	$SOFTWARE/R-2.15.1/bin/Rscript tmp.R
	chk_exit_code $FLAGDIR/R2PACKAGE
	rm -f tmp.R
fi
if [ ! -f $FLAGDIR/LAPACK ]; then
	#LAPACK
	mkdir -p $SOFTWARE/blas
	cd $SOFTWARE/blas
	wget http://www.netlib.org/lapack/lapack.tgz -N
	tar xzf lapack.tgz
	rm -f lapack.tgz
	cd lapack-*/
	cp INSTALL/make.inc.gfortran make.inc # On Linux with lapack-3.2.1 or newer
	make lapacklib
	chk_exit_code $FLAGDIR/LAPACK
	make clean
fi
if [ ! -f $FLAGDIR/PYTHON2 ]; then
	# Local installation instruction for Python (2.7.2) and relevant packages (for macs2)
	cd $SOFTWARE
	wget https://www.python.org/ftp/python/2.7.2/Python-2.7.2.tgz -N
	tar zxvf Python-2.7.2.tgz
	rm -f Python-2.7.2.tgz
	cd Python-2.7.2
	./configure --prefix=$SOFTWARE/python2.7 --enable-unicode=ucs4
	make altinstall prefix=$SOFTWARE/python2.7 exec-prefix=$SOFTWARE/python2.7
	chk_exit_code $FLAGDIR/PYTHON2
fi
if [ ! -f $FLAGDIR/PIP2 ]; then
	# Create python2/python symlinks, then bootstrap pip into the private prefix.
	ln -s $SOFTWARE/python2.7/bin/python2.7 $SOFTWARE/python2.7/bin/python2
	ln -s $SOFTWARE/python2.7/bin/python2.7 $SOFTWARE/python2.7/bin/python
	cd $SOFTWARE
	cd python2.7/bin
	wget https://bootstrap.pypa.io/get-pip.py --no-check-certificate -N
	./python2 get-pip.py
	$SOFTWARE/python2.7/bin/python2.7 -m pip install --upgrade setuptools
	chk_exit_code $FLAGDIR/PIP2
fi
if [ ! -f $FLAGDIR/CYTHON2 ]; then
	cd $SOFTWARE/Python-2.7.2
	wget http://cython.org/release/Cython-0.22.tar.gz -N
	tar zxvf Cython-0.22.tar.gz
	cd Cython-0.22
	$SOFTWARE/python2.7/bin/python2.7 setup.py install --prefix=$SOFTWARE/python2.7
	chk_exit_code $FLAGDIR/CYTHON2
fi
cd $SOFTWARE
# --- Python 2 libraries, each pip-installed into the private prefix. ---
if [ ! -f $FLAGDIR/PYTHON2NUMPY ]; then
	$SOFTWARE/python2.7/bin/python2.7 -m pip install --install-option="--prefix=$SOFTWARE/python2.7" numpy
	chk_exit_code $FLAGDIR/PYTHON2NUMPY
fi
if [ ! -f $FLAGDIR/PYTHON2MATPLOTLIB ]; then
	$SOFTWARE/python2.7/bin/python2.7 -m pip install --install-option="--prefix=$SOFTWARE/python2.7" matplotlib
	chk_exit_code $FLAGDIR/PYTHON2MATPLOTLIB
fi
if [ ! -f $FLAGDIR/PYTHON2PYSAM ]; then
	$SOFTWARE/python2.7/bin/python2.7 -m pip install --install-option="--prefix=$SOFTWARE/python2.7" pysam==0.8.2
	chk_exit_code $FLAGDIR/PYTHON2PYSAM
fi
if [ ! -f $FLAGDIR/PYTHON2PYBIGWIG ]; then
	$SOFTWARE/python2.7/bin/python2.7 -m pip install --install-option="--prefix=$SOFTWARE/python2.7" pyBigwig
	chk_exit_code $FLAGDIR/PYTHON2PYBIGWIG
fi
if [ ! -f $FLAGDIR/PYTHON2SCIPY ]; then
	$SOFTWARE/python2.7/bin/python2.7 -m pip install --install-option="--prefix=$SOFTWARE/python2.7" scipy
	chk_exit_code $FLAGDIR/PYTHON2SCIPY
fi
if [ ! -f $FLAGDIR/PYTHON2LEVEN ]; then
	$SOFTWARE/python2.7/bin/python2.7 -m pip install --install-option="--prefix=$SOFTWARE/python2.7" python-levenshtein
	chk_exit_code $FLAGDIR/PYTHON2LEVEN
fi
if [ ! -f $FLAGDIR/PYTHON2DEEP ]; then
	$SOFTWARE/python2.7/bin/python2.7 -m pip install --upgrade --install-option="--prefix=$SOFTWARE/python2.7" deeptools==1.5.12
	chk_exit_code $FLAGDIR/PYTHON2DEEP
fi
# for atac
if [ ! -f $FLAGDIR/PYTHON2PYBEDTOOLS ]; then
	$SOFTWARE/python2.7/bin/python2.7 -m pip install --install-option="--prefix=$SOFTWARE/python2.7" pybedtools
	chk_exit_code $FLAGDIR/PYTHON2PYBEDTOOLS
fi
# for atac
if [ ! -f $FLAGDIR/PYTHON2PANDAS ]; then
	$SOFTWARE/python2.7/bin/python2.7 -m pip install --install-option="--prefix=$SOFTWARE/python2.7" pandas
	chk_exit_code $FLAGDIR/PYTHON2PANDAS
fi
# for atac
if [ ! -f $FLAGDIR/PYTHON2METASEQ ]; then
	$SOFTWARE/python2.7/bin/python2.7 -m pip install --install-option="--prefix=$SOFTWARE/python2.7" metaseq
	chk_exit_code $FLAGDIR/PYTHON2METASEQ
fi
# for atac
if [ ! -f $FLAGDIR/PYTHON2JINJA2 ]; then
	$SOFTWARE/python2.7/bin/python2.7 -m pip install --install-option="--prefix=$SOFTWARE/python2.7" jinja2
	chk_exit_code $FLAGDIR/PYTHON2JINJA2
fi
if [ ! -f $FLAGDIR/MACS2 ]; then
	# Local installation instruction for MACS2
	cd $SOFTWARE
	git clone https://github.com/taoliu/MACS/
	cd MACS
	$SOFTWARE/python2.7/bin/python2.7 setup_w_cython.py install --prefix=$SOFTWARE/python2.7
	chk_exit_code $FLAGDIR/MACS2
	chmod 755 $SOFTWARE/MACS/bin/*
fi
if [ ! -f $FLAGDIR/DEEPTOOLS ]; then
# deepTools (signal track gen.)
cd $SOFTWARE
rm -rf deepTools
git clone https://github.com/fidelram/deepTools
cd deepTools
git checkout tags/1.5.12
$SOFTWARE/python2.7/bin/python2.7 setup.py install --prefix=$SOFTWARE/python2.7
chk_exit_code $FLAGDIR/DEEPTOOLS
fi
if [ ! -f $FLAGDIR/PYTHON3 ]; then
# Local installation instruction for Python (3.4.3) and relevant packages (for Nathan Boley's IDR)
cd $SOFTWARE
wget https://www.python.org/ftp/python/3.4.3/Python-3.4.3.tgz -N
tar zxvf Python-3.4.3.tgz
rm -f Python-3.4.3.tgz
cd Python-3.4.3
./configure --prefix=$SOFTWARE/python3.4
make altinstall prefix=$SOFTWARE/python3.4 exec-prefix=$SOFTWARE/python3.4
chk_exit_code $FLAGDIR/PYTHON3
ln -s $SOFTWARE/python3.4/bin/python3.4 $SOFTWARE/python3.4/bin/python3
fi
if [ ! -f $FLAGDIR/CYTHON3 ]; then
cd $SOFTWARE/Python-3.4.3
wget http://cython.org/release/Cython-0.22.tar.gz -N
tar zxvf Cython-0.22.tar.gz
cd Cython-0.22
$SOFTWARE/python3.4/bin/python3.4 setup.py install --prefix=$SOFTWARE/python3.4
chk_exit_code $FLAGDIR/CYTHON3
fi
cd $SOFTWARE
if [ ! -f $FLAGDIR/PYTHON3NUMPY ]; then
$SOFTWARE/python3.4/bin/easy_install-3.4 numpy
chk_exit_code $FLAGDIR/PYTHON3NUMPY
fi
if [ ! -f $FLAGDIR/PYTHON3MATPLOTLIB ]; then
$SOFTWARE/python3.4/bin/easy_install-3.4 matplotlib
chk_exit_code $FLAGDIR/PYTHON3MATPLOTLIB
fi
if [ ! -f $FLAGDIR/PYTHON3SCIPY ]; then
$SOFTWARE/python3.4/bin/easy_install-3.4 scipy
chk_exit_code $FLAGDIR/PYTHON3SCIPY
fi
if [ ! -f $FLAGDIR/IDR2 ]; then
# Local installation instruction for IDR2( Nathan Boley's IDR )
cd $SOFTWARE
git clone --recursive https://github.com/nboley/idr.git
cd idr
$SOFTWARE/python3.4/bin/python3.4 setup.py install --prefix=$SOFTWARE/python3.4
chk_exit_code $FLAGDIR/IDR2
fi
if [ ! -f $FLAGDIR/IDR ]; then
# Local installation instruction for Anshul Kundaje's IDR
cd $SOFTWARE
wget https://sites.google.com/site/anshulkundaje/projects/idr/idrCode.tar.gz?attredirects=0 -O idrCode.tar.gz -N
tar zxvf idrCode.tar.gz
chk_exit_code $FLAGDIR/IDR
rm -f idrCode.tar.gz
fi
if [ ! -f "$FLAGDIR/GEM" ]; then
  # Local installation instruction for the GEM peak caller.
  cd "$SOFTWARE"
  #wget http://cgs.csail.mit.edu/gem/download/gem.v2.6.tar.gz -N
  wget http://groups.csail.mit.edu/cgs/gem/download/gem.v2.6.tar.gz -N
  tar zxvf gem.v2.6.tar.gz
  chk_exit_code "$FLAGDIR/GEM"
  rm -f gem.v2.6.tar.gz
  cd gem
  # Only the variable is quoted; the *.jar glob must stay unquoted to expand.
  chmod 755 "$SOFTWARE"/gem/*.jar
fi
if [ ! -f "$FLAGDIR/WIGGLER" ]; then
  # Local installation instruction for Wiggler (align2rawsignal), used for
  # generating signal tracks.
  # NOTE(review): googlecode.com hosting was shut down; this URL likely needs
  # updating to the project's current home — confirm before relying on it.
  cd "$SOFTWARE"
  wget https://align2rawsignal.googlecode.com/files/align2rawsignal.2.0.tgz -N
  tar zxvf align2rawsignal.2.0.tgz
  chk_exit_code "$FLAGDIR/WIGGLER"
  rm -f align2rawsignal.2.0.tgz
fi
if [ ! -f "$FLAGDIR/MCR" ]; then
  # MATLAB Compiler Runtime 2010b (needed to run the Wiggler binary).
  cd "$SOFTWARE"
  wget http://www.broadinstitute.org/~anshul/softwareRepo/MCR2010b.bin -N
  chmod 755 MCR2010b.bin
  # Build the silent-install answer file. printf keeps $SOFTWARE as a single
  # word even if the path contains spaces; the original unquoted echo would
  # word-split it. '--' stops printf from parsing the leading '-P' as an option.
  printf -- '-P installLocation="%s/MATLAB_Compiler_Runtime"\n' "$SOFTWARE" > tmp.stdin
  ./MCR2010b.bin -silent -options "tmp.stdin"
  chk_exit_code "$FLAGDIR/MCR"
  rm -f tmp.stdin
  rm -f MCR2010b.bin
fi
# for atac
if [ ! -f "$FLAGDIR/PRESEQ" ]; then
  # preseq v2.0.2 (library-complexity estimation, used by the ATAC pipeline).
  cd "$SOFTWARE"
  # Clear any stale checkout so 'git clone' cannot fail on a re-run.
  rm -rf preseq
  git clone https://github.com/smithlabcode/preseq --recursive
  cd preseq
  git checkout tags/v2.0.2
  make
  chk_exit_code "$FLAGDIR/PRESEQ"
fi
# WARNING
# Report the overall outcome. FAILED is expected to be set elsewhere (by
# chk_exit_code); ':-0' defaults it so the test itself cannot error out —
# the original unquoted '[ $FAILED == 1 ]' breaks ("unary operator expected")
# when FAILED is unset or empty, and '==' is a non-standard test operator.
if [ "${FAILED:-0}" -eq 1 ]; then
  echo
  echo =====================================================
  echo "Failed to install some dependencies, check your system requirements and re-run the installer."
  echo =====================================================
  echo
else
  echo
  echo =====================================================
  echo "Done Installing all dependencies for ChIP-Seq pipeline."
  echo =====================================================
  echo
fi
| true |
776c7be6452e1041a3ff249d9fbd4b9aa995e45c | Shell | exu/poligon | /install/recruit.INSTALL | UTF-8 | 910 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
USERLOGIN='jacek.wysocki'
WEBROOT="/home/$USERLOGIN/htdocs/web"
# Per-user hash: the login with '.' mapped to '!', then md5-summed.
# printf '%s' replaces the non-portable 'echo -n', and $(...) replaces
# the deprecated backticks.
# NOTE(review): WEBROOT and MD5HASH are not used in the rest of this script —
# presumably consumed by a later provisioning step; verify before removing.
MD5HASH=$(printf '%s' "$USERLOGIN" | tr '.' '!' | md5sum | awk '{ print $1 }')

# Web stack: Apache, PHP 5 with common extensions, build tools, ACL and MySQL.
apt-get -y install apache2
apt-get -y install php5 php5-gd php5-pgsql php5-dev php5-cli php-pear php5-curl
apt-get -y install libapache2-mod-php5
apt-get -y install make
apt-get -y install libpcre3-dev
apt-get -y install acl
apt-get -y install mysql-server mysql-client
apt-get -y install phpmyadmin

# APC opcode cache via PECL, then enable the extension for PHP 5.
pecl install apc
echo 'extension=apc.so' > /etc/php5/conf.d/apc.ini

a2enmod rewrite
# Raise the PHP memory limit for Apache's php.ini, then restart to apply.
sed -i 's/memory_limit = .*/memory_limit = 256M/' /etc/php5/apache2/php.ini
service apache2 restart

#locales
# Locales required by the application (Polish in two encodings, French, German).
echo 'pl_PL ISO-8859-2' > /var/lib/locales/supported.d/pl
echo 'pl_PL.UTF-8 UTF-8' >> /var/lib/locales/supported.d/pl
echo 'fr_FR.UTF-8 UTF-8' > /var/lib/locales/supported.d/fr
echo 'de_DE.UTF-8 UTF-8' > /var/lib/locales/supported.d/de
dpkg-reconfigure locales
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.