blob_id
stringlengths
40
40
language
stringclasses
1 value
repo_name
stringlengths
4
115
path
stringlengths
2
970
src_encoding
stringclasses
28 values
length_bytes
int64
31
5.38M
score
float64
2.52
5.28
int_score
int64
3
5
detected_licenses
listlengths
0
161
license_type
stringclasses
2 values
text
stringlengths
31
5.39M
download_success
bool
1 class
2bae72aa2517a733180bca4eed080e9264402d80
Shell
wylkon/dotfiles
/lib/rubygems
UTF-8
663
3.328125
3
[]
no_license
#!/usr/bin/env zsh run_rubygems() { e_process "Init Rbenv" echo 'if which rbenv > /dev/null; then eval "$(rbenv init -)"; fi' >> ~/.zshrc rbenv init e_process "Installing the latest version of Ruby" rbenv install 2.7.1 rbenv global 2.7.1 e_process "Installing the latest version of Ruby" gem install bundler e_process "Configuring Bundler for faster, parallel gem installation ..." number_of_cores=$(sysctl -n hw.ncpu) bundle config --global jobs $((number_of_cores - 1)) e_process "Installing utils..." gem install colorls gem install travis gem install wordmove [[ $? ]] && e_success "Done" }
true
44574db0311051b2c24cb0202675d1b102df0662
Shell
logic855/codingwithme-ansible
/start.sh
UTF-8
683
2.765625
3
[]
no_license
echo "Prepare for the ansible env: dockerfile/ansible and several ubuntu hosts" echo "run 'stop.sh' if they already started before" echo "You will enter into bash for ansible environment (inside docker container)" echo "" docker run -d --name haproxy -p :1080 -p :80 larrycai/ubuntu-sshd docker run -d --name web1 larrycai/ubuntu-sshd docker run -d --name web2 larrycai/ubuntu-sshd docker run -d --name database larrycai/ubuntu-sshd docker run -it --link haproxy:haproxy_1 \ --link web1:web1_1 \ --link web2:web2_1 \ --link database:database_1 \ --volume `pwd`:/data \ dockerfile/ansible
true
5253ccc88bf6f35c9e1d0fe55f7cd5337487a521
Shell
agent-system/aizuspider_description
/run_cnoid.sh
UTF-8
420
2.734375
3
[]
no_license
#!/bin/bash pkill -9 choreonoid ##trap "pkill choreonoid -g 0" SIGINT SIGKILL SIGTERM echo $@ >&2 AIZU_DIR=$(rospack find aizuspider_description) if [ ${CNOID_TASK} == "TASK_A" ]; then ln -sf ${AIZU_DIR}/Task3-Agent-System2019_a.cnoid ${AIZU_DIR}/Task3-Agent-System2019.cnoid else ln -sf ${AIZU_DIR}/Task3-Agent-System2019_b.cnoid ${AIZU_DIR}/Task3-Agent-System2019.cnoid fi choreonoid --start-simulation $@
true
b5834700a95ee4b454fb660211beca494f9c847d
Shell
LandRegistry/db2-express
/scripts/provision-vm
UTF-8
1,050
3.140625
3
[]
no_license
#!/bin/bash echo "Provisioning box" echo "Installing DB2" tar xvf /vagrant/v10.5_linuxx64_expc.tar.gz yum -y install libaio yum -y install pam.i686 yum upgrade libstdc++ yum -y install libstdc++.i686 yum upgrade zlib yum -y install zlib.i686 ./expc/db2setup -r /vagrant/db2expc.rsp echo "Creating the DB2 sample DB" su - db2inst1 -c "db2sampl" echo "Installing UnZip" yum -y install unzip echo "Installing Java Open JDK 1.8" yum -y install java-1.8.0-openjdk yum -y install java-1.8.0-openjdk-devel echo "Installing Git" yum -y install git echo "Installing Gradle 2.8" mkdir /opt/gradle curl -o /tmp/gradle-2.8-bin.zip http://downloads.gradle.org/distributions/gradle-2.8-bin.zip unzip -oq /tmp/gradle-2.8-bin.zip -d /opt/gradle ln -sfnv gradle-2.8 /opt/gradle/latest echo "Updating bash_profile with Gradle" printf "\n## GRADLE 2.8 ##\nexport GRADLE_HOME=/opt/gradle/latest\nexport PATH=\$PATH:\$GRADLE_HOME/bin \n## GRADLE 2.8 ##" >> /home/vagrant/.bash_profile source ~/.bash_profile echo "Copying motd" cp /vagrant/config/motd /etc/motd
true
6bd0a68f6ab9d04dae3152af010ec2f7346f721e
Shell
katylava/dotkyl
/lib/010-aliases.zsh
UTF-8
2,655
2.515625
3
[]
no_license
# safety alias r='rm -ir' alias rem='rmtrash' # listing and grepping alias lgrep='ll | grep' # alias ll='gls --color=auto -NFalh' # alias ls='gls --color=auto -NFh' alias la='lsd -Fa' alias ll='lsd -Fal' alias ls='lsd -F' alias exa='exa -F --icons' # frequently edited files alias edit.aliases='nvim ~/.dotkyl/lib/*aliases.zsh' alias edit.kubeconf='nvim ~/.kube/config' alias edit.npmrc='nvim ~/.npmrc' alias edit.path='nvim ~/.dotkyl/lib/*path.zsh' alias edit.profile='nvim ~/.zshrc' alias edit.prompt='nvim ~/.dotkyl/lib/*prompt.zsh' alias edit.vimrc='nvim ~/.dotkyl/nvim/init.vim' alias unstuck='nvim ~/Dropbox/desktop/unstuck.md' # switch npmrc alias npm.coc='cp ~/.npmrc.coc ~/.npmrc' alias npm.work='cp ~/.npmrc.work ~/.npmrc' # things i forget alias cmdfreq='fc -l 1 | awk '\''{CMD[$2]++;count++;}END { for (a in CMD)print CMD[a] " " CMD[a]/count*100 "% " a;}'\'' | grep -v "./" | column -c3 -s " " -t | sort -nr | nl | head -n25' alias diff2html='pygmentize -f html -O style=colorful,full -l diff -O encoding=utf-8' alias eject='hdiutil detach' alias encoding="vim -c 'execute \"silent !echo \" . &fileencoding | q'" alias ezrsync='rsync -avhC --progress --no-o' alias flushdns='sudo dscacheutil -flushcache && sudo killall -HUP mDNSResponder && say "flushed DNS"' alias history='fc -ilD 1' alias killcam='sudo killall AppleCameraAssistant; sudo killall VDCAssistant' alias locate='glocate' alias locip='ifconfig | grep "inet " | grep -v 127.0.0.1 | cut -d " " -f 2 | head -n 1' alias noderl='env NODE_NO_READLINE=1 rlwrap node' alias rpass='</dev/urandom tr -dc A-Za-z0-9 | head -c 10' alias updatedb="export LC_ALL='C' && sudo gupdatedb" alias yqdebug="yq '.' -o json | yq -I2 -P" # parse and reformat yaml. 
usage: cat file.yaml | yqpipe # things i just hate typing alias bat='bat --theme="Dracula"' alias browse='gh repo view --web' alias dc='docker compose' alias dice='rolldice -s' alias edocker='eval "$(docker-machine env default)"' alias epyenv='eval "$(pyenv init -)"' alias enodenv='eval "$(nodenv init -)"' alias freecell='python3 ~/Code/Projects/pyfreecell/freecell.py -w6' alias jira='jira-cli' alias k='kubectl' alias kc='kubectl config use-context' alias kcc='kubectl config current-context' alias kd='ksc dev-gke' alias kgp='kubectl get pods' alias kl='kubectl logs' alias klist="kubectl config view -o jsonpath='{.contexts[*].name}'" alias kp='ksc prod-gke' alias ql='qlmanage -p 2>/dev/null' alias repostats='onefetch -E --no-merges --number-of-authors 5' alias sopr='source ~/.zshrc' alias t='todolist' alias tabview='tabview --width max' alias tree='tree -I "*.pyc|node_modules|__pycache__"' alias year='gcal $(date +%Y)'
true
f592d7776e0677d7000b25c15cb416433d2d351a
Shell
zackad/dotfiles
/.profile
UTF-8
698
2.796875
3
[]
no_license
# Setting locale export LOCALE_ARCHIVE=/usr/lib/locale/locale-archive export LC_ALL="en_US.UTF-8" export LANG="en_US" # Prompt gpg passphrase on cli export GPG_TTY="$(tty)" # Add directories into PATH # Jetbrains Toolbox App PATH="${PATH}:${HOME}/Applications/JetBrains/Toolbox/scripts" PATH="${PATH}:${HOME}/.local/share/JetBrains/Toolbox/scripts" # Composer binary PATH="${PATH}:${HOME}/.config/composer/vendor/bin" export PATH="${PATH}" # Default editor export EDITOR=vim # Load nix profile if [ -e ~/.nix-profile/etc/profile.d/nix.sh ]; then . ~/.nix-profile/etc/profile.d/nix.sh; fi # Prepend nix profile into xdg-data export XDG_DATA_DIRS="$HOME/.nix-profile/share:$XDG_DATA_DIRS"
true
94cf35db63012823a8b41a3dac5f3dd4692be45a
Shell
kalikin/enumMemberRefs
/profiler/build.sh
UTF-8
742
3.421875
3
[]
no_license
#!/bin/bash [ -z "${BuildOS:-}" ] && export BuildOS=Linux [ -z "${BuildArch:-}" ] && export BuildArch=x64 [ -z "${BuildType:-}" ] && export BuildType=Debug [ -z "${CORECLR_PATH:-}" ] && export CORECLR_PATH=~/git/runtime/src/coreclr [ -z "${CORECLR_BIN:-}" ] && export CORECLR_BIN=~/git/runtime/artifacts/bin/coreclr/$BuildOS.$BuildArch.$BuildType printf ' CORECLR_PATH : %s\n' "$CORECLR_PATH" printf ' BuildOS : %s\n' "$BuildOS" printf ' BuildArch : %s\n' "$BuildArch" printf ' BuildType : %s\n' "$BuildType" printf ' Building ...' if [ ! -d "bin/" ]; then mkdir bin/ fi pushd bin export CC=/usr/bin/clang export CXX=/usr/bin/clang++ cmake ../ -DCMAKE_BUILD_TYPE=Debug make -j8 popd printf 'Done.\n'
true
ad422963ec0531225f1a323990206a6a0eff8e6b
Shell
manuel-192/m-m
/PKGBUILDs/eos-r8168-helper/eos-r8168-helper
UTF-8
4,650
4.28125
4
[]
no_license
#!/bin/bash # Use database to set the correct Ethernet driver. echo2() { echo "$@" >&2 ; } printf2() { printf "$@" >&2 ; } DIE() { echo2 "$progname: error: $1"; exit 1; } debug() { [ "$debug" = "yes" ] && echo2 "$@" ; } GetId() { echo "$lspci" | sed -n "/$devstring/,/^$/p" | grep -w "$devstring" | sed 's|.*\[\([0-9a-f:]*\)\].*|\1|' ; } GetDriver() { echo "$lspci" | sed -n "/$devstring/,/^$/p" | grep 'Kernel driver in use' | awk '{print $NF}' ; } FindCard() { echo "$lspci" | sed -n "/$devstring/,/^$/p" | grep -w "$devstring" ; } Options() { local opts opts="$(getopt -o=dsh --longoptions check,debug,display,help,save,sendlog --name "$progname" -- "$@")" || exit 1 eval set -- "$opts" while true ; do case "$1" in -h | --help) cat <<EOF Usage: $progname [options] Options: --save -s Store your card and *working* driver info temporarily for --sendlog. --sendlog Send the stored temporary info to internet using eos-sendlog. --display -d Display the known good driver name from the local system database. --help -h Show this help and exit. Detected values: Ethernet card id: $id Ethernet card driver: $driver Database files: System database file: $database_sys Temporary database file: $database_tmp EOF exit 0 ;; -s | --save) mode=save ;; -d | --display) mode=display ;; --sendlog) mode=sendlog ;; # "hidden" options: --debug) debug=yes ;; --check) mode=check ;; --) shift ; break ;; esac shift done [ -n "$1" ] && devstring="$1" } Main() { local progname="$(basename "$0")" # Options may modify these variables: local mode=display # save or display local devstring="Ethernet controller" # device search string local debug=no # Database variables local database_sys=/usr/share/endeavouros/drivers_ethernet_r8168 local database_tmp=/tmp/drivers_ethernet_r8168.tmp local lspci="$(lspci -vnn)" local id=$(GetId) local driver=$(GetDriver) Options "$@" local dbline="" local cardinfoline=$(FindCard) #if [ ! 
-r "$database" ] ; then # touch "$database" || DIE "cannot create database file $database" #fi case "$mode" in save) # It is possible that more than one driver is supported by the same id. if (! eos-connection-checker) ; then DIE "Connection failure! Will not store driver '$driver' to database. Please check your connections." fi if [ -r $database_sys ] ; then dbline="$(grep "^$id $driver # " "$database_sys")" fi if [ -z "$dbline" ] ; then if [ -r "$database_tmp" ] ; then dbline="$(grep "^$id $driver # " "$database_tmp")" fi if [ -z "$dbline" ] ; then echo2 "Adding driver '$driver' with id '$id' to the temporary database." printf "%s %s # %s\n" "$id" "$driver" "$cardinfoline" > "$database_tmp" else debug "user database has line: $dbline" echo2 "Driver '$driver' already added to the temporary database." fi printf2 "Please run the following terminal command now:\n %s\n" "$progname --sendlog" else echo2 "Info: driver $driver already exists in the database, nothing more to do." debug "system database has line:" debug "$dbline" fi ;; sendlog) [ -r "$database_tmp" ] || DIE "file '$database_tmp' not found, use option '--save' first." cat "$database_tmp" | eos-sendlog rm -f "$database_tmp" # this file not needed anymore ;; display) [ -r $database_sys ] || DIE "database '$database' does not exist." dbline="$(grep -w "^$id" "$database_sys")" driver="$(echo "$dbline" | awk '{print $2}')" [ -n "$driver" ] || DIE "database does not contain a driver for '$id'" debug "Database has the following card info:" debug "$dbline" echo "$driver" ;; check) local i1=$(echo "$id" | cut -d':' -f1) local i2=$(echo "$id" | cut -d':' -f2) cat /usr/share/hwdata/pci.ids | sed -n "/^$i1 /,/^[0-9a-f]/p" | sed '$d' | grep -P "^\t$i2 " ;; esac } Main "$@"
true
9987dda4b6da457885770a94c91b160060a8b198
Shell
AppCivico/radardoalagamento-api
/deploy/envs.sample.sh
UTF-8
1,367
2.65625
3
[ "MIT" ]
permissive
#!/bin/bash ## AVISO: ## Este é um arquivo de exemplo. Copie-o e defina as variaveis com os ## valores corretos ## # $ cp envs.sample.sh envs_local.sh # # Edite as variaveis com os dados corretos # $ TUPA_ENV_FILE=deploy/envs_local.sh deploy/restart_services.sh source ~/perl5/perlbrew/etc/bashrc export TUPA_API_WORKERS=1 # diretorios # diretorio de log dos daemons export TUPA_LOG_DIR='/caminho/para/pasta/dos/logs' # diretorio raiz do projeto export TUPA_APP_DIR='/home/ubuntu/radardoalagamento-api' export TUPA_SQITCH_DEPLOY_NAME=local # Altera porta se necessário export TUPA_API_PORT=2029 export TUPA_MODE='tupa' export CATALYST_DEBUG=1 export DBIC_TRACE=1 export DBIC_TRACE_PROFILE=console # Banco de dados export TUPA_DB_HOST=127.0.0.1 export TUPA_DB_PASS=xa export TUPA_DB_PORT=5432 export TUPA_DB_USER=postgres export TUPA_DB_NAME=tupa # Sendgrid é o serviço de envio de emails. É necessário possuir uma conta. export SENDGRID_USER=username123 export SENDGRID_PASSWORD=XXXxx88WWWbbb export TUPA_API_HOST=dtupa # arquivo de config do Catalyst. Faça uma cópia do tupa_web_app_local_example.pl # Ex: cp tupa_web_app_local_example.pl tupa_web_app_local.pl export TUPA_CONFIG_FILE="$TUPA_APP_DIR/tupa_web_app_local.pl" # Contacte-nos para conseguir essas credenciais export SAISP_USER=saisp-user-example export SAISP_PASS=saips-password
true
3fe6ecd381c0466886d659b366c283b63f99b90e
Shell
parkr/dotfiles
/bin/scan
UTF-8
500
3.4375
3
[]
no_license
#!/bin/bash #/ Usage: scan <output_filename> #/ Source: https://gist.github.com/jvns/b5651fd6272eddcd935e5e30874a935f DEST_FILENAME="$1" if [ -z "$DEST_FILENAME" ]; then echo "fatal: no output filename given" >&2 echo "please provide filename to write to current directory" >&2 exit 1 fi set -ex DIR=`mktemp -d` CUR="$PWD" cd "$DIR" scanimage \ --batch=page%04d.tiff \ --format tiff \ -d 'epjitsu' \ --resolution 300 \ --batch-prompt convert *.tiff "$CUR/$DEST_FILENAME" rm -r "$DIR"
true
72532fb633e623f2674d691dad6997c3b5987d87
Shell
teamsspaul/Thinkpad
/Old_Code/Pertubation stuff/bin/rm_old_failed_slurm
UTF-8
620
3
3
[]
no_license
#!/bin/bash #This program will search through the RunDirectory and remove old slurm files that do not have "/hr" printed in them #this is because the "/hr" prints when the mcnp execution is complete. RunDirectory="/yellow/users/paulmmendoza/project/Run_BM_10c" slurmSearch="slurm" #Delete slurm files (I am pretty sure this will work if typed like below, but it works the other way too) #find $RunDirectory -type f -name "${slurmSearch}*" | xargs -I % grep -L "\/hr" % | xargs -I % rm % slurmIncompleteDelete="$(find $RunDirectory -type f -name "${slurmSearch}*" | xargs -I % grep -L "\/hr" % | xargs -I % rm %)"
true
b1f383ccdc960d60f61da2ea4f1c2fc05d997534
Shell
jonathanly/dotfiles
/.aliases
UTF-8
631
2.828125
3
[]
no_license
#!/usr/bin/env bash alias ..="cd .." alias ...="cd ../.." alias ....="cd ../../.." alias .....="cd ../../../.." alias dl="cd ~/Downloads" alias dt="cd ~/Desktop" # Hide/show all desktop icons (useful when presenting) alias hidedesktop="defaults write com.apple.finder CreateDesktop -bool false && killall Finder" alias showdesktop="defaults write com.apple.finder CreateDesktop -bool true && killall Finder" # Lock the screen (when going AFK) alias afk="/System/Library/CoreServices/Menu\ Extras/User.menu/Contents/Resources/CGSession -suspend" # Reload the shell (i.e. invoke as a login shell) alias reload="exec ${SHELL} -l"
true
eaa8ea7948c5799d129001f6edd7dd72abfcce4d
Shell
rmathew/Misc
/bin/getdiff
UTF-8
1,115
4.21875
4
[]
no_license
#!/bin/sh # getdiff: # # A simple shell script to find the difference between the given # files, say, "foo/bar" and "foo/bar.orig". The output is a # "diff -upN -U 2" context difference preceded by "Index: " and "====" # lines somewhat similar to that output by CVS. # # This output can be treated as a patch against the original # files (without the ".orig" suffix) to convert them to the modified # files like this: # # patch -p0 < output-from-this-script # # Author: Ranjit Mathew (rmathew@gmail.com) # Version: 1.1 # if test -z "$1"; then echo echo USAGE: getdiff \[file1 \[file2 \[file3 ...\]\]\] echo exit 1 fi for i in "$@" do if [ ! -f "$i" ] then echo ERROR: File \"$i\" does not exist exit 2; fi if [ ! -f "$i.orig" ] then echo ERROR: File \"$1.orig\" does not exist exit 3; fi done for i in "$@" do if ! diff -q "$i.orig" "$i" >/dev/null then echo Index: "$i" echo =================================================================== NAME4SED=`echo $i | sed 's/\\//\\\\\\//g'` diff -upN -U2 "$i.orig" "$i" | sed s/$NAME4SED\\.orig/$NAME4SED/g fi done
true
e5f1c897b69cd8c79a66e8d253e94a970bbd2492
Shell
spencerrecneps/bna-tools
/add_scores.sh
UTF-8
5,131
3.796875
4
[]
no_license
#!/bin/bash ############################################################# ## Add missing BNA category scores to shapefile and ## save to new location ## Inputs: ## input file -> $1 ## output file -> $2 ############################################################# cd "$(dirname "$0")" # check that input file exists if [ ! -e ${1} ]; then echo "${1} is not a file" exit 1 fi echo "From ${1} to ${2}" # input indir=`dirname ${1}` if [[ ${1} == *.shp ]]; then infile=`basename ${1} .shp` inext="shp" elif [[ ${1} == *.zip ]]; then infile=`basename ${1} .zip` inext="zip" else echo "Cannot process ${1}" exit 1 fi # output outdir=`dirname ${2}` if [[ ${2} == *.shp ]]; then outfile=`basename ${2} .shp` outext="shp" elif [[ ${2} == *.zip ]]; then outfile=`basename ${2} .zip` outext="zip" else echo "Cannot output to ${2}" exit 1 fi # prepare output shp if [[ inext == "shp" ]]; then if [[ outext == "shp" ]]; then for f in ${indir}/${infile}.*; do cp -- "$f" "${outdir}/${outfile}.${f##*.}"; done file="${outdir}/${outfile}.shp" else tempdir=`mktemp -d` for f in ${indir}/${infile}.*; do cp -- "$f" "${tempdir}/${outfile}.${f##*.}"; done file="${tempdir}/${outfile}.shp" fi else if [[ outext == "shp" ]]; then unzip ${1} -d ${outdir} file="${outdir}/${outfile}.shp" else tempdir=`mktemp -d` unzip ${1} -d ${tempdir} file="${tempdir}/${outfile}.shp" fi fi # add fields to shapefile ogrinfo ${file} \ -sql "ALTER TABLE ${outfile} ADD COLUMN \"CORESVCS\" float" ogrinfo ${file} \ -sql "ALTER TABLE ${outfile} ADD COLUMN \"OPPRTNTY\" float" ogrinfo ${file} \ -sql "ALTER TABLE ${outfile} ADD COLUMN \"RECREATION\" float" ################################ # calculate scores ################################ # CORESVCS ogrinfo ${file} \ -dialect SQLite \ -sql " UPDATE ${outfile} SET \"CORESVCS\" = ( 20 * COALESCE(\"DOCTORS_SC\",0) + 10 * COALESCE(\"DENTISTS_S\",0) + 20 * COALESCE(\"HOSPITA_02\",0) + 10 * COALESCE(\"PHARMAC_02\",0) + 25 * COALESCE(\"SUPERMA_02\",0) + 15 * COALESCE(\"SOCIAL__02\",0) ) / 
( 20 * (CASE WHEN COALESCE(\"DOCTORS_HI\",0) = 0 THEN 0 ELSE 1 END) + 10 * (CASE WHEN COALESCE(\"DENTISTS_H\",0) = 0 THEN 0 ELSE 1 END) + 20 * (CASE WHEN COALESCE(\"HOSPITA_01\",0) = 0 THEN 0 ELSE 1 END) + 10 * (CASE WHEN COALESCE(\"PHARMAC_01\",0) = 0 THEN 0 ELSE 1 END) + 25 * (CASE WHEN COALESCE(\"SUPERMA_01\",0) = 0 THEN 0 ELSE 1 END) + 15 * (CASE WHEN COALESCE(\"SOCIAL__01\",0) = 0 THEN 0 ELSE 1 END) ) WHERE \"OVERALL_SC\" IS NOT NULL AND COALESCE(\"DOCTORS_HI\",0) + COALESCE(\"DENTISTS_H\",0) + COALESCE(\"HOSPITA_01\",0) + COALESCE(\"PHARMAC_01\",0) + COALESCE(\"SUPERMA_01\",0) + COALESCE(\"SOCIAL__01\",0) > 0" # OPPRTNTY ogrinfo ${file} \ -dialect SQLite \ -sql " UPDATE ${outfile} SET \"OPPRTNTY\" = ( 35 * COALESCE(\"EMP_SCORE\",0) + 35 * COALESCE(\"SCHOOLS_SC\",0) + 10 * COALESCE(\"COLLEGES_S\",0) + 20 * COALESCE(\"UNIVERS_02\",0) ) / ( 35 * (CASE WHEN COALESCE(\"EMP_HIGH_S\",0) = 0 THEN 0 ELSE 1 END) + 35 * (CASE WHEN COALESCE(\"SCHOOLS_HI\",0) = 0 THEN 0 ELSE 1 END) + 10 * (CASE WHEN COALESCE(\"COLLEGES_H\",0) = 0 THEN 0 ELSE 1 END) + 20 * (CASE WHEN COALESCE(\"UNIVERS_02\",0) = 0 THEN 0 ELSE 1 END) ) WHERE \"OVERALL_SC\" IS NOT NULL AND COALESCE(\"EMP_HIGH_S\",0) + COALESCE(\"SCHOOLS_HI\",0) + COALESCE(\"COLLEGES_H\",0) + COALESCE(\"UNIVERS_02\",0) > 0" # RECREATION ogrinfo ${file} \ -dialect SQLite \ -sql " UPDATE ${outfile} SET \"RECREATION\" = ( 40 * COALESCE(\"PARKS_SCOR\",0) + 35 * COALESCE(\"TRAILS_SCO\",0) + 25 * COALESCE(\"COMMUNI_02\",0) ) / ( 40 * (CASE WHEN COALESCE(\"PARKS_HIGH\",0) = 0 THEN 0 ELSE 1 END) + 35 * (CASE WHEN COALESCE(\"TRAILS_HIG\",0) = 0 THEN 0 ELSE 1 END) + 25 * (CASE WHEN COALESCE(\"COMMUNI_01\",0) = 0 THEN 0 ELSE 1 END) ) WHERE \"OVERALL_SC\" IS NOT NULL AND COALESCE(\"PARKS_HIGH\",0) + COALESCE(\"TRAILS_HIG\",0) + COALESCE(\"COMMUNI_01\",0) > 0" # rezip if necessary if [[ ${outext} == "zip" ]]; then zip -j ${2} ${tempdir}/${outfile}.* rm -rf ${tempdir} fi
true
6295b535d11551da562c7275277a32a5ade47ee8
Shell
xklnono/anakin_test
/build_cpu/build.sh
UTF-8
1,909
3.234375
3
[]
no_license
#!/bin/bash ANAKIN2_UT_PATH="/home/qa_work/CI/workspace/sys_anakin_merge_build/output" TENSORRT_UT_PATH="/home/qa_work/CI/workspace/sys_tensorRT_merge_build/output" TENSORRT_K1200_UT_PATH="/home/qa_work/CI/workspace/sys_tensorRT_merge_build/output_k1200" WORK_PATH=`pwd` declare -a model_name declare -a model_result_dir model_name=(chinese_ner language_model neural_machine_translation sequence_labeling text_classification) model_result_dir=(images_output output models fluid_models time multi_thread_time paddle_output input_file) ##(1)mkdir for new model for modlename in ${model_name[*]} do if [ ! -d "/home/qa_work/CI/workspace/sys_anakin_compare_output/${modlename}/" ]; then cd /home/qa_work/CI/workspace/sys_anakin_compare_output mkdir ${modlename} for dir in ${model_result_dir[*]} do mkdir ${modlename}/${dir} done else echo "has no new models" fi done exit ##(2)clear the output dir for modlename in ${model_name[*]} do if [ -d "/home/qa_work/CI/workspace/sys_anakin_compare_output/${modlename}/images_output" ] ;then cd /home/qa_work/CI/workspace/sys_anakin_compare_output/${modlename}/images_output rm -rf * fi if [ -d "/home/qa_work/CI/workspace/sys_anakin_compare_output/${modlename}/output" ] ;then cd /home/qa_work/CI/workspace/sys_anakin_compare_output/${modlename}/paddle_output rm -rf * fi done ##(2)clear the UT dir if [ -d $ANAKIN2_UT_PATH ]; then cd $ANAKIN2_UT_PATH echo `pwd` rm -rf * fi #if [ -d $TENSORRT_UT_PATH ];then # cd $TENSORRT_UT_PATH # echo `pwd` # rm -rf * #fi # #if [ -d $TENSORRT_K1200_UT_PATH ];then # cd $TENSORRT_K1200_UT_PATH # echo `pwd` # rm -rf * #fi ##(3)compile the anakin UT cd $WORK_PATH ./build_anakin.sh exit ##(4)compile the paddle UT cd $WORK_PATH ./build_paddle.sh cd $WORK_PATH ./build_lego.sh
true
8f71807b945434587dbf60f7ea58119eff673cf1
Shell
yicheinchang/ModuleManager
/System/lib/logevent/email
UTF-8
5,363
3.484375
3
[ "MIT" ]
permissive
#!/bin/sh if [ $# -lt 1 ]; then echo "Need At Least one argument : Event ID" exit 1 fi event_id=${1} param_1=${2} param_2=${3} param_3=${4} param_4=${5} param_5=${6} param_6=${7} param_7=${8} #email from occur_time=`date "+%Y-%m-%d %H:%M:%S"` msmtp="/usr/bin/msmtp" #msmtp="/tmp/msmtp" mddisk="/dev/md1" mdname="md1" swapname="md0" swapdisk="/dev/md0" sqlite="/usr/bin/sqlite" confdb="/etc/cfg/conf.db" ip_addr=`/sbin/ifconfig eth0|grep "addr:"|awk '{print substr($2,RSTART+6)}'` model=`cat /proc/mdstat|grep $mdname|cut -d ' ' -f4` host_name=`hostname` body='From: <%s> To: <%s> Subject: %s event (%s level) occurred Hello %s This notification message is generated automatically from %s (%s). The system experienced the following event(s). %s Please be aware of the event(s) above. Also if necessary, please react to the event(s). ============================================== The automated message is generated at %s by %s (%s) (%s). ' ################################################# ## Define procedure ################################################# ## called to retrieve sqlite notif_addr get_domainname() { sqlcmd="select v from conf where k like 'nic1_domainname%'" nicdomain=`${sqlite} ${confdb} "${sqlcmd}"` if [ "$event_id" != "EmailTest" ]; then sqlcmd="select v from conf where k='notif_from'" mailfrom=`$sqlite $confdb "${sqlcmd}"` if [ "$mailfrom" == "" ]; then echo "admin@$host_name.$nicdomain" else echo $mailfrom fi else if [ "$param_7" != "" ]; then echo $param_7 else echo "admin@$host_name.$nicdomain" fi fi } ## called to retrieve sqlite notif_addr get_maill_addr() { if [ "$event_id" != "EmailTest" ];then sqlcmd="select v from conf where k like 'notif_addr%'" ${sqlite} ${confdb} "${sqlcmd}" else echo ${param_6} fi } ## called to retrieve sqlite notif_account get_maill_auth_id() { if [ "$event_id" != "EmailTest" ];then sqlcmd="select v from conf where k='notif_account'" ${sqlite} ${confdb} "${sqlcmd}" else echo ${param_4} fi } ## called to retrieve sqlite 
notif_password get_maill_auth_passwd() { if [ "$event_id" != "EmailTest" ];then sqlcmd="select v from conf where k='notif_password'" ${sqlite} ${confdb} "${sqlcmd}" else echo ${param_5} fi } ## called to retrieve sqlite notif_auth get_maill_auth_method() { if [ "$event_id" != "EmailTest" ];then sqlcmd="select v from conf where k='notif_auth'" ${sqlite} ${confdb} "${sqlcmd}" else echo ${param_3} fi } ## called to retrieve sqlite notif_smtp smtp mail server address get_maill_server() { if [ "$event_id" != "EmailTest" ];then sqlcmd="select v from conf where k='notif_smtp'" ${sqlite} ${confdb} "${sqlcmd}" else echo ${param_1} fi } ## called to retrieve sqlite notif_smtp smtp mail server port get_maill_port() { if [ "$event_id" != "EmailTest" ];then sqlcmd="select v from conf where k='notif_smtport'" ${sqlite} ${confdb} "${sqlcmd}" else echo ${param_2} fi } ## called to retrieve user's setting about notification get_notif_setting() { if [ "$event_id" != "EmailTest" ];then ## mail, beep if [ ! "$1" = "" ];then field=notif_"${1}" sqlcmd="select v from conf where k='${field}'" ${sqlite} ${confdb} "${sqlcmd}" fi else echo 1 fi } ## format mail body to send out mail_body() { mail_from=$1 mail_to=$2 recips=$(get_maill_addr) recips=`echo $recips | tr "\n" " "` if [ "${model}" = "linear" ]; then model=JBOD fi case $event_id in EmailTest) #test level="information" description=" This is a test email sent by \"${host_name}\". If you received this email, that means the configuration was set up correctly. 
" printf "${body}" "${mail_from}" "${mail_to}" "Email Test" "${level}" "${recips}" "${host_name}" "${ip_addr}" "${description}" "${occur_time}" "${host_name}" "${model}" "${ip_addr}" exit 0 ;; 997) ########################################## # Event_ID=$event_id # Event_Message_ID=$param_1 # Level=$param_2 # MSG=$param_3 ########################################## event_id_tmp=$event_id event_id=${param_1} level=${param_2} description="${param_3}" ;; *) exit 1 ;; esac printf "${body}" "${mail_from}" "${mail_to}" "Em${event_id}E" "${level}" "${recips}" "${host_name}" "${ip_addr}" "${description}" "${occur_time}" "${host_name}" "${model}" "${ip_addr}" #printf "${body}" "Em${event_id}E" "${level}" "${recips}" "${host_name}" "${ip_addr}" "${description}" "${occur_time}" "${host_name}" "${model}" "${ip_addr}" event_id=${event_id_tmp} } setting=`get_notif_setting mail` if [ ${setting} -eq 1 ]; then user=$(get_maill_auth_id) p=$(get_maill_auth_passwd) host=$(get_maill_server) port=$(get_maill_port) auth="" if [ ! "${user}" = "" ] && [ ! "${p}" = "" ]; then auth="$(get_maill_auth_method)" else auth="off" fi email_from=`get_domainname` #from="$host_name@$ip_addr" from=$email_from recips=$(get_maill_addr) for recip in ${recips} do if [ ! "${recip}" = "" ]; then mail_body "${from}" "${recip}"| ${msmtp} --from="${from}" --host=${host} --port=${port} --auth="${auth}" --user="${user}" --password "${p}" ${recip} fi done fi
true
15526d9f22df0a3f581c164629957fffd150fee3
Shell
spokhyan/DO378-apps
/DO378/labs/secure-review/start-services.sh
UTF-8
620
2.828125
3
[ "Apache-2.0" ]
permissive
#!/bin/bash pushd ~/DO378/labs/secure-review/quarkus-conference/ echo "Starting the 'microservice-jwt' project " cd microservice-jwt mvn clean quarkus:dev -Ddebug=5005 & JWT_PID=$! sleep 5 cd .. echo "Starting the 'microservice-speaker' project " cd microservice-speaker mvn clean quarkus:dev -Ddebug=5006 & SPEAKER_PID=$! sleep 5 cd .. echo "Starting the 'microservice-session' project " cd microservice-session mvn clean quarkus:dev -Ddebug=5007 & SESSION_PID=$! sleep 5 cd .. echo read -p "Press enter to Terminate" echo kill $JWT_PID $SPEAKER_PID $SESSION_PID sleep 2 echo "All services terminated" echo popd
true
324ed352a3aa9dc7a0a371fb75364e58f24af22a
Shell
CM0R31R4/motor_desarrollo
/InOutExe/respaldo_aut_tablas.sh
UTF-8
12,103
3.25
3
[]
no_license
#!/bin/bash cd /home/motor/InOutExe/ #Base de Datos. BASE="dmotor" ############################################################################## #/usr/bin/psql $BASE -c "drop table tablas_respaldo; #create table tablas_respaldo( # tab_origen varchar(50), -- Nombre de la tabla a respaldar. # tab_destino varchar(50), -- Nombre de tabla de destino del respaldo. # estado_resp integer -- 0 = Inactivo | 1 = Activo #); #create index tablas_respaldo_01 on tablas_respaldo (tab_origen);" #/usr/bin/psql $BASE -c "insert into tablas_respaldo values('tx_bono3', 'historico_tx_bono3', 0);" #/usr/bin/psql $BASE -c "insert into tablas_respaldo values('tx_cias', 'historico_tx_cias', 0);" #/usr/bin/psql $BASE -c "insert into tablas_respaldo values('respaldo_envbonis', 'historico_envbonis', 0);" #/usr/bin/psql $BASE -c "insert into tablas_respaldo values('respaldo_cia', 'historico_cia', 0);" ############################################################################## #Directorio de Respaldo. DIRECTORIO="/home/motor/InOutExe/Respaldo" #Fecha para formar el nombre del archivo. DATE_FILE=`/usr/bin/psql -At -F';' -c "SELECT to_char(CURRENT_TIMESTAMP -INTERVAL '0 days','YYYY-MM-DD');" $BASE`; #Fechas de Inicio y Termino para respaldar. DATE=`/usr/bin/psql -At -F';' -c "SELECT to_char(CURRENT_TIMESTAMP -INTERVAL '0 days','YYYY-MM-DD');" $BASE`; DATE_INI=`/usr/bin/psql -At -F';' -c "SELECT to_char(CURRENT_TIMESTAMP -INTERVAL '7 days','YYYY-MM-DD');" $BASE`; #Mail destinatario del reporte. MAIL="claudio.moreira@acepta.com" #MAIL="jaime.cossio@acepta.com" #Consulta por registros a respaldar desde tabla_respaldos. REG=`/usr/bin/psql -At -F';' -c "SELECT tab_origen, tab_destino, estado_resp::varchar FROM tablas_respaldo WHERE estado_resp = 1;" $BASE`; DATA=($REG) I=0; STR=${DATA[$I]} TAB_ORIGEN=$(echo $STR | cut -f 1 -d';') TAB_DESTINO=$(echo $STR | cut -f 2 -d';') RESPALDA=$(echo $STR | cut -f 3 -d';') #Busca registros con estado en 1 para respaldar. Si el estado es 0, entonces no hace nada. 
while [ ${#RESPALDA} -gt 0 ] do #Arma nombre para archivo de respaldo y log. FILE=$DIRECTORIO"/"$TAB_DESTINO"_"$DATE_FILE".dump" LOG=$DIRECTORIO"/log_"$TAB_DESTINO"_"$DATE_FILE".txt" #Construye condicion where segun la tabla de origen. #Esto se debe revisar cada vez que se agrega una tabla para respaldar. case $TAB_ORIGEN in "tx_bono3") WHERE="WHERE fecha_in_tx >= '$DATE_INI' AND fecha_in_tx <= '$DATE' AND rut_benef <> '0000000001-9'" ;; "tx_cias") WHERE="WHERE tiempo_ini_cia >= '$DATE_INI' AND tiempo_ini_cia <= '$DATE'" ;; "respaldo_cia") WHERE="WHERE fecha >= '$DATE_INI' AND fecha <= '$DATE'" ;; "respaldo_envbonis") WHERE="WHERE fecha >= '$DATE_INI' AND fecha <= '$DATE'" ;; #"tx_bono3_test") # WHERE="WHERE fecha_in_tx >= '$DATE_INI' AND fecha_in_tx <= '$DATE' AND rut_benef <> '0000000001-9'" #;; #"tx_cias_test") # WHERE="WHERE tiempo_ini_cia >= '$DATE_INI' AND tiempo_ini_cia <= '$DATE'" #;; #"respaldo_cia_test") # WHERE="WHERE fecha >= '$DATE_INI' AND fecha <= '$DATE'" #;; #"respaldo_envbonis_test") # WHERE="WHERE fecha >= '$DATE_INI' AND fecha <= '$DATE'" #;; *) echo "======================================================================" echo "Error en la estructura de la consulta. Revisar." echo "======================================================================" >>$LOG echo "Error en la estructura de la consulta. Revisar." >> $LOG /usr/bin/mutt -s "Error en la estructura de la consulta. Revisar." $MAIL < $LOG exit 1 ;; esac echo "======================================================================" echo "======================================================================" >>$LOG #Verifica si hay registros para respaldar. echo "Verifica si hay registros para respaldar en $TAB_ORIGEN." echo "Verifica si hay registros para respaldar en $TAB_ORIGEN." 
>> $LOG CUENTA_ORI=`/usr/bin/psql -At -F';' -c "SELECT COUNT(*) FROM $TAB_ORIGEN $WHERE;" $BASE`; &>> $LOG if [[ $CUENTA_ORI -eq 0 ]]; then echo "No hay registros para generar respaldo (Revisar tabla $TAB_ORIGEN)." echo "No hay registros para generar respaldo (Revisar tabla $TAB_ORIGEN)." >> $LOG /usr/bin/mutt -s "No hay registros para generar respaldo (Revisar tabla $TAB_ORIGEN)." $MAIL < $LOG else #Verifica si ya existe archivo de respaldo en directorio de respaldo. #if [ -e "$FILE" ]; then # echo "Error, el archivo $FILE ya existe en carpeta de respaldo (Revisar carpeta)." # echo "Error, el archivo $FILE ya existe en carpeta de respaldo (Revisar carpeta)." >> $LOG # /usr/bin/mutt -s "Error, archivo ya existente en carpeta de respaldos." $MAIL < $LOG # exit 1; #fi #Hora de Inicio. HORA_INI=`/usr/bin/psql -At -F';' -c "SELECT to_char(CURRENT_TIMESTAMP,'YYYY-MM-DD HH24:MI:SS')" $BASE`; echo ">> Hora de Inicio de respaldo de tabla $TAB_ORIGEN : $HORA_INI" &>>$LOG #Rango de fechas del respaldo. echo "Respaldo desde el $DATE_INI hasta el $DATE" &>>$LOG #Truncate a tabla de destino. echo "Truncate a tabla $TAB_DESTINO." echo "Truncate a tabla $TAB_DESTINO." >> $LOG /usr/bin/psql $BASE -c "TRUNCATE table $TAB_DESTINO" &>> $LOG #Vacuum full a tabla de destino. echo "Vacuum full a tabla $TAB_DESTINO." echo "Vacuum full a tabla $TAB_DESTINO." >> $LOG /usr/bin/psql $BASE -c "VACUUM FULL $TAB_DESTINO" &>> $LOG #Reindex en tabla de destino. echo "Reindex en tabla $TAB_DESTINO." echo "Reindex en tabla $TAB_DESTINO." >> $LOG /usr/bin/psql $BASE -c "REINDEX table $TAB_DESTINO" &>> $LOG #Copiar desde tabla de origen a tabla de destino. echo "Copiar desde tabla $TAB_ORIGEN a tabla $TAB_DESTINO." echo "Copiar desde tabla $TAB_ORIGEN a tabla $TAB_DESTINO." >> $LOG /usr/bin/psql $BASE -c "INSERT INTO $TAB_DESTINO SELECT * FROM $TAB_ORIGEN $WHERE;" &>> $LOG #Cuenta registros en tabla de destino. 
CUENTA_DEST=`/usr/bin/psql -At -F';' -c "SELECT COUNT(*) FROM $TAB_DESTINO $WHERE;" $BASE`; &>> $LOG echo "Cuenta registros en tabla de destino ($TAB_DESTINO) = $CUENTA_DEST." echo "Cuenta registros en tabla de destino ($TAB_DESTINO) = $CUENTA_DEST." >> $LOG #Compara tabla de origen v/s tabla de destino. echo "Compara tabla $TAB_ORIGEN ($CUENTA_ORI) v/s tabla $TAB_DESTINO ($CUENTA_DEST)." echo "Compara tabla $TAB_ORIGEN ($CUENTA_ORI) v/s tabla $TAB_DESTINO ($CUENTA_DEST)." >> $LOG if [[ $CUENTA_ORI -ne $CUENTA_DEST ]]; then echo "Error, hay diferencia en cant. de registros entre $TAB_ORIGEN ($CUENTA_ORI) y $TAB_DESTINO ($CUENTA_DEST)." echo "Error, hay diferencia en cant. de registros entre $TAB_ORIGEN ($CUENTA_ORI) y $TAB_DESTINO ($CUENTA_DEST)." >> $LOG echo "Se eliminan registros copiados a tabla $TAB_DESTINO." echo "Se eliminan registros copiados a tabla $TAB_DESTINO." >> $LOG /usr/bin/psql $BASE -c "DELETE FROM $TAB_DESTINO $WHERE;" /usr/bin/mutt -s "Error, diferencia en cant. de registros entre $TAB_ORIGEN y $TAB_DESTINO." $MAIL < $LOG else #Copiar a archivo file.dump desde tabla de origen. /usr/bin/psql $BASE -c "\COPY (SELECT * FROM $TAB_ORIGEN $WHERE) TO '$FILE'" &>> $LOG echo "Generando archivo $FILE ..." echo "Generando archivo $FILE ..." >> $LOG #Verifica consistencia lineas del archivo contra registros de tabla de origen. REGS_ORI=`wc -l $FILE| awk {'print $1'}` echo "Cuenta lineas del archivo ($REGS_ORI) contra registros en tabla de origen ($CUENTA_ORI)." echo "Cuenta lineas del archivo ($REGS_ORI) contra registros en tabla de origen ($CUENTA_ORI)." >> $LOG if [[ $REGS_ORI -ne $CUENTA_ORI ]]; then echo "Error, hay diferencia en cant. de registros entre $TAB_ORIGEN ($CUENTA_ORI) y registros en archivo ($REGS_ORI)." echo "Error, hay diferencia en cant. de registros entre $TAB_ORIGEN ($CUENTA_ORI) y registros en archivo ($REGS_ORI)." >> $LOG echo "Se eliminan registros copiados a tabla $TAB_DESTINO y archivo generado." 
echo "Se eliminan registros copiados a tabla $TAB_DESTINO y archivo generado." >> $LOG /usr/bin/psql $BASE -c "DELETE FROM $TAB_DESTINO $WHERE;" rm -f $FILE /usr/bin/mutt -s "Error, diferencia en cant. de registros entre $TAB_ORIGEN y archivo." $MAIL < $LOG else #Carga el respaldo en Amazon-RDS echo "Carga el respaldo en Amazon-RDS." echo "Carga el respaldo en Amazon-RDS." >> $LOG /usr/bin/psql -h motoramazon.cjmbprft2nd8.us-east-1.rds.amazonaws.com -p 5432 -U motor -d motorimed -c "\COPY $TAB_ORIGEN FROM '$FILE'" &>> $LOG #Cuenta registros cargados en tabla de destino en Amazon. echo "Cuenta registros cargados en tabla de destino ($TAB_ORIGEN) en Amazon." echo "Cuenta registros cargados en tabla de destino ($TAB_ORIGEN) en Amazon." >> $LOG CUENTA_DEST_AMAZON=`/usr/bin/psql -h motoramazon.cjmbprft2nd8.us-east-1.rds.amazonaws.com -p 5432 -U motor -d motorimed -At -c "SELECT COUNT(*) FROM $TAB_ORIGEN $WHERE"`; &>> $LOG #Compara cantidad de registros entre tabla de origen y tabla de destino en Amazon. echo "Compara cantidad de registros entre tabla de origen y tabla de destino en Amazon." echo "Compara cantidad de registros entre tabla de origen y tabla de destino en Amazon." >> $LOG if [[ $CUENTA_DEST_AMAZON -ne $CUENTA_ORI ]]; then echo "Error, diferencia en cant. de registros entre $TAB_ORIGEN ($CUENTA_ORI) y Amazon ($CUENTA_DEST_AMAZON)." echo "Error, diferencia en cant. de registros entre $TAB_ORIGEN ($CUENTA_ORI) y Amazon ($CUENTA_DEST_AMAZON)." >> $LOG echo "Se eliminan registros de $TAB_DESTINO, se borra archivo y se eliminan registros en Amazon ($TAB_ORIGEN)." echo "Se eliminan registros de $TAB_DESTINO, se borra archivo y se eliminan registros en Amazon ($TAB_ORIGEN)." >> $LOG /usr/bin/psql $BASE -c "DELETE FROM $TAB_DESTINO $WHERE;" rm -f $FILE /usr/bin/psql -h motoramazon.cjmbprft2nd8.us-east-1.rds.amazonaws.com -p 5432 -U motor -d motorimed -At -c "DELETE FROM $TAB_ORIGEN $WHERE" /usr/bin/mutt -s "Error, diferencia en cant. 
de registros entre $TAB_ORIGEN y Amazon." $MAIL < $LOG else #Borra los registros respaldados de la tabla de origen. echo "Borra los registros respaldados de la tabla $TAB_ORIGEN." echo "Borra los registros respaldados de la tabla $TAB_ORIGEN." >> $LOG /usr/bin/psql $BASE -c "DELETE FROM $TAB_ORIGEN $WHERE" &>> $LOG #Vacuum full a tabla de origen. echo "Vacuum full a tabla $TAB_ORIGEN." echo "Vacuum full a tabla $TAB_ORIGEN." >> $LOG /usr/bin/psql $BASE -c "VACUUM FULL VERBOSE $TAB_ORIGEN" &>> $LOG #Reindex en tabla de origen. echo "Reindex en tabla $TAB_ORIGEN." echo "Reindex en tabla $TAB_ORIGEN." >> $LOG /usr/bin/psql $BASE -c "REINDEX table $TAB_ORIGEN" &>> $LOG echo "Proceso Respaldo OK. Se ha generado un archivo de nombre: $FILE." echo "Cantidad de registros en tabla de origen ($TAB_ORIGEN): $CUENTA_ORI." >> $LOG echo "Cantidad de registros en tabla de destino ($TAB_DESTINO): $CUENTA_DEST." >> $LOG echo "Cantidad de registros en tabla de Amazon ($TAB_ORIGEN): $CUENTA_DEST_AMAZON." >> $LOG echo "Cantidad de registros en archivo: $REGS_ORI." >> $LOG echo "Proceso Respaldo OK. Se ha generado un archivo de nombre: $FILE." >> $LOG #Hora de Termino. HORA_FIN=`/usr/bin/psql -At -F';' -c "SELECT to_char(CURRENT_TIMESTAMP,'YYYY-MM-DD HH24:MI:SS')" $BASE`; echo ">> Hora de termino del respaldo : $HORA_FIN" >>$LOG #Si el proceso de respaldo fue exitoso, entonces envia un correo con los datos. /usr/bin/mutt -s "Proceso Respaldo OK. Se ha generado un archivo de nombre: $FILE" $MAIL < $LOG fi fi fi fi I=`expr $I + 1` STR=${DATA[$I]} TAB_ORIGEN=$(echo $STR | cut -f 1 -d';') TAB_DESTINO=$(echo $STR | cut -f 2 -d';') RESPALDA=$(echo $STR | cut -f 3 -d';') done exit 1;
true
6d82144bfbd0aa4566a072858eba6e0e53f7317e
Shell
sergej-samsonow/amt
/development/tests/01-setup-amt/01-setup-from-readme
UTF-8
595
3.6875
4
[ "Apache-2.0" ]
permissive
#!/bin/bash # check environment is empty if [ ! -z "$(ls -A)" ] then echo "can't continue test environment is not empty" exit 1 fi # execute setup command `mkdir amt && cd amt && wget -q https://raw.githubusercontent.com/sergej-samsonow/amt/master/amt && chmod u+x amt` # check amt command is present and executable if [ ! -d "amt" ] then echo "amt directory not created" exit 1 fi if [ ! -f "amt/amt" ] then echo "amt command file not downloaded" exit 1 fi if [ ! -x "amt/amt" ] then echo "amt command file not executable" exit 1 fi # anything ok exit 0
true
23df1a9040bfc73f922f2466e3690937b11825f1
Shell
keshri007/allprogram
/day5/if_else_max_min.sh
UTF-8
887
3.03125
3
[]
no_license
##!/bin/bash -x a=$((RANDOM%900+100)) b=$((RANDOM%900+100)) c=$((RANDOM%900+100)) d=$((RANDOM%900+100)) e=$((RANDOM%900+100)) echo $a" "$b" "$c" "$d" "$e echo "max" if [ $a -gt $b -a $a -gt $c -a $a -gt $d -a $a -gt $e ] then echo $a else if [ $b -gt $a -a $b -gt $c -a $b -gt $d -a $b -gt $e ] then echo $b else if [ $c -gt $b -a $c -gt $a -a $c -gt $d -a $c -gt $e ] then echo $c else if [ $d -gt $b -a $d -gt $c -a $d -gt $a -a $d -gt $e ] then echo $d else echo $e fi fi fi fi echo "min" if [ $a -lt $b -a $a -lt $c -a $a -lt $d -a $a -lt $e ] then echo $a else if [ $b -lt $a -a $b -lt $c -a $b -lt $d -a $b -lt $e ] then echo $b else if [ $c -lt $b -a $c -lt $a -a $c -lt $d -a $c -lt $e ] then echo $c else if [ $d -lt $b -a $d -lt $c -a $d -lt $a -a $d -lt $e ] then echo $d else echo $e fi fi fi fi
true
59023490c6ed871d3915894858457e24cf592f6f
Shell
adc613/Dotfiles
/installs/install-node.sh
UTF-8
379
3.3125
3
[]
no_license
#/bin/bash # macOS-specific installations. if [ $(uname -s) = 'Darwin' ]; then brew install node brew install yarn elif [ $(uname -s) = "Linux" ]; then if [ $(which apt-get) != "" ]; then sudo apt-get install -y build-essential curl -sL https://deb.nodesource.com/setup_9.x | sudo -E bash - sudo apt-get install -y nodejs fi else echo "Get a real OS" fi
true
2376e884b8ed8836abf77d653235e0ee70f69044
Shell
adolfosbh/gra2mol
/examples/Grammar2Model.examples.script2Model/files/src/scripts/A.3
UTF-8
128
2.625
3
[]
no_license
#!/bin/sh if [ "$1" = "configure" ] && which install-docs >/dev/null 2>&1 ; then install-docs -i /usr/share/doc-base/DOC-ID fi
true
61238d93cfecafce7aa26947000ae8c2d24a1b2c
Shell
svn2github/openwatcom-__test__
/branch/v2/btclean.sh
UTF-8
573
3.25
3
[]
no_license
#!/bin/sh # # Script to clean a bootstrap build of Open Watcom tools. # Run builder (if it exists) to clean out most of the files. if [ ! -f $OWBINDIR/builder ]; then echo Cannot find builder - did you run boot.sh? else cd $OWROOT/src builder bootclean cd $OWROOT fi # Nuke the builder and wmake bootstrap directories cd $OWROOT/src/builder rm -rf $OWOBJDIR cd $OWROOT/src/make rm -rf $OWOBJDIR # Remove any log files cd $OWROOT/src rm -f *.lo? # Finally delete the builder and wmake executables cd $OWROOT rm -f $OWBINDIR/wmake rm -f $OWBINDIR/builder
true
adbaf85667fe88bd1a47869634ccc6ec641789df
Shell
dingwell/everyday_benchmarks
/test_compression.sh
UTF-8
6,819
3.953125
4
[]
no_license
#!/bin/bash # # Copyright 2016 Adam Dingwell <adam@YOGHURT> # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # This script will test several compression tools on grib files # It will output processing times and compression ratios set -e FILES=$@ # Settings: # (TODO: allow setting this from the CLI) DO_GZIP=true DO_BZIP=true DO_XZ=true DO_LZOP=true total_size_kB(){ # Takes a list of files as argument du -B kB -c $@|tail -n1|sed 's/kB.*//' # WARNING: it takes time before the new size shows up # don't use this function on newly created or modified files! 
} get_suffix(){ # Returns the default suffix for a given compression tool # Takes one argument (the compression tool) CMD="$1" if ! command -v $CMD >/dev/null 2>&1; then echo "Command: '$CMD' not found, aborting" exit 1 fi # Create an empty file: TMPFILE=${0##*/}-${PID}.tmp touch $TMPFILE $CMD $TMPFILE NEWFILE=$(ls $TMPFILE.*) SUFFIX=$(echo "$NEWFILE"|sed 's/'"$TMPFILE"'\.//') echo $SUFFIX rm $NEWFILE } get_compr_ratio_gz(){ # Takes a list of gzip-compressed files as argument: gzip -l $@ |tail -n1|egrep -o '[0-9]+.[0-9]%' } get_compr_ratio_lzo(){ # Takes a list of .lz files as argument TMP=$(lzop -l $@ |tail -n1|egrep -o '[0-9]+.[0-9]%') TMP=$(echo $TMP|sed 's/%//') # Remove '%' local RATIO=$(echo "scale=1; 100-$TMP"|bc) echo "$RATIO"'%' } get_compr_ratio_bz2(){ # Takes a list of bzip2-compressed files as argument local COMPR_SIZE=$(cat $@|wc -c) # Size in bytes local ORIG_SIZE=$(bzip2 -dc $@|wc -c) local RATIO=$(echo "scale=1;100-100*$COMPR_SIZE/$ORIG_SIZE"|bc) echo "$RATIO"'%' } get_compr_ratio_xz(){ # Takes a list of xz-compressed files as argument local COMPR_SIZE=$(cat $@|wc -c) # Size in bytes local ORIG_SIZE=$(xz -dc $@|wc -c) local RATIO=$(echo "scale=1;100-100*$COMPR_SIZE/$ORIG_SIZE"|bc) echo "$RATIO%" } test_compression(){ # Takes compression tool as argument # $1 - compression command # $2 - compression level (1-9) # Needs the following variables: The pipe and plus characters can be used to visually separate columns although this is not needed. Multiple separator lines after another are treated as one separator line. # FILES - list of files to compress local CMD="$1" local LVL="$2" #get_suffix for $CMD SUFFIX=$(get_suffix "$CMD") # Get compression suffix # Ensure that no already compressed files exist: if ls *.$SUFFIX &> /dev/null; then echo "Found some .$SUFFIX files in working directory!" 1>&2 echo "Please remove before running test!" 
1>&2 exit 1 fi # Begin compression test: T0=$SECONDS if [[ $CMD == "lzop" ]]; then $CMD -U -$LVL $FILES # Run command (U=delete original files) else # Most tools will remove the uncompressed files by default $CMD -$LVL $FILES # Run compress command fi T1=$SECONDS DT_COM=$(( T1-T0 )) #echo "Compression time = ${DT}s" COMP_RATIO=$(get_compr_ratio_$SUFFIX *.$SUFFIX) #echo "Compression ratio = $COMP_RATIO" # Begin decompression test: T0=$SECONDS if [[ $CMD == "lzop" ]]; then $CMD -d -U *.$SUFFIX # Run command (U=delete original files) else # Most tools will remove the uncompressed files by default $CMD -d *.$SUFFIX # Run decompress command fi T1=$SECONDS DT_DEC=$(( T1-T0 )) #echo "Decompression time = ${DT}s" echo "$COMP_RATIO ${DT_COM}s ${DT_DEC}s" } print_header(){ local HEADER='| | ' if $DO_GZIP; then HEADER="$HEADER"'| gzip ' fi if $DO_LZOP; then HEADER="$HEADER"'| lzop ' fi if $DO_BZIP; then HEADER="$HEADER"'| bzip2 ' fi if $DO_XZ; then HEADER="$HEADER"'| xz ' fi local NWORDS=$(echo $HEADER|wc -w) local NCOLS=$(echo "$NWORDS/2-1"|bc) echo "|------------------------------------------------------------|" echo "$HEADER" echo "|------------------------------------------------------------|" } print_footer(){ echo 'FOOTER?' 
} KB_BEFORE=$(total_size_kB $FILES) # With unit echo "Total uncompressed size = $KB_BEFORE kB" echo "'RATIO' below is the amount of space _saved_" print_header # Perform tests: for lvl in $(seq 9); do RATIOS="" C_TIMES="" U_TIMES="" if $DO_GZIP; then A="$(test_compression gzip $lvl)" RATIOS="$RATIOS | $(echo $A|awk '{print $1}')" C_TIMES="$C_TIMES | $(echo $A|awk '{printf "%6d", $2}')" U_TIMES="$U_TIMES | $(echo $A|awk '{printf "%6d", $3}')" fi if $DO_LZOP; then A="$(test_compression lzop $lvl)" RATIOS="$RATIOS | $(echo $A|awk '{print $1}')" C_TIMES="$C_TIMES | $(echo $A|awk '{printf "%6d", $2}')" U_TIMES="$U_TIMES | $(echo $A|awk '{printf "%6d", $3}')" fi if $DO_BZIP; then A="$(test_compression bzip2 $lvl)" RATIOS="$RATIOS | $(echo $A|awk '{print $1}')" C_TIMES="$C_TIMES | $(echo $A|awk '{printf "%6d", $2}')" U_TIMES="$U_TIMES | $(echo $A|awk '{printf "%6d", $3}')" fi if $DO_XZ; then A="$(test_compression xz $lvl)" RATIOS="$RATIOS | $(echo $A|awk '{print $1}')" C_TIMES="$C_TIMES | $(echo $A|awk '{printf "%6d", $2}')" U_TIMES="$U_TIMES | $(echo $A|awk '{printf "%6d", $3}')" fi echo "| | COMP. RATIO $RATIOS" echo "|LEV$lvl | COMP. TIME [s]$C_TIMES" echo "| | DEC. TIME [s]$U_TIMES" echo "|------------------------------------------------------------|" done print_footer
true
8598723ce545b3348c30229dbbdf202bdad81c08
Shell
Semen2/installarchefi
/VM-archlinux-2.sh
UTF-8
976
3.03125
3
[]
no_license
#!/bin/bash read -p "Введите имя компьютера: " hostname read -p "Введите имя пользователя: " username echo 'Прописываем имя компьютера' echo $hostname > /etc/hostname ln -sf /usr/share/zoneinfo/Asia/Yekaterinburg /etc/localtime echo "en_US.UTF-8 UTF-8" > /etc/locale.gen echo "ru_RU.UTF-8 UTF-8" >> /etc/locale.gen locale-gen echo 'LANG="ru_RU.UTF-8"' > /etc/locale.conf echo 'KEYMAP=ru' >> /etc/vconsole.conf echo 'FONT=cyr-sun16' >> /etc/vconsole.conf pacman -Syy efibootmgr -d /dev/sda -p 1 -c -L "Arch Linux" -l /vmlinuz-linux -u "root=/dev/sda2 rw initrd=\initramfs-linux.img" echo 'efibootmgr -o XXXX XXXX - номер записи' useradd -m -G wheel -s /bin/bash $username echo 'Создаем root пароль' passwd echo 'Устанавливаем пароль пользователя' passwd $username echo 'Устанавливаем SUDO' echo '%wheel ALL=(ALL) ALL' >> /etc/sudoers
true
33e758084585b8daff66d14142a7a5f344ffe65c
Shell
DaMandal0rian/terra-chain-node
/prepare_node.sh
UTF-8
491
2.546875
3
[]
no_license
#!/usr/bin/env bash source .env rm -rf terra-chain-spec # Checkout chain spec files if not exist git clone https://github.com/ContractLand/terra-chain-spec.git cd terra-chain-spec git checkout $NETWORK cd .. # Populate config with env variables sed -i "s/NODE_NAME/$NODE_NAME/g" ./monitor/app.json sed -i "s/DASHBOARD_IP/$DASHBOARD_IP/g" ./monitor/app.json sed -i "s/DASHBOARD_PORT/$DASHBOARD_PORT/g" ./monitor/app.json sed -i "s/DASHBOARD_SECRET/$DASHBOARD_SECRET/g" ./monitor/app.json
true
ed941a41ea0ec5f376717aa37a82dd8def69c715
Shell
RIFTIO/RIFT.ware
/modules/tools/scripts/container_tools/usr/rift/container_tools/base.setup.sh
UTF-8
592
2.96875
3
[ "Apache-2.0" ]
permissive
# Install enum34 if we are at python 3.4 or earlier test -h /usr/rift && rm -f /usr/rift chmod 777 /usr/rift /usr/rift/usr/share if python3 -c 'import sys;assert sys.version_info < (3,5)' >/dev/null 2>&1; then echo "found python3 older than 3.5 ... installing enum34==1.0.4" cmd $PIP3_INSTALLER install enum34==1.0.4 else echo "found python3 at least version 3.5 .. skipping enum34" fi if [[ $RIFT_PLATFORM == 'ub16' ]]; then cat <<EOF >>/etc/apt/apt.conf.d/20auto-upgrades APT::Periodic::Unattended-Upgrade "0"; APT::Periodic::Download-Upgradeable-Packages "0"; EOF fi
true
bfeaad6ce1defe538da806384647c8dc83f92e52
Shell
ArnaudJean/module06
/RNA-Seq/sortAll.sh
UTF-8
574
3.171875
3
[]
no_license
#!/usr/bin/env bash #sortAll.sh filepath= "/Paired/" leftSuffix = ".R1.fastq" rightSuffix = "R2.fastq" samFilePath = "sam/" samFileSuffix =".sam" bamfilePath = "bam/" bamSuffix = ".bam" pairedOutPath ="Paired" unpairedOutPath = "Unpaired" mkdir -p $bamfilePath function sortAll{ for leftFile in $filepath* $leftSuffix pathRemoved = "${leftInFile/$filepath/}" sampleName = "${pathRemoved/$leftSuffix/}" samtools sort\ $samFilePath$sampleName$samFileSuffix \ -o $bamfilePath$sampleName$bamSuffix done } sortAll 1>sortAll.log 2>sortAll.err &
true
0aad0a7b3ca9cc20d022d82d881d2db9119c0728
Shell
lizhigang858/k8s-boot
/bash/bootstrap_worker.sh
UTF-8
832
3.203125
3
[]
no_license
#!/bin/bash echo "[TASK 0] Pull required images for k8s from aliyun and tag to official name" images=( 'gcr.akscn.io/google_containers/kube-proxy:v1.14.2' 'gcr.akscn.io/google_containers/pause:3.1' ) official_prefix=k8s.gcr.io/ for i in ${images[@]} ; do docker pull $i docker tag $i ${official_prefix}${i##*/} docker rmi $i done images1=( 'registry.cn-hangzhou.aliyuncs.com/osoulmate/flannel:v0.10.0-amd64' ) official_prefix1=quay.io/coreos/ for i in ${images1[@]} ; do docker pull $i docker tag $i ${official_prefix1}${i##*/} docker rmi $i done # Join worker nodes to the Kubernetes cluster echo "[TASK 1] Join node to Kubernetes Cluster" yum install -q -y sshpass sshpass -p $2 scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $1:/joincluster.sh /joincluster.sh bash /joincluster.sh
true
89ec4ff2bba5e7078e3e797a1886745fb77f181a
Shell
archyufa/mygkecluster
/cli/run.sh
UTF-8
4,270
2.6875
3
[]
no_license
#!/bin/bash ## Least Privilege Service Account for default node pool gcloud services enable cloudresourcemanager.googleapis.com gkeSaName=$clusterName-sa gkeSaId=$gkeSaName@$projectId.iam.gserviceaccount.com gcloud iam service-accounts create $gkeSaName \ --display-name=$gkeSaName roles="roles/logging.logWriter roles/monitoring.metricWriter roles/monitoring.viewer" for r in $roles; do gcloud projects add-iam-policy-binding $projectId --member "serviceAccount:$gkeSaId" --role $r; done ## Setup Container Registry gcloud services enable artifactregistry.googleapis.com containerRegistryName=containers gcloud artifacts repositories create $containerRegistryName \ --location $region \ --repository-format docker gcloud services enable containeranalysis.googleapis.com gcloud services enable containerscanning.googleapis.com gcloud artifacts repositories add-iam-policy-binding $containerRegistryName \ --location $region \ --member "serviceAccount:$gkeSaId" \ --role roles/artifactregistry.reader ## Setup Binary Authorization gcloud services enable binaryauthorization.googleapis.com sed -i "s/REGION/$region/g" ../configs/binauth-policy.yaml sed -i "s/PROJECT_ID/$projectId/g" ../configs/binauth-policy.yaml sed -i "s/REGISTRY_NAME/$containerRegistryName/g" ../configs/binauth-policy.yaml gcloud container binauthz policy import ../configs/binauth-policy.yaml ## Create GKE cluster gcloud services enable container.googleapis.com # Delete the default compute engine service account if you don't have have the Org policy iam.automaticIamGrantsForDefaultServiceAccounts in place projectNumber="$(gcloud projects describe $projectId --format='get(projectNumber)')" gcloud iam service-accounts delete $projectNumber-compute@developer.gserviceaccount.com --quiet # TODO: remove `beta` once confidential computing is GA. 
gcloud beta container clusters create $clusterName \ --enable-confidential-nodes \ --enable-binauthz \ --service-account $gkeSaId \ --workload-pool=$projectId.svc.id.goog \ --release-channel rapid \ --zone $zone \ --disk-type pd-ssd \ --machine-type n2d-standard-4 \ --disk-size 256 \ --image-type cos_containerd \ --enable-network-policy \ --addons NodeLocalDNS,HttpLoadBalancing,ConfigConnector \ --enable-shielded-nodes \ --shielded-secure-boot \ --enable-ip-alias \ --enable-autorepair \ --enable-autoupgrade \ --enable-stackdriver-kubernetes \ --max-pods-per-node 30 \ --default-max-pods-per-node 30 \ --services-ipv4-cidr '/25' \ --cluster-ipv4-cidr '/20' \ --enable-vertical-pod-autoscaling ## Get GKE cluster kubeconfig gcloud container clusters get-credentials $clusterName \ --zone $zone # Enable Anthos gcloud services enable anthos.googleapis.com # FIXME: GKE connect, etc. # ASM mkdir ~/tmp curl https://storage.googleapis.com/csm-artifacts/asm/install_asm_1.8 > ~/tmp/install_asm chmod +x ~/tmp/install_asm ~/tmp/install_asm \ --project_id $projectId \ --cluster_name $clusterName \ --cluster_location $zone \ --mode install \ --enable-all # --option cloud-tracing ## Add labels to kube-system and istio-sytem namespaces, as per https://alwaysupalwayson.com/calico/ kubectl label ns kube-system name=kube-system kubectl label ns istio-system name=istio-system # Config Sync kubectl apply -f ../components/config-sync-operator.yaml sed -i "s/CLUSTER_NAME/$clusterName/g" ../configs/config-management.yaml kubectl apply -f ../configs/config-management.yaml # Config Connector ccSa=configconnector-sa gcloud iam service-accounts create $ccSa gcloud projects add-iam-policy-binding $projectId \ --member="serviceAccount:$ccSa@$projectId.iam.gserviceaccount.com" \ --role="roles/owner" # FIXME, shouldn't be `roles/owner` gcloud iam service-accounts add-iam-policy-binding $ccSa@$projectId.iam.gserviceaccount.com \ 
--member="serviceAccount:$projectId.svc.id.goog[cnrm-system/cnrm-controller-manager]" \ --role="roles/iam.workloadIdentityUser" sed -i "s/SERVICE_ACCOUNT_NAME/$ccSa/g" ../configs/config-connector.yaml sed -i "s/PROJECT_ID/$projectId/g" ../configs/config-connector.yaml kubectl apply -f ../configs/config-connector.yaml # TODOs: # - scope namespaced instead of cluster for Config Connector (to have proper sa scope)? # - multi project id managementm not all in GKE's project
true
42e720bbcb9a9511428acab7dd18b7648f9d4701
Shell
antiX-Dave/desktop-session
/desktop-session/lib/desktop-session/ds-lib-main
UTF-8
4,492
3.765625
4
[]
no_license
#!/bin/bash #Name: ds-lib-main fallback_desktop='rox-fluxbox' fallback_wmx="/usr/bin/icewm-session /usr/bin/startfluxbox" non_autostart=fluxbox,icewm,jwm rox_enabled=fluxbox,icewm,jwm,openbox space_enabled=fluxbox,icewm,jwm,openbox min_enabled=fluxbox,icewm,jwm icon_managers=rox,space,none parent_name=desktop-session protect_cmds="^(slim|dbus|roxterm|gnome-pty|desktop-session)" stale_time=10 read_file() { local file=$1 local data=$(cat $file 2>/dev/null) #rm -f $file echo $data [ "$data" ] return $? } # AFAIK, this is not needed save_icon_pid() { local pid=$! say "icon pid: $pid" echo $pid > $icon_pid_file } #------------------------------------------------------------------------------ # Function: find_my_procs <process-name> # # Return list of pids for process named <process-name> that are owned by us and # are running on our DISPLAY. #------------------------------------------------------------------------------ find_my_procs() { local pid pid_list=$(pgrep --euid $EUID "$@") || return 1 #log "Find procs: $*" # Strip off optional screen local disp=$(echo ${DISPLAY%.[0-9]} | sed 's/\./\\./g') for pid in $pid_list; do local env=$(cat -v /proc/$pid/environ 2>/dev/null) [ "$env" ] || continue # Ignore optional screen echo "$env" | egrep -q "@DISPLAY=$disp(\.[0-9])?\^" 2>/dev/null || continue echo $pid return 0 done return 1 } #------------------------------------------------------------------------------ # Beginnig of Kill functions #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ # Function: kill_my <command-name> # # Like killall or pkill but only kills processes ownd by this user and with # the same DISPLAY variable. 
#------------------------------------------------------------------------------ kill_my() { kill_list $(find_my_procs "$@") ;} #------------------------------------------------------------------------------ # Function: prune_pids <list of pids> # # Filter out non-existent processes. #------------------------------------------------------------------------------ prune_pids() { local pid ret=1 for pid; do [ -d /proc/$pid ] || continue ret=0 echo $pid done return $ret } #------------------------------------------------------------------------------ # Function: recursive_children <list of pids> # # Find all decendants of the processes in <list of pids>. Echo a list of # the pids of all the children, children's children, etc. #------------------------------------------------------------------------------ recursive_children() { local pids=$(echo "$*" | sed 's/ \+/,/g') [ "$pids" ] || return local children=$(pgrep --parent $pids 2>/dev/null) [ "$children" ] || return recursive_children $children echo $children } kill_family() { kill_list $(recursive_children $*) $* } kill_children() { kill_list $(recursive_children $*) } kill_list() { local list=$(echo "$*") if [ -z "$list" ]; then say "No processes to kill" return fi ps_debug $list safe_kill -TERM $list list=$(prune_pids $list) if [ -z "$list" ]; then say "All processes died instantly" return fi say "Waiting for termination of: $(echo $list)" for try in $(seq 1 $kill_retry); do sleep 0.1 list=$(prune_pids $list) [ "$list" ] && continue local div10=$(div10 $try) say "All processes died within $div10 seconds" return done say "Killing stuborn processes: $list" safe_kill -KILL $list } #------------------------------------------------------------------------------ # Function: div10 <integer> # # Silly way to "divide" an integer by 10 via adding a decimal point. 
#------------------------------------------------------------------------------ div10() { echo $1 | sed -r 's/(.)$/.\1/' ;} safe_kill() { local pid sig=$1; command shift for pid; do [ -d /proc/$pid ] && kill $sig $pid done } ps_debug() { [ "$debug" ] || return [ "$*" ] || return say "ps_debug($*)" ps j -p "$*" | sed "s/^/$ME: /" } #------------------------------------------------------------------------------ # End of Kill functions #------------------------------------------------------------------------------
true
3ed49a9728a12c128bcbda07696962d9a506ecb1
Shell
truist/settings
/bin/countfiles
UTF-8
103
3.28125
3
[]
no_license
#!/bin/sh for DIR in */ ; do COUNT=$(find "$DIR" -type f | wc -l) ; echo "$COUNT $DIR" ; done
true
43bb525e6631b5c168c2421eac9730ddaa7e37b1
Shell
dxj728/shell_test
/Centos_init.sh
UTF-8
1,891
2.765625
3
[]
no_license
当前以Centos7 版本为例 sudo passwd root # 修改设置root密码及登录 一、设置静态IP 1.宿主机系统(win)网络设置 (1) 适配器设置,VMnet8(NAT模式)网卡上属性-->Internet协议版本4,设置IP地址,子网掩码,默认网关(重要),DNS服务器等 (2) 选择当前宿主机正在使用的网卡,属性-->共享-->允许其他网络用户通过此计算机的Internet连接来连接,并选择上一步中的网卡 2.VMware网络设置 (1) 进入编辑-->虚拟网络编辑器-->选择NAT模式,界面下方编辑子网,子网掩码,与上述一致 (2) 虚拟网络编辑器中进入NAT设置,填写网关,与上步保持一致 3.Centos7网络配置 (1) 命令 ip addr 获取当前设备网卡(例如ens33, eth0) (2) 命令 vi /etc/sysconfig/network-scripts/ifcfg-ens33 ,修改如下,后重启生效 BOOTPROTO="static" ONBOOT="yes" IPADDR="192.168.XX.XX" NETMASK="255.255.255.0" GATEWAY="192.168.XX.1" 二、更换国内源 1.Centos7更换国内源 vim /etc/yum.repos.d/CentOS-Base.repo 先做bak文件备份,后修改各项中的baseurl值为源链接 yum clean all yum makecache 三、SSH配置 1.SSH下载安装 yum install openssh-server apt install openssh-server 2.远程登录 vim /etc/ssh/sshd_config 修改如下: PermitRootLogin yes PasswordAuthentication yes service ssh restart 3.SSH免密 ssh-keygen -t rsa # 生成 id_rsa 私钥和 id_rsa.pub 公钥 ssh-copy-id root@IP # 传递公钥至对方IP机器~/.ssh/authorized_keys文件中 service sshd restart 完成当前本机-->对方机器的ssh免密 四、命令行提示符色彩修改 vi /etc/profile文件,末尾插入以下 Centos: export PS1="\e[31m\][\e[35m\]\u\e[33m\]@\e[32m\]\h \e[36m\]\W\e[31m\]]\e[33m\]\\$ \e[m\]" Ubuntu: export PS1="\e[35m\]\u\e[33m\]@\e[32m\]\h:\e[36m\]\W\e[33m\]\\$ \e[m\]" 修改完毕,source /etc/profile 后生效
true
cddbce9dcee54139a866ab00bd797e12efb65c6b
Shell
7aske/scripts
/src/calendar_notify.sh
UTF-8
1,177
3.578125
4
[]
no_license
#!/usr/bin/env sh currenttime=$(date +%s) # borrowed from https://github.com/sahasatvik/dotfiles # Run as a timed systemd --user service. seconds_before_critical=$(( 60 * 60 )) seconds_before_normal=$(( 60 * 60 * 3 )) CALENDARFILE="${CALENDARFILE:-"$HOME/.config/calendar"}" logfile="$HOME/.config/.calendarlog" touch "$logfile" log="$(cat $logfile)" IFS=, cat "$CALENDARFILE" | \ sed 's/^\s*#.*$//g' | sed '/^$/d' | sed 's/\s*,\s*/,/g' | \ while read line; do read etime title description <<< $line eventtime="$(date --date=$etime +%s)" #[ ! -z $(echo "$log" | grep "$eventtime $title") ] && continue [ $eventtime -lt $currenttime ] && continue diff=$(($eventtime-$currenttime)) echo $diff $seconds_before_normal [ $diff -gt $seconds_before_normal ] && continue hhmm="$(date --date=$etime +%H:%M)" level="normal" if [ $diff -lt "$seconds_before_critical" ]; then level="critical" fi notify-send -u "$level" \ "$title at $hhmm" "$description\n" \ -h string:x-canonical-private-synchronous:"$hhmm $title" && \ echo "$eventtime $title" >> $logfile done
true
86209afd137660b6fd877a165a3fc3becbd731f9
Shell
koblih/bash_scripts
/divisibility.sh
UTF-8
220
3.953125
4
[]
no_license
#! /bin/bash
# Report which of 2, 3 and 5 evenly divide a number.
#
# divisibility [number]
#   With an argument, tests that number directly (new, backward-compatible).
#   Without one, prompts interactively as before.
divisibility(){
  local number=${1:-}
  if [ -z "$number" ]; then
    read -r -p "Please enter a number: " number
  fi
  local i
  for i in 2 3 5; do
    # Empty input evaluates as 0 in arithmetic context, which is then
    # reported as divisible by everything -- same behaviour as before.
    if [ $(( number % i )) -eq 0 ]; then
      echo "The number ${number} is divisible by ${i}"
    fi
  done
}

divisibility "$@"
true
7d7a01f2bb76c42eda91d1449af6a5736e260583
Shell
unl-nimbus-lab/CE_Mutation
/code/mutator/runSave_b747cl.bash
UTF-8
2,450
3.78125
4
[]
no_license
#!/bin/bash
# Run every compiled mutant under a timeout and report, per template
# directory, how many mutants executed successfully.
#
# Expected layout: $mutant_path/<template>/<name>_<n>.c plus, per mutant:
#   log_<n>.txt  -- "YES"/"NO": did the mutant compile? (written earlier)
#   log2_<n>.txt -- "YES"/"NO" (+ captured stderr): did it execute cleanly?
# A successful run leaves b747cl.mat, which is renamed after the mutant.

mutant_path="/home/balaji/clang-llvm/mutationdocker/code/mutator/mutants"
execution_command="timeout 10s ./<exe file>"

echo "Mutant location : $mutant_path"
echo "Execution command : $execution_command"

executions=0
total_mutants=0
total_compiled_files=0

cd "$mutant_path" || exit 1

echo "-----------------------------------------------"
echo "Templates : Executed mutants/ Compiled mutants"
echo "-----------------------------------------------"

# Template directories in natural (version) order: 1, 2, ..., 10, 11.
for dir in $(find . -mindepth 1 -maxdepth 1 | sort --version-sort 2> /dev/null); do
    full_path_templ_dir="$mutant_path/${dir#./}"
    templ_dir=${full_path_templ_dir##*/}
    cd "$full_path_templ_dir" || continue

    mutant_succ_exec=0
    compiled_nooffiles=0

    for filename in "$full_path_templ_dir"/*.c; do
        [ -e "$filename" ] || continue
        file_path="$mutant_path/$templ_dir"
        name=${filename##*/}      # e.g. foo_3.c
        base=${name%.c}           # foo_3
        number=${base##*_}        # 3
        log="$file_path/log_$number.txt"
        log2="$file_path/log2_$number.txt"
        touch "$log2"

        # Skip mutants that did not compile.
        if [ "$(cat "$log")" != "YES" ]; then
            echo "NO" > "$log2"
            continue
        fi

        total_compiled_files=$(( total_compiled_files + 1 ))
        compiled_nooffiles=$(( compiled_nooffiles + 1 ))

        cd "$file_path" || continue
        if timeout 10s "./$base" > /dev/null 2> /dev/null; then
            executions=$(( executions + 1 ))
            mutant_succ_exec=$(( mutant_succ_exec + 1 ))
            echo "YES" > "$log2"
            mv b747cl.mat "$base.mat"
        else
            echo "Execution failed: $name in template $templ_dir, check log2_$number.txt in folder $templ_dir for more details."
            echo "NO Execution failed because the code did not finish execution within the allocated time. It got timed out with no errors." > "$log2"
            # Re-run once to capture the mutant's stderr into the log.
            stderr="$(timeout 10s "./$base" < /dev/null 2>&1)"
            echo "$stderr" >> "$log2"
        fi
    done

    total_mutants=$(( compiled_nooffiles + total_mutants ))
    echo " $templ_dir : $mutant_succ_exec/ $compiled_nooffiles"
done

echo "----------------------------------------"
echo "Total mutants executed: $executions/ $total_compiled_files"
echo "----------------------------------------"
true
78a7c0eba27ce092c4ad6ec0769991f5e5de91dc
Shell
TimSimpson/cenv
/resources/bash-support.sh
UTF-8
1,299
3.703125
4
[ "LicenseRef-scancode-unknown-license-reference", "BSL-1.0" ]
permissive
# Shell integration for cenv: wraps `cenv`, `cget` and `cmake` so that
# environment changes requested by cenv take effect in the caller's shell
# and cmake automatically picks up the active cget prefix.
# Intended to be *sourced* from ~/.bashrc, not executed.

bash_support_root=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

export CENV_ROOT="${CENV_ROOT:-$HOME/.cenv}"

cenv_path=$(command -v cenv)
cget_path=$(command -v cget)

if [ "${cenv_path}" == "" ]; then
    # Assume the user used the install script.
    cenv_path="${bash_support_root}"/../output/bvenv/bin/cenv

    # To avoid dragging everything from the virtualenv into the path,
    # make cget available this way.
    if [ "${cget_path}" == "" ]; then
        function cget(){
            "${bash_support_root}"/../output/bvenv/bin/cget "$@"
        }
    fi
fi

function cenv(){
    # cenv writes shell commands into cenv.rc; on success we source them
    # so environment changes propagate to the current shell.
    if [ -e "${CENV_ROOT}"/cenv.rc ]; then
        rm -f "${CENV_ROOT}"/cenv.rc
    fi
    "${cenv_path}" "$@"
    if [ "${?}" -eq 0 ] && [ -e "${CENV_ROOT}"/cenv.rc ]; then
        source "${CENV_ROOT}"/cenv.rc
    fi
}

function cmake(){
    local cmake_path
    cmake_path=$(command -v cmake)
    local is_build=
    for arg in "$@"; do
        if [[ "${arg}" == "--build" ]]; then
            is_build="yes"
        fi
        echo "${arg}"
    done
    # Only inject the cget toolchain for configure invocations inside an
    # active cget prefix; `cmake --build` must pass through untouched.
    if [ "${CGET_PREFIX}" == "" ] || [ "${is_build}" != "" ]; then
        "${cmake_path}" "${@}"
    else
        "${cmake_path}" \
            -DCMAKE_TOOLCHAIN_FILE="${CGET_PREFIX}"/cget/cget.cmake \
            -DCMAKE_INSTALL_PREFIX="${CGET_PREFIX}" \
            "${@}"
    fi
}
true
17387148bac274793603cbdcb3712490cba6ad1e
Shell
Lytigas/.dotfiles
/bash/components/rust-path/compose.sh
UTF-8
270
3.34375
3
[]
no_license
#!/usr/bin/env bash
set -e

# Adds cargo bin to the path if it exists
# This also adds the rustup binary
#
# Emits shell fragment lines on stdout; the dotfiles composer collects
# them into the generated profile.
emit_cargo_path() {
  if [ -d "$HOME/.cargo/bin" ]; then
    echo '# Found cargo'
    echo 'PATH=~/.cargo/bin:$PATH'
  else
    echo "# Cargo not found at $HOME/.cargo/bin"
    echo '#'
  fi
}

emit_cargo_path
true
766fb3ae61c86a3a6447a5bd0fbdd89baabe0d9f
Shell
ahoka/esrtk
/X86/GenerateVectors.sh
UTF-8
1,066
3.5
4
[ "BSD-2-Clause" ]
permissive
#!/usr/bin/env bash # ## # Generate the interrupt vector table ## rm -f InterruptVectors.is rm -f InterruptVectorsInit.icc # # 10, 11, 12, 13, 14, 17 push error codes # for idt in {0..255} do echo extern \"C\" void x86_isr_vector_$idt\(\)\; >> InterruptVectorsInit.icc done ( echo echo void echo initInterruptVectors\(\) echo { ) >> InterruptVectorsInit.icc for idt in {0..255} do ( echo -e .text echo -e .balign 16 echo -e .global x86_isr_vector_$idt echo -e x86_isr_vector_$idt: echo -e \\t cli if [ ! $idt -eq 10 -a \ ! $idt -eq 11 -a \ ! $idt -eq 12 -a \ ! $idt -eq 13 -a \ ! $idt -eq 14 -a \ ! $idt -eq 17 ] then echo -e \\t pushl \$0 fi echo -e \\t pushl \$$idt echo -e \\t jmp x86_isr_vector_common echo ) >> InterruptVectors.is echo -e \\t x86_isr_init\($idt, \&x86_isr_vector_$idt\)\; >> InterruptVectorsInit.icc done echo -e } \\n >> InterruptVectorsInit.icc
true
2bc738c2653b07286454dc951fadd6fcf5466595
Shell
SamuelBucheliZ/jbehave-minimal-example
/ci/scripts/exec-hsm-tests.sh
UTF-8
251
3.0625
3
[ "Apache-2.0" ]
permissive
#!/bin/bash
# CI step: run the project's integration tests with Maven, publish the
# Allure results and propagate Maven's exit status to the CI runner.
# Expects $project_name to name the project checkout directory.

set -x

pushd "${project_name}" || exit 1
echo "Starting tests with maven"
mvn clean integration-test
test_status=$?    # remember the build result; reports are copied regardless
echo "Tests done"
popd

echo "Copying Allure results"
cp "${project_name}"/target/allure-results/* test-results/

exit $test_status
true
0c890faacd2321bef87e208b52e575862b76a4f6
Shell
mazemax/docker-sonarqube
/scanner-linux.sh
UTF-8
424
2.78125
3
[ "MIT" ]
permissive
#!/bin/bash
# Download and unpack the SonarQube scanner CLI into /opt/sonarqube and
# verify that it runs.

installScanner() {
    # Scanner release to install; was previously repeated in four places.
    local version="3.1.0.1141"

    sudo mkdir /opt/sonarqube
    sudo chmod 777 /opt/sonarqube
    cd /opt/sonarqube || return 1

    wget "https://sonarsource.bintray.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${version}-linux.zip"
    unzip "sonar-scanner-cli-${version}-linux.zip"

    # NOTE: PATH/env exports only last for this script's own process; a
    # caller that wants them must source this script.
    export PATH="$PATH:/opt/sonarqube/sonar-scanner-${version}-linux/bin"
    export SONAR_SCANNER_OPTS="-Xmx4024m"

    # Smoke-test the installation.
    sonar-scanner -v
    sonar-scanner -h
}

installScanner
true
930e36c946f57500bac7ed299b744106510db3cb
Shell
pioneerli/shell
/day04/initCountWhile.sh
UTF-8
223
3.328125
3
[]
no_license
#!/bin/bash
# Interactive toy: sum the integers 1..N (N defaults to 100).

# sum_to LIMIT -- print the sum 1 + 2 + ... + LIMIT.
sum_to() {
  local limit=$1
  local total=0
  local i
  for (( i = 1; i <= limit; i++ )); do
    total=$(( total + i ))
  done
  echo "$total"
}

# Quoted to prevent the asterisks from triggering pathname expansion.
echo "***************欢迎来到求和小程序*******************"
read -r -p "请输入一个数" num
num=${num:-100}
sum=$(sum_to "$num")
echo "1到$num 的和为$sum"
true
2f72d2b00a86308ab8a71bd45b583a3796542da7
Shell
Illedran/beam-smbjoin
/scripts/skewadj_bucket_all.sh
UTF-8
2,191
3.421875
3
[ "MIT" ]
permissive
#!/usr/bin/env bash
# Build the project and create SkewAdj-bucketed copies of the generated
# event datasets on GCS, launching one Dataflow job per Zipf shape.
set -e

CURRENT_DIR=$(pwd)
FILE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# Configuration
NUM_WORKERS=32
NUM_WORKERS_KEYS=8
BUCKET_SIZE_MB=300 # For SkewAdj buckets
ZIPF_SHAPES="1.10 1.20 1.30 1.40"

# Vars
GCS_BUCKET='gs://andrea_smb_test'
DATA_BUCKET="${GCS_BUCKET}/generated_data"
SCHEMA_DIR="${GCS_BUCKET}/schemas"
TMP_LOCATION="${GCS_BUCKET}/tmp"
STAGING_LOCATION="${TMP_LOCATION}/staging"
EVENT_SCHEMA="${SCHEMA_DIR}/Event.avsc"
KEY_SCHEMA="${SCHEMA_DIR}/Key.avsc"

# NOTE: these flag strings are intentionally left unquoted at the call
# sites below so they word-split into individual arguments.
DATAFLOW_ARGS="--numWorkers=${NUM_WORKERS} --maxNumWorkers=${NUM_WORKERS} --tempLocation=${TMP_LOCATION} --stagingLocation=${STAGING_LOCATION} --project=***REMOVED*** --runner=DataflowRunner --region=europe-west1 --workerMachineType=n1-standard-4"
DATAFLOW_ARGS_KEYS="--numWorkers=${NUM_WORKERS_KEYS} --maxNumWorkers=${NUM_WORKERS_KEYS} --autoscalingAlgorithm=NONE --tempLocation=${TMP_LOCATION} --stagingLocation=${STAGING_LOCATION} --project=***REMOVED*** --runner=DataflowRunner --region=europe-west1 --workerMachineType=n1-standard-4"

INPUT_KEYS="${DATA_BUCKET}/keys/*.avro"
OUTPUT_KEYS_BUCKETED_SKEWADJ="${DATA_BUCKET}/bucketed_keys_skewadj"

echo "Compiling..." && cd "${FILE_DIR}/.." && sbt ";compile ;pack"

echo "Generating data..."
time=$(date +%s)
# Keys bucketing job (currently disabled upstream):
#gsutil -m rm -r ${OUTPUT_KEYS_BUCKETED_SKEWADJ} || true
#target/pack/bin/smb-make-buckets-skew-adj-job --jobName="smbmakebuckets-skewadj-keys-$time-$( printf "%04x%04x" $RANDOM $RANDOM )" --input=${INPUT_KEYS} --output=${OUTPUT_KEYS_BUCKETED_SKEWADJ} --bucketSizeMB=${BUCKET_SIZE_MB} --schemaFile=${KEY_SCHEMA} ${DATAFLOW_ARGS_KEYS}

for i in ${ZIPF_SHAPES}; do
    INPUT_EVENTS="${DATA_BUCKET}/events/s$i/*.avro"
    OUTPUT_EVENTS_BUCKETED_SKEWADJ="${DATA_BUCKET}/bucketed_events_skewadj/s$i"
    sStr=${i/./}          # "1.10" -> "110" for the job name
    time=$(date +%s)
    gsutil -m rm -r "${OUTPUT_EVENTS_BUCKETED_SKEWADJ}" || true
    # --input is quoted so the embedded *.avro glob reaches Dataflow
    # literally instead of being expanded against the local filesystem.
    target/pack/bin/smb-make-buckets-skew-adj-job \
        --jobName="smbmakebuckets-skewadj-events-s$sStr-$time-$( printf "%04x%04x" $RANDOM $RANDOM )" \
        --input="${INPUT_EVENTS}" \
        --output="${OUTPUT_EVENTS_BUCKETED_SKEWADJ}" \
        --bucketSizeMB="${BUCKET_SIZE_MB}" \
        --schemaFile="${EVENT_SCHEMA}" \
        ${DATAFLOW_ARGS}
done

cd "${CURRENT_DIR}"
true
ff71dd5600f18cd628be768ebeb4e551f93fabbe
Shell
ShubhamChaurasia/spark-rapids
/jenkins/spark-tests.sh
UTF-8
7,285
2.5625
3
[ "Apache-2.0", "BSD-3-Clause" ]
permissive
#!/bin/bash
#
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Nightly CI driver: download the cudf/rapids jars and a Spark binary
# distribution, stand up a local standalone Spark cluster and run the
# python integration-test suites against it.

set -ex

nvidia-smi

. jenkins/version-def.sh

ARTF_ROOT="$WORKSPACE/jars"
# Left unquoted at call sites on purpose: the string must word-split
# into the mvn executable plus its flags.
MVN_GET_CMD="mvn org.apache.maven.plugins:maven-dependency-plugin:2.8:get -B \
    -Dmaven.repo.local=$WORKSPACE/.m2 \
    $MVN_URM_MIRROR -Ddest=$ARTF_ROOT"

rm -rf "$ARTF_ROOT" && mkdir -p "$ARTF_ROOT"

# maven download SNAPSHOT jars: cudf, rapids-4-spark, spark3.0
$MVN_GET_CMD -DremoteRepositories=$CUDF_REPO \
    -DgroupId=ai.rapids -DartifactId=cudf -Dversion=$CUDF_VER -Dclassifier=$CUDA_CLASSIFIER

$MVN_GET_CMD -DremoteRepositories=$PROJECT_REPO \
    -DgroupId=com.nvidia -DartifactId=rapids-4-spark_$SCALA_BINARY_VER -Dversion=$PROJECT_VER

$MVN_GET_CMD -DremoteRepositories=$PROJECT_TEST_REPO \
    -DgroupId=com.nvidia -DartifactId=rapids-4-spark-udf-examples_$SCALA_BINARY_VER -Dversion=$PROJECT_TEST_VER

$MVN_GET_CMD -DremoteRepositories=$PROJECT_TEST_REPO \
    -DgroupId=com.nvidia -DartifactId=rapids-4-spark-integration-tests_$SCALA_BINARY_VER -Dversion=$PROJECT_TEST_VER

if [ -z "$CUDA_CLASSIFIER" ]; then
    CUDF_JAR="$ARTF_ROOT/cudf-$CUDF_VER.jar"
else
    CUDF_JAR="$ARTF_ROOT/cudf-$CUDF_VER-$CUDA_CLASSIFIER.jar"
fi
export RAPIDS_PLUGIN_JAR="$ARTF_ROOT/rapids-4-spark_${SCALA_BINARY_VER}-$PROJECT_VER.jar"
RAPIDS_UDF_JAR="$ARTF_ROOT/rapids-4-spark-udf-examples_${SCALA_BINARY_VER}-$PROJECT_TEST_VER.jar"
RAPIDS_TEST_JAR="$ARTF_ROOT/rapids-4-spark-integration-tests_${SCALA_BINARY_VER}-$PROJECT_TEST_VER.jar"

# Fetch and unpack the packaged python integration tests.
$MVN_GET_CMD -DremoteRepositories=$PROJECT_TEST_REPO \
    -DgroupId=com.nvidia -DartifactId=rapids-4-spark-integration-tests_$SCALA_BINARY_VER -Dversion=$PROJECT_TEST_VER -Dclassifier=pytest -Dpackaging=tar.gz

RAPIDS_INT_TESTS_HOME="$ARTF_ROOT/integration_tests/"
RAPIDS_INT_TESTS_TGZ="$ARTF_ROOT/rapids-4-spark-integration-tests_${SCALA_BINARY_VER}-$PROJECT_TEST_VER-pytest.tar.gz"
tar xzf "$RAPIDS_INT_TESTS_TGZ" -C "$ARTF_ROOT" && rm -f "$RAPIDS_INT_TESTS_TGZ"

# Fetch and unpack the Spark binary distribution.
$MVN_GET_CMD -DremoteRepositories=$SPARK_REPO \
    -DgroupId=org.apache -DartifactId=spark -Dversion=$SPARK_VER -Dclassifier=bin-hadoop3.2 -Dpackaging=tgz

export SPARK_HOME="$ARTF_ROOT/spark-$SPARK_VER-bin-hadoop3.2"
export PATH="$SPARK_HOME/bin:$SPARK_HOME/sbin:$PATH"
tar zxf "$SPARK_HOME.tgz" -C "$ARTF_ROOT" && \
    rm -f "$SPARK_HOME.tgz"

IS_SPARK_311_OR_LATER=0
[[ "$(printf '%s\n' "3.1.1" "$SPARK_VER" | sort -V | head -n1)" = "3.1.1" ]] && IS_SPARK_311_OR_LATER=1

# Spark < 3.1.1 retries failing tasks; newer versions fail fast.
export SPARK_TASK_MAXFAILURES=1
[[ "$IS_SPARK_311_OR_LATER" -eq "0" ]] && SPARK_TASK_MAXFAILURES=4

IS_SPARK_311=0
[[ "$SPARK_VER" == "3.1.1" ]] && IS_SPARK_311=1

export PATH="$SPARK_HOME/bin:$SPARK_HOME/sbin:$PATH"

# stop and restart SPARK ETL
stop-slave.sh
stop-master.sh
start-master.sh
start-slave.sh spark://$HOSTNAME:7077
jps

echo "----------------------------START TEST------------------------------------"
pushd "$RAPIDS_INT_TESTS_HOME"

export BASE_SPARK_SUBMIT_ARGS="$BASE_SPARK_SUBMIT_ARGS \
    --master spark://$HOSTNAME:7077 \
    --conf spark.sql.shuffle.partitions=12 \
    --conf spark.task.maxFailures=$SPARK_TASK_MAXFAILURES \
    --conf spark.dynamicAllocation.enabled=false \
    --conf spark.driver.extraJavaOptions=-Duser.timezone=UTC \
    --conf spark.executor.extraJavaOptions=-Duser.timezone=UTC \
    --conf spark.sql.session.timeZone=UTC"
export SEQ_CONF="--executor-memory 16G \
    --total-executor-cores 6"

# currently we hardcode the parallelism and configs based on our CI node's
# hardware specs, we can make it dynamically generated if this script is
# going to be used in other scenarios in the future
export PARALLEL_CONF="--executor-memory 4G \
    --total-executor-cores 2 \
    --conf spark.executor.cores=2 \
    --conf spark.task.cpus=1 \
    --conf spark.rapids.sql.concurrentGpuTasks=2 \
    --conf spark.rapids.memory.gpu.allocFraction=0.15 \
    --conf spark.rapids.memory.gpu.minAllocFraction=0 \
    --conf spark.rapids.memory.gpu.maxAllocFraction=0.15"
export CUDF_UDF_TEST_ARGS="--conf spark.rapids.memory.gpu.allocFraction=0.1 \
    --conf spark.rapids.memory.gpu.minAllocFraction=0 \
    --conf spark.rapids.python.memory.gpu.allocFraction=0.1 \
    --conf spark.rapids.python.concurrentPythonWorkers=2 \
    --conf spark.executorEnv.PYTHONPATH=${RAPIDS_PLUGIN_JAR} \
    --conf spark.pyspark.python=/opt/conda/bin/python \
    --py-files ${RAPIDS_PLUGIN_JAR}"

export TEST_PARALLEL=0  # disable spark local parallel in run_pyspark_from_build.sh
export TEST_TYPE="nightly"
export LOCAL_JAR_PATH=$ARTF_ROOT
export SCRIPT_PATH="$(pwd -P)"
export TARGET_DIR="$SCRIPT_PATH/target"
mkdir -p "$TARGET_DIR"

# run_test NAME -- run one pytest group ("all", "cudf_udf_test",
# "cache_serializer", or a single test file name without .py).
run_test() {
    local TEST=${1//\.py/}
    local LOG_FILE
    case "$TEST" in
      all)
        SPARK_SUBMIT_FLAGS="$BASE_SPARK_SUBMIT_ARGS $SEQ_CONF" \
            ./run_pyspark_from_build.sh
        ;;

      cudf_udf_test)
        SPARK_SUBMIT_FLAGS="$BASE_SPARK_SUBMIT_ARGS $SEQ_CONF $CUDF_UDF_TEST_ARGS" \
            ./run_pyspark_from_build.sh -m cudf_udf --cudf_udf
        ;;

      cache_serializer)
        SPARK_SUBMIT_FLAGS="$BASE_SPARK_SUBMIT_ARGS $SEQ_CONF \
            --conf spark.sql.cache.serializer=com.nvidia.spark.rapids.shims.spark311.ParquetCachedBatchSerializer" \
            ./run_pyspark_from_build.sh -k cache_test
        ;;

      *)
        echo -e "\n\n>>>>> $TEST...\n"
        LOG_FILE="$TARGET_DIR/$TEST.log"
        # set dedicated RUN_DIRs here to avoid conflict between parallel tests
        RUN_DIR="$TARGET_DIR/run_dir_$TEST" \
            SPARK_SUBMIT_FLAGS="$BASE_SPARK_SUBMIT_ARGS $PARALLEL_CONF" \
            ./run_pyspark_from_build.sh -k "$TEST" >"$LOG_FILE" 2>&1
        CODE="$?"
        if [[ $CODE == "0" ]]; then
            # On success only show the pytest summary section.
            sed -n -e '/test session starts/,/deselected,/ p' "$LOG_FILE" || true
        else
            cat "$LOG_FILE" || true
        fi
        return $CODE
        ;;
    esac
}
export -f run_test

# integration tests
if [[ $PARALLEL_TEST == "true" ]] && [ -x "$(command -v parallel)" ]; then
    # put most time-consuming tests at the head of queue
    time_consuming_tests="join_test.py generate_expr_test.py parquet_write_test.py"
    tests_list=$(find "$SCRIPT_PATH"/src/main/python/ -name "*_test.py" -printf "%f ")
    tests=$(echo "$time_consuming_tests $tests_list" | tr ' ' '\n' | awk '!x[$0]++' | xargs)
    # --halt "now,fail=1": exit when the first job fail, and kill running jobs.
    # we can set it to "never" and print failed ones after finish running all tests if needed
    # --group: print stderr after test finished for better readability
    parallel --group --halt "now,fail=1" -j5 run_test ::: $tests
else
    run_test all
fi

# cudf_udf_test
run_test cudf_udf_test

# Temporarily only run on Spark 3.1.1 (https://github.com/NVIDIA/spark-rapids/issues/3311)
if [[ "$IS_SPARK_311" -eq "1" ]]; then
    run_test cache_serializer
fi

popd
stop-slave.sh
stop-master.sh
ecb10dc733685f34e3e5f6d4d7b44f2682a8e2a3
Shell
0x10101/mtik_initrd_hacks
/for_ftp_upload/OWL.sh
UTF-8
302
2.703125
3
[]
no_license
# Bootstrap busybox from /flash storage on a MikroTik device and start a
# telnet daemon on port 22111 that drops callers into bash inside /system.

ROOT=/flash/rw/disk/OWL
cd "$ROOT" || exit 1

# First run only: make busybox executable and install its applet symlinks.
if [ -f "$ROOT/bin/busybox" ] && [ ! -f "$ROOT/bin/sh" ]; then
    echo "Initializing busybox"
    chmod 700 "$ROOT/bin/busybox"
    "$ROOT/bin/busybox" --install -s "$ROOT/bin"
fi

export PATH="/rw/disk/OWL/bin:$PATH"
echo "Launching telnetd"
# -F keeps telnetd in the foreground; -l bash is the login program.
busybox chroot /system telnetd -p 22111 -F -l bash
true
44ac6ff58b433936dfbcfbc009118fdd7dfb1033
Shell
clarke-lab/CHO_cell_AS_analysis
/scripts/stringtie_merge.sh
UTF-8
2,108
3.46875
3
[]
no_license
#!/usr/bin/env bash
#### Merge the individual transcript assemblies
#### inputs are: 1) assembled transcript parent directory 2) the reference GTF
#### 3) reference genome directory
#### Written by NIBRT: colin.clarke@nibrt.ie 12-2019

if (($# == 0)); then
    echo "Usage:"
    echo "-t = assembled transcript directory"
    echo "-g = path to reference annotation"
    echo "-r reference_genome_dir"
    exit 2
fi

while getopts t:g:r: option; do
    case "${option}" in
        t) TRANSCRIPT_DIR=${OPTARG} ;;
        g) GTF=${OPTARG} ;;
        r) REF_DIR=${OPTARG} ;;
    esac
done

# Collect the per-sample GTFs into a merge list for stringtie.
readlink -f "$TRANSCRIPT_DIR"/individual_gtfs/*.gtf >> "$TRANSCRIPT_DIR/mergelist.txt"

stringtie \
    --merge "$TRANSCRIPT_DIR/mergelist.txt" \
    -o "$TRANSCRIPT_DIR/stringtie_original.gtf" \
    -G "$GTF" \
    -f 0.1 \
    -c 10

# create a file linking stringtie ID to ENSEMBL gene ID
grep -wFf "$REF_DIR/protein.coding.genes.list" "$TRANSCRIPT_DIR/stringtie_original.gtf" | \
    grep -v exon | awk '{print $10, $NF}' | uniq | tr -d \" | tr -d \; > "$TRANSCRIPT_DIR/stringtie_ensembl_gene_mapping.list"

# append ensembl gene ids to MSTRG GTF
perl scripts/mstrg_prep.pl "$TRANSCRIPT_DIR/stringtie_original.gtf" > "$TRANSCRIPT_DIR/stringtie_merged.appended.gtf"

# find instances where stringtie has assembled transcripts from 2 or more
# overlapping loci and created a new "gene": the final field of the GTF
# line then carries an MSTRG ID instead of an ENS ID.
grep 'MSTRG.*|ENSCGRG.*|ENSC.*' "$TRANSCRIPT_DIR/stringtie_merged.appended.gtf" | \
    grep '\<transcript\>' | awk '$NF ~/MSTRG/ {print $NF}' > "$TRANSCRIPT_DIR/removed.overlapped.MSTRG.transcripts"

# remove assembled transcripts spanning two or more sense overlapping genes
grep -v -F -f "$TRANSCRIPT_DIR/removed.overlapped.MSTRG.transcripts" "$TRANSCRIPT_DIR/stringtie_merged.appended.gtf" > "$TRANSCRIPT_DIR/stringtie_merged.appended.fp.filtered.gtf"

# remove transcripts without strand
awk '$7 != "." {print}' "$TRANSCRIPT_DIR/stringtie_merged.appended.fp.filtered.gtf" > "$TRANSCRIPT_DIR/stringtie.gtf"

gffcompare \
    -o "$TRANSCRIPT_DIR/gffcompare" \
    -r "$GTF" "$TRANSCRIPT_DIR/stringtie.gtf"

# END
true
61cf5198a6ffb7226ae086f6b9d359d71e00d9f5
Shell
yarba/G4ValHAD
/test23/g4val-upload-scripts/gen_NA49_upload_json_specs.sh
UTF-8
2,689
2.90625
3
[]
no_license
#!/usr/bin/env bash
#
# NOTE: python, numpy and ujson must be setup, all compatible with root
# path to $ROOTSYS/lib must be added to PYTHONPATH
# python uploader (by A.Dotti) must be installed:
# git clone https://yarba@gitlab.cern.ch/PhysicsValidationDB/uploader.git
# Phys.lists are FTFP_BERT, QGSP_BERT & NuBeam
#
# Generates DoSSiER upload JSON for the NA49 proton-C integrated spectra
# from the per-model ROOT histogram files and moves the results into the
# version-specific upload directory.
#
# Usage: gen_NA49_upload_json_specs.sh <geant4-version> [version-tag]

# Each entry is "<histogram-file suffix>=<model name for the metadata>".
ModelDetails=( 'ftfp_bert=FTFP_BERT' 'qgsp_bert=QGSP_BERT' 'NuBeam=NuBeam' )

gdir=/g4/g4p/pbs/g4-had-validation/regression-test-files

g4version=${1}
g4vtag=${2}

if [ -z "${g4version}" ]; then
    echo "please, provide geant4 version - it is mandatory"
    exit 1    # was a bare `exit` (status 0) despite the fatal condition
fi

upload_dir=${gdir}/test23/${g4version}/g4val-upload-json
if [ ! -d "${upload_dir}" ]; then
    /bin/mkdir "${upload_dir}"
fi

for entry in "${ModelDetails[@]}"; do
    model=${entry%%=*}
    mid=${entry#*=}

    if [ -e "NA49-proton-C-metadata-integrated-spectra-${model}.json" ]; then
        /bin/rm "NA49-proton-C-metadata-integrated-spectra-${model}.json"
    fi

    # Substitute the model name and version tag into the metadata template.
    sed "s/MODEL/$mid/; s/VTAG/$g4vtag/" NA49-proton-C-metadata-integrated-spectra.json \
        > "NA49-proton-C-metadata-integrated-spectra-${model}.json"

    python ../../uploader/DoSSiERconverter.py -c convert \
        -o "NA49-proton-C-integrated-spectra-${model}.json" \
        --metadatafile "NA49-proton-C-metadata-integrated-spectra-${model}.json" \
        ${gdir}/test23/${g4version}/na49-histo/protonC158.0GeV${model}.root:piplus_dNdxF \
        ${gdir}/test23/${g4version}/na49-histo/protonC158.0GeV${model}.root:piplus_pT \
        ${gdir}/test23/${g4version}/na49-histo/protonC158.0GeV${model}.root:piminus_dNdxF \
        ${gdir}/test23/${g4version}/na49-histo/protonC158.0GeV${model}.root:piminus_pT \
        ${gdir}/test23/${g4version}/na49-histo/protonC158.0GeV${model}.root:proton_dNdxF \
        ${gdir}/test23/${g4version}/na49-histo/protonC158.0GeV${model}.root:proton_pT \
        ${gdir}/test23/${g4version}/na49-histo/protonC158.0GeV${model}.root:antiproton_dNdxF \
        ${gdir}/test23/${g4version}/na49-histo/protonC158.0GeV${model}.root:antiproton_pT \
        ${gdir}/test23/${g4version}/na49-histo/protonC158.0GeV${model}.root:neutron_dNdxF

    /bin/mv "NA49-proton-C-integrated-spectra-${model}.json" "${upload_dir}"/.
    /bin/rm "NA49-proton-C-metadata-integrated-spectra-${model}.json"
done
true
1c361ee172840094290c43ef85aca72dd37afb6f
Shell
fiefdx/LitePipeline
/examples/multiple_actions_without_venv/pack.sh
UTF-8
372
3.21875
3
[]
no_license
#!/bin/bash
# Package the multiple_actions_without_venv application.
# Usage: pack.sh [tar.gz|zip]   (anything other than "tar.gz" packs a zip)

cmd_path=$(dirname "$0")
echo "$cmd_path"
cd "$cmd_path" || exit 1

target=$1

# Archive from the parent folder so the archive contains a single
# top-level application directory.
cd ..
if [ "$target" == "tar.gz" ]; then
    echo "pack tar.gz package"
    tar cvzf ./multiple_actions_without_venv.tar.gz multiple_actions_without_venv
else
    echo "pack zip package"
    zip -r ./multiple_actions_without_venv.zip multiple_actions_without_venv
fi

echo "end pack application"
true
3187500010c53a4dd69893318e66c847ef272532
Shell
Kreedols/skriptimine
/praks9/yl3.3
UTF-8
371
3.15625
3
[]
no_license
#!/bin/bash
# Draw a numbered rectangle outline of asterisks:
# first and last rows are solid, middle rows are hollow.

# draw_box ROWS STARS -- print the ROWS x STARS rectangle to stdout.
draw_box() {
  local rows=$1
  local stars=$2
  local i s inner
  for (( i = 1; i <= rows; i++ )); do
    if [ "$i" -eq 1 ] || [ "$i" -eq "$rows" ]; then
      # Solid top/bottom edge.
      echo -n "$i. "
      for (( s = 1; s <= stars; s++ )); do
        echo -n "*"
      done
    else
      # Hollow middle row: star, (stars-2) spaces, star.
      echo -n "$i. *"
      inner=$(( stars - 2 ))
      for (( s = 1; s <= inner; s++ )); do
        echo -n " "
      done
      echo -n "*"
    fi
    echo ""
  done
}

echo "Sisesta ridade arv: "
read -r rida
echo "Sisesta tärnide kogus: "
read -r tarn
# Default to 0 so EOF / empty input draws nothing instead of erroring.
draw_box "${rida:-0}" "${tarn:-0}"
true
b5a6f548e0d2688b6e06a7af35e4bff8f73cc202
Shell
quantaosun/psfgen
/scripts/mmgbsa/mmgbsa_doit.sh
UTF-8
2,773
4.03125
4
[]
no_license
#!/bin/bash
#
# MMGBSA_DOIT (c) 2019 cameron f abrams cfa22@drexel.edu
#
# Use namd2 to peform MMBGSA interaction energy calculations
# on DCD trajectories extracted from raw simulation output DCD's
# using STRIPSPLIT. This operates on a single replica in a single
# system.
#
# Pipeline: split the trajectory into A, B and AB (complex) systems with
# VMD, recompute potential energies for each with namd2, then write the
# running average of E(AB) - E(A) - E(B) to the results file.

if [[ -z "${PSFGEN_BASEDIR}" ]]; then
    PSFGEN_BASEDIR=${HOME}/research/psfgen
    if [[ ! -d $PSFGEN_BASEDIR ]]; then
        echo "Error: No PSFGEN_BASEDIR found."
        exit 1
    fi
fi
source "$PSFGEN_BASEDIR/scripts/utils.sh"

# these two commands must be in your PATH
check_command vmd
check_command namd2

TEMPLATECF="$PSFGEN_BASEDIR/scripts/mmgbsa/mmgbsa_template.namd"
DOCALC="YES"
FORCE="YES"
final_results_file="results.rae"
Asel=""
Bsel=""
Aname="A"
Bname="B"
ABname="AB"
NPE=1
stride=1

while [ "$#" -gt 0 ]; do
    case "$1" in
        -psf) PSF="$2"; shift 2;;
        -dcd) DCD="$2"; shift 2;;
        -Asel) Asel="$2"; shift 2;;
        -Bsel) Bsel="$2"; shift 2;;
        -Aname) Aname="$2"; shift 2;;
        -Bname) Bname="$2"; shift 2;;
        -ABname) ABname="$2"; shift 2;;
        -NPE) NPE="$2"; shift 2;;
        -stride) stride="$2"; shift 2;;
        -o) final_results_file="$2"; shift 2;;
        --force) FORCE="YES"; shift 1;;
        -namd-config-template) TEMPLATECF="$2"; shift 2;;
        # BUG FIX: the unknown-argument arm previously did not shift,
        # which left $1 in place and looped forever.
        *) echo "unrecognized argument: $1"; shift 1;;
    esac
done

# check to see if calculation was already performed for this replica
if [ -f "$final_results_file" ]; then
    cp "$final_results_file" "${final_results_file}.bak"
    echo "$final_results_file copied to ${final_results_file}.bak"
    if [ "$FORCE" == "NO" ]; then
        echo "Final results $final_results_file already exists. Use --force to force a recalculation."
        DOCALC=NO
    else
        echo "Recalculating."
    fi
fi

# stripsplit: extract the A, B and AB sub-trajectories
vmd -dispdev text -e "$PSFGEN_BASEDIR/scripts/mmgbsa/stripsplit.tcl" -args \
    -Asel "$Asel" -Bsel "$Bsel" -psf "$PSF" -stride "$stride" \
    -Aname "$Aname" -Bname "$Bname" -ABname "$ABname" "$DCD"

# generate the config file for each type of system, run namd2 to compute
# energies on the existing trajectory, extract potential energy from the
# ENERGY lines in the namd2 log
if [ "$DOCALC" == "YES" ]; then
    for sys in "$Aname" "$Bname" "$ABname"; do
        pf=m-${sys}
        c=${pf}.namd
        l=${pf}.log
        e=${pf}.e
        sed "s/%SYS%/${sys}/g" "${TEMPLATECF}" > "$c"
        # BUG FIX: the message used the undefined variable $namd2.
        echo "Running namd2 +p${NPE} $c"
        namd2 +p"${NPE}" "$c" > "$l"
        echo "Generated $l"
        if [ "$sys" == "$Aname" ]; then
            # Keep the timestep column from the A log to label each frame.
            grep ^ENERGY "$l" | awk '{print $2,$14}' > "$e"
        else
            grep ^ENERGY "$l" | awk '{print $14}' > "$e"
        fi
    done
fi

# perform the running average of the difference (complex)-((ligand)+(target))
# potential energies
paste "m-${Aname}.e" "m-${Bname}.e" "m-${ABname}.e" | \
    awk 'BEGIN{ra=0.0} {ra+=($4-$3-$2); print $1,ra/NR}' \
    > "$final_results_file"
echo "Generated $final_results_file."
true
bf7271dbb372bf48dcf059cc0587b8c35f7bd4cb
Shell
pragmagrid/tstat
/bootstrap.sh
UTF-8
420
2.671875
3
[]
no_license
#!/bin/bash
#
# @Copyright@
# @Copyright@
#
# Rocks roll bootstrap for tstat: fetch sources, install the build
# prerequisite and prepare the userguide source files.

# set google url for downloading sources
export SURL="https://drive.google.com/open?id=1GhZuRdqWzE2mZTJ87UEL0tay-Yh9oCP8"

# download sources
. /opt/rocks/share/devel/src/roll/etc/bootstrap-functions.sh

# install prerequisites for building libpcap library
# (-y: bootstrap runs non-interactively; a prompt would abort the install)
yum -y --enablerepo=base install libpcap-devel

# extract doc files for making userguide
(cd src/tstat && make prepfiles)
true
c68107ff814dd9bf91b33fc02f7a8c5bae04cf7f
Shell
ginduc/vagrants
/ruby2-rails4-mongodb/bootstrap.sh
UTF-8
1,320
3.109375
3
[]
no_license
#!/usr/bin/env bash
# Vagrant provisioner: Ruby (via RVM) + Rails gems + MongoDB on Ubuntu.
# Guarded by a /home/vagrant/.firstboot marker so it only runs once.

set -x
export DEBIAN_FRONTEND=noninteractive

if [ ! -e "/home/vagrant/.firstboot" ]; then
    # update us. -> ph. in apt sources.list to use APT server from the Philippines
    perl -i -p -e 's/\/\/us\./\/\/ph./g' /etc/apt/sources.list

    # install mongodb repo
    apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
    echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/mongodb.list

    # remove ufw firewall
    dpkg --purge ufw

    apt-get update

    # install required packages
    apt-get install -y --force-yes git vim curl unzip software-properties-common python-software-properties

    # install ruby dev env
    curl -L https://get.rvm.io | bash -s stable --ruby
    source /usr/local/rvm/scripts/rvm
    echo "gem: --no-ri --no-rdoc" > /etc/gemrc
    gem install bundler rails execjs mongoid rspec-rails cucumber-rails database_cleaner rack mongodb mongo bson bson_ext
    usermod -a -G rvm vagrant

    # install mongodb; -y added so the unattended provision does not stall,
    # then pin the package so apt upgrades leave it alone
    apt-get install -y mongodb-10gen
    echo "mongodb-10gen hold" | dpkg --set-selections
    service mongodb start
    mkdir -p /data/db/
    chown vagrant /data/db

    # config local datetime
    mv /etc/localtime /etc/localtime.bak
    ln -s /usr/share/zoneinfo/Asia/Manila /etc/localtime

    touch /home/vagrant/.firstboot
    reboot
fi
true
5bfb7aebbb12f521db2ac7d223a3b775265b3403
Shell
svallero/vaf-storage
/etc/af-alien-lib.sh
UTF-8
6,672
3.875
4
[]
no_license
#!/bin/bash # # af-alien-lib.sh -- by Dario Berzano <dario.berzano@cern.ch> # # Library functions for the MSS interface between xrootd and AliEn. # # === EXIT CODES === # # These are the return values of some functions that must be passed to the shell # by the main program in order to be caught. # # - 10 : timeout for alien-token-init # - 11 : cannot obtain a token # - 20 : timeout for xrdgsiproxy # - 21 : cannot obtain a proxy # # === UNLOCKING THE PRIVATE KEY === # # The assumption of automatic authentication is that the private key is saved # unlocked, i.e. not encrypted i.e. not passphrase-protected. # # To unprotect a private key, do something like: # # $ cd ~/.globus # $ mv userkey.pem userkey-protected.pem # $ openssl rsa -in userkey-protected.pem -out userkey.pem # $ chmod 0400 *.pem # # # Global variables (nothing needs to be modified, probably) # # Global configuration source /etc/aafrc || exit 1 # Location of the AliEn token export TOKEN="/tmp/gclient_token_$UID" # Location of the Grid x509 proxy export PROXY="/tmp/x509up_u$UID" # Location of the AliEn environment export ALIEN_ENV="/tmp/gclient_env_$UID" # Log file; if empty, output on stdout export LOGFILE='' # Expiration "tolerance": check if token or proxy will expire within the next X # seconds. This is useful because it may take a long time between the AliEn LFN # query and the subsequent transfer. There are two different values for proxy # and token because they usually have different durations. The default values # are very big because of usually common server downtimes: it's 23h for token # and 11h for the proxy export TOL_TOKEN=82800 export TOL_PROXY=39600 # This script waits for other instances of xrdgsiproxy and alien-token-init to # quit before asking for a new token, in order to avoid conflicts and multiple # requests, and to enable multithreading in file transfer. 
# Guard against concurrent token/proxy requests from other script instances.
# However, these
# commands may get stuck for whatever reason: here we set the maximum number of
# seconds this script waits for other instances to terminate before giving up
# and terminating with an error
export MAXWAIT=60

# Number of tries to get the token or the proxy
export TRIES=3

#
# Library functions
#

# Tells if AliEn token will be expired after TOL_TOKEN seconds (gives a better
# control than alien-token-info).
# Returns 0 when the token file exists and is still valid beyond the
# TOL_TOKEN tolerance window, 1 otherwise.
function IsTokenExpiring() {
    if [ -e "$ALIEN_ENV" ] && [ -e "$TOKEN" ]; then
        # These times are Unix timestamps (no timezone-dependant) in seconds
        local EXP=`grep '^Expiretime = ' $TOKEN | cut -b14-`
        local NOW=`date +%s`
        # Shift expiry back by the tolerance so "about to expire" counts as expired
        let EXP=EXP-TOL_TOKEN
        if [ $EXP -gt $NOW ]; then
            return 0
        fi
    fi
    return 1
}

# Tells if Grid proxy will be expired after TOL_PROXY seconds - since it is a
# standard x509 certificate, we can directly ask openssl, because it returns 1
# both when the proxy is not found or when it is expiring
function IsProxyExpiring() {
    openssl x509 -in "$PROXY" -noout -checkend $TOL_PROXY > /dev/null 2>&1
    return $?
}

# Obtains a new AliEn token. It tries three times (TRIES) before giving up
# upon errors. On success, sources $ALIEN_ENV into the current shell.
function TokenInit() {
    for ((I=0; $I < $TRIES; I++)); do
        alien-token-init $AF_ALIEN_USER > /dev/null 2>&1
        if [ $? == 0 ]; then
            source "$ALIEN_ENV"
            return 0
        fi
    done
    return 1
}

# Obtains a new Grid proxy. It tries three times (TRIES) before giving up
# upon errors; success is verified via IsProxyExpiring, not the exit code
# of xrdgsiproxy itself.
function ProxyInit() {
    for ((I=0; $I < $TRIES; I++)); do
        xrdgsiproxy init > /dev/null 2>&1
        IsProxyExpiring
        if [ $? == 0 ]; then
            source "$ALIEN_ENV"
            return 0
        fi
    done
    return 1
}

# Wait until the specified command ($1), run by the current user, has
# finished its execution. Polls `ps` once per second; returns 1 after
# MAXWAIT seconds, 0 as soon as no matching process is found.
function Wait() {
    local COUNT=0
    while [ 1 ]; do
        ps U $UID | grep -v grep | grep "$1" > /dev/null 2>&1
        if [ $? == 0 ]; then
            if [ $COUNT -ge $MAXWAIT ]; then
                return 1
            elif [ $COUNT == 0 ]; then
                # Print the notice only once, on the first poll
                prn -i "Waiting maximum $MAXWAIT seconds for another instance of $1" \
                    "to finish"
            fi
            sleep 1
            let COUNT++
        else
            return 0
        fi
    done
}

# Echo function with prefixes (-i info, -e error, -w warning, -o ok,
# -f fatal). If QUIET envvar is set to 1 output is not
# printed; if LOGFILE is not set, output is on stdout, otherwise it is
# appended to $LOGFILE. A timestamp is added unless DATIME=0, and an
# optional $LABEL tag is appended to the prefix.
function prn() {
    if [ "$QUIET" == 1 ]; then
        return
    fi
    local PARAM="$1"
    shift
    local STR="$@"
    local PREF
    case $PARAM in
        -i) PREF="I" ;;
        -e) PREF="E" ;;
        -w) PREF="W" ;;
        -o) PREF="O" ;;
        -f) PREF="F" ;;
    esac
    if [ "$DATIME" != "0" ]; then
        PREF="$PREF-[`Datime`]"
    fi
    if [ "$LABEL" != "" ]; then
        PREF="$PREF-[$LABEL]"
    fi
    if [ "$LOGFILE" == "" ]; then
        echo "$PREF $STR"
    else
        echo "$PREF $STR" >> "$LOGFILE"
    fi
}

# This function performs automatic authentication. Messages are printed, i.e.
# this function is not "silent". If something goes wrong, return error should
# be properly caught:
#   10/20 - timed out waiting for another alien-token-init / xrdgsiproxy
#   11/21 - could not obtain a new token / proxy
function AutoAuth() {
    # Before checking token, wait for possible other instances of alien-token-init
    # to terminate
    Wait alien-token-init
    if [ $? != 0 ]; then
        prn -e "Timeout reached"
        return 10
    fi
    # Checks if token is going to expire soon: in this case, request a new one
    IsTokenExpiring
    if [ $? != 0 ]; then
        prn -i "Token is expiring soon or does not exist: requesting a new one"
        TokenInit
        if [ $? != 0 ]; then
            prn -e "Cannot obtain a new token"
            return 11
        else
            prn -o "New token obtained"
        fi
    else
        prn -i "Reusing existing token"
        source "$ALIEN_ENV"
    fi
    # Before checking proxy, wait for possible other instances of xrdgsiproxy to
    # terminate
    Wait xrdgsiproxy
    if [ $? != 0 ]; then
        prn -e "Timeout reached"
        return 20
    fi
    # Checks if proxy is going to expire soon: in this case, request a new one
    IsProxyExpiring
    if [ $? != 0 ]; then
        prn -i "Proxy is expiring soon or does not exist: requesting a new one"
        ProxyInit
        if [ $? != 0 ]; then
            prn -e "Cannot obtain a new proxy"
            return 21
        else
            prn -o "New proxy obtained"
        fi
    else
        prn -i "Reusing existing proxy"
    fi
    return 0
}

# Common init function that only does a couple of checks inside PATH, and
# prevents asking if the AliEn client should be recompiled (GSHELL_NO_GCC).
# Returns the number of required tools missing from PATH (0 = all present).
function Init() {
    export GSHELL_NO_GCC=1
    export PATH="$AF_ALIEN_DIR/api/bin:$PATH"
    export LD_LIBRARY_PATH="$AF_ALIEN_DIR/api/lib:$LD_LIBRARY_PATH"
    local REQ=( alien_cp alien-token-init xrdgsiproxy alien_whereis alien_ls \
        openssl )
    local R
    local ERR=0
    for R in ${REQ[*]}; do
        which $R > /dev/null 2>&1
        if [ $? != 0 ]; then
            prn -e "$R is missing in PATH"
            let ERR++
        fi
    done
    return $ERR
}

# SQL-like date and time
function Datime() {
    #date +'%Y-%m-%d %H:%M:%S'
    date +'%Y%m%d-%H%M%S'
}
true
c3637205ed2eeaac93f4d1038cac7a0fa879d9b0
Shell
niilohlin/ppis
/install.sh
UTF-8
5,634
3.59375
4
[]
no_license
#!/bin/bash
# Interactive workstation bootstrap: detect the distro's package manager,
# let the user toggle which programs to install and which dotfiles to
# push/pull, then run the selected actions. Must run as root.

# Check if root
if [[ $EUID -ne 0 ]]; then
    echo "You must be root to install stuff" 1>&2
    exit 100
fi

# Pick install/update commands from the distro name; fall back to asking.
os=$(lsb_release -s -d)
if echo $os | grep -i --quiet 'Debian\|Ubuntu' ;then
    pmin='apt-get install'
    pmup='apt-get update'
elif echo $os | grep -i --quiet 'OpenSuSE\|RedHat\|Fedora' ;then
    pmin='yum install'
    pmup='yum update'
elif echo $os |grep -i --quiet 'Arch' ;then
    pmin='pacman -Syu'
    pmup='pacman -Syy'
else
    # installation command, e.g. "apt-get install "
    echo -en "\ec"
    echo -n "enter your installation command :> "
    read pmin
    echo -n "enter your update command :> "
    read pmup
fi

# Dotfiles are copied to/from the login user's home, not root's.
username=$(logname)
# NOTE(review): $(pmup) runs a command literally named "pmup" (which does
# not exist); the intent was surely $pmup to run the update command.
$(pmup)

#two parralell list containing the avalable programs to install
# NOTE(review): the embedded "$(pmin)" substitutions run at array-definition
# time and expand to nothing (pmin is a variable, not a command), so those
# entries become e.g. "build-essential ||  gcc ||  make" passed verbatim to
# the installer later; "chomium" also looks like a typo for "chromium".
programs=("vim" "gvim" "rxvt-unicode" "zsh" "git" "synapse" "anki" "flashplugin-nonfree" "preload" "prelink" "build-essential || $(pmin) gcc || $(pmin) make" "keepassx" "gparted" "tmux" "inconsolata" "chromium-browser || $(pmin) chomium" "arandr" "conky")
# true indicates that it will be installed
installs=(true true true true true true true false true true false true true true true true true true)

# Menu loop: toggle entries by index, -1 continues.
# NOTE(review): ${#programs[@]-1} is "length of programs[@] with default -1",
# i.e. just the array length — the "-1" has no effect; the clearer form is
# i<${#programs[@]}.
while [ true ]
do
    # clear screen
    echo -en "\ec"
    echo "Please enter what you want to install."
    echo -e "-1 \t [ ] continue/run"
    for ((i=0; i<${#programs[@]-1}; i++))
    do
        #print a little asterisk in the box if the prorgam is marked
        # for download
        if [[ ${installs[$i]} == true ]];then
            printf "%s\t %s %s\n" "$i" "[*]" "${programs[$i]}"
        else
            printf "%s\t %s %s\n" "$i" "[ ]" "${programs[$i]}"
        fi
    done
    # prompt for installation number
    echo -n "install number :> "
    read input
    if [[ $input -eq -1 ]]; then
        break
    fi
    if [[ ${installs[$input]} == true ]];then
        installs[$input]=false
    else
        installs[$input]=true
    fi
done

# same here, but with preconfigured rc files and .conf files
configures=("vim" "urxvt" "zsh" "git" "tmux" "keyboard layout" "conky")
customs=(true true true true true true true)
updates=(true true true true true true true)

# Menu loop for which dotfiles to push (configure) onto this machine.
while [ true ]
do
    echo -en "\ec"
    echo "Please enter what you want to automaticallly configure"
    echo -e "-1 \t [ ] continue/run"
    for ((i=0; i<${#configures[@]-1}; i++))
    do
        if [[ ${customs[$i]} == true ]];then
            printf "%s\t %s %s\n" "$i" "[*]" "${configures[$i]}"
        else
            printf "%s\t %s %s\n" "$i" "[ ]" "${configures[$i]}"
        fi
    done
    echo -n "configure number :> "
    read input
    if [[ $input -eq -1 ]]; then
        break
    fi
    if [[ ${customs[$input]} == true ]];then
        customs[$input]=false
    else
        customs[$input]=true
    fi
done

#prompt for updates
# Menu loop for which dotfiles to pull (update the repo copies) from this machine.
while [ true ]
do
    echo -en "\ec"
    echo "Please enter what you want to automaticallly update"
    echo -e "-1 \t [ ] continue/run"
    for ((i=0; i<${#configures[@]-1}; i++))
    do
        if [[ ${updates[$i]} == true ]];then
            printf "%s\t %s %s\n" "$i" "[*]" "${configures[$i]}"
        else
            printf "%s\t %s %s\n" "$i" "[ ]" "${configures[$i]}"
        fi
    done
    echo -n "configure number :> "
    read input
    if [[ $input -eq -1 ]]; then
        break
    fi
    if [[ ${updates[$input]} == true ]];then
        updates[$input]=false
    else
        updates[$input]=true
    fi
done

# install the programs
for ((i=0; i<${#programs[@]-1}; i++))
do
    if [[ ${installs[$i]} == true ]];then
        echo -e "\033[1;33minstalling ${programs[$i]}"
        tput sgr0
        # send expands to
        # yes | apt-get install vim
        # for example
        yes | $pmin ${programs[$i]}
    fi
    echo -en "\ec"
done

# and configure that shit
# Copy repo dotfiles into the user's home for each selected entry.
for ((i=0; i<${#configures[@]-1}; i++))
do
    # I have no idea of how to do switch case
    # and it's not important
    if [[ ${customs[$i]} == true ]];then
        echo "configuring ${configures[$i]}"
        if [[ ${configures[$i]} == "vim" ]];then
            cp -r ./vimrc /home/$username/.vim/vimrc
        elif [[ ${configures[$i]} == "urxvt" ]];then
            cp ./Xdefaults /home/$username/.Xdefaults
            cp ./Xresources /home/$username/.Xresources
        elif [[ ${configures[$i]} == "zsh" ]];then
            cp ./zshrc /home/$username/.zshrc
        elif [[ ${configures[$i]} == "tmux" ]];then
            cp ./tmux.conf /home/$username/.tmux.conf
        # NOTE(review): ${configures} below is missing the [$i] subscript, so
        # it expands to the first element ("vim") and this branch can never
        # match "git" — the git configuration is dead code.
        elif [[ ${configures} == "git" ]];then
            git config --global user.name "Niil Öhlin"
            git config --global user.email niil.94@hotmail.com
            git config --global core.editor vim
        elif [[ ${configures[$i]} == "keyboard layout" ]];then
            echo "backing up old keymap symbols"
            cp -r /usr/share/X11/xkb/symbols/us /usr/share/X11/xkb/symbols/us.bak
            echo "copying custom keymap symbols"
            cp -r ./us /usr/share/X11/xkb/symbols/us
        elif [[ ${configures[$i]} == "conky" ]] ;then
            cp ./conkyrc /home/$username/.conkyrc
        fi
    fi
done
echo "remember to change to zsh in /etc/passwd"
echo "remember to set screensetup.sh and synapse on startup"

# and update that shit
# Reverse direction: copy the live dotfiles back into the repo.
for ((i=0; i<${#configures[@]-1}; i++))
do
    # I have no idea of how to do switch case
    if [[ ${updates[$i]} == true ]];then
        echo "configuring ${configures[$i]}"
        if [[ ${configures[$i]} == "vim" ]];then
            cp /home/$username/.vim/vimrc ./vimrc
        elif [[ ${configures[$i]} == "urxvt" ]];then
            cp /home/$username/.Xdefaults ./Xdefaults
            cp /home/$username/.Xresources ./Xresources
        elif [[ ${configures[$i]} == "git" ]];then
            echo
        elif [[ ${configures[$i]} == "zsh" ]];then
            cp /home/$username/.zshrc ./zshrc
        elif [[ ${configures[$i]} == "tmux" ]];then
            cp /home/$username/.tmux.conf ./tmux.conf
        elif [[ ${configures[$i]} == "keyboard layout" ]];then
            cp /usr/share/X11/xkb/symbols/us ./us
        elif [[ ${configures[$i]} == "conky" ]]; then
            cp /home/$username/.conkyrc ./conkyrc
        fi
    fi
done
true
84932b2cdacd190c95c8102867e1b8046e172797
Shell
pshynin/shell-umass
/sandbox/test.sh
UTF-8
237
3.359375
3
[ "MIT" ]
permissive
#!/bin/sh FILENAME='myFile' # echo $FILENAME # if[$FILENAME]: TEST="one" case $TEST in one) echo "1" ;; two) echo "2" ;; three) echo "3" ;; ['t','e','n']) echo "chars" ;; ?e) echo "?)" ;; *) echo "*)" ;; esac
true
4ec905c31b2051e70cd57cbeb325250959509910
Shell
hahahawin/ceph
/ceph_auto_install.sh
UTF-8
15,265
3.609375
4
[]
no_license
#!/bin/bash
# Interactive cephadm cluster installer for CentOS 7/8: collects node
# IPs/passwords, distributes SSH keys, prepares every node via ansible,
# and bootstraps a ceph (octopus) cluster.
# (Collapsed whitespace reconstructed; heredoc line breaks are a best-effort
# reconstruction — verify against the upstream file.)

# Destroy the existing cluster on all nodes (refuses if more than one
# cluster fsid directory is present under /var/lib/ceph).
function ceph_destory(){
    ceph_destory_tag_more=`ls /var/lib/ceph|wc -w`
    if [ $ceph_destory_tag_more -ge "2" ]; then
        echo "该主机中存在多个集群,请手动清除,本脚本无能为力"
        exit 0
    fi
    ceph_destory_tag=`ls /var/lib/ceph`
    ansible ceph_master,ceph_slave -m shell -a "cephadm rm-cluster --fsid $ceph_destory_tag --force"
}

# Detect a pre-existing cluster and interactively ask whether to destroy it.
function ceph_check_old(){
    if [ ! -d "/var/lib/ceph" ];then
        mkdir /var/lib/ceph
    fi
    ceph_folder=`ls /var/lib/ceph|wc -w`
    if [ $ceph_folder -ge "1" ]; then
        echo "ceph集群已存在,是否清除原集群输入yes/no(确认清除将使原集群发生不可逆损毁,请谨慎选择)"
        read destory
        while [ "$destory" != "yes" ]; do
            if [ "$destory" = "no" ]; then
                echo "正在退出安装。。。"
                exit 0
            fi
            echo "是否摧毁原集群, 输入yes/no"
            read destory
        done
        ceph_destory
    fi
}

# Check whether the master node runs CentOS 7 or 8; abort otherwise.
function cephadm_check_os_info() {
    sysvertion=`ansible ceph_master -m shell -a "cat /etc/redhat-release|sed -r 's/.* ([0-9]+)\..*/\1/'"|sed 1d`
    if [[ $sysvertion = "7" ]] || [[ $sysvertion = "8" ]]; then
        echo -e "\033[32m 系统符合要求,开始部署。。。\033[0m"
        echo -e "\n"
    else
        echo -e "\033[31m 系统不符合要求,退出中。。。\033[0m"
        exit 0
    fi
}

# Fetch the cephadm bootstrap script matching the detected OS version.
function cephadm_python3() {
    if [[ $sysvertion = "7" ]]; then
        wget -O /root/ceph_ansible/cephadm.j2 https://liquanbing.oss-cn-chengdu.aliyuncs.com/ceph/cephadm_15.2.6
    fi
    if [[ $sysvertion = "8" ]]; then
        wget -O /root/ceph_ansible/cephadm.j2 https://liquanbing.oss-cn-chengdu.aliyuncs.com/ceph/cephadm_15.2.8
    fi
}

# Verify every node can reach the mirror; abort the install on failure.
function ceph_check_network(){
    #ceph_network_status=`ansible ceph_master,ceph_slave -m shell -a "curl -I -m 60 -o /dev/null -s -w %{http_code} https://mirrors.aliyun.com/ceph/rpm-15.2.6/el7/noarch/ceph-release-1-1.el7.noarch.rpm"`
    ceph_network_status=`ansible ceph_master,ceph_slave -m shell -a "ping -W 2 -c 2 mirrors.aliyun.com"`
    echo "检查ceph各节点网络状态"
    echo "$ceph_network_status"
    # NOTE(review): the trailing |"service not knownnon" pipes into a command
    # literally named "service not knownnon" (command not found), it is NOT a
    # second grep pattern — DNS failures ("Name or service not known") are
    # therefore never detected here.
    ceph_network_status_check=`echo "$ceph_network_status"|grep -E "100% packet loss"|"service not knownnon"`
    if [ -n "$ceph_network_status_check" ];then
        echo -e "\033[31m ceph节点无法访问外网或ceph镜像库,请检查网络!\033[0m"
        echo -e "\n"
        echo -e "\033[31m 安装退出中。。。\033[0m"
        exit 0
    fi
}

echo -e "\n"
echo -e "\033[31m ***********************************************************************************************************\033[0m"
echo -e "\033[31m * 本脚本只能在centos7或者centos8下运行,如在其他系统运行会发生致命错误,安装前请确保全集群网络正常 *\033[0m"
echo -e "\033[31m ***********************************************************************************************************\033[0m"
echo -e "\n"

# Install the base packages needed by this script.
rpm -q epel-release &> /dev/null || yum install epel-release -y &> /dev/null
rpm -q wget &> /dev/null || yum install wget -y &> /dev/null
rpm -q python3 &> /dev/null || yum install python3 -y &> /dev/null

# Install ansible if it is not present.
ansible_status=`rpm -qa|grep ansible|wc -l`
if [ $ansible_status -eq 0 ]; then
    yum install -y ansible
fi

# Collect user input used to generate the ansible inventory and playbooks.
[ -d /root/ceph_ansible ] || mkdir /root/ceph_ansible
echo -e "\033[31m 全局部分\033[0m"
echo "******************************"
echo "* 要安装的ceph一共有几台主机 *"
echo "******************************"
read ceph_number
# Several counters start from the same value; each loop below consumes its own.
ceph_number_Secret=$ceph_number
ceph_number_hosts=$ceph_number
ceph_number_mon=$ceph_number
ceph_number_info=$ceph_number
ceph_add_node=$ceph_number
echo "内网IP地址 内网网卡 主机ROOT密码" > /etc/ansible/info_tmp
echo -e "\n\n"
echo "**********************"
echo "* 请输入ceph内网网段 *"
echo "**********************"
read ceph_segment
echo -e "\n\n"
echo -e "\033[31m CEPH主节点部分\033[0m"
echo "******************************"
echo "* 请输入ceph主节点内网ip地址 *"
echo "******************************"
read ceph_master_ip
echo -e "\n\n"
echo "************************"
echo "* 请输入主节点root密码 *"
echo "************************"
read ceph_master_password
echo -e "\n\n"
echo "**************************"
echo "* 请输入主节点内网网卡名 *"
echo "**************************"
read ceph_master_card
echo -e "\n\n"
echo "$ceph_master_ip $ceph_master_card $ceph_master_password" >> /etc/ansible/info_tmp
a=0
echo "" > /etc/ansible/hosts_tmp #1
sed -i '1d' /etc/ansible/hosts_tmp #4
# Remove the empty line produced by the previous statement.
echo -e "\033[31m CEPG子节点部分\033[0m"
# Prompt for every slave node; answers land in dynamically named variables
# (ceph_slave_ip1, ceph_slave_password1, ...), read back later via eval.
# NOTE(review): [[ $ceph_number > "1" ]] is a lexicographic string compare;
# it happens to work for small counts but -gt would be correct.
while [[ $ceph_number > "1" ]]; do
    let "a++"
    let "ceph_number--"
    echo "*********************************"
    echo "* 请输入ceph子节点$a 内网ip地址 *"
    echo "*********************************"
    read ceph_slave_ip$a
    echo -e "\n\n"
    echo "****************************"
    echo "* 请输入当前子节点root密码 *"
    echo "****************************"
    read ceph_slave_password$a
    echo -e "\n\n"
    echo "*****************************"
    echo "* 请输入子节点$a 内网网卡名 *"
    echo "*****************************"
    read ceph_slave_card
    echo -e "\n\n"
    eval echo "$"ceph_slave_ip$a hostname=node$a ceph_card=$ceph_slave_card"" >> /etc/ansible/hosts_tmp #2
    eval echo "$"ceph_slave_ip$a $ceph_slave_card "$"ceph_slave_password$a"""" >> /etc/ansible/info_tmp
done

# Show the collected configuration and require explicit confirmation.
echo -e "\033[31m 请认真确认下面的信息,如果信息有误将导致部署失败\033[0m"
echo "******************************************************************************"
echo ""
cat /etc/ansible/info_tmp |awk ' {printf "%-25s %-25s %-10s\n",$1,$2,$3}'
echo ""
echo "ceph节点一共有:$ceph_number_info"
echo "ceph网段是:$ceph_segment"
echo ""
echo "*******************************************************************************"
echo "请确认上面的信息正确性 正确请输入yes 退出安装请输入no"
read asd
while [ "$asd" != "yes" ]; do
    if [ "$asd" = "no" ]; then
        exit 0
    fi
    echo "请确认上面的信息正确性 输入yes/no"
    read asd
done

# Distribute this host's SSH key to all nodes so ansible can connect.
# The `:<< 提示` heredoc below is a commented-out reference snippet.
:<< 提示
rpm -q sshpass &> /dev/null || yum install sshpass -y &> /dev/null #判断sshpass是否安装 没安装就安装它
sed -i '/Checking ask/c StrictHostKeyChecking no' /etc/ssh/ssh_config #取消主机密钥检查
[ -f /root/.ssh/id_rsa ] || ssh-keygen -P "" -f /root/.ssh/id_rsa &> /dev/null#判断本机密钥是否存在 不存在就创建
sshpass -p $mima ssh-copy-id -i /root/.ssh/id_rsa.pub $ip #复制密钥到对应服务器
提示
rpm -q sshpass &> /dev/null || yum install sshpass -y &> /dev/null
sed -i '/Checking ask/c StrictHostKeyChecking no' /etc/ssh/ssh_config
[ -f /root/.ssh/id_rsa ] || ssh-keygen -P "" -f /root/.ssh/id_rsa &> /dev/null
sshpass -p $ceph_master_password ssh-copy-id -i /root/.ssh/id_rsa.pub $ceph_master_ip &> /dev/null
b=0
while [[ $ceph_number_Secret > "1" ]]; do
    let "b++"
    let "ceph_number_Secret--"
    ceph_slave_ip_Secret=`eval echo "$"ceph_slave_ip$b""`
    ceph_slave_password_Secret=`eval echo "$"ceph_slave_password$b""`
    sshpass -p $ceph_slave_password_Secret ssh-copy-id -i /root/.ssh/id_rsa.pub $ceph_slave_ip_Secret &> /dev/null
done

# Initialize the master node environment.
# Generate the ansible inventory file.
cat > /etc/ansible/hosts <<EOF
[ceph:children]
ceph_master
ceph_slave
[ceph_master]
$ceph_master_ip hostname=node0
[ceph_slave]
EOF
cat /etc/ansible/hosts_tmp >>/etc/ansible/hosts #3
# Steps #1-#4 built hosts_tmp; eval handles the nested variable names,
# e.g. eval echo "$"ymd$i""
# Generate the cluster-wide /etc/hosts file.
cat > /root/ceph_ansible/hosts.j2 <<EOF
127.0.0.1 localhost localhost.localdomain
$ceph_master_ip node0
EOF
c=0
while [[ $ceph_number_hosts > "1" ]]; do
    let "c++"
    let "ceph_number_hosts--"
    ceph_slave_ip_hosts=`eval echo "$"ceph_slave_ip$c""`
    echo "$ceph_slave_ip_hosts node$c" >> /root/ceph_ansible/hosts.j2
done
# Generate the podman registry-mirror config (domestic mirror).
cat > /root/ceph_ansible/registries.j2 <<EOF
unqualified-search-registries = ["docker.io"]
[[registry]]
prefix = "docker.io"
location = "docker.mirrors.ustc.edu.cn"
#清华大学加速docker镜像仓库
EOF
# Generate the chrony config for the master (time server for the cluster).
cat > /root/ceph_ansible/chrony_master.j2 <<EOF
server cn.pool.ntp.org iburst
stratumweight 0
driftfile /var/lib/chrony/drift
rtcsync
makestep 10 3
allow 0.0.0.0/0
bindcmdaddress 127.0.0.1
bindcmdaddress ::1
keyfile /etc/chrony.keys
commandkey 1
generatecommandkey
noclientlog
logchange 0.5
logdir /var/log/chrony
EOF
# Generate the chrony config for the slaves (sync from the master).
cat > /root/ceph_ansible/chrony_slave.j2 << EOF
server $ceph_master_ip iburst
EOF

ceph_check_network     # check cluster-wide network access
cephadm_check_os_info  # check the OS version
cephadm_python3        # pick the cephadm script for this OS version

echo "开始初始化主节点环境"
cat > /root/ceph_ansible/ceph_initenv_master.yml <<EOF
---
- hosts: ceph_master
  vars:
    max_clients: 200
    user: root
  tasks:
  - name: 安装epel源
    yum: pkg=epel-release state=latest
  - name: 传送cephadm安装脚本
    copy: src=/root/ceph_ansible/cephadm.j2 dest=/tmp/cephadm
  - name: 安装ceph源
    file: dest=/tmp/cephadm mode=777
  - name: 添加cephadm的yum源
    shell: /tmp/cephadm add-repo --release octopus
  - name: 安装podman
    yum: pkg=podman state=latest
  - name: 初始化cephadm
    shell: /tmp/cephadm install
  - name: 安装gdisk
    yum: pkg=gdisk state=latest
  - name: 打开firewalld
    service: name=firewalld state=started enabled=yes
  - name: 添加网卡放行防火墙规则
    shell: firewall-cmd --zone=trusted --add-interface=$ceph_master_card --permanent
  - name: 添加时间同步端口防火墙规则
    shell: firewall-cmd --zone=public --add-port=123/udp --permanent && firewall-cmd --reload
  - name: 临时关闭selinux
    selinux: state=disabled
  - name: 永久关闭selinux
    shell: sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
  - name: 永久关闭selinux
    shell: sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
  - name: 更改podman镜像库为清华大学
    template: src=/root/ceph_ansible/registries.j2 dest=/etc/containers/registries.conf
  - name: 更改host列表
    template: src=/root/ceph_ansible/hosts.j2 dest=/etc/hosts
  - name: 更改主机名
    raw: "echo {{hostname|quote}} > /etc/hostname"
  - name: 生效主机名
    shell: hostname {{hostname|quote}}
  - name: 安装时间同步服务
    yum: pkg=chrony state=latest
  - name: 配置时间同步服务
    template: src=/root/ceph_ansible/chrony_master.j2 dest=/etc/chrony.conf
  - name: 重启时间同步服务
    shell: systemctl restart chronyd && systemctl enable chronyd
EOF
ansible-playbook -i /etc/ansible/hosts /root/ceph_ansible/ceph_initenv_master.yml

echo "开始初始化子节点环境"
cat > /root/ceph_ansible/ceph_initenv_slave.yml <<EOF
---
- hosts: ceph_slave
  vars:
    max_clients: 200
    user: root
  tasks:
  - name: 安装epel源
    yum: pkg=epel-release state=latest
  - name: 安装python3
    yum: pkg=python3 state=latest
  - name: 传送cephadm安装脚本
    copy: src=/root/ceph_ansible/cephadm.j2 dest=/tmp/cephadm
  - name: 安装ceph源
    file: dest=/tmp/cephadm mode=777
  - name: 添加cephadm的yum源
    shell: /tmp/cephadm add-repo --release octopus
  - name: 安装podman
    yum: pkg=podman state=latest
  - name: 初始化cephadm
    shell: /tmp/cephadm install
  - name: 安装gdisk
    yum: pkg=gdisk state=latest
  - name: 打开firewalld
    service: name=firewalld state=started enabled=yes
  - name: 添加网卡放行防火墙规则
    shell: firewall-cmd --zone=trusted --add-interface={{ceph_card}} --permanent
  - name: 添加时间同步端口防火墙规则
    shell: firewall-cmd --zone=public --add-port=123/udp --permanent && firewall-cmd --reload
  - name: 临时关闭selinux
    selinux: state=disabled
  - name: 永久关闭selinux
    shell: sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
  - name: 永久关闭selinux
    shell: sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
  - name: 更改podman镜像库为清华大学
    template: src=/root/ceph_ansible/registries.j2 dest=/etc/containers/registries.conf
  - name: 更改host列表
    template: src=/root/ceph_ansible/hosts.j2 dest=/etc/hosts
  - name: 更改主机名
    raw: "echo {{hostname|quote}} > /etc/hostname"
  - name: 生效主机名
    shell: hostname {{hostname|quote}}
  - name: 安装时间同步服务
    yum: pkg=chrony state=latest
  - name: 配置时间同步服务
    template: src=/root/ceph_ansible/chrony_slave.j2 dest=/etc/chrony.conf
  - name: 重启时间同步服务
    shell: systemctl restart chronyd && systemctl enable chronyd
EOF
ansible-playbook -i /etc/ansible/hosts /root/ceph_ansible/ceph_initenv_slave.yml

ceph_check_old
echo "开始初始化集群环境"
# Keep the mon count odd (quorum requirement).
if [ $(( $ceph_number_mon % 2 )) = 0 ]; then #mon的数量保证奇数
    let "ceph_number_mon--"
fi
ansible ceph_master -m shell -a "cat /etc/hosts" > /tmp/ceph_host_info
# Build the comma-separated node index list "0, 1, ..., n-1" for the playbook.
let ceph_add_node--
for i in `seq 0 $ceph_add_node`;do echo -e "$i, \c"; done > /tmp/1
ceph_add_node_1=`cat /tmp/1`
ceph_add_node_ok=`echo ${ceph_add_node_1%??}`
# List assembly done.
cat > /root/ceph_ansible/ceph_initenv.yml <<EOF
---
- hosts: ceph_master
  vars:
    max_clients: 200
    node: [ $ceph_add_node_ok ]
    user: root
  tasks:
  - name: 创建ceph配置目录
    file: path=/etc/ceph state=directory
  - name: 创建ceph主节点
    shell: cephadm bootstrap --mon-ip $ceph_master_ip > /root/ceph_dashboard.log 2>&1
  - name: 添加主节点
    raw: "ssh-copy-id -f -i /etc/ceph/ceph.pub root@node{{item}} && cephadm shell -- ceph orch host add node{{item}}"
    with_items:
    - "{{node}}"
    register: num
  - name: 配置公共网络
    shell: cephadm shell -- ceph config set mon public_network $ceph_segment
  - name: 指定mon数量
    shell: cephadm shell -- ceph orch apply mon $ceph_number_mon
  - name: 指定mon
    shell: cephadm shell -- ceph orch apply mon node0,node1,node2
EOF
ansible-playbook -i /etc/ansible/hosts /root/ceph_ansible/ceph_initenv.yml

echo "恭喜部署完成"
echo "请访问dashbrod初始化ceph pool,前端配置如下(如果信息为空,证明节点部署失败,请重跑脚本)"
ansible ceph_master -m shell -a "cat /root/ceph_dashboard.log|sed -n "/Dashboard/,/Password/p""
echo "请将如下内容加入访问dashbrod——web的host文件,否则可能无法正常访问服务"
echo -e "\n"
cat /tmp/ceph_host_info |sed '1,2d'
echo -e "\n"
# Stash the network segment for the follow-up script to read.
echo -e "$ceph_segment \c" > /tmp/2 #将网段信息临时存储,方便其他脚本调用
# NOTE(review): the two lines below use fullwidth quotes ” “ which bash does
# not treat as quoting; the second line actually runs wget && sh as part of
# the echo statement's command line — almost certainly not intended.
echo ”如果你已确定ceph部署成功,同时魔方云也已经部署成功,魔方云节点内核已升级到5以上可以用以下脚本自动初始化魔方云ceph环境“
echo ”请在本机执行 ”wget -O mofang_ceph_env.sh https://liquanbing.oss-cn-chengdu.aliyuncs.com/ceph/mofang_ceph_env.sh && sh mofang_ceph_env.sh "
true
653fde1ae6c9c15c6ceb8d96cd12064b04e76fbe
Shell
techthiyanes/nmt-pe-effects-2021
/src/preparation_p1/prepare-data-for-upload.sh
UTF-8
410
3.25
3
[]
no_license
#!/bin/bash mydir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" docs_dir="$mydir/../docs/out" mkdir -p upload-to-memsource idx=1 for srcfile in $docs_dir/a*.src ; do tgtfile="$docs_dir/$(basename $srcfile .src).tgt" python3 $mydir/src/create_tm.py $srcfile $tgtfile > upload-to-memsource/tm-$idx.tmx cp $srcfile upload-to-memsource/input-file-$idx.txt idx=$(( $idx + 1)) done
true
76427edf9916f3b166917976aac2794a944182cc
Shell
vpg/bash_libs
/elasticsearch.sh
UTF-8
793
3.65625
4
[]
no_license
#!/bin/bash DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "$DIR/logs.sh"; # Returning an array of the indexes that are present in prod # # @return Returns an arra of indexes function vpgGetIndexes { echo $indexJson | jq -r 'keys|.[]' } # Returning an array of the types of the index passed in param # # @return Returns an array of indexes function vpgGetTypes { local index=$1; if [ "$#" -ne 1 ]; then vpgError "You have to pass 1 parameter : index name" exit 1; fi echo $indexJson | jq -r ".[\"$index\"].mappings|keys|.[]" | grep -v "_default_" } # Set the elasticsearch server Reference function setElasticServer { ELSREF=$1 indexJson=$(curl --silent "http://${ELSREF}/_mapping") } setElasticServer "vp-elasticsearch:9200"
true
e85ce608aa4b4e46174a8c05238cf4a424266b32
Shell
whoisronnoc/scripts
/killbyname
UTF-8
182
3.734375
4
[]
no_license
#!/bin/bash # kill process by name bashpid=$$ name=$1 pids=$(ps -ef | grep $1 | awk '{print $2}') for pid in $pids do echo $pid if [ $bashpid -ne $pid ]; then kill $pid fi done
true
741198f3fa06343f2c56b1ce10b44f5c77645e2e
Shell
rajatgirotra/study
/elastic/setup.sh
UTF-8
1,645
3.21875
3
[]
no_license
#!/bin/bash SCRIPTS_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]})) echo "SCRIPTS_DIR = ${SCRIPTS_DIR}" INSTALL_ROOT=/home/rajatgirotra/tools/elk rm -rf ${INSTALL_ROOT} mkdir -p ${INSTALL_ROOT} ES_VERSION=6.2.2 ES_INSTALL_ROOT=${INSTALL_ROOT}/elasticsearch-${ES_VERSION} ES_ARTIFACT=elasticsearch-${ES_VERSION}.tar.gz ES_URL=https://artifacts.elastic.co/downloads/elasticsearch/${ES_ARTIFACT} KIBANA_VERSION=6.2.2 KIBANA_INSTALL_ROOT=${INSTALL_ROOT}/kibana-${KIBANA_VERSION}-linux-x86_64 KIBANA_ARTIFACT=kibana-${KIBANA_VERSION}-linux-x86_64.tar.gz KIBANA_URL=https://artifacts.elastic.co/downloads/kibana/${KIBANA_ARTIFACT} LS_VERSION=6.2.2 LS_INSTALL_ROOT=${INSTALL_ROOT}/logstash-${LS_VERSION} LS_ARTIFACT=logstash-${LS_VERSION}.tar.gz LS_URL=https://artifacts.elastic.co/downloads/logstash/${LS_ARTIFACT} FILEBEAT_VERSION=6.2.2 FILEBEAT_INSTALL_ROOT=${INSTALL_ROOT}/filebeat-${FILEBEAT_VERSION}-linux-x86_64 FILEBEAT_ARTIFACT=filebeat-${FILEBEAT_VERSION}-linux-x86_64.tar.gz FILEBEAT_URL=https://artifacts.elastic.co/downloads/beats/filebeat/${FILEBEAT_ARTIFACT} cd $(dirname ${INSTALL_ROOT}) rm -f ${ES_ARTIFACT} rm -f ${KIBANA_ARTIFACT} rm -f ${LS_ARTIFACT} rm -f ${FILEBEAT_ARTIFACT} wget ${ES_URL} wget ${KIBANA_URL} wget ${LS_URL} wget ${FILEBEAT_URL} # untar elastic and kibana tar xvf ${ES_ARTIFACT} -C ${INSTALL_ROOT} tar xvf ${KIBANA_ARTIFACT} -C ${INSTALL_ROOT} tar xvf ${LS_ARTIFACT} -C ${INSTALL_ROOT} tar xvf ${FILEBEAT_ARTIFACT} -C ${INSTALL_ROOT} # enable elasticsearch.url in kibana config KIBANA_CONFIG=${KIBANA_INSTALL_ROOT}/config/kibana.yml sed -i.bak 's/#elasticsearch.url/elasticsearch.url/g' ${KIBANA_CONFIG}
true
677229d6acf2a5fc78865e8c7a0e11053aea7d5f
Shell
fengbaobao394/downpy
/codes/conus.sh
UTF-8
434
2.65625
3
[ "MIT" ]
permissive
#!/bin/bash echo 'Running conus analysis' FIRST=$(sbatch -o con1.out -e con1.err --wait --parsable conus1.q) sleep 3s echo $FIRST n=$(cat "numberofjobs.txt") echo "Number of jobs = $n" nm=$((n-1)) sleep 3s SECOND=$(sbatch -o con2.out -e con2.err --array 0-$nm --dependency=afterany:$FIRST --parsable conus2.q) echo $SECOND THIRD=$(sbatch -o con3.out -e con3.err --dependency=afterany:$SECOND --parsable conus3.q) echo $THIRD exit 0
true
0a82ecc39e28c8b4817ef0129ce9cc249bbd6c39
Shell
bhm-heddy/42Project_boot2root
/setup_vm.sh
UTF-8
1,647
3.390625
3
[]
no_license
#!/bin/bash VM_NAME='Boot2Root' VM_RAM="1024" VM_VRAM='16' ISO_PATH='/tmp/BornToSecHackMe-v1.1.iso' if ! [ -e "$ISO_PATH" ] then curl -fsSL 'https://projects.intra.42.fr/uploads/document/document/2832/BornToSecHackMe-v1.1.iso' -o "$ISO_PATH" fi # Recreate host-only interface with custom DHCP if [ "$(VBoxManage list hostonlyifs | grep "vboxnet0")" ] then VBoxManage hostonlyif ipconfig vboxnet0 --ip 192.168.56.1 --netmask 255.255.255.248 VBoxManage dhcpserver modify --interface=vboxnet0 --server-ip=192.168.56.2 --netmask=255.255.255.248 --lower-ip=192.168.56.3 --upper-ip=192.168.56.6 --enable else VBoxManage hostonlyif create VBoxManage dhcpserver add --interface=vboxnet0 --server-ip=192.168.56.2 --netmask=255.255.255.248 --lower-ip=192.168.56.3 --upper-ip=192.168.56.6 --enable fi # Ensure VM exist or create it if ! [ "$(vboxmanage list vms | grep "$VM_NAME")" ] then VBoxManage createvm --name "$VM_NAME" --ostype 'Ubuntu_64' --register VBoxManage modifyvm "$VM_NAME" --memory "$VM_RAM" --vram "$VM_VRAM" VBoxManage modifyvm "$VM_NAME" --graphicscontroller vmsvga VBoxManage modifyvm "$VM_NAME" --nic1 hostonly --hostonlyadapter1 vboxnet0 VBoxManage storagectl "$VM_NAME" --name IDE --add ide VBoxManage storageattach "$VM_NAME" --storagectl IDE --port 0 --device 0 --type dvddrive --medium "$ISO_PATH" else printf "${TC_RED}There is already a VM with the name: ${VM_NAME}${TC_RESET}\n" fi # Ensure VM is up or start it if ! [ "$(vboxmanage list runningvms | grep "$VM_NAME")" ] then VboxManage startvm "$VM_NAME" --type headless else printf "${TC_RED}VM with the name: ${VM_NAME} is already running${TC_RESET}\n" fi
true
bf901625c7c885eda71465f9f093c84a26122ac5
Shell
FSMaxB/lfs-me-repos
/7.6-systemd/glibc-2.20
UTF-8
5,105
3.078125
3
[]
no_license
# lfs-me package build recipe for glibc 2.20 (LFS 7.6-systemd).
# Sourced by the lfs-me package manager, which supplies $sources_dir,
# $build_dir, $fakeroot_dir and calls the lfs_me_* hook functions in order.

pkgbuild_version=5
pkgname=glibc
pkgver=2.20
_tzdata_ver=2014g

# Info pages registered in /usr/share/info/dir on install and removed again
# on package removal.
_info_pages=( 'libc.info' 'libc.info-1' 'libc.info-2' 'libc.info-3' 'libc.info-4' 'libc.info-5' 'libc.info-6' 'libc.info-7' 'libc.info-8' 'libc.info-9' 'libc.info-10' 'libc.info-11' 'libc.info-12' 'libc.info-13' 'libc.info-14' )

# Config files preserved across upgrades.
backup=( '/etc/ld.so.conf' '/etc/nscd.conf' '/etc/nsswitch.conf' )

# "!" marks a conflict with any other installed glibc.
dependencies=( "!$pkgname" 'linux-headers' )

sources=( "http://ftp.gnu.org/gnu/${pkgname}/${pkgname}-${pkgver}.tar.xz" "http://www.linuxfromscratch.org/patches/lfs/7.6-systemd/${pkgname}-${pkgver}-fhs-1.patch" "http://www.iana.org/time-zones/repository/releases/tzdata${_tzdata_ver}.tar.gz" )
sha1sums=( 0ddd02ceb41f7cd049fac691a7864047b87b6351 d61e9ba795b64f1b1aa81023f3e45157cb426c93 20be4323055278a4a789d9c95926dd38733afa8d )

# Unpack glibc + tzdata, apply the FHS patch and configure out-of-tree.
lfs_me_prepare() {
    tar -xf "${sources_dir}/${pkgname}-${pkgver}.tar.xz" -C "$build_dir"
    mkdir -pv "${build_dir}/tzdata"
    tar -xf "${sources_dir}/tzdata${_tzdata_ver}.tar.gz" -C "${build_dir}/tzdata"
    cd "${build_dir}/${pkgname}-${pkgver}"
    patch -Np1 -i "${sources_dir}/${pkgname}-${pkgver}-fhs-1.patch"
    # glibc must be built in a separate directory.
    mkdir -v "../${pkgname}-build"
    cd "../${pkgname}-build"
    "../${pkgname}-${pkgver}/configure" \
        --prefix=/usr \
        --disable-profile \
        --enable-kernel=2.6.32 \
        --enable-obsolete-rpc
}

# Compile glibc and stage the ld.so.conf / nsswitch.conf files that
# lfs_me_install later copies into the fakeroot.
lfs_me_build() {
    cd "${build_dir}/${pkgname}-build"
    make
    cat > "${build_dir}/ld.so.conf" << "EOF"
# Begin /etc/ld.so.conf
/usr/local/lib
/opt/lib
# Add an include directory
include /etc/ld.so.conf.d/*.conf
EOF
    cat > "${build_dir}/nsswitch.conf" << "EOF"
# Begin /etc/nsswitch.conf
passwd: files
group: files
shadow: files
hosts: files dns myhostname
networks: files
protocols: files
services: files
ethers: files
rpc: files
# End /etc/nsswitch.conf
EOF
}

# Run the glibc test suite.
lfs_me_check() {
    cd "${build_dir}/${pkgname}-build"
    make check
}

# Install into the fakeroot: glibc itself, nscd config + systemd units,
# the staged config files, and the compiled timezone data.
lfs_me_install() {
    cd "${build_dir}/${pkgname}-build"
    make DESTDIR="$fakeroot_dir" install
    #config for nscd
    mkdir -pv "${fakeroot_dir}/etc"
    cp -v "../${pkgname}-${pkgver}/nscd/nscd.conf" "${fakeroot_dir}/etc/nscd.conf"
    mkdir -pv "${fakeroot_dir}/var/cache/nscd"
    #systemd files for nscd
    mkdir -pv "${fakeroot_dir}/usr/lib/"{tmpfiles.d,systemd/system}
    install -v -Dm644 "../${pkgname}-${pkgver}/nscd/nscd.tmpfiles" "${fakeroot_dir}/usr/lib/tmpfiles.d/nscd.conf"
    install -v -Dm644 "../${pkgname}-${pkgver}/nscd/nscd.service" "${fakeroot_dir}/usr/lib/systemd/system/nscd.service"
    mkdir -pv "${fakeroot_dir}/usr/lib/locale"
    #install config files
    mkdir -pv "${fakeroot_dir}/etc"
    cp "$build_dir"/{nsswitch.conf,ld.so.conf} "${fakeroot_dir}/etc"
    #install timezone data
    cd "${build_dir}/tzdata"
    # NOTE(review): the first mkdir is missing a '/' before the brace group
    # (it creates ".../zoneinfoposix" and ".../zoneinforight"), and ZONEINFO
    # is immediately rebound to the live /usr/share/zoneinfo — so zic below
    # writes to the running system, not the fakeroot. Looks unintentional;
    # verify against upstream lfs-me recipes.
    ZONEINFO="${fakeroot_dir}/usr/share/zoneinfo"
    mkdir -pv "$ZONEINFO"{posix,right}
    ZONEINFO=/usr/share/zoneinfo
    mkdir -pv $ZONEINFO/{posix,right}
    # Prefer the final zic; fall back to the /tools one from the temp system.
    if [ -f /usr/bin/zic ]
    then
        zic=/usr/bin/zic
    elif [ -f /tools/sbin/zic ]
    then
        zic=/tools/sbin/zic
    else
        lfs_me_error "Can't find 'zic' executable."
        false
    fi
    # Compile each tzdata region three times: default, POSIX rules, and
    # right/ (with leap seconds).
    for tz in etcetera southamerica northamerica europe africa antarctica \
        asia australasia backward pacificnew systemv
    do
        "$zic" -L /dev/null -d "$ZONEINFO" -y "sh yearistype.sh" "$tz"
        "$zic" -L /dev/null -d "${ZONEINFO}/posix" -y "sh yearistype.sh" "$tz"
        "$zic" -L leapseconds -d "${ZONEINFO}/right" -y "sh yearistype.sh" "$tz"
    done
    cp -v zone.tab zone1970.tab iso3166.tab "$ZONEINFO"
    "$zic" -d $ZONEINFO -p America/New_York
    unset ZONEINFO zic
    #don't overwrite info pages
    rm "${fakeroot_dir}/usr/share/info/dir"
}

# Post-install on the live system: generate locales, pick a timezone
# interactively and register the glibc info pages.
lfs_me_postinstall() {
    #create locales
    localedef -i cs_CZ -f UTF-8 cs_CZ.UTF-8
    localedef -i de_DE -f ISO-8859-1 de_DE
    localedef -i de_DE@euro -f ISO-8859-15 de_DE@euro
    localedef -i de_DE -f UTF-8 de_DE.UTF-8
    localedef -i en_GB -f UTF-8 en_GB.UTF-8
    localedef -i en_HK -f ISO-8859-1 en_HK
    localedef -i en_PH -f ISO-8859-1 en_PH
    localedef -i en_US -f ISO-8859-1 en_US
    localedef -i en_US -f UTF-8 en_US.UTF-8
    localedef -i es_MX -f ISO-8859-1 es_MX
    localedef -i fa_IR -f UTF-8 fa_IR
    localedef -i fr_FR -f ISO-8859-1 fr_FR
    localedef -i fr_FR@euro -f ISO-8859-15 fr_FR@euro
    localedef -i fr_FR -f UTF-8 fr_FR.UTF-8
    localedef -i it_IT -f ISO-8859-1 it_IT
    localedef -i it_IT -f UTF-8 it_IT.UTF-8
    localedef -i ja_JP -f EUC-JP ja_JP
    localedef -i ru_RU -f KOI8-R ru_RU.KOI8-R
    localedef -i ru_RU -f UTF-8 ru_RU.UTF-8
    localedef -i tr_TR -f UTF-8 tr_TR.UTF-8
    localedef -i zh_CN -f GB18030 zh_CN.GB18030
    #Ask timezone from user and create timezone link
    if [ ! -h /etc/localtime ]
    then
        TZPATH=$(tzselect)
        ln -sfv "/usr/share/zoneinfo/${TZPATH}" /etc/localtime
    fi
    echo "Adding info pages"
    for info_page in ${_info_pages[@]}
    do
        install-info "/usr/share/info/${info_page}" /usr/share/info/dir
    done
}

# Pre-removal cleanup: drop generated locales, the timezone link and the
# registered info pages.
lfs_me_preremove() {
    echo "Removing locales"
    rm -rf /usr/lib/locale/*
    echo "Removing timezone link"
    rm /etc/localtime
    echo "Removing info pages"
    for info_page in ${_info_pages[@]}
    do
        install-info --delete "/usr/share/info/${info_page}" /usr/share/info/dir
    done
}

# vim:set syntax=sh et:
true
dc6cde43e3f630f56d43b89551dd55256305a25e
Shell
thohal/openqrm
/trunk/src/plugins/sshterm/etc/init.d/sshterm
UTF-8
4,407
3.546875
4
[]
no_license
#!/bin/bash
# this is the boot-service init script for the sshterm resources
#
# This file is part of openQRM.
#
# openQRM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation.
#
# openQRM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with openQRM. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2009, Matthias Rechenburg <matt@openqrm.com>
#

# Resolve the openQRM base dir relative to this script's location.
OPENQRM_SERVER_BASE_DIR=$(pushd $(dirname $0)/../../../../.. > /dev/null; echo $PWD; popd > /dev/null)
# for including the package functions
export OPENQRM_SOURCE_DIR="$OPENQRM_SERVER_BASE_DIR/openqrm/"
. $OPENQRM_SERVER_BASE_DIR/openqrm/include/openqrm-functions
. $OPENQRM_SERVER_BASE_DIR/openqrm/include/openqrm-package-functions
. $OPENQRM_SERVER_BASE_DIR/openqrm/plugins/sshterm/include/openqrm-plugin-sshterm-functions
. $OPENQRM_SERVER_BASE_DIR/openqrm/plugins/sshterm/etc/openqrm-plugin-sshterm.conf
. $OPENQRM_RESOURCE_PARAMETER_FILE

# define wget to use with https
if [ "$openqrm_web_protocol" == "https" ]; then
    WGET_NO_CERT_CHECK="--no-check-certificate"
fi

# do not run on idle resources
if [ "$resource_image" == "idle" ]; then
    exit 0
fi

# also do not run before the real init of the rootfs
# if we have this file /etc/initrd-devices.conf we are still in the initrd
if [ -f /etc/initrd-devices.conf ]; then
    exit 0
fi

# Start the plugin: ensure screen/openssl are installed, create the
# 'openqrm' login user, fetch its password hash from the server, then
# launch ajaxterm, the cert generation and the pound reverse proxy, each
# inside a detached screen session.
function sshterm_start() {
    echo "Starting the openQRM sshterm-plugin"
    sshterm_stop 1>/dev/null 2>&1
    if ! which screen 1>/dev/null; then
        export FORCE_INSTALL=true
        openqrm_install_os_dependency screen
    fi
    # Some distros wrap screen; prefer the real binary when present.
    if [ -x /usr/bin/screen.real ]; then
        RUNSCREEN="/usr/bin/screen.real"
    else
        RUNSCREEN=`which screen`
    fi
    if ! which openssl 1>/dev/null; then
        export FORCE_INSTALL=true
        openqrm_install_os_dependency openssl
    fi
    # create the openqrm if not existing yet
    # this user starts the pound proxy and provides a user login because
    # ajaxterm does not allow root-logins
    if ! grep ^openqrm /etc/passwd 1>/dev/null; then
        useradd -d /home/openqrm -s /bin/bash -m openqrm
    fi
    # Fetch the crypted password for this image and splice it into /etc/shadow.
    # NOTE(review): the log is written to /iauth.log but "rm -f ... iauth.log"
    # removes a relative path — the log file in / is left behind.
    if wget -q $WGET_NO_CERT_CHECK -O /iauth.$image_id $openqrm_web_protocol://$resource_openqrmserver/openqrm/action/image-auth/iauth.$image_id 2>/iauth.log; then
        cryptedpassword=`cat /iauth.$image_id`
        rm -f /iauth.$image_id iauth.log
        sed -i "s#^openqrm:[^:]*#openqrm:$cryptedpassword#" /etc/shadow
    fi
    # Unique-per-second screen session name (time+date with separators stripped).
    SCREEN_NAME=`date +%T%x | sed -e "s/://g" | sed -e "s#/##g"`
    $RUNSCREEN -dmS $SCREEN_NAME $OPENQRM_SERVER_BASE_DIR/openqrm/plugins/sshterm/ajaxterm/ajaxterm.py
    sleep 1
    # create the pound ssl cert
    SCREEN_NAME=`date +%T%x | sed -e "s/://g" | sed -e "s#/##g"`
    $RUNSCREEN -dmS $SCREEN_NAME $0 createcert
    # start the reverse proxy which also provides secure https connections
    create_pound_config
    sleep 2
    SCREEN_NAME=`date +%T%x | sed -e "s/://g" | sed -e "s#/##g"`
    $RUNSCREEN -dmS $SCREEN_NAME $OPENQRM_SERVER_BASE_DIR/openqrm/plugins/sshterm/pound/pound -f $OPENQRM_SERVER_BASE_DIR/openqrm/plugins/sshterm/etc/pound.cfg
    return 0
}

# Stop the plugin: kill any running ajaxterm and pound processes.
function sshterm_stop() {
    echo "Stopping the openQRM sshterm-plugin"
    AJAXTERM_PID=`ps ax | grep ajaxterm | grep -v grep | awk {' print $1 '}`
    for PID in $AJAXTERM_PID; do
        kill $PID
    done
    POUND_PID=`ps ax | grep pound | grep -v grep | awk {' print $1 '}`
    for PID in $POUND_PID; do
        kill $PID
    done
}

# Generate the ssl certificate for pound; posts an error event to the
# openQRM server on failure.
function sshterm_createcert() {
    echo "Creating certificate the openQRM sshterm-plugin"
    # create the pound ssl cert
    if ! create_pound_cert $COUNTRY $PROVINCE $CITY $ORGANIZATION $ORG_UNIT $COMMON_NAME_CLIENT $EMAIL_CLIENT $LIFETIME; then
        openqrm_post_event 0 "openqrm_plugin_sshterm_start" 3 "openqrm-plugin-sshterm" "Failed to generate a ssl-certificate! Not starting sshterm plugin."
        return 1
    fi
    return 0
}

# init-script style dispatcher.
case "$1" in
    start)
        sshterm_start
        ;;
    stop)
        sshterm_stop
        ;;
    restart)
        sshterm_stop
        sleep 1
        sshterm_start
        ;;
    createcert)
        sshterm_createcert
        ;;
    *)
        echo "Usage: $0 {start|stop|restart|createcert}"
        exit 1
esac
exit $?
true
2df6390affa2df65ee9ebc2eb6c55f39150252e5
Shell
momotonton/linuxdev
/scripts/create_docker_certs.sh
UTF-8
2,445
3.890625
4
[ "MIT" ]
permissive
#!/bin/bash set -e if [ "$(uname -s)" != "Linux" ]; then exit -1 fi # https://docs.docker.com/engine/security/protect-access/ local_ip_addr=192.168.99.123 common_name=$local_ip_addr passphrase=pass:passwd tmpdir=$(mktemp -d) client_certs_dir=/vagrant/certs/ server_certs_dir=/var/docker/ pushd $tmpdir # generate CA private and public keys openssl genrsa -aes256 -out ca-key.pem -passout $passphrase 4096 openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -out ca.pem -passin $passphrase -subj "/C=AU/ST=NSW/L=Sydney/O=Linuxdev/CN=$common_name" # create a server key and certificate signing request openssl genrsa -out server-key.pem 4096 openssl req -subj "/CN=$common_name" -sha256 -new -key server-key.pem -out server.csr # create an extension config file echo subjectAltName = DNS:$common_name,IP:$local_ip_addr,IP:127.0.0.1 > extfile.cnf echo extendedKeyUsage = serverAuth >> extfile.cnf # generate the signed certificate openssl x509 -req -days 365 -sha256 -in server.csr -CA ca.pem -CAkey ca-key.pem -passin $passphrase \ -CAcreateserial -out server-cert.pem -extfile extfile.cnf # create a client key and certificate signing request openssl genrsa -out key.pem 4096 openssl req -subj '/CN=client' -new -key key.pem -out client.csr # create a new extensions config file echo extendedKeyUsage = clientAuth > extfile-client.cnf # generate the signed certificate openssl x509 -req -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem -passin $passphrase \ -CAcreateserial -out cert.pem -extfile extfile-client.cnf # remove intermediate files rm -v client.csr server.csr extfile.cnf extfile-client.cnf # file mode chmod -v 0400 ca-key.pem key.pem server-key.pem chmod -v 0444 ca.pem server-cert.pem cert.pem sudo mkdir -p $server_certs_dir # copy client certs to host rm -f ${client_certs_dir}*.pem cp ca.pem $client_certs_dir cp cert.pem $client_certs_dir cp key.pem $client_certs_dir echo Copied client certs to $client_certs_dir # copy server certs to $server_certs_dir sudo cp 
server*.pem $server_certs_dir sudo cp ca.pem $server_certs_dir echo Copied server certs to $server_certs_dir popd echo "Certificates has been generated. <SERVER> tlscacert: ca.pem tlscert: server-cert.pem tlskey: server-key.pem <CLIENT> tlscacert: ca.pem tlscert: cert.pem tlskey: key.pem " if [ -z "$(ps aux | grep "bootstrap.sh")" ]; then echo "Please run config_docker_certs.sh to apply config certificates." fi
true
200f430463308d58dd4096bb0ed6fc1db90ad654
Shell
IanDarwin/scripts
/open
UTF-8
3,143
3.984375
4
[ "MIT" ]
permissive
#!/bin/sh -f # simple emulation of MS-Windows' start command or OS X's open command: # deduce type of file, start its application (backgrounded). case $(uname) in Darwin) exec /usr/bin/open $@;; # Presumably this is under cygwin or similar... Windows) exec /c/program\ files/start $@;; MINGW64*) exec /c/Program\ Files/Git/usr/bin/start $*;; esac if [ -x /usr/bin/xdg-open -o -x /usr/local/bin/xdg-open ]; then exec xdg-open "$@" fi BROWSER=NOT_SET if [ -x /usr/bin/firefox -o -x /usr/local/bin/firefox ]; then BROWSER=FIREFOX elif [ -x /usr/bin/chrome -o /usr/local/bin/chrome ]; then BROWSER=CHROME else echo "Unknown browser; modify $0 to accomodate!" exit 1 fi #echo "$*" > /tmp/start.log # Tries to handle various browsers and various arguments # Tries xdg-open from ports first if present, else $BROWSER function browser { case ${BROWSER} in CHROME) case ${1} in # HTTP/HTTPS URL http*) exec chrome $1;; # local file: make full path and file:// URL *) case $1 in /*) exec chrome file://$1;; *) exec chrome file://${PWD}/$1;; esac esac ;; FIREFOX) case ${1} in http*) exec firefox -remote "openURL(${1})"& ;; /*) exec firefox -remote "openURL(file:${1})"& ;; *) exec firefox -remote "openURL(file:`pwd`/${1} )"& ;; esac ;; *) echo "No xdg-open found and no browser in default directory" exit 1 ;; esac } USAGE="$0 file ..." function usage { # give usage message, and exit echo "Usage: ${USAGE}" 2>&1 exit 1 } APP="" while getopts "a:v" opt do case "$opt" in a) APP=${OPTARG};; v) set -x;; *) usage;; esac done shift `expr $OPTIND - 1` # leave just filenames for f in "$@" do if [ "${APP}" != "" ]; then ${APP} $f else case "${f}" in *.ai) gimp "${f}" & ;; *.bmp|*.gif|*.jpg|*.jpeg|*.png|*.tif) nsxiv "${f}" & ;; *.class) java `echo ${f}|sed 's/.class$//'` & ;; *.esl) huckster ${f};; ftp://*) ftp "${f}" & ;; *.gz) gunzip -v $f;; http:*|https:*|*.html|*.HTML?) 
browser ${f} & ;; *.mgp) mgp ${f};; *.mp3|*.ogg|*.avi|*.wav) mplayer ${f};; *.mp4|*.mpg|*.avi|*.mov|*.mkv) mplayer ${f};; *.pro) designer3 ${f} &;; *.pdf) PDFERS="evince okular kpdf xpdf" FOUND=no for peeper in $PDFERS; do if [ -x /usr/bin/$peeper -o -x /usr/local/bin/$peeper ]; then ${peeper} "${f}" & FOUND=yes break; fi done if [ ${FOUND} != "yes" ]; then echo "Can't find any Acrobatty readers for ${f}, trying browser" >&2 browser $f & fi ;; *.ps) gs "${f}" & ;; # sla, svg, and swf must go before *.s?? #*.svg) browser "${f}" & ;; *.svg) inkscape "${f}" & ;; *.sla) scribus "${f}" & ;; # must come before soffice *.odt|*.doc|*.docx) exec soffice "$f" &;; *.odp|*.p[po]t|*.pptx) exec soffice "$f" &;; *.ods|*.xls|*.xlsx) exec soffice "${f}" &;; *.odg) exec soffice "${f}" &;; *.s??|*.o??) exec soffice "${f}" &;; *.sla) scribus "${f}" & ;; *.txt) notepad "${f}" & ;; *.xcf) gimp "${f}" &;; *.Z) uncompress "${f}" & ;; *) echo "Not sure about ${f}'s format, trying browser..." >&2 browser ${f};; esac fi sleep 5 # to avoid killing the machine done
true
e81293974b4b182400d1463fa064191b8d28b135
Shell
petronny/aur3-mirror
/evernote-sdk-python-git/PKGBUILD
UTF-8
657
2.515625
3
[]
no_license
# Maintainer: Ng Oon-Ee <n g o o n e e DOT t a l k AT g m a i l DOT c o m> _pkgname=evernote-sdk-python pkgname=$_pkgname-git pkgver=r45.2afdbb3 pkgrel=1 pkgdesc="Evernote SDK for Python" arch=('any') url="https://github.com/evernote/evernote-sdk-python" license=('MIT') makedepends=('git' 'python2-setuptools') source=("$_pkgname::git://github.com/evernote/$_pkgname.git") md5sums=('SKIP') pkgver() { cd "$srcdir/$_pkgname" printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)" } package() { cd "$srcdir/$_pkgname" python2 setup.py install --root="$pkgdir/" rm -R "$pkgdir/usr/lib/python2.7/site-packages/thrift/" }
true
aa13eca4289f95c261ba28a9d3d9f5e213930001
Shell
AlexMurugesan/Unix-Assignment
/test.sh
UTF-8
1,811
3.21875
3
[]
no_license
#!/bin/bash cd ./assignment file='100 CC Records.csv' IFS="," m1=06 y1=2019 while read f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 do output=$(echo $f11 | sed ':a;s/\B[0-9]\{3\}\>/,&/;ta') output="$output USD" m="${f8:0:2}" y="${f8:3:7}" if [ $y -gt $y1 ] then ans='active' elif [$y -eq $y1 -a $m -ge $m1] then ans='active' else ans='expired' fi filename="$f4.$ans" if [ ! -d "$f2" ]; then mkdir ./$f2 cd ./$f2 if [ ! -d "$f3" ]; then mkdir ./$f3 cd ./$f3 touch $filename.txt cat <<-EOT>> $filename.txt Card Type Code: $f1 Card Type Full Name: $f2 Issuing Bank: $f3 Card Number: $f4 Card Holder's Name: $f5 CVV/CVV2: $f6 Issue Date: $f7 Expiry Date: $f8 Billing Date: $f9 Card PIN: $f10 Credit Limit: $output EOT cd .. else cd ./$f3 touch $filename.txt cat <<-EOT>> $filename.txt Card Type Code: $f1 Card Type Full Name: $f2 Issuing Bank: $f3 Card Number: $f4 Card Holder's Name: $f5 CVV/CVV2: $f6 Issue Date: $f7 Expiry Date: $f8 Billing Date: $f9 Card PIN: $f10 Credit Limit: $output EOT cd .. fi else cd ./$f2 if [ ! -d "$f3" ]; then mkdir ./$f3 cd ./$f3 touch $filename.txt cat <<-EOT>> $filename.txt Card Type Code: $f1 Card Type Full Name: $f2 Issuing Bank: $f3 Card Number: $f4 Card Holder's Name: $f5 CVV/CVV2: $f6 Issue Date: $f7 Expiry Date: $f8 Billing Date: $f9 Card PIN: $f10 Credit Limit: $output EOT cd .. else cd ./$f3 touch $filename.txt cat <<-EOT>> $filename.txt Card Type Code: $f1 Card Type Full Name: $f2 Issuing Bank: $f3 Card Number: $f4 Card Holder's Name: $f5 CVV/CVV2: $f6 Issue Date: $f7 Expiry Date: $f8 Billing Date: $f9 Card PIN: $f10 Credit Limit: $output EOT cd .. fi fi cd .. done < "$file"
true
57903f96b1a702f663eb6d4a3674475a25a14705
Shell
dotmpe/mkdoc
/tools/ci/test-specs.sh
UTF-8
772
2.71875
3
[]
no_license
#!/bin/sh set -e test -n "$ll" test -n "$MK_DIR" test -n "$MK_SHARE" test -n "$PREFIX" $ll attention $1 "Testing Core" cd $MK_DIR/test/example/core/keywords bats ../../../mkdoc-core.bats case "$(whoami)" in travis ) echo FIXME docutils testing at Travis ;; * ) test "$ENV_NAME" = "testing" && { # FIXME Travis failure.. $ll attention $1 "Testing Du" cd $MK_DIR/test/example/du/ bats ../../mkdoc-du.bats } $ll attention $1 "Testing Make" cd $MK_DIR/test/example/du/ bats ../../mkdoc-make.bats ;; esac test "$ENV_NAME" = "development" && { cd $MK_DIR/ bats test/mkdoc-make.bats } $ll attention $1 "Testing BM" cd $MK_DIR/test/example/du/ bats ../../mkdoc-bm.bats cd $MK_DIR
true
8229aaad335998e7ffda47d77346c6ea7110b871
Shell
fnord0/blackarch
/packages/python2-pdfminer/PKGBUILD
UTF-8
447
2.59375
3
[]
no_license
pkgname=python2-pdfminer pkgver=20110515 pkgrel=2 pkgdesc="A tool for extracting information from PDF documents" arch=('any') url='https://github.com/euske/pdfminer/' license=('MIT') depends=('python2') source=('https://github.com/euske/pdfminer/archive/2e8180ddee28cb9cdd06519614998a2432e8fced.tar.gz') md5sums=('4ad04124d127899653fdf854c2e74202') package() { cd "$srcdir"/pdfminer-* python2 setup.py install "--root=$pkgdir" --optimize=1 }
true
ba41f8c8c214d4b38789b1c212d814c00813ef63
Shell
yuizho/dotfiles
/.bin/install_packages_for_mac.sh
UTF-8
939
3.796875
4
[]
no_license
#!/usr/bin/env bash set -ue if [ "$#" -eq 0 ]; then echo "no arguments supplied to ${BASH_SOURCE[0]:-$0}" 1>&2 exit 1 fi if [[ ! "$OSTYPE" =~ ^darwin* ]]; then echo "unexpected OS ($OSTYPE) to install packages for mac" 1>&2 exit 1 fi if ! command -v brew &> /dev/null; then echo "=====> Homebrew is not installed!" echo "=====> start installing Homebrew" /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" fi echo "=====> start package instllation" dotfiles_dir="$1" /opt/homebrew/bin/brew bundle install --file "${dotfiles_dir}/.Brewfile" echo "=====> configure PATH for commands installed by brew!!" export PATH="/opt/homebrew/bin:$PATH" if ! command -v sdk &> /dev/null; then echo "=====> SDKMAN is not installed!" echo "=====> start installing SDKMAN" curl -s "https://get.sdkman.io" | bash fi echo "=====> package instllation is complete!!"
true
8e804c42a4847d801da5d33afad0701a9b43d0c8
Shell
SebNickel/command-line-scripts
/qp
UTF-8
341
3.265625
3
[]
no_license
#!/bin/bash # # Quickly add new events to pal # PAL_DIR=~/.pal PAL_EXTENSION=.pal PAL_FILE=seb while true; do case $1 in -p) PAL_FILE="$2" shift shift ;; *) break ;; esac done PAL_PATH=$PAL_DIR/$PAL_FILE$PAL_EXTENSION echo $@ >> $PAL_PATH
true
9afd9530e7039b805641b17fccfcc159e4eeb3b9
Shell
thomas-yanxin/PaddleNLP
/examples/machine_translation/transformer/deploy/cpp/run.sh
UTF-8
803
2.71875
3
[ "Apache-2.0" ]
permissive
#!/bin/bash # Whether to use mkl or gpu WITH_MKL=ON DEVICE='gpu' # Please set: # * Corresponding PaddlePaddle inference lib # * Corresponding CUDA lib # * Corresponding CUDNN lib # * Corresponding model directory # * Corresponding vocab directory # * Corresponding data directory LIB_DIR=YOUR_LIB_DIR CUDA_LIB_DIR=YOUR_CUDA_LIB_DIR CUDNN_LIB_DIR=YOUR_CUDNN_LIB_DIR MODEL_DIR=YOUR_MODEL_DIR # DATA_HOME is where paddlenlp stores dataset and can be returned by paddlenlp.utils.env.DATA_HOME. VOCAB_DIR=DATA_HOME/WMT14ende/WMT14.en-de/wmt14_ende_data_bpe/vocab_all.bpe.33708 DATA_DIR=DATA_HOME/WMT14ende/WMT14.en-de/wmt14_ende_data_bpe/newstest2014.tok.bpe.33708.en bash run_impl.sh ${LIB_DIR} transformer_e2e ${MODEL_DIR} ${WITH_MKL} ${DEVICE} ${CUDNN_LIB_DIR} ${CUDA_LIB_DIR} ${VOCAB_DIR} ${DATA_DIR}
true
128a7ad4ab5f8acb0092b6392b138f6638b16079
Shell
dionisos2/scripts
/save_ukratio_db
UTF-8
374
3.0625
3
[]
no_license
#!/bin/sh date=$(date "+%d-%m-%Y") echo "création de l’archive" wget http://ukratio.org/dump_db.php echo "récupération de l’archive" user=$(.psw -u ukratio.org) psw=$(.psw -p ukratio.org) curl -u $user:$psw "ftp://ftp.toile-libre.org/ukratio/ukratio.org-web/htdocs/ukratio.sql.gz" -o "/srv/http/sauvegardes_mysql/ukratio_$date.sql.gz" echo "sauvegarde réussie !"
true
4163184fb83ec26387499d045cecf1c80ed47a44
Shell
richardwhiteii/dynamodb-dump-restore
/dynamodb-dump
UTF-8
2,702
3.890625
4
[]
no_license
#!/usr/bin/env bash # Prerequisites: # install aws cli # install jq from https://stedolan.github.io/jq/ S3_FOLDER=${1:?Error: provide the S3 path as the first argument} TOPIC=${2:?Error: provide the SNS ARN to alert to} # arn:aws:sns:eu-central-1:165664414043:witan-to-slack TABLE_NAME_TEST=${3:?Error: provide at least one table name as the second argument} NUM_ITEMS=25 # do not change type aws if (($? > 0)); then printf '%s\n' 'No AWS CLI installed' >&2 exit 1 fi type jq if (($? > 0)); then printf '%s\n' 'No jq installed' >&2 exit 1 fi current_dir=$(pwd) cd $(mktemp -d) dynamo_scan() { local result="" if [ -z "$2" ] then result=$(aws dynamodb scan --table-name $1 --max-items $NUM_ITEMS --page-size $NUM_ITEMS) error=$? else result=$(aws dynamodb scan --table-name $1 --max-items $NUM_ITEMS --page-size $NUM_ITEMS --starting-token $2) error=$? fi echo $result | jq -c '[{Item: .Items[]} | {PutRequest: .}]' | jq -c "{\"$var\": .}" > $var-$file_num next_token=$(echo $result | jq .NextToken) } set_expected_page_count() { item_count=$(aws dynamodb describe-table --table-name $1 | jq .Table.ItemCount) remainder=$(expr $item_count % $NUM_ITEMS) expected_page_count=$(expr $item_count / $NUM_ITEMS) if [ ! -z $remainder ] then expected_page_count=$((expected_page_count+1)) fi } sns-alert() { local table=$1 local error=${@:2} result=$(aws sns publish --topic $TOPIC --message "{\"AlarmName\": \"the backup of $table\", \"NewStateValue\": \"failed\", \"NewStateReason\": \"error code $error\"}") msg_id=$(echo $result | jq .MessageId) echo "Error for table: $1. Msg_id: $msg_id. 
Msg: $error" } for var in "${@:3}" do echo "Exporting table $var" file_num=1 next_token="null" error=0 expected_page_count=0 set_expected_page_count $var echo "Expected page count: $expected_page_count" echo "Downloading segment: $file_num" dynamo_scan $var while [[ "$next_token" != "null" && "$error" -eq 0 ]]; do file_num=$((file_num+1)) sleep 1 echo "Downloading segment: $file_num, with error: $error, token: $next_token" dynamo_scan $var $next_token done if [ $expected_page_count -gt $file_num ] then error="Expected page count not reached. Expected $expected_page_count. Received: $file_num" fi if [ "$error" != 0 ] then sns-alert $var $error fi done echo "Uploading backup" aws s3 sync --acl=bucket-owner-full-control . s3://$S3_FOLDER/$(date +%Y-%m-%d-%H-%M-%S) error=$? if [ "$error" -ne 0 ]; then sns-alert S3 $error fi cd $current_dir
true
40920a073de05d15076bb6bc2dd8786505d9e831
Shell
hjfreyer/pictophone-be
/genmodel.sh
UTF-8
276
3.265625
3
[]
no_license
#!/bin/bash set -x genFile () { local f=$1 typescript-json-validator --collection "${f}" sed -i "/^export [{]/d" "${f%.*}.validator.ts" tsfmt -r "${f%.*}.validator.ts" } for f in $(find src/model/ -type f|grep -v "validator"); do genFile $f & done wait
true
48f8e62b7f28bdb7d6ba8952b227bf3f2608128f
Shell
tthoma24/sysadmin-challenge
/src/images/images/mysql/setup.sh
UTF-8
2,081
3.78125
4
[ "Apache-2.0" ]
permissive
#!/bin/bash # Edited by tthoma24 9/22/17 # A simple shell script to provision a dedicated MySQL docker container # Credit: https://coreos.com/quay-enterprise/docs/latest/mysql-container.html # # # * This file incorporates work covered by the following copyright and # * permission notice: # * # * Copyright (c) CoreOS, Inc. # * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. # * You may obtain a copy of the License at # * # * http://www.apache.org/licenses/LICENSE-2.0 # * # * Unless required by applicable law or agreed to in writing, software # * distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. # Pull the Oracle MySQL docker image docker pull mysql:5.7 set -e # Setup MySQL DB name and users: MYSQL_USER="dbuser" MYSQL_DATABASE="db" MYSQL_CONTAINER_NAME="mysql" # Creates a 32 character password from /dev/urandom, sanitizing output using tr, and taking only the first line using sed MYSQL_ROOT_PASSWORD=$(cat /dev/urandom | LC_CTYPE=C tr -dc 'a-zA-Z0-9!*=' | fold -w 32 | sed 1q) MYSQL_PASSWORD=$(cat /dev/urandom | LC_CTYPE=C tr -dc 'a-zA-Z0-9!*=' | fold -w 32 | sed 1q) echo "Start the Oracle MySQL container:" # It will provision a blank database upon first start. # This initial provisioning can take up to 30 seconds. docker \ run \ --detach \ --env MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} \ --env MYSQL_USER=${MYSQL_USER} \ --env MYSQL_PASSWORD=${MYSQL_PASSWORD} \ --env MYSQL_DATABASE=${MYSQL_DATABASE} \ --name ${MYSQL_CONTAINER_NAME} \ --publish 3306:3306 \ mysql:5.7; echo "Sleeping for 10 seconds to allow time for the DB to be provisioned:" for i in `seq 1 10`; do echo "." sleep 1 done echo "Database '${MYSQL_DATABASE}' running." 
echo " Username: ${MYSQL_USER}" echo " Password: ${MYSQL_PASSWORD}"
true
69a86543010388d81da722053bd271c9a9bc6c6f
Shell
ksanjeev/Scripts
/Kamal_Sunquest_Backups/shell scripting/rgrep.sh
UTF-8
236
3.515625
4
[]
no_license
#!/bin/ksh # # rgrep.sh - recursive grep (find and grep) # # John Roebuck - 01/11/99 if [ $# -ne 2 ] then echo " " echo "Usage : rgrep.sh path what-to-grep-for" echo " " exit 1 fi find $1 -exec grep -l "$2" {} \;
true
98571e24de8d47a4c44385e4b964fecb5201de2d
Shell
Njima1572/smaregi-app-starter
/cf-main/run.sh
UTF-8
795
2.921875
3
[ "MIT" ]
permissive
#!/bin/bash # ./run.sh ##### SETTINGS ##### IMAGE_NAME=smaregi-main PORT_NUM=4010 # Google Cloudの認証情報が入ったフォルダのパスを取得(どこで起動しても同じになるように) SCRIPT_DIR=$(cd $(dirname $0); pwd) CRED_DIR="${SCRIPT_DIR}/../_init/credentials" . $CRED_DIR/secrets.txt echo "PROJECT ID is ${PROJECT_ID}" echo "LOCAL ID is ${LOCAL_CONTRACT_ID}" ################ docker build -t $IMAGE_NAME . &&\ echo "launching container: "$IMAGE_NAME" port: "$PORT_NUM &&\ open http://localhost:$PORT_NUM &&\ docker run -p $PORT_NUM:8080 --env projectID=${PROJECT_ID} --env app_client_id=${QA_APP_CLIENT_ID} --env app_client_secret=${QA_APP_CLIENT_SECRET} --env local_contract_id=${LOCAL_CONTRACT_ID} -v $CRED_DIR:/tmp --name $IMAGE_NAME --rm -it $IMAGE_NAME
true
a89def23d93a429d4a2aecc09d8062d7471a1f44
Shell
gondalez/bitbar-plugins
/Tools/screen.2m.sh
UTF-8
1,440
3.65625
4
[]
no_license
#!/usr/bin/env bash # # <bitbar.title>Screen</bitbar.title> # <bitbar.version>v1.0</bitbar.version> # <bitbar.author>Jake Gage</bitbar.author> # <bitbar.author.github>Dispader</bitbar.author.github> # <bitbar.desc>Monitor, attach, and power detach from GNU Screen sessions.</bitbar.desc> # <bitbar.dependencies>screen</bitbar.dependencies> # <bitbar.image>https://user-images.githubusercontent.com/2664155/54407949-f5490280-46ad-11e9-86fc-9856d64b5a0e.png</bitbar.image> # <bitbar.abouturl>http://github.com/Dispader/screen-bitbar-plugin</bitbar.abouturl set -eu PATH="/usr/local/bin:$PATH" SCREEN_COMMAND=$(command -v screen) echo "💻" echo '---' SCREENS=$(${SCREEN_COMMAND} -list | grep -o '\s*.*\s*(.*)') if [[ -z ${SCREENS} ]]; then echo "no screens" else ( IFS=$'\n'; for LINE in $(screen -list); do if [[ ${LINE} =~ ^[[:space:]]+[[:digit:]]+\.(.+)[[:space:]]+(\(.*\))$ ]] then SCREEN_SESSION=${BASH_REMATCH[1]} SCREEN_SESSION_STATUS=${BASH_REMATCH[2]} if [[ "${SCREEN_SESSION_STATUS}" == "(Detached)" ]] then echo "⚫ ${SCREEN_SESSION} ${SCREEN_SESSION_STATUS} | refresh=true bash=${SCREEN_COMMAND} param1=-R param2=${SCREEN_SESSION}" else echo "🔵 ${SCREEN_SESSION} ${SCREEN_SESSION_STATUS}" echo "-- power detach | terminal=false refresh=true bash=${SCREEN_COMMAND} param1=-D param2=${SCREEN_SESSION} " fi fi done ) fi
true
cc2cffb626af77e3dd4ceefe07f7dae90eb21e6b
Shell
jtbaker/osm-extract
/src/bboxosm.sh
UTF-8
292
2.578125
3
[]
no_license
array_of_lines=("${(@f)$(cat bboxes.txt)}") for line in $array_of_lines do wget -O "osmfiles/$line.osm" "https://lz4.overpass-api.de/api/interpreter?data=way[\"highway\"~\"motorway|primary|trunk|motorway_link|trunk_link|primary_link|secondary|secondary_link\"]($line);(._;>;);out geom;" done
true
1553d1667383ff678f2f3da522cb4816f5b1698d
Shell
pallxk/dotfiles
/bash/.bashrc.d/bash-git-prompt.sh
UTF-8
305
2.734375
3
[ "MIT" ]
permissive
# https://github.com/magicmonty/bash-git-prompt if [ -f "/opt/homebrew/opt/bash-git-prompt/share/gitprompt.sh" ]; then __GIT_PROMPT_DIR=/opt/homebrew/opt/bash-git-prompt/share GIT_PROMPT_ONLY_IN_REPO=1 GIT_PROMPT_IGNORE_SUBMODULES=1 source /opt/homebrew/opt/bash-git-prompt/share/gitprompt.sh fi
true
e7fadae4e3212249e38470682d7f02089a813c73
Shell
YuBeomGon/TexttoSpeech
/TestMode/testmode.sh
UTF-8
1,928
2.71875
3
[]
no_license
#!/bin/bash # Copyright 2019 YuBeomGon # Apache 2.0 # bash part # first, make a transcription using number.txt and dental.txt # then paste id to the above transcription, example of id, male_speed_slow_pitch_m5_1, so make a text.scp file #python and google cloud API # second, using python and google texttospeech api, make a audio for various transcription # and wav.scp # bash and morfessor # third, do the segmentation with themorfessor.seg for text.scp, and update segmodel for ASR, and change wav to pcm format using ffmpeg # c++ # 4th, do the ASR for wav file, and make a decoded.txt(ked id + text) # 5th, compute the wer date=$(date +'%F-%H-%M') echo start at $date testflag=1 dir=~/TexttoSpeech voicedir=$dir/audio scriptdir=$dir/TestMode asrdir=$dir/Asr cd ~/kaldi/egs/zeroth/s5 . ./cmd.sh . ./path.sh . ./utils/parse_options.sh cd $dir required="$dir/wav.scp $dir/text.scp $asrdir/zeroth_morfessor.seg" for f in $required; do [ ! -f $f ] && echo "mkgraph.sh: expected $f to exist" && exit 1; done # bash part # first, make a transcription using number.txt and dental.txt # then paste id to the above transcription, example of id, male_speed_slow_pitch_m5_1, so make a text.scp file ##### make wav.scp and transcription for comparing ###### #python and google cloud API # second, using python and google texttospeech api, make a audio for various transcription # and wav.scp # bash and morfessor # third, do the segmentation with themorfessor.seg for text.scp, and update segmodel for ASR, and change wav to pcm format using ffmpeg $scriptdir/test_seg.sh $dir $asrdir #get decoded text data using kaldi test mode echo "test mode is doing" #decode and make decode.txt $scriptdir/web_decode_testmode.sh $dir $asrdir ~/kaldi/src/bin/compute-wer --text --mode=present ark:$dir/text.scp ark:$dir/decode.txt date=$(date +'%F-%H-%M') echo ends at $date exit 1 #gdb ../../../src/online2bin/beom-test core
true
ba155a41f3355f640053966393d6943f55e0f1b3
Shell
j18e/.dotfiles
/.zshrc
UTF-8
3,470
2.609375
3
[]
no_license
# If you come from bash you might have to change your $PATH. # export PATH=$HOME/bin:/usr/local/bin:$PATH # Path to your oh-my-zsh installation. export ZSH="$HOME/.oh-my-zsh" # we're not using this oh-my-zsh feature - PROMPT is set manually in this file ZSH_THEME="" # Uncomment the following line to use case-sensitive completion. # CASE_SENSITIVE="true" # Uncomment the following line to use hyphen-insensitive completion. # Case-sensitive completion must be off. _ and - will be interchangeable. HYPHEN_INSENSITIVE="true" DISABLE_AUTO_UPDATE="true" # Uncomment the following line if pasting URLs and other text is messed up. # DISABLE_MAGIC_FUNCTIONS=true # Uncomment the following line to disable colors in ls. # DISABLE_LS_COLORS="true" # Uncomment the following line to disable auto-setting terminal title. # DISABLE_AUTO_TITLE="true" # Uncomment the following line to enable command auto-correction. # ENABLE_CORRECTION="true" # Uncomment the following line to display red dots whilst waiting for completion. # COMPLETION_WAITING_DOTS="true" # Uncomment the following line if you want to disable marking untracked files # under VCS as dirty. This makes repository status check for large repositories # much, much faster. # DISABLE_UNTRACKED_FILES_DIRTY="true" # Uncomment the following line if you want to change the command execution time # stamp shown in the history command output. # You can set one of the optional three formats: # "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd" # or set a custom format using the strftime function format specifications, # see 'man strftime' for details. # HIST_STAMPS="mm/dd/yyyy" # Would you like to use another custom folder than $ZSH/custom? 
# ZSH_CUSTOM=/path/to/new-custom-folder # Standard plugins in ~/.oh-my-zsh/plugins/* # Custom plugins added to ~/.oh-my-zsh/custom/plugins/ plugins=( golang vi-mode ) source $ZSH/oh-my-zsh.sh source $HOME/.config/base16-shell/scripts/base16-oceanicnext.sh # colorscheme source $HOME/.zsh_aliases export EDITOR=vim export LANG="en_US.UTF-8" export LC_ALL="en_US.UTF-8" export PATH="/usr/local/sbin:$PATH" # homebrew's sbin export PATH="$PATH:$HOME/.bin" # golang export GOPATH=$HOME/go export PATH="$PATH:$GOPATH/bin" # rust export PATH="$HOME/.cargo/bin:$PATH" # helm export HELM_HOME=$HOME/.helm export PATH="/opt/homebrew/opt/helm@2/bin:$PATH" # fzf [ -f ~/.fzf.zsh ] && source ~/.fzf.zsh # kubectl [[ -f ~/.config/kubectl-zsh-completion ]] || kubectl completion zsh > ~/.config/kubectl-zsh-completion source ~/.config/kubectl-zsh-completion [[ -f "$HOME/.env" ]] && source $HOME/.env [[ -d "$HOME/.rvm/bin" ]] && export PATH="$HOME/.rvm/bin:$PATH" [[ -d "/opt/finnbuild/latest/bin" ]] && export PATH="/opt/finnbuild/latest/bin:$PATH" [[ -d "/usr/local/opt/openjdk/bin" ]] && export PATH="/usr/local/opt/openjdk/bin:$PATH" # python [ -f ~/.pythonrc ] && export PYTHONSTARTUP=$HOME/.pythonrc # artifactory [ -f ~/.artifactory-user ] && export ARTIFACTORY_USER=$(cat ~/.artifactory-user) [ -f ~/.artifactory-apikey ] && export ARTIFACTORY_PWD=$(cat ~/.artifactory-apikey) # prompt which shell-prompt >> /dev/null || go install github.com/j18e/shell-prompt PROMPT='$(shell-prompt -exit-code $? -zsh)' # rust [[ -f "~/.cargo/env" ]] && source ~/.cargo/env # pyenv eval "$(pyenv init -)" export PATH="$HOME/.pyenv/shims:$PATH" # cli syntax highlighting - must be at end of file source $HOME/.config/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
true
709024c62fbb3c3b10e436ab94e0f29016d5eef5
Shell
petronny/aur3-mirror
/mingw-w64-enet/PKGBUILD
UTF-8
1,395
3
3
[]
no_license
# Maintainer: Naelstrof <naelstrof@gmail.com> pkgname=mingw-w64-enet pkgver=1.3.12 pkgrel=3 pkgdesc="A free, open source, portable framework for networking application development (mingw-w64)" arch=('any') url="http://enet.bespin.org/" license=('MIT') makedepends=('mingw-w64-gcc') depends=('mingw-w64-crt') options=('!strip' '!buildflags' 'staticlibs') source=("http://enet.bespin.org/download/enet-${pkgver}.tar.gz") md5sums=('2b581600a589553c1e7684ad663f27a8') _architectures="i686-w64-mingw32 x86_64-w64-mingw32" build() { #required else the compile fails. cp -rf ${srcdir}/enet-${pkgver}/include/enet ${srcdir}/enet-${pkgver} unset LDFLAGS for _arch in ${_architectures}; do mkdir -p ${srcdir}/enet-${pkgver}-build-${_arch} cd ${srcdir}/enet-${pkgver}-build-${_arch} ${srcdir}/enet-${pkgver}/configure --prefix=/usr/${_arch} --build=$CHOST --host=${_arch} make done } package() { for _arch in ${_architectures}; do cd ${srcdir}/enet-${pkgver}-build-${_arch} make DESTDIR=${pkgdir} install mkdir -p "${pkgdir}/usr/${_arch}/share/licenses/${pkgname}" cp "${srcdir}/enet-${pkgver}/LICENSE" "${pkgdir}/usr/${_arch}/share/licenses/${pkgname}" # no need to strip?? #${_arch}-strip --strip-unneeded "$pkgdir"/usr/${_arch}/bin/*.dll #${_arch}-strip -g "$pkgdir"/usr/${_arch}/lib/*.a done }
true
1012fa4716595f266715d5e221c102525b2e43d6
Shell
feeloor/azure-static-website-deploy
/entrypoint.sh
UTF-8
1,671
3.328125
3
[ "MIT" ]
permissive
#!/bin/sh set -e if [ -z "$AZURE_SUBSCRIPTION_ID" ]; then echo "AZURE_SUBSCRIPTION_ID is not set. Quitting." exit 1 fi if [ -z "$AZURE_CLIENT_ID" ]; then echo "AZURE_CLIENT_ID is not set. Quitting." exit 1 fi if [ -z "$AZURE_SECRET" ]; then echo "AZURE_SECRET is not set. Quitting." exit 1 fi if [ -z "$AZURE_TENANT_ID" ]; then echo "AZURE_TENANT_ID is not set. Quitting." exit 1 fi if [ -z "$AZURE_STORAGE_ACCOUNT_NAME" ]; then echo "AZURE_STORAGE_ACCOUNT_NAME is not set. Quitting." exit 1 fi if [ -z "$AZURE_INDEX_DOCUMENT_NAME" ]; then echo "AZURE_INDEX_DOCUMENT_NAME is not set. Quitting." exit 1 fi if [ -z "$SOURCE_DIR" ]; then echo "SOURCE_DIR is not set. Quitting." exit 1 fi # Login az login --service-principal --username ${AZURE_CLIENT_ID} --password ${AZURE_SECRET} --tenant ${AZURE_TENANT_ID} # Set subscription id az account set --subscription ${AZURE_SUBSCRIPTION_ID} # Enable Static Website if [ -z "$AZURE_ERROR_DOCUMENT_NAME" ]; then az storage blob service-properties update --account-name ${AZURE_STORAGE_ACCOUNT_NAME} --static-website --index-document ${AZURE_INDEX_DOCUMENT_NAME} else az storage blob service-properties update --account-name ${AZURE_STORAGE_ACCOUNT_NAME} --static-website --404-document ${AZURE_ERROR_DOCUMENT_NAME} --index-document ${AZURE_INDEX_DOCUMENT_NAME} fi # Upload source to storage if [ "$FORCE_OVERWRITE" = 'true' ]; then echo "Using overwrite mode" az storage blob upload-batch -s ${SOURCE_DIR} -d \$web --account-name ${AZURE_STORAGE_ACCOUNT_NAME} --overwrite else az storage blob upload-batch -s ${SOURCE_DIR} -d \$web --account-name ${AZURE_STORAGE_ACCOUNT_NAME} fi
true
4cd6c88eab9866d11f0e5316f7e7340c04a087e0
Shell
etsauer/eap6_build_bash
/build.sh
UTF-8
6,261
3.8125
4
[]
no_license
#!/bin/bash #set -x BUILD_NAME=jboss_standalone_build SCRIPTS_BASE=/opt/jboss/webhosting/scripts BINARY_BASE=/opt/jboss/60 BINARY_VERSION=jboss-eap-6.0 JBOSS_CVS_PROJECT=jboss JBOSS_TMP_DIR=/tmp/jboss_install jboss_user=`stat -c %U /opt/jboss` build_latest() { local dest=$1 echo "Pulling down build from $JBOSS_INSTALL_ROOT. This may take a few minutes." #checkout &> build.out #OLD WAY pull_jboss #NEW WAY #remove all CVS directories #echo "Cleaning up CVS insertions..." #find /tmp/$BUILD_NAME -depth -name 'CVS' -exec rm -rf '{}' \; echo "Building package..." package $dest } #Deprecated: CVS proved not to be a viable option for storing jars, so we switched to using rsync (below) checkout() { #Clean out old jboss builds rm -rf /tmp/$JBOSS_CVS_PROJECT rm -rf /tmp/$BUILD_NAME local current_dir=`pwd` cd /tmp cvs co $JBOSS_CVS_PROJECT mv $JBOSS_CVS_PROJECT $BUILD_NAME cd $current_dir } pull_jboss() { #Clean out old jboss builds rm -rf $JBOSS_TMP_DIR rm -rf /tmp/$BUILD_NAME mkdir $JBOSS_TMP_DIR rsync -av -e ssh $JBOSS_INSTALL_ROOT/binaries $JBOSS_TMP_DIR 2>1 1>build.out rsync -av -e ssh $JBOSS_INSTALL_ROOT/scripts $JBOSS_TMP_DIR 2>1 1>build.out mv $JBOSS_TMP_DIR /tmp/$BUILD_NAME } push_jboss() { # Clean out jboss dir rm -rf ~/jboss mkdir -p ~/jboss/{binaries,scripts} # Take snapshot of current jboss project echo "Taking snapshot of most current version..." build_latest ~/jboss # Copy in current state of jboss build echo "Pushing new version.." 
cp -r $BINARY_BASE/{jboss-eap-6.0,jboss-eap-6.0.1} ~/jboss/binaries/ cp -r $SCRIPTS_BASE/* ~/jboss/scripts/ echo "rsync -av --progress -e ssh ~/jboss/ $JBOSS_INSTALL_ROOT" rsync -av --progress -e ssh ~/jboss/ $JBOSS_INSTALL_ROOT 2>1 1>build.out } extract () { local tarball=$1 echo "Extracting $tarball" # run the rest as jboss user sesu - $jboss_user <<EOFADD tar xzf $tarball -C /tmp mkdir -p $BINARY_BASE cp -R /tmp/$BUILD_NAME/binaries/* $BINARY_BASE mkdir -p $SCRIPTS_BASE cp -R /tmp/$BUILD_NAME/scripts/* $SCRIPTS_BASE rm -rf /tmp/$BUILD_NAME EOFADD make_links } make_links() { find /opt/jboss/ -maxdepth 2 -name "jboss-eap-*" | while read line do sesu - $jboss_user <<EOFADD mkdir -p $line/modules/org/jboss/as/web/main/lib/linux-x86_64 ln -s /usr/lib64/libcrypto.so.10 $line/modules/org/jboss/as/web/main/lib/linux-x86_64/libcrypto.so ln -s /usr/lib64/libapr-1.so.0 $line/modules/org/jboss/as/web/main/lib/linux-x86_64/libapr-1.so ln -s /usr/lib64/libssl.so.10 $line/modules/org/jboss/as/web/main/lib/linux-x86_64/libssl.so EOFADD done } configure_app-group_install() { appgroup=$1 properties_files=$2 echo "Configuring $appgroup using $properties_files" sesu - $jboss_user <<EOFADD # Moving SSL directory out of /opt/$appgroup. May as well do it here. 
-NTM mkdir -p /opt/jboss/apps/$appgroup/ssl cp $properties_files/* /opt/jboss/apps/$appgroup/ EOFADD } install_app-group() { appgroup=$1 build_home=$JBOSS_BUILD_HOME sesu - $jboss_user <<EOFADD $build_home/bin/install_app-group.sh $appgroup $build_home EOFADD } clean() { appgroup=$1 rm -R $SCRIPTS_BASE rm -R /opt/jboss/apps/$appgroup rm -R /tmp/$BUILD_NAME rm -R $BINARY_BASE/$BINARY_VERSION } package() { local target=$1 # Assumes a checkout() to /tmp local current_dir local filename=JBoss_$(date +%Y%m%d-%H%M%S).tgz cd /tmp tar czf $target/$filename $BUILD_NAME chmod a+r $target/$filename echo "Build created: $target/$filename" #clean up /tmp rm -rf $BUILD_NAME cd $current_dir } show_help() { echo "RUN AS OWN USER -- SCRIPT WILL sesu AS NEEDED." echo "Usage: build.sh --package {/path/to/tarball/directory}|--extract {tarball}|--setup {app-group} {/path/to/properties-files/dir}|--install {app-group}" echo echo "--build_tar - Creates JBoss tarball of latest stable binaries and scripts." echo "--extract - Extracts JBoss tarball and installs binaries and scripts." echo "--setup - Creates file system for app group and places installation properties files in preparation for installation." echo "--install - Kicks off installation of app group and all servers." echo echo "JBOSS BUILD INSTRUCTIONS" echo " 1. Run $(basename $0) --build_tar {/where/to/place/archive/} (i.e. $(basename $0) --build_tar ~) to create a tarball of binaries and scripts and place it in the specified directory." echo " 2. Place build script, tarball, and all app group and server properties files (filled out with desired configuration) on server or in remote location accessible by jboss id." echo " 3. Run $(basename $0) --extract {tarball} (i.e. $(basename $0) --extract JBoss_$(date +%Y%m%d-%H%M%S).tgz) to extract and place binaries/scripts." echo " 4. Run $(basename $0) --setup {app-group} {/path/to/properties-files/dir} (i.e. 
$(basename $0) --setup tad ~/jboss_build/sample_installs/tad) to create app-group file system and place properties files in the expected location." echo " 5. Run $(basename $0) --install {app-group} (i.e. $(basename $0) --install tad) to install and configure app-group and all subsequent servers." } if [[ -z "$JBOSS_BUILD_HOME" ]] then echo "Please set JBOSS_BUILD_HOME to location of jboss_build directory. No trailing slash." echo echo "Example: export JBOSS_BUILD_HOME=~/jboss_build" exit 1 fi if [[ -z "$JBOSS_INSTALL_ROOT" ]] then echo "Please set JBOSS_INSTALL_ROOT to location of the jboss install directory. No trailing slash." echo echo "Example: export JBOSS_INSTALL_ROOT=[username]@host:/opt/webhosting/jboss" exit 1 fi if [[ -z "$@" ]] then show_help exit fi if ! options=$(getopt -o e:b:s::i:c:hm -l extract:,build_tar:,setup::,install:,help,clean:,push -- "$@") then echo "Use --help for usage information." exit 1 fi while [ $# -gt 0 ] do case $1 in -h|--help) show_help ;; -e|--extract) extract $2 ; shift;; -b|--build_tar) build_latest $2 ; shift;; -s|--setup) configure_app-group_install $2 $3 ; shift;; -i|--install) install_app-group $2; shift;; -c|--clean) clean $2 ; shift;; -m) make_links ; shift;; --push) push_jboss ; shift;; (--) shift; break;; (-*) echo "$0: error - unrecognized option $1" 1>&2; exit 1;; (*) break;; esac shift done
true
2a7b366899b04e878116e607dafa80b24b94aa06
Shell
delkyd/alfheim_linux-PKGBUILDS
/fairroot/PKGBUILD
UTF-8
1,625
2.859375
3
[]
no_license
# Maintainer: Bastian Löher <b.loeher@gsi.de> pkgname=fairroot pkgver=17.03 fairsoftver=mar17-1 pkgrel=8 pkgdesc="Analysis framework based on root provided by GSI/FAIR." arch=('x86_64') url="http://fairroot.gsi.de" license=('LGPL3') groups=() makedepends=('cmake' 'clang' 'bison' 'flex' 'gcc-fortran' 'subversion' 'git' 'curl') depends=( 'fairsoft' ) provides=() conflicts=() replaces=() backup=() options=('!emptydirs' 'staticlibs' 'libtool' '!strip') install=fairroot.install changelog= source=("https://github.com/FairRootGroup/FairRoot/archive/v-${pkgver}.tar.gz" 'fairroot.install' ) noextract=() md5sums=('77ab94317667c2d4339da992d9389e61' 'd2d2d7b11b308120382fba7e32c5268a') # Do not compress the package for installation # PKGEXT='.pkg.tar' # Compress using lightweight gzip PKGEXT='.pkg.tar.gz' prepare() { # Path to fairsoft installation export SIMPATH=/opt/fairsoft/${fairsoftver} cd ${srcdir}/FairRoot-v-${pkgver} # Execute cmake cd ${srcdir} [ -d build ] || mkdir build cd build sed -i "s/std=c++11/std=c++1y/" ../FairRoot-v-17.03/CMakeLists.txt sed -i "s/std=c++11/std=c++1y/" ../FairRoot-v-17.03/cmake/checks/CMakeLists.txt sed -i "s/std=c++11/std=c++1y/" ../FairRoot-v-17.03/cmake/modules/CheckCXX11Features.cmake cmake \ -DUSE_DIFFERENT_COMPILER=TRUE \ -DCMAKE_INSTALL_PREFIX="/opt/fairroot/v-${pkgver}" \ -DROOT_DIR=${SIMPATH} \ ../FairRoot-v-${pkgver} : } build() { # Path to fairsoft installation export SIMPATH=/opt/fairsoft/${fairsoftver} cd ${srcdir}/build # Make c++14 mandatory make -j$(nproc) } package() { cd ${srcdir}/build make DESTDIR="${pkgdir}/" install }
true
66c65c195b19b6722d7d4bcf712a9e288e7454a3
Shell
sbissantz/dotfiles_manjaro
/.bash_aliases
UTF-8
763
2.921875
3
[]
no_license
# Overwrite/delete warnings # alias rm='rm -i' alias mv='mv -i' alias cp='cp -i' alias cp='cp -i' # (Colored) ls # alias l='ls -F --color=auto' alias ls='ls -F' alias ll='ls -lhF --color=auto' alias la='ls -alhF --color=auto' # cd # alias ..='cd ..' alias ...='cd ../../../' alias ....='cd ../../../../' alias .....='cd ../../../../' # (Colored) grep # alias grep='grep --color=auto' alias egrep='egrep --color=auto' alias fgrep='fgrep --color=auto' # clear # alias c='clear' # tar # alias untar='tar -zxvf' alias tarit='tar -zcvf' # wget # alias wget='wget -c ' # R # alias R="R --quiet" alias R-devel="R-devel --quiet" # IP # # external ip alias ipe='curl ipinfo.io/ip' # local ip alias ipi='ipconfig getifaddr en0' # ping # alias ping='ping -c 5'
true
8d399276cada28b9af850085ba429460bcbdb7dd
Shell
jasonchoimtt/dotfiles
/pandoc/mark
UTF-8
589
3.328125
3
[ "MIT" ]
permissive
#!/usr/bin/env bash set -e OUTPUT="${1%.*}.pdf" if [[ "$#" == "1" ]]; then IS_DEFAULT=1 fi OPTIONS=( "--from=markdown+pipe_tables${IS_DEFAULT+-latex_macros}" "--pdf-engine=xelatex" "--filter=$HOME/.dotfiles/pandoc/filters/main.py" "--output=$OUTPUT" ) PANDOC=/usr/local/bin/pandoc if ! [[ -x "$PANDOC" ]]; then PANDOC=$(which pandoc) fi if which reattach-to-user-namespace > /dev/null 2>&1; then # Xelatex needs user namespace to resolve system fonts reattach-to-user-namespace "$PANDOC" "${OPTIONS[@]}" "$@" else "$PANDOC" "${OPTIONS[@]}" "$@" fi
true
4db61c3e42cb0ff66d04604fecde57ed5d7f5979
Shell
moonboots/dotfiles
/scripts/git-local-clone
UTF-8
639
3.796875
4
[]
no_license
#!/bin/sh # Clones a local repository and adds original repos remotes set -o errexit local_repo=$(readlink -m $1) new_repo_name=$2 git clone $local_repo $new_repo_name pushd $new_repo_name git remote rename origin local pushd $local_repo # Regex below tranforms the git remote -v output # # origin git://github.com/twitter/bootstrap.git (fetch) # origin git://github.com/twitter/bootstrap.git (push) # # into # # git://github.com/twitter/bootstrap.git origin # # Doesn't handle multiple remotes yet remote_urls=$(git remote -v | sed 's/^\(\S\+\)\s\+\(\S\+\).*$/\1 \2/g' | uniq) popd git remote add --fetch $remote_urls
true
3ffe84f2bffc1b3c71dbe876b92326214c5632af
Shell
Chaos-Monkey-Island/CommonCrawlScanner
/cc.sh
UTF-8
1,710
3.96875
4
[]
no_license
#!/bin/bash echo "-----------------------------------------------------" echo "Commoncrawl url searcher by nukedx" echo "-----------------------------------------------------" if [[ $# -eq 0 ]]; then echo "Error: You didn't enter a target" echo "Usage: $0 <target>" echo "Example: $0 google.com" exit fi declare domain=$1 declare -a results declare outputStyle="output=json&fl=url" if [[ $# -eq 2 ]]; then outputStyle="output=json&fl=url&filter=$2" echo "Applying special filter: $2" fi echo "Started commoncrawl search for $domain" echo "Getting database infos" declare crawldatabases=$(curl -s http://index.commoncrawl.org/collinfo.json) declare dbamount=$(echo $crawldatabases | jq -c '.[]["cdx-api"]' | wc -w) echo "$dbamount active databases found" for (( c=1 ; c<dbamount; c++)) do currentData=$(echo $crawldatabases | jq -c '.['$c']') currentdatabase=$(echo $currentData | jq -c '.["name"]' | sed 's/\"//g') echo "Searching $domain on $currentdatabase" targeturl=$(echo $currentData | jq -c '.["cdx-api"]' | sed 's/\"//g') fetchedData=$(curl -s "$targeturl?url=*.$domain/*&$outputStyle" | jq -c '. | select(.url != null) | .url') if [[ ! -z "$fetchedData" ]]; then results+="$fetchedData" fi done echo "-----------------------------------------------------" echo "Fetched all databases for $domain" filename="CC-$domain-$(date "+%Y.%m.%d-%H.%M").txt" echo "$results" | jq . | sed 's/\"//g' | sort -u > $filename count=$(wc -l $filename | awk {'print $1'}) echo "Saved output on: $filename" echo "Total urls found: $count" echo "-----------------------------------------------------"
true
c713d4345f6455fd12db22b37204c58bf3a606b5
Shell
AndrewDaws/dotfiles
/scripts/update.sh
UTF-8
14,161
4.15625
4
[ "MIT" ]
permissive
#!/bin/bash # # Unix full post-install script if [[ -f "$(dirname "$(readlink -f "${0}")")/.functions" ]]; then # shellcheck disable=SC1090 # shellcheck disable=SC1091 source "$(dirname "$(readlink -f "${0}")")/.functions" else echo "File does not exist!" echo " $(dirname "$(readlink -f "${0}")")/.functions" exit "1" fi repo_update() { # Declare local variables local current_script local new_script local current_checksum local new_checksum local new_flag local return_code # Initialize local variables current_script="$(realpath "${0}")" new_script="${HOME}/.dotfiles/scripts/update.sh" current_checksum="" new_checksum="" new_flag="1" return_code="1" print_stage "Updating dotfiles repo" # Check if GitHub domain is valid and accessible abort_check_connectivity "github.com" abort_not_installed "git" abort_directory_dne "${HOME}/.dotfiles" # Compute this install script checksum current_checksum="$(checksum_file "${current_script}")" # Pull latest repo print_step "Updating dotfiles repo" git -C "${HOME}/.dotfiles" pull # Compute new update script checksum new_checksum="$(checksum_file "${new_script}")" # Compare this and new install script checksums if [[ "${current_script}" == "${new_script}" ]]; then if [[ "${current_checksum}" -eq "${new_checksum}" ]]; then new_flag="0" fi fi # Check if install script was new or updated if [[ "${new_flag}" -ne 0 ]]; then # Ensure new install script can execute chmod +x "${new_script}" # Run new install script print_step "Running updated install script" "${new_script}" return_code="${?}" # Delete current install script if no error if [[ "${return_code}" -eq 0 ]]; then if [[ "${current_script}" != "${new_script}" ]]; then print_step "Deleting current install script" rm -f "${current_script}" fi fi # Repeat return code and exit exit_script "${return_code}" fi # Set file permissions abort_file_dne "${HOME}/.dotfiles/scripts/set_permissions.sh" if ! 
"${HOME}/.dotfiles/scripts/set_permissions.sh"; then abort_script "Script ${HOME}/.dotfiles/scripts/set_permissions.sh Failed!" fi } update_packages() { print_step "Update repository information" sudo apt update -qq print_step "Perform system upgrade" sudo apt dist-upgrade -y print_step "Installing dependencies" sudo apt install -f print_step "Cleaning packages" sudo apt clean print_step "Autocleaning packages" sudo apt autoclean print_step "Autoremoving & purging packages" sudo apt autoremove --purge -y } initial_setup() { print_stage "Initial setup" update_packages print_step "Installing repository tool" sudo apt install -y --no-install-recommends software-properties-common } headless_setup() { # Declare local variables local package_list # Initialize local variables package_list=( coreutils man-db sed gawk file tree openssh-server cron zsh tmux curl vim nano nmap htop xclip apt-utils ncdu ) print_stage "Headless applications setup" # Check if GitHub domain is valid and accessible abort_check_connectivity "github.com" # print_step "Adding repositories" # Install packages for listIndex in "${package_list[@]}"; do # Check if package already installed if ! 
dpkg-query -W -f='${Status}' "${listIndex}" 2>/dev/null | grep -c "ok installed" &>/dev/null; then print_step "Installing ${listIndex}" sudo apt install -y --no-install-recommends "${listIndex}" else print_step "Skipped: Installing ${listIndex}" fi done # Install Tmux Plugin Manager # git_update "https://github.com/tmux-plugins/tpm" "${HOME}/.tmux/plugins/tpm" "Tmux Plugin Manager" # Install Zinit Framework if file_exists "${HOME}/.local/share/zinit/bin/zinit.zsh"; then print_step "Updating zinit" zsh --interactive -c -- "@zinit-scheduler burst zinit self-update" zsh --interactive -c -- "@zinit-scheduler burst zinit update --all --no-pager --parallel" else print_step "Installing zinit" git_update "https://github.com/zdharma-continuum/zinit.git" "${HOME}/.local/share/zinit/bin" "zinit" zsh --interactive -c -- "@zinit-scheduler burst" fi print_step "Installing headless application configurations" rm -f "${HOME}/.bash_history" rm -f "${HOME}/.bash_logout" rm -f "${HOME}/.bashrc" # Create links abort_file_dne "${HOME}/.dotfiles/scripts/create_links.sh" if ! "${HOME}/.dotfiles/scripts/create_links.sh" --headless; then abort_script "Script ${HOME}/.dotfiles/scripts/create_links.sh Failed!" fi } desktop_setup() { # Declare local variables local package_list # Initialize local variables package_list=( libegl1-mesa-dev libssl-dev gcc g++ make cmake build-essential firefox meld pkg-config libfreetype6-dev libfontconfig1-dev libxcb-xfixes0-dev python3 gtkhash ) print_stage "Desktop applications setup" # Check if GitHub domain is valid and accessible abort_check_connectivity "github.com" # print_step "Adding repositories" # Install packages for listIndex in "${package_list[@]}"; do # Check if package already installed if ! 
dpkg-query -W -f='${Status}' "${listIndex}" 2>/dev/null | grep -c "ok installed" &>/dev/null; then print_step "Installing ${listIndex}" sudo apt install -y --no-install-recommends "${listIndex}" else print_step "Skipped: Installing ${listIndex}" fi done # Configure Firefox to default browser if is_installed "firefox"; then if is_installed "xdg-settings"; then if [[ "$(xdg-settings get default-web-browser)" != "firefox.desktop" ]]; then xdg-settings set default-web-browser firefox.desktop fi fi fi # Installis_installed Rustup if not_installed "rustup"; then curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y else rustup update fi # Rust environment handling if file_exists "${HOME}/.cargo/env"; then # shellcheck disable=SC1090 source "${HOME}/.cargo/env" # Add cargo tools to path if not already in the path if [[ "${PATH}" != *"${HOME}/.cargo/bin"* ]]; then if directory_exists "${HOME}/.cargo/bin"; then export PATH="${HOME}/.cargo/bin:${PATH}" fi fi fi # @todo Improve Cargo Package Updating # @body Find a way to only update cargo packages if outdated, rather than full reinstall. 
# Install Cargo Applications # Install Alacritty if not_installed "alacritty"; then print_step "Installing alacritty" cargo install alacritty # Create desktop entry wget https://raw.githubusercontent.com/alacritty/alacritty/master/extra/linux/Alacritty.desktop root_copy "${PWD}/Alacritty.desktop" "/usr/local/bin/Alacritty.desktop" sudo chmod u=rwx "/usr/local/bin/Alacritty.desktop" mkdir -p "${HOME}/.local/share/applications" cp "${PWD}/Alacritty.desktop" "${HOME}/.local/share/applications/Alacritty.desktop" chmod u=rwx "${HOME}/.local/share/applications/Alacritty.desktop" rm -f "${PWD}/Alacritty.desktop" wget https://raw.githubusercontent.com/alacritty/alacritty/master/extra/logo/alacritty-term.svg root_copy "${PWD}/alacritty-term.svg" "/usr/share/pixmaps/Alacritty.svg" rm -f "${PWD}/alacritty-term.svg" sudo desktop-file-install "/usr/local/bin/Alacritty.desktop" sudo update-desktop-database # Set as default terminal (Ctrl + Alt + T) # if is_installed "update-alternatives"; then # sudo update-alternatives --quiet --install "/usr/bin/x-terminal-emulator" x-terminal-emulator "$(command -v -- alacritty)" 50 # sudo update-alternatives --quiet --skip-auto --config x-terminal-emulator # fi if is_installed "gsettings"; then gsettings set org.gnome.desktop.default-applications.terminal exec 'alacritty' gsettings set org.gnome.desktop.default-applications.terminal exec-arg '' fi else print_step "Skipped: Installing alacritty" fi # @todo File Manager Installation # @body Determine and automate a file manager (like Double Commander) installation. if not_installed "code"; then print_step "Installing vs code" rm -f "${PWD}/vscode.deb" wget "https://code.visualstudio.com/sha/download?build=stable&os=linux-deb-x64" -O "${PWD}/vscode.deb" sudo apt install "${PWD}/vscode.deb" rm -f "${PWD}/vscode.deb" else print_step "Skipped: Installing vs code" fi # @todo VS Code Config and Extensions # @body Export VS Code settings and installation of extensions. 
print_step "Installing desktop fonts" # Install FiraCode if ! find "${HOME}/.local/share/fonts/NerdFonts/Fura Code"* >/dev/null; then "${HOME}/.dotfiles/scripts/install_firacode.sh" else print_step "Skipped: ${HOME}/.dotfiles/scripts/install_firacode.sh" fi print_step "Installing desktop configurations" # Create links abort_file_dne "${HOME}/.dotfiles/scripts/create_links.sh" if ! "${HOME}/.dotfiles/scripts/create_links.sh" --desktop; then abort_script "Script ${HOME}/.dotfiles/scripts/create_links.sh Failed!" fi # Install term environment abort_not_installed "infocmp" abort_not_installed "tic" if sudo infocmp "xterm-256color-italic" &>/dev/null; then print_step "Skipped: Installing terminfo xterm-256color-italic" else print_step "Installing terminfo xterm-256color-italic" sudo tic "${HOME}/.dotfiles/terminfo/xterm-256color-italic.terminfo" fi if sudo infocmp "tmux-256color-italic" &>/dev/null; then print_step "Skipped: Installing terminfo tmux-256color-italic" else print_step "Installing terminfo tmux-256color-italic" sudo tic "${HOME}/.dotfiles/terminfo/tmux-256color-italic.terminfo" fi # Configure SSH config if directory_dne "${HOME}/.ssh"; then mkdir -p "${HOME}/.ssh" fi chmod 700 "${HOME}/.ssh" if line_dne "${HOME}/.ssh/config" "Include ~/.dotfiles/ssh/config_global"; then print_step "Including global SSH config" prepend_line "${HOME}/.ssh/config" "Include ~/.dotfiles/ssh/config_global" "" else print_step "Skipped: Including global SSH config" fi chmod 644 "${HOME}/.ssh/config" # Create Global Git Config if file_exists "${HOME}/.gitconfig"; then chmod 664 "${HOME}/.gitconfig" fi # shellcheck disable=SC2088 if [[ "$(git config --global core.attributesfile)" != "~/.dotfiles/git/.gitattributes_global" ]]; then print_step "Including global git attributes" git config --global --add core.attributesfile "~/.dotfiles/git/.gitattributes_global" else print_step "Skipped: Including global git attributes" fi # shellcheck disable=SC2088 if [[ "$(git config --global 
core.excludesfile)" != "~/.dotfiles/git/.gitignore_global" ]]; then print_step "Including global git ignore" git config --global --add core.excludesfile "~/.dotfiles/git/.gitignore_global" else print_step "Skipped: Including global git ignore" fi # shellcheck disable=SC2088 if [[ "$(git config --global include.path)" != "~/.dotfiles/git/.gitconfig_global" ]]; then print_step "Including global git config" git config --global --add include.path "~/.dotfiles/git/.gitconfig_global" else print_step "Skipped: Including global git config" fi # shellcheck disable=SC2088 if [[ -z "$(git config --global user.email)" ]]; then print_step "Creating global git email" "What is your git email?:" read -r git_email abort_variable_unset "git_email" "${git_email}" git config --global --add user.email "${git_email}" else print_step "Skipped: Creating global git email" fi # shellcheck disable=SC2088 if [[ -z "$(git config --global user.name)" ]]; then print_step "Creating global git name" "What is your git name?:" read -r git_name abort_variable_unset "git_name" "${git_name}" git config --global --add user.name "${git_name}" else print_step "Skipped: Creating global git name" fi } final_setup() { print_stage "Final setup" update_packages if [[ -n "$(${SHELL} -c 'echo "${ZSH_VERSION}"')" ]]; then print_step "Skipped: Changing shell to ZSH" else print_step "Changing shell to ZSH" sudo usermod -s "$(command -v -- zsh)" "${USER}" env zsh -l fi } main() { # Declare local variables local input_arguments local argument_flag local headless_mode local desktop_mode # Initialize local variables input_arguments="${*}" argument_flag="false" headless_mode="disabled" desktop_mode="disabled" # Configure single password prompt at the beginning of the script get_sudo # Check repo for updates before proceeding repo_update if variable_set "${input_arguments}"; then # Process arguments for argument in "${@}"; do argument_flag="true" if [[ "${argument}" == "-?" 
|| "${argument}" == "--help" ]]; then abort_script "Usage:" " $(script_filename) [options]" " -?, --help show list of command-line options" "" "OPTIONS" " -h, --headless force enable headless mode" " -d, --desktop force enable desktop mode" elif [[ "${argument}" == "-h" || "${argument}" == "--headless" ]]; then headless_mode="enabled" elif [[ "${argument}" == "-d" || "${argument}" == "--desktop" ]]; then desktop_mode="enabled" else abort_script "Invalid Argument!" "" "Usage:" " $(script_filename) [options]" " -?, --help show list of command-line options" fi done fi # Determine system type if no arguments given if [[ "${argument_flag}" == "false" ]]; then if has_display; then desktop_mode="enabled" fi headless_mode="enabled" fi initial_setup if [[ "${argument_flag}" == "false" || "${headless_mode}" == "enabled" ]]; then headless_setup fi if [[ "${desktop_mode}" == "enabled" ]]; then desktop_setup fi final_setup # Cleanup single password prompt at the end of the script clear_sudo } main "${*}" exit_script "0"
true
91e5c1f62e9970905156da7f8e75644e940bbaba
Shell
jorgen/build_shell
/examples/rpi-buildroot/scripts/post_build_buildroot
UTF-8
483
3.3125
3
[ "MIT" ]
permissive
#!/bin/sh #We have to make a symlink to where our libraries will be installed #because the pkg-config from buildroot will allways prepend #the sysroot FILE=$2 INSTALL_PATH=$(jsonmod -p arguments.install_path $FILE) echo THIS IS THE POST SCRIPT $INSTALL_PATH if [ ! -d output/staging$INSTALL_PATH ]; then mkdir -p "output/staging$INSTALL_PATH" ln -s $INSTALL_PATH/lib "output/staging$INSTALL_PATH/lib" ln -s $INSTALL_PATH/include "output/staging$INSTALL_PATH/include" fi
true
113c12b8f0111df7d7dbe91ea761b3fb19877089
Shell
Ensembl/ensembl-variation
/nextflow/ProteinFunction/bin/create_aa_substitutions.sh
UTF-8
808
3.734375
4
[ "Apache-2.0" ]
permissive
#!/bin/bash program=${1?:First argument needs to be the program} id=${2?:Second argument needs to be the ID of the peptide} peptide=${3?:Third argument needs to be the peptide string} pos=0 ALL_AAS=(A C D E F G H I K L M N P Q R S T V W Y) for ref in `grep -o . <<< $peptide`; do ((pos+=1)) # ignore non-standard amino acids (e.g. X) when using PolyPhen-2 if [[ $program == "polyphen2" && ! " ${ALL_AAS[*]} " =~ " ${ref} " ]]; then continue fi for alt in ${ALL_AAS[@]}; do if [[ $ref == $alt ]]; then continue elif [[ $program == "sift" ]]; then output="$ref$pos$alt" elif [[ $program == "polyphen2" ]]; then output="$id\t$pos\t$ref\t$alt" else echo "ERROR: specified program is not supported" >&2 exit 1 fi echo -e "$output" done done
true
0be870241bba729bd7b8e306744edf0dbeef2594
Shell
FauxFaux/debian-control
/libp/libpam-ldap/libpam-ldap_186-4_amd64/prerm
UTF-8
169
2.78125
3
[]
no_license
#!/bin/sh set -e if [ "$1" = remove ] && \ [ "$(dpkg-query --show libpam-ldap 2> /dev/null | wc -l)" = 1 ]; then pam-auth-update --package --remove ldap fi exit 0
true
60521473ed5299428eca4b88976d0b9e45079fcc
Shell
cossacklabs/themis
/tests/_integration/encrypt_folder.sh
UTF-8
762
2.859375
3
[ "LicenseRef-scancode-proprietary-license", "Apache-2.0" ]
permissive
#!/usr/bin/env bash set -eu HOST_NAME=$1 TEST_OUT=./tests/out/$HOST_NAME mkdir -p $TEST_OUT echo ".. encrypting data from $HOST_NAME in folder $TEST_OUT" ruby ./tools/ruby/scell_seal_string_echo.rb "enc" "pass" "test seal: pass" > $TEST_OUT/scell_seal.txt ruby ./tools/ruby/scell_seal_string_echo.rb "enc" "pass" "test seal context: pass" "somecontext" > $TEST_OUT/scell_seal_context.txt ruby ./tools/ruby/scell_context_string_echo.rb "enc" "pass" "test context imprint: pass" "somecontext" > $TEST_OUT/scell_context_impr.txt ruby ./tools/ruby/scell_token_string_echo.rb "enc" "pass" "test token: pass" > $TEST_OUT/scell_token.txt ruby ./tools/ruby/scell_token_string_echo.rb "enc" "pass" "test token: pass" "somecontext" > $TEST_OUT/scell_token_context.txt
true
ed469457c85ddf8ecea9d827aa401c86af6f9cb9
Shell
tklebanoff/rust-musl-builder
/ar_wrapper
UTF-8
307
2.859375
3
[ "Apache-2.0", "MIT" ]
permissive
#!/bin/bash export LTO_PLUGIN="/usr/local/musl/libexec/gcc/x86_64-linux-musl/8.3.0/liblto_plugin.so.0.0.0" args=() for arg in "$@"; do args+=("$arg") done args+=("--plugin" "${LTO_PLUGIN}") echo "AR_WRAPPER, RUNNING WITH ARGS: ${args[@]}" /usr/local/musl/bin/x86_64-linux-musl-ar-real "${args[@]}"
true
9423ed7a620a4d9070c44e88063097725b024ca8
Shell
h-oikawa-jp/dotfiles
/linux/zsh/.zsh/completion.zsh
UTF-8
2,516
2.875
3
[ "MIT" ]
permissive
# ----------------------------- # Completion # ----------------------------- fpath=(~/.zsh/completion $fpath) # 自動補完を有効にする autoload -Uz compinit ; compinit -u # 単語の入力途中でもTab補完を有効化 setopt complete_in_word # コマンドミスを修正 setopt correct # 補完の選択を楽にする #zstyle ':completion:*' menu select zstyle ':completion:*' menu select=long # 補完候補をできるだけ詰めて表示する setopt list_packed # キャッシュの利用による補完の高速化 zstyle ':completion::complete:*' use-cache true # 補完候補に色つける autoload -U colors ; colors ; zstyle ':completion:*' list-colors "${LS_COLORS}" #zstyle ':completion:*' list-colors ${(s.:.)LS_COLORS} #eval "$(dircolors -b)" #zstyle ':completion:*:default' list-colors ${(s.:.)LS_COLORS} #zstyle ':completion:*' list-colors '' # 大文字・小文字を区別しない(大文字を入力した場合は区別する) #zstyle ':completion:*' matcher-list 'm:{a-z}={A-Z}' zstyle ':completion:*' matcher-list '' 'm:{a-z}={A-Z}' 'm:{a-zA-Z}={A-Za-z}' 'r:|[._-]=* r:|=* l:|=*' # manの補完をセクション番号別に表示させる zstyle ':completion:*:manuals' separate-sections true # sudo の後ろでコマンド名を補完する zstyle ':completion:*:sudo:*' command-path /usr/local/sbin /usr/local/bin \ /usr/sbin /usr/bin /sbin /bin /usr/X11R6/bin # ps コマンドのプロセス名補完 zstyle ':completion:*:processes' command 'ps x -o pid,s,args' # --prefix=/usr などの = 以降でも補完 setopt magic_equal_subst # pip function _pip_completion { local words cword read -Ac words read -cn cword reply=( $( COMP_WORDS="$words[*]" \ COMP_CWORD=$(( cword-1 )) \ PIP_AUTO_COMPLETE=1 $words[1] ) ) } compctl -K _pip_completion pip compctl -K _pip_completion pip3 zstyle ':completion:*' auto-description 'specify: %d' zstyle ':completion:*' completer _expand _complete _correct _approximate zstyle ':completion:*' format 'Completing %d' zstyle ':completion:*' group-name '' zstyle ':completion:*' list-prompt %SAt %p: Hit TAB for more, or the character to insert%s zstyle ':completion:*' select-prompt %SScrolling active: current selection at %p%s zstyle ':completion:*' use-compctl false zstyle ':completion:*' verbose true zstyle ':completion:*:*:kill:*:processes' 
list-colors '=(#b) #([0-9]#)*=0=01;31' zstyle ':completion:*:kill:*' command 'ps -u $USER -o pid,%cpu,tty,cputime,cmd'
true
d0f9dafa1ebf7ba4f46f8135bf6334effa30653d
Shell
gbraad/automate-everything
/playbookwrapper
UTF-8
375
3
3
[]
no_license
#!/bin/sh APTPKGS="ansible" RPMPKGS="ansible" if [ ! -x "/usr/bin/ansible-playbook" ]; then # Crude multi-os installation option if [ -x "/usr/bin/apt-get" ] then sudo apt-get install -y $APTPKGS elif [ -x "/usr/bin/dnf" ] then sudo dnf install -y $RPMPKGS elif [ -x "/usr/bin/yum" ] then sudo yum install -y $RPMPKGS fi fi ansible-playbook $@
true