blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d3bb3270f200874b7280ad655a7bd4134f80059e
|
Shell
|
HERA-Team/hera-images
|
/test-librarian/launch.sh
|
UTF-8
| 2,161
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Copyright 2015-2016 the HERA Collaboration
# Licensed under the MIT License.
#
# We need to set up the runtime configuration of the Librarian server. We must
# be called with an argument giving the name of the Librarian database to use.
if [ -z "$1" ] ; then
echo >&2 "error: you must specify the name of the librarian database to use"
exit 1
fi
set -e
# Activate the conda environment by sourcing every activation hook.
for f in /opt/conda/etc/conda/activate.d/* ; do
source "$f"
done
/hera/fill-configs.sh
cd /hera/librarian/server
# Only the "onsite" librarian reports to the monitor-and-control (M&C) system.
if [ "$1" = onsite ] ; then
mc_flag=true
else
mc_flag=false
fi
# Generate the per-database server config. JSON has no comment syntax, so all
# annotation lives out here: $1 and $HERA_DB_PASSWORD are expanded into the
# file at generation time, and the SECRET_KEY below is a test-only value.
cat <<EOF >test-$1-config.json
{
"server": "tornado",
"n_server_processes": 2,
"n_worker_threads": 2,
"SECRET_KEY": "7efa9258e0b841eda8a682ccdd53c65d493a7dc4b95a5752b0db1bbbe96bd269",
"SQLALCHEMY_DATABASE_URI": "postgresql://postgres:$HERA_DB_PASSWORD@db:5432/hera_librarian_$1",
"SQLALCHEMY_TRACK_MODIFICATIONS": false,
"host": "0.0.0.0",
"displayed_site_name": "$1",
"flask_debug": true,
"report_to_mandc": $mc_flag,
"obsid_inference_mode": "_testing",
"local_disk_staging": {
"ssh_host": "localhost",
"dest_prefix": "/data/stagedest",
"displayed_dest": "Test Directory",
"username_placeholder": "your username",
"chown_command": ["echo", "chown"]
},
"sources": {
"RTP": {
"authenticator": "9876543210"
},
"correlator": {
"authenticator": "9876543211"
},
"Karoo": {
"authenticator": "9876543212"
},
"HumanUser": {
"authenticator": "human"
}
},
"add-stores": {
"local": { "path_prefix": "/data", "ssh_host": "localhost" },
"${1}pot": { "path_prefix": "/data", "ssh_host": "${1}pot" }
}
}
EOF
# We need to wait for the database to be ready to accept connections before we
# can start. This is a simple (but hacky) way of doing this:
# (/dev/tcp/HOST/PORT is a bash-only virtual path that opens a TCP connection.)
host=db
port=5432
while true ; do
(echo >/dev/tcp/$host/$port) >/dev/null 2>&1 && break
echo waiting for database ...
sleep 1
done
# Make sure the database schema is up-to-date and go.
export LIBRARIAN_CONFIG_PATH=test-$1-config.json
alembic upgrade head
exec ./runserver.py
| true
|
f57d942774bacd4b4d4b1a8a2e2633ca88092e8f
|
Shell
|
brianlan/deb-deps-downloader
|
/download.sh
|
UTF-8
| 1,225
| 4
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Builds a throwaway Docker image for the requested Ubuntu release, downloads
# the given package (plus dependencies) inside it, and collects the .deb files
# into a tarball in the current directory.
#
# Usage: download.sh <ubuntu_version> <package_name> [in_china]
#   in_china  use the mirror-friendly Dockerfile.china instead of Dockerfile

ARCHIVE_DIR="/var/cache/apt/archives"

if [ $# -lt 2 ]; then
    echo "Invalid number of arguments. Usage: download.sh <ubuntu_version> <package_name> [in_china]"
    exit 1
fi

UBT_VER=$1; shift;
PACKAGE=$1; shift;
# Third argument is optional; the original's unconditional `shift` failed
# when it was absent.
IN_CHINA=${1-}

TAR_PATH="${PACKAGE}-${UBT_VER}.tar.gz"

if [ "${IN_CHINA}" = "in_china" ]; then
    dockerfilename="Dockerfile.china"
else
    dockerfilename="Dockerfile"
fi

echo "start to build docker image"
# Test the command directly instead of the `$? -ne 0` anti-pattern; quote
# expansions so unusual version strings/paths survive word-splitting.
if ! docker build -t tmp_deb_img --build-arg "ubuntu_version=${UBT_VER}" -f "${dockerfilename}" .; then
    echo "Error found during building docker image. Process aborted and wait for cleaning."
    exit 1
fi

echo "start to download deb packages and then put them into a tar file"
docker run --rm -v "tmp_deb:${ARCHIVE_DIR}" -v "$(pwd):/__tmp__" tmp_deb_img \
    /bin/bash -c "apt-get install -y --download-only ${PACKAGE}; tar -C ${ARCHIVE_DIR} -czvf /__tmp__/${TAR_PATH} ./"
#docker run -d -v tmp_deb:${ARCHIVE_DIR} -v $(pwd):/__tmp__ tmp_deb_img \
#    /bin/bash -c "apt-get install -y --download-only ${PACKAGE}; tail -f /dev/null"

echo "deleting temp docker volume"
docker volume rm tmp_deb
echo "deleting temp docker image"
docker rmi tmp_deb_img
| true
|
c112db5eb5b46d91ae45ee81fbdfefbc44c4e733
|
Shell
|
seaglex/dots
|
/.bashrc
|
UTF-8
| 362
| 2.546875
| 3
|
[] |
no_license
|
# Useful bash script append to ~/.bashrc
# Set git autocompletion and PS1 integration
# (path is specific to git 1.7.1 as packaged on RHEL/CentOS 6)
if [ -f /usr/share/doc/git-1.7.1/contrib/completion/git-completion.bash ]; then
. /usr/share/doc/git-1.7.1/contrib/completion/git-completion.bash
fi
# Show staged/unstaged markers and untracked-file marker in __git_ps1 output.
export GIT_PS1_SHOWDIRTYSTATE=true
export GIT_PS1_SHOWUNTRACKEDFILES=true
# Prompt: [user@host dir (branch)]$ — __git_ps1 comes from the script above.
export PS1='[\u@\h \W$(__git_ps1 " (%s)")]\$ '
| true
|
9fab5f99cdefba56907b644eda8d75631efc342a
|
Shell
|
nitinkk13/jenkins-project
|
/systeminfo.sh
|
UTF-8
| 391
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
#Author: Nitin Kumar
# Collects basic session/system facts and prints them as a banner.
# $(...) replaces the legacy backticks: it nests cleanly and is easier to read.
Name=$(whoami)
ID=$(id -u)
Kernel=$(uname)
Version=$(uname -r)
current_dir=$(pwd)
echo -e "************System Information*************** \n Login Name: $Name \n Login ID: $ID
Working Kernel Name: $Kernel
Working Kernel Version: $Version
Current Working Directory: $current_dir
***************************************"
echo "Checking SCM Poll in Jenkins Project."
| true
|
4b7797170677e812eab05416684610f79a2a3ec0
|
Shell
|
lendup/heroku-buildpack-ironbank
|
/bin/release
|
UTF-8
| 512
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh
# Heroku buildpack "release" script: when the app supplies no Procfile, emit
# default process types (web + worker) that launch Play via the bundled Maven.
BUILD_DIR="$1"
if [ ! -f "$BUILD_DIR/Procfile" ]; then
# \$PORT and \$PLAY_ID are escaped so they expand at dyno runtime.
# NOTE(review): $JAVA_OPTS is NOT escaped, so it expands now (release time),
# not at runtime — looks inconsistent with the other variables; confirm intent.
cat <<EOF
default_process_types:
web: ./apache-maven/bin/mvn -P '!local' --settings ./assets/settings.xml -Dmaven.repo.local=./.m2_repository play:run -Dplay.httpPort=\$PORT -Dplay.id=\$PLAY_ID -Dplay.serverJvmArgs="$JAVA_OPTS"
worker: ./apache-maven/bin/mvn -P '!local' --settings ./assets/settings.xml -Dmaven.repo.local=./.m2_repository play:run -Dplay.httpPort=\$PORT -Dplay.id=\$PLAY_ID -Dplay.serverJvmArgs="$JAVA_OPTS"
EOF
fi
| true
|
2938c983e9cc9847ab89096869ad7bd6cd70e314
|
Shell
|
modernish/modernish
|
/share/doc/modernish/examples/sort-music.sh
|
UTF-8
| 4,116
| 3.90625
| 4
|
[
"ISC",
"CC0-1.0",
"BSD-3-Clause"
] |
permissive
|
#! /usr/bin/env modernish
#! use safe -k
#! use sys/cmd
#! use var/assign
#! use var/loop
#! use var/string
# Harden the external commands we rely on: modernish aborts the script when a
# hardened command fails unexpectedly.
harden -p cut
harden -e '>1' ffprobe # ffprobe comes with the ffmpeg package
harden -p -t mkdir
harden -p -t ln
harden -p printf
PATH=/dev/null # more bug-proof: only allow hardened external commands
# Have a disk or directory with scattered or disorganised music files? This
# script searches a directory for audio/music files, and hardlinks or symlinks
# all files found into a new directory hierarchy, organised by artist, album,
# track number and title, based on the metadata read from the files. Your
# original files are not touched. This way, your files are organised
# non-destructively without changing their original names or locations.
#
# This script shows off the modernish 'find' shell loop. With the regular
# 'find' utility, this is nearly impossible to implement correctly and robustly
# in shell. With modernish, processing is correct and robust by default for
# all file names, no matter how weird (spaces, newlines, etc.).
#
# Among other things, this script also demonstrates string processing, portable
# process substitution, and safe assignment of variables by reference.
#
# To read the metadata, the script uses the 'ffprobe' command from ffmpeg.
#
# By Martijn Dekker <martijn@inlv.org>, February 2019. Public domain.
# ____ configuration ____
# where to search for your music
musicdir=~/Music
# where to store organised links
sorteddir=~/Music/sorted
# ERE describing the filename extensions of files to be processed, in lowercase
extensions='^mp[34]$|^m4a$|^ogg$|^wma$|^flac$|^aiff?$|^wav$'
# set to 'y' to create symlinks instead of hardlinks
symlinks='y'
# ____ initialisation ____
is dir $musicdir || exit 2 "$musicdir doesn't exist"
is dir $sorteddir || mkdir -p $sorteddir
if str eq $symlinks 'y'; then
ln_opt='-s'
else
ln_opt=''
# Hardlinks cannot cross filesystem boundaries, so bail out early.
if not is onsamefs $musicdir $sorteddir; then
exit 2 "$musicdir and $sorteddir are on different file" \
"systems; hardlinks would be impossible"
fi
fi
# ____ main program ____
processedfiles=0
totalfiles=0
# Modernish find-loop: prune the destination dir, stay on one filesystem
# (-xdev), and iterate over every regular file found.
LOOP find musicfile in $musicdir \
-path $sorteddir -prune -or -xdev -type f -iterate
DO
let "totalfiles += 1"
# Determine if we should process this file.
extension=${musicfile##*.}
str eq $extension $musicfile && continue # no extension
tolower extension
str ematch $extension $extensions || continue
# Initialise tag variables.
artist=''
album=''
title=''
track=''
# Read metadata from ffprobe output using process substitution. Lines
# from ffprobe are in the form "TAG:artist=Artist name here", etc.;
# remove initial TAG: and treat the rest as variable assignments.
# (`$( % cmd )` is modernish's portable process substitution.)
while read -r tag; do
assign ${tag#TAG:}
done < $( % ffprobe -loglevel 16 \
-show_entries format_tags=artist,album,title,track \
-of default=noprint_wrappers=1:nokey=0 \
$musicfile )
# Make artist, album, title and track number suitable for file names.
# ...fill in if empty
str empty $artist && artist='_unknown_artist'
str empty $album && album='_unknown_album'
str empty $title && title=${musicfile##*/} && title=${title%.*}
# ...replace any directory separators (/)
replacein -a artist '/' '\'
replacein -a album '/' '\'
replacein -a title '/' '\'
# ...remove any leading and trailing whitespace
trim artist
trim album
trim title
# ...remove any initial 'The ' from artist name(s) and limit length
artist=${artist#[Tt][Hh][Ee][ _-]}
# (names longer than 32 chars are cut to 29 and suffixed with "...")
let "${#artist} > 32" && artist=$(putln $artist | cut -c 1-29)...
# ...format track number, if any
track=${track%%/*} # remove total number of tracks
str isint $track && track=$(printf '%02d ' $track) || track=
# Determine and check the new path.
newdir=$sorteddir/$artist/$album
newname=$track$title.${musicfile##*.}
if not is dir $newdir; then
mkdir -p $newdir
elif is present $newdir/$newname; then
putln "WARNING: skipping duplicate: $newdir/$newname" >&2
continue
fi
# Hardlink or symlink the original to the new path.
ln $ln_opt $musicfile $newdir/$newname
let "processedfiles += 1"
DONE
putln "$processedfiles out of $totalfiles files processed"
| true
|
8d71afa3ddb80379f4ce39c7772e06f46cc76a0b
|
Shell
|
josefglatz/dotfiles
|
/zsh_custom/functions.zsh
|
UTF-8
| 6,265
| 3.8125
| 4
|
[] |
permissive
|
# Loads `.env` file from a filename passed as an argument
# Usage: loadenv <file>
# Every non-comment KEY=VALUE line is exported into the current shell.
loadenv() {
  # IFS= read -r: keep leading whitespace and literal backslashes;
  # `|| [ -n "$line" ]` still processes a final line with no trailing newline.
  while IFS= read -r line || [ -n "$line" ]; do
    case "$line" in
      '#'*) continue ;;  # comments are ignored
      '') continue ;;    # skip blank lines (a bare `export` would dump the env)
    esac
    export "$line" > /dev/null
  done < "$1"
  echo 'Loaded!'
}
# broot (brew) https://dystroy.org/broot/install-br/
# --------------------------------------------------
# Shell Function for broot -> br got via `broot --print-shell-function zsh`
# This script was automatically generated by the broot program
# More information can be found in https://github.com/Canop/broot
# This function starts broot and executes the command
# it produces, if any.
# It's needed because some shell commands, like `cd`,
# have no useful effect if executed in a subshell.
function br {
f=$(mktemp)
(
# Subshell: broot writes the command to run into the temp file "$f".
# `exit` here only leaves the subshell; the outer code re-checks $code.
set +e
broot --outcmd "$f" "$@"
code=$?
if [ "$code" != 0 ]; then
rm -f "$f"
exit "$code"
fi
)
code=$?
if [ "$code" != 0 ]; then
return "$code"
fi
# $(<file) reads the produced command; eval it in the *current* shell so
# commands like `cd` actually take effect.
d=$(<"$f")
rm -f "$f"
eval "$d"
}
# Homebrew Upgrade for greedy formulars/casks
# -------------------------------------------------
# just pass an existing homebrew formulae/cask name
# Usage: brew-upgrade-greedy <formula-or-cask>
function brew-upgrade-greedy {
  echo "${YELLOW}Upgrading greedy cask \"$1\" ${RED} without any warranty!${RESET}"
  # Quote the argument so names survive word-splitting/globbing.
  brew upgrade --greedy "$1"
}
# trash with information
# ----------------------
# wrapper for trash
function move-to-trash {
  # `test -x <path>` checks the binary actually exists and is executable;
  # the original bare `test <path>` only tested for a non-empty string,
  # which is always true.
  if test -x "$(brew --prefix)/bin/trash"; then
    echo "${YELLOW}Moving following items to 🗑 ${RESET} (trash):"
    "$(brew --prefix)/bin/trash" -v "$@" | column
    echo "\n(List all trash items with command ${YELLOW}trash -l${RESET})\n"
  fi
}
# Define the fzf-powered helpers only when fzf is available.
# (`command -v` replaces `test $(which fzf)`: it is the portable lookup and
# does not word-split or mis-fire on `which` diagnostics.)
if command -v fzf >/dev/null 2>&1; then
    # fh - repeat history (with fzf)
    # ------------------------------
    # a fuzzy finder powered history
    fh() {
        print -z $( ([ -n "$ZSH_NAME" ] && fc -l 1 || history) | fzf +s --tac | sed -E 's/ *[0-9]*\*? *//' | sed -E 's/\\/\\\\/g')
    }
    # find-in-file - usage: fif <SEARCH_TERM>
    # ---------------------------------------
    fif() {
        if [ ! "$#" -gt 0 ]; then
            echo "Need a string to search for!";
            return 1;
        fi
        rg --hidden --files-with-matches --no-messages "$1" | fzf $FZF_PREVIEW_WINDOW --preview "rg --ignore-case --pretty --context 10 '$1' {}"
    }
    alias find-in-file="fif"
    # fda - including hidden directories
    # ---------------------------------------------
    # https://github.com/junegunn/fzf/wiki/examples
    fda() {
        local dir
        dir=$(find ${1:-.} -type d 2> /dev/null | fzf +m) && cd "$dir"
    }
    alias find-directory-all="fda"
fi
# lazygit "alias" with auto cd after quiting application support
# --------------------------------------------------------------
# https://github.com/jesseduffield/lazygit/#changing-directory-on-exit
lg()
{
    # lazygit writes the directory it was quit from into this file.
    export LAZYGIT_NEW_DIR_FILE=~/.lazygit/newdir
    lazygit "$@"
    # Quote the expansions so paths containing spaces work.
    if [ -f "$LAZYGIT_NEW_DIR_FILE" ]; then
        cd "$(cat "$LAZYGIT_NEW_DIR_FILE")"
        rm -f "$LAZYGIT_NEW_DIR_FILE" > /dev/null
    fi
}
# "g" alias for GIT with fallback to git status
# --------------------------------------------------------
# No arguments: `git status`
# With arguments: acts like `git`
# https://github.com/thoughtbot/dotfiles
# Drop any pre-existing alias; silence the error a bare `unalias g`
# prints when no such alias is defined.
unalias g 2>/dev/null
g() {
  if [[ $# -gt 0 ]]; then
    git "$@"
  else
    git status -sb
  fi
}
# Find installed app by app name oder bundleId
# -----------------------------------------------------------------------------------
# Example: whichapp finder # -> '/System/Library/CoreServices/Finder.app/'
# Example: whichapp com.apple.finder # -> '/System/Library/CoreServices/Finder.app/'
# https://stackoverflow.com/a/12900116
# (macOS only: relies on osascript/AppleScript and Finder.)
whichapp() {
local appNameOrBundleId=$1 isAppName=0 bundleId
# Determine whether an app *name* or *bundle ID* was specified.
# (Anything ending in ".app" or containing no dot at all counts as a name.)
[[ $appNameOrBundleId =~ \.[aA][pP][pP]$ || $appNameOrBundleId =~ ^[^.]+$ ]] && isAppName=1
if (( isAppName )); then # an application NAME was specified
# Translate to a bundle ID first.
bundleId=$(osascript -e "id of application \"$appNameOrBundleId\"" 2>/dev/null) ||
{ echo "$FUNCNAME: ERROR: Application with specified name not found: $appNameOrBundleId" 1>&2; return 1; }
else # a BUNDLE ID was specified
bundleId=$appNameOrBundleId
fi
# Let AppleScript determine the full bundle path.
# NOTE(review): fullPath is not declared `local`, so it leaks into the
# calling shell — confirm whether that is intentional.
fullPath=$(osascript -e "tell application \"Finder\" to POSIX path of (get application file id \"$bundleId\" as alias)" 2>/dev/null ||
{ echo "$FUNCNAME: ERROR: Application with specified bundle ID not found: $bundleId" 1>&2; return 1; })
printf '%s\n' "$fullPath"
# Warn about /Volumes/... paths, because applications launched from mounted
# devices aren't persistently installed.
if [[ $fullPath == /Volumes/* ]]; then
echo "NOTE: Application is not persistently installed, due to being located on a mounted volume." >&2
fi
}
# Find bundleId of given app
# -----------------------------------------------------------
# Example: bundleid finder # -> 'com.apple.finder'
# https://stackoverflow.com/a/12900116
# Prints the bundle identifier for an application name; returns 1 with an
# error message on stderr when the app cannot be found.
bundleid() {
  local appId
  if appId=$(osascript -e "id of application \"$1\"" 2>/dev/null); then
    printf '%s\n' "$appId"
  else
    echo "$FUNCNAME: ERROR: Application with specified name not found: $1" 1>&2
    return 1
  fi
}
# Use Ctrl-x,Ctrl-l to get the output of the last command
# -------------------------------------------------------
# Based on https://github.com/skwp/dotfiles/commit/2ad786a41b29277530ebb4c50bcb65f3971d4901
# NOTE: this RE-RUNS the previous command via `eval` to capture its output,
# so any side effects of that command happen again.
zmodload -i zsh/parameter
insert-last-command-output() {
LBUFFER+="$(eval $history[$((HISTCMD-1))])"
}
# Register as a zle widget and bind it to Ctrl-X Ctrl-L.
zle -N insert-last-command-output
bindkey "^X^L" insert-last-command-output
# Load other functions
# ---------------------------------------------
# E.g for overriding existing functions locally
# which is not part of dotfiles repository

# Source ~/.dotfiles_local/.functions_local (if file exists)
# FIX: the original guard tested this path but then sourced
# "$HOME/.functions_local" — source the file that was actually checked.
if [[ -f "$DOTFILES_DIRECTORY_LOCAL/.functions_local" ]]; then
  source "$DOTFILES_DIRECTORY_LOCAL/.functions_local"
fi
# Source files in ~/.dotfiles_local/functions (if folder exists)
# FIX: the original quoted `find ... -print -quit`, which yielded at most one
# file (and a literal non-path string when none matched); glob over all
# matching files instead.
if [[ -d "$DOTFILES_DIRECTORY_LOCAL/functions" ]]; then
  for file in "$DOTFILES_DIRECTORY_LOCAL/functions"/*.zsh; do
    [[ -f "$file" ]] && source "$file"
  done
fi
# Source ~/.functions_local (if file exists)
if [[ -f "$HOME/.functions_local" ]]; then
  source "$HOME/.functions_local"
fi
| true
|
3553ba500acb5614ab4ccc9b58e9fe0948762c1b
|
Shell
|
StevenWColby/scripts
|
/.bash_aliases
|
UTF-8
| 3,197
| 2.640625
| 3
|
[] |
no_license
|
# Personal alias collection; commented-out lines are retired variants kept
# for reference.
# alias cls='clear'
alias csl='csmake --list-commands ; csmake --list-phases'
# Docker shortcuts.
# alias drmc="docker rm `docker ps -a -q --filter status=exited`"
# alias drmi="docker images | awk 'NR>1 {print $3}' | xargs docker rmi"
# alias di='docker images'
# alias dla='docker ps -a; echo ''; docker images -a ; echo ----- dangling ------ ; docker images -a -f dangling=true'
alias dla='docker ps -a; echo ''; docker images -a; echo ''; docker network ls'
alias dl='docker ps; echo ''; docker images'
# alias dps='docker ps -a'
alias ssd="systemctl status 'docker*' --no-pager --full"
alias ds='dirs -v'
# Environment inspection (LS_COLORS filtered out for readability).
alias envp='env | grep -v "LS_COLORS" | grep -i "proxy" | grep -v 'UBUNTU_MENUPROXY' | sort'
alias envs='env | grep -v "LS_COLORS" | sort'
alias envsl='env | grep -v "LS_COLORS" | sort | less'
alias envg='env | grep -v "LS_COLORS" | grep -i "go" '
# Git shortcuts.
alias gba='git branch --all -vv'
alias gb='git branch -vv --list'
alias gca='git commit --amend'
alias gds='git diff --staged'
alias gg='gitg'
alias gh='history | grep'
# alias gitd='git checkout hp/cs/9.0-master; git branch -vv --list'
# alias gitm='git checkout hp/cs/10.0-master; git branch -vv --list'
# alias gitm='git checkout hp/cs/10.1-master; git branch -vv --list'
alias gitm='git checkout master; git branch -vv --list'
# alias gitm='git checkout hp/cs/master; git branch -vv --list'
# alias gitm='git checkout master'
alias glga='git log --graph --all --decorate --oneline'
alias glg='git log --graph --decorate'
alias glgo='git log --graph --decorate --oneline'
alias glo='git log --oneline'
alias gls='git log --stat -n 1 -p'
alias grso='git remote show origin'
alias gsl='git stash list'
alias gsm='git submodule update --init'
alias gss='git submodule status'
alias gst='git branch -vv ; git status'
alias gsti='git status --ignored'
alias gup='git up'
# History helpers.
alias ha='history -a # write (append) recent commands to history file'
alias h='history'
alias hh='history 20'
alias hr='history -r # read (re-read) history file'
alias la='ls -A'
alias lld='ls -lF'
alias ll='ls -alF'
# alias llr='ls -alR'
# alias l='ls -CF'
alias lpath='echo $PATH | tr ":" "\n"'
alias lsl='ls -l'
alias md='mkdir'
alias mr='make reallyclean'
alias mrma='make reallyclean; make all'
# alias mvni='mvn -Dmaven.test.skip=true install'
# alias mvnp='mvn -Dmaven.test.skip=true package'
# alias mvnt='mvn test'
# alias mvntp='mvn package'
alias ose="env | sort | grep 'OS_'"
alias p3='python3'
# Directory-stack helpers (see sdparam below).
alias pd='pushd'
alias rot='pushd +1 >/dev/null'
# alias rrot='pushd -1 >/dev/null'
alias sba='source ./.venv/bin/activate'
alias sd='sdparam'
alias tl='tree -C | less -r'
alias vah='source ./here/bin/activate'
alias vc='virsh console'
alias vh='virtualenv ./here'
alias vik='vi ~/.ssh/known_hosts'
# alias psf='ps -ef --forest'
alias psf='ps -f --ppid 2 --deselect --forest'
# A pretty 'find' command which ignores .svn directories
# Usage: pfind <pattern> <directory>
# Case-insensitively greps every regular file under <directory> (skipping
# .svn and .git paths) and prints each matching file's path in green.
function pfind() {
    # "$2" is quoted so directories with spaces work (the original split it).
    find "$2" -type f -not -iwholename '*.svn*' -not -iwholename '*.git*' -exec grep -i --color=always "$1" {} \; -printf '\033[1;32m %p \033[0m \n\n\n';
}
# export -f pfind
# A function to enable a 'sd' alias like 'pushd +$1' - there is no way to directly parameterize a bash alias.
# Usage: sdparam <n>  — rotate the directory stack so entry n is on top.
function sdparam() {
    # ${1:?} aborts with a usage message instead of running a bare `pushd +`.
    pushd "+${1:?usage: sdparam <stack-index>}" >/dev/null
}
| true
|
b644d0c9c61df45d63d51854919609b42af230f8
|
Shell
|
antsman/rpi-jenkins
|
/get-versions.sh
|
UTF-8
| 612
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# Outputs versions found in specified container, saves same also
# in properties file for later use in Jenkinsfile
#
# Usage: get-versions.sh <container-name>
VERSIONS=version.properties

# Validate up front with a plain if: the original `test && ( ... ) || ( ... )`
# chain ran `exit 1` inside a subshell and could fall into the error branch
# whenever the pipeline (e.g. `tee`) failed, not only when $1 was missing.
if [ -z "$1" ]; then
    echo Give container name as parameter!
    exit 1
fi

{
    echo JENKINS_VERSION=\"$(docker exec -t "$1" cat /data/war/META-INF/MANIFEST.MF | grep Jenkins-Version | awk '{ print $2 }' | tr -d '\r')\"
    # NOTE(review): `java -version` writes to stderr; this relies on
    # `docker exec -t` merging streams through the pseudo-TTY — confirm.
    echo JAVA_VERSION=\"$(docker exec -t "$1" java -version | grep version | awk -F\" '{ print $2 }')\"
    echo DOCKER_VERSION=\"$(docker exec -t "$1" docker --version | grep version | awk '{ print $3 }' | tr -d ',')\"
} | tee "$VERSIONS"
| true
|
7970b9029b69d51297d4a9754b99a28cccd56be5
|
Shell
|
emoore24/dotfiles
|
/install.sh
|
UTF-8
| 1,853
| 3.421875
| 3
|
[] |
no_license
|
# Dotfiles bootstrap: symlinks configs, installs vim-plug and Neovim
# providers, then NVM and OhMyZsh. Interactive and network-dependent.
echo "PREREQUISITES: "
echo "Make sure you have installed ZSH and Neovim"
read -p "Press Enter to Continue, or Ctrl-C to exit" continuevar
BUNDLE_DIR=~/.vim/bundle
# "dirname" gives directory name of script (BASH_SOURCE). cd to directory and run pwd to get directory of this script
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
echo "Running in $SCRIPT_DIR"
echo "Symlinking files..."
#Symlink files
ln -sfv "$SCRIPT_DIR/.vimrc" ~ # vimrc
ln -sfv "$SCRIPT_DIR/.tmux.conf" ~ # tmux.conf
ln -sfv "$SCRIPT_DIR/.vimrc" ~/.config/nvim/init.vim # neovim
ln -sfv "$SCRIPT_DIR/.zshrc" ~ # zsh
# symlink vim bundle directory with neovim. vimrc takes care of detecting
# the difference and doing the right thing.
mkdir -pv ~/.vim
ln -sfv ~/.vim ~/.config/nvim
echo "Instaling vim-plug..."
# Install vim-plug
# Vim
curl -fLo ~/.vim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
# Neovim
curl -fLo ~/.local/share/nvim/site/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
echo "instaling Neovim providers..."
sudo python3 -m pip install --user --upgrade pynvim
gem install neovim
echo "PlugInstall vim-plug bundles"
# Install vim-plug bundles
vim +PlugInstall +qall
nvim +PlugInstall +qall
echo "Install NVM..."
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | zsh
echo "Installing OhMyZsh..."
sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
echo "Things to do:"
echo "1. Restart shell & npm install neovim, typescript, javascript-typescript-langserver"
echo "2. Install Homebrew"
echo "3. Set up zsh using example zshrc in repo."
echo "4. Set up git ssh key: https://help.github.com/en/github/authenticating-to-github/connecting-to-github-with-ssh"
# NOTE(review): no earlier `cd` happens in this script, so `cd -` depends on
# whatever OLDPWD the invoking shell had — confirm it is needed at all.
cd -
| true
|
7228a6089719649d5a1e247508352d468e11ff7e
|
Shell
|
tlockney/home
|
/osx_install.sh
|
UTF-8
| 949
| 2.875
| 3
|
[] |
no_license
|
# macOS bootstrap: installs Homebrew if missing, taps repos, then installs
# the formula and cask package sets below.

# Formula names, one per line (the word-splitting at `brew install` below
# is intentional — each line becomes one argument).
BASE_PACKAGES=$(cat <<EOF
ansible
awscli
bash
cmake
coreutils
curl
emacs
gcc
gh
git
gnupg
gzip
htop
hugo
jq
mosh
nvm
pandoc
pipenv
pipx
pyenv
tmux
tree
unzip
wget
zsh
zsh-completions
EOF
)

# GUI applications installed as Homebrew casks.
CASK_PACKAGES=$(cat <<EOF
1password
1password-cli
alfred
docker
emacs
font-fira-code
google-chrome
hazel
istat-menus
iterm2
moom
EOF
)

# Check whether Homebrew is installed and install it if it's missing.
# (`command -v` replaces `which brew 2>&1 > /dev/null`, whose redirections
# were ordered so stderr still reached the terminal, plus the `$?` test.)
if ! command -v brew >/dev/null 2>&1; then
  echo "Installing Homebrew"
  /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
fi

brew tap homebrew/core
brew tap homebrew/cask-fonts
brew tap homebrew/services
brew tap homebrew/cask
brew tap homebrew/cask-versions

# Unquoted on purpose: split the package lists into individual arguments.
brew install $BASE_PACKAGES || true
brew install --cask $CASK_PACKAGES || true
| true
|
c5fbf31a1d1983f36d5e46c40e63cd9b9661bd75
|
Shell
|
VeritoneAlpha/zookeeper-docker
|
/conf/run.sh
|
UTF-8
| 817
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# This is the base process of the container.
# No argument: initialize and run zookeeper in the foreground (PID 1).
# "start" / "stop" / "restart": control the zookeeper service directly.

# One-time setup: data-dir ownership, then service initialisation.
init()
{
  chown zookeeper:zookeeper /var/lib/zookeeper/
  service zookeeper-server init
}

# Start zookeeper and tail logs to keep PID 1 going.
start_container()
{
  init
  start_zookeeper
  sleep 5
  tail -f /var/log/zookeeper/*.out
}

# Start the zookeeper service
start_zookeeper()
{
  # Start the Zookeeper service
  echo -e "\n---- Launching Zookeeper ----\n"
  service zookeeper-server start
}

# Stop the zookeeper service
stop_zookeeper()
{
  service zookeeper-server stop
}

# Startup the container
# ("$1" is quoted: the original unquoted [ -z $1 ] broke on multi-word args)
if [ -z "$1" ]; then
  start_container
fi
# Start
if [ "$1" == "start" ]; then
  start_zookeeper
fi
# Stop
if [ "$1" == "stop" ]; then
  stop_zookeeper
fi
# Restart
if [ "$1" == "restart" ]; then
  stop_zookeeper
  sleep 2
  start_zookeeper
fi
| true
|
bebd31b6a68dd5348122aae776647a367aec89c3
|
Shell
|
endreszabo/raklap
|
/.raklaputils.sh
|
UTF-8
| 642
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Helper functions for preparing a removable "raklap" flash device:
# mount/umount, secure wipe and re-format. All functions rely on the
# RAKLAP_BLOCKDEVICE / RAKLAP_MOUNTPOINT / RAKLAP_MOUNTOPTS environment
# variables being set by the caller, and use sudo for the privileged steps.
set -o errexit
_mount_device() {
# Mount the device as vfat with the caller-supplied options.
sudo mount -v -t vfat "${RAKLAP_BLOCKDEVICE}" "${RAKLAP_MOUNTPOINT}" -o "${RAKLAP_MOUNTOPTS}"
}
_umount_device() {
sudo umount -v "${RAKLAP_BLOCKDEVICE}"
}
_format_device() {
# $1: optional 8-hex-digit volume id (defaults to 00000000).
sudo mkfs.vfat -n RAKLAP -i "${1:-00000000}" -v -- "${RAKLAP_BLOCKDEVICE}"
}
_wipe_device() {
# Only shreds when the mountpoint is actually mounted (errexit aborts
# otherwise); single random overwrite pass (-n1), NUL-safe file list.
mountpoint -- "${RAKLAP_MOUNTPOINT}" && find "${RAKLAP_MOUNTPOINT}" -type f -print0 | xargs --null --no-run-if-empty -- shred --verbose --random-source=/dev/urandom -n1 --
}
_clean_device() {
# Full cycle: ensure mounted, wipe all files, unmount, re-format, flush.
mountpoint -- "${RAKLAP_MOUNTPOINT}" || _mount_device
_wipe_device
_umount_device
_format_device "${1:-00000000}"
sync
}
| true
|
7531f70af958fb2c619c60de869ec011945e39e7
|
Shell
|
FabianGabor/Shell-programming
|
/walesiStat.sh
|
UTF-8
| 431
| 3.3125
| 3
|
[] |
no_license
|
#! /bin/bash
# Prints line/word/character statistics for the file given as $1, plus the
# average words and characters per line.
#
# Usage: walesiStat.sh <file>

# Feed the file on stdin: `wc` then prints the bare number, so the fragile
# fixed-width substring extraction of the original (which kept only the
# first 3-5 characters of "<count> <filename>") is unnecessary.
sorokSzama=$(wc -l < "$1")
szavakSzama=$(wc -w < "$1")
karakterekSzama=$(wc -m < "$1")
echo "Sorok szama: $sorokSzama"
echo "Szavak szama: $szavakSzama"
echo "Karakterek szama: $karakterekSzama"
echo "Atlagos soronkenti szoszam: $((szavakSzama/sorokSzama))"
echo "Atlagos soronkenti karakterszam: $((karakterekSzama/sorokSzama))"
| true
|
431541bada67640382cd65532759ae5414eac850
|
Shell
|
mohitekv/ShellBasic
|
/sequencial/unitconverter.sh
|
UTF-8
| 169
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Sequential-execution demo: simple arithmetic conversions with $(( )).

# 12 inches per foot; demo conversion output.
ft=12
ftCon=$((ft * 42))
echo 42in = $ftCon ft

# Area of a 60 x 40 feet plot, then the total for 25 such plots.
w=60
l=40
area=$((w * l))
echo area of 60 *40 feet plot is : $area
echo area of 25 such plots are $((25 * area))
| true
|
ef6e3d274e406409ea3894a14df627d03f3f2601
|
Shell
|
mpvelarde/django-wedding-website
|
/deploy/gunicorn/gunicorn_start
|
UTF-8
| 554
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Gunicorn launcher for the wedding-website Django app.
# Bracketed values ([path to project], [user], ...) are deploy-time
# placeholders — fill them in before running.
NAME="wedding-website"
DIR=[path to project]
USER=[user]
GROUP=[group]
WORKERS=3
BIND=unix:[path to socket]
DJANGO_SETTINGS_MODULE=bigday.settings
DJANGO_WSGI_MODULE=bigday.wsgi
LOG_LEVEL=error
LOG_FILE=[path to log file]
cd $DIR
# Activate the project virtualenv in the current shell.
. venv/bin/activate
export DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE
export PYTHONPATH=$DIR:$PYTHONPATH
# exec replaces this shell so gunicorn receives signals directly from the
# process supervisor.
exec venv/bin/gunicorn ${DJANGO_WSGI_MODULE}:application \
--name $NAME \
--workers $WORKERS \
--user=$USER \
--group=$GROUP \
--bind=$BIND \
--log-level=$LOG_LEVEL \
--log-file=$LOG_FILE
| true
|
293d0bda8de54bc9207f55f69e513f5cdcd28726
|
Shell
|
nmzuo/Act-L-Act-network
|
/feat_level1_RL_fslsub.sh
|
UTF-8
| 339
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Submits first-level FEAT analyses (RL phase encoding) to the FSL cluster
# queue, one fsl_sub job per subject range.
nSubj=30 ## total RL: 467
for sec in `seq 1 1 $nSubj`
do
sec1=${sec}
# Candidate batch end: start + 45 - 1, clamped to nSubj below.
sec2=`echo "$sec+45-1"|bc -l `
if ([ $sec2 -gt $nSubj ])
then
sec2=$nSubj
fi
# NOTE(review): this overwrites the batch-end computed above, so every job
# processes exactly one subject (sec1 == sec2) and the two lines above are
# dead code. Looks like a temporary/testing override — confirm intent.
sec2=${sec}
echo "nStart:nEnd " $sec1 $sec2
/mnt/software/fsl5.0/bin/fsl_sub bash feat_level1_RL.sh $sec1 $sec2
done
echo "Finished"
| true
|
63cd4a00a1c728150ff6f453aba64f9bcf7ae26b
|
Shell
|
VinceBarresi/random-gif
|
/random_gif.sh
|
UTF-8
| 171
| 2.8125
| 3
|
[] |
no_license
|
# Fetches a random GIF URL from the Giphy API and opens its fullscreen view
# (macOS `open`). Requires curl and jq.
function get_gif {
    local url="http://api.giphy.com/v1/gifs/random?&api_key=dc6zaTOxFJmzC"
    local gif
    # Quote "$url": it contains `?`/`&`, which an unquoted expansion exposes
    # to globbing and (in an interactive zsh) to parse errors.
    gif=$(curl "$url" | jq -r '.data.url')
    gif=$gif/fullscreen
    open "$gif"
}

get_gif
| true
|
b0beebdc27e3fe8709cbfefa5dcab172051c9da5
|
Shell
|
Rican7/dotfiles
|
/local/bin/emux
|
UTF-8
| 3,139
| 4.28125
| 4
|
[] |
no_license
|
#! /usr/bin/env bash
#
# Script to wrap the most common "tmux" functionality. Like a super-alias.
#
#
# Define constants
# Directory that holds optional startup scripts: "<this-script-path>.d".
readonly STARTUP_SCRIPT_DIR="$(printf "%s.d" "${BASH_SOURCE[0]}")"
# Print an error message to stderr, prefixed with a red "ERROR:" label.
function error() {
    local msg="$1"
    echo -e "\033[00;31mERROR:\033[00m $msg" 1>&2
}
# Perform a TMUX command in wrapper that allows the command to work even if a
# TMUX server isn't currently running.
#
# (We have to use `tmux start-server` to run certain commands if a session
# doesn't exist yet... See: https://github.com/tmux/tmux/issues/182)
# Arguments: a tmux subcommand plus its options, passed through verbatim.
function tmux_command() {
tmux start-server \; "$@"
}
# Return success when a tmux session exists.
# With an argument: check for that specific session; without: any session.
function session_exists() {
    local target="$1"
    if [ -n "$target" ]; then
        tmux_command has-session -t "$target" 2>/dev/null
    else
        tmux_command has-session 2>/dev/null
    fi
}
# Resolve a startup-script name to its full path under STARTUP_SCRIPT_DIR.
# No trailing newline is emitted, matching printf semantics.
function get_start_script_path() {
    local script_name=$1
    printf '%s/%s' "$STARTUP_SCRIPT_DIR" "$script_name"
}
# Let's define what commands exist
hash tmux 2>/dev/null && tmux=true || tmux=false
# Do we NOT have tmux?
if ! $tmux ; then
# Echo error and exit with a non-successful (non-zero) response
error "\"tmux\" not installed or unable to be executed";
exit 1;
fi
# Define our start script
# start_script_path: path of a "-r" startup script; start_script: the shell
# command a new session will run (source the script, then the default shell).
start_script_path=""
start_script=""
# Let's set some values based on the parameters
# (leading ":" puts getopts in silent mode so we handle errors ourselves)
while getopts ":r:" opt; do
case $opt in
r) start_script_path="$(get_start_script_path "$OPTARG")";;
\?) error "Unknown option: -$OPTARG"; exit 1;;
:) error "Invalid option: -$OPTARG requires an argument"; exit 1;;
esac
done
shift $((OPTIND -1))
if [ -n "$start_script_path" ] && [ ! -x "$start_script_path" ]; then
# Echo error and exit with a non-successful (non-zero) response
error "Start script \"$start_script_path\" does not exist or is unable to be executed";
exit 1;
fi
if [ -n "$start_script_path" ]; then
# Determine the default TMUX command
# Fall back to the default shell when no default-command is configured.
default_tmux_command="$(tmux_command show-options -gv default-command)"
default_tmux_command="${default_tmux_command:-$(tmux_command show-options -gv default-shell)}"
start_script="tmux source-file $start_script_path ; $default_tmux_command"
fi
# Grab our passed argument
readonly session_id=$1
# Did we pass an argument?
# All branches exec tmux, replacing this wrapper process entirely.
if [ -n "$session_id" ]; then
# See if we have an existing session matching our passed name
if session_exists "$session_id"; then
exec tmux attach-session -t "$session_id"
# We don't have an existing session with our passed name
else
if [ -n "$start_script" ]; then
exec tmux new-session -s "$session_id" "$start_script"
else
exec tmux new-session -s "$session_id"
fi
fi
# No argument passed
else
# See if we have any existing sessions
if session_exists; then
# Attach to the most recent session and interactively list our sessions
exec tmux attach-session\; choose-tree -s -Z
# No existing sessions
else
# Just create a new, default session
if [ -n "$start_script" ]; then
exec tmux new-session "$start_script"
else
exec tmux new-session
fi
fi
fi
| true
|
470113699d199f8b0b6b09c2fd3c9f8d8026f8a0
|
Shell
|
guziy/shell_scripts_for_rpn_files
|
/select_fields/soumet_selection.sh
|
UTF-8
| 618
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Extract coordinate records from RPN sample files and submit per-year
# field-selection jobs via soumet (HPC batch submission).
# samples_dir="/gs/project/ugh-612-aa/huziy/Output/GL_Hostetler/Samples"
samples_dir="/sf1/escer/sushama/restore/huziy/Samples"
coords_file=${samples_dir}/../tictacs.rpn
#===================
# Load the RPN diagnostics toolchain (provides editfst, soumet).
. s.ssmuse.dot diagtools fulldev
# create the coords file
rm -f ${coords_file}
# Only the first matching pm* file is needed: 'break' stops after one file.
for f in ${samples_dir}/*/pm*; do
echo "desire(-1, ['>>', '^^'])" | editfst -s $f -d ${coords_file}
break
done
# select the data
# One batch job per year; the {1998..1998} range currently covers 1998 only.
for y in {1998..1998}; do
echo $y
soumet ./select_fields_for_Craig.sh -t 7200 -cpus 1 -listing /home/huziy/listings/localhost -jn select_${y} -args ${y} ${samples_dir} ${coords_file}
done
| true
|
e7c173f15c2b7661778ba92da4df80adc8b1547b
|
Shell
|
paritytrading/philadelphia
|
/scripts/generate-repository
|
UTF-8
| 2,591
| 3.8125
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# Copyright 2015 Philadelphia authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This script generates FIX protocol versions from the FIX Repository. For more
# information on the FIX Repository, see:
#
# https://www.fixtrading.org/standards/fix-repository/
#
# This script uses Philadelphia Code Generator and therefore requires Python
# 3.7 or newer.
#
# As this repository already contains the generated FIX protocol versions, you
# only need to run this script if you change Philadelphia Code Generator or add
# support for additional FIX protocol versions.
#
set -e
# Run the Philadelphia code generator for one artifact.
# Arguments:
#   $1 - generator sub-command (enumerations | msg-types | tags)
#   $2 - path to the .ini config, relative to the repo root
#   $3 - path to the FIX Repository input (made repo-root-relative if needed)
#   $4 - output .java file, relative to the repo root
# Writes the license header first, then appends the generated code.
generate() {
local command=$1
local config_path=$2
local input_path=$3
local output_path=$4
if [[ $input_path != /* ]]; then
input_path="../../$input_path"
fi
./scripts/generate-license > "$output_path"
(cd applications/generate; \
python -m philadelphia.generate "$command" "../../$config_path" "$input_path" >> "../../$output_path")
}
# Generate the Enumerations, MsgTypes and Tags sources for one FIX version.
# Arguments:
#   $1 - directory containing the FIX Repository
#   $2 - version directory name, e.g. "FIX.4.4"
generate_version() {
local input_dir=$1
local input_version=$2
# "FIX.4.4" -> "FIX44" (strip dots), then "fix44" (bash 4+ lowercasing).
local output_prefix=${input_version//./}
local output_module=${output_prefix,,}
local config_path="scripts/$output_module.ini"
local input_path="$input_dir/$input_version/Base"
local output_dir="libraries/$output_module/src/main/java/com/paritytrading/philadelphia/$output_module"
generate enumerations "$config_path" "$input_path" "$output_dir/${output_prefix}Enumerations.java"
generate msg-types "$config_path" "$input_path" "$output_dir/${output_prefix}MsgTypes.java"
generate tags "$config_path" "$input_path" "$output_dir/${output_prefix}Tags.java"
}
# Require exactly one argument: the directory containing the FIX Repository.
if (( $# != 1 )); then
    echo "Usage: ${0##*/} <input-directory>" >&2
    exit 2
fi
input_dir=$1
# Generate sources for every supported FIX protocol version.
for fix_version in FIX.4.0 FIX.4.1 FIX.4.2 FIX.4.3 FIX.4.4 \
                   FIX.5.0 FIX.5.0SP1 FIX.5.0SP2; do
    generate_version "$input_dir" "$fix_version"
done
| true
|
96312dd95492bc42f2a82c57f964f2a0ec90290a
|
Shell
|
GorillaFu/holberton-system_engineering-devops
|
/0x05-processes_and_signals/4-to_infinity_and_beyond
|
UTF-8
| 127
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Print the same line of text forever, pausing two seconds between prints.
while :; do
    echo "To infinity and beyond"
    sleep 2
done
| true
|
d8cd521bc29bda0a887e92c26a9cbae8d4c7896c
|
Shell
|
exponea/pg8000
|
/circleci/install-pypy3.3-5.5.sh
|
UTF-8
| 465
| 2.875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Download and install portable PyPy 3.3-5.5, then symlink it as ~/bin/pypy3.
set -x
set -e
BUILDROOT=$HOME/pg8000
# NOTE(review): the existence check, wget and tar all operate on the current
# working directory, while the symlink below assumes the archive was unpacked
# under $BUILDROOT — this presumably relies on the script being run from
# $BUILDROOT; confirm against the CI config.
if [[ ! -e pypy3.3-5.5-alpha-20161013-linux_x86_64-portable/bin/pypy ]]; then
wget https://bitbucket.org/squeaky/portable-pypy/downloads/pypy3.3-5.5-alpha-20161013-linux_x86_64-portable.tar.bz2
tar -jxf pypy3.3-5.5-alpha-20161013-linux_x86_64-portable.tar.bz2
rm -f pypy3.3-5.5-alpha-20161013-linux_x86_64-portable.tar.bz2
fi
ln -s $BUILDROOT/pypy3.3-5.5-alpha-20161013-linux_x86_64-portable/bin/pypy ~/bin/pypy3
| true
|
a6c11545fa6ad18e759a03e38492d28845f56794
|
Shell
|
timfel/dotfiles
|
/bin/makedeb.sh
|
UTF-8
| 4,020
| 3.609375
| 4
|
[] |
no_license
|
#! /bin/sh
#
# makedeb.sh - Utility for easy packaging of binaries
# Copyright (C) 2005-2006 Tommi Saviranta <wnd@iki.fi>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
# Version: makedeb.sh v0.1.2 17-Mar-2006 wnd@iki.fi
set -e
# Working directory for the generated debian/ packaging tree.
tmpdir=__makedeb__tmp__
# Package metadata defaults; each can be overridden via a matching --option.
section=misc
priority=optional
maintainer="Tim Felgentreff <timfelgentreff@gmail.com>"
version=0.1.0-1
branch=unstable
urgency=low
depends=apt
arch=i386
# Parse "--name value" pairs: each long option is turned into a shell
# variable assignment of the same name.
while test $# -gt 0; do
case $1 in
--help)
cat <<EOF
Usage: $0 --rootdir dir --package package --description description
[--longdesc long-description] [--version version]
[--arch architecture] [--branch bracnh] [--urgency urgency]
[--maintainer maintainer] [--priority priority]
[--section section] [--depends depends]
EOF
exit 0
;;
--[a-z]*)
switch=$(echo "$1" | cut -c 3-)
# NOTE(review): eval-assigning from untrusted argv allows arbitrary code /
# variable injection (e.g. "--PATH"); validate $switch against a whitelist.
eval $switch=\""$2"\"
shift
;;
*)
echo "Bad option: $1"
exit 1
;;
esac
shift
done
# Required options: rootdir, package and description must all be present.
if [ "x$rootdir" = "x" ]; then
echo "rootdir not set"
exit 1
fi
if [ "x$package" = "x" ]; then
echo "Package name not set"
exit 1
fi
if [ "x$description" = "x" ]; then
echo "Description not set"
exit 1
fi
test ! "$longdesc" && longdesc="$description"
# RFC-2822 style date for the changelog entry.
date=$(date "+%a, %d %b %Y %H:%M:%S %z")
if [ -d "$tmpdir" ]; then
echo "$tmpdir already exists!"
exit 1
fi
if [ ! -d "$rootdir" ]; then
echo "$rootdir does not exist!"
exit 1
fi
# NOTE(review): dead branch — arch defaults to i386 above, so it is never
# empty here and dpkg-architecture autodetection never runs.
if [ "x$arch" = "x" ]; then
if dpkg-architecture -qDEB_BUILD_ARCH_CPU 1>/dev/null 2>&1; then
arch=$(dpkg-architecture -qDEB_BUILD_ARCH_CPU)
else
echo "Cannot get architecture with dpkg-architecture!"
echo "Use --architecture foo to enter it manually."
exit 1
fi
fi
mkdir "$tmpdir"
# Remove the scratch tree on exit, interrupt or termination.
trap "rm -rf \"$tmpdir\"" 0 1 2 15
mkdir "$tmpdir/debian"
echo 4 >"$tmpdir/debian/compat"
# debian/changelog — unquoted heredoc so $package, $version etc. expand.
cat <<EOF >"$tmpdir/debian/changelog"
$package ($version) $branch; urgency=$urgency
* Packaged with makedeb.
-- $maintainer $date
EOF
cat <<EOF >"$tmpdir/debian/control"
Source: $package
Section: $section
Priority: $priority
Maintainer: $maintainer
Build-Depends: debhelper (>= 4.0.0)
Standards-Version: 3.6.0
Package: $package
Section: $section
Architecture: $arch
Depends: $depends
Description: $description
$longdesc
EOF
# Copy the payload and list everything except debian/ in the .install file.
cp -r -p "$rootdir"/* "$tmpdir"
ls "$tmpdir" | grep -v debian >"$tmpdir/debian/$package.install"
# debian/rules — shell variables expand here, but \$(shell ...) is escaped so
# it survives as a make expression in the generated Makefile.
cat <<EOF >"$tmpdir/debian/rules"
#! /usr/bin/make -f
# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1
# These are used for cross-compiling and for saving the configure script
# from having to guess our platform (since we know it already)
DEB_HOST_GNU_TYPE ?= \$(shell dpkg-architecture -qDEB_HOST_GNU_TYPE)
DEB_BUILD_GNU_TYPE ?= \$(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE)
config.status: configure
dh_testdir
build: build-stamp
build-stamp:
dh_testdir
touch build-stamp
clean:
dh_testdir
dh_testroot
rm -f build-stamp
dh_clean
install: build
dh_testdir
dh_testroot
dh_clean -k
# Build architecture-independent files here.
binary-indep: build install
# Build architecture-dependent files here.
binary-arch: build install
dh_testdir
dh_testroot
dh_installchangelogs
dh_install
dh_installdebconf
dh_compress
dh_fixperms
dh_makeshlibs
dh_installdeb
dh_gencontrol
dh_md5sums
dh_builddeb
binary: binary-indep binary-arch
.PHONY: build clean binary-indep binary-arch binary install
EOF
chmod 755 "$tmpdir/debian/rules"
# Build the binary package inside the scratch tree.
(cd "$tmpdir"; fakeroot dpkg-buildpackage -b)
| true
|
4753affba9ae9784859d09e2baa615b9639ace9d
|
Shell
|
thehyve/ohdsi-etl-sweden
|
/execute_etl_mac.sh
|
UTF-8
| 9,152
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Created by Maxim Moinat.
# Copyright (c) 2016 The Hyve B.V.
# This code is licensed under the Apache License version 2.0
# Python 3 is required!
# Command line Variables
# Positional arguments: $1 database, $2 db user, $3 encoding (default UTF8),
# $4 CDM schema (default cdm5), $5 vocabulary schema (default cdm5).
DATABASE_NAME="$1"
USER="$2"
ENCODING="$3"
DATABASE_SCHEMA="$4"
VOCAB_SCHEMA="$5"
# ETL parameters
DATA_START_DATE="19970101"
DATA_END_DATE="20150801"
# Timestamp for the dataset name below.
# BUG FIX: DATE was previously assigned *after* DATASET_NAME interpolated it,
# so the dataset name always ended with an empty timestamp.
DATE=`date +%Y-%m-%d_%H:%M`
# Source daimon parameters
DATASET_NAME="Swedish Registry ETL $DATE"
DATASET_ABBREV="SwedReg"
WEBAPI_SCHEMA="webapi" #Schema of source an source_daimon tables and results tables
CONNECTION_STRING="jdbc:postgresql://localhost:5432/ohdsi?user=webapi&password=webapi"
PRIO_DAIMONS=1
# System Constants
SCRIPTS_FOLDER="scripts"
SOURCE_FOLDER="source_tables"
MAP_SCRIPT_FOLDER="$SCRIPTS_FOLDER/mapping_scripts"
ETL_SCRIPT_FOLDER="$SCRIPTS_FOLDER/loading_scripts"
DYNAMIC_SCRIPT_FOLDER="$SCRIPTS_FOLDER/rendered_sql"
SQL_FUNCTIONS_FOLDER="$SCRIPTS_FOLDER/sql_functions"
PYTHON_FOLDER="$SCRIPTS_FOLDER/python"
DRUG_MAPPING_FOLDER="$SCRIPTS_FOLDER/drug_mapping"
OMOP_CDM_FOLDER="$SCRIPTS_FOLDER/OMOPCDM"
TIME_FORMAT="Elapsed Time: %e sec"
# Check whether command line arguments are given
if [ "$DATABASE_NAME" = "" ] || [ "$USER" = "" ]; then
echo "Please input a database name and username of the database. Usage: "
echo "./execute_etl.sh <database_name> <user_name> [<encoding>] [<database_schema>] [<vocabulary_schema>]"
exit 1
fi
# Defaults
if [ "$ENCODING" = "" ]; then ENCODING="UTF8"; fi
if [ "$DATABASE_SCHEMA" = "" ]; then DATABASE_SCHEMA="cdm5"; fi
if [ "$VOCAB_SCHEMA" = "" ]; then VOCAB_SCHEMA="cdm5"; fi
date
echo "===== Starting the ETL procedure to OMOP CDM ====="
echo "Using the database '$DATABASE_NAME' and the '$DATABASE_SCHEMA' schema."
echo "Loading source files from the folder '$SOURCE_FOLDER' "
echo "Using $ENCODING encoding of the source files."
# Create cdm5 schema. Assume vocab schema exists and is filled.
sudo -u $USER psql -d $DATABASE_NAME -c "CREATE SCHEMA IF NOT EXISTS $DATABASE_SCHEMA;"
# Search for tables in database schema, if schema name not specified explicitly.
sudo -u $USER psql -d $DATABASE_NAME -c "ALTER DATABASE $DATABASE_NAME SET search_path TO $DATABASE_SCHEMA;"
echo
echo "Preprocessing patient registers..."
# First remove any existing rendered tables
rm -f rendered_tables/patient_register/*
rm -f rendered_tables/death_register/*
python $PYTHON_FOLDER/process_patient_tables_wide_to_long.py $SOURCE_FOLDER $ENCODING
python $PYTHON_FOLDER/process_death_tables_wide_to_long.py $SOURCE_FOLDER $ENCODING
echo
echo "Reading headers of source tables..."
# python $SCRIPTS_FOLDER/process_drug_registries.py $SOURCE_FOLDER/drug_register
# Renders the COPY statements used by load_tables.sql below.
python $PYTHON_FOLDER/create_copy_sql.py $SOURCE_FOLDER $ENCODING $DYNAMIC_SCRIPT_FOLDER
echo
# The following is executed quietly (-q)
echo "Dropping cdm5 tables and emptying schemas..."
sudo -u $USER psql -d $DATABASE_NAME -f $SCRIPTS_FOLDER/empty_schemas.sql -q
sudo -u $USER psql -d $DATABASE_NAME -f $SCRIPTS_FOLDER/drop_cdm_tables.sql -q
sudo -u $USER psql -d $DATABASE_NAME -f "$OMOP_CDM_FOLDER/OMOP CDM ddl.sql" -q
echo "Creating sequences..."
sudo -u $USER psql -d $DATABASE_NAME -f $SCRIPTS_FOLDER/alter_omop_cdm.sql -q
echo
echo "Creating source tables..."
sudo -u $USER psql -d $DATABASE_NAME -f $SCRIPTS_FOLDER/create_source_tables.sql
echo "Loading source tables..."
# sudo -u $USER psql -d $DATABASE_NAME -f $SCRIPTS_FOLDER/load_source_tables.sql
time sudo -u $USER psql -d $DATABASE_NAME -f $DYNAMIC_SCRIPT_FOLDER/load_tables.sql
echo "Filtering rows without date..."
time sudo -u $USER psql -d $DATABASE_NAME -f $SCRIPTS_FOLDER/filter_source_tables.sql
echo "Creating indices source tables..."
time sudo -u $USER psql -d $DATABASE_NAME -f $SCRIPTS_FOLDER/alter_source_tables.sql
# Search for tables first in database schema, then in vocabulary schema (and last in public schema)
sudo -u $USER psql -d $DATABASE_NAME -c "ALTER DATABASE $DATABASE_NAME SET search_path TO $DATABASE_SCHEMA, $VOCAB_SCHEMA, public;"
echo
echo "Creating mapping tables..."
sudo -u $USER psql -d $DATABASE_NAME -f $MAP_SCRIPT_FOLDER/load_mapping_tables.sql
echo "Loading mappings into the source_to_concept_map:"
sudo -u $USER psql -d $DATABASE_NAME -f $MAP_SCRIPT_FOLDER/load_source_to_concept_map.sql
echo
echo "Preprocessing..."
printf "%-35s" "Unique persons from registers: "
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/lpnr_aggregated.sql
echo "Create supporting SQL functions:"
sudo -u $USER psql -d $DATABASE_NAME -f $SQL_FUNCTIONS_FOLDER/getObservationStartDate.sql
sudo -u $USER psql -d $DATABASE_NAME -f $SQL_FUNCTIONS_FOLDER/getObservationEndDate.sql
sudo -u $USER psql -d $DATABASE_NAME -f $SQL_FUNCTIONS_FOLDER/convertDeathDate.sql
sudo -u $USER psql -d $DATABASE_NAME -f $SQL_FUNCTIONS_FOLDER/getDrugQuantity.sql
sudo -u $USER psql -d $DATABASE_NAME -f $SQL_FUNCTIONS_FOLDER/getDrugEndDate.sql
sudo -u $USER psql -d $DATABASE_NAME -f $SQL_FUNCTIONS_FOLDER/getDrugDaysSupply.sql
# Actual ETL. Order is important.
# Especially always first Person and Death tables.
echo
echo "Performing ETL..."
printf "%-35s" "Person: "
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_person.sql
printf "%-35s" "Death with addendum table: "
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_death.sql
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_death_addendum.sql -q
printf "%-35s" "Observation Period: "
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_observation_period.sql -v data_start_date=$DATA_START_DATE -v data_end_date=$DATA_END_DATE
printf "%-35s" "Visit Occurrence: "
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_visit_occurrence.sql
printf "%-35s" "Condition Occurrence: "
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_condition_occurrence.sql
printf "%-35s" "Procedure Occurrence: "
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_procedure_occurrence.sql
printf "%-35s" "Drug Exposure: "
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_drug_exposure.sql
printf "%-35s" "Observation Death Morsak: " #Additional causes of death
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_observation_death.sql
printf "%-35s" "Observation Civil Status: " #Only where civil is not null
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_observation_civil.sql
printf "%-35s" "Observation Planned visit: " #all
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_observation_pvard.sql
printf "%-35s" "Observation Utsatt Status: " #Only sluten care
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_observation_utsatt.sql
printf "%-35s" "Observation Insatt Status: " #Only sluten care
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_observation_insatt.sql
printf "%-35s" "Observation Ekod: " #Only where ekod is not null
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_observation_ekod.sql
printf "%-35s" "Observation Work Status: " #Only Lisa
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_observation_work_status.sql
printf "%-35s" "Observation Education level: " #Only Lisa
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_observation_education.sql
printf "%-35s" "Observation Ethnic Background: " #Only Lisa
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_observation_background.sql
printf "%-35s" "Measurement Income: " #Only Lisa
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_measurement_income.sql
# printf "%-35s" "Measurement Age: " #All registers
# time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/etl_measurement_age.sql
echo
echo "Building Eras..."
printf "%-35s" "Condition Era: "
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/build_condition_era.sql
printf "%-35s" "Drug Era: "
time sudo -u $USER psql -d $DATABASE_NAME -f $ETL_SCRIPT_FOLDER/build_drug_era.sql
# Insert data information in cdm_source and webapi_sourc[_daimon] tables
echo
sudo -u $USER psql -d $DATABASE_NAME -f $SCRIPTS_FOLDER/insert_cdm_source.sql -q -v source_release_date=$DATA_END_DATE
# sudo -u $USER psql -d $DATABASE_NAME -f $SQL_FUNCTIONS_FOLDER/setSourceDaimon.sql
# echo "The new Source ID is:"
# sudo -u $USER psql -d $DATABASE_NAME -c "SELECT setSourceDaimon('$DATABASE_SCHEMA','Swedish Registry ETL $DATE','SwedReg');" -t
# Grant access to webapi in order to make the person tab work
sudo -u $USER psql -d $DATABASE_NAME -c "GRANT SELECT ON ALL TABLES IN SCHEMA $DATABASE_SCHEMA TO webapi;"
sudo -u $USER psql -d $DATABASE_NAME -c "GRANT USAGE ON SCHEMA $DATABASE_SCHEMA TO webapi;"
echo
echo "Adding constraints..."
time sudo -u $USER psql -d $DATABASE_NAME -f "$OMOP_CDM_FOLDER/OMOP CDM constraints.sql" -q
echo "Adding indices..."
time sudo -u $USER psql -d $DATABASE_NAME -f "$OMOP_CDM_FOLDER/OMOP CDM indexes required.sql" -q
# Restore search path
sudo -u $USER psql -d $DATABASE_NAME -c "ALTER DATABASE $DATABASE_NAME SET search_path TO \"\$user\", public;"
date
| true
|
9d226cf97ebda786a52ac826ffbe29e2e50012a9
|
Shell
|
PapaSchlunz/swarm
|
/modules/bee/beeNetwork
|
UTF-8
| 591
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Switch the Bee node between networks. Sourced module: expects beeNetwork,
# currentBeeNetwork and beeHome from the caller, and sets restartBee.
# Check if network changed
if [ "$beeNetwork" != "$currentBeeNetwork" ]; then
  if [ "$beeNetwork" = "mainnet" ]; then
    # BUG FIX: 'sudo echo ... > file' performs the redirection as the
    # invoking (unprivileged) user; write through 'sudo tee' instead so the
    # file is opened with elevated rights.
    echo "OPTIONS=\"--config config.chrysalis-mainnet.toml\"" | sudo tee /etc/default/bee > /dev/null
  fi
  if [ "$beeNetwork" = "comnet" ]; then
    echo "OPTIONS=\"--config config.chrysalis-comnet.toml\"" | sudo tee /etc/default/bee > /dev/null
  fi
  # Drop snapshots and storage of the previous network before switching.
  sudo rm -rf "$beeHome/snapshots/$currentBeeNetwork" "$beeHome/storage/${currentBeeNetwork}"
  restartBee=true
  whiptail --title "Bee Network" --msgbox "Bee was successfully changed to the $beeNetwork network!" 8 65
fi
| true
|
e2263e2a50316c7366c94fd5b50f5100bb983747
|
Shell
|
CDS-VRN/CDS-Assembly
|
/deploy.sh
|
UTF-8
| 7,185
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
# BUG FIX: inside [[ ]], '<' is a lexicographic *string* comparison (e.g.
# [[ 10 < 3 ]] is true); use -lt for a numeric argument-count check.
if [[ $# -lt 3 ]]
then
    echo "Insufficient arguments provided, required: <settings file> <docker host address tcp://127.0.0.1:2375> <deploy version> [<version to upgrade from if different from currently running>]"
    exit 1
fi
# Positional arguments: settings file, docker host address, deploy version.
SETTINGS_FILE=$1
DOCKER_HOST=$2
DOCKER_CMD="docker -H ${DOCKER_HOST}"
DEPLOY_VERSION=$3
echo "Loading settings from $SETTINGS_FILE ..."
source $SETTINGS_FILE
echo "Deploying version: ${DEPLOY_VERSION} ; host ${DOCKER_HOST}."
# Derive the running version from the cds-master-admin container name suffix
# ("cds-master-admin_<version>"); empty when no installation exists yet.
CURRENT_VERSION=$(${DOCKER_CMD} ps -a | grep "cds-master-admin" | awk 'BEGIN {FS=" "}; {print $NF}' | awk 'BEGIN {FS="_"}; {print $2}')
# Note that we can upgrade from a different version than the one currently
# running: after a failed deploy we may already be running the newer version
# but still want to upgrade from the previous one, so the fixed update
# scripts for the database and LDAP are run.
# BUG FIX: inside [[ ]], '>' compares strings lexicographically; use -gt.
if [[ $# -gt 3 ]]
then
    PREV_VERSION=$4
else
    PREV_VERSION=${CURRENT_VERSION}
fi
echo "Upgrading from version ${PREV_VERSION}"
echo "Running containers before upgrade:"
${DOCKER_CMD} ps -a
if [[ -z "${CURRENT_VERSION}" ]]
then
# New installation
# Create the version-independent data-only containers (db, ldap, workspaces,
# metadata); these survive upgrades and are never removed below.
echo "No existing installation detected; New installation."
${DOCKER_CMD} run -d -v /var/lib/postgresql --name cds-master-dbdata cds-postgresql:${DEPLOY_VERSION} true
${DOCKER_CMD} run -d -v /opt/OpenDS-2.2.1/db --name cds-master-ldapdata cds-ldap:${DEPLOY_VERSION} true
${DOCKER_CMD} run -d -v /etc/cds/workspaces --name cds-master-workspaces cds-webservices:${DEPLOY_VERSION} true
${DOCKER_CMD} run -d -v /var/lib/cds --name cds-master-metadata cds-admin:${DEPLOY_VERSION} true
#${DOCKER_CMD} run --rm --volumes-from cds-master-workspaces -v /path/to/local/workspaces:/etc/cds/workspaces-src cds-base sh -c 'cp -r /etc/cds/workspaces-src/* /etc/cds/workspaces'
else
echo "Detected current version: ${CURRENT_VERSION}."
echo "Stopping existing ${CURRENT_VERSION} containers..."
# Stop all existing containers.
${DOCKER_CMD} stop cds-master-cron_${CURRENT_VERSION}
${DOCKER_CMD} stop cds-master-apache_${CURRENT_VERSION}
${DOCKER_CMD} stop cds-master-webservices_${CURRENT_VERSION}
${DOCKER_CMD} stop cds-master-jobexecutor_${CURRENT_VERSION}
${DOCKER_CMD} stop cds-master-admin_${CURRENT_VERSION}
${DOCKER_CMD} stop cds-master-ldap_${CURRENT_VERSION}
${DOCKER_CMD} stop cds-master-postgresql_${CURRENT_VERSION}
echo "Removing existing ${CURRENT_VERSION} containers (except data only containers) ..."
# Remove existing non-data-only containers.
${DOCKER_CMD} rm cds-master-cron_${CURRENT_VERSION}
${DOCKER_CMD} rm cds-master-apache_${CURRENT_VERSION}
${DOCKER_CMD} rm cds-master-webservices_${CURRENT_VERSION}
${DOCKER_CMD} rm cds-master-jobexecutor_${CURRENT_VERSION}
${DOCKER_CMD} rm cds-master-admin_${CURRENT_VERSION}
${DOCKER_CMD} rm cds-master-ldap_${CURRENT_VERSION}
${DOCKER_CMD} rm cds-master-postgresql_${CURRENT_VERSION}
${DOCKER_CMD} rm cds-master-config_${CURRENT_VERSION}
fi
# Start the containers. Each container can perform data container updates when needed by checking the current and deploy version.
echo "Deploying new ${DEPLOY_VERSION} containers..."
echo "Generating config from cds-config..."
${DOCKER_CMD} run \
--name cds-master-config_${DEPLOY_VERSION} \
-e CDS_ADMIN_REQUEST_AUTHORIZATION_PROMPT="$CDS_ADMIN_REQUEST_AUTHORIZATION_PROMPT" \
-e CDS_ADMIN_REQUEST_AUTHORIZATION_HREF="$CDS_ADMIN_REQUEST_AUTHORIZATION_HREF" \
-e CDS_MAIL_FROM="$CDS_MAIL_FROM" \
-e CDS_MAIL_HOST="$CDS_MAIL_HOST" \
-e CDS_ETL_PGR_URL="$CDS_ETL_PGR_URL" \
-e CDS_AWSTATS_URL="$CDS_AWSTATS_URL" \
-e CDS_AWSTATS_NAMES="$CDS_AWSTATS_NAMES" \
-e CDS_MUNIN_URL="$CDS_MUNIN_URL" \
-e CDS_NAGIOS_URL="$CDS_NAGIOS_URL" \
-e CDS_NAGIOS_HOSTS="$CDS_NAGIOS_HOSTS" \
-e CDS_NAGIOS_HOSTGROUP="$CDS_NAGIOS_HOSTGROUP" \
-e CDS_NAGIOS_STATUS_REGISTRY_PORT="$CDS_NAGIOS_STATUS_REGISTRY_PORT" \
-e CDS_NAGIOS_STATUS_SERVICE_URL="$CDS_NAGIOS_STATUS_SERVICE_URL" \
-e CDS_INSPIRE_GET_FEATURE_REQUEST="$CDS_INSPIRE_GET_FEATURE_REQUEST" \
-e CDS_INSPIRE_HOST="$CDS_INSPIRE_HOST" \
cds-config:${DEPLOY_VERSION}
echo "Deploying cds-postgresql..."
${DOCKER_CMD} run --name cds-master-postgresql_${DEPLOY_VERSION} -P -d --volumes-from cds-master-dbdata \
-e DEPLOY_VERSION=${DEPLOY_VERSION} \
-e PREV_VERSION=${PREV_VERSION} \
--restart=always \
cds-postgresql:${DEPLOY_VERSION}
echo "Deploying cds-ldap..."
${DOCKER_CMD} run --name cds-master-ldap_${DEPLOY_VERSION} -P -d --volumes-from cds-master-ldapdata \
-e DEPLOY_VERSION=${DEPLOY_VERSION} \
-e PREV_VERSION=${PREV_VERSION} \
--restart=always \
cds-ldap:${DEPLOY_VERSION}
echo "Deploying cds-admin..."
${DOCKER_CMD} run --name cds-master-admin_${DEPLOY_VERSION} -P -d --volumes-from cds-master-config_${DEPLOY_VERSION} \
--volumes-from cds-master-metadata \
--link cds-master-postgresql_${DEPLOY_VERSION}:db \
--link cds-master-ldap_${DEPLOY_VERSION}:ldap \
-e DEPLOY_VERSION=${DEPLOY_VERSION} \
-e PREV_VERSION=${PREV_VERSION} \
--restart=always \
cds-admin:${DEPLOY_VERSION}
echo "Deploying cds-jobexecutor..."
${DOCKER_CMD} run --name cds-master-jobexecutor_${DEPLOY_VERSION} -P -d --volumes-from cds-master-config_${DEPLOY_VERSION} \
--volumes-from cds-master-metadata \
--link cds-master-postgresql_${DEPLOY_VERSION}:db \
--link cds-master-ldap_${DEPLOY_VERSION}:ldap \
-e DEPLOY_VERSION=${DEPLOY_VERSION} \
-e PREV_VERSION=${PREV_VERSION} \
--restart=always \
cds-job-executor:${DEPLOY_VERSION}
echo "Deploying cds-webservices..."
${DOCKER_CMD} run --name cds-master-webservices_${DEPLOY_VERSION} -P -d --volumes-from cds-master-config_${DEPLOY_VERSION} \
--volumes-from cds-master-workspaces \
--link cds-master-postgresql_${DEPLOY_VERSION}:db \
--volumes-from cds-master-metadata \
--link cds-master-ldap_${DEPLOY_VERSION}:ldap \
-e DEPLOY_VERSION=${DEPLOY_VERSION} \
-e PREV_VERSION=${PREV_VERSION} \
--restart=always \
cds-webservices:${DEPLOY_VERSION}
echo "Deploying cds-apache..."
${DOCKER_CMD} run --name cds-master-apache_${DEPLOY_VERSION} -p 80:80 -d --link cds-master-admin_${DEPLOY_VERSION}:admin \
--link cds-master-webservices_${DEPLOY_VERSION}:webservices \
--volumes-from cds-master-metadata \
-e CDS_SERVER_NAME="$CDS_SERVER_NAME" \
-e CDS_WEBSERVICES_SERVER_NAME="$CDS_WEBSERVICES_SERVER_NAME" \
-e CDS_SERVER_ADMIN="$CDS_SERVER_ADMIN" \
-e CDS_SERVICES="$CDS_SERVICES" \
-e DEPLOY_VERSION=${DEPLOY_VERSION} \
-e PREV_VERSION=${PREV_VERSION} \
--restart=always \
cds-apache:${DEPLOY_VERSION}
echo "Deploying cds-cron..."
${DOCKER_CMD} run --name cds-master-cron_${DEPLOY_VERSION} -d --link cds-master-postgresql_${DEPLOY_VERSION}:db \
-e DEPLOY_VERSION=${DEPLOY_VERSION} \
-e PREV_VERSION=${PREV_VERSION} \
cds-cron:${DEPLOY_VERSION}
# TODO: Make this more elegant.
# Give postgresql a moment to run its update scripts, then fail the deploy if
# its logs mention "error".
sleep 2
ERROR_CHECK=$(${DOCKER_CMD} logs cds-master-postgresql_${DEPLOY_VERSION} | grep error | wc -l)
if [[ ${ERROR_CHECK} != 0 ]]
then
echo "Database update failed:"
exit 1
fi
echo "Existing containers after upgrade:"
${DOCKER_CMD} ps -a
| true
|
d8ca00a8279e151fadf31e2761a0a5ebdbe9a913
|
Shell
|
greearb/lanforge-scripts
|
/rping.bash
|
UTF-8
| 2,520
| 4
| 4
|
[
"BSD-2-Clause-Views",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
## ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ##
## Ping a random ip every second, indefinitely. The ping command
## has a deadline of *count* seconds and wait time of 1 second.
## This script is aware of vrf_exec.bash and LF_NO_USE_VRF.
##
## Example Usage:
##
## Ping a random ip once from port br0 every second:
## ./rping.bash -p br0 -c 1
##
## Default to eth1, ping 4 random IPs, ping each ip 10 times
## ./rping.bash -r 4 -c 10
##
## Ping 4 random IPs, ping each ip 10 times, from port br1
## ./rping.bash -r 4 -c 10 -pbr1
##
## ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ##
# set -vux
HL="/home/lanforge"
HL_VX="$HL/vrf_exec.bash"
# UV: 1 = route pings through the vrf_exec.bash wrapper, 0 = plain ping.
# Disabled when the LF_NO_USE_VRF marker file exists or the wrapper is
# missing/not executable.
UV=1
if [ -f $HL/LF_NO_USE_VRF ]; then
UV=0
fi
if [ ! -x $HL_VX ]; then
UV=0
fi
# Print a one-line usage summary on stdout (the caller decides the exit code).
# BUG FIX: the usage text advertised "-r" for the random-ip count, but the
# option parser reads it as -i; document -i (the parser now accepts both).
usage() {
    echo "Usage: $0 [-p port] [-c count of pings per ip] [-i number random ips] [-d seconds duration]"
}
# Defaults: interface, pings per ip, size of the random-ip pool, duration.
PORT=eth1
COUNT=1
NUM_IP=0
DURATION=0
# Accept both -i and -r for the random-ip count: the header examples and the
# usage text advertise -r, but the original parser only accepted -i.
while getopts "hi:c:d:p:r:" OPT; do
    case "${OPT}" in
        c) COUNT="${OPTARG}"
            ;;
        d) DURATION="${OPTARG}"
            if (( $DURATION < 0 )); then
                echo "$0: duration cannot be negative"
                exit 1
            fi
            ;;
        h) usage
            exit 0
            ;;
        i|r) NUM_IP="${OPTARG}"
            ;;
        p) PORT="${OPTARG}"
            ;;
        *) echo "Unknown option $OPT";
            usage
            exit 1
            ;;
    esac
done
if [[ -z "$PORT" ]]; then
    echo "$0: no port" >&2
    # BUG FIX: 'return' is only valid inside a function or a sourced script;
    # at top level it fails — use 'exit' instead.
    exit 1
fi
# Emit a random dotted-quad IPv4 address (each octet 0-255) on stdout,
# without a trailing newline. This is unfiltered: it may produce network,
# broadcast or multicast addresses — add range logic here if you need a
# specific subset.
rand_ip() {
    local -a octets=()
    local idx
    for ((idx = 0; idx < 4; idx++)); do
        octets+=("$((RANDOM % 256))")
    done
    printf "%d.%d.%d.%d" "${octets[@]}"
}
# Ping $1 COUNT times (1s interval, COUNT-second deadline, 1s per-reply wait).
# Reads globals: UV (use vrf wrapper), HL_VX (wrapper path), PORT, COUNT.
# Returns ping's exit status; 1 if no ip was given.
my_ping() {
if [[ x$1 == x ]]; then
echo "$0: my_ping: cannot ping empty ip" >&2
return 1
fi
if (($UV == 1)); then
# Wrapper binds the ping to the port's VRF network namespace.
$HL_VX $PORT ping -c $COUNT -D -i1 -n -w$COUNT -W1 "$1"
else
ping -I$PORT -c $COUNT -D -i1 -n -w$COUNT -W1 "$1"
fi
}
IP_LIST=()
# Pre-generate a fixed pool of NUM_IP random addresses when requested;
# otherwise a fresh random ip is drawn on every iteration below.
if (( NUM_IP != 0 )); then
    for n in $(seq 1 $NUM_IP); do
        IP_LIST+=($(rand_ip))
    done
fi
counter=0
# Ping one address per second, forever, cycling round-robin through the pool.
while true; do
    if (( $NUM_IP > 0 )); then
        i=$(($counter % $NUM_IP))
        randi="${IP_LIST[$i]}"
        # BUG FIX: 'counter=$((counter++))' assigned the pre-increment value
        # back to counter, so it never advanced and only IP_LIST[0] was ever
        # pinged. (The unused 'mod' variable was removed as well.)
        counter=$((counter + 1))
    else
        randi=$(rand_ip)
    fi
    my_ping "$randi"
    sleep 1
done
#
| true
|
7fe3c5577e7d115a43509ddb3cf658eaaebb3f41
|
Shell
|
wongoo/kubernetes-installation
|
/etcd/etcd-install-from-binary.sh
|
UTF-8
| 1,998
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Install etcd from the release tarball (skipped when already on PATH).
export V_ETCD_VER=v3.3.2
if hash etcd 2>/dev/null; then
echo "etcd exists"
else
echo "-------> install etcd"
# Reuse a previously downloaded tarball when present.
if [ -f etcd-${V_ETCD_VER}-linux-amd64.tar.gz ]
then
echo "etcd-${V_ETCD_VER}-linux-amd64.tar.gz exists"
else
wget https://github.com/coreos/etcd/releases/download/${V_ETCD_VER}/etcd-${V_ETCD_VER}-linux-amd64.tar.gz
fi
tar -xvf etcd-${V_ETCD_VER}-linux-amd64.tar.gz
# Installs both etcd and etcdctl.
sudo mv etcd-${V_ETCD_VER}-linux-amd64/etcd* /usr/local/bin
fi
systemctl stop etcd
echo "-------> config etcd"
sudo cp etcd/etcd.service /usr/lib/systemd/system/
sed -i "s#__V_ETCD_NAME__#$V_ETCD_NAME#g" /usr/lib/systemd/system/etcd.service
sed -i "s#__V_ETCD_CLUSTER_LIST__#$V_ETCD_CLUSTER_LIST#g" /usr/lib/systemd/system/etcd.service
sed -i "s#__CURR_NODE_IP__#$CURR_NODE_IP#g" /usr/lib/systemd/system/etcd.service
sed -i "s#__V_ETCD_LISTEN_CLIENT_URLS__#$V_ETCD_LISTEN_CLIENT_URLS#g" /usr/lib/systemd/system/etcd.service
sed -i "s#__V_ETCD_LISTEN_PEER_URLS__#$V_ETCD_LISTEN_PEER_URLS#g" /usr/lib/systemd/system/etcd.service
sed -i "s#__V_ETCD_ADVERTISE_PEER_URLS__#$V_ETCD_ADVERTISE_PEER_URLS#g" /usr/lib/systemd/system/etcd.service
sed -i "s#__V_ETCD_ADVERTISE_CLIENT_URLS__#$V_ETCD_ADVERTISE_CLIENT_URLS#g" /usr/lib/systemd/system/etcd.service
# 使用etcd的第一种启动模式: https://github.com/ianwoolf/myPages/blob/master/%E5%88%86%E5%B8%83%E5%BC%8F/etcd%E4%B8%A4%E7%A7%8D%E9%9B%86%E7%BE%A4%E6%A8%A1%E5%BC%8F%E5%92%8C%E5%90%AF%E5%8A%A8.md
if [ "$CURR_NODE_IP" == "$K8S_MASTER_IP" ]
then
echo "current is master"
fi
echo "-------> start etcd"
sudo rm -rf /var/lib/etcd/*
sudo mkdir /var/lib/etcd/
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
systemctl status etcd
#=================================================
echo "-------> check etcd"
etcdctl \
--ca-file=/etc/kubernetes/ssl/ca.pem \
--cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
cluster-health
| true
|
2fd2cb88f7a355fdaa6fa99741d28ba596c25bc0
|
Shell
|
demonlibra/nemo-actions
|
/actions/qbittorrent.sh
|
UTF-8
| 1,388
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Show a yad form asking how the given torrents should be added to
# qBittorrent, then launch qbittorrent with the chosen options.
# "$@" carries the torrent files selected in the file manager.

FORM=$(yad --borders=10 --title="qbittorent" --text="" --text-align=center --form --separator="," --item-separator="|" --field=:LBL --field="Путь для загрузки:DIR" --field="Пропустить диалоговые окна:CHK" --field="Загружать последовательно:CHK" --field="Загружать с первой и последней части:CHK" --field="Добавить остановленными:CHK" "" "" TRUE FALSE FALSE FALSE)
if [ $? = 0 ]
then
	# Parse the comma-separated yad answer in one pass. The old code ran
	# 'echo $FORM | awk' once per field with $FORM unquoted, which
	# word-split and glob-expanded download paths containing spaces.
	IFS=',' read -r _label path skip_dialog sequential first_and_last pause <<< "$FORM"

	options=""
	if [ "$skip_dialog" = "TRUE" ];    then options=$options" --skip-dialog"; fi       # skip confirmation dialogs
	if [ "$sequential" = "TRUE" ];     then options=$options" --sequential"; fi        # download sequentially
	if [ "$first_and_last" = "TRUE" ]; then options=$options" --first-and-last"; fi    # fetch first and last pieces first
	if [ "$pause" = "TRUE" ];          then options=$options" --add-paused=TRUE"; fi   # add torrents paused

	# $options is deliberately unquoted so each flag becomes its own word.
	qbittorrent --save-path="$path" $options "$@"
fi
| true
|
126a9b7f60b55a9678de5976f97ce75b2e40146f
|
Shell
|
luningcowboy/gkEngine
|
/exec/tools/resource_task/osx_tga2dds_engine.sh
|
UTF-8
| 644
| 3.015625
| 3
|
[] |
no_license
|
cd ../global_task/
source set_global_env.sh

# Recursively convert every .tga texture under the given directory to .dds
# with nvcompress. All path expansions are quoted so file names containing
# spaces survive (the original broke on them).
foreachd(){
    # walk every entry of directory $1
    for file in "$1"/*
    do
        if [ -d "$file" ]
        then
            # descend into subdirectories
            foreachd "$file"
        elif [ -f "$file" ]
        then
            if [ "${file##*.}" = "tga" ];
            then
                echo "$file"
                # alternative converters, kept for reference:
                #$GKENGINE_HOME/tools/pvrtextool -m -f a8r8g8b8 -shh -i "$file" -o "${file%.*}.dds"
                #$GKENGINE_HOME/tools/convert "$file" "${file%.*}.dds"
                "$GKENGINE_HOME/tools/nvcompress" -rgb "$file" "${file%.*}.dds"
            fi
        fi
    done
}

foreachd "$GKENGINE_HOME/engine/assets"

# %GKENGINE_HOME%\tools\pvrtextool -m -fOGL8888 -yflip0 -square -silent -i %%~fA
| true
|
edb8f0104e19f50315ea51185bb1ae7d709141d0
|
Shell
|
bsb808/linux_setup
|
/ubuntu/apt_18_ros.sh
|
UTF-8
| 1,061
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Add the ROS apt repository and install ROS Melodic desktop plus the extra
# packages this setup needs.

# accept packages from ROS
echo Accept packages from ROS...
sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list'
apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654
apt update

# ROS Base
DIST="melodic"
apt install ros-${DIST}-desktop-full

# ROS Extras
# No trailing backslash after the last package: the old continuation after
# rqt-common-plugins swallowed the following 'apt upgrade libignition-math2'
# line as extra arguments of this install command.
apt install \
    ros-${DIST}-rqt-robot-plugins \
    ros-${DIST}-effort-controllers \
    ros-${DIST}-joy \
    ros-${DIST}-teleop-twist-joy \
    ros-${DIST}-teleop-twist-keyboard \
    ros-${DIST}-teleop-tools \
    ros-${DIST}-joy-teleop \
    ros-${DIST}-key-teleop \
    ros-${DIST}-geographic-info \
    ros-${DIST}-move-base \
    ros-${DIST}-robot-localization \
    ros-${DIST}-robot-state-publisher \
    ros-${DIST}-xacro \
    ros-${DIST}-rqt \
    ros-${DIST}-rqt-common-plugins

apt upgrade libignition-math2

# ros-${DIST}-multimaster-fkie \
source /opt/ros/melodic/setup.bash
#rosdep init || echo Ignore error if rosdep is already initialized
| true
|
3c3f16362fbd6bfac5ef169c20f09e402d5777ea
|
Shell
|
alexpaulzor/CursedGames
|
/sudoku/benchmark.sh
|
UTF-8
| 151
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Solve every puzzle listed (one per line) in top95.txt, appending the
# timing output to a per-puzzle log file.
# Read line by line instead of 'for p in $(cat ...)', which word-splits
# and glob-expands the file contents.
while IFS= read -r p; do
    [ -n "$p" ] || continue
    (date; time ./sudoku.py solve -v "$p") 2>&1 | tee -a "puzzles/solvetimes/$p.log"
    sleep 0.1
done < puzzles/top95.txt
| true
|
da409f520a90158cadf8185e9ff630289ce9dcec
|
Shell
|
kishan811/Scripting-Assignments
|
/Grep, Sed, Awk and BASH programming/3_2_5.sh
|
UTF-8
| 842
| 2.625
| 3
|
[] |
no_license
|
# Grade report generator. Reads marks.txt where (after a header row) each
# line is "<name> <gender> <mark1> <mark2> <mark3>"; sums the three marks,
# assigns a letter grade, prints one line per student, then a summary with
# the highest/lowest/average totals.
# NOTE(review): NR==2 is used to initialize min/max, i.e. row 1 is assumed
# to be a header -- TODO confirm the input format. Totals are graded on a
# 0-100 scale.
awk 'BEGIN{print "\n\n*** Grade Report for the ABC course *** \n\nStudent : Gender: Grade"
max = 0;p=0;avg=0}
{ if(NR==2){total=$3+$4+$5;min=total;name=$1;max=total;i=0;}
if(NR>1) {
total=$3+$4+$5;
p=p+total;
if(max<total)
{
max=total;
name=$1;
}
min=(min>total)?total:min;
if(total>=95&&total<=100) grade="A";
else if(total>=90&&total<95) grade="A-";
else if(total>=85&&total<90) grade="B";
else if(total>=80&&total<85) grade="B-";
else if(total>=75&&total<80) grade="C";
else if(total>=70&&total<75) grade="C-";
else if(total>=60&&total<70) grade="D";
else if(total<60) grade="F";
print $1, ":", $2, ":" , grade}}
END{
avg=p/(NR-1);
print "\nTotal Students in the course-> " NR-1 " \nHighest Marks-> " max "\nLowest Marks-> " min "\nAverage Marks-> "avg "\n\n*** END OF GRADE REPORT ***\n"}' marks.txt
| true
|
60e75821cc0f63c27a88ff005ef35c81b56a7197
|
Shell
|
lzpfmh/ink
|
/modules/build_mods.sh
|
UTF-8
| 260
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Build every module subdirectory with make, then copy the produced shared
# libraries up into this directory as <mod>.so / <mod>.dll.

modules=$(find . -maxdepth 1 -type d)   # explicit start path; bare 'find -maxdepth' is a GNU extension

LD=${ARCH_PREFIX}ld
LDFLAGS=-shared

for mod in $modules
do
	if [ "$mod" != "." ]; then
		echo "find mod ${mod}"
		# Build in a subshell so a failed 'cd' cannot leave us running
		# make/cp in the wrong directory, and the cwd is restored.
		(
			cd "$mod" || exit 1
			make
			cp build/*.so "../${mod}.so"
			cp build/*.dll "../${mod}.dll"
		)
	fi
done
| true
|
b3733fdf2416ca92b46725c2f14e404fa30f671c
|
Shell
|
darco2018/tutorial_ranking
|
/scripts/build-app-image.sh
|
UTF-8
| 456
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the application Docker image. Image and jar names come from
# docker-config.sh (app_image, app_jar).
source ./scripts/docker-config.sh

echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Building ${app_image} image from Dockefile... >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"

# The whole image is always created anew, rather than adding new changes.
# Log the exact command before running it.
echo "docker build -f ./docker/Dockerfile -t ${app_image} --build-arg jar_name=${app_jar} ./"
docker build -f ./docker/Dockerfile -t "${app_image}" --build-arg "jar_name=${app_jar}" ./
| true
|
c982a1f4a1213674b89692948877c948a02e7d9c
|
Shell
|
dethrophes/Experimental-Bash-Module-System
|
/bash/TesterFuncs.sh
|
UTF-8
| 9,698
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
#set -o errexit
#set -o errtrace
#set -o nounset
[[ "${DEBUG:-0}" != "1" ]] || set -o xtrace
#<KHeader>
#+=========================================================================
#I Project Name: Scripts
#+=========================================================================
#I Copyright: Copyright (c) 2004-2012, John Kearney
#I Author: John Kearney, dethrophes@web.de
#I
#I License: All rights reserved. This program and the accompanying
#I materials are licensed and made available under the
#I terms and conditions of the BSD License which
#I accompanies this distribution. The full text of the
#I license may be found at
#I http://opensource.org/licenses/bsd-license.php
#I
#I THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN '
#I AS IS' BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS OF
#I ANY KIND, EITHER EXPRESS OR IMPLIED.
#I
#I Description:
#I File Name : TesterFuncs.sh
#I
#+-------------------------------------------------------------------------
#I
#I File Name : TesterFuncs.sh
#I File Location : Experimental-Bash-Module-System/bash
#I
#+=========================================================================
#</KHeader>
#########################################################################
# Source Files
#########################################################################
if [ -z "${__GenFuncs_sh__:-}" ]; then
[ -z "${ScriptDir:-}" ] && ScriptDir="$(cd "$(dirname "${0}")"; pwd)"
if [ -f "${ScriptDir}/GenFuncs.sh" ]; then
source "${ScriptDir}/GenFuncs.sh"
else
echo "# "
echo "# Error Exit : Error Sourcing \"${ScriptDir}/GenFuncs.sh\"" >&2
echo "# "
exit 7
fi
fi
#SourceCoreFiles_ "DiskFuncs.sh"
#SourceFiles_ "${ScriptDir}/DiskFuncs.sh"
if [ -z "${__TesterFuncs_sh__:-}" ]; then
__TesterFuncs_sh__=1
#########################################################################
# Procedures
#########################################################################
function AddTestCase {
push_element ${1} "$(EncodeArgs "${@:2}")"
}
declare -ga MTestCases
function AddTestCase2 {
push_element MTestCases "$(EncodeArgs "${@:2}")"
}
function escapeCtrlCharsString2 {
  # Render $1 as a $'...' ANSI-C-quoted literal on stdout, mapping common
  # control characters to their backslash escapes and any other control
  # byte to \xNN. Double quotes and backslashes get their own escapes.
  local LC_CTYPE=C
  local -i pos
  local escaped= hexbuf
  for (( pos = 0; pos < ${#1}; pos++ )); do
    local ch="${1:${pos}:1}"
    case "$ch" in
      $'"')  escaped+='"\""';;
      $'\\') escaped+='\\';;
      #$';') escaped+='\;';;
      #$' ') escaped+='\ ';;
      [^[:cntrl:]]) escaped+="$ch";;
      $'\e') escaped+='\e';;
      $'\a') escaped+='\a';;
      $'\n') escaped+='\n';;
      $'\b') escaped+='\b';;
      $'\v') escaped+='\v';;
      $'\t') escaped+='\t';;
      $'\r') escaped+='\r';;
      *)
        # remaining control bytes: emit as \xNN
        printf -v hexbuf '\\x%02x' "'$ch"
        escaped+="$hexbuf"
        ;;
    esac
  done
  echo -n \$\'"${escaped}"\'
}
# Test driver for functions that report results only via their exit status:
# the wrapper clears _RETURN and forwards the call, then delegates to
# test_FuncType_RETURN (defined later in this module).
function test_FuncType_return_only {
  local _FUNCNAME="${1}"
  local -i _FUNCDEPTH=2
  local _RETURN
  function FWrapper {
    _RETURN=""
    "${_FUNCNAME}" "${@}"
    return $?
  }
  test_FuncType_RETURN FWrapper "${@:2}"
}

# Test driver for functions that report their result on stdout: the wrapper
# captures stdout into _RETURN and preserves the exit status, then delegates
# to test_FuncType_RETURN.
function test_FuncType_echo {
  local _FUNCNAME="${1}"
  local -i _FUNCDEPTH=2
  local _RETURN
  function FWrapper {
    _RETURN="$("${_FUNCNAME}" "${@}" )"
    return $?
  }
  test_FuncType_RETURN FWrapper "${@:2}"
}
function test_FuncType_RETURN {
local _FUNCNAME="${_FUNCNAME:-${1}}"
local -i _FUNCDEPTH="${_FUNCDEPTH:-1}"
local -r FuncName="${1}"
local -i ECnt
local -a CTest
local -i ErrorCnt=0
local -i TestCnt=$#
local -a _RETURN
while shift && [ $# -gt 0 ]; do
local Error=0
DecodedArgs CTest "${1}"
#echo "${CTest[@]}"
local -i ExpectedRValue="${CTest[0]}"
local -a FuncArgs=("${CTest[@]:2:${CTest[1]}}")
local -a Expected_RETURN=("${CTest[@]:2+${CTest[1]}}")
echo -n "$(CmdOut ${_FUNCDEPTH} "${_FUNCNAME}" "${FuncArgs[@]}" 6>&1)"
local RValue=0
time "${FuncName}" "${FuncArgs[@]}" || RValue=$?
if [[ -n "${CTest[0]}" && ${ExpectedRValue} -ne ${RValue} ]]; then
ErrorOut ${_FUNCDEPTH} "$(CreateEscapedArgList3 "${_FUNCNAME}" "${FuncArgs[@]}")" \
" Error Wrong Return Value" \
" [ ${ExpectedRValue} -ne ${RValue} ]"
Error+=1
fi
ECnt=0
for CTArg in "${Expected_RETURN[@]}"; do
if [[ "${CTArg}" != "${_RETURN[${ECnt}]:-}" ]]; then
ErrorOut ${_FUNCDEPTH} "$(CreateEscapedArgList3 "${_FUNCNAME}" "${FuncArgs[@]}")" \
" Error Wrong Return Value _RETURN[${ECnt}]" \
" [ $(CreateEscapedArgList3 "${CTArg}") != $(CreateEscapedArgList3 "${_RETURN[${ECnt}]:-}") ]"
Error+=1
fi
ECnt+=1
done
if [[ ${#Expected_RETURN[@]} -lt ${#_RETURN[@]} ]]; then
ErrorOut ${_FUNCDEPTH} "$(CreateEscapedArgList3 "${_FUNCNAME}" "${FuncArgs[@]}")" \
" Too many return elements Got ${#_RETURN[@]} expected ${ECnt}"
Error+=1
fi
[[ ${Error} -eq 0 ]] || ErrorCnt+=1
shift
done
DebugOut ${_FUNCDEPTH} "${_FUNCNAME} Test Cases=${TestCnt} Fail=${ErrorCnt}"
return ${ErrorCnt}
}
function time_test_func {
local TIMEFORMAT=$'%3lR'
local IterrationCnt=1000
printf " # %-4d x { %-60s } took " "${IterrationCnt}" "$(CreateDQuotedArgListMinimal "${@}")"
(time for (( i=0 ; i <${IterrationCnt}; i++ )); do "${@}" >/dev/null || true ; done )
}
#########################################################################
# Module Argument Handling
#########################################################################
function Set_TesterFuncs_Flags {
local -i PCnt=0
while [ $# -gt 0 ] ; do
case "${1}" in
--Usage)
if [ $PCnt -eq 0 ]; then
ConsoleStdoutN ""
#ConsoleStdout "I -h --help "
#ConsoleStdout "I $(gettext "Display This message") "
fi
break
;;
--SupportedOptions)
[ ${PCnt} -eq 0 ] && ConsoleStdoutN ""
break
;;
*)
break
;;
esac
let PCnt+=1
shift
done
return ${PCnt}
}
#########################################################################
# Required Packages
#########################################################################
#push_element RequiredDebianPackages <Package Name> ...
#push_element RequiredRpmPackages <Package Name> ...
#push_element RequiredGentooPackages <Package Name> ...
#push_element RequiredSolarisPackages <Package Name> ...
#push_element RequiredFreeBsdPackages <Package Name> ...
#push_element RequiredSusePackages <Package Name> ...
TesterFuncsRevision=$(CleanRevision '$Revision: 64 $')
TesterFuncsDescription=''
push_element ScriptsLoaded "TesterFuncs.sh;${TesterFuncsRevision};${TesterFuncsDescription}"
fi
if [ -n "${__GenFuncs_sh_Loaded_-}" -a "${SBaseName2}" = "TesterFuncs.sh" ]; then
ScriptRevision="${TesterFuncsRevision}"
#########################################################################
# Usage
#########################################################################
function Usage {
ConsoleStdout "."
ConsoleStdout "+=============================================================================="
ConsoleStdout "I ${SBaseName2} ................................................... ${ScriptRevision}"
ConsoleStdout "+=============================================================================="
ConsoleStdout "I "
ConsoleStdout "I $(gettext "Description"): "
ConsoleStdout "I $(gettext "Please Enter a program description here") "
ConsoleStdout "I "
ConsoleStdout "I $(gettext "Usage"): "
UsageCommon
ConsoleStdout "I "
ConsoleStdout "I "
sNormalExit 0
}
SetLogFileName "&1"
sLogOut "${0}" "${@}"
#########################################################################
# Argument Processing
#########################################################################
push_element ModulesArgHandlers "Set_TesterFuncs_Flags" "Set_TesterFuncs_exec_Flags"
#push_element SupportedCLIOptions
function Set_TesterFuncs_exec_Flags {
local -i PCnt=0
while [ $# -gt 0 ] ; do
case "${1}" in
--Usage)
if [ $PCnt -eq 0 ]; then
ConsoleStdoutN ""
#ConsoleStdout "I -h --help "
#ConsoleStdout "I $(gettext "Display This message") "
fi
break
;;
--SupportedOptions)
[ ${PCnt} -eq 0 ] && ConsoleStdoutN ""
break
;;
-*)
sError_Exit 4 "$(gettext "Unsupported option") \"${1}\" "
;;
*)
break
;;
esac
let PCnt+=1
shift
done
return ${PCnt}
}
#MainOptionArg ArgFiles "${@}"
MainOptionArg "" "${@}"
#########################################################################
# MAIN PROGRAM
#########################################################################
echo "###############################################"
echo "# ${SBaseName2} $(gettext "Test Module")"
echo "###############################################"
sNormalExit 0
fi
| true
|
5db40d5ae423e5c1f5fdd339d29438237cbdcad2
|
Shell
|
SLBogach/services
|
/setup.sh
|
UTF-8
| 2,004
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# ft_services bootstrap: start minikube with metallb, build every service
# image inside minikube's docker daemon, apply all manifests, then open the
# kubernetes dashboard.

#COLORS
BLUE='\033[36m'
WHITE='\033[0m'
RESET='\033[0m'
FONBLUE='\033[46m'

#STARTING PROJECT
echo -e "$BLUE ===================================== $RESET"
echo -e "$BLUE =========FT_SERVICES PROJECT========= $RESET"
echo -e "$BLUE ===================================== $RESET"
echo -e "$BLUE Starting minikube... $RESET"
minikube start --driver=virtualbox --memory='3000' --disk-size 5000MB
echo -e "$BLUE Enabling metallb... $RESET"
minikube addons list
minikube addons enable metallb
echo -e "$FONBLUE $WHITE minikube with metallb sucessfully installed $RESET"
echo -e "$BLUE Go to environment... $RESET"
# Point this shell's docker client at minikube's daemon so the images built
# below are directly visible to the cluster.
eval $(minikube docker-env)
echo -e "$FONBLUE $WHITE OK $RESET"

echo -e "$BLUE Starting to build docker images...$RESET"
docker build -t service_phpmyadmin ./srcs/phpmyadmin
echo -e "$BLUE phpmyadmin sucessfully build $RESET"
docker build -t service_nginx ./srcs/nginx
echo -e "$BLUE nginx sucessfully build $RESET"
docker build -t service_wordpress ./srcs/wordpress
echo -e "$BLUE wp sucessfully build $RESET"
docker build -t service_mysql ./srcs/mysql
echo -e "$BLUE mysql sucessfully build $RESET"
docker build -t service_influxdb ./srcs/influxdb
echo -e "$BLUE influxdb sucessfully build $RESET"
docker build -t service_ftps ./srcs/ftps
# Fixed: this message previously said "grafana" although it follows the
# ftps build; each build now reports its own image.
echo -e "$BLUE ftps sucessfully build $RESET"
docker build -t service_grafana ./srcs/grafana
echo -e "$BLUE grafana sucessfully build $RESET"
echo -e "$FONBLUE $WHITE all images sucessfully created $RESET"

echo -e "$BLUE Applying yaml settings... $RESET"
kubectl apply -f srcs/metallb.yaml
kubectl apply -f srcs/nginx.yaml
kubectl apply -f srcs/wordpress.yaml
kubectl apply -f srcs/mysql_pv.yaml
kubectl apply -f srcs/mysql.yaml
kubectl apply -f srcs/phpmyadmin.yaml
kubectl apply -f srcs/influxdb_pv.yaml
kubectl apply -f srcs/influxdb.yaml
kubectl apply -f srcs/ftps.yaml
kubectl apply -f srcs/grafana.yaml
kubectl apply -f srcs/telegraf.yaml
echo -e "$FONBLUE $WHITE Applying yaml settings sucessfully done $RESET"
echo -e "$BLUE Enabling dashboard... $RESET"
minikube dashboard
|
242f63caee99ffa43ae961dc90d1123286321bbb
|
Shell
|
jcm300/DotFiles
|
/arch_linux_installation/install.sh
|
UTF-8
| 3,492
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
klayout=pt-latin9
font=Lat2-Terminus16
wifi_interface=wlp2s0
country=Portugal
timezone=Europe/Lisbon
locale=pt_PT.UTF-8
hostname=asusS410UN
username=jcm300
cpu=intel #or amd
DIR="$(cd "$(dirname "$0")" && pwd)"
escape_text_for_sed(){
    # Escape BRE metacharacters in $1 so the string can be embedded in a
    # sed pattern (with / as the delimiter): \ / * . [ ] ^ $
    local text="$1"
    # escape all backslashes first
    text="${text//\\/\\\\}"
    # escape slashes
    text="${text//\//\\/}"
    # escape asterisks
    text="${text//\*/\\*}"
    # escape full stops
    text="${text//./\\.}"
    # escape [ and ]
    text="${text//\[/\\[}"
    # Fixed: this line previously repeated the '[' pattern (//\[/\\]),
    # which re-mangled the already-escaped '[' and never escaped ']'.
    text="${text//\]/\\]}"
    # escape ^ and $
    text="${text//^/\\^}"
    text="${text//\$/\\\$}"
    echo "$text"
}
#Fix r8822be wifi problem
modprobe -rv r8822be
modprobe -v r8822be aspm=0
#Set keyboard layout
loadkeys $klayout
setfont $font
#Connect to the internet via wireless
wifi-menu -o $wifi_interface
#Update/Sync system clock
timedatectl set-ntp true
#Partitioning
sfdisk /dev/sda < $DIR/sda.sfdisk
#Formating the partitions
mkfs.fat -F32 /dev/sda1
mkfs.ext4 /dev/sda2
#Mounting the partitions
mount /dev/sda2 /mnt
mkdir -p /mnt/boot
mount /dev/sda1 /mnt/boot
#Enable Mirrors (put $country mirrors at first)
mirrorfile="/etc/pacman.d/mirrorlist"
servers="$(grep -A 1 "$country" $mirrorfile)"
servers="$(echo "$servers" | sed '/--/d')"
while read -r line; do
line="$(escape_text_for_sed "$line")"
sed "/$line/d" -i $mirrorfile
done <<< "$servers"
echo "$(head -n 6 $mirrorfile)" $'\n\n'"$servers" "$(tail -n +6 $mirrorfile)" > $mirrorfile
#Install base packages
pacstrap /mnt base base-devel
#fstab
genfstab -U /mnt >> /mnt/etc/fstab
#time zone
arch-chroot /mnt ln -sf /usr/share/zoneinfo/$timezone /etc/localtime
arch-chroot /mnt hwclock --systohc --utc
#localization
arch-chroot /mnt sed -i "s/#$locale\(.*\)/$locale\1/" /etc/locale.gen
arch-chroot /mnt locale-gen
arch-chroot /mnt echo "KEYMAP=$klayout"$'\n'"FONT=$font" > /mnt/etc/vconsole.conf
arch-chroot /mnt echo "LANG=$locale" > /mnt/etc/locale.conf
#Network configuration
arch-chroot /mnt echo "$hostname" > /mnt/etc/hostname
arch-chroot /mnt echo "127.0.0.1 localhost"$'\n'"::1 localhost"$'\n'"127.0.1.1 $hostname.localdomain $hostname" > /mnt/etc/hosts
#Initramfs
arch-chroot /mnt mkinitcpio -p linux
#Set root password
echo "Set root password:"
arch-chroot /mnt passwd
#Add a new user
arch-chroot /mnt useradd -m -g users -G wheel -s /usr/bin/zsh $username
#Set password to new user
echo "Set $username password:"
arch-chroot /mnt passwd $username
#Install some necessary packages
arch-chroot /mnt sudo pacman -S --noconfirm zsh vim networkmanager ntfs-3g
#Allow members of group wheel to execute any command
arch-chroot /mnt sed -i "s/# %wheel ALL=(ALL) ALL/%wheel ALL=(ALL) ALL/" /etc/sudoers
#Change default shell to zsh
arch-chroot /mnt chsh -s /usr/bin/zsh
#Bootloader Installation
arch-chroot /mnt bootctl install
arch-chroot /mnt sudo pacman -S --noconfirm $cpu-ucode
arch-chroot /mnt echo "title Arch Linux" > /mnt/boot/loader/entries/arch.conf
arch-chroot /mnt echo "linux /vmlinuz-linux" >> /mnt/boot/loader/entries/arch.conf
arch-chroot /mnt echo "initrd /$cpu-ucode.img" >> /mnt/boot/loader/entries/arch.conf
arch-chroot /mnt echo "initrd /initramfs-linux.img" >> /mnt/boot/loader/entries/arch.conf
arch-chroot /mnt echo "options root=/dev/sda2 rw" >> /mnt/boot/loader/entries/arch.conf
arch-chroot /mnt echo "default arch"$'\n'"timeout 4"$'\n'"editor 0" > /mnt/boot/loader/loader.conf
#Reboot
umount -R /mnt
reboot
| true
|
f67c8ab2fdccd5034a6c1611b8b00324020c5442
|
Shell
|
sweverett/Balrog-GalSim
|
/plots/ms_matcher
|
UTF-8
| 769
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run: $chmod +x ms_matcher
# Sky-match two FITS catalogs with STILTS (1.0 arcsec radius) and write the
# 1and2 / 1not2 / 2not1 join products as CSV.
in1=$1
in2=$2
out_1and2=$3
out_1not2=$4
out_2not1=$5
ra1=$6
dec1=$7
ra2=$8
dec2=$9

STILTS=/data/des30.a/data/kgrabow/stilts

# Perform matches #
echo "Matching ..."
echo " "

# $1 = join mode, $2 = output file. The three invocations below previously
# repeated this whole command line verbatim.
run_match() {
    "$STILTS" tmatch2 in1="${in1}" ifmt1=fits in2="${in2}" \
        values1="${ra1} ${dec1}" values2="${ra2} ${dec2}" \
        matcher=sky params=1.0 fixcols=all join="$1" out="$2" ofmt=csv
}

run_match 1and2 "${out_1and2}"
run_match 1not2 "${out_1not2}"
run_match 2not1 "${out_2not1}"
| true
|
b18a52198c3c685aef39fedc5894970cd65d0bf9
|
Shell
|
m-lab/epoxy-images
|
/configs/stage3_ubuntu/opt/mlab/bin/write-metadata.sh
|
UTF-8
| 427
| 3.40625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Collect host metadata into files under a well-known directory. The
# directory can be mounted into experiment pods so that experiments can
# inspect their environment and optionally include this metadata in their
# test results.

readonly metadata_dir=/var/local/metadata

mkdir -p "$metadata_dir"

# Record the running kernel release, without a trailing newline.
printf '%s' "$(uname -r)" > "$metadata_dir/kernel-version"
| true
|
cf3c4487c2b45fc26043db2cb4a4904327d78566
|
Shell
|
GlowMUCK/GlowMUCK
|
/src/upglow
|
UTF-8
| 279
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# Deploy a freshly compiled glowmuck binary into ../game (keeping the old
# one as glowmuck.old) and signal the running server with SIGUSR1 so it
# restarts on the new binary.

if [ ! -x glowmuck ]; then
  echo "Need to compile glowmuck first."
  exit 1
fi

echo "Moving glowmuck..."
rm -f ../game/glowmuck.old
mv -f ../game/glowmuck ../game/glowmuck.old
cp -f glowmuck ../game

echo "Restarting glowmuck..."
# Guard against a missing pid file; the old 'kill -USR1 `cat ...`' ran
# kill with no pid at all when the file was absent.
if [ -r ../game/glowmuck.pid ]; then
  kill -USR1 "$(cat ../game/glowmuck.pid)"
else
  echo "No pid file found; start glowmuck manually." >&2
  exit 1
fi
| true
|
418f8494e2ec9258626e9351c9482c72f4c38e17
|
Shell
|
pyhero/mogodbSlowAnalysis
|
/mongodb
|
UTF-8
| 703
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Minimal start/stop wrapper around the mongodb.py slow-log analyzer.
# Keeps the background process's pid in /tmp/mongodb.pid.

BIN="/root/.pyenv/versions/3.5.2/bin/python3.5"
DIR=$(cd "$(dirname "$0")"; echo "$PWD")
CONF="$DIR/mongodb.py"
PROG="mongodb"
PID_FILE=/tmp/${PROG}.pid
LOG_FILE=/tmp/${PROG}.log

case $1 in
    start)
        if [ -f "$PID_FILE" ];then
            echo "Maybe already running...! Pid file is: $PID_FILE"
            exit 2
        fi
        nohup "$BIN" "$CONF" > "$LOG_FILE" &
        # $! is the pid of the job we just launched; the old ps|grep|awk
        # pipeline was fragile (and '$?' after '&' is always 0 anyway).
        PID=$!
        if kill -0 "$PID" 2>/dev/null; then
            echo "$PID" > "$PID_FILE"
        else
            echo "Error"
        fi
        ;;
    stop)
        if [ ! -f "$PID_FILE" ];then
            echo "Maybe not run ! Pid file not find ($PID_FILE)"
            exit 2
        fi
        kill "$(cat "$PID_FILE")" && rm -rf "$PID_FILE"
        ;;
    *)
        echo "Usage:$0 {start|stop}"
esac
| true
|
09c1dab96cf746a00b0d18aea36a2088c60da413
|
Shell
|
clickbg/scripts
|
/Multiplatform/multiplatform_nfs_mon.sh
|
UTF-8
| 609
| 3.75
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/bash
# Author: Daniel Zhelev @ https://zhelev.biz
# USES BASH FUNCTIONALLITY RUN ONLY UNDER BASH
# Monitoring script for hung NFS shares
#
# Execution: Called by operator

NFS_MOUNTS=$(grep nfs /etc/vfstab | awk '{print $3}')
HAVE_NFS=$(grep -c nfs /etc/vfstab)
QUITCODE=0

if [ "$HAVE_NFS" -eq 0 ]
then
   echo "No nfs mounts found. Lucky you."
   exit 0
else
   for nfs in $NFS_MOUNTS
   do
      # stat the mount point with a 10s timeout. 'read -t' returns >128 on
      # timeout and 1 on EOF (stat failed); the old check '[ $? -eq 1 ]'
      # therefore reported a *hung* mount (timeout) as OK. Treat any
      # non-zero status as a problem.
      if ! read -t10 str < <(stat -t "$nfs"); then
         echo "WARNING: NFS $nfs seems to be hanged. Check with ls -al $nfs"
         QUITCODE=1
      else
         echo "OK: NFS $nfs is accessible."
      fi
   done
fi
exit $QUITCODE
| true
|
888070b059665370cd63bfe90816c4774a262ba3
|
Shell
|
inferiorhumanorgans/aomi
|
/tests/integration/smoke.bats
|
UTF-8
| 1,955
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bats
# -*- mode: Shell-script;bash -*-
# Smoke tests for aomi against a throwaway `vault server -dev` instance.
VAULT_LOG="${BATS_TMPDIR}/aomi-vault-log"

# Start a dev-mode Vault, export its address and root token, and lay out
# the generic fixture tree with freshly randomized secrets.
# NOTE(review): the token is grepped from the log right after launching the
# server in the background -- assumes the server has already written it.
setup() {
  if [ -e "${HOME}/.vault-token" ] ; then
    mv "${HOME}/.vault-token" "${BATS_TMPDIR}/og-token"
  fi
  nohup vault server -dev &> "$VAULT_LOG" &
  VAULT_PID=$!
  export VAULT_ADDR='http://127.0.0.1:8200'
  VAULT_TOKEN=$(grep -e 'Root Token' "$VAULT_LOG" | cut -f 3 -d ' ')
  export VAULT_TOKEN
  FIXTURE_DIR="${BATS_TMPDIR}/fixtures"
  mkdir -p "${FIXTURE_DIR}/.secrets"
  cp -r "${BATS_TEST_DIRNAME}"/fixtures/generic/* "$FIXTURE_DIR"
  cd "$FIXTURE_DIR"
  echo -n "$RANDOM" > "${FIXTURE_DIR}/.secrets/secret.txt"
  echo -n "secret: ${RANDOM}" > "${FIXTURE_DIR}/.secrets/secret.yml"
  echo ".gitignore" > /dev/null
  echo ".secrets" > "${FIXTURE_DIR}/.gitignore"
}

# Restore the user's real vault token and tear the dev server down.
teardown() {
  if [ -e "${BATS_TMPDIR}/og-token" ] ; then
    mv "${BATS_TMPDIR}/og-token" "${HOME}/.vault-token"
  fi
  kill $VAULT_PID
  rm -f "$VAULT_LOG"
  rm -rf "$FIXTURE_DIR"
}

# Round-trip: seed the fixtures into Vault, then extract one secret file
# and compare it to the fixture on disk.
@test "can seed and extract a file" {
  run aomi seed
  [ "$status" -eq 0 ]
  run aomi extract_file foo/bar/baz/secret "${BATS_TMPDIR}/secret.txt"
  [ "$status" -eq 0 ]
  [ "$(cat ${BATS_TMPDIR}/secret.txt)" = "$(cat ${FIXTURE_DIR}/.secrets/secret.txt)" ]
}

# Environment rendering: default prefix, custom prefix, and --export form.
@test "can seed and render environment" {
  SECRET=$(shyaml get-value secret < ${FIXTURE_DIR}/.secrets/secret.yml)
  run aomi seed
  [ "$status" -eq 0 ]
  run aomi environment foo/bar/bam
  [ "$output" = "FOO_BAR_BAM_SECRET=\"${SECRET}\"" ]
  run aomi environment foo/bar/bam --prefix aaa
  [ "$output" = "AAA_SECRET=\"${SECRET}\"" ]
  run aomi environment foo/bar/bam --export
  [ "${lines[0]}" = "FOO_BAR_BAM_SECRET=\"${SECRET}\"" ]
  [ "${lines[1]}" = "export FOO_BAR_BAM_SECRET" ]
}

# Seeding with --tags bar must write only the tagged path (baz), not bam.
@test "respects tags when seeding" {
  run aomi seed --tags bar
  [ "$status" -eq 0 ]
  run vault read foo/bar/bam
  [ "$status" -eq 1 ]
  run vault read foo/bar/baz
  [ "$status" -eq 0 ]
}
| true
|
4004b2a71059791460147964f9a3be22f7d02bf7
|
Shell
|
keefehuang/preCICE-2DHeatSolver
|
/Python2D/building/elastictube_activate.sh
|
UTF-8
| 677
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Load preCICE paths and activate the conda environment used by the
# elastic-tube example. precice_config.sh is expected to define
# PRECICE_ROOT, ANACONDA_ROOT, ELASTICTUBE_ROOT, SCONS_PARALLELJOBS and
# PRECICE_MPI_IMPLEMENTATION.
source ./precice_config.sh

############
export PRECICE_ROOT
export ANACONDA_ROOT
export ELASTICTUBE_ROOT
export SCONS_PARALLELJOBS
export PRECICE_MPI_IMPLEMENTATION

CONDA_ENV=precice_tube
# NOTE(review): assumes a conda shell where CONDA_PREFIX is set; otherwise
# the pkg-config / library paths below start with an empty prefix -- confirm.
CONDA_ENV_ROOT=$CONDA_PREFIX

export PKG_CONFIG_PATH=$CONDA_ENV_ROOT/lib/pkgconfig:$PKG_CONFIG_PATH
export LD_LIBRARY_PATH=$CONDA_ENV_ROOT/lib
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PRECICE_ROOT/build/last
export PATH=$PRECICE_ROOT/bin:$ANACONDA_ROOT/bin:$PATH
############

if [ -z "$PRECICE_ROOT" ]; then
	echo "please define PRECICE_ROOT"
fi

if [ -z "$ANACONDA_ROOT" ]; then
	echo "please define ANACONDA_ROOT"
fi

# Use $CONDA_ENV so the environment name is defined in exactly one place
# (it was previously hard-coded again as 'precice_tube' here).
source "$ANACONDA_ROOT/bin/activate" "$CONDA_ENV"
|
e68ca35c9d63deb7a62d9e1121bd3b47daa43da2
|
Shell
|
crocokyle/misc-scripts
|
/hadoop/ramdisktest/copy2backup.sh
|
UTF-8
| 370
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy the ramdisk-backed HDFS data on this node and four peers into the
# backup directory given as $1, in parallel.
# Needs to be run as a normal user.

# Without this check, a missing argument made '[ -e $1 ]' collapse to
# '[ -e ]' (a non-empty-string test, always true) and the script falsely
# reported "already exist".
if [ $# -ne 1 ]; then
  echo "Usage: $0 <backup-dir>" >&2
  exit 1
fi

if [ -e "$1" ]
then
  echo "$1" "already exist"
  exit
fi

docluster mkdir "$1"

# Fan the copies out in parallel; each 'time' reports its own duration.
time cp -a /mnt/ramdisk1/hdfs "$1" &
time ssh 10.0.0.73 cp -a /mnt/ramdisk1/hdfs "$1" &
time ssh 10.0.0.74 cp -a /mnt/ramdisk1/hdfs "$1" &
time ssh 10.0.0.75 cp -a /mnt/ramdisk1/hdfs "$1" &
time ssh 10.0.0.76 cp -a /mnt/ramdisk1/hdfs "$1" &

# Wait for every background copy so the script only returns when the
# backup is actually complete.
wait

#docluster sync
|
2155d87080a8d799ca78460cf27f4443c94331bb
|
Shell
|
lenaschimmel/mysql-cnf-graph
|
/mysql-cnf-graph
|
UTF-8
| 2,578
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Output the README.md for conveiniance. Prepend it with slashes so that dot will ignore it
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if [ -f $SCRIPTDIR/README.md ]; then
while IFS='' read -r line || [[ -n "$line" ]]; do
if [[ $line = "## Example" ]]; then
break
fi
echo "// $line"
done < "$SCRIPTDIR/README.md"
else
echo "// See https://github.com/lenaschimmel/mysql-cnf-graph/ for help."
fi
echo
# Emit DOT nodes/edges for config file $1 and recurse into everything it
# pulls in via '!include' (file) or '!includedir' (directory), preserving
# the order in which they appear.
# NOTE(review): FILES/MODE are globals shared with processDir. The
# 'for DIR in $DIRS' loop below is preserved verbatim, but DIRS is never
# assigned anywhere visible -- it appears to be dead code. TODO confirm.
function processFile {
  if [ -f $1 ]; then
    # Was '\"$FILE\"' -- correct only because every caller happens to pass
    # its loop variable; use the actual parameter instead.
    echo " "\"$1\" [href=\"file://$1\"]; # node for existing file
    # We need to grep for include and includedir at the same time to preserve ordering
    FILES=`grep "\!include" $1`
    local I=0
    for FILE in $FILES; do
      if [[ $FILE = "!include" ]]; then
        MODE="file"
      else
        if [[ $FILE = "!includedir" ]]; then
          MODE="dir"
        else
          ((I++))
          # mode should have been set by a previous line
          echo " \"$1\" -> "\"$FILE\" [label=\"\(order $I\)\"]; # link file -> (file or dir)
          if [[ $MODE = "file" ]]; then
            processFile $FILE
          else
            processDir $FILE
          fi
        fi
      fi
    done
    for DIR in $DIRS; do
      if [[ ! $DIR = "!includedir" ]]; then
        echo " \"$1\" -> "\"$DIR\"; # link file -> dir
        processDir $DIR
      fi
    done
  else
    echo " "\"$1\" [style=dotted]; # node for missing file
  fi
}
# Emit a DOT node for directory $1 and link/recurse into every *.cnf file
# inside it. FILES is a global shared with processFile.
function processDir {
  if [ -d $1 ]; then
    echo " "\"$1\" [shape=box]; # node for existing dir
    FILES=`find $1 -type f -name "*.cnf"`
    for FILE in $FILES; do
      echo " \"$1\" -> "\"$FILE\"; # link dir -> file
      processFile $FILE
    done
  else
    echo " "\"$1\" [style=dotted, shape=box]; # node for missing dir
  fi
}

# Ask mysqld which config files it would read, in order: the line after
# "Default options" in its help output lists them.
ROOTS=`mysqld --verbose --help 2> /dev/null | grep -A 1 "Default options" | tail -n 1`

echo "digraph mysql {"
echo " "ROOT [shape=diamond]; # node for root
I=0
for FILE in $ROOTS; do
  ((I++))
  echo " ROOT -> "\"$FILE\" [label=\"\(order $I\)\"]; # link root -> file
  processFile $FILE
done
echo "}"
| true
|
1faa6dcde6c364971bd0af43e46f33a1f78a3e4a
|
Shell
|
peggles2/sba-eli
|
/config/deploy.sh
|
UTF-8
| 1,218
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -ex
echo 'export TAG=$(echo ${CIRCLE_SHA1} | head -c 8)' >> $BASH_ENV
echo 'export BRANCH=$(echo ${CIRCLE_BRANCH} | sed -r 's/[_]+/-/g')' >> $BASH_ENV
echo 'export DATE=$(date '+%Y-%m-%d')' >> $BASH_ENV
source $BASH_ENV
function createCluster() {
aws ecs create-cluster --cluster ${BRANCH}
}
function createService() {
ecs-cli compose --project-name ${BRANCH} --ecs-params config/ecs-params.yml \
--file docker-compose-aws.yml service up --launch-type FARGATE --create-log-groups \
--cluster ${BRANCH} --timeout 15
}
function updateDns() {
IP=$(ecs-cli ps --cluster ${BRANCH} | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b" | head -n 1)
cat > change-batch.json << EOF
{
"Comment": "change batch request on ${DATE}",
"Changes": [
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "${BRANCH}.${DOMAIN}",
"Type": "A",
"TTL": 60,
"ResourceRecords": [
{
"Value":"${IP}"
}
]
}
}
]
}
EOF
aws route53 change-resource-record-sets --hosted-zone-id ${HOSTED_ZONE_ID} \
--change-batch file://change-batch.json
}
createCluster
createService
sleep 180
updateDns
| true
|
6653570336e016644701c2949b767887f05e220f
|
Shell
|
johnstyle/.jscripts
|
/bash/utils/install/apache/InstallWebsite.sh
|
UTF-8
| 8,159
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
reset='\033[0m'
black='\033[0;30m'
red='\033[0;31m'
green='\033[0;32m'
yellow='\033[0;33m'
blue='\033[0;34m'
purple='\033[0;35m'
cyan='\033[0;36m'
white='\033[0;37m'
if [ "$(whoami)" = "root" ]; then
echo -en "Nom de domaine : "
read website
if [ "${website}" ]; then
defaultUser=${website%.*}
defaultUser=${defaultUser/./_}
defaultUser=${defaultUser/-/_}
echo -en "Nom d'utilisateur (${defaultUser}) : "
read user
if [ ! "${user}" ]; then
user=${defaultUser}
fi
pathHome="/home/${user}"
pathGit="/home/git/repositories/websites/${user}"
if [ ! -d "${pathHome}" ]; then
# Création de l'utilisateur
# ------------------------------
useradd ${user}
if id -u ${user} >/dev/null 2>&1; then
echo -e "${green} - - - Création de l'utilisateur${reset}"
else
echo -e "${red} - - - Erreur lors de la création de l'utilisateur${reset}"
fi
# Création de l'acces FTP
# ------------------------------
echo -en "Créer un acces FTP ? [y/n] "
read useFtp
if [ "${useFtp}" = "y" ]; then
passwd ${user}
fi
# Création du projet GIT
# ------------------------------
echo -en "Créer un projet Git ? [y/n] "
read useGit
if [ "${useGit}" = "y" ]; then
if [ ! -d "${pathGit}" ]; then
mkdir ${pathGit}
if [ -d "${pathGit}" ]; then
cd ${pathGit}
git init --bare
chown -R git:git ${pathGit}
echo -e "${green} - - - Création du projet Git${reset}"
else
echo -e "${red} - - - Erreur lors de la création du projet Git${reset}"
fi
fi
# Clone du projet GIT
# ------------------------------
if [ -d "${pathGit}" ]; then
cd /home
echo -e "${purple}"
git clone ${pathGit}
echo -e "${reset}"
echo -e "${green} - - - Clonage du projet Git${reset}"
fi
# Premier commit
# ------------------------------
if [ ! -f "${pathHome}/.gitignore" ]; then
printf "# Numerous always-ignore extensions
*.diff
*.err
*.orig
*.log
*.rej
*.swo
*.swp
*.zip
*.vi
*~
*.sass-cache
# OS or Editor folders
.DS_Store
._*
Thumbs.db
.cache
.project
.settings
.tmproj
*.esproj
nbproject
*.sublime-project
*.sublime-workspace
# Komodo
*.komodoproject
.komodotools
# Folders to ignore
.hg
.svn
.CVS
.idea
node_modules
dist
# Home
.bash_history
.mysql_history
conf
tmp
logs
log
cache
" > ${pathHome}/.gitignore
chown ${user}:${user} ${pathHome}/.gitignore
if [ -f "${pathHome}/.gitignore" ]; then
cd ${pathHome}
echo -e "${purple}"
git add .
git commit -m "Mise en place du site internet"
git tag v1.0.0
git push --tags origin master
echo -e "${reset}"
echo -e "${green} - - - Premier commit Git${reset}"
else
echo -e "${red} - - - Erreur lors du premier commit Git${reset}"
fi
fi
fi
# Création du dossier logs
# ------------------------------
if [ ! -d "${pathHome}/logs" ]; then
mkdir "${pathHome}/logs"
if [ "${pathHome}/logs" ]; then
echo -e "${green} - - - Création du dossier logs${reset}"
else
echo -e "${red} - - - Erreur lors de la création du dossier logs${reset}"
fi
fi
# Création du dossier www
# ------------------------------
if [ ! -d "${pathHome}/www" ]; then
mkdir "${pathHome}/www"
if [ "${pathHome}/www" ]; then
echo -e "${green} - - - Création du dossier www${reset}"
else
echo -e "${red} - - - Erreur lors de la création du dossier www${reset}"
fi
fi
# Création du dossier tmp
# ------------------------------
if [ ! -d "${pathHome}/tmp" ]; then
mkdir "${pathHome}/tmp"
if [ "${pathHome}/tmp" ]; then
echo -e "${green} - - - Création du dossier tmp${reset}"
else
echo -e "${red} - - - Erreur lors de la création du dossier tmp${reset}"
fi
fi
# Création du dossier conf
# ------------------------------
if [ ! -d "${pathHome}/conf" ]; then
mkdir "${pathHome}/conf"
if [ "${pathHome}/conf" ]; then
echo -e "${green} - - - Création du dossier conf${reset}"
else
echo -e "${red} - - - Erreur lors de la création du dossier conf${reset}"
fi
fi
# Configuration des droits sur les dossiers
# ------------------------------
chmod 701 ${pathHome}
chmod 705 ${pathHome}/www
chmod 701 ${pathHome}/tmp
chmod 701 ${pathHome}/conf
chmod 600 ${pathHome}/logs
if [ "${useGit}" = "y" ]; then
chmod -R 600 ${pathHome}/.git
chmod 600 ${pathHome}/.gitignore
fi
chown -R ${user}:${user} ${pathHome}
chown -R root:root ${pathHome}/logs
chown -R root:root ${pathHome}/conf
if [ "${useGit}" = "y" ]; then
chown -R root:root ${pathHome}/.git
chown -R root:root ${pathHome}/.gitignore
fi
# Création du Vhost
# ------------------------------
if [ ! -f "/etc/apache2/sites-enabled/${website}" ]; then
echo -e "${purple}"
a2dissite ${website}
echo -e "${reset}"
fi
printf "<VirtualHost *:80>
ServerAdmin contact@${website}
ServerName www.${website}
ServerAlias ${website}
DocumentRoot ${pathHome}/www/
SuExecUserGroup ${user} ${user}
<Directory ${pathHome}/www/>
Options -Indexes FollowSymLinks MultiViews
AllowOverride All
Order Deny,Allow
</Directory>
ErrorLog ${pathHome}/logs/error.log
LogLevel warn
CustomLog ${pathHome}/logs/access.log combined
</VirtualHost>
" > /etc/apache2/sites-available/${website}
if [ -f "/etc/apache2/sites-available/${website}" ]; then
echo -e "${purple}"
a2ensite ${website}
service apache2 restart
echo -e "${reset}"
if [ -f "/etc/apache2/sites-enabled/${website}" ]; then
echo -e "${green} - - - Activation du site${reset}"
else
echo -e "${red} - - - Erreur lors de l'activation du site${reset}"
fi
else
echo -e "${red} - - - Erreur lors de la configuration du VirtualHost${reset}"
fi
# Création de la base MySql
# ------------------------------
echo -en "Créer une base de donnée MySql ? [y/n] "
read useMysql
if [ "${useMysql}" = "y" ]; then
echo -en "\nVeuillez saisir le mot de passe qui sera utilisé pour creer la base de donnée : "
stty -echo
read password
stty echo
if [ "${password}" ]; then
printf "\nMot de passe ROOT - "
mysql -u root -p -e "create database ${user}; grant usage on *.* to ${user}@localhost identified by '${password}'; grant all privileges on ${user}.* to ${user}@localhost;"
echo -e "${green} - - - Création de la base MySql${reset}"
fi
fi
cd ${pathHome}
echo -e "${green} - - - ${website} est installé !${reset}"
else
echo -e "${red} - - - Cet utilisateur existe déjà${reset}"
fi
else
echo -e "${red} - - - Veuillez renseigner un nom de domaine${reset}"
fi
else
echo -e "${red} - - - Vous devez être en ROOT${reset}"
fi
| true
|
eb00876f69a2f831a55af9497cf671345ff269a6
|
Shell
|
secsecsec/spacefish
|
/tests/fileops/runtest.sh
|
UTF-8
| 229
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
if [ -z "$1" ]
then
echo "No argument supplied"
exit 1
fi
PREFIX="LD_PRELOAD=../../bin/libufs.so LD_LIBRARY_PATH=../../bin/"
echo "ORIGINAL $1"
eval "./$1"
echo "USERLEVEL $1"
eval "$PREFIX ./$1"
| true
|
b8da5fbff8530356e3c502aa898128c5d623f6ff
|
Shell
|
pnnlhep/osg-compute
|
/drain.sh
|
UTF-8
| 370
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
touch /var/lib/dirac_drain
echo START=UNDEFINED > /etc/condor/config.d/00shutdown
kill -HUP $(cat /var/run/condor_master.pid)
while true; do
L=$(ps -o pid --no-headers --ppid $(cat /var/run/condor_master.pid) | while read x; do ps -o pid --no-headers --ppid $x; done | wc -l)
[ $L -le 0 ] && break
sleep 1
done
condor_off
condor_off -daemon master
| true
|
92919308392e7e3b9bef9b7d576dba021e018375
|
Shell
|
anyks/backups
|
/anyks/modules/scp.sh
|
UTF-8
| 2,135
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# author: Forman
# skype: efrantick
# phone: +7(920)672-33-22
#
# Выводим сообщение о существовании модуля
print_log "${c_yellow}Module ${module} - for backup remote files${c_nc}"
# Считываем параметры из конфигурационного файла
config_params="user password"
# Извлекаем данные из конфигурационного файла
source ${confsh}
# Выводим сообщение
print_log "Assemble archives"
{
# Копирование файлов со сторонних серверов
# Текущие переменные
ldir="/usr/local"
setc="etc"
swww="www"
retc="usr_local_etc"
rwww="usr_local_www"
# Приводим в нормальный вид переменные из конфига
user=$(eval echo \${${module}_user})
password=$(eval echo \${${module}_password})
# Получаем конфигурационный файл доменов
domains_list=${root}/anyks/conf/${module}_domains
# Проверяем на существование файла
if [ -f ${domains_list} ]; then
# Переходим по всем доменам
for domain in $(cat $domains_list)
do
# Если домен существует
if [ "${domain}" != "" ]; then
# Выполняем копирование конфигов
sshpass -p "${password}" scp -o StrictHostKeyChecking=no -r ${user}@${domain}:/${setc} ${img}/${domain}_${setc}
sshpass -p "${password}" scp -o StrictHostKeyChecking=no -r ${user}@${domain}:${ldir}/${setc} ${img}/${domain}_${retc}
sshpass -p "${password}" scp -o StrictHostKeyChecking=no -r ${user}@${domain}:${ldir}/${swww} ${img}/${domain}_${rwww}
compressFile ${img} ${domain}_${setc} ${img} ${domain}_${setc}
compressFile ${img} ${domain}_${retc} ${img} ${domain}_${retc}
compressFile ${img} ${domain}_${rwww} ${img} ${domain}_${rwww}
rm -rf ${img}/${domain}_${setc}
rm -rf ${img}/${domain}_${retc}
rm -rf ${img}/${domain}_${rwww}
fi
done
fi
} 2>&1 | tee ${log}/${module}_${date}.log
| true
|
60f51d70fbdee3ce67216ab5d1ff6948b25554c4
|
Shell
|
lingo/scripts
|
/svnwhere
|
UTF-8
| 122
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
if [ "x$1" != "x" ]; then
dir="$1"
else
dir="$PWD"
fi
(cd "$dir"; svn info | grep URL: | awk '{print $2}' )
| true
|
3decfc6e2b3f8e6f8c0f1aec5f49db6fd2ada446
|
Shell
|
YenHaoChen/my-rm
|
/my_rm.sh
|
UTF-8
| 4,282
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/sh
# by vegetablebird last modified 2020.08.21
# USAGE: (1) ln -s $PWD/my_rm.sh ~/.local/bin/rm
# (2) export PATH=~/.local/bin:$PATH
# (3) which rm
#FEATURE: Try to create a new trash directory for each rm command
# but not guaranteed if two rm commands is committed at a same time
#FEATURE: If two files(directores) are removed with same name by a single rm command,
# they will be put into different trash directories
#FEATURE: There is a log_file in the corresponding trash directory,
# unless removing a file with the same name as log_file (without warning message throwing)
#Bug1: not support space. e.g.: rm -rf arch\ final\ project/
#Bug2: mv: cannot move ‘../results/My-mix33/m5out/’ to ‘/home/vegetablebird/.trash/2017-08-06/21:03:15.99453/m5out’: Directory not empty // IGNORE??
#TODO: /home/vegetablebird/.local/bin/rm: unimplemented arg: -fr, use /bin/rm
#TODO: log_file=original_command -> log_file=rm_cmd (cmd, remove_command)
#TODO: add undo command (restore removed data)
ori_rm=/bin/rm
trash_dir=/home/vegetablebird/.trash
preserve_days=90
log_file=original_command
function actually_rm
{
if [ "`ps -A | grep rm | wc -l`" -gt "2" ] || [ -f $trash_dir/rming ]
then
# echo $0: rm is running, skipping actually_rm
# ps -A | grep rm
exit
fi
touch $trash_dir/rming
cur_year=`date +%Y`
cur_month=`date +%m`
cur_day=`date +%d`
cd $trash_dir
for f in `ls -d * | grep '[0-9]\+-[0-9]\+-[0-9]\+'`
do
file_year=`echo $f | sed 's/\([0-9]\+\)-[0-9]\+-[0-9]\+/\1/g'`
file_month=`echo $f | sed 's/[0-9]\+-\([0-9]\+\)-[0-9]\+/\1/g'`
file_day=`echo $f | sed 's/[0-9]\+-[0-9]\+-\([0-9]\+\)/\1/g'`
diff_year=$(( 10#$cur_year - 10#$file_year ))
diff_month=$(( 12*$diff_year + 10#$cur_month - 10#$file_month ))
diff_day=$(( 30*$diff_month + 10#$cur_day - 10#$file_day ))
if [ "$diff_day" -gt "$preserve_days" ]
then
$ori_rm -rf $f
fi
done
$ori_rm $trash_dir/rming
}
force=false
recursive=false
files=""
directories=""
not_exists=""
for arg in $@
do
if [[ $arg == -* ]]
then
if [ "$arg" == "-h" ] || [ "$arg" == "-help" ] || [ "$arg" == "--help" ]
then
echo -e "Usage: $0 [OPTION]... FILE..."
echo -e "Move the FILE(s) to trash_dir($trash_dir)."
echo -e "Check and Delete the files in trash_dir longer than $preserve_days days."
echo
echo -e " -f, --force\t\tignore nonexistent files and arguments"
echo -e " -r, -R, --recursive\tremove directories and their contents recursively"
echo -e " -h, -help, --help\tdisplay this help and exit"
echo -e "\t\t\tuse '$ori_rm --help' for the conventional rm help"
echo
echo -e "Created by vegetablebird"
exit 0
elif [ "$arg" == "-f" ] || [ "$arg" == "--force" ]
then
force=true
elif [ "$arg" == "-r" ] || [ "$arg" == "-R" ] || [ "$arg" == "--recursive" ]
then
recursive=true
elif [ "$arg" == "-rf" ]
then
force=true
recursive=true
else
echo "$0: unimplemented arg: $arg, use $ori_rm"
$ori_rm $@
exit $?
fi
elif [ -f "$arg" ] || [ -L "$arg" ]
then
files="$files $arg"
elif [ -d "$arg" ]
then
directories="$directories $arg"
else
not_exists="$not_exists $arg"
fi
done
# Check the operands
if [ "$files" == "" ] && [ "$directories" == "" ] && [ "$not_exists" == "" ]
then
echo "$0: missing operand"
exit 1
fi
if [ "$force" = false ] && [ "$not_exists" != "" ]
then
for f in $not_exists
do
echo "$0: cannot remove '$f': No such file or directory"
done
exit 1
fi
if [ "$recursive" = false ] && [ "$directories" != "" ]
then
for f in $directories
do
echo "$0: cannot remove '$f': Is a directory"
done
exit 1
fi
# Move files to a directory in .trash
rm_dir=""
for f in $files $directories
do
filename=`echo $f | sed -e 's/^.*\/\([^\/]\+\)\/\?$/\1/g'`
if [ "$rm_dir" == "" ] || [ -f $rm_dir/$filename ] || [ -d $rm_dir/$filename ]
then
rm_dir=$trash_dir/`date +%F/%T.%5N`
if [ -f $rm_dir/$f ] || [ -d $rm_dir/$f ]
then
echo "$0: cannot find a valid trash directory"
exit 1
fi
mkdir -p $rm_dir
echo "$LOGNAME@$HOSTNAME:`pwd`$ $0 $@" >> $rm_dir/$log_file
fi
mv $f $rm_dir
if [ "$?" == "1" ]
then
echo "GET Bug2: mv: cannot move ‘...’ to ‘...’: Directory not empty"
fi
done
# Actually rm the old files
actually_rm &
exit 0
| true
|
af51c3a77ab7f9094567160ca13955b97a624e08
|
Shell
|
dqylyln/HouseMD
|
/bin/housemd
|
UTF-8
| 304
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
if [ -z $JAVA_HOME ];
then
echo "Please set JAVA_HOME to JDK 6+!"
exit 1
else
ROOT=`dirname "$0"`
if [ -f $JAVA_HOME/lib/tools.jar ];
then
BOOT_CLASSPATH=-Xbootclasspath/a:$JAVA_HOME/lib/tools.jar
fi
java $BOOT_CLASSPATH -jar $ROOT/housemd.jar "$@"
fi
| true
|
0bd77ec55b26d0751cb8caedd12122971ed00ea5
|
Shell
|
danij/Forum.WebClient
|
/docker/bootstrap.sh
|
UTF-8
| 679
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
HOSTNAME="$1"
mkdir /forum/config/Forum.WebClient
mkdir /forum/config/Forum.WebClient/doc
mkdir /forum/config/https
mkdir /forum/logs/nginx
cp -r /var/www-old/* /var/www
cp /var/www/html/config/config.js /forum/config/Forum.WebClient/config.js
sed -i 's#"google"#//"google"#' /forum/config/Forum.WebClient/config.js
sed -i "s#http://dani.forum:8080#https://$HOSTNAME#" /forum/config/Forum.WebClient/config.js
rm /var/www/html/config/config.js
ln -s /forum/config/Forum.WebClient/config.js /var/www/html/config/config.js
cp -a /var/www/html/doc/. /forum/config/Forum.WebClient/doc
rm -r /var/www/html/doc
ln -s /forum/config/Forum.WebClient/doc /var/www/html/doc
| true
|
6a4a4eaed13ed7479a230bf623740fa58b32712d
|
Shell
|
Gingeropolous/mos-cgi
|
/index.html
|
UTF-8
| 1,083
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
echo "Content-type: text/html"
echo ""
echo '<html>'
echo '<head>'
echo '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">'
echo '<title>Monerodo Operating System Web Interface</title>'
echo '</head>'
echo '<body>'
#make username variable global
export u="$USER" # New version attempts to use $USER instead of $u or bob
export current_ip="$(ip addr | grep 'state UP' -A2 | tail -n1 | awk '{print $2}' | cut -f1 -d'/')"
export help="Type 'back' to return to previous menu"
export FILEDIR=/home/$USER/$(grep -n 'filedir' /home/$USER/monerodo/conf_files/monerodo.index |cut -d"=" -f2)
export VERSION=$(grep -n 'version' /home/$USER/monerodo/conf_files/monerodo.index |cut -d"=" -f2)
######### Checks if this is first time running, forces change of password and other important settings
# Put into its own script 20160518
./first_time.sh
echo '<h3>Monerodo OS Main Menu</h3><br><br>'
echo '<a href="device_management.html">Device Management</a><br>'
echo 'More coming soon! Like, MiniNodo, pool page access, etc<br>'
echo '</body>'
echo '</html>'
| true
|
f8a58bfda50eefda51a6f2db84f46f04ce1fc29d
|
Shell
|
kapliy/hepcode
|
/ana/branches/dg/CommonAnalysis/RootCore/scripts/update.sh
|
UTF-8
| 1,924
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/sh
source "`dirname $0`/preamble.sh" "$0"
VERSIONFILE="$1"
test "$VERSIONFILE" != "" && test \! -f $VERSIONFILE && echo did not find version file $VERSIONFILE && exit 2
function check_head {
test "`echo $1 | grep /tags/`" != "" && return 1
test "`echo $1 | grep /trunk/`" != "" && return 0
test "`echo $1 | grep /trunk$`" != "" && return 0
test "`echo $1 | grep /branches/`" != "" && return 0
exit 3
}
for pkg in $ROOTCOREDIR `cat $pkgfile`
do
if test \! -d $pkg/.svn
then
echo `basename $pkg` not in SVN
else
name=`basename $pkg`
old_tag=`grep svn+ssh $pkg/.svn/entries | grep /$name | head -n 1`
new_tag=""
test "$VERSIONFILE" != "" && new_tag=`grep /$name $VERSIONFILE`
test "$new_tag" != "" && new_tag=`$ROOTCOREDIR/scripts/svn_get_url.sh $new_tag $old_tag`
raw_tag=$new_tag
test "$new_tag" == "" && new_tag=$old_tag
if test "$old_tag" == ""
then
echo failed to read svn info for $name
elif check_head "$old_tag"
then
if test "$old_tag" == "$new_tag"
then
echo updating $name from SVN head
if (cd $pkg && (svn update || svn update))
then
true
else
echo failed to update $pkg
exit 4
fi
else
echo you currently have the head version of $name checked out
echo please check in your changes and then switch manually
echo cd $pkg
echo svn switch $new_tag
fi
echo
else
if test "$raw_tag" == ""
then
echo $name not in release, keeping at version `echo $new_tag | sed 's/.*\/tags\///'`
elif test "$old_tag" == "$new_tag"
then
echo $name already at version `echo $new_tag | sed 's/.*\/tags\///'`
else
echo $old_tag $new_tag
echo updating $name to version `echo $new_tag | sed 's/.*\/tags\///'`
if (cd $pkg && (svn switch "$new_tag" || svn switch "$new_tag"))
then
true
else
echo failed to update $pkg
exit 5
fi
echo
fi
fi
fi
done
| true
|
c56dc486df62857f01e64e407459f8d18e1c63a3
|
Shell
|
BenUtzmich/Nautilus-Caja-Scripte
|
/FEScriptsNautilus/zMORE/MAPtools/00_oneKMLfile_REFORMAT-FoldLines_sed.sh
|
UTF-8
| 5,912
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/sh
##
## Nautilus
## SCRIPT: 00_oneKMLfile_REFORMAT-FoldLines_sed.sh
##
## PURPOSE: For a user-selected KML (Keyhole Markup Language) file,
## the file contents are reformatted via the 'sed' utility
## to make sure that the lines are not extra-long.
##
## Several types of character replacement are used:
## - each occurrence of the '>' character is replaced
## by that character followed by a line-feed
## - then each occurrence of a space character is
## replaced by a line-feed character
## - each occurrence of a comma character is replaced
## by a space character.
##
## The output of a pipeline of 'sed' commands is directed
## into a separate text file --- say, in the same directory
## as the input file.
##
## (An alternative is to put the output file into the /tmp
## directory. This might be advisable if the output files
## are typically huge.)
##
## NOTE:
## KML coordinate data is between <coordinates> and
## </coordinates> markup indicators and can often
## go on for thousands of characters in a single line.
## We especially want to 'fold' these extra-long lines
## into lines with 2 or 3 numeric values.
##
## HOW TO USE: In Nautilus, select a KML (text) file in a directory.
## (The selected file should NOT be a directory.)
## Right click and, from the 'Scripts >' submenus,
## choose to run this script (name above).
##
## Created: 2016nov06 Based on the 2010sep FE Nautilus script
## '06b_oneTextFile_CHG-STRING-WITHIN_2promptsFromTo_sed.sh'
## Changed: 2016nov09 Changed text in the 'zenity' popup.
## FOR TESTING: (show statements as they execute)
# set -x
############################
## Set the filename var.
############################
FILENAME="$1"
BASENAME=`basename $FILENAME`
SCRIPTDIRNAME=`dirname $0`
SCRIPTBASENAME=`basename $0`
############################################
## Show an informative message with 'zenity'.
############################################
zenity --info \
--title "KML file processor - REFORMAT - 'FOLD'" \
--text "\
This utility expects the file you chose:
$BASENAME
to be a KML (Keyhole Markup Language) file. This script
scans the file contents with the 'sed' utility and
reformats the data into a separate text file.
The coordinate data is between <coordinate> and
</coordinates> markup indicators.
The output file will include some miscellaneous 'markup' and
data lines. The main intent of this utility is to make a
file that is easily edited to contain only the coordinate
data --- along with a comment line or two at the top that
may incorporate some description data, like <name> data.
A major purpose of this utility is to 'fold' extra-long
lines of coordinate data into short lines containing
only 2 (or 3) coordinate numbers.
The selected file is processed via 'sed' and the 'sed' output
is put in a file with a string like '_REFORMATTED'
appended to the midname of the selected file.
The output file is put in the same directory with the
original selected file.
This script is:
$SCRIPTBASENAME
in directory
$SCRIPTDIRNAME" &
###############################################
## Exit if the selected file is a directory.
###############################################
if test -d "$FILENAME"
then
exit
fi
####################################################
## Get the file extension and check that it is not
## blank. Skip the filename if it has no extension.
## (Assumes one '.' in filename, at the extension.)
####################################################
FILEEXT=`echo "$FILENAME" | cut -d\. -f2`
if test "$FILEEXT" = ""
then
exit
fi
####################################################
## Exit if the file extension is not 'kml'.
## COMMENTED, for now.
####################################################
# if test "$FILEEXT" != "kml"
# then
# exit
# fi
####################################################
## Get the 'midname' of the file, the part before
## the period and the extension.
####################################################
MIDNAME=`echo "$FILENAME" | cut -d\. -f1`
###################################
## Make the output filename.
###################################
OUTNAME="${MIDNAME}_REFORMATTED.$FILEEXT"
rm -f "$OUTNAME"
#######################################################
## Use 'sed' to make the new output file.
#######################################################
## Could try running the command in a window,
## to see err msgs, if any.
## Could use zenity to offer this as an option.
##
## xterm -fg white -bg black -hold -e \
#######################################################
## Each of the 'sed' statements in the pipe below do:
## 1) each occurrence of the '>' character is replaced
## by that character followed by a line-feed
## 2) remove leading spaces from each line
## 3) replace each '</' occurrence with those 2 chars
## preceded by a line-feed
## 4) then each occurrence of one or more space characters
## is replaced by one space character
## 5) each occurrence of a comma character is replaced
## by a space character.
#######################################################
## FOR TESTING:
# set -x
sed -e "s|>|>\n|g" "$FILENAME" | \
sed -e "s|^ *||g" | \
sed -e "s|</|\n</|g" | \
sed -e "s| *| |g" | \
sed -e "s| |\n|g" | \
sed -e "s|,| |g" > "$OUTNAME"
## FOR TESTING:
# set -
###################################
## Show the output file.
###################################
## . $HOME/.gnome2/nautilus-scripts/.set_VIEWERvars.shi
. $HOME/.freedomenv/feNautilusScripts/set_DIR_NautilusScripts.shi
. $DIR_NautilusScripts/.set_VIEWERvars.shi
# $TXTVIEWER "$OUTNAME" &
$TXTEDITOR "$OUTNAME" &
| true
|
845961dce8ff0803e41057820cc760e9b3a29d96
|
Shell
|
shored/sindan-docker
|
/.travis/test_building_grafana.sh
|
UTF-8
| 444
| 3.03125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
cd $(dirname $0)/..
IMAGE_TAG="$(grep 'image: sindan/grafana' docker-compose.yml | awk '{print $2}')"
echo $IMAGE_TAG
pushd grafana
BUILDKIT_HOST=tcp://0.0.0.0:1234 \
buildctl build --no-cache --frontend dockerfile.v0 --local context=. --local dockerfile=. --progress plain \
--output type=docker,name=$IMAGE_TAG | docker load
st=$?
(( $st > 0 )) && exit $st
popd
docker images
docker save $IMAGE_TAG | gzip > docker/grafana.tar.gz
| true
|
7cbdb4a9ca800855317a4edaa0fcaa49c49773b1
|
Shell
|
TheBottleSeller/liquefy
|
/setup/deploy_vbox_api.sh
|
UTF-8
| 211
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
if [ -z "$1" ]; then
MASTERHOST="vbox-master"
else
MASTERHOST="$1"
fi
API_SERVER_HOME="$GOPATH/src/bargain/web/apiserver"
cd $API_SERVER_HOME
npm install
npm start $(docker-machine ip $MASTERHOST)
| true
|
96d9dc77b6efd2655c23f6adf6138c5be345c54a
|
Shell
|
RosenW/training-projects-1
|
/RosenW-LPI/lpi103 14-09-2018/3/validation.sh
|
UTF-8
| 420
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
allLines=`cat ~/Downloads/access.log | wc -l`
lineCount=0
while read p; do
if [[ $p =~ ^[a-Z0-9\.\-]+:[0-9]+" "[0-9\.]+" - - ["[0-9]+"/"[a-Z]+"/"[0-9\:]+" "[\+0-9]+"] "\"[^\"]+\"" "[0-9]+" "[0-9]+" "[0-9]+" "\"[^\"]+\"" "\"[^\"]+\"" - "[0-9]+" - "[0-9]+$ ]]; then
((lineCount++))
fi
done <~/Downloads/access.log
echo "Valid line count: $lineCount"
echo "Invalid line count: $((allLines-lineCount))"
| true
|
0077d3ab6ed08ffca34f1bea81b73885813d49a9
|
Shell
|
shiro/dotfiles
|
/scripts/bin/link/ln-bin.zsh
|
UTF-8
| 553
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
DEST="${HOME}/bin"
SOURCES=(
"$DOTFILES/scripts/bin"
)
linkFiles(){
if [ -d "$1" ]; then
echo "# from: $1"
else
echo "# skip $1: not found"
return
fi
local files=(`find "$1" -type f`)
for file in ${files[@]}; do
if [ -f "$DEST/${file:r:t}" ] && [ ! -L "$DEST/${file:r:t}" ]; then
echo "[SKIP] ${file:t}\nfile with the same name exists in '$DEST'"
continue
fi
echo "[LINK] ${file:t}"
ln -sf "$file" "$DEST/${file:r:t}"
done
}
for src in "${SOURCES[@]}"; do
linkFiles "$src"
done
| true
|
0e2dbfee7abf7ebe0988c2f29890c73fbf7a120d
|
Shell
|
weixingsun/ycsb-flame
|
/flame.mysql/mysql.sh
|
UTF-8
| 317
| 2.515625
| 3
|
[
"Apache-2.0"
] |
permissive
|
host=`ifconfig docker0 | grep 'inet addr:' | cut -d: -f2 | awk '{print $1}'`
db=ycsb
user=ycsb
pw=ws206771
rootpw=ws206771
#docker-mysql-scripts/dmysql-server $db $pw
ip=`docker inspect --format '{{ .NetworkSettings.IPAddress }}' $db`
mysql -h $ip -uroot -p$rootpw $db << EOF
select count(1) from usertable;
EOF
| true
|
776baefbb16b15a96da424096055bb9b69163126
|
Shell
|
fecgov/regulations-parser
|
/test-travis.sh
|
UTF-8
| 592
| 3.03125
| 3
|
[
"CC0-1.0"
] |
permissive
|
set -e
set -x
if [[ $INTEGRATION_TARGET = '' ]]; then
py.test --cov-report term-missing --cov regparser
flake8 .
else
if [[ $DOCKER_BUILD ]]; then
docker build . -t eregs-parser
MANAGE_CMD="docker run --rm -it -v eregs-cache:/app/cache -v output:/app/output --entrypoint ./manage.py eregs-parser"
else
MANAGE_CMD=./manage.py
fi
$MANAGE_CMD migrate
$MANAGE_CMD integration_test uninstall
$MANAGE_CMD integration_test install $INTEGRATION_TARGET
$MANAGE_CMD integration_test build $INTEGRATION_TARGET
$MANAGE_CMD integration_test compare $INTEGRATION_TARGET
fi
| true
|
cdaccd97c531759dd1dcf0a60dc5d01b3e4216ee
|
Shell
|
sethleedy/Auto-Besside-Capturer
|
/find_up.sh
|
UTF-8
| 272
| 3.4375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Examples:
# find_up.sh some_dir -iname "foo*bar" -execdir pwd \;
# find_up.sh . -name "uni_functions.sh"
set -e
path="$1"
shift 1
while [[ "$path" != "/" ]];
do
find "$path" -maxdepth 1 -mindepth 1 "$@"
path="$(readlink -f $path/..)"
done
| true
|
c34be8ebd6ef7bdcc673b703abec2e97b1cdcdc5
|
Shell
|
qbit/dotfiles
|
/bin/watch
|
UTF-8
| 239
| 3.53125
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/ksh
N=5
CMD=$@
while getopts "n:d" arg; do
case $arg in
n)
N=$OPTARG
$CMD=$( echo "$CMD" | sed -e "s/${OPTARG}//" )
;;
esac
done
while true; do
echo "doing '$CMD' every $N"
#$( $@ )
sleep $N
done
| true
|
dbfb914e90565182bfcce026ed6cd49acf4b2b40
|
Shell
|
reisub/foreground-detector
|
/skripta.sh
|
UTF-8
| 203
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
dir="*"
if [ $# -eq 1 ]
then
dir="$1"
fi
for i in $HOME/Downloads/video_zavrsni/$dir/*.avi
do
filename=$(basename "$i")
echo "$filename:"
./detector "$i"
echo "#"
done
| true
|
9a0c1464dabf05be8ffa8332b603b14c24764924
|
Shell
|
ra2003/reprocrawl-code-release
|
/experiment-generator/primary-experiment/launch.sh
|
UTF-8
| 290
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
read -p "are the DBs scrubbed and tubbed and adequately fed? (type 'yes' to continue)" YES
if [ "$YES" != "yes" ]; then
echo "no jobs for you!"
exit 1
fi
cd $(dirname $0)
echo "Running in $(pwd)..."
time ../mkjobs.py ../../tranco/top-1m.csv 25000 matrix.json
time ../qjobs.py
| true
|
82e2430f9530f17ee12b18b3f0432252ac7deba5
|
Shell
|
truongbb/spring-boot-ci-efk-jaeger-sentry
|
/stop_job.sh
|
UTF-8
| 268
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# stop pipeline if QUALITY GATE is FAIL
if [[ ${QUALITY_GATE} = "NOK" ]]
then
echo "QUALITY GATE FAILED. CHECK SONAR SCANNER RESULT!"
export QUALITY_GATE="OK"
exit -1
else
echo "QUALITY GATE PASSED. SONAR SCANNER'S SUCCESSFUL!"
fi
| true
|
6911d2fde28e162cbddaad6ed8e4b9172e69124a
|
Shell
|
netronome-support/IVG
|
/aovs_2.6B/vm_creator/ubuntu/vm_scripts/5_build_pktgen.sh
|
UTF-8
| 1,710
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
export DPDK_BASE_DIR=/root
export DPDK_VERSION=dpdk-17.05
export DPDK_TARGET=x86_64-native-linuxapp-gcc
export DPDK_BUILD=$DPDK_BASE_DIR/$DPDK_VERSION/$DPDK_TARGET
export PKTGEN=pktgen-3.4.1
echo "Cleaning.."
if [ -d "$DPDK_BASE_DIR/$PKTGEN" ]; then
rm -rf $DPDK_BASE_DIR/$PKTGEN
fi
if [ ! -e "$DPDK_BASE_DIR/$PKTGEN.tar.gz" ]; then
echo "Downloading.."
wget http://dpdk.org/browse/apps/pktgen-dpdk/snapshot/$PKTGEN.tar.gz --directory-prefix=$DPDK_BASE_DIR
fi
echo "Extracting.."
tar xf $DPDK_BASE_DIR/$PKTGEN.tar.gz -C $DPDK_BASE_DIR
cd $DPDK_BASE_DIR/$PKTGEN
sed 's/DEFAULT_PKT_BURST = 32/DEFAULT_PKT_BURST = 64/g' -i ./app/pktgen-constants.h
sed '/DEFAULT_RX_DESC =/d' -i ./app/pktgen-constants.h
sed '/DEFAULT_TX_DESC =/d' -i ./app/pktgen-constants.h
sed -i "/DEFAULT_PKT_BURST/aDEFAULT_RX_DESC = 512," ./app/pktgen-constants.h
sed -i "/DEFAULT_PKT_BURST/aDEFAULT_TX_DESC = 1024," ./app/pktgen-constants.h
#Change MAX_MBUFS_PER_PORT * 8 to 32 for more flows per port
sed -i '/.*number of buffers to support per port.*/c\\tMAX_MBUFS_PER_PORT\t= (DEFAULT_TX_DESC * 32),/* number of buffers to support per port */' /root/$PKTGEN/app/pktgen-constants.h
sleep 1
make RTE_SDK=$DPDK_BASE_DIR/$DPDK_VERSION RTE_TARGET=$DPDK_TARGET
rm -f /root/dpdk-pktgen
ln -s $DPDK_BASE_DIR/$PKTGEN/app/x86_64-native-linuxapp-gcc/pktgen /root/$PKTGEN/dpdk-pktgen
cat <<EOF > /etc/dpdk-pktgen-settings.sh
export DPDK_PKTGEN_DIR=$srcdir/$PKTGEN
export DPDK_PKTGEN_EXEC=$srcdir/$PKTGEN/app/app/$RTE_TARGET/pktgen
EOF
cat <<EOF > /etc/dpdk-pktgen-settings.sh
export DPDK_PKTGEN_DIR=$srcdir/$PKTGEN
export DPDK_PKTGEN_EXEC=$srcdir/$PKTGEN/app/app/$RTE_TARGET/pktgen
EOF
exit 0
| true
|
1f46a0ed1a26977b6c0c2565378d7f7709a5cef7
|
Shell
|
lucifer654321/kubernetes
|
/ansible_deploy_k8s/roles/bootstrap/templates/bootstrap.sh.j2
|
UTF-8
| 1,600
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Ansible/Jinja2 template: generates a kubelet bootstrap kubeconfig for every
# node in the "k8s" inventory group and distributes it to the nodes.
# {{ ... }} / {% ... %} markers are expanded by Ansible at render time.
echo "#### Config Bootstrap ####"
# Make sure root can talk to the cluster with the admin credentials.
[ -d "/root/.kube" ] || mkdir -p /root/.kube
ADMIN_CONFIG="/root/.kube/config"
[ -f ${ADMIN_CONFIG} ] || cp {{remote_k8s_conf_dir}}/admin.kubeconfig ${ADMIN_CONFIG}
# All node names from the inventory, as a bash array.
ALL_NAMES=({% for ip in groups.k8s %} {{ hostvars[ip].node_name }} {% endfor %})
for all_name in ${ALL_NAMES[@]}
do
echo ">>> ${all_name}"
# Create a bootstrap token scoped to this node's bootstrappers group.
export BOOTSTRAP_TOKEN=$(kubeadm token create \
--description kubelet-bootstrap-token \
--groups system:bootstrappers:${all_name} \
--kubeconfig ~/.kube/config)
# Set the cluster parameters.
kubectl config set-cluster kubernetes \
--certificate-authority={{remote_k8s_cert_dir}}/ca.pem \
--embed-certs=true \
--server={{apiserver}} \
--kubeconfig=bootstrap/kubelet-bootstrap-${all_name}.kubeconfig
# Set the client (token) credentials.
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap/kubelet-bootstrap-${all_name}.kubeconfig
# Set the context parameters.
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap/kubelet-bootstrap-${all_name}.kubeconfig
# Select the default context.
kubectl config use-context default \
--kubeconfig=bootstrap/kubelet-bootstrap-${all_name}.kubeconfig
done
# Distribute each generated bootstrap.kubeconfig to its node over ssh/scp.
for i in ${ALL_NAMES[@]}
do
echo ">>> $i"
ssh $i "mkdir -p {{remote_k8s_conf_dir}}"
scp bootstrap/kubelet-bootstrap-${i}.kubeconfig $i:{{remote_k8s_conf_dir}}/kubelet-bootstrap.kubeconfig
done
| true
|
db5553568e2341b32c3005cb1b6061ba14bf2467
|
Shell
|
MaxMax2016/ipa2kaldi
|
/bin/add_noise.sh
|
UTF-8
| 1,439
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
# Augments an audio corpus with noise.
#
# Mixes a (normalized, compressed, optionally reverberated) foreground
# recording with a background noise track and writes 16 kHz mono 16-bit WAV
# to stdout. Offsets/durations are optional trim windows; levels are sox
# --norm targets in dB.
#
# Credit: https://github.com/gooofy/zamia-speech/blob/master/speech_gen_noisy.py
if [[ -z "$4" ]]; then
echo 'Usage: add_noise.sh input_audio fg1 fg2 bg [bg_offset] [bg_duration] [fg_offset=] [fg_duration=] [fg_level=0] [bg_level=0] [reverb=0]'
exit 1
fi
input_audio="$1"
fg1="$2"
fg2="$3"
bg="$4"
bg_offset="$5"
bg_duration="$6"
fg_offset="$7"
fg_duration="$8"
fg_level="${9:-0}"
bg_level="${10:-0}"
reverb="${11:-0}"
# -----------------------------------------------------------------------------
# Optional seek/duration window applied to the foreground input by ffmpeg.
ffmpeg_args=()
if [[ -n "${fg_offset}" ]]; then
ffmpeg_args+=('-ss' "${fg_offset}")
fi
if [[ -n "${fg_duration}" ]]; then
ffmpeg_args+=('-t' "${fg_duration}")
fi
# Optional sox "trim <offset> [duration]" window applied to the background.
bg_args=()
if [[ -n "${bg_offset}" ]]; then
bg_args+=('trim' "${bg_offset}")
if [[ -n "${bg_duration}" ]]; then
bg_args+=("${bg_duration}")
fi
fi
# -----------------------------------------------------------------------------
# ffmpeg resamples the input to 16 kHz mono PCM; the first sox normalizes,
# wraps it between fg1/fg2, compands and adds reverb; the second sox mixes in
# the (normalized, trimmed) background via process substitution.
ffmpeg -y -i "${input_audio}" "${ffmpeg_args[@]}" -ar 16000 -ac 1 -acodec pcm_s16le -f wav - | \
sox "--norm=${fg_level}" \
"${fg1}" \
-t wav --ignore-length - \
"${fg2}" \
-p \
compand 0.01,0.2 -90,-10 -5 reverb "${reverb}" | \
sox \
--combine mix \
-t sox - \
-t sox <(sox "--norm=${bg_level}" "${bg}" -p "${bg_args[@]}") \
-b 16 -r 16000 -c 1 \
-t wav -
| true
|
ad46d37e998a97c59d8d7aec49e7c1e744b1cb57
|
Shell
|
Hpmaharaja/resang_production
|
/machine_learning/tensorflow/python-test.sh
|
UTF-8
| 633
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Smoke-test the TensorFlow installation: download a sample image and run the
# bundled ImageNet classify_image.py demo on it.
PYTHON_PATH=$(python -c "import site; print(site.getsitepackages()[0])")
CLASSIFY_IMAGE_PATH="$PYTHON_PATH/tensorflow/models/image/imagenet/classify_image.py"
################################################
# 88931400 bytes (89 MB) needed for tensorflow #
################################################
# Directory of this script; quoted so paths containing spaces don't break.
MY_PATH=$(cd "$(dirname "$0")" && pwd)
IMAGES_PATH="$MY_PATH/images/"
wget -P "$IMAGES_PATH" http://haileyidaho.com/wp-content/uploads/2015/01/Stanley-lake-camping-Credit-Carol-Waller-2011.jpg
TEST_IMAGE="Stanley-lake-camping-Credit-Carol-Waller-2011.jpg"
python "$CLASSIFY_IMAGE_PATH" --image_file="$IMAGES_PATH$TEST_IMAGE"
| true
|
15c9691d419f95684d0b9fde899f275aa5de261b
|
Shell
|
justsowicked1/Misc-Scripts
|
/Revert_Chrome_Updates.sh
|
UTF-8
| 364
| 2.890625
| 3
|
[] |
no_license
|
#! /bin/bash
# Reverts Google Chrome's auto-update machinery on macOS by deleting the
# GoogleSoftwareUpdate bundles (system and user level), then relaunches Chrome.

echo "Quitting Chrome"
killall "Google Chrome"

# Delete both update bundles.
echo "Removing modified system level Google Software Update"
rm -rf /Library/Google/GoogleSoftwareUpdate

echo "Removing modified user level Google Software Update"
rm -rf ~/Library/Google/GoogleSoftwareUpdate

# Give the OS a moment before restarting the browser.
echo "Reopening Chrome in 5 seconds"
sleep 5
open /Applications/Google\ Chrome.app

exit 0
| true
|
fd299a951ba8c5ba7a8af5bff531be94e0034e48
|
Shell
|
gh0stwizard/staticperl-modules
|
/misc/solaris/staticperlrc-stableperl
|
UTF-8
| 4,640
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# staticperlrc for building stableperl with staticperl on Solaris 10 + CSW.
# This file is sourced by staticperl; it only sets environment variables and
# defines helper functions (see bottom of file).
# < Optional >
# Might be useful for some modules like Feersum.
# Comment out the line below at the configure stage.
# Comment the line at the install stage.
# See details at staticperl CPAN's page.
###export PERL_LDFLAGS="-Wl,--no-gc-sections -Wl,--allow-multiple-definition"
# Solaris 10 + CSW
# $HOME/bin must contains symbolic links to /opt/csw/gnu/* programs:
# find, make, grep, sed, strip, tr.
export PATH=$HOME/bin:$PATH
###export LD_ALTEXEC=/opt/csw/gnu/ld
###export LD_ALTEXEC=/usr/sfw/i386-sun-solaris2.10/bin/ld
# Use the SFW gcc together with its bundled binutils (-B).
export PERL_CC="/usr/sfw/bin/gcc -B/usr/sfw/i386-sun-solaris2.10/bin/"
# Your email address.
# It will be inserted inside of the Perl binary file.
# To see it type "~/staticperl perl -V".
export EMAIL="stablestaticperl@example.com"
# Where is perl source files will be kept (perl-VERSION.tar.bz2)
export DLCACHE="$HOME/dl/source/perl"
# A directory with patched modules by gh0stwizard
GW_PATCHSET="$HOME/dev/perl/staticperl-modules"
export GW_MODULES="${GW_PATCHSET}/modules"
export GW_PATCHES="${GW_PATCHSET}/patches"
unset GW_PATCHSET
# Choose Perl version to install.
# List of the Perl releases: http://www.cpan.org/src/README.html
export PERL_VERSION="5.22.0"
export STABLEPERL_RELEASE="1.001"
# staticperl's directory with installed modules, scripts, etc.
export STATICPERL="$HOME/.stablestaticperl-$PERL_VERSION-$STABLEPERL_RELEASE"
# Some modules requires LIBHOME environment variable to be set correctly.
export LIBHOME="$STATICPERL/lib/CORE"
# Configuration flags while building Perl.
# See installation instructions in Perl distribution for details.
export PERL_CONFIGURE="-Duse64bitint -Duselargefiles -Dusenm"
# Enable "ask questions" option of the CPAN shell.
# Useful for some modules, e.g. EV, to configure & install them manually.
#export PERL_MM_USE_DEFAULT=0
# Optimization flags when building Perl.
#export PERL_OPTIMIZE="-Os -ffunction-sections -fdata-sections -finline-limit=8 -mpush-args -mno-inline-stringops-dynamically -mno-align-stringops"
# Linking against this libraries when building Perl.
export PERL_LIBS="-lm -lcrypt -lsocket -lresolv -lnsl"
# DBD::Oracle (note for Solaris 10: InstantClient 11 seems to be broken)
export LD_LIBRARY_PATH="$HOME/lib/instantclient_10_2"
export ORACLE_HOME="$HOME/lib/instantclient_10_2"
#export LD_LIBRARY_PATH="$HOME/lib/instantclient_12_1"
#export ORACLE_HOME="$HOME/lib/instantclient_12_1"
# DBD::Oracle additionals
export ORACLE_USERID="tiger/scott"
export ORACLE_SID="XBD"
export DBI_DSN="dbi:Oracle:$ORACLE_SID"
export TNS_ADMIN="/etc"
#
# Post installation functions.
#
# See postinstall() at the bottom of this file for details.
#
# Helper installers. instcpan/instsrc are provided by staticperl when this
# file is sourced: instcpan installs from CPAN, instsrc from a local tree.
# Install IO::AIO and EV (async I/O + event loop).
install_aio_ev() {
instcpan IO::AIO EV
}
# Install the JSON::XS stack.
install_json_xs() {
instcpan JSON::XS JSON
}
# Install AnyEvent and its Guard prerequisite.
install_anyevent() {
instcpan Guard
# < Optional >
# Net::SSLeay requires openssl-devel (libssl-dev)
# package installed in OS
#
#instcpan Net::SSLeay
#instcpan Async::Interrupt
instcpan AnyEvent
}
# Install Coro and the event/AIO glue it builds on.
install_coro() {
install_aio_ev
install_anyevent
instcpan BDB
instcpan AnyEvent::AIO
instcpan AnyEvent::BDB
instcpan Event
instcpan Coro
}
# Install the AnyEvent::Fork worker-pool stack.
install_ae_fork() {
install_aio_ev
install_json_xs
install_anyevent
instcpan EV::Loop::Async
instcpan IO::FDPass
instcpan Proc::FastSpawn
instcpan AnyEvent::Fork
instcpan AnyEvent::Fork::RPC
instcpan Array::Heap
instcpan AnyEvent::Fork::Pool
}
# Install the XML::LibXML SAX stack.
install_xml() {
instcpan XML::NamespaceSupport
instcpan XML::SAX::Base
instcpan XML::SAX
instcpan XML::LibXML
}
# Install the Feersum web server and its event-loop dependencies.
install_feersum() {
install_aio_ev
install_json_xs
install_anyevent
instcpan Feersum
}
# Install DBI + the Oracle driver (needs the InstantClient env vars above).
install_dbd_oracle() {
instcpan DBI
instcpan DBD::Oracle
}
# ??? requirement for one of dependence module of DateTime
install_class_load() {
# Class::Load requirements
instcpan Try::Tiny
instcpan Module::Runtime
instcpan Module::Implementation
instcpan Package::Stash
instcpan Data::OptList
instcpan namespace::clean
instcpan Class::Load
# Class-Load-XS 0.09 installs out of box
instcpan Class::Load::XS
}
# requirement for DateTime
install_params_validate() {
instsrc ${GW_MODULES}/Params-Classify-0.013
# Params::Validate requirements
instcpan Module::Implementation
instsrc ${GW_MODULES}/Params-Validate-1.22
}
# Install DateTime using the locally patched module trees where needed.
install_datetime() {
install_class_load
install_params_validate
instcpan DateTime::Locale
instcpan List::AllUtils
instcpan DateTime::TimeZone
instsrc ${GW_MODULES}/DateTime-1.24
}
# Hook run by staticperl after Perl is installed; currently a no-op.
postinstall() {
#
# Comment out lines below if you wish to install these modules
# at "~/staticperl install" stage.
#
# instcpan YAML::XS
# install_aio_ev
# install_json_xs
# install_anyevent
# install_ae_fork
# install_feersum
# install_datetime
: ;
}
| true
|
1b31b51163a6cec889b8eb1ca4c848ba980b50f8
|
Shell
|
Adjective-Object/mtg-cardframes
|
/download-cards.sh
|
UTF-8
| 669
| 3.890625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Download the PNG image of every card in a Scryfall-style JSON list into a
# directory named after the list file (minus its .json suffix).
if [[ "$#" != "1" ]]; then
	echo "usage: $0 <path/to/cards/json/list>"
	exit 1
fi
QUERY_NAME=${1%.json}
mkdir -p "$QUERY_NAME"
# Emit one "png-url;Sanitized-Name.png" line per card that has a PNG image
# (commas, apostrophes, slashes and spaces in names become dashes).
LINES=$(jq '.[]|select(.image_uris.png != null)|.image_uris.png + ";" +( .name | sub( "[, '"'"'/]+" ; "-"; "g") ) + ".png"' < "$1")
echo "$LINES"
# Bugfix/robustness: iterate line-by-line with read instead of word-splitting
# the whole list, and use $( ) instead of backticks.
while IFS= read -r IMAGE_LINE; do
	[[ -z "$IMAGE_LINE" ]] && continue
	NOQUOTES=$(echo "$IMAGE_LINE" | sed s/\"//g)
	URL=$(echo "$NOQUOTES" | cut -d ';' -f 1)
	FNAME=$(echo "$NOQUOTES" | cut -d ';' -f 2)
	echo "IMAGE_LINE: '$IMAGE_LINE'"
	echo "'$NOQUOTES'" "'$URL'" "'$FNAME'"
	set -ex
	curl "$URL" > "$QUERY_NAME/$FNAME"
	set +ex
	# Be polite to the image host.
	sleep 0.5
done <<< "$LINES"
| true
|
172f6c62748aac117667373e1e687344c1cba40f
|
Shell
|
openstreetmap/dns
|
/bin/update
|
UTF-8
| 677
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Upload data to Bytemark Hosting's content DNS servers
#
RSYNC=/usr/bin/rsync
USER=openstreetmap

# Make sure rsync is available; on Debian-like systems try to install it.
if [ ! -f $RSYNC ] ; then
  echo "You need rsync installed to use this script"
  if [ -f /etc/debian_version ] ; then
    echo "I'll try to install it automatically."
    apt-get install rsync
  fi
fi

# Push the zone data to each content DNS server that answers a ping.
for SERVER in upload ; do
  host="$SERVER.ns.bytemark.co.uk"
  echo -n "Server $host..."
  if ! ping -c 1 "$host" >/dev/null 2>&1 ; then
    echo "not responding."
    continue
  fi
  echo -n "alive, sending updates..."
  if $RSYNC -C -r --delete data/ "dns@$host::$USER"; then
    echo "sent."
  else
    echo "failed :-("
  fi
done
| true
|
5961eb31763bdb97ac3b2de404d81bd4ef352915
|
Shell
|
1001Pharmacies/infra
|
/docker/elastic/curator/docker-entrypoint.sh
|
UTF-8
| 441
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
# Entrypoint for the Elasticsearch Curator container: installs a daily cron
# job that runs curator, then runs crond in the foreground (or execs a
# user-supplied command instead).
# NOTE(review): 'pipefail' is not guaranteed under every /bin/sh (dash/ash);
# confirm the base image's shell supports it.
set -euo pipefail
set -o errexit
# Forward Ctrl-C to the background job started at the bottom of the script.
trap 'kill -SIGQUIT $PID' INT
CRON_DAILY_COMMAND="/usr/bin/curator --config /etc/curator/config.yml /etc/curator/action.yml"
# When deployed on a cluster, wrap in cronlock so only one node runs the job.
[ "${DEPLOY:-}" = "true" ] && CRON_DAILY_COMMAND="cronlock ${CRON_DAILY_COMMAND}"
# Write the daily cron script.
cat > /etc/periodic/daily/curator <<EOF
#!/bin/sh
${CRON_DAILY_COMMAND}
EOF
chmod +x /etc/periodic/daily/curator
# No args: run crond in the foreground; otherwise exec the given command.
# The whole AND-OR list is backgrounded so the INT trap above can reach it.
[ $# -eq 0 ] && exec crond -f -L/dev/stdout || exec "$@" &
PID=$! && wait
| true
|
d44250e0dd682ceab1cc2a61db84c9b808f59e1b
|
Shell
|
joeytwiddle/jsh
|
/code/shellscript/text/html2txt.sh
|
UTF-8
| 285
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Convert HTML to plain text. Reads the files given as arguments, or stdin.
# The lynx branch is intentionally disabled via "if false"; re-enable by
# restoring the command -v test in the comment.
if false # command -v lynx >/dev/null 2>&1
then
url="$*"
[ -z "$url" ] && url="-stdin"
lynx -dump -nolist "$url"
else
# Strips tags but does not strip CSS and does not unescape text content
# (second sed only empties whitespace-only lines; \s is a GNU sed extension)
cat "$@" | sed -e 's/<[^>]*>//g' | sed 's+^\s*$++'
fi
| true
|
3cb890e96e597522925fd9b61ee62b56784e038e
|
Shell
|
Xarchuser/Arch-Dotfiles
|
/bin/i3battery
|
UTF-8
| 589
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# i3bar battery indicator: prints the battery status and percentage as a
# pango <span>, colored by charge level.
NUM=$(cat /sys/class/power_supply/BAT0/capacity)
STATE=$(cat /sys/class/power_supply/BAT0/status)
# Map the charge percentage to a color, green (>=80) down to red (<20).
colorget() {
if [[ $NUM -ge 80 ]]; then
color="#AEC795"
elif [[ $NUM -ge 60 ]]; then
color="#F0C674"
elif [[ $NUM -ge 40 ]]; then
color="#C7AE95"
elif [[ $NUM -ge 20 ]]; then
color="#C75959"
else
color="#FF0000"
fi ;}
# While charging always use green; otherwise pick a color by level.
if [[ $STATE == "Charging" ]]; then
color="#AEC795"
else
colorget
fi
# Replace the status word with an icon and append the percentage sign.
echo "<span color='$color'>$(echo $STATE | sed -e "s/,//g;s/Discharging/🔌/;s/Charging//;s/Unknown/❓/;s/Full/⚡/;s/ 0*/ /g;s/ :/ /g") $(echo $NUM | sed -e 's/$/%/')</span>"
| true
|
8780dc5f31404fe492b2429a37a16c4d9fab883f
|
Shell
|
innerfunction/liquid-node
|
/scripts/run-tests.sh
|
UTF-8
| 173
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CI test runner: always run the unit tests; on Node 6 additionally run
# coverage and linting (so they only run once per build matrix).
set -euo pipefail

npm test

case "${TRAVIS_NODE_VERSION}" in
  6)
    npm run coverage
    npm run lint
    ;;
  *)
    echo "Not running coverage and linting..."
    ;;
esac
| true
|
8014e4f32deb888d23eaca23da8e53438900e4b3
|
Shell
|
trigrass2/eview-server
|
/setup/arm-linux/5.postbuild.sh
|
GB18030
| 2,958
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
# Package the eview-server build output into repo/eview-server and create the
# release tarballs. (The file is GB18030-encoded; the original Chinese
# comments were mojibake and have been replaced with English ones. Echoed
# mojibake strings are left untouched — they are runtime output.)
CURDIR=$(cd `dirname $0`; pwd)
cd ${CURDIR}
echo prepare eveiw-server directory
rm -rf ../../repo/eview-server
mkdir ../../repo/eview-server
# Python plugins: on Linux the loader expects pydriver.so / pydata.so without
# the "lib" prefix, so rename the freshly built libraries.
rm ../../bin/pydriver.so
mv ../../bin/libpydriver.so ../../bin/pydriver.so
rm ../../bin/pydata.so
mv -f ../../bin/libpydata.so ../../bin/pydata.so
echo copy all bin with drivers
cp -r ../../bin ../../repo/eview-server/
echo pythonļ
# Bugfix: was "rmdir -rf" — rmdir has no -r/-f options and cannot remove a
# non-empty tree, so the following mkdir could fail on reruns. Use rm -rf.
rm -rf ../../repo/eview-server/python
mkdir ../../repo/eview-server/python
tar -xzvf ../../thirdparty/python2.7.tar.gz -C ../../repo/eview-server/python/
rm -rf ../../repo/eview-server/python/include/
echo copy config directory
mkdir ../../repo/eview-server/config
mkdir -p ../../repo/eview-server/etc/init.d
cp -r ../../template/config/logconfig.xml ../../repo/eview-server/config/
cp -r ../../config/pkhisdb.conf ../../repo/eview-server/config/
cp -r ../../config/pkmemdb.conf ../../repo/eview-server/config/
cp -r ../../config/pkmqtt.conf ../../repo/eview-server/config/
cp -r ../../template/config/arm-linux/pkservermgr.xml ../../repo/eview-server/config/
chmod +x ../../template/etc/init.d/eview
cp -r ../../template/etc/init.d/eview ../../repo/eview-server/etc/init.d/
cp -r ../../template/etc/arm-linux/profile ../../repo/eview-server/etc/
echo copy start.sh...
cp -r ../../template/bin/linux/start_all.sh ../../repo/eview-server/bin/
cp -r ../../template/bin/linux/stop_all.sh ../../repo/eview-server/bin/
chmod +x ../../repo/eview-server/bin/*.sh
chmod +x ../../repo/eview-server/bin/*
echo ִнűreadme
chmod +x ../../repo/eview-server/*.sh
echo now package it ......
# Strip svn metadata and trim unused boost libraries before packaging.
find ../../repo/eview-server -name ".svn"|xargs rm -rf
rm -f ../../repo/eview-server/bin/libboost_locale* ../../repo/eview-server/bin/libboost_context* ../../repo/eview-server/bin/libboost_coroutine* ../../repo/eview-server/bin/libboost_graph* ../../repo/eview-server/bin/libboost_log*
rm -f ../../repo/eview-server/bin/libboost_math* ../../repo/eview-server/bin/libboost_prg* ../../repo/eview-server/bin/libboost_program_options* ../../repo/eview-server/bin/libboost_random* ../../repo/eview-server/bin/libboost_serialization*
rm -f ../../repo/eview-server/bin/libboost_signals* ../../repo/eview-server/bin/libboost_timer* ../../repo/eview-server/bin/libboost_unit_test_framework* ../../repo/eview-server/bin/libboost_wave* ../../repo/eview-server/bin/libboost_wserialization*
#rm -f ../../repo/eview-server/bin/libboost_regex*
rm -f ../../repo/eview-server-2*.tar.gz
tar -cvzf ../../repo/eview-server-base.tar.gz -C ../../repo/eview-server/ .
#zip -r ../../repo/eview-server-base.zip ./../repo/eview-server/
# Keep a date-stamped copy of the base tarball.
CURDATE=$(date +%Y%m%d)
cp -f ../../repo/eview-server-base.tar.gz ../../repo/eview-server-base-${CURDATE}.tar.gz
cd ${CURDIR}
sh postbuild-driversdk.sh
sh postbuild-pkdata.sh
cd ${CURDIR}
| true
|
9f64aa4b96540f4c36e5b1eba6f076303fa2a670
|
Shell
|
ojosilva/baseliner
|
/bin/bali_web
|
UTF-8
| 1,126
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
# Starts or stops the Baseliner Catalyst web server.
#
# Usage: bali_web        start the server (rotates the previous log)
#        bali_web stop   stop the server recorded in the pid file
#
# Requires BASELINER_LOGHOME and BASELINER_HOME in the environment.
LOGDIR=$BASELINER_LOGHOME
LOGFILENAME=bali_web
NOW=$(perl -MDateTime -le 'print DateTime->now()')
# Bugfix: was "$LOGFILENAME_$NOW", which reads the undefined variable
# "LOGFILENAME_"; braces give the intended "<name>_<timestamp>.log".
LOGFILEOLD=$LOGDIR/${LOGFILENAME}_$NOW.log
LOGFILE=$LOGDIR/$LOGFILENAME.log
PIDFILE=$LOGDIR/bali_web.pid
if [ "$1" != "stop" ]; then
	if [ -e "$PIDFILE" ]; then
		echo "Server is already running."
		exit 1;
	fi
	# Rotate the previous log; -f/2>/dev/null keep the very first start quiet.
	mv -f "$LOGFILE" "$LOGFILEOLD" 2>/dev/null
	echo "Log file: $LOGFILE"
	cd "$BASELINER_HOME" || exit 1
	CATALYST_ENGINE=HTTP::Prefork nohup perl script/baseliner_server.pl > "$LOGFILE" 2>&1 &
	SERVER_PID=$!
	echo $SERVER_PID > "$PIDFILE"
	echo "Server started with pid: $SERVER_PID"
	echo "Waiting for children to start..."
	# Bugfix: the perl one-liner was single-quoted, so the shell never
	# expanded $SERVER_PID into the regex; double quotes fix that (and \$a
	# reaches perl as a plain scalar).
	CHILDREN=$(ps -ef | grep perl | grep baseliner_ | perl -n -e "next unless /$SERVER_PID/; @a=split / /; print \$a[2],','")
	sleep 20
	# Bugfix: this line previously used Perl syntax (print "..." . $CHILDREN;).
	echo "Children started: $CHILDREN"
else
	SERVER_PID=$(cat "$PIDFILE")
	echo "Server pid: $SERVER_PID"
	kill $SERVER_PID 2>/dev/null
	if [ $? = 0 ]; then
		echo "Server stopped."
	else
		echo "Server is not running."
	fi
	rm "$PIDFILE"
fi
# (removed a stray "~" line left over from a vi session — it tried to
# execute $HOME as a command)
| true
|
9921a2ead395ef60c0e5ae2bba3043e796b888e4
|
Shell
|
hortonworks/cloudbreak-images
|
/saltstack/base/salt/ccm-client/cdp/bin/update-reverse-tunnel-values.sh
|
UTF-8
| 626
| 2.671875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
#!/bin/bash -ux
# Writes the per-role CCM reverse-tunnel settings file from the current
# environment, then enables and starts the matching ccm-tunnel systemd
# template instance.
#
# $1 - tunnel role (used in the settings filename and the unit instance name)
# $2 - service port exposed through the tunnel
# -u aborts if any of the CCM_* variables below is missing from the caller's
# environment.
CCM_TUNNEL_ROLE=$1
CCM_TUNNEL_SERVICE_PORT=$2
cat > /cdp/bin/reverse-tunnel-values-${CCM_TUNNEL_ROLE}.sh <<EOF
CCM_HOST=${CCM_HOST}
CCM_SSH_PORT=${CCM_SSH_PORT}
CCM_PUBLIC_KEY_FILE=${CCM_PUBLIC_KEY_FILE}
CCM_TUNNEL_INITIATOR_ID=${CCM_TUNNEL_INITIATOR_ID}
CCM_KEY_ID=${CCM_KEY_ID}
CCM_ENCIPHERED_PRIVATE_KEY_FILE=${CCM_ENCIPHERED_PRIVATE_KEY_FILE}
CCM_TUNNEL_ROLE=${CCM_TUNNEL_ROLE}
CCM_TUNNEL_SERVICE_PORT=${CCM_TUNNEL_SERVICE_PORT}
EOF
# Readable only by owner/group: the file references key material.
chmod 740 /cdp/bin/reverse-tunnel-values-${CCM_TUNNEL_ROLE}.sh
systemctl enable ccm-tunnel@${CCM_TUNNEL_ROLE}.service
systemctl start ccm-tunnel@${CCM_TUNNEL_ROLE}.service
| true
|
a2e22fdfc04b6a78fa55946b0d4be13973579d2e
|
Shell
|
nostneji/keeleliin-server
|
/docker_start.sh
|
UTF-8
| 263
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Container bootstrap for keeleliin-server: seed a default config on first
# run, link it into the source tree, then start the app under forever.

# First run: copy the distributed default config into the config volume.
[ -f /config/config.js ] || cp -R -u -p /src/config_dist.js /config/config.js

# Expose the (possibly user-edited) config to the application source tree.
[ -f /src/config.js ] || ln -s /config/config.js /src/config.js

# Start the app daemonized, then follow its log to keep the container alive.
forever start -l /forever.log /src/app.js
forever list
tail -f /forever.log
| true
|
6459a2d99b0cef10269763b147d6e207ae8d6cfa
|
Shell
|
etorres4/packaging-scripts
|
/misc/fqo.sh
|
UTF-8
| 988
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/bash
# Fuzzy find a file and then check which package owns it
declare -r scriptname="fqo"
# Print the help page to stdout.
printHelp() {
cat << helpinfo
${scriptname} - fuzzy find a file and then check which package owns it
Usage: ${scriptname} [-h] [patterns]
Options:
-h, --help print this help page
helpinfo
}
# Minimal option handling: only -h/--help and the "--" terminator are
# recognized; the first non-option stops parsing.
while true; do
case "${1}" in
"-h"|"--help")
printHelp
exit
;;
--)
shift
break
;;
-*)
printf '%s\n' "Unknown option: ${1}" >&2
exit 1
;;
*)
break
;;
esac
done
# Preconditions: locate and fzf must be installed, and patterns supplied.
[[ ! -x '/usr/bin/locate' ]] && echo 'locate is not present' && exit 1
[[ ! -x '/usr/bin/fzf' ]] && echo 'fzf is not present' && exit 1
[[ -z "${*}" ]] && printf '%s\n' "No patterns entered" >&2 && exit 1
# NUL-delimited locate -> fzf pipeline so filenames with spaces survive;
# --select-1 auto-picks when only one candidate matches all patterns.
file="$(locate --all --ignore-case --null -- "${@}" | fzf --exit-0 --select-1 --read0 --no-mouse)"
[[ ! "${file}" ]] && exit 1
# Ask pacman which package owns the chosen file.
pacman -Qo "${file}"
| true
|
fa75f0b297d87c53cee24318bdea7d077444c71d
|
Shell
|
jaboca/6b-JavierBot-aCastillo
|
/6b-JavierBotíaCastillo/ejercicio5.sh
|
UTF-8
| 592
| 3.390625
| 3
|
[] |
no_license
|
# Tally, per operating system, how many machines appear in listado.txt and
# how many processes they run in total. Each line: <user> <os> <processes>.
IFS=$'\n'
contadorWindows=0
contadorLinux=0
procesosWindows=0
procesosLinux=0
for linea in $(cat listado.txt); do
    unset IFS
    # Field 0: user, field 1: operating system, field 2: process count.
    read -ra campos <<< "$linea"
    case "${campos[1]}" in
        Linux)
            contadorLinux=$((contadorLinux + 1))
            procesosLinux=$((procesosLinux + ${campos[2]}))
            ;;
        Windows)
            contadorWindows=$((contadorWindows + 1))
            procesosWindows=$((procesosWindows + ${campos[2]}))
            ;;
    esac
    IFS=$'\n'
done
echo "Windows -> $contadorWindows $procesosWindows"
echo "Linux -> $contadorLinux $procesosLinux"
| true
|
7f7ad899f31d6bb28489c0bc10f66846d8f40f04
|
Shell
|
arangrhie/T2T-Polish
|
/winnowmap/init.sh
|
UTF-8
| 300
| 2.921875
| 3
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
# Collect repetitive k=15 mers from a reference for winnowmap.
#
# Usage: ./init.sh ref.fa
if [[ -z "$1" ]]; then
	echo "Usage: ./init.sh ref.fa"
	echo -e "\tCollect repetitive k=15 mers for winnowmap"
	# Bugfix: "exit -1" is not a valid exit status (it wraps to 255 and is
	# rejected by some shells); use the conventional 1.
	exit 1
fi
ref=$1
# Count all 15-mers in the reference.
meryl count k=15 "$ref" output merylDB # add "compress" for homopolymer compression
# Keep the most repetitive 0.02% of distinct k-mers for winnowmap's -W list.
meryl print greater-than distinct=0.9998 merylDB > repetitive_k15.txt
| true
|
ea9641a836270d6454e979ca24b4eed3d926744a
|
Shell
|
mschiffm/source-sift
|
/source-sift.sh
|
UTF-8
| 571
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# source-sift: recursive find that looks through all files program-y related
# in a specified path, looking for a needle
#
# Vernon Schryver <schryver@fsi.io> Original idea
# Mike Schiffman <mschiffm@fsi.io>  A few tweaks and upgrades
if [ $# != 2 ]
then
    echo "Usage: source-sift needle haystack"
    echo "needle: what you're looking for"
    echo "haystack: where to look"
    # Bugfix: a usage error should exit non-zero.
    exit 1
fi
NEEDLE=$1
HAYSTACK=$2
# -E: extended regex, -L: follow symlinks (BSD find flags). The regex matches
# source-ish extensions plus Makefiles; *.so.N shared objects are excluded.
# NUL-delimit the file list so paths containing spaces survive xargs, and use
# "--" so needles starting with "-" are not taken as grep options.
find -EL "$HAYSTACK" -type f -regex "(.*/)?[^.][^/]*(\.[chs1-8]|\.man|\.m4|\.rc|\.cpp|\.c\+\+|akefile|\.inc|\.inc2|\.in|\.ac|\.py|\.def)" ! -name "*\.so\.[0-9]" -print0 | xargs -0 grep -in -- "$NEEDLE"
| true
|
912fb6de6d4c8aad14ce9cc2be01815cb17271d3
|
Shell
|
i3c-cloud/dev-orientdb
|
/dockerfiles/orientdb/run-orientdb.sh
|
UTF-8
| 677
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Container helper: "startup" launches the OrientDB server and then blocks
# forever to keep the container alive; "echo" echoes its remaining arguments;
# anything else prints usage.
case "$1" in
startup)
#cd $JBOSS_HOME/bin
#cp $JSRC/standalone/deployments/* $JBOSS_HOME/standalone/deployments
#if [ ! -e $JBOSS_HOME/standalone/configuration/standalone.xml ]; then
#	cp -rpT $JBOSS_HOME/standalone/configuration.backup $JBOSS_HOME/standalone/configuration
#fi
#./standalone.sh -b 0.0.0.0 -bmanagement 0.0.0.0
export ORIENTDB_ROOT_PASSWORD=root
cd /orientdb/bin
./server.sh
# Keep the container running even if server.sh returns.
while true; do
sleep 1000
done
;;
echo)
echo "Echo from /run-orientdb.sh: ${@:2}"
;;
*)
echo "/r Usage(run-orientdb):"
echo "======================================"
echo "docker exec orientdb /r echo 'Hello World!'"
;;
esac
| true
|
4b3ad81ed9c05e32f47902ae3761a899ce6579f5
|
Shell
|
randomnoob/windows-ova
|
/debian-root/installer
|
UTF-8
| 2,805
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Windows.ova installer: downloads the selected Windows ISO, repartitions the
# attached disks, injects an Autounattend.xml into boot.wim, and leaves the
# data disk ready for the Windows setup boot. Runs interactively via dialog
# unless /d/variables.txt pre-seeds version/localmirror.
set -euo pipefail
# Mount points: /c = boot disk, /d = NTFS data disk, /wim = mounted boot.wim,
# /iso = physical cdrom.
[ -d /c ] || mkdir /c
[ -d /d ] || mkdir /d
[ -d /wim ] || mkdir /wim
[ -d /iso ] || mkdir /iso
# Wrapper adding a common backtitle to every dialog invocation.
dialog(){
/usr/bin/dialog --backtitle "Windows.ova v. $(< /etc/version)" "$@"
}
version=""
localmirror=""
# Since we've been booted with a valid cdrom we should copy it to d, then
# shutdown
if mount /dev/sr0 /iso; then
echo ',,7,*;' | sfdisk /dev/sdb
mkfs.ntfs -f -L data /dev/sdb1
mount /dev/sdb1 /d
rsync -Pa /iso/ /d/
umount /d
umount /iso
echo "We're done! The iso can be removed and this OVA can be exported."
echo "Press any key to continue"
read _
poweroff -f
fi
# No cdrom: make sure the data disk exists and is mounted (format on demand).
if ! mount /dev/sdb1 /d; then
echo ',,7,*;' | sfdisk /dev/sdb
mkfs.ntfs -f -L data /dev/sdb1
mount /dev/sdb1 /d
fi
# Optional unattended configuration sourced from the data disk.
if [ -e /d/variables.txt ]; then
. /d/variables.txt
# strip linefeed incase the file is in dos format
version="${version%%$'\r'}"
localmirror="${localmirror%%$'\r'}"
fi
# Build (but don't yet run) the version-selection dialog command from
# /etc/versions (CSV: name,label,sha1,url); sed preselects the first entry.
cmd="$(echo dialog --title '"Windows Version Selection"' --no-tags --radiolist '"Select a Windows version to install"' 20 60 13 $(awk -F, '{print $1,"\"" $2 "\"","off"}' /etc/versions) | sed 's/off/on/')"
if [ -z "$version" ]; then
# fd juggling (3>&2 2>&1 1>&3) captures dialog's answer from stderr.
version="$(eval "$cmd" 3>&2 2>&1 1>&3)"
fi
# Allow just overriding of the Autounattend.xml file, based on version
if [ -e "/d/${version}/Autounattend.xml" ]; then
au="/d/${version}/Autounattend.xml"
else
au="/au/${version}.xml"
fi
# User cancelled the dialog: nothing to install.
if [ -z "$version" ]; then
poweroff -f
fi
# Look up download URL (field 4) and SHA1 (field 3) for the chosen version.
URL="$(awk -F, "\$1 == \"$version\" {print \$4}" /etc/versions)"
SHA1SUM="$(awk -F, "\$1 == \"$version\" {print \$3}" /etc/versions)"
# If local mirror isn't set, ask the user about it
if [ -z "$localmirror" ]; then
localmirror="$(dialog --title "Mirror selection" --inputbox "Enter URL of local mirror. This can be left blank." 8 60 3>&2 2>&1 1>&3)"
fi
# if localmirror is set to none, then unset it
if [ "$localmirror" = "none" ]; then
localmirror=""
fi
# if local mirror is set, pull from there instead
if [ -n "$localmirror" ]; then
echo "Local mirror is $localmirror"
URL="$localmirror/$(basename "$URL")"
fi
# First run: convert /dev/sda from an unpartitioned boot image into a real
# partition, preserving its contents via /newc, and reinstall the bootloader.
if ! [ -e /dev/sda1 ]; then
mount /dev/sda /c
rm /c/initramfs.gz
rm /c/kernel.gz
rsync -a /c/ /newc
umount /c
echo ',,83,*;' | sfdisk /dev/sda
mkfs.ext4 /dev/sda1
bootlace.com /dev/sda
mount /dev/sda1 /c
rsync -a /newc/ /c
rm -rf /newc
fi
cd /c
# Download the ISO with 8 parallel connections; a wget UA keeps some mirrors
# happy.
aria2c -U 'Wget/1.19.5 (linux-gnu)' -x 8 -s 8 --check-certificate=false --file-allocation=prealloc "$URL" -o "/${version}.iso"
sync
echo "Checking iso"
echo "$SHA1SUM /c/${version}.iso" | sha1sum -c >/dev/null
# NOTE(review): under "set -euo pipefail" a failed sha1sum aborts the script
# on the line above, so this retry branch is dead code — confirm intent.
if [ $? != 0 ]; then
echo "Checksum failed"
read -r _
fi
cd /d
# Unpack the ISO onto the data disk and inject the unattend file into image 2
# of boot.wim.
7z x "/c/${version}.iso"
wimmountrw /d/sources/boot.wim 2 /wim
cp "$au" /wim/Autounattend.xml
wimunmount --commit /wim
cd /
sync
sync
sync
# Retry unmounts until the kernel releases the filesystems.
until umount /d; do sync; sleep 1; done
until umount /c; do sync; sleep 1; done
exit 0
| true
|
fa0a61717f3430bb2c29ccb274d0f2ba4f80f83f
|
Shell
|
xptsp/modify_ubuntu_kit
|
/files/kodi-bind.sh
|
UTF-8
| 305
| 3.234375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Bind-mounts the Kodi profile from /mnt/hdd into the user's home directory.
# Pass -f to skip the sudoers sanity check.
if [[ ! -f /etc/sudoers.d/kodi-bind && "${1:-}" != "-f" ]]; then
	# Bugfix: the original tested `! "$1"=="-f"` — without spaces that is a
	# single, always-non-empty word, so neither the -f flag nor this error
	# path ever worked.
	echo "ERROR: /etc/sudoers.d/kodi-bind not found! Aborting!"
elif ! mount | grep "/mnt/hdd"; then
	echo "WARNING: /mnt/hdd not mounted yet!"
elif [[ -d /mnt/hdd/.kodi ]]; then
	mount --bind /mnt/hdd/.kodi "${HOME}/.kodi"
fi
| true
|
7740035f48d0bc042bf1ba43e1d87aaacd56a612
|
Shell
|
ChennisVang/01-31-18
|
/week4.sh
|
UTF-8
| 343
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive demo: ask for a name and a favorite football team, append the
# answer to Team.log, and say goodbye.
clear
echo -n "Enter your name:"
read name
clear
# Bugfix: the closing quote was missing here, leaving an unbalanced quote
# that made the whole script fail to parse.
echo "Hello $name."
echo "What is your Favorite Football Team?"
read Team
clear
# Bugfix: "Team" was printed literally; the team the user typed was intended.
echo "$Team is a good Team"
echo "Now Saving that Info"
echo "$name favorite Team is $Team." >> Team.log
echo "Data saved."
echo "Press Enter to continue"
read
clear
echo "Have a nice Day $name"
| true
|
c627cd5082b703445d111ec4c72579807d82c66a
|
Shell
|
hellohellenmao/Intern-Auto
|
/check_reports_img_end_offset.sh
|
UTF-8
| 634
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Case RHEL7-6968: verify that "qemu-img check" reports the image end offset
# in both the human-readable and the JSON output formats.
echo "Case RHEL7-6968 begin!"

qemu-img create -f qcow2 base.qcow2 20G > /dev/null 2>&1

# Human-readable output must mention "Image end offset".
qemu-img check base.qcow2 > check.log
if ! grep "Image end offset" check.log > /dev/null 2>&1; then
	echo "There is no offset info in the check log file, check please."
	exit 1
fi

# JSON output must carry the "image-end-offset" key.
qemu-img check base.qcow2 --output=json > check_json.log
if ! grep "image-end-offset" check_json.log > /dev/null 2>&1; then
	echo "There is no offset info in the check log file, check please."
	exit 1
fi

echo "Check reports image end offset test(RHEL7-6968) done, and passed!"
| true
|
b8c8ae46f14f483fd0200d16d21e5839b55b7b3a
|
Shell
|
stahta01/MINGW-opt-packages
|
/mingw-w64-opt-crt5-git/PKGBUILD
|
UTF-8
| 9,887
| 2.890625
| 3
|
[] |
no_license
|
# Maintainer: Tim Stahlhut <stahta01@gmail.com>
# PKGBUILD for the MinGW-w64 CRT + headers built from git, installed under an
# /opt prefix so it can coexist with the regular MSYS2 toolchain.
_pkg_prefix=${MINGW_PREFIX}/opt
_basename=crt
_realname=${_basename}
pkgbase=mingw-w64-opt-${_basename}5-git
pkgname=(
"${MINGW_PACKAGE_PREFIX}-opt-${_basename}-git"
"${MINGW_PACKAGE_PREFIX}-opt-headers-git"
"${MINGW_PACKAGE_PREFIX}-opt-pthread-stub-headers-git"
# "${MINGW_PACKAGE_PREFIX}-opt-libmangle-git"
)
pkgver=5.0.0.4742.fb1deb11
pkgrel=1
# Select exactly one of _git_branch / _git_tag / _git_commit below; the
# source array is assembled from whichever is set.
_git_branch="v5.x" # 5.0.0.4742.fb1deb11
#_git_branch="v4.x" # 4.0.0.4514.ae0e22cd
#_git_branch="v3.x" # 3.4.0.3935.8c7e0088
#_git_branch="v2.x" #
#_git_tag="v2.0.1" #
pkgdesc='MinGW-w64 CRT for Windows'
arch=('any')
url='https://mingw-w64.sourceforge.io/'
license=('custom')
groups=("${MINGW_PACKAGE_PREFIX}-opt-toolchain")
makedepends=('git' 'make'
# "${MINGW_PACKAGE_PREFIX}-tools" # Is this really needed?
"${MINGW_PACKAGE_PREFIX}-opt-gcc"
"${MINGW_PACKAGE_PREFIX}-binutils")
options=('!strip' 'staticlibs' '!buildflags' '!emptydirs')
_sourcedir=mingw-w64-git
_git_repo_url=git+https://git.code.sf.net/p/mingw-w64/mingw-w64
if [ -n "$_git_branch" ]; then
source=(${_sourcedir}::"${_git_repo_url}#branch=$_git_branch")
elif [ -n "$_git_tag" ]; then
source=(${_sourcedir}::"${_git_repo_url}#tag=$_git_tag")
elif [ -n "$_git_commit" ]; then
source=(${_sourcedir}::"${_git_repo_url}#commit=$_git_commit")
else
source=(${_sourcedir}::"${_git_repo_url}")
fi
source+=(0001-Allow-to-use-bessel-and-complex-functions-without-un.patch)
sha256sums=('SKIP'
'd641257f7e1469aff89adc33e57702b75fe9667ca035978f890eae1020b6814c')
_git_base_commit=
# makepkg pkgver(): derive "major.minor.rev.count.shorthash" from the
# checked-out mingw-w64 sources.
pkgver() {
cd "${srcdir}/${_sourcedir}"
# Major/minor come from _mingw_mac.h (STRINGIFY lines filtered out first).
local _major=$(head -n 16 mingw-w64-headers/crt/_mingw_mac.h | sed '/__MINGW64_STRINGIFY/d' | grep '__MINGW64_VERSION_MAJOR' | sed -e 's/.* //' | tr '\n' '.' | sed 's/.$/\n/')
local _minor=$(head -n 16 mingw-w64-headers/crt/_mingw_mac.h | sed '/__MINGW64_STRINGIFY/d' | grep '__MINGW64_VERSION_MINOR' | sed -e 's/.* //' | tr '\n' '.' | sed 's/.$/\n/')
local _rev=0
# NOTE(review): depends on _git_base_commit assigned in prepare(); confirm
# makepkg runs both functions in the same shell so the value is visible here.
printf "%s.%s.%s.%s.%s" ${_major} ${_minor} ${_rev} "$(git rev-list --count $_git_base_commit)" "$(git rev-parse --short $_git_base_commit)"
}
# makepkg prepare(): record the checked-out commit, apply the local patch,
# and regenerate libmangle's autotools build system.
prepare() {
cd ${srcdir}/${_sourcedir}
# Remember the base commit for pkgver()'s count/hash computation.
_git_base_commit=$(git rev-parse HEAD)
echo "_git_base_commit := $_git_base_commit"
# Apply the bessel/complex-functions patch, keeping the author date.
git am --committer-date-is-author-date "${srcdir}/0001-Allow-to-use-bessel-and-complex-functions-without-un.patch"
cd "${srcdir}"/${_sourcedir}/mingw-w64-libraries/libmangle
autoreconf -vfi
}
# makepkg build(): configure+install the headers into a staging tree, then
# build the CRT against them with the /opt gcc. (The libmangle build at the
# bottom is currently disabled.)
build() {
# Per-architecture library selection flags for the CRT configure.
local _crt_configure_args
case "$CARCH" in
i686)
_crt_configure_args="--disable-lib64 --enable-lib32"
;;
x86_64)
_crt_configure_args="--disable-lib32 --enable-lib64"
;;
armv7)
_crt_configure_args="--disable-lib32 --disable-lib64 --enable-libarm32"
;;
aarch64)
_crt_configure_args="--disable-lib32 --disable-lib64 --disable-libarm32 --enable-libarm64"
;;
esac
# case "${CARCH}" in
# i686)
# local _local_gcc32_prefix=/c/GreenApps32/gcc_4.6.0-mingw32_x86_generic/mingw
# export PATH="${_local_gcc32_prefix}/bin":$PATH
# export GNATBIND="${_local_gcc32_prefix}/bin/gnatbind"
# export GNATMAKE="${_local_gcc32_prefix}/bin/gnatmake"
# export CC="${_local_gcc32_prefix}/bin/gcc"
# export CXX="${_local_gcc32_prefix}/bin/g++"
# ;;
#
# x86_64)
# local _local_gcc64_prefix=/c/GreenApps64/gcc_4.6.0_mingw64_x86_64_K8+ada/mingw64
# export PATH="${_local_gcc64_prefix}/bin":$PATH
# export GNATBIND="${_local_gcc64_prefix}/bin/gnatbind"
# export GNATMAKE="${_local_gcc64_prefix}/bin/gnatmake"
# export CC="${_local_gcc64_prefix}/bin/gcc"
# export CXX="${_local_gcc64_prefix}/bin/g++"
# ;;
# esac
# Build with the gcc installed under ${MINGW_PREFIX}/opt.
local _local_gcc_prefix=${MINGW_PREFIX}/opt
export PATH="${_local_gcc_prefix}/bin":$PATH
export GNATBIND="${_local_gcc_prefix}/bin/gnatbind"
export GNATMAKE="${_local_gcc_prefix}/bin/gnatmake"
export CC="${_local_gcc_prefix}/bin/gcc"
export CXX="${_local_gcc_prefix}/bin/g++"
msg "Configuring ${MINGW_CHOST} headers"
[[ -d ${srcdir}/headers-${MINGW_CHOST} ]] && rm -rf ${srcdir}/headers-${MINGW_CHOST}
mkdir -p ${srcdir}/headers-${MINGW_CHOST} && cd ${srcdir}/headers-${MINGW_CHOST}
${srcdir}/${_sourcedir}/mingw-w64-headers/configure \
--build=${MINGW_CHOST} \
--host=${MINGW_CHOST} \
--target=${MINGW_CHOST} \
--prefix=${_pkg_prefix}/${MINGW_CHOST} \
--enable-sdk=all \
--enable-idl \
--without-widl
msg "Installing local copy of ${MINGW_CHOST} headers"
cd ${srcdir}/headers-${MINGW_CHOST}
# Stage the headers under ${srcdir} so the CRT build below can use them.
rm -fr ${srcdir}/${_pkg_prefix}/${MINGW_CHOST}/include
make DESTDIR=${srcdir} install
msg "Building ${MINGW_CHOST} CRT"
[[ -d ${srcdir}/crt-${MINGW_CHOST} ]] && rm -rf ${srcdir}/crt-${MINGW_CHOST}
mkdir -p ${srcdir}/crt-${MINGW_CHOST} && cd ${srcdir}/crt-${MINGW_CHOST}
# Point CFLAGS at the just-staged headers for this configure invocation only.
CFLAGS+=" -I${srcdir}/${_pkg_prefix}/${MINGW_CHOST}/include" \
${srcdir}/${_sourcedir}/mingw-w64-crt/configure \
--prefix=${_pkg_prefix}/${MINGW_CHOST} \
--build=${MINGW_CHOST} \
--host=${MINGW_CHOST} \
--target=${MINGW_CHOST} \
--with-sysroot=${_pkg_prefix}/${MINGW_CHOST} \
--enable-wildcard \
${_crt_configure_args}
make
msg "Installing local copy of ${MINGW_CHOST} CRT"
cd ${srcdir}/crt-${MINGW_CHOST}
# NOTE(review): "${srcdir}${_pkg_prefix}" here has no "/" separator, unlike
# the header install above — works only if _pkg_prefix starts with "/";
# confirm.
rm -fr ${srcdir}${_pkg_prefix}/${MINGW_CHOST}/lib
make DESTDIR=${srcdir} install-strip
# msg "Building ${MINGW_CHOST} libmangle"
# [[ -d ${srcdir}/mangle-${MINGW_CHOST} ]] && rm -rf ${srcdir}/mangle-${MINGW_CHOST}
# mkdir -p ${srcdir}/mangle-${MINGW_CHOST} && cd ${srcdir}/mangle-${MINGW_CHOST}
# LDFLAGS+=" -L${srcdir}/${_pkg_prefix}/${MINGW_CHOST}/lib" \
# CFLAGS+=" -I${srcdir}/${_pkg_prefix}/${MINGW_CHOST}/include" \
# ${srcdir}/${_sourcedir}/mingw-w64-libraries/libmangle/configure \
# --prefix=${_pkg_prefix} \
# --build=${MINGW_CHOST} \
# --host=${MINGW_CHOST}
# make
}
# Package the staged MinGW-w64 Windows API headers (minus the pthread stubs,
# which ship in their own split package) plus the upstream license texts.
package_headers() {
  pkgdesc="MinGW-w64 headers for Windows"
  provides=("${MINGW_PACKAGE_PREFIX}-opt-headers")
  conflicts=("${MINGW_PACKAGE_PREFIX}-opt-headers")
  options=('!strip' '!libtool' '!emptydirs')
  msg "Copying ${MINGW_CHOST} headers"
  local _dest=${pkgdir}${_pkg_prefix}/${MINGW_CHOST}
  mkdir -p ${_dest}
  cp --recursive ${srcdir}${_pkg_prefix}/${MINGW_CHOST}/include ${_dest}/
  # Drop the pthread stub headers; package_pthread_stub_headers() ships them.
  local _stub
  for _stub in pthread_signal.h pthread_time.h pthread_unistd.h; do
    rm ${_dest}/include/${_stub}
  done
  msg "Installing MinGW-w64 licenses"
  local _lic=${pkgdir}${_pkg_prefix}/share/licenses/headers
  install -Dm644 ${srcdir}/${_sourcedir}/mingw-w64-headers/ddk/readme.txt ${_lic}/ddk-readme.txt
  install -Dm644 ${srcdir}/${_sourcedir}/COPYING ${_lic}/COPYING
  install -Dm644 ${srcdir}/${_sourcedir}/COPYING.MinGW-w64/COPYING.MinGW-w64.txt ${_lic}/COPYING.MinGW-w64.txt
  install -Dm644 ${srcdir}/${_sourcedir}/COPYING.MinGW-w64-runtime/COPYING.MinGW-w64-runtime.txt ${_lic}/COPYING.MinGW-w64-runtime.txt
}
# Package only the three pthread stub headers.  They conflict with the real
# winpthreads headers, hence the separate split package and the conflicts=().
package_pthread_stub_headers() {
  pkgdesc="MinGW-w64 pthread stub headers for Windows"
  provides=("${MINGW_PACKAGE_PREFIX}-opt-pthread-stub-headers")
  conflicts=("${MINGW_PACKAGE_PREFIX}-opt-pthread-stub-headers" "${MINGW_PACKAGE_PREFIX}-opt-winpthreads")
  msg "Copying ${MINGW_CHOST} pthread stub headers"
  local _incdir=${pkgdir}${_pkg_prefix}/${MINGW_CHOST}/include
  mkdir -p ${_incdir}
  local _hdr
  for _hdr in pthread_signal.h pthread_time.h pthread_unistd.h; do
    cp ${srcdir}${_pkg_prefix}/${MINGW_CHOST}/include/${_hdr} ${_incdir}/
  done
}
# Package the staged CRT libraries built by build(), plus the upstream
# license texts.  Depends on the matching headers package.
package_crt() {
  provides=("${MINGW_PACKAGE_PREFIX}-opt-${_basename}")
  conflicts=("${MINGW_PACKAGE_PREFIX}-opt-${_basename}")
  options=('!strip' 'staticlibs' '!buildflags' '!emptydirs')
  depends=("${MINGW_PACKAGE_PREFIX}-opt-headers-git")
  msg "Copying ${MINGW_CHOST} crt"
  local _dest=${pkgdir}${_pkg_prefix}/${MINGW_CHOST}
  mkdir -p ${_dest}
  cp --recursive ${srcdir}${_pkg_prefix}/${MINGW_CHOST}/lib ${_dest}/
  msg "Installing MinGW-w64 licenses"
  local _licdir=${pkgdir}${_pkg_prefix}/share/licenses/${_realname}
  local _f
  for _f in COPYING COPYING.MinGW-w64/COPYING.MinGW-w64.txt COPYING.MinGW-w64-runtime/COPYING.MinGW-w64-runtime.txt; do
    install -Dm644 ${srcdir}/${_sourcedir}/${_f} ${_licdir}/${_f##*/}
  done
}
# Package libmangle from its build directory plus its license text.
package_libmangle() {
pkgdesc="MinGW-w64 libmangle"
provides=("${MINGW_PACKAGE_PREFIX}-opt-libmangle")
conflicts=("${MINGW_PACKAGE_PREFIX}-opt-libmangle")
options=('strip' 'staticlibs' '!emptydirs')
# NOTE(review): the libmangle configure/make steps in build() are commented
# out, so ${srcdir}/mangle-${MINGW_CHOST} is never created and this function
# will fail if the split package is enabled — confirm intent.
cd ${srcdir}/mangle-${MINGW_CHOST}
make DESTDIR=${pkgdir} install
install -Dm644 ${srcdir}/${_sourcedir}/mingw-w64-libraries/libmangle/COPYING ${pkgdir}${_pkg_prefix}/share/licenses/mingw-w64-libraries/COPYING.libmangle
}
# Wrappers for package functions
# makepkg invokes one package_<pkgname>() function per entry in the split
# pkgname array; each wrapper simply delegates to the shared implementation
# defined above.
# 32-bit wrappers
package_mingw-w64-i686-opt-crt-git() {
package_crt
}
package_mingw-w64-i686-opt-headers-git() {
package_headers
}
package_mingw-w64-i686-opt-pthread-stub-headers-git() {
package_pthread_stub_headers
}
package_mingw-w64-i686-opt-libmangle-git() {
package_libmangle
}
# 64-bit wrappers
package_mingw-w64-x86_64-opt-crt-git() {
package_crt
}
package_mingw-w64-x86_64-opt-headers-git() {
package_headers
}
package_mingw-w64-x86_64-opt-pthread-stub-headers-git() {
package_pthread_stub_headers
}
package_mingw-w64-x86_64-opt-libmangle-git() {
package_libmangle
}
| true
|
aa1850e8ab75baad37400592d29818a4b17f0d72
|
Shell
|
chriswongwk/bootcamps
|
/deploy/asyncdeploy.sh
|
UTF-8
| 966
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run a command on the remote CVM as user 'nutanix' using password auth.
# Globals read: MY_PE_HOST, MY_PE_PASSWORD
# FIX: quote the password and host expansions — unquoted, a password
# containing spaces or glob characters breaks the sshpass invocation.
function remote_exec {
  sshpass -p "$MY_PE_PASSWORD" ssh -o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null "nutanix@$MY_PE_HOST" "$@"
}
# Copy local file $1 to /home/nutanix/$1 on the remote CVM via scp.
# Globals read: MY_PE_HOST, MY_PE_PASSWORD
# FIX: quote the password and host expansions (same defect as remote_exec).
function send_file {
  sshpass -p "$MY_PE_PASSWORD" scp -o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null "$1" "nutanix@$MY_PE_HOST:/home/nutanix/$1"
}
# Run the Acropolis CLI on the remote CVM, forwarding all arguments.
function acli {
remote_exec /usr/local/nutanix/bin/acli "$@"
}
# Run the Nutanix CLI (Prism ncli) on the remote CVM, forwarding all arguments.
function ncli {
remote_exec /home/nutanix/prism/cli/ncli "$@"
}
# For every cluster listed in pocs.txt (one "host|password" per line, lines
# beginning with '#' are comments), send the config scripts and launch
# config.sh in the background on the CVM.
# FIX: the original iterated over `$(cat pocs.txt | grep -v ^#)`, which
# word-splits and needed a `set -f` that was then left enabled globally.
# A while-read loop with IFS='|' parses each line safely instead.
while IFS='|' read -r MY_PE_HOST MY_PE_PASSWORD _; do
  # Skip blank lines and comments (same effect as the old `grep -v ^#`).
  [ -z "$MY_PE_HOST" ] && continue
  case "$MY_PE_HOST" in '#'*) continue ;; esac
  # Send the config scripts
  send_file config.sh
  send_file pcconfig.sh
  # Execute asynchronously; the script keeps running on the CVM in the background
  remote_exec "MY_PE_PASSWORD=${MY_PE_PASSWORD} nohup bash /home/nutanix/config.sh >> config.log 2>&1 &"
done < pocs.txt
| true
|
b6bcc1abcad3e97c8c42d68b06ad38ec2ecbe24e
|
Shell
|
Philosoft/dotfiles
|
/bash_aliases
|
UTF-8
| 1,057
| 2.796875
| 3
|
[] |
no_license
|
# Enable colored output for ls (uses ~/.dircolors when readable).
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
# moc audio player with the "Lianli" theme (-T selects a theme — see mocp(1)).
alias mocp='mocp -T Lianli'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
# Long listing, all entries except . and .., human-readable sizes.
alias la='ls -Alh'
# ncmpc with -c (presumably enables colours — see ncmpc(1)).
alias ncmpc='ncmpc -c'
# Midnight Commander with the zenburn skin.
alias mc='mc -S zenburn'
# List all listening/connected inet sockets with owning processes.
alias openports='netstat --all --numeric --programs --inet'
# Verbose date, e.g. "Monday, January 01, 2024 [12:00:00]".
alias da='date "+%A, %B %d, %Y [%T]"'
# Disk usage of immediate subdirectories only, human-readable.
alias du1='du --max-depth=1 -h'
# Prefer pydf (colourised df) when it is installed.
[[ $(command -v pydf) ]] && alias df='pydf'
alias eject='eject /dev/sr0'
# When grc (generic colouriser) is installed at its standard path, wrap the
# usual text/network tools so their output is colourised.
if [ -f /usr/bin/grc ]; then
  for _grc_cmd in cat tail head ping traceroute netstat diff; do
    alias "$_grc_cmd"="grc $_grc_cmd"
  done
  unset _grc_cmd
fi
# Use the colourised pacman wrapper when available.
[[ $(command -v pacman-color) ]] && alias pacman='pacman-color'
# pm-utils suspend/hibernate helpers.
# FIX: the original tested `command -v pm-suspned` (typo), so these aliases
# could never be defined.
if [[ $(command -v pm-suspend) ]]; then
  alias suspend='pm-suspend'
  alias hibernate='pm-hibernate'
fi
# systemd wins when present: this block runs later, overriding the pm-utils
# aliases above.
if [[ $(command -v systemctl) ]]; then
  alias suspend='systemctl suspend'
  alias hibernate='systemctl hibernate'
fi
# vim:ft=sh
| true
|
70cacbb1578a966efbc23220eada8003d0496c18
|
Shell
|
chazer/bash-scripts
|
/bin/replace-in-names
|
UTF-8
| 564
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Recursively rename files under <path>, replacing <search> with <replace>
# in each file's path.  Usage: replace-in-names <path> <search> <replace>
if [ -z "$3" ]; then
  echo "No argument supplied">&2
  cat << EOF >&2
Usage:
  $0 <path> <search> <replace>
Example:
  $0 ./src/ XXXX plugin
EOF
  exit 1
fi
DIR="$1"
SEARCH="$2"
REPLACE="$3"
if [ ! -d "$DIR" ]; then
  echo "Source path $DIR doesn't exist">&2
  exit 2
fi
# Escape characters that are special in a sed *pattern*.
XSEARCH="$( echo "$SEARCH" | sed -e 's/[]\/$*.^|[]/\\&/g' )"
# Escape characters that are special in a sed *replacement*: backslash, the
# '/' delimiter and '&' (which would otherwise insert the matched text).
# FIX: the original reused the pattern escaper, which does not escape '&',
# so a replacement containing '&' produced wrong names.
XREPLACE="$( echo "$REPLACE" | sed -e 's/[\/&]/\\&/g' )"
# Pass the pattern/replacement as positional arguments to the inner shell
# instead of splicing them into the script text, so filenames and escaped
# strings containing spaces are handled safely.
find -L "${DIR}/" -type f -exec sh -c 'mv -- "$1" "$(printf %s "$1" | sed "s/$2/$3/")"' _ {} "$XSEARCH" "$XREPLACE" \;
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.