blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a42f2a8d2433680e5353546c1bd180975c5f7392 | Shell | AlekseySyryh/otus_linux | /05.proc/Part1/script.sh | UTF-8 | 1,829 | 3.8125 | 4 | [] | no_license | #!/bin/bash
# Number of clock ticks per second; /proc/<pid>/stat reports CPU times in ticks.
clock_ticks=$(getconf CLK_TCK)
echo " PID TTY STAT TIME COMMAND"
# Terminal width is loop-invariant: compute it once, and fall back to 80
# columns when stdout is not a terminal (`tput cols` fails or prints nothing
# there; the original called it per PID and broke without a TTY).
cols=$(tput cols 2>/dev/null) || cols=80
[[ $cols =~ ^[0-9]+$ ]] || cols=80
# Width left for COMMAND after the fixed-format columns.
remain=$((cols - 27))
while read -r pid; do
  # The process may have exited since the listing; read its stat ONCE and
  # skip it if it is gone (the original ran ~10 separate awk passes over
  # /proc/$pid/stat, each of which could race with process exit).
  stat_line=$(cat "/proc/$pid/stat" 2>/dev/null) || continue
  # Field 2 (comm) is parenthesized and may itself contain spaces or
  # parentheses, which breaks naive whitespace field splitting. Take comm
  # as everything between the first '(' and the LAST ')', and split only
  # what follows so the later field positions stay stable.
  comm=${stat_line#*'('}
  comm=${comm%')'*}
  read -r state _ _ session tty_nr tpgid _ _ _ _ _ \
    utime stime cutime cstime _ nice threads _ <<<"${stat_line##*') '}"
  # TTY: decode tty_nr into a device name (major 4 = console ttys,
  # major 136 = pseudo-terminals; anything else shows as '?').
  if ((tty_nr == 0)); then
    tty='?'
  else
    major=$((tty_nr / 256))
    minor=$((tty_nr % 256))
    case $major in
      4)   tty="tty$minor" ;;
      136) tty="pts/$minor" ;;
      *)   tty='?' ;;
    esac
  fi
  # Status: one-letter state plus ps-style flag suffixes.
  status=$state
  # High/low priority (nice value).
  if ((nice < 0)); then
    status="${status}<"
  elif ((nice > 0)); then
    status="${status}N"
  fi
  # Has pages locked into memory (non-zero VmLck).
  if grep -Eq 'VmLck:.+[1-9]' "/proc/$pid/status" 2>/dev/null; then
    status="${status}L"
  fi
  # Session leader.
  if ((pid == session)); then
    status="${status}s"
  fi
  # Multi-threaded.
  if ((threads > 1)); then
    status="${status}l"
  fi
  # In the foreground process group of its tty.
  if ((pid == tpgid)); then
    status="${status}+"
  fi
  # Total CPU time (user+system, including reaped children) in seconds.
  time=$(((utime + stime + cutime + cstime) / clock_ticks))
  printf '%5d %-8s %-6s %d:%02d ' "$pid" "$tty" "$status" $((time / 60)) $((time % 60))
  # Command line truncated to the remaining width; NUL separators between
  # argv entries become spaces; drop trailing padding spaces.
  cmdline=$(head -c "$remain" "/proc/$pid/cmdline" 2>/dev/null | tr '\0' ' ')
  cmdline=${cmdline%"${cmdline##*[! ]}"}
  if [[ -z $cmdline ]]; then
    # Kernel threads have an empty cmdline; show comm like ps does, with
    # parentheses mapped to brackets (e.g. "[kthreadd]").
    comm=${comm//'('/'['}
    comm=${comm//')'/']'}
    cmdline="[$comm]"
  fi
  printf '%s\n' "$cmdline"
done < <(ls /proc | grep -E '^[0-9]+$' | sort -n)
| true |
9d2363971d25132c0ccfbd75e5bd5200bc7cc16c | Shell | petronny/aur3-mirror | /amaya/PKGBUILD | UTF-8 | 2,974 | 2.625 | 3 | [] | no_license | # Contributor: Brad Fanella <bradfanella@archlinux.us>
# Contributor: Zerial <fernando@zerial.org>
# Contributor: Dalius <dagis@takas.lt>
# Contributor: Sergej Pupykin <pupykin.s+arch@gmail.com>
pkgname=amaya
pkgver=11.4.4
pkgrel=10
pkgdesc="W3C's WYSIWYG HTML Editor"
arch=('i686' 'x86_64')
url="http://www.w3.org/Amaya/"
license=('W3C')
depends=('wxgtk' 'raptor1' 'glu')
makedepends=('perl' 'mesa')
# Parallel make is disabled; the Amaya build tree is not parallel-safe.
options=('!makeflags')
install=$pkgname.install
# Local patches first, then the upstream source tarball, then the optional
# translation dictionary tarballs fetched from w3.org.
source=(amaya-fix-thotlib-png15.patch \
amaya-wakeupidle.patch \
amaya-splitmode.patch \
amaya-wxyield.patch \
explicite_linking2.patch \
gzread.patch \
amaya-11.4.4-socketbufsize.patch \
configure.patch \
ftp://ftp.w3.org/pub/$pkgname/$pkgname-sources-$pkgver.tgz \
http://www.w3.org/Amaya/Distribution/Dutch.tgz \
http://www.w3.org/Amaya/Distribution/German.tgz \
http://www.w3.org/Amaya/Distribution/Italian.tgz \
http://www.w3.org/Amaya/Distribution/Spanish.tgz \
http://www.w3.org/Amaya/Distribution/Swedish.tgz)
# Checksums are in the same order as the source=() entries above.
md5sums=('6ebd78d57ee0a4b30b2dfb3369439288'
'32347b32aded742b705a2038416f74de'
'bc42d4b3ff7b43c8d0f7955dd1498b01'
'c42175f9cc9e90277547828b9cf6a92a'
'572cdeaa2a6b318217f69c37edee116c'
'f35ae7158133b6d39aa6a83e11bc261b'
'9fc28d3fd4da1147f8baefa46ec3ae52'
'd095a76b4ccf6203cf78f8919a2693a4'
'e8072c7b1d06b983951c56e9f51fbacf'
'3edb9cce5ce160d7270b23808c1d5981'
'400eeeae974a64d23de4fcdd609c30bc'
'05e2d25ee8af11faaaa25a33da89d504'
'b504a75cd0f789a3046bf2041067b18b'
'6536ab2e31e3f58618ba79d9fddc7f76')
# build(): apply the local patches, point the bundled Mesa at the target CPU
# architecture, then configure and compile Amaya out-of-tree under Amaya/WX.
# Runs with makepkg-provided $srcdir and $CARCH.
build() {
cd $srcdir/Amaya$pkgver
# Local fixes (libpng 1.5, wx behavior, gzread, socket buffer size, ...).
patch -p1 < $srcdir/amaya-fix-thotlib-png15.patch
patch -p1 < $srcdir/amaya-wakeupidle.patch
patch -p1 < $srcdir/amaya-splitmode.patch
patch -p1 < $srcdir/amaya-wxyield.patch
patch -p1 < $srcdir/explicite_linking2.patch
patch -p1 < $srcdir/gzread.patch
patch -p1 < $srcdir/amaya-11.4.4-socketbufsize.patch
# This one is rooted one directory higher, hence -p2.
patch -p2 < $srcdir/configure.patch
# Select the Mesa build config matching the packaging architecture.
cd Mesa/configs
rm current
[[ $CARCH == x86_64 ]] && ln -s linux-x86-64 current
[[ $CARCH == i686 ]] && ln -s linux-x86 current
cd ../../Amaya
# Start from a clean out-of-tree build directory.
if [ -d ./WX ]; then
rm -rf WX
fi
mkdir WX; cd WX
../configure --prefix=/usr/share --exec=/usr/share \
--datadir=/usr/share --enable-system-raptor \
--enable-system-wx --with-gl
make
}
# package(): install the compiled tree into $pkgdir, relocate the launcher
# script to /usr/bin, and add translation dictionaries and documentation.
package() {
cd $srcdir/Amaya$pkgver/Amaya/WX
install -d $pkgdir/usr/share/Amaya/resources/{svg,icons}
# The build's install target honors `prefix`; everything lands under
# $pkgdir/usr/bin/Amaya first and is rearranged below.
make prefix=$pkgdir/usr/bin install
# Move the launcher to /usr/bin and drop the original copy.
install -Dm755 $pkgdir/usr/bin/Amaya/wx/bin/amaya $pkgdir/usr/bin/amaya
rm $pkgdir/usr/bin/Amaya/wx/bin/amaya
# Rewrite the path embedded in the launcher (share -> bin) so it finds the
# relocated installation tree.
sed -i s+share+bin+ $pkgdir/usr/bin/amaya
cd $srcdir
# Unpack the downloaded translation tarballs; each provides a *princ.dic.
for _i in Dutch German Italian Spanish Swedish
do
bsdtar xf $srcdir/${_i}.tgz
done
for _i in Gprinc.dic Iprinc.dic Nprinc.dic Sprinc.dic Wprinc.dic
do
install -Dm644 ${_i} $pkgdir/usr/share/Amaya/dicopar/${_i}
done
install -d $pkgdir/usr/share/doc/amaya
cp -r $srcdir/Amaya$pkgver/Amaya/doc/* $pkgdir/usr/share/doc/amaya
}
| true |
149920642a4d5d4305eed05cfb274be5b3ef61f0 | Shell | microsoft/MLOS | /mlos_bench/mlos_bench/config/environments/os/linux/boot/scripts/remote/prepare-os-boot-time.sh | UTF-8 | 685 | 3.65625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
##
## Copyright (c) Microsoft Corporation.
## Licensed under the MIT License.
##
# Script to store old grub config and reboot VM (if necessary).
# Config file created in scheduler should have been moved to
# VM BEFORE this script is run.
# This script should be run in the VM.
set -eu
# Resolve the directory containing this script so relative paths work no
# matter where the caller's CWD is.
scriptdir=$(dirname "$(readlink -f "$0")")
cd "$scriptdir"
# Provides $ORIG_BOOT_TIME (path where the pre-change config copy is kept).
source ./common-boot-time.sh
# remove original boot time parameters file if it exists
rm -f "$ORIG_BOOT_TIME"
# create copy of original boot-time parameters
cp /etc/default/grub.cfg "$ORIG_BOOT_TIME"
update-grub
# check if the real config file has changed
# NOTE(review): this compares the regenerated /boot/grub/grub.cfg against the
# saved copy of /etc/default/grub.cfg -- two different files with different
# formats -- and `diff` succeeds (triggering the reboot) only when they are
# IDENTICAL, i.e. the branch fires when nothing appears to have changed.
# Both points look inverted/mismatched relative to the comment above;
# confirm the intended semantics upstream before relying on this.
if diff -u /boot/grub/grub.cfg "$ORIG_BOOT_TIME"; then
reboot
fi
| true |
a62d5aa64bea7bef40f92ebcc2c737d32260e8e4 | Shell | XingyuXu-cuhk/Landuse_DL | /thawslumpScripts/exe_qtp.sh | UTF-8 | 2,661 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#introduction: Run the whole process of mapping thaw slumps base on DeeplabV3+ on a large area like Tibetan Plateau
#
#authors: Huang Lingcao
#email:huanglingcao@gmail.com
#add time: 30 September, 2019
#MAKE SURE the /usr/bin/python, which is python2 on Cryo06
export PATH=/usr/bin:$PATH
# python2 on Cryo03, tensorflow 1.6
# NOTE(review): this prepend overrides the /usr/bin prepend above, so the
# anaconda2 python wins when both machines' lines are left uncommented --
# presumably one line is meant to be commented out per machine; confirm.
export PATH=~/programs/anaconda2/bin:$PATH
# Exit immediately if a command exits with a non-zero status. E: error trace
set -eE -o functrace
# Update the Landuse_DL code base before running the pipeline.
eo_dir=~/codes/PycharmProjects/Landuse_DL
cd ${eo_dir}
git pull
cd -
## modify according to test requirement or environment
#set GPU on Cryo06
export CUDA_VISIBLE_DEVICES=1
#set GPU on Cryo03
export CUDA_VISIBLE_DEVICES=0,1 # comment this line if run on the ITSC cluster
gpu_num=2
# Main configuration file consumed by every pipeline step below.
para_file=para_qtp.ini
################################################
# Stage 1: data preparation (timed via bash's SECONDS counter).
SECONDS=0
# remove previous data or results if necessary
${eo_dir}/thawslumpScripts/remove_previous_data.sh ${para_file}
#extract sub_images based on the training polygons
${eo_dir}/thawslumpScripts/get_sub_images_multi_files.py ${para_file}
################################################
## preparing training images.
# there is another script ("build_RS_data.py"), but it seems unfinished. 26 Oct 2018 hlc
${eo_dir}/thawslumpScripts/split_sub_images.py ${para_file}
${eo_dir}/thawslumpScripts/training_img_augment.sh ${para_file}
## convert to TFrecord
python ${eo_dir}/datasets/build_muti_lidar_data.py
#exit
duration=$SECONDS
echo "$(date): time cost of preparing training: ${duration} seconds">>"time_cost.txt"
SECONDS=0
################################################
## training
# Stage 2: train DeeplabV3+ on the prepared TFrecords.
${eo_dir}/grss_data_fusion/deeplab_mutiLidar_train.sh ${para_file} ${gpu_num}
duration=$SECONDS
echo "$(date): time cost of training: ${duration} seconds">>"time_cost.txt"
SECONDS=0
################################################
#export model
# Stage 3: freeze the trained model for inference.
${eo_dir}/grss_data_fusion/export_graph.sh ${para_file}
################################################
## inference
# Stage 4: prediction, post-processing and merging of the mapped polygons.
${eo_dir}/sentinelScripts/parallel_predict_rts.py ${para_file}
## post processing and copy results, including output "time_cost.txt"
test_name=1
${eo_dir}/sentinelScripts/postProc_qtp.sh ${para_file} ${test_name}
## merge polygons
${eo_dir}/sentinelScripts/merge_shapefiles.sh ${para_file} ${test_name}
################################################
#${eo_dir}/thawslumpScripts/accuracies_assess.sh ${para_file}
################################################
## conduct polygon-based change detection based on the multi-temporal mapping results
cd_code=~/codes/PycharmProjects/ChangeDet_DL
${cd_code}/thawSlumpChangeDet/polygons_cd_multi_exe.py ${para_file} ${test_name}
59f68b8ca61d54d4ba05c7c06c00844fc2168982 | Shell | Nextdoor/conda_lockfile | /ci/install.sh | UTF-8 | 574 | 3.125 | 3 | [] | no_license | #!/bin/bash
set -e

# Select the Miniconda installer matching the current OS. macOS additionally
# needs wget from Homebrew (auto-update suppressed to keep CI fast).
case "$(uname)" in
  Darwin)
    installer_url="https://repo.anaconda.com/miniconda/Miniconda3-py37_4.10.3-MacOSX-x86_64.sh"
    HOMEBREW_NO_AUTO_UPDATE=1 brew install wget
    ;;
  *)
    installer_url="https://repo.anaconda.com/miniconda/Miniconda3-py37_4.10.3-Linux-x86_64.sh"
    ;;
esac

# Download and install Miniconda non-interactively, then put it on PATH.
wget "$installer_url" -O miniconda.sh
bash miniconda.sh -b -p "$HOME/miniconda"
export PATH="$HOME/miniconda/bin:$PATH"

# Non-interactive conda defaults, then bring conda itself up to date.
conda config --set always_yes yes --set changeps1 no
conda update -q conda

# Useful for debugging any issues with conda
conda info -a

# Tooling needed to build and upload conda packages.
conda install -q conda-build anaconda-client
9d337251613053d1433d18e62d594157c6f42dbb | Shell | romkatv/powerlevel10k | /gitstatus/gitstatus.plugin.zsh | UTF-8 | 33,986 | 3.140625 | 3 | [
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Apache-2.0"
] | permissive | # Zsh bindings for gitstatus.
#
# ------------------------------------------------------------------
#
# Example: Start gitstatusd, send it a request, wait for response and print it.
#
# source ~/gitstatus/gitstatus.plugin.zsh
# gitstatus_start MY
# gitstatus_query -d $PWD MY
# typeset -m 'VCS_STATUS_*'
#
# Output:
#
# VCS_STATUS_ACTION=''
# VCS_STATUS_COMMIT=c000eddcff0fb38df2d0137efe24d9d2d900f209
# VCS_STATUS_COMMITS_AHEAD=0
# VCS_STATUS_COMMITS_BEHIND=0
# VCS_STATUS_COMMIT_ENCODING=''
# VCS_STATUS_COMMIT_SUMMARY='pull upstream changes from gitstatus'
# VCS_STATUS_HAS_CONFLICTED=0
# VCS_STATUS_HAS_STAGED=0
# VCS_STATUS_HAS_UNSTAGED=1
# VCS_STATUS_HAS_UNTRACKED=1
# VCS_STATUS_INDEX_SIZE=33
# VCS_STATUS_LOCAL_BRANCH=master
# VCS_STATUS_NUM_ASSUME_UNCHANGED=0
# VCS_STATUS_NUM_CONFLICTED=0
# VCS_STATUS_NUM_STAGED=0
# VCS_STATUS_NUM_UNSTAGED=1
# VCS_STATUS_NUM_SKIP_WORKTREE=0
# VCS_STATUS_NUM_STAGED_NEW=0
# VCS_STATUS_NUM_STAGED_DELETED=0
# VCS_STATUS_NUM_UNSTAGED_DELETED=0
# VCS_STATUS_NUM_UNTRACKED=1
# VCS_STATUS_PUSH_COMMITS_AHEAD=0
# VCS_STATUS_PUSH_COMMITS_BEHIND=0
# VCS_STATUS_PUSH_REMOTE_NAME=''
# VCS_STATUS_PUSH_REMOTE_URL=''
# VCS_STATUS_REMOTE_BRANCH=master
# VCS_STATUS_REMOTE_NAME=origin
# VCS_STATUS_REMOTE_URL=git@github.com:romkatv/powerlevel10k.git
# VCS_STATUS_RESULT=ok-sync
# VCS_STATUS_STASHES=0
# VCS_STATUS_TAG=''
# VCS_STATUS_WORKDIR=/home/romka/powerlevel10k
# Do nothing unless sourced from an interactive shell. Every word here is
# quoted so user-defined aliases cannot hijack these commands before
# no_aliases takes effect below.
[[ -o 'interactive' ]] || 'return'
# Temporarily change options.
'builtin' 'local' '-a' '_gitstatus_opts'
[[ ! -o 'aliases' ]] || _gitstatus_opts+=('aliases')
[[ ! -o 'sh_glob' ]] || _gitstatus_opts+=('sh_glob')
[[ ! -o 'no_brace_expand' ]] || _gitstatus_opts+=('no_brace_expand')
'builtin' 'setopt' 'no_aliases' 'no_sh_glob' 'brace_expand'
# Required zsh facilities: add-zsh-hook for exit cleanup, zsh/datetime for
# EPOCHREALTIME/EPOCHSECONDS, zsh/system for sysread/sysopen/zsystem, and
# the zf_rm builtin used for lock/fifo cleanup.
autoload -Uz add-zsh-hook || return
zmodload zsh/datetime zsh/system || return
zmodload -F zsh/files b:zf_rm || return
# Remember the directory containing this script. "${1:-}" optionally suffixes
# the parameter name so several copies of the plugin can coexist.
typeset -g _gitstatus_plugin_dir"${1:-}"="${${(%):-%x}:A:h}"
# Retrieves status of a git repo from a directory under its working tree.
#
## Usage: gitstatus_query [OPTION]... NAME
#
# -d STR Directory to query. Defaults to the current directory. Has no effect if GIT_DIR
# is set.
# -c STR Callback function to call once the results are available. Called only after
# gitstatus_query returns 0 with VCS_STATUS_RESULT=tout.
# -t FLOAT Timeout in seconds. Negative value means infinity. Will block for at most this long.
# If no results are available by then: if -c isn't specified, will return 1; otherwise
# will set VCS_STATUS_RESULT=tout and return 0.
# -p Don't compute anything that requires reading Git index. If this option is used,
# the following parameters will be 0: VCS_STATUS_INDEX_SIZE,
# VCS_STATUS_{NUM,HAS}_{STAGED,UNSTAGED,UNTRACKED,CONFLICTED}.
#
# On success sets VCS_STATUS_RESULT to one of the following values:
#
# tout Timed out waiting for data; will call the user-specified callback later.
# norepo-sync The directory isn't a git repo.
# ok-sync The directory is a git repo.
#
# When the callback is called, VCS_STATUS_RESULT is set to one of the following values:
#
# norepo-async The directory isn't a git repo.
# ok-async The directory is a git repo.
#
# If VCS_STATUS_RESULT is ok-sync or ok-async, additional variables are set:
#
# VCS_STATUS_WORKDIR Git repo working directory. Not empty.
# VCS_STATUS_COMMIT Commit hash that HEAD is pointing to. Either 40 hex digits or
# empty if there is no HEAD (empty repo).
# VCS_STATUS_COMMIT_ENCODING Encoding of the HEAD's commit message. Empty value means UTF-8.
# VCS_STATUS_COMMIT_SUMMARY The first paragraph of the HEAD's commit message as one line.
# VCS_STATUS_LOCAL_BRANCH Local branch name or empty if not on a branch.
# VCS_STATUS_REMOTE_NAME The remote name, e.g. "upstream" or "origin".
# VCS_STATUS_REMOTE_BRANCH Upstream branch name. Can be empty.
# VCS_STATUS_REMOTE_URL Remote URL. Can be empty.
# VCS_STATUS_ACTION Repository state, A.K.A. action. Can be empty.
# VCS_STATUS_INDEX_SIZE The number of files in the index.
# VCS_STATUS_NUM_STAGED The number of staged changes.
# VCS_STATUS_NUM_CONFLICTED The number of conflicted changes.
# VCS_STATUS_NUM_UNSTAGED The number of unstaged changes.
# VCS_STATUS_NUM_UNTRACKED The number of untracked files.
# VCS_STATUS_HAS_STAGED 1 if there are staged changes, 0 otherwise.
# VCS_STATUS_HAS_CONFLICTED 1 if there are conflicted changes, 0 otherwise.
# VCS_STATUS_HAS_UNSTAGED 1 if there are unstaged changes, 0 if there aren't, -1 if
# unknown.
# VCS_STATUS_NUM_STAGED_NEW The number of staged new files. Note that renamed files
# are reported as deleted plus new.
# VCS_STATUS_NUM_STAGED_DELETED The number of staged deleted files. Note that renamed files
# are reported as deleted plus new.
# VCS_STATUS_NUM_UNSTAGED_DELETED The number of unstaged deleted files. Note that renamed files
# are reported as deleted plus new.
# VCS_STATUS_HAS_UNTRACKED 1 if there are untracked files, 0 if there aren't, -1 if
# unknown.
# VCS_STATUS_COMMITS_AHEAD Number of commits the current branch is ahead of upstream.
# Non-negative integer.
# VCS_STATUS_COMMITS_BEHIND Number of commits the current branch is behind upstream.
# Non-negative integer.
# VCS_STATUS_STASHES Number of stashes. Non-negative integer.
# VCS_STATUS_TAG The last tag (in lexicographical order) that points to the same
# commit as HEAD.
# VCS_STATUS_PUSH_REMOTE_NAME The push remote name, e.g. "upstream" or "origin".
# VCS_STATUS_PUSH_REMOTE_URL Push remote URL. Can be empty.
# VCS_STATUS_PUSH_COMMITS_AHEAD Number of commits the current branch is ahead of push remote.
# Non-negative integer.
# VCS_STATUS_PUSH_COMMITS_BEHIND Number of commits the current branch is behind push remote.
# Non-negative integer.
# VCS_STATUS_NUM_SKIP_WORKTREE The number of files in the index with skip-worktree bit set.
# Non-negative integer.
# VCS_STATUS_NUM_ASSUME_UNCHANGED The number of files in the index with assume-unchanged bit set.
# Non-negative integer.
#
# The point of reporting -1 via VCS_STATUS_HAS_* is to allow the command to skip scanning files in
# large repos. See -m flag of gitstatus_start.
#
# gitstatus_query returns an error if gitstatus_start hasn't been called in the same shell or
# the call had failed.
#
# !!!!! WARNING: CONCURRENT CALLS WITH THE SAME NAME ARE NOT ALLOWED !!!!!
#
# It's illegal to call gitstatus_query if the last asynchronous call with the same NAME hasn't
# completed yet. If you need to issue concurrent requests, use different NAME arguments.
function gitstatus_query"${1:-}"() {
  emulate -L zsh -o no_aliases -o extended_glob -o typeset_silent
  # Optional suffix ("" normally) that namespaces helper functions when
  # several instances of the plugin are sourced side by side.
  local fsuf=${${(%):-%N}#gitstatus_query}
  unset VCS_STATUS_RESULT
  local opt dir callback OPTARG
  local -i no_diff OPTIND
  # Negative timeout means "block until a response arrives".
  local -F timeout=-1
  while getopts ":d:c:t:p" opt; do
    case $opt in
      +p) no_diff=0;;
      p) no_diff=1;;
      d) dir=$OPTARG;;
      c) callback=$OPTARG;;
      t)
        # -t must be a (possibly signed) decimal, optionally in scientific
        # notation; <-> is a zsh glob matching a run of digits.
        if [[ $OPTARG != (|+|-)<->(|.<->)(|[eE](|-|+)<->) ]]; then
          print -ru2 -- "gitstatus_query: invalid -t argument: $OPTARG"
          return 1
        fi
        timeout=OPTARG
      ;;
      \?) print -ru2 -- "gitstatus_query: invalid option: $OPTARG" ; return 1;;
      :) print -ru2 -- "gitstatus_query: missing required argument: $OPTARG"; return 1;;
      *) print -ru2 -- "gitstatus_query: invalid option: $opt" ; return 1;;
    esac
  done
  if (( OPTIND != ARGC )); then
    print -ru2 -- "gitstatus_query: exactly one positional argument is required"
    return 1
  fi
  local name=$*[OPTIND]
  if [[ $name != [[:IDENT:]]## ]]; then
    print -ru2 -- "gitstatus_query: invalid positional argument: $name"
    return 1
  fi
  # State 2 means gitstatus_start has fully initialized this instance.
  (( _GITSTATUS_STATE_$name == 2 )) || return
  # Normalize the query path to an absolute path. When GIT_DIR is set, the
  # path is prefixed with ':' to tell gitstatusd it is a git dir rather than
  # a directory inside a working tree.
  if [[ -z $GIT_DIR ]]; then
    if [[ $dir != /* ]]; then
      if [[ $PWD == /* && $PWD -ef . ]]; then
        dir=$PWD/$dir
      else
        dir=${dir:a}
      fi
    fi
  else
    if [[ $GIT_DIR == /* ]]; then
      dir=:$GIT_DIR
    elif [[ $PWD == /* && $PWD -ef . ]]; then
      dir=:$PWD/$GIT_DIR
    else
      dir=:${GIT_DIR:a}
    fi
  fi
  # A path that is still not absolute cannot be in a repo; answer right away.
  if [[ $dir != (|:)/* ]]; then
    typeset -g VCS_STATUS_RESULT=norepo-sync
    _gitstatus_clear$fsuf
    return 0
  fi
  local -i req_fd=${(P)${:-_GITSTATUS_REQ_FD_$name}}
  # Request IDs must be unique per request; EPOCHREALTIME has enough
  # resolution. Protocol: fields are \x1f-separated, records end with \x1e.
  local req_id=$EPOCHREALTIME
  print -rnu $req_fd -- $req_id' '$callback$'\x1f'$dir$'\x1f'$no_diff$'\x1e' || return
  (( ++_GITSTATUS_NUM_INFLIGHT_$name ))
  if (( timeout == 0 )); then
    # Fire-and-forget: report a timeout now; the callback (if any) runs when
    # the response eventually arrives.
    typeset -g VCS_STATUS_RESULT=tout
    _gitstatus_clear$fsuf
  else
    # Drain responses until this request's own response (or a timeout) is
    # seen; *-async results belong to earlier timed-out requests.
    while true; do
      _gitstatus_process_response$fsuf $name $timeout $req_id || return
      [[ $VCS_STATUS_RESULT == *-async ]] || break
    done
  fi
  [[ $VCS_STATUS_RESULT != tout || -n $callback ]]
}
# If the last call to gitstatus_query timed out (VCS_STATUS_RESULT=tout), wait for the callback
# to be called. Otherwise do nothing.
#
# Usage: gitstatus_process_results [OPTION]... NAME
#
# -t FLOAT Timeout in seconds. Negative value means infinity. Will block for at most this long.
#
# Returns an error only when invoked with incorrect arguments and when gitstatusd isn't running or
# broken.
#
# If a callback gets called, VCS_STATUS_* parameters are set as in gitstatus_query.
# VCS_STATUS_RESULT is either norepo-async or ok-async.
function gitstatus_process_results"${1:-}"() {
  emulate -L zsh -o no_aliases -o extended_glob -o typeset_silent
  # Helper-function suffix; see gitstatus_query.
  local fsuf=${${(%):-%N}#gitstatus_process_results}
  local opt OPTARG
  local -i OPTIND
  # Negative timeout means "wait indefinitely for pending responses".
  local -F timeout=-1
  while getopts ":t:" opt; do
    case $opt in
      t)
        # Same numeric validation as gitstatus_query -t.
        if [[ $OPTARG != (|+|-)<->(|.<->)(|[eE](|-|+)<->) ]]; then
          print -ru2 -- "gitstatus_process_results: invalid -t argument: $OPTARG"
          return 1
        fi
        timeout=OPTARG
      ;;
      \?) print -ru2 -- "gitstatus_process_results: invalid option: $OPTARG" ; return 1;;
      :) print -ru2 -- "gitstatus_process_results: missing required argument: $OPTARG"; return 1;;
      *) print -ru2 -- "gitstatus_process_results: invalid option: $opt" ; return 1;;
    esac
  done
  if (( OPTIND != ARGC )); then
    print -ru2 -- "gitstatus_process_results: exactly one positional argument is required"
    return 1
  fi
  local name=$*[OPTIND]
  if [[ $name != [[:IDENT:]]## ]]; then
    print -ru2 -- "gitstatus_process_results: invalid positional argument: $name"
    return 1
  fi
  # Requires a fully initialized instance (state 2).
  (( _GITSTATUS_STATE_$name == 2 )) || return
  # Drain responses for requests that previously timed out; each one invokes
  # its callback. Stop early when the read times out again (*-async results
  # keep the loop going; anything else means we ran out of data in time).
  while (( _GITSTATUS_NUM_INFLIGHT_$name )); do
    _gitstatus_process_response$fsuf $name $timeout '' || return
    [[ $VCS_STATUS_RESULT == *-async ]] || break
  done
  return 0
}
function _gitstatus_clear"${1:-}"() {
  # Unset every VCS_STATUS_* result parameter. Called before reporting
  # "no repo" or a timeout so stale values from a prior query cannot leak.
  unset VCS_STATUS_{WORKDIR,COMMIT,LOCAL_BRANCH,REMOTE_BRANCH,REMOTE_NAME,REMOTE_URL,ACTION,INDEX_SIZE,NUM_STAGED,NUM_UNSTAGED,NUM_CONFLICTED,NUM_UNTRACKED,HAS_STAGED,HAS_UNSTAGED,HAS_CONFLICTED,HAS_UNTRACKED,COMMITS_AHEAD,COMMITS_BEHIND,STASHES,TAG,NUM_UNSTAGED_DELETED,NUM_STAGED_NEW,NUM_STAGED_DELETED,PUSH_REMOTE_NAME,PUSH_REMOTE_URL,PUSH_COMMITS_AHEAD,PUSH_COMMITS_BEHIND,NUM_SKIP_WORKTREE,NUM_ASSUME_UNCHANGED}
}
function _gitstatus_process_response"${1:-}"() {
  # $1 -- instance name; $2 -- timeout in seconds (negative: block forever);
  # $3 -- ID of the request whose response should be reported as *-sync
  #       (empty when only draining asynchronous responses).
  local name=$1 timeout req_id=$3 buf
  local -i resp_fd=_GITSTATUS_RESP_FD_$name
  local -i dirty_max_index_size=_GITSTATUS_DIRTY_MAX_INDEX_SIZE_$name
  # A non-negative timeout becomes a -t argument for sysread.
  (( $2 >= 0 )) && timeout=-t$2 && [[ -t $resp_fd ]]
  sysread $timeout -i $resp_fd 'buf[$#buf+1]' || {
    if (( $? == 4 )); then
      # Status 4: sysread timed out. Only a synchronous caller cares.
      if [[ -n $req_id ]]; then
        typeset -g VCS_STATUS_RESULT=tout
        _gitstatus_clear$fsuf
      fi
      return 0
    else
      # Any other failure means the daemon is gone; tear the instance down.
      gitstatus_stop$fsuf $name
      return 1
    fi
  }
  # Keep reading until the buffer ends with the record terminator (\x1e).
  while [[ $buf != *$'\x1e' ]]; do
    if ! sysread -i $resp_fd 'buf[$#buf+1]'; then
      gitstatus_stop$fsuf $name
      return 1
    fi
  done
  local s
  # The buffer may hold several \x1e-terminated records; each record is a
  # list of \x1f-separated fields: request id, is-repo flag, then repo data.
  for s in ${(ps:\x1e:)buf}; do
    local -a resp=("${(@ps:\x1f:)s}")
    if (( resp[2] )); then
      # Field 2 is 1: the queried directory is inside a git repo.
      if [[ $resp[1] == $req_id' '* ]]; then
        typeset -g VCS_STATUS_RESULT=ok-sync
      else
        typeset -g VCS_STATUS_RESULT=ok-async
      fi
      # Assign fields 3..29 positionally to the public parameters.
      for VCS_STATUS_WORKDIR \
          VCS_STATUS_COMMIT \
          VCS_STATUS_LOCAL_BRANCH \
          VCS_STATUS_REMOTE_BRANCH \
          VCS_STATUS_REMOTE_NAME \
          VCS_STATUS_REMOTE_URL \
          VCS_STATUS_ACTION \
          VCS_STATUS_INDEX_SIZE \
          VCS_STATUS_NUM_STAGED \
          VCS_STATUS_NUM_UNSTAGED \
          VCS_STATUS_NUM_CONFLICTED \
          VCS_STATUS_NUM_UNTRACKED \
          VCS_STATUS_COMMITS_AHEAD \
          VCS_STATUS_COMMITS_BEHIND \
          VCS_STATUS_STASHES \
          VCS_STATUS_TAG \
          VCS_STATUS_NUM_UNSTAGED_DELETED \
          VCS_STATUS_NUM_STAGED_NEW \
          VCS_STATUS_NUM_STAGED_DELETED \
          VCS_STATUS_PUSH_REMOTE_NAME \
          VCS_STATUS_PUSH_REMOTE_URL \
          VCS_STATUS_PUSH_COMMITS_AHEAD \
          VCS_STATUS_PUSH_COMMITS_BEHIND \
          VCS_STATUS_NUM_SKIP_WORKTREE \
          VCS_STATUS_NUM_ASSUME_UNCHANGED \
          VCS_STATUS_COMMIT_ENCODING \
          VCS_STATUS_COMMIT_SUMMARY in "${(@)resp[3,29]}"; do
      done
      # Mark the numeric parameters as integers.
      typeset -gi VCS_STATUS_{INDEX_SIZE,NUM_STAGED,NUM_UNSTAGED,NUM_CONFLICTED,NUM_UNTRACKED,COMMITS_AHEAD,COMMITS_BEHIND,STASHES,NUM_UNSTAGED_DELETED,NUM_STAGED_NEW,NUM_STAGED_DELETED,PUSH_COMMITS_AHEAD,PUSH_COMMITS_BEHIND,NUM_SKIP_WORKTREE,NUM_ASSUME_UNCHANGED}
      typeset -gi VCS_STATUS_HAS_STAGED=$((VCS_STATUS_NUM_STAGED > 0))
      # Above the -m index-size threshold the daemon skips scanning files, so
      # the unstaged/conflicted/untracked flags are reported as unknown (-1).
      if (( dirty_max_index_size >= 0 && VCS_STATUS_INDEX_SIZE > dirty_max_index_size )); then
        typeset -gi \
          VCS_STATUS_HAS_UNSTAGED=-1 \
          VCS_STATUS_HAS_CONFLICTED=-1 \
          VCS_STATUS_HAS_UNTRACKED=-1
      else
        typeset -gi \
          VCS_STATUS_HAS_UNSTAGED=$((VCS_STATUS_NUM_UNSTAGED > 0)) \
          VCS_STATUS_HAS_CONFLICTED=$((VCS_STATUS_NUM_CONFLICTED > 0)) \
          VCS_STATUS_HAS_UNTRACKED=$((VCS_STATUS_NUM_UNTRACKED > 0))
      fi
    else
      # Not a git repo.
      if [[ $resp[1] == $req_id' '* ]]; then
        typeset -g VCS_STATUS_RESULT=norepo-sync
      else
        typeset -g VCS_STATUS_RESULT=norepo-async
      fi
      _gitstatus_clear$fsuf
    fi
    (( --_GITSTATUS_NUM_INFLIGHT_$name ))
    # Asynchronous responses carry the user callback after the first space of
    # the request-id field; invoke it under clean zsh emulation.
    [[ $VCS_STATUS_RESULT == *-async ]] && emulate zsh -c "${resp[1]#* }"
  done
  return 0
}
function _gitstatus_daemon"${1:-}"() {
  # Runs inside a process substitution started by gitstatus_start (inherits
  # locals such as $daemon_log, $file_prefix, $args, $fsuf and $lock_fd).
  # Finds (downloading if allowed) the gitstatusd binary, wires it to the
  # request fifo / response pipe, and cleans up lock and fifo files on exit.
  local -i pipe_fd
  # Close stdin, keep the response pipe on a fresh fd, log everything else.
  exec 0<&- {pipe_fd}>&1 1>>$daemon_log 2>&1 || return
  local pgid=$sysparams[pid]
  [[ $pgid == <1-> ]] || return
  builtin cd -q / || return
  {
    {
      trap '' PIPE
      local uname_sm
      # Lowercased `uname -sm`; the //ı/i substitution undoes the Turkish
      # dotless i that (L) produces from 'I' under some locales.
      uname_sm="${${(L)$(command uname -sm)}//ı/i}" || return
      [[ $uname_sm == [^' ']##' '[^' ']## ]] || return
      local uname_s=${uname_sm% *}
      local uname_m=${uname_sm#* }
      # Thread count: explicit override, otherwise 2x CPUs capped at 32.
      if [[ $GITSTATUS_NUM_THREADS == <1-> ]]; then
        args+=(-t $GITSTATUS_NUM_THREADS)
      else
        local cpus
        if (( ! $+commands[sysctl] )) || [[ $uname_s == linux ]] ||
           ! cpus="$(command sysctl -n hw.ncpu)"; then
          if (( ! $+commands[getconf] )) || ! cpus="$(command getconf _NPROCESSORS_ONLN)"; then
            cpus=8
          fi
        fi
        args+=(-t $((cpus > 16 ? 32 : cpus > 0 ? 2 * cpus : 16)))
      fi
      # The fifo carries requests; send our process-group id (space-padded to
      # 20 bytes) back to the parent through the response pipe.
      command mkfifo -- $file_prefix.fifo || return
      print -rnu $pipe_fd -- ${(l:20:)pgid} || return
      exec <$file_prefix.fifo || return
      zf_rm -- $file_prefix.fifo || return
      local _gitstatus_zsh_daemon _gitstatus_zsh_version _gitstatus_zsh_downloaded
      # The install script reports its findings through this callback.
      function _gitstatus_set_daemon$fsuf() {
        _gitstatus_zsh_daemon="$1"
        _gitstatus_zsh_version="$2"
        _gitstatus_zsh_downloaded="$3"
      }
      local gitstatus_plugin_dir_var=_gitstatus_plugin_dir$fsuf
      local gitstatus_plugin_dir=${(P)gitstatus_plugin_dir_var}
      builtin set -- -d $gitstatus_plugin_dir -s $uname_s -m $uname_m \
        -p "printf '\\001' >&$pipe_fd" -e $pipe_fd -- _gitstatus_set_daemon$fsuf
      # -n forbids downloading unless auto-install is enabled (default: on).
      [[ ${GITSTATUS_AUTO_INSTALL:-1} == (|-|+)<1-> ]] || builtin set -- -n "$@"
      builtin source $gitstatus_plugin_dir/install || return
      [[ -n $_gitstatus_zsh_daemon ]] || return
      [[ -n $_gitstatus_zsh_version ]] || return
      [[ $_gitstatus_zsh_downloaded == [01] ]] || return
      # Determine the HOME to run the daemon under (the invoking user's).
      if (( UID == EUID )); then
        local home=~
      else
        local user
        user="$(command id -un)" || return
        local home=${userdirs[$user]}
        [[ -n $home ]] || return
      fi
      if [[ -x $_gitstatus_zsh_daemon ]]; then
        HOME=$home $_gitstatus_zsh_daemon -G $_gitstatus_zsh_version "${(@)args}" >&$pipe_fd
        local -i ret=$?
        # Clean exit or death by signal -- do not attempt a re-download.
        [[ $ret == (0|129|130|131|137|141|143|159) ]] && return ret
      fi
      # The binary misbehaved. If it was not freshly downloaded, lives in the
      # cache, and auto-install is on, force a re-download (-f) and retry once.
      (( ! _gitstatus_zsh_downloaded )) || return
      [[ ${GITSTATUS_AUTO_INSTALL:-1} == (|-|+)<1-> ]] || return
      [[ $_gitstatus_zsh_daemon == \
         ${GITSTATUS_CACHE_DIR:-${XDG_CACHE_HOME:-$HOME/.cache}/gitstatus}/* ]] || return
      builtin set -- -f "$@"
      _gitstatus_zsh_daemon=
      _gitstatus_zsh_version=
      _gitstatus_zsh_downloaded=
      builtin source $gitstatus_plugin_dir/install || return
      [[ -n $_gitstatus_zsh_daemon ]] || return
      [[ -n $_gitstatus_zsh_version ]] || return
      [[ $_gitstatus_zsh_downloaded == 1 ]] || return
      HOME=$home $_gitstatus_zsh_daemon -G $_gitstatus_zsh_version "${(@)args}" >&$pipe_fd
    } always {
      local -i ret=$?
      zf_rm -f -- $file_prefix.lock $file_prefix.fifo
      kill -- -$pgid
    }
  } &!
  # No lock fd (WSL case): skip the watchdog below.
  (( lock_fd == -1 )) && return
  {
    # Watchdog: the lock is released when the client shell exits. If the lock
    # file still exists at that point (the client died before finishing
    # initialization, which removes the file), kill the daemon's process
    # group and clean up.
    if zsystem flock -- $file_prefix.lock && command sleep 5 && [[ -e $file_prefix.lock ]]; then
      zf_rm -f -- $file_prefix.lock $file_prefix.fifo
      kill -- -$pgid
    fi
  } &!
}
# Starts gitstatusd in the background. Does nothing and succeeds if gitstatusd is already running.
#
# Usage: gitstatus_start [OPTION]... NAME
#
# -t FLOAT Fail the self-check on initialization if not getting a response from gitstatusd for
# this this many seconds. Defaults to 5.
#
# -s INT Report at most this many staged changes; negative value means infinity.
# Defaults to 1.
#
# -u INT Report at most this many unstaged changes; negative value means infinity.
# Defaults to 1.
#
# -c INT Report at most this many conflicted changes; negative value means infinity.
# Defaults to 1.
#
# -d INT Report at most this many untracked files; negative value means infinity.
# Defaults to 1.
#
# -m INT Report -1 unstaged, untracked and conflicted if there are more than this many
# files in the index. Negative value means infinity. Defaults to -1.
#
# -e Count files within untracked directories like `git status --untracked-files`.
#
# -U Unless this option is specified, report zero untracked files for repositories
# with status.showUntrackedFiles = false.
#
# -W Unless this option is specified, report zero untracked files for repositories
# with bash.showUntrackedFiles = false.
#
# -D Unless this option is specified, report zero staged, unstaged and conflicted
# changes for repositories with bash.showDirtyState = false.
function gitstatus_start"${1:-}"() {
emulate -L zsh -o no_aliases -o no_bg_nice -o extended_glob -o typeset_silent || return
print -rnu2 || return
local fsuf=${${(%):-%N}#gitstatus_start}
local opt OPTARG
local -i OPTIND
local -F timeout=5
local -i async=0
local -a args=()
local -i dirty_max_index_size=-1
while getopts ":t:s:u:c:d:m:eaUWD" opt; do
case $opt in
a) async=1;;
+a) async=0;;
t)
if [[ $OPTARG != (|+)<->(|.<->)(|[eE](|-|+)<->) ]] || (( ${timeout::=OPTARG} <= 0 )); then
print -ru2 -- "gitstatus_start: invalid -t argument: $OPTARG"
return 1
fi
;;
s|u|c|d|m)
if [[ $OPTARG != (|-|+)<-> ]]; then
print -ru2 -- "gitstatus_start: invalid -$opt argument: $OPTARG"
return 1
fi
args+=(-$opt $OPTARG)
[[ $opt == m ]] && dirty_max_index_size=OPTARG
;;
e|U|W|D) args+=-$opt;;
+(e|U|W|D)) args=(${(@)args:#-$opt});;
\?) print -ru2 -- "gitstatus_start: invalid option: $OPTARG" ; return 1;;
:) print -ru2 -- "gitstatus_start: missing required argument: $OPTARG"; return 1;;
*) print -ru2 -- "gitstatus_start: invalid option: $opt" ; return 1;;
esac
done
if (( OPTIND != ARGC )); then
print -ru2 -- "gitstatus_start: exactly one positional argument is required"
return 1
fi
local name=$*[OPTIND]
if [[ $name != [[:IDENT:]]## ]]; then
print -ru2 -- "gitstatus_start: invalid positional argument: $name"
return 1
fi
local -i lock_fd resp_fd stderr_fd
local file_prefix xtrace=/dev/null daemon_log=/dev/null culprit
{
if (( _GITSTATUS_STATE_$name )); then
(( async )) && return
(( _GITSTATUS_STATE_$name == 2 )) && return
lock_fd=_GITSTATUS_LOCK_FD_$name
resp_fd=_GITSTATUS_RESP_FD_$name
xtrace=${(P)${:-GITSTATUS_XTRACE_$name}}
daemon_log=${(P)${:-GITSTATUS_DAEMON_LOG_$name}}
file_prefix=${(P)${:-_GITSTATUS_FILE_PREFIX_$name}}
else
typeset -gi _GITSTATUS_START_COUNTER
local log_level=$GITSTATUS_LOG_LEVEL
if [[ -n "$TMPDIR" && ( ( -d "$TMPDIR" && -w "$TMPDIR" ) || ! ( -d /tmp && -w /tmp ) ) ]]; then
local tmpdir=$TMPDIR
else
local tmpdir=/tmp
fi
local file_prefix=${tmpdir:A}/gitstatus.$name.$EUID
file_prefix+=.$sysparams[pid].$EPOCHSECONDS.$((++_GITSTATUS_START_COUNTER))
(( GITSTATUS_ENABLE_LOGGING )) && : ${log_level:=INFO}
if [[ -n $log_level ]]; then
xtrace=$file_prefix.xtrace.log
daemon_log=$file_prefix.daemon.log
fi
args+=(-v ${log_level:-FATAL})
typeset -g GITSTATUS_XTRACE_$name=$xtrace
typeset -g GITSTATUS_DAEMON_LOG_$name=$daemon_log
typeset -g _GITSTATUS_FILE_PREFIX_$name=$file_prefix
typeset -gi _GITSTATUS_CLIENT_PID_$name="sysparams[pid]"
typeset -gi _GITSTATUS_DIRTY_MAX_INDEX_SIZE_$name=dirty_max_index_size
fi
() {
if [[ $xtrace != /dev/null && -o no_xtrace ]]; then
exec {stderr_fd}>&2 || return
exec 2>>$xtrace || return
setopt xtrace
fi
setopt monitor || return
if (( ! _GITSTATUS_STATE_$name )); then
if [[ -r /proc/version && "$(</proc/version)" == *Microsoft* ]]; then
lock_fd=-1
else
print -rn >$file_prefix.lock || return
zsystem flock -f lock_fd $file_prefix.lock || return
[[ $lock_fd == <1-> ]] || return
fi
typeset -gi _GITSTATUS_LOCK_FD_$name=lock_fd
if [[ $OSTYPE == cygwin* && -d /proc/self/fd ]]; then
# Work around bugs in Cygwin 32-bit.
#
# This hangs:
#
# emulate -L zsh
# () { exec {fd}< $1 } <(:)
# =true # hangs here
#
# This hangs:
#
# sysopen -r -u fd <(:)
local -i fd
exec {fd}< <(_gitstatus_daemon$fsuf) || return
{
[[ -r /proc/self/fd/$fd ]] || return
sysopen -r -o cloexec -u resp_fd /proc/self/fd/$fd || return
} always {
exec {fd} >&- || return
}
else
sysopen -r -o cloexec -u resp_fd <(_gitstatus_daemon$fsuf) || return
fi
typeset -gi GITSTATUS_DAEMON_PID_$name="${sysparams[procsubstpid]:--1}"
[[ $resp_fd == <1-> ]] || return
typeset -gi _GITSTATUS_RESP_FD_$name=resp_fd
typeset -gi _GITSTATUS_STATE_$name=1
fi
if (( ! async )); then
(( _GITSTATUS_CLIENT_PID_$name == sysparams[pid] )) || return
local pgid
while (( $#pgid < 20 )); do
[[ -t $resp_fd ]]
sysread -s $((20 - $#pgid)) -t $timeout -i $resp_fd 'pgid[$#pgid+1]' || return
done
[[ $pgid == ' '#<1-> ]] || return
typeset -gi GITSTATUS_DAEMON_PID_$name=pgid
sysopen -w -o cloexec -u req_fd -- $file_prefix.fifo || return
[[ $req_fd == <1-> ]] || return
typeset -gi _GITSTATUS_REQ_FD_$name=req_fd
print -nru $req_fd -- $'}hello\x1f\x1e' || return
local expected=$'}hello\x1f0\x1e' actual
if (( $+functions[p10k] )) && [[ ! -t 1 && ! -t 0 ]]; then
local -F deadline='EPOCHREALTIME + 4'
else
local -F deadline='1'
fi
while true; do
[[ -t $resp_fd ]]
sysread -s 1 -t $timeout -i $resp_fd actual || return
[[ $expected == $actual* ]] && break
if [[ $actual != $'\1' ]]; then
[[ -t $resp_fd ]]
while sysread -t $timeout -i $resp_fd 'actual[$#actual+1]'; do
[[ -t $resp_fd ]]
done
culprit=$actual
return 1
fi
(( EPOCHREALTIME < deadline )) && continue
if (( deadline > 0 )); then
deadline=0
if (( stderr_fd )); then
unsetopt xtrace
exec 2>&$stderr_fd {stderr_fd}>&-
stderr_fd=0
fi
if (( $+functions[p10k] )); then
p10k clear-instant-prompt || return
fi
if [[ $name == POWERLEVEL9K ]]; then
local label=powerlevel10k
else
local label=gitstatus
fi
if [[ -t 2 ]]; then
local spinner=($'\b%3F-%f' $'\b%3F\\%f' $'\b%3F|%f' $'\b%3F/%f')
print -Prnu2 -- "[%3F$label%f] fetching %2Fgitstatusd%f .. "
else
local spinner=('.')
print -rnu2 -- "[$label] fetching gitstatusd .."
fi
fi
print -Prnu2 -- $spinner[1]
spinner=($spinner[2,-1] $spinner[1])
done
if (( deadline == 0 )); then
if [[ -t 2 ]]; then
print -Pru2 -- $'\b[%2Fok%f]'
else
print -ru2 -- ' [ok]'
fi
if [[ $xtrace != /dev/null && -o no_xtrace ]]; then
exec {stderr_fd}>&2 || return
exec 2>>$xtrace || return
setopt xtrace
fi
fi
while (( $#actual < $#expected )); do
[[ -t $resp_fd ]]
sysread -s $(($#expected - $#actual)) -t $timeout -i $resp_fd 'actual[$#actual+1]' || return
done
[[ $actual == $expected ]] || return
function _gitstatus_process_response_$name-$fsuf() {
emulate -L zsh -o no_aliases -o extended_glob -o typeset_silent
local pair=${${(%):-%N}#_gitstatus_process_response_}
local name=${pair%%-*}
local fsuf=${pair#*-}
[[ $name == POWERLEVEL9K && $fsuf == _p9k_ ]] && eval $__p9k_intro_base
if (( ARGC == 1 )); then
_gitstatus_process_response$fsuf $name 0 ''
else
gitstatus_stop$fsuf $name
fi
}
if ! zle -F $resp_fd _gitstatus_process_response_$name-$fsuf; then
unfunction _gitstatus_process_response_$name-$fsuf
return 1
fi
function _gitstatus_cleanup_$name-$fsuf() {
emulate -L zsh -o no_aliases -o extended_glob -o typeset_silent
local pair=${${(%):-%N}#_gitstatus_cleanup_}
local name=${pair%%-*}
local fsuf=${pair#*-}
(( _GITSTATUS_CLIENT_PID_$name == sysparams[pid] )) || return
gitstatus_stop$fsuf $name
}
if ! add-zsh-hook zshexit _gitstatus_cleanup_$name-$fsuf; then
unfunction _gitstatus_cleanup_$name-$fsuf
return 1
fi
if (( lock_fd != -1 )); then
zf_rm -- $file_prefix.lock || return
zsystem flock -u $lock_fd || return
fi
unset _GITSTATUS_LOCK_FD_$name
typeset -gi _GITSTATUS_STATE_$name=2
fi
}
} always {
local -i err=$?
(( stderr_fd )) && exec 2>&$stderr_fd {stderr_fd}>&-
(( err == 0 )) && return
gitstatus_stop$fsuf $name
setopt prompt_percent no_prompt_subst no_prompt_bang
(( $+functions[p10k] )) && p10k clear-instant-prompt
print -ru2 -- ''
print -Pru2 -- '[%F{red}ERROR%f]: gitstatus failed to initialize.'
print -ru2 -- ''
if [[ -n $culprit ]]; then
print -ru2 -- $culprit
return err
fi
if [[ -s $xtrace ]]; then
print -ru2 -- ''
print -Pru2 -- " Zsh log (%U${xtrace//\%/%%}%u):"
print -Pru2 -- '%F{yellow}'
print -lru2 -- "${(@)${(@f)$(<$xtrace)}/#/ }"
print -Pnru2 -- '%f'
fi
if [[ -s $daemon_log ]]; then
print -ru2 -- ''
print -Pru2 -- " Daemon log (%U${daemon_log//\%/%%}%u):"
print -Pru2 -- '%F{yellow}'
print -lru2 -- "${(@)${(@f)$(<$daemon_log)}/#/ }"
print -Pnru2 -- '%f'
fi
if [[ $GITSTATUS_LOG_LEVEL == DEBUG ]]; then
print -ru2 -- ''
print -ru2 -- ' System information:'
print -Pru2 -- '%F{yellow}'
print -ru2 -- " zsh: $ZSH_VERSION"
print -ru2 -- " uname -a: $(command uname -a)"
print -Pru2 -- '%f'
print -ru2 -- ' If you need help, open an issue and attach this whole error message to it:'
print -ru2 -- ''
print -Pru2 -- ' %Uhttps://github.com/romkatv/gitstatus/issues/new%u'
else
print -ru2 -- ''
local home=~
local zshrc=${${${(q)${ZDOTDIR:-~}}/#${(q)home}/'~'}//\%/%%}/.zshrc
print -Pru2 -- " Add the following parameter to %U$zshrc%u for extra diagnostics on error:"
print -ru2 -- ''
print -Pru2 -- ' %BGITSTATUS_LOG_LEVEL=DEBUG%b'
print -ru2 -- ''
print -ru2 -- ' Restart Zsh to retry gitstatus initialization:'
print -ru2 -- ''
print -Pru2 -- ' %F{green}%Uexec%u zsh%f'
fi
}
}
# Stops gitstatusd if it's running.
#
# Usage: gitstatus_stop NAME.
function gitstatus_stop"${1:-}"() {
emulate -L zsh -o no_aliases -o extended_glob -o typeset_silent
local fsuf=${${(%):-%N}#gitstatus_stop}
if (( ARGC != 1 )); then
print -ru2 -- "gitstatus_stop: exactly one positional argument is required"
return 1
fi
local name=$1
if [[ $name != [[:IDENT:]]## ]]; then
print -ru2 -- "gitstatus_stop: invalid positional argument: $name"
return 1
fi
local state_var=_GITSTATUS_STATE_$name
local req_fd_var=_GITSTATUS_REQ_FD_$name
local resp_fd_var=_GITSTATUS_RESP_FD_$name
local lock_fd_var=_GITSTATUS_LOCK_FD_$name
local client_pid_var=_GITSTATUS_CLIENT_PID_$name
local daemon_pid_var=GITSTATUS_DAEMON_PID_$name
local inflight_var=_GITSTATUS_NUM_INFLIGHT_$name
local file_prefix_var=_GITSTATUS_FILE_PREFIX_$name
local dirty_max_index_size_var=_GITSTATUS_DIRTY_MAX_INDEX_SIZE_$name
local req_fd=${(P)req_fd_var}
local resp_fd=${(P)resp_fd_var}
local lock_fd=${(P)lock_fd_var}
local daemon_pid=${(P)daemon_pid_var}
local file_prefix=${(P)file_prefix_var}
local cleanup=_gitstatus_cleanup_$name-$fsuf
local process=_gitstatus_process_response_$name-$fsuf
if (( $+functions[$cleanup] )); then
add-zsh-hook -d zshexit $cleanup
unfunction -- $cleanup
fi
if (( $+functions[$process] )); then
[[ -n $resp_fd ]] && zle -F $resp_fd
unfunction -- $process
fi
[[ $daemon_pid == <1-> ]] && kill -- -$daemon_pid 2>/dev/null
[[ $file_prefix == /* ]] && zf_rm -f -- $file_prefix.lock $file_prefix.fifo
[[ $lock_fd == <1-> ]] && zsystem flock -u $lock_fd
[[ $req_fd == <1-> ]] && exec {req_fd}>&-
[[ $resp_fd == <1-> ]] && exec {resp_fd}>&-
unset $state_var $req_fd_var $lock_fd_var $resp_fd_var $client_pid_var $daemon_pid_var
unset $inflight_var $file_prefix_var $dirty_max_index_size_var
unset VCS_STATUS_RESULT
_gitstatus_clear$fsuf
}
# Usage: gitstatus_check NAME.
#
# Returns 0 if and only if `gitstatus_start NAME` has succeeded previously.
# If it returns non-zero, gitstatus_query NAME is guaranteed to return non-zero.
function gitstatus_check"${1:-}"() {
emulate -L zsh -o no_aliases -o extended_glob -o typeset_silent
local fsuf=${${(%):-%N}#gitstatus_check}
if (( ARGC != 1 )); then
print -ru2 -- "gitstatus_check: exactly one positional argument is required"
return 1
fi
local name=$1
if [[ $name != [[:IDENT:]]## ]]; then
print -ru2 -- "gitstatus_check: invalid positional argument: $name"
return 1
fi
(( _GITSTATUS_STATE_$name == 2 ))
}
(( ${#_gitstatus_opts} )) && setopt ${_gitstatus_opts[@]}
'builtin' 'unset' '_gitstatus_opts'
| true |
ae138229c9e4a7f97af76ef8102635ffad7c250e | Shell | ralongi/tools | /scripts/time_to_execute.sh | UTF-8 | 164 | 3.109375 | 3 | [] | no_license | time_to_execute()
{
SECONDS=0
## execute tasks here ###
time_to_execute=$SECONDS
echo "Time required for operation to complete: $time_to_execute seconds"
}
| true |
c41978e8f90f62104dd35cd5e7b9e4d5ee2adc13 | Shell | A-Julien/nachos-docker | /install.sh | UTF-8 | 4,233 | 4.375 | 4 | [] | no_license | #!/bin/bash
# To compile NachOS on a Debian environment (x86), you need to
# install the folowing packages:
# build-essential
# g++-multilib (on x86-64 only)
# and a MIPS cross-compiler. See below.
# At the end of the document, you will find a script that can help
# to create a cross-compiler on Debian. However, currently (11/2011),
# Debian is migrating toward multiarch and the script does not work.
#
# What I propose is to install Debian packages of a GCC MIPS cross-compiler
# that has been compiled from and on a Debian squeeze (stable)
# These packages should work on Debian stable (squeeze), testing (wheezy)
# and unstable (sid). They also work on Ubuntu sometimes requiring to
# manually install a extra package.
#
# Only the AMD64 and i386 architecture is supported by these pre-compiled
# packages, not the i386 one.
#
# Just run this script (or look at it and type the commands yourself) to
# install a MIPS cross-compiler
set -e
# either this script must be called as root or the user must be
# able to use sudo
if [ "$(whoami)" = root ]; then
ROOT=
else
ROOT=sudo
fi
# Check that we are on amd64 or i386
case "$(uname -m)" in
x86_64)
ARCH=amd64
;;
i*86)
ARCH=i386
;;
*)
echo "Wrong architechture $(uname -m). Aborting." 1>&2
exit 1
esac
install_package() {
local UPDATE=
if [ "$1" = --no-update ]; then
UPDATE=": skipping "
shift
fi
$UPDATE $ROOT apt-get -qq update > /dev/null
local p
local INST_REQ=
for p in $1 ; do
if dpkg -l "$p" | grep -s '^ii' ; then
:
else
INST_REQ=1
break
fi
done
if [ -z "$INST_REQ" ]; then
return 0
fi
set +x
echo "**************************************************************"
echo "* Installing the $1 package(s) from your distrib"
echo "* $2"
echo "**************************************************************"
echo "* Refuse the installation (and try to install it yourself *"
echo "* with your usual package manager) if something seems wrong *"
echo "* (packages that need to be removed, to many packages *"
echo "* upgraded, ...) *"
echo "**************************************************************"
set -x
$ROOT apt-get -qq -y --no-install-recommends install -y $1 $3
}
install_package build-essential "Installing basic development tools (make, gcc, g++, etc.)"
NO_UPDATE=--no-update
echo "MIPS cross compiler seems available in your distrib. Trying to use it"
if ! dpkg --print-foreign-architectures | grep -sq mipsel ; then
echo "Adding mipsel as a foreign architecture on your system"
$ROOT dpkg --add-architecture mipsel
NO_UPDATE=
fi
if test "$ARCH" = amd64 ; then
GCC_VER=$(gcc --version | head -1 | \
sed -e 's/.* \([0-9]\+\.[0-9]\+\)\.[0-9]\+\( .*\)\?$/\1/p;d')
if [ -z "$GCC_VER" ]; then
echo "Cannot find your GCC version. Aborting."
exit 1
fi
GCC_VER_MAJ="$(echo $GCC_VER | cut -d . -f 1)"
if ! dpkg --print-foreign-architectures | grep -sq i386 ; then
echo "Adding i386 as a foreign architecture on your system"
$ROOT dpkg --add-architecture i386
NO_UPDATE=
fi
fi
install_package $NO_UPDATE gcc-mipsel-linux-gnu "Installing the cross-compiler of your distrib. gcc-multiarch and g++-multiarch might be asked to be removed"
if test "$ARCH" = amd64 ; then
PACKAGES=
if [ "$(apt-cache -q policy g++-${GCC_VER_MAJ}-multilib)" != "" ] ; then
PACKAGES="$PACKAGES g++-${GCC_VER_MAJ}-multilib"
else
# before gcc 5
PACKAGES="$PACKAGES libc6-dev:i386 lib32stdc++-$GCC_VER-dev"
fi
install_package --no-update "$PACKAGES linux-libc-dev:i386" \
"in order to compile NachOS on amd64 systems" gcc-mipsel-linux-gnu
fi
echo "Ok, you should be able to compile NachOS"
exit 0
############################################################################
####### # # ######
# ## # # #
# # # # # #
##### # # # # #
# # # # # #
# # ## # #
####### # # ######
# never go further: creating a MIPS cross-compiler is not working for now | true |
74a56cb8543388a40b6b3a3c27ae279668247209 | Shell | ass-a2s/ccs-calendarserver | /support/_run_from_ve | UTF-8 | 1,262 | 2.515625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | #!/bin/sh
# -*- sh-basic-offset: 2 -*-
##
# Copyright (c) 2014-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
set -e;
set -u;
dstroot="$(cd "$(dirname "$0")/../virtualenv" && pwd)";
export PATH="${dstroot}/bin:${PATH}";
export C_INCLUDE_PATH="${dstroot}/include:${C_INCLUDE_PATH:-}";
export LD_LIBRARY_PATH="${dstroot}/lib:${dstroot}/lib64:${LD_LIBRARY_PATH:-}";
export CPPFLAGS="-I${dstroot}/include ${CPPFLAGS:-} ";
export LDFLAGS="-L${dstroot}/lib -L${dstroot}/lib64 ${LDFLAGS:-} ";
export DYLD_LIBRARY_PATH="${dstroot}/lib:${dstroot}/lib64:${DYLD_LIBRARY_PATH:-}";
export PKG_CONFIG_PATH="${dstroot}/lib/pkgconfig:${PKG_CONFIG_PATH:-}";
exec "${dstroot}/bin/$(basename "$0")";
| true |
665075b39c4c1407cb8ebf4c3fe6ab254500f178 | Shell | harrifeng/system-config | /lib/jc/build-url-to-dir | UTF-8 | 2,501 | 3.828125 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/usr/bin/env bash
# 这个脚本:输入参数是一个 build_url,输出是一个 build_dir(在 jenkins master 上名为类似“jobs/JOB_NAME/build/NNN”的一个目录)
# We will change the jenkins url
#
# ${scm_jenkins_url}job/Daily-apk-matrix-build.test/APP_NAME=hupowercontrol,SOURCE_BRANCH=O-M01-MASTER,label_exp=app_build/1
#
# into a build dir:
#
# ${scm_jenkins_jobs_dir}/Daily-apk-matrix-build.test/configurations/axis-APP_NAME/hupowercontrol/axis-SOURCE_BRANCH/O-M01-MASTER/axis-label_exp/app_build/builds/1
#
# Or this url:
# ${scm_jenkins_url}job/test.bhj/113
#
# into this dir:
# ${scm_jenkins_jobs_dir}/test.bhj/builds/113
set -e
## start code-generator "^\\s *#\\s *"
# generate-getopt -P @job-and-build-number-only
## end code-generator
## start generated code
TEMP=$(POSIXLY_CORRECT=true getopt -o h \
--long job-and-build-number-only,help,no-job-and-build-number-only \
-n $(basename -- $0) -- "$@")
declare job_and_build_number_only=false
eval set -- "$TEMP"
while true; do
case "$1" in
--job-and-build-number-only|--no-job-and-build-number-only)
if test "$1" = --no-job-and-build-number-only; then
job_and_build_number_only=false
else
job_and_build_number_only=true
fi
shift
;;
-h|--help)
set +x
echo -e
echo
echo Options and arguments:
printf "%06s" " "
printf %-24s '--[no-]job-and-build-number-only'
echo
exit
shift
;;
--)
shift
break
;;
*)
die "internal error: $(. bt; echo; bt | indent-stdin)"
;;
esac
done
## end generated code
build_url=$1
job_name=$(echo "${build_url}" | perl -pe 's,.*?/job/,,; s,/.*,,')
build_number=$(echo "${build_url}" | perl -pe 's,.*?/(\d+)/?$,$1,')
if echo "$build_url" | grep -q -P '/job/.*/.*=.*'; then
config_dir=configurations/$(
echo -n "${build_url}" | perl -pe "$(cat <<'EOF5f2741a5cb71'
# {%perl-mode%}
s,.*?/job/.*?/,,;
s,/.*,,;
s#(.*?)=(.*?)(,|$)#axis-$1/$2/#g;
# {%/perl-mode%}
EOF5f2741a5cb71
)"
)
else
config_dir=
fi
if test "${job_and_build_number_only}" = true; then
echo "${job_name}/${build_number}"
else
echo "${scm_jenkins_jobs_dir%/}/${job_name}/${config_dir}builds/${build_number}"
fi
| true |
1dda2729d007e0c1ef96fbf5525aafc6e8e01719 | Shell | pierreozoux/openshift-origin-multi-node-cluster | /shell/setup_ansible.sh | UTF-8 | 265 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
# Import Parent Shell
#
source /vagrant/shell/parent-shell.sh
#
# Functions
#
main() {
info "Installing and Configuring Ansible."
install ansible
cp -f /vagrant/ansible/ansible-hosts.yaml /etc/ansible/hosts
ansible all -m ping
}
main | true |
1667819346e007f2091f51e7344b35bced02d303 | Shell | FAForever/faf-stack | /scripts/scheduled-leaderboard-purge.sh | UTF-8 | 1,783 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env bash
# Set all leaderboard records to inactive, where no game was played in the last 3 months
SCRIPT_BASE_DIR=$(cd "$(dirname "$0")" || exit; cd ../; pwd)
DB_CONFIG_FILE="$SCRIPT_BASE_DIR/config/faf-db/faf-db.env"
echo "Reading db config from $DB_CONFIG_FILE"
# shellcheck source=../config/faf-db/faf-db.env"
source "$DB_CONFIG_FILE"
# Ladder1v1 leaderboard
echo "Processing inactive users for ladder1v1 leaderboard"
docker exec -u root -i faf-db mysql -D "${MYSQL_DATABASE}" <<SQL_SCRIPT
CREATE TEMPORARY TABLE active_players AS
(
SELECT DISTINCT gps.playerId
FROM game_player_stats gps
INNER JOIN game_stats gs on gps.gameId = gs.id
WHERE gs.endTime > now() - INTERVAL 1 YEAR
AND gs.gameMod = 6
AND gs.validity = 0
);
UPDATE ladder1v1_rating
LEFT JOIN active_players ON ladder1v1_rating.id = active_players.playerId
SET is_active = active_players.playerId IS NOT NULL
WHERE is_active != active_players.playerId IS NOT NULL;
DROP TABLE active_players;
SQL_SCRIPT
# Global leaderboard
echo "Processing inactive users for global leaderboard"
docker exec -u root -i faf-db mysql -D "${MYSQL_DATABASE}" <<SQL_SCRIPT
CREATE TEMPORARY TABLE active_players AS
(
SELECT DISTINCT gps.playerId
FROM game_player_stats gps
INNER JOIN game_stats gs on gps.gameId = gs.id
WHERE gs.endTime > now() - INTERVAL 1 YEAR
AND gs.gameMod = 0
AND gs.validity = 0
);
UPDATE global_rating
LEFT JOIN active_players ON global_rating.id = active_players.playerId
SET is_active = active_players.playerId IS NOT NULL
WHERE is_active != active_players.playerId IS NOT NULL;
DROP TABLE active_players;
SQL_SCRIPT
| true |
548ceee2259f6da319697cfa5d444531c48c4a10 | Shell | Shicheng-Guo/RNA-Seq | /bed2gc_content | UTF-8 | 2,360 | 3.953125 | 4 | [] | no_license | #!/bin/bash
#PBS -l nodes=1:ppn=4
GENOME="mm9"
#### usage ####
usage() {
echo Program: "bed2gc_content (compute gc content for sequences corresponding to input coordinates)"
echo Author: BRIC, University of Copenhagen, Denmark
echo Version: 1.0
echo Contact: pundhir@binf.ku.dk
echo "Usage: bed2gc_content -i <file> -o <dir> [OPTIONS]"
echo "Options:"
echo " -i <file> [input BED file]"
echo " -o <dir> [output directory]"
echo "[OPTIONS]"
echo " -g <string> [genome (default: mm9)]"
echo " -h [help]"
echo
exit 0
}
#### parse options ####
while getopts i:o:g:h ARG; do
case "$ARG" in
i) INFILE=$OPTARG;;
o) OUTDIR=$OPTARG;;
g) GENOME=$OPTARG;;
h) HELP=1;;
esac
done
## usage, if necessary file and directories are given/exist
if [ -z "$INFILE" -o -z "$OUTDIR" -o "$HELP" ]; then
usage
fi
## populating files based on input genome
if [ "$GENOME" == "mm9" ]; then
GENOME_FILE="/home/pundhir/project/genome_annotations/mouse.mm9.genome"
GENOME_FASTA="/home/pundhir/software/RNAPipe/data/Mus_musculus/Ensembl/NCBIM37/Bowtie2IndexWithAbundance/bowtie2_chr/Bowtie2IndexWithAbundance.fa"
elif [ "$GENOME" == "hg19" ]; then
GENOME_FILE="/home/pundhir/project/genome_annotations/human.hg19.genome"
GENOME_FASTA="/home/pundhir/software/RNAPipe/data/Homo_sapiens/Ensembl/GRCh37/Bowtie2IndexInklAbundant/bowtie2_chr/genome_and_Abundant.fa"
else
echo "Presently the program only support analysis for mm9 or hg19"
echo
usage
fi
## create output directory, if it does not exist
if [ ! -d "$OUTDIR" ]; then
mkdir $OUTDIR
fi
## create temporary file if input is from stdin
if [ "$INFILE" == "stdin" ]; then
TMP=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)
while read LINE; do
echo ${LINE}
done | perl -ane '$line=""; foreach(@F) { $line.="$_\t"; } $line=~s/\t$//g; print "$line\n";' > $TMP
INFILE=$TMP
fi
## retrieve name of input file
NAME=$(echo $INFILE | perl -ane '$_=~s/^.*\///g; print $_;')
bedtools getfasta -fi $GENOME_FASTA -bed $INFILE -fo $OUTDIR/$NAME.fasta -s
get_gc_content.pl $OUTDIR/$NAME.fasta &>/dev/null
mv gc_out.txt $OUTDIR/gc_content.txt
bed2gc_content.R -i $OUTDIR/gc_content.txt
#fasta2seqlogo.R -i $OUTDIR/$NAME.fasta
## remove temporary file
if [ ! -z "$TMP" ]; then
rm $TMP
rm $OUTDIR/$NAME.fasta
fi
| true |
bc56a279a8c1d6a4d3662b9dad3cff433a1758ff | Shell | liangweilu/docker | /docker/third-ubuntu14.04-mysql/mysql-cluster/sql-node/run.sh | UTF-8 | 1,410 | 3.40625 | 3 | [] | no_license | #!/bin/bash
DATADIR="$(/usr/sbin/mysqld --verbose --help --log-bin-index=/tmp/tmp.index 2>/dev/null | awk '$1 == "datadir" { print $2; exit }')"
wait_for_mysql() {
for i in {1..30}; do
mysql -u root -S /tmp/mysql.sock -e "SELECT 1" &> /dev/null
if [[ "$?" == "0" ]] ; then
break
fi
sleep 1
done
if [ "$i" = 0 ]; then
echo "MySQL init process failed!"
exit 1
fi
}
if [ ! -d "$DATADIR/mysql" ]; then
echo "No database found, initializing..."
/usr/sbin/mysqld --user=mysql --initialize-insecure=on
echo "Setting credentials..."
/usr/sbin/mysqld --no-defaults --user=mysql --console --skip-networking --socket=/tmp/mysql.sock &
pid="$!"
wait_for_mysql
mysql -e " \
SET @@SESSION.SQL_LOG_BIN=0; \
DELETE FROM mysql.user WHERE user NOT IN ('mysql.sys', 'mysqlxsys'); \
CREATE USER 'root'@'%' IDENTIFIED BY 'mysql'; \
GRANT ALL ON *.* TO 'root'@'%' WITH GRANT OPTION; \
CREATE USER 'repl'@'%' IDENTIFIED BY 'repl'; \
GRANT REPLICATION SLAVE ON *.* TO 'repl'@'%'; \
CREATE USER 'mem'@'%' IDENTIFIED BY 'mem'; \
GRANT SELECT, SHOW DATABASES, SUPER, REPLICATION CLIENT, PROCESS ON *.* TO 'mem'@'%'; \
FLUSH PRIVILEGES;"
if ! kill -s TERM "$pid" || ! wait "$pid"; then
echo "MySQL init process failed!"
exit 1
fi
fi
CMD="/usr/sbin/mysqld --user=mysql --ndb-nodeid=$NODE_ID --ndb-connectstring=$CONNECTSTRING --server-id=$NODE_ID"
echo "Running: $CMD"
exec $CMD
| true |
acac0a651f1d60e950402020e5a95b3c4d3cec34 | Shell | wancom/ledctrl-rpi | /install.sh | UTF-8 | 1,231 | 3.875 | 4 | [] | no_license | #!/bin/sh
echo "ledctrl package installer"
echo
# Environments
BOOTCONFPATH="/boot/config.txt"
BOOTCONFFLAG="# Appended by ledctrl"
# Check user is root
if [ ! $(id -u) = "0" ];then
echo "Please run as a root. Abort."
exit 1
fi
# Check already installed
test ! -f ${BOOTCONFPATH} && touch ${BOOTCONFPATH}
if [ ! "$(grep -e "^${BOOTCONFFLAG}$" ${BOOTCONFPATH})" = "" ];then
echo "Already installed. Abort."
exit 0
fi
# Install
echo "Installing package..."
mkdir -p /usr/local/lib/ledctrl
cp ledctrl/start /usr/local/lib/ledctrl/
cp ledctrl/ledctrl.conf /usr/local/lib/ledctrl/ledctrl.conf.default
cp ledctrl/ledctrl.conf /etc/ledctrl.conf
echo "Installing service..."
cp service/ledctrl.service /lib/systemd/system/
systemctl enable ledctrl.service
# Update config.txt
echo "Updating ${BOOTCONFPATH}..."
if [ "$(grep -e "^${BOOTCONFFLAG}$" ${BOOTCONFPATH})" = "" ];then
echo >> ${BOOTCONFPATH}
echo "#----------------------" >> ${BOOTCONFPATH}
echo "${BOOTCONFFLAG}" >> ${BOOTCONFPATH}
echo "dtparam=pwr_led_trigger=timer" >> ${BOOTCONFPATH}
echo "#----------------------" >> ${BOOTCONFPATH}
echo >> ${BOOTCONFPATH}
fi
# Start service
systemctl start ledctrl.service
echo "Installation is finished!" | true |
28c910722f220f3cb2e634fc2cef05a4af5b5961 | Shell | AdRoll/erlmld | /priv/run_fake.sh | UTF-8 | 210 | 2.515625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
#
# MLD spawning script placeholder for testing. See run_{dynamo,kinesis}.sh.
#
echo "$0 got $@" >&2
echo "sleeping... (stderr)" >&2
echo "sleeping... (stdout)"
sleep 600
echo "exiting..."
exit 0
| true |
951c00d842f15882edc608c62fc6f0dc7c2a730f | Shell | agakow/dotfiles | /macos/setup.sh | UTF-8 | 12,167 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# set -e # Abort on error
echo "*** MACOS SETUP: START ***"
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until `macos` has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
###############################################################################
# General
###############################################################################
# Set computer name (as done via System Preferences → Sharing)
# echo 'Computer name:'
# read NEW_NAME
# if [[ NEW_NAME != "" || NEW_NAME != "\n" ]]; then
# sudo scutil --set ComputerName "${NEW_NAME}"
# sudo scutil --set LocalHostName "${NEW_NAME}"
# sudo scutil --set HostName "${NEW_NAME}"
# fi
# Disable the sound effects on boot
sudo nvram SystemAudioVolume=" "
# Save to disk (not to iCloud) by default
defaults write -g NSDocumentSaveNewDocumentsToCloud -bool false
# Disable the “Are you sure you want to open this application?” dialog
defaults write com.apple.LaunchServices LSQuarantine -bool false
# Disable auto-correct
defaults write -g NSAutomaticSpellingCorrectionEnabled -bool false
# Reveal IP address, hostname, OS version, etc. when clicking the clock in the login window
sudo defaults write /Library/Preferences/com.apple.loginwindow AdminHostInfo HostName
# Use AirDrop over every interface.
# undo using defaults write com.apple.NetworkBrowser BrowseAllInterfaces 0
defaults write com.apple.NetworkBrowser BrowseAllInterfaces 1
# Disable ‘natural’ scrolling
defaults write -g com.apple.swipescrolldirection -bool false
# Disable opening and closing window animations
defaults write -g NSAutomaticWindowAnimationsEnabled -bool false
###############################################################################
# Menu
###############################################################################
# Show bluetooth, wifi, volume, battery (with percentage) and clock
defaults write com.apple.menuextra.battery ShowPercent -bool true
defaults write com.apple.systemuiserver "NSStatusItem Visible com.apple.menuextra.bluetooth" -bool true
defaults write com.apple.systemuiserver menuExtras -array \
"/System/Library/CoreServices/Menu Extras/AirPort.menu" \
"/System/Library/CoreServices/Menu Extras/Bluetooth.menu" \
"/System/Library/CoreServices/Menu Extras/Clock.menu" \
"/System/Library/CoreServices/Menu Extras/Volume.menu"
###############################################################################
# Safari
###############################################################################
# Set up Safari for development.
defaults write com.apple.Safari IncludeInternalDebugMenu -bool true
defaults write com.apple.Safari IncludeDevelopMenu -bool true
defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled -bool true
# Add a context menu item for showing the Web Inspector in web views
defaults write -g WebKitDeveloperExtras -bool true
# Security: Warn About Fraudulent Websites
defaults write com.apple.Safari WarnAboutFraudulentWebsites -bool true
# Security: Block pop-up windows
defaults write com.apple.Safari WebKitJavaScriptCanOpenWindowsAutomatically -bool false
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaScriptCanOpenWindowsAutomatically -bool false
# Privacy: don’t send search queries to Apple
defaults write com.apple.Safari UniversalSearchEnabled -bool false
defaults write com.apple.Safari SuppressSearchSuggestions -bool true
# Disable auto-playing video
defaults write com.apple.Safari WebKitMediaPlaybackAllowsInline -bool false
defaults write com.apple.SafariTechnologyPreview WebKitMediaPlaybackAllowsInline -bool false
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2AllowsInlineMediaPlayback -bool false
defaults write com.apple.SafariTechnologyPreview com.apple.Safari.ContentPageGroupIdentifier.WebKit2AllowsInlineMediaPlayback -bool false
# Prevent Safari from opening ‘safe’ files automatically after downloading
defaults write com.apple.Safari AutoOpenSafeDownloads -bool false
###############################################################################
# Chrome
###############################################################################
# Disable Swipe controls for Google Chrome
defaults write com.google.Chrome.plist AppleEnableSwipeNavigateWithScrolls -bool false
###############################################################################
# Mail
###############################################################################
# Disable inline attachments (just show the icons)
defaults write com.apple.mail DisableInlineAttachmentViewing -bool true
# Copy email addresses as `foo@example.com` instead of `Foo Bar <foo@example.com>`
defaults write com.apple.mail AddressesIncludeNameOnPasteboard -bool false
###############################################################################
# Terminal & iTerm2
###############################################################################
# Only use UTF-8 in Terminal.app
defaults write com.apple.terminal StringEncodings -array 4
# Don’t display the annoying prompt when quitting iTerm
defaults write com.googlecode.iterm2 PromptOnQuit -bool false
###############################################################################
# Interfaces: trackpad, mouse, keyboard, bluetooth, etc.
###############################################################################
# Set a really fast keyboard repeat rate.
defaults write -g KeyRepeat -int 2
defaults write -g InitialKeyRepeat -int 15
# next one is required to take effect.
# Disable press-and-hold for accent keys in favor of key repeat.
defaults write -g ApplePressAndHoldEnabled -bool false
# Disable key repeat in favor of press-and-hold for accent keys.
# defaults write -g ApplePressAndHoldEnabled -bool true
# Set language and text formats. (GBR and Imperial Units)
defaults write -g AppleLanguages -array "en"
defaults write -g AppleLocale -string "en_GB@currency=GBP"
# Hot corners
# Possible values:
# 0: no-op
# 2: Mission Control
# 3: Show application windows
# 4: Desktop
# 5: Start screen saver
# 6: Disable screen saver
# 7: Dashboard
# 10: Put display to sleep
# 11: Launchpad
# 12: Notification Center
# Show Desktop if mouse hits the bottom right corner of the screen.
defaults write com.apple.dock wvous-br-corner -int 4
defaults write com.apple.dock wvous-br-modifier -int 0
###############################################################################
# Screen
###############################################################################
# Require password immediately after sleep or screen saver.
# defaults write com.apple.screensaver askForPassword -int 1
# defaults write com.apple.screensaver askForPasswordDelay -int 0
SCREENSHOTS="$HOME/Desktop/screenshots"
mkdir -p $SCREENSHOTS
# Save screenshots to ~/Desktop/screenshots
defaults write com.apple.screencapture location -string $SCREENSHOTS
# Save screenshots in PNG format (other options: BMP, GIF, JPG, PDF, TIFF)
defaults write com.apple.screencapture type -string "jpg"
# Disable shadow in screenshots
defaults write com.apple.screencapture disable-shadow -bool true
# Disable and kill Dashboard
defaults write com.apple.dashboard mcx-disabled -boolean YES; killall Dock
###############################################################################
# Finder
###############################################################################
# Show the ~/Library folder.
chflags nohidden ~/Library
# Show icons for hard drives, servers, and removable media on the desktop.
defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true
defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true
defaults write com.apple.finder ShowHardDrivesOnDesktop -bool false
defaults write com.apple.finder ShowMountedServersOnDesktop -bool true
# Always open everything in Finder's column view.
defaults write com.apple.finder FXPreferredViewStyle Nlsv
# Show hidden files
defaults write com.apple.finder AppleShowAllFiles -bool true
# Disable the warning when changing file extensions
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false
# Allow text-selection in Quick Look
defaults write com.apple.finder QLEnableTextSelection -bool true
# Show status bar (bottom of finder window)
defaults write com.apple.finder ShowStatusBar -bool true
# Show path bar (bottom of finder window)
defaults write com.apple.finder ShowPathbar -bool true
# Display full POSIX path as Finder window title
defaults write com.apple.finder _FXShowPosixPathInTitle -bool true
# Show file extensions by default
defaults write -g AppleShowAllExtensions -bool true
# Disable auto-correct
defaults write -g NSAutomaticSpellingCorrectionEnabled -bool false
# Expand print panel by default
defaults write -g PMPrintingExpandedStateForPrint -bool true
# Expand save panel by default
defaults write -g NSNavPanelExpandedStateForSaveMode -bool true
###############################################################################
# Dock
###############################################################################
# Show indicator lights for open applications in the Dock
defaults write com.apple.dock show-process-indicators -bool true
# Remove the animation when hiding/showing the Dock
defaults write com.apple.dock autohide-time-modifier -float 0
# Do not show recent applications in Dock
defaults write com.apple.dock show-recents -bool false
# Automatically hide and show the Dock
# defaults write com.apple.dock autohide -bool true
###############################################################################
# Mac App
###############################################################################
# Enable the automatic update check
defaults write com.apple.SoftwareUpdate AutomaticCheckEnabled -bool true
# Check for software updates daily, not just once per week
defaults write com.apple.SoftwareUpdate ScheduleFrequency -int 1
# Download newly available updates in background
defaults write com.apple.SoftwareUpdate AutomaticDownload -int 1
# Turn on app auto-update
defaults write com.apple.commerce AutoUpdate -bool true
# Allow the App Store to reboot machine on macOS updates
defaults write com.apple.commerce AutoUpdateRestartRequired -bool true
###############################################################################
# Photos
###############################################################################
# Prevent Photos from opening automatically when devices are plugged in
defaults -currentHost write com.apple.ImageCapture disableHotPlug -bool true
###############################################################################
# NordVpn
###############################################################################
defaults write com.nordvpn.osx SUAutomaticallyUpdate -bool true
defaults write com.nordvpn.osx SUEnableAutomaticChecks -bool true
defaults write com.nordvpn.osx SUSendProfileInfo -bool false
defaults write com.nordvpn.osx SUEnableAutomaticChecks -bool true
defaults write com.nordvpn.osx appIcon -bool true
defaults write com.nordvpn.osx automaticUpdates -bool true
defaults write com.nordvpn.osx appDataCollectionAccepted -bool true
defaults write com.nordvpn.osx helpUsImprove -bool false
NORDVPN="$HOME/Library/Preferences/com.nordvpn.osx.plist"
/usr/libexec/PlistBuddy -c 'Delete :connectOnLaunch:openvpn_udp' $NORDVPN >/dev/null 2>&1
/usr/libexec/PlistBuddy -c "Add :connectOnLaunch:openvpn_udp:firstItem string 'United Kingdom'" $NORDVPN
/usr/libexec/PlistBuddy -c "Add :connectOnLaunch:openvpn_udp:secondItem string 'Recommended server'" $NORDVPN
/usr/libexec/PlistBuddy -c "Add :connectOnLaunch:openvpn_udp:type string 'country'" $NORDVPN
###############################################################################
killall SystemUIServer
killall Dock
killall Finder
echo "Some changes will not take effect until you reboot your machine"
echo "*** MACOS SETUP: DONE ***"
| true |
587ae10580015c0b0ab53b5af928b66df93caddf | Shell | qbilinux/qbilinux | /qbilinux/01_minimum/devel.txz/kernel_headers_rpi/PackageBuild.kernel_headers-5.15.36 | UTF-8 | 2,820 | 3.21875 | 3 | [] | no_license | #!/bin/sh -x
######################################################################
#url="https://www.kernel.org/pub/linux/kernel/v4.x/linux-4.11.7.tar.xz
# git://github.com/sfjro/aufs5-standalone.git"
version=5.15.36
url="https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-${version%.*}.tar.xz
https://cdn.kernel.org/pub/linux/kernel/v5.x/patch-${version}.xz"
# git://github.com/sfjro/aufs5-standalone.git"
pkgbase=kernel_headers_rpi
apply_arch="armv6l armv7l aarch64"
arch=`uname -m`
build=1
src=linux-${version%.*}
patchfiles="cjktty-kernel-5.15.patch cjktty-kernel-font-JP.patch overlayfs-allow_unprivileged_mounts.patch"
arm_patchfiles="rpi-5.15.36-20220508-9af1cc301.diff"
patchfiles_not_used="tuxonice-for-linux-5.0.18.patch 4.8-morse_code_panics.patch 4.8-panic_beep.patch"
case $arch in
armv6l) ext=armv6l_rpi1 ;;
armv7l) ext=armv7l_rpi2 ;;
aarch64) ext=aarch64_rpi4 ;;
*) ext=aarch64_rpi3 ;;
esac
vers=${version}_${ext}
configfile=config-${version%.*}-${ext}
#git_branch=aufs5.4
#aufs_git_vers=5a1e53d
OPT_CONFIG=""
DOCS=""
compress=txz
SRC_URL=${SRC_URL:-"https://qbilinux.org/pub/source/"}
SRC_DIR=${SRC_DIR:-"/home/archives/source/"}
######################################################################
source /usr/src/qbilinux/PackageBuild.def
# Framework hook: bring the unpacked base tree S[i] up to $version and
# apply the local patch queue from $W.
do_prepare() {
cd "${S[$1]}"
# Incremental patch from the base kernel release up to $version.
xz -dc "$W/patch-${version}.xz" | patch -p1
for p in $patchfiles ; do
patch -p1 < "$W/$p"
done
}
# Framework hook: stage a fresh build tree B[i] from the patched source
# tree S[i], apply the Raspberry Pi patch set on ARM targets, and drop
# the per-board kernel .config into place.
do_config() {
if [ -d "${B[$1]}" ] ; then rm -rf "${B[$1]}" ; fi
cp -a "${S[$1]}" "${B[$1]}"
cd "${B[$1]}" || exit 255
# patch -Np1 -i $W/source/aufs5-standalone/aufs5-kbuild.patch
# patch -Np1 -i $W/source/aufs5-standalone/aufs5-base.patch
# patch -Np1 -i $W/source/aufs5-standalone/aufs5-mmap.patch
# patch -Np1 -i $W/source/aufs5-standalone/aufs5-standalone.patch
# cp -av $W/source/aufs5-standalone/Documentation .
# cp -av $W/source/aufs5-standalone/fs .
# cp -pv $W/source/aufs5-standalone/include/uapi/linux/aufs_type.h include/uapi/linux
# POSIX test: '==' and '-o' inside [ ] are non-portable under this
# script's /bin/sh shebang; use '=' with two separate tests instead.
if [ "$arch" = armv7l ] || [ "$arch" = aarch64 ] ; then
# $W/$arm_patchfiles is deliberately unquoted: the variable may hold a
# whitespace-separated list of patches, which cat concatenates for patch(1).
cat $W/$arm_patchfiles | patch -Np1 -i -
chmod 755 drivers/net/wireless/realtek/rtl8192cu/{clean,runwpa,wlan0dhcp}
fi
cp -p "$W/${configfile}" .config
}
# Framework hook: nothing to compile for a headers-only package; just
# enter the build tree so subsequent hooks run from there.
do_build() {
cd ${B[$1]}
}
# Framework hook: export the sanitized userspace kernel headers into the
# package tree $P and strip kbuild bookkeeping files left behind.
do_install() {
cd "${B[$1]}" || exit 255
# Branch directly on make's status instead of inspecting $? afterwards.
if ! make headers_install INSTALL_HDR_PATH="$P/usr"; then
echo "make install error. $0 script stop"
exit 255
fi
# headers_install leaves .install / ..install.cmd markers behind; '+'
# batches the deletions into as few rm invocations as possible.
find "$P/usr/include" \( -name ".install" -o -name "..install.cmd" \) \
-exec rm {} +
}
# Framework hook: archive the board config(s) and the RPi patch into the
# package documentation directory, then build the final package with
# makepkg. $W, $P, $docdir, $pkg and $compress are presumably provided
# by the sourced PackageBuild.def/PackageBuild.func framework.
do_package() {
# NOTE(review): $configfileARMv6 / $configfileARMv7 / $configfileARM64
# are not defined anywhere in this file (only $configfile is); if they
# expand empty, only $arm_patchfiles is copied here -- confirm against
# the PackageBuild framework.
for i in $configfileARMv6 $configfileARMv7 $configfileARM64 $arm_patchfiles ; do
cp $W/$i $docdir/$src/$i
gzip $docdir/$src/$i
done
for i in $pkgbase ; do
cd $P
# The heredoc answers makepkg's interactive prompts ("y", then "1").
/sbin/makepkg $W/$pkg.$compress <<EOF
y
1
EOF
done
}
source /usr/src/qbilinux/PackageBuild.func
| true |
e5f690870fd2b1c4e43b2e2030116de0f860de8e | Shell | tkutcher/hello | /HTML/run.sh | UTF-8 | 181 | 2.796875 | 3 | [] | no_license | # !/usr/bin/sh
# Tim Kutcher
# HTML/run.sh
#
# Open hello.html in Google Chrome when invoked with -c or --chrome;
# any other argument (or none) is a no-op.
case "$1" in
-c | --chrome)
open -a "Google Chrome" hello.html
;;
esac
| true |
4bba1144e5d81ba2e6bd15ce477e058ff45bfb7d | Shell | webercoder/gocean | /scripts/pre-commit | UTF-8 | 379 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Pre-commit gate: lint, build, then test; abort with the failing
# command's status so the commit is rejected on the first failure.
# Fixes vs. the original: [[ ]] was a bashism under the #!/bin/sh
# shebang; echo "\n" is non-portable (printf is used instead); the
# repeated status=$?/if boilerplate is collapsed into 'cmd || exit $?';
# and the "Runing" typo is corrected.
printf '\nRunning revive linter...\n'
revive --formatter friendly ./... || exit $?
printf 'Building...\n'
go build || exit $?
printf 'Running tests ...\n'
go clean -testcache
go test ./... || exit $?
printf 'pre-commit complete.\n\n'
| true |
becfcc99f42deb2b2fd12265d81bb7acfebd9a5b | Shell | ludwig/docker-aptly | /create-volume.sh | UTF-8 | 249 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Create the `aptly-data` volume if it doesn't exist.
readonly volume=aptly-data
# Branch directly on the inspect command instead of checking $? on the
# following line; inspect fails exactly when the volume is missing.
if ! docker volume inspect -f '{{ .Mountpoint }}' "${volume}" >/dev/null 2>&1; then
set -x
docker volume create --name "${volume}"
fi
| true |
b039543af40c26ae8a450a552b4b93f40a9fff14 | Shell | williamcanin/modbl | /install | UTF-8 | 6,450 | 3.984375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
#
DIR_MODBL_EXEC="/usr/bin"
MODBL_FILE="modbl"
# Replace the %EOF placeholder written by the installer heredoc with a
# real EOF delimiter inside the generated modbl script.
_CORRECTING(){
sed -i 's/%EOF/EOF/g' "$DIR_MODBL_EXEC/$MODBL_FILE"
}
# Mark the generated ModBlackList script as executable.
_PERMISSIONS(){
chmod +x "$DIR_MODBL_EXEC/$MODBL_FILE"
}
function _INIT(){
# Install the modbl executable. Refuses to run without root; otherwise
# writes the complete ModBlackList script to $DIR_MODBL_EXEC/$MODBL_FILE
# via the unquoted heredoc below: $VARS expand at install time, while
# \$ sequences stay literal for the generated script. The generated
# script's own inner heredoc terminator is written as %EOF and fixed up
# afterwards by _CORRECTING.
#
# NOTE(review): inside the generated _ADD_MODULE_BLACKLIST below,
# 'sed - "s|# List Empty|#|g" ...' appears to be missing the -i flag,
# so the "# List Empty" placeholder is never rewritten in place --
# confirm and change to 'sed -i'.
if [[ "$(id -u)" != "0" ]]; then
printf "\n[x] You need to be root to use the ModBlackList (or sudo).\n\n"
else
/bin/cat << EOF > $DIR_MODBL_EXEC/$MODBL_FILE
#!/bin/bash
# --------------------------------------------------------------------------------
# Name: ModBlackList
# Executable: modbl
# Type: Shell
# Description: ModBlackList is a simple script to create a list of blocked modules.
# Credits: William C. Canin <http://williamcanin.com>
# Raw: http://git.io/modbl
# Province: Brazil/SP
# License: The MIT License (MIT)
# Copyright (c) 2015 William C. Canin <william.costa.canin@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ******************************************************************************
# --------------------------------------------------------------------------------
#
# Variables global
NAME="ModBlackList"
VERSION="1.0.2"
EXEC="modbl"
DIR_BLACKLIST="/etc/modprobe.d"
BLACKLIST_CONF="blacklist.conf"
# Colors
BRed='\e[1;31m' # Red
BGreen='\e[1;32m' # Green
BYellow='\e[1;33m' # Yellow
BBlue='\e[1;34m' # Blue
None='\033[00m' # Close
# Begin Functions
function _ADD_MODULE_BLACKLIST(){
# Create header blacklist in "/etc/modprobe.d".
if [[ ! -f "\$DIR_BLACKLIST/\$BLACKLIST_CONF" ]]; then
/bin/cat << EOF > \$DIR_BLACKLIST/\$BLACKLIST_CONF
# ================================================== #
# List of blocked modules. Created by \$NAME. #
# ================================================== #
# List Empty
%EOF
fi
SEARCH_MODULE="\$(grep -i -n -w "\$1" \$DIR_BLACKLIST/\$BLACKLIST_CONF | cut -d':' -f1)"
if [[ ! -z "\$SEARCH_MODULE" ]]; then
printf "\n\${BYellow}[ WARNING ] Already exists in this module blacklist.\${None}\n\n"
else
# Insert blocked modules.
sed - "s|# List Empty|#|g" \$DIR_BLACKLIST/\$BLACKLIST_CONF
echo "blacklist \$1" >> \$DIR_BLACKLIST/\$BLACKLIST_CONF
printf "\n\${BYellow}Module (\$1)\${None} \${BGreen}added\${None} \${BYellow}to the blacklist.\${None}\n\${BGreen}Done ✔\${None}\n\n"
fi
}
function _VIEW_BACKLIST(){
if [[ -f "\$DIR_BLACKLIST/\$BLACKLIST_CONF" ]]; then
cat \$DIR_BLACKLIST/\$BLACKLIST_CONF | less
else
printf "\nThe blacklist of \"Modbl\" modules does not exist. :(\n"
fi
}
function _REMOVE_MODULE_BLACKLIST(){
SEARCH_MODULE="\$(grep -i -n -w "\$1" \$DIR_BLACKLIST/\$BLACKLIST_CONF | cut -d':' -f1)"
if [[ -z "\$SEARCH_MODULE" ]]; then
printf "\n\${BYellow}[ WARNING ] No module with this name to be removed.\${None}\n\n"
else
# Remove module of blacklist
sed -i \$SEARCH_MODULE"d" \$DIR_BLACKLIST/\$BLACKLIST_CONF
printf "\n\${BYellow}Module (\$1)\${None} \${BRed}removed\${None} \${BYellow}from the blacklist.\${None}\n\${BGreen}Done ✔\${None}\n\n"
fi
}
function _HELP(){
printf "\n======================================================"
printf "\n Options Description\n"
printf "======================================================"
printf "\n\n-a (--add) Add module in the blacklist\n"
printf "\n\n-r (--remove) Remove module in the blacklist\n"
printf "\n\n-v (--view) See the blacklist modules\n"
printf "\n\nuninstall Uninstall \$NAME\n\n"
printf "\nUsage: \$0 { -a <module_name> | -r <module_name> | uninstall | help } \n\n"
printf "\n\n\$NAME version \$VERSION (c) Copyright \$(date +%Y)\n\n"
}
function _UNINSTALL(){
if [[ ! -f "\$DIR_BLACKLIST/\$BLACKLIST_CONF" ]]; then
rm -f /usr/bin/\$EXEC
printf "\n\${BGreen}Complete uninstall ✔\${None}\n\n"
else
printf "\nYou are uninstalling the \$NAME.\nWant to delete the blacklist modules?(y/n)\n\n"
printf "Reply > "
read resp_uninstall
case "\$resp_uninstall" in
y|Y)
rm -f /usr/bin/\$EXEC
rm -f \$DIR_BLACKLIST/\$BLACKLIST_CONF
printf "\n\${BGreen}Complete uninstall ✔\${None}\n\n"
;;
n|N)
rm -f /usr/bin/\$EXEC
printf "\n\${BGreen}Complete uninstall ✔\${None}\n\n"
printf "\n\${BYellow}[ WARNING ] The blacklist was maintained.\${None}\n\n"
;;
esac
fi
}
# End Functions
# Main
if [[ "\$(id -u)" != "0" ]]; then
printf "\n\${BRed}[x] You need to be root to use the \$NAME (or sudo).\${None}\n\n"
else
case "\$1" in
-a|--add)
if [[ -z "\$2" ]]; then
bash \$0
else
_ADD_MODULE_BLACKLIST \$2
fi
;;
-r|--remove)
if [[ -z "\$2" ]]; then
bash \$0
else
_REMOVE_MODULE_BLACKLIST \$2
fi
;;
uninstall)
_UNINSTALL
;;
-v|--view)
_VIEW_BACKLIST
;;
help)
_HELP
;;
*)
printf "\n\${BYellow}[ WARNING ] You must pass the module as a parameter to put on blacklist.\${None}\n"
printf "\nUsage: \$0 { -a <module_name> | -r <module_name> | -v | uninstall | help } \n"
printf "\n\n\n\$NAME version \$VERSION (c) Copyright \$(date +%Y)\n\n"
;;
esac
fi
EOF
# Post-process and activate the freshly written script.
_CORRECTING
_PERMISSIONS
printf "\n\e[1;32mInstallation completed ✔\033[00m\n"
printf "\nUsage: $MODBL_FILE { -a <module_name> | -r <module_name> | -v | uninstall | help } \n\n"
fi
}
# Start install
_INIT | true |
7e31c8e87c7a9226b52985cc0671ffa1415a4f2b | Shell | DanielThurau/DTG-Benchmarking | /Vidio/configure | UTF-8 | 1,267 | 2.75 | 3 | [] | no_license | #!/bin/bash
#echo "Desired Directory: "
#read input_variable
#repo=$input_variable
repo=$1
workingDir="$(pwd)"
echo "$repo"
echo "$workingDir"
# Regenerate every Vidio script from its blank template: delete the old
# copy, substitute the working dir and repo via findAndReplace.py, then
# make the result executable. Replaces five hand-unrolled copies of the
# same three steps; all expansions are quoted so paths/args with spaces
# survive.
for target in MasterVidio scripts/VIDIOBASH.sh scripts/VIDIOMETA.sh scripts/VIDIODEPTHTESTER.sh scripts/VIDIOFILESIZE.sh; do
blank="$workingDir/resources/blanks/$(basename "$target")"
sudo rm "$workingDir/$target"
sudo python scripts/findAndReplace.py "$blank" "$workingDir/$target" "$workingDir" "$repo"
sudo chmod 777 "$workingDir/$target"
done
| true |
9c543db4d6df0a13c405a85a2be7cdd21faa8efd | Shell | bharathappali/appsody-criu | /criu-script.sh | UTF-8 | 981 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# Ensure the CRIU dump directory and its two log files exist.
create_dump_folder() {
local dump_dir=/home/criu-dump-location
mkdir -p "$dump_dir"
touch "$dump_dir/dump.log" "$dump_dir/restore.log"
}
# Launch the application jar in the background, capturing stdout+stderr.
run_app() {
# NOTE(review): this writes ./out.log (relative) while
# check_server_started reads /out.log -- these only match when the
# script runs with cwd=/; confirm the container entrypoint's cwd.
java -jar /app.jar > out.log 2>&1 &
}
# Print the PID(s) of any running java process on a single line.
get_app_pid() {
# The [j]ava bracket trick keeps this pipeline's own processes from
# matching, replacing the fragile `grep java | grep -v grep` idiom.
# The command substitution is left unquoted on purpose so multiple
# PIDs collapse onto one space-separated line, as the original did.
echo $(ps -ef | awk '/[j]ava/ { print $2 }')
}
# Block until Tomcat logs its startup line in /out.log, polling once per
# second for up to 100 attempts. Returns 0 as soon as the line appears;
# exits the whole script with status 1 if the server never comes up.
check_server_started() {
local attempt
for attempt in $(seq 1 100); do
# grep -q replaces the original cat|grep + `local init=$?` dance;
# 2>/dev/null keeps a not-yet-created log file quiet.
if grep -q "Tomcat started on port" /out.log 2>/dev/null; then
return 0
fi
if [ "$attempt" -eq 100 ]; then
exit 1
fi
sleep 1
done
}
# Checkpoint process $1 with CRIU into the image store, writing the CRIU
# log to $2. Waits for the server to come up first; check_server_started
# exits the whole script on timeout, so reaching the criu call implies
# success.
initiate_dump() {
mkdir -p /home/criu-dump-location/dump-image-store
cd /home/criu-dump-location/dump-image-store
check_server_started
# $? is check_server_started's return status (0 once the startup line
# was seen in the log).
if [ $? -eq 0 ]; then
# -j (--shell-job); --tcp-established keeps live TCP connections in
# the dump; -v4 is maximum verbosity.
criu dump -t "$1" --tcp-established -j -v4 -o "$2"
fi
}
# Main sequence: prepare dump dirs, start the app, then checkpoint it.
create_dump_folder
run_app
app_pid=$(get_app_pid)
# NOTE(review): $app_pid is unquoted; if more than one java process is
# running, criu would receive extra arguments -- confirm exactly one
# java process exists at this point.
initiate_dump $app_pid "/home/criu-dump-location/dump.log"
| true |
5871b4835822628df06470134931ab5463ccf9f7 | Shell | pianolinux/infrastructure-as-code | /scripts/new_client_ovpn.sh | UTF-8 | 2,539 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Generate a self-contained OpenVPN client profile ($OUTPUT/openvpn_<name>.ovpn)
# for the client named in $1: create a client keypair, have it signed by the
# CA material in $ROOT/secrets, then inline the CA cert, client cert/key and
# TLS-auth key into the base config. All paths are quoted, and printf is used
# instead of echo because POSIX sh implementations disagree on whether echo
# interprets '\n' (the original emitted literal backslash-n under bash-as-sh).
echo "Generating OpenVPN client configuration..."
OUTPUT=$ROOT/openvpn
SOURCE=$ROOT/secrets
if [ -n "$1" ]; then
echo "Client name $1"
mkdir -p "$OUTPUT"
# -f: on a first run the previous artifacts do not exist yet.
rm -f "$OUTPUT/keystore-openvpn-client-$1.jks" "$OUTPUT/openvpn_client_cert_and_key_$1.p12"
## Create openvpn-client keystore
keytool -noprompt -keystore "$OUTPUT/keystore-openvpn-client-$1.jks" -genkey -alias selfsigned -dname "CN=openvpn" -storetype PKCS12 -keyalg RSA -keysize 2048 -validity 360 -storepass secret -keypass secret
## Sign openvpn-client certificate
keytool -noprompt -keystore "$OUTPUT/keystore-openvpn-client-$1.jks" -alias selfsigned -certreq -file "$OUTPUT/openvpn_client_csr_$1.pem" -storepass secret
openssl x509 -extfile "$SOURCE/openssl.cnf" -extensions extended -req -CA "$SOURCE/openvpn_ca_cert.pem" -CAkey "$SOURCE/openvpn_ca_key.pem" -in "$OUTPUT/openvpn_client_csr_$1.pem" -out "$OUTPUT/openvpn_client_cert_$1.pem" -days 360 -CAcreateserial -passin pass:secret
## Import CA and openvpn-client signed certificate into openvpn keystore
keytool -noprompt -keystore "$OUTPUT/keystore-openvpn-client-$1.jks" -alias CARoot -import -file "$SOURCE/openvpn_ca_cert.pem" -storepass secret
keytool -noprompt -keystore "$OUTPUT/keystore-openvpn-client-$1.jks" -alias selfsigned -import -file "$OUTPUT/openvpn_client_cert_$1.pem" -storepass secret
### Extract signed openvpn-client certificate
keytool -noprompt -keystore "$OUTPUT/keystore-openvpn-client-$1.jks" -exportcert -alias selfsigned -rfc -storepass secret -file "$OUTPUT/openvpn_client_cert_$1.pem"
### Extract openvpn-client key
keytool -noprompt -srckeystore "$OUTPUT/keystore-openvpn-client-$1.jks" -importkeystore -srcalias selfsigned -destkeystore "$OUTPUT/openvpn_client_cert_and_key_$1.p12" -deststoretype PKCS12 -srcstorepass secret -storepass secret
openssl pkcs12 -in "$OUTPUT/openvpn_client_cert_and_key_$1.p12" -nocerts -nodes -passin pass:secret -out "$OUTPUT/openvpn_client_key_$1.pem"
# Assemble the final .ovpn profile with everything inlined.
cat "$SOURCE/openvpn_base.conf" > "${OUTPUT}/openvpn_$1.ovpn"
printf '<ca>\n' >> "${OUTPUT}/openvpn_$1.ovpn"
cat "${SOURCE}/openvpn_ca_cert.pem" >> "${OUTPUT}/openvpn_$1.ovpn"
printf '</ca>\n<cert>\n' >> "${OUTPUT}/openvpn_$1.ovpn"
cat "${OUTPUT}/openvpn_client_cert_$1.pem" >> "${OUTPUT}/openvpn_$1.ovpn"
printf '</cert>\n<key>\n' >> "${OUTPUT}/openvpn_$1.ovpn"
cat "${OUTPUT}/openvpn_client_key_$1.pem" >> "${OUTPUT}/openvpn_$1.ovpn"
printf '</key>\n<tls-auth>\n' >> "${OUTPUT}/openvpn_$1.ovpn"
cat "${SOURCE}/openvpn_ta.pem" >> "${OUTPUT}/openvpn_$1.ovpn"
printf '</tls-auth>\n' >> "${OUTPUT}/openvpn_$1.ovpn"
echo "done."
else
echo "Missing client name. Skipping!"
fi
| true |
144aa09632591ca60b62c2a4eb05427872108d88 | Shell | rodrimmb/a_star | /util/install.sh | UTF-8 | 1,842 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Installer for the AStar / Visual Search project: installs curl, RVM +
# Ruby 2.0.0, MongoDB 2.6.2, Graphviz, then clones the source tree and
# installs its gems. (Comments translated to English; the user-facing
# echo strings are runtime output and left untouched.)
sudo apt-get update
clear
echo "Este es le instalador de Visual Search"
echo
echo "Instalamos CURL"
echo
# Install curl, which will be needed to install certain components
sudo apt-get install curl
clear
echo
echo "Intalamos RVM"
echo
\curl -sSL https://get.rvm.io | bash
source ~/.rvm/scripts/rvm
clear
echo
echo "Intalamos Ruby"
echo
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm"
rvm get stable
rvm install 2.0.0
rvm reload
rvm use 2.0.0
ruby -v
clear
echo
echo "Intalamos MongoDB"
echo
# Download and install MongoDB
curl -O http://downloads.mongodb.org/linux/mongodb-linux-x86_64-2.6.2.tgz
tar -zxvf mongodb-linux-x86_64-2.6.2.tgz
mkdir -p mongodb
cp -R -n mongodb-linux-x86_64-2.6.2/ mongodb
rm -rf mongodb-linux-x86_64-2.6.2
rm -rf mongodb-linux-x86_64-2.6.2.tgz
export PATH=~/mongodb/mongodb-linux-x86_64-2.6.2/bin:$PATH
# Create a directory with the right permissions where the DBs will live
sudo mkdir -p /data/db
sudo chmod 774 /data
sudo chmod 774 /data/db
# Start MongoDB for the first time, which inserts a series of required parameters and DBs
#cd ~/
#sudo ./mongodb/mongodb-linux-x86_64-2.6.2/bin/mongod --fork --logpath /var/log/mongodb.log
# Stop MongoDB to continue with the installation
#sudo ./mongodb/mongodb-linux-x86_64-2.6.2/bin/mongod --shutdown
clear
echo
echo "Intalamos Graphviz"
echo
# Install the library we use to draw the trees
sudo apt-get install graphviz
clear
echo
echo "Descargar codigo fuente de AStar"
echo
# Put the source code in the user's home directory
cd ~/
git clone https://github.com/rodrimmbdev/a_star.git
cd ~/a_star
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm"
rvm use 2.0.0
# Install the Bundler gem, which helps us manage gems
gem install bundler
# Install the required gems
cd ~/a_star
bundle
866916828dedbbef3a758b7eaa0af991ad85d30e | Shell | veiairn/shellscipt | /webBackup.sh | UTF-8 | 501 | 3.34375 | 3 | [] | no_license | #!/bin/bash
export PATH=$PATH:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin
# Daily tarball backup of the web root, keeping $backup_count days.
src=/data/www/XXX
dest=/root/alan/XXX
pkg_name=XXX-$(date +%F)
backup_count=4
# Nothing to back up if the web root is missing.
[[ ! -d $src ]] && exit 255
mkdir -p "$dest"
tar -czf "$dest/${pkg_name}.tar.gz" -C /data/www/ html
# Capture tar's status here: the original checked $? only after the du
# pipeline below, so it never actually saw whether tar succeeded.
tar_status=$?
# -sm: size in whole megabytes. The original's -hsm mixed -h (human
# suffixes like "12M") with -m, which would break the numeric -gt test.
size=$(du -sm "$dest/${pkg_name}.tar.gz" | awk '{print $1}')
if [[ $tar_status -eq 0 ]] && [[ $size -gt 10 ]]; then
echo "success"
# Quote the pattern so find receives it instead of the shell globbing
# it against the current directory.
find "$dest" -type f -iname '*.gz' -mtime +"${backup_count}" -delete
else
echo "failed"
rm -rf "$dest/${pkg_name}.tar.gz"
fi
| true |
50cc7a8ff837f72cfe56cdb3afdbd0e2ebbb6219 | Shell | YaN-3k/arch_install | /arch_install.sh | UTF-8 | 15,699 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env sh
# Drives to install to.
DRIVE='/dev/sda'
# Set partitioning method to auto/manual
PARTITIONING='auto'
# Set boot type on efi/legacy
# if blank automaticly detect
BOOT_TYPE=''
# Partitions (only in use if PARTITIONING is set to auto);
# HOME (set 0 or leave blank to not create home partition).
HOME_SIZE='10' #GB (recommend 10GB)
# VAR (set 0 or leave blank to not create var partition).
VAR_SIZE='5' #GB (recommend 5GB)
# SWAP (set 0 or leave blank to not create swap partition).
SWAP_SIZE='2' #GB (recommend square root of ram)
# EFI (set 0 or leave blank to not create efi partition).
# is used if the system is to be installed on "uefi"
EFI_SIZE='512' #MB (recommend (or if not set) 512MB)
# System language.
LANG='en_US'
# System timezone (leave blank to be prompted).
TIMEZONE='America/New_York'
# System hostname (leave blank to be prompted).
HOSTNAME='host'
# Root password (leave blank to be prompted).
ROOT_PASSWORD=''
# Main user to create (by default, added to wheel group, and others).
USER_NAME=''
# The main user's password (leave blank to be prompted).
USER_PASSWORD=''
KEYMAP='us'
#KEYMAP='dvorak'
# Choose your video driver
# For Intel
VIDEO_DRIVER="i915"
# For nVidia
#VIDEO_DRIVER="nouveau"
# For ATI
#VIDEO_DRIVER="radeon"
# For generic stuff
#VIDEO_DRIVER="vesa"
# Choose hosts file type or leave blank
# Credit to https://github.com/StevenBlack/hosts
# Hosts file type:
# unified (adware + malware)
# fakenews
# gambling
# porn
# social
# fakenews-gambling
# fakenews-porn
# fakenews-social
# gambling-porn
# gambling-social
# porn-social
# fakenews-gambling-porn
# fakenews-gambling-social
# fakenews-porn-social
# gambling-porn-social
# fakenews-gambling-porn-social
HOSTS_FILE_TYPE="unified"
# Customize to install other packages
install_packages() {
# General utilities/libraries
packages="pkgfile reflector htop python python-pip rfkill rsync sudo unrar unzip wget zip maim ffmpeg cronie zsh stow xdg-user-dirs libnotify tlp exa"
deamons="pkgfile-update.timer cronie tlp"
# Sounds
packages="$packages alsa-utils pulseaudio pulseaudio-alsa"
# Development packages
packages="$packages git cmake gdb qemu libvirt virt-manager iptables ebtables dnsmasq bridge-utils openbsd-netcat ovmf"
deamons="$deamons iptables libvirtd"
# Network
packages="$packages dhcpcd iwd"
deamons="$deamons dhcpcd iwd"
# Fonts
packages="$packages ttf-inconsolata ttf-dejavu ttf-font-awesome ttf-joypixels"
# Xorg
packages="$packages xorg-server xorg-xinit xorg-xsetroot xwallpaper xcape xclip slock unclutter arc-gtk-theme"
# WM
packages="$packages bspwm sxhkd picom dunst polybar xdo xdotool"
# Browser
packages="$packages qutebrowser"
# Terminal apps
packages="$packages alacritty ranger-git vifm tmux neomutt abook neovim"
# Multimedia
packages="$packages mpv mpd mpc ncmpcpp"
# Communicators
packages="$packages irssi telegram-desktop"
# For laptops
packages="$packages xf86-input-libinput"
# Office
packages="$packages libreoffice-still zathura zathura-pdf-mupdf sxiv"
# Bluetooth
packages="$packages bluez bluez-utils pulseaudio-bluetooth"
deamons="$deamons bluetooth"
# Printers
packages="$packages ghostscript gsfonts gutenprint foomatic-db-gutenprint-ppds cups libcups system-config-printer"
deamons="$deamons cups-browsed"
# Video drivers
if [ "$VIDEO_DRIVER" = "i915" ]; then
packages="$packages xf86-video-intel libva-intel-driver"
elif [ "$VIDEO_DRIVER" = "nouveau" ]; then
packages="$packages xf86-video-nouveau"
elif [ "$VIDEO_DRIVER" = "radeon" ]; then
packages="$packages xf86-video-ati"
elif [ "$VIDEO_DRIVER" = "vesa" ]; then
packages="$packages xf86-video-vesa"
fi
# Python pip
pip_packages="ueberzug pynvim msgpack"
# Install
sudo -u $USER_NAME yay --needed --noconfirm -Syu $packages
sudo -u $USER_NAME pip3 install --user $pip_packages
pip3 install --upgrade msgpack
# Configure
sed -i 's/#AutoEnable=false/AutoEnable=true/g' /etc/bluetooth/main.conf
rfkill unblock bluetooth
# Demons
systemctl enable $deamons
# Groups
usermod -a -G kvm,libvirt $USER_NAME
# Shell
chsh $USER_NAME -s /usr/bin/zsh
}
#=======
# SETUP
#=======
# Print the ASCII-art welcome banner.
greeter() {
# Unquoted heredoc delimiter: backslash is an escape character here,
# so the doubled '\\' below renders as a single '\'.
cat <<EOF
/\\
/ \\
/\\ \\ Script written by Cherrry9
/ .. \\ https://github.com/Cherrry9
/ ' ' \\
/ ..' '.. \\
/_\` \`_\\
EOF
}
# Verify internet connectivity, falling back to the interactive
# wifi-menu; abort the installer if both fail. On success, enable NTP
# time synchronization.
network() {
ping -c 1 archlinux.org >/dev/null || wifi-menu || {
echo "Can't connect to the internet!"
exit 1
}
timedatectl set-ntp true
}
# Detect whether the machine booted via UEFI or legacy BIOS and store
# the answer ('efi' or 'legacy') in the global BOOT_TYPE.
detect_boot_type() {
# The efivars directory is only present on UEFI boots; testing for it
# directly replaces the original's capture-and-inspect of ls output.
if [ -d /sys/firmware/efi/efivars ]; then
BOOT_TYPE='efi'
else
BOOT_TYPE='legacy'
fi
}
format_and_mount() {
# format && mount
mkdir -p /mnt
yes | mkfs.ext4 "$root"
mount "$root" /mnt
[ "$efi" ] && {
mkdir -p /mnt/boot/efi
mkfs.fat -F32 "$efi"
mount "$efi" /mnt/boot/efi
}
[ "$swap" ] && {
mkswap "$swap"
swapon "$swap"
}
[ "$home" ] && {
mkdir -p /mnt/home
yes | mkfs.ext4 "$home"
mount "$home" /mnt/home
}
[ "$var" ] && {
mkdir -p /mnt/var
yes | mkfs.ext4 "$var"
mount "$var" /mnt/var
}
}
auto_partition() {
# calc end
case $(echo "$EFI_SIZE > 0" | bc) in
1) efi_end="$((EFI_SIZE + 1))" ;;
*) efi_end=513 ;;
esac
case $(echo "$SWAP_SIZE > 0" | bc) in
1)
swap_end=$(echo "$SWAP_SIZE * 1024 + $efi_end" | bc)
swap=0
;;
*) swap_end="$efi_end" ;;
esac
case $(echo "$HOME_SIZE > 0" | bc) in
1)
home_end=$(echo "$HOME_SIZE * 1024 + $swap_end" | bc)
home=0
;;
*) home_end="$swap_end" ;;
esac
case $(echo "$VAR_SIZE > 0" | bc) in
1)
var_end=$(echo "$VAR_SIZE * 1024 + $home_end" | bc)
var=0
;;
*) var_end="$home_end" ;;
esac
# label mbr/gpt
next_part=1
if [ "$BOOT_TYPE" = 'efi' ]; then
echo "Detected EFI boot"
parted -s "$DRIVE" mklabel gpt
else
echo "Detected legacy boot"
parted -s "$DRIVE" mklabel msdos
fi
# efi
[ "$BOOT_TYPE" = 'efi' ] && {
parted -s "$DRIVE" select "$DRIVE" mkpart primary fat32 1MiB "${efi_end}MiB"
efi="${DRIVE}$next_part"
next_part=$((next_part + 1))
}
# swap
[ "$swap" ] && {
parted -s "$DRIVE" select "$DRIVE" mkpart primary linux-swap "${efi_end}MiB" "${swap_end}MiB"
swap="${DRIVE}$next_part"
next_part=$((next_part + 1))
}
# home
[ "$home" ] && {
parted -s "$DRIVE" select "$DRIVE" mkpart primary ext4 "${swap_end}MiB" "${home_end}MiB"
home="${DRIVE}$next_part"
next_part=$((next_part + 1))
}
# var
[ "$var" ] && {
parted -s "$DRIVE" select "$DRIVE" mkpart primary ext4 "${home_end}MiB" "${var_end}MiB"
var="${DRIVE}$next_part"
next_part=$((next_part + 1))
}
# root
parted -s "$DRIVE" select "$DRIVE" mkpart primary ext4 "${var_end}MiB" 100%
root="${DRIVE}$next_part"
}
select_disk() {
DISK=''
SIZE=''
type="$1"
# pseudo select loop
while [ ! "$DISK" ] && [ -s "$list" ]; do
i=0
echo
while read -r line; do
i=$((i + 1))
echo "$i) $line"
done <"$list"
if [ "$type" != "root" ]; then
i=$((i + 1))
echo "$i) Don't create $type partitions."
refuse="$i"
fi
printf "\nEnter disk number for %s: " "$type"
read -r choice
if [ "$refuse" ] && [ "$refuse" = "$choice" ]; then
DISK=''
break
elif [ "$choice" ]; then
DISK=$(sed -n "${choice}p" "$list" | awk '{print $1}')
SIZE=$(sed -n "${choice}p" "$list" | awk '{print $2}')
sed -i "${choice}d" "$list"
fi
done
}
manual_partition() {
while [ ! "$next" ] || [ "$next" != "y" ]; do
# part
if [ "$BOOT_TYPE" = 'efi' ]; then
cat <<EOF
Please create root partition (/) and efi partition (/boot/efi), optional home (/home), var (/var) or swap
Example:
# label
mklabel gpt
# swap
mkpart primary linux-swap 1MiB 2G
# home
mkpart primary ext4 2G 12G
# root
mkpart primary ext4 12G 100%
If finished, enter - "quit"
EOF
else
cat <<EOF
Please create root partition (/) and optional home (/home), var (/var) or swap
Example:
# label
mklabel msdos
# swap
mkpart primary linux-swap 1MiB 2G
# home
mkpart primary ext4 2G 12G
# root
mkpart primary ext4 12G 100%
If finished, enter - "quit"
EOF
fi
parted "$DRIVE"
# select disks
list="/disks.list"
lsblk -nrp "$DRIVE" | awk '/part/ { print $1" "$4 }' >"$list"
[ "$BOOT_TYPE" = 'efi' ] && {
select_disk "efi"
efi="$DISK"
EFI_SIZE="$SIZE"
}
select_disk "root"
root="$DISK"
ROOT_SIZE="$SIZE"
select_disk "home"
home="$DISK"
HOME_SIZE="$SIZE"
select_disk "swap"
swap="$DISK"
SWAP_SIZE="$SIZE"
select_disk "var"
var="$DISK"
VAR_SIZE="$SIZE"
rm "$list"
echo
echo "root: $root $ROOT_SIZE"
[ "$efi" ] && echo "efi: $efi $EFI_SIZE"
[ "$home" ] && echo "home: $home $HOME_SIZE"
[ "$swap" ] && echo "swap: $swap $SWAP_SIZE"
[ "$var" ] && echo "var: $var $VAR_SIZE"
echo
printf "Continue? [y/n] "
read -r next
done
}
set_mirrorlist() {
pacman --noconfirm -Sy reflector
reflector --verbose --latest 200 --age 24 --sort rate --save /etc/pacman.d/mirrorlist
}
install_base() {
pacstrap /mnt base linux linux-firmware base-devel
pacstrap /mnt git grub
genfstab -U /mnt >/mnt/etc/fstab
}
# Disable any swap enabled during install, then unmount the whole /mnt
# tree before reboot.
unmount_filesystems() {
swap=$(lsblk -nrp | awk '/SWAP/ {print $1}')
# NOTE(review): with more than one active swap device, "$swap" holds a
# newline-separated list and this single quoted swapoff call would
# fail -- confirm the installer only ever creates one swap partition.
[ "$swap" ] && swapoff "$swap"
umount -R /mnt
}
arch_chroot() {
cp "$0" /mnt/setup.sh
cp /etc/pacman.d/mirrorlist /mnt/etc/pacman.d/mirrorlist
arch-chroot /mnt bash -c "./setup.sh chroot $BOOT_TYPE"
if [ -f /mnt/setup.sh ]; then
echo 'ERROR: Something failed inside the chroot, not unmounting filesystems so you can investigate.'
echo 'Make sure you unmount everything before you try to run this script again.'
else
echo 'Unmounting filesystems'
unmount_filesystems
echo 'Done! Reboot system.'
fi
}
#===========
# CONFIGURE
#===========
set_locale() {
lang="$1"
echo "${lang}.UTF-8 UTF-8" >/etc/locale.gen
echo "LANG=${lang}.UTF-8" >/etc/locale.conf
locale-gen
}
# Persist the machine's hostname ($1) to /etc/hostname.
set_hostname() {
hostname="$1"
printf '%s\n' "$hostname" >/etc/hostname
}
set_hosts() {
hostname="$1"
hosts_file_type="$2"
url="https://raw.githubusercontent.com/StevenBlack/hosts/master/alternates/$hosts_file_type/hosts"
if curl --output /dev/null --silent --head --fail "$url"; then
curl "$url" >/etc/hosts
elif [ "$hosts_file_type" = "unified" ]; then
curl "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >/etc/hosts
else
cat >/etc/hosts <<EOF
127.0.0.1 localhost.localdomain localhost $hostname
::1 localhost.localdomain localhost $hostname
EOF
fi
}
set_keymap() {
keymap="$1"
cat >/etc/vconsole.conf <<EOF
KEYMAP=$keymap
FONT=Lat2-Terminus16.psfu.gz
FONT_MAP=8859-2
EOF
}
set_timezone() {
timezone="$1"
ln -sf /usr/share/zoneinfo/"$timezone" /etc/localtime
hwclock --systohc
}
# Set the root password non-interactively ($1 = password).
set_root_password() {
root_password="$1"
# Feeds the password twice to passwd's two prompts, discarding output.
# NOTE(review): there is no trailing newline after the second %s --
# passwd generally accepts EOF there, but confirm on the target
# passwd implementation.
printf "%s\n%s" "$root_password" "$root_password" | passwd >/dev/null 2>&1
}
create_user() {
name="$1"
password="$2"
useradd -m -G adm,systemd-journal,wheel,rfkill,games,network,video,audio,optical,floppy,storage,scanner,power,sys,disk "$name"
printf "%s\n%s" "$password" "$password" | passwd "$name" >/dev/null 2>&1
}
set_sudoers() {
cat >/etc/sudoers <<EOF
# /etc/sudoers
#
# This file MUST be edited with the 'visudo' command as root.
#
# See the man page for details on how to write a sudoers file.
#
Defaults env_reset
Defaults pwfeedback
Defaults lecture="always"
Defaults lecture_file="/home/$USER_NAME/.local/share/sudoers.bee"
# Host alias specification
# User alias specification
# Cmnd alias specification
# User privilege specification
root ALL=(ALL) ALL
%wheel ALL=(ALL) ALL
%wheel ALL=(ALL) NOPASSWD: /bin/makepkg , /bin/pacman
EOF
}
set_boot() {
boot_type="$1"
if [ "$boot_type" = 'efi' ]; then
pacman -Sy --noconfirm efibootmgr
grub-install --target=x86_64-efi --efi-directory=/boot/efi --bootloader-id=GRUB
else
grub-install --target=i386-pc "$DRIVE"
fi
grub-mkconfig -o /boot/grub/grub.cfg
}
install_yay() {
git clone https://aur.archlinux.org/yay.git /yay
cd /yay
chown $USER_NAME:$USER_NAME /yay
sudo -u $USER_NAME makepkg -si --noconfirm
cd ..
rm -r /yay
}
# Refresh pkgfile's file-to-package database.
update_pkgfile() {
pkgfile --update
}
# Blacklist the pcspkr module so the PC speaker never beeps.
disable_pc_speaker() {
# Overwrite rather than append: the file holds only this one line, and
# '>' keeps repeated runs from stacking duplicate blacklist entries
# (the original '>>' added one per invocation).
echo "blacklist pcspkr" >/etc/modprobe.d/nobeep.conf
}
# Purge the entire pacman package cache.
clean_packages() {
# 'yes |' answers pacman's confirmation prompts affirmatively --
# presumably because --noconfirm would take the default (decline) for
# -Scc; verify against the installed pacman version.
yes | pacman -Scc
}
set_pacman() {
    # Overwrite /etc/pacman.conf with a fixed configuration: colored output,
    # progress candy, CheckSpace, required package signatures, and the
    # core/extra/community/multilib repositories enabled.
    # NOTE: the heredoc below is written verbatim to /etc/pacman.conf; it
    # contains no shell expansions, so the unquoted EOF delimiter is safe.
    cat >/etc/pacman.conf <<EOF
#
# /etc/pacman.conf
#
# See the pacman.conf(5) manpage for option and repository directives
[options]
#RootDir = /
#DBPath = /var/lib/pacman/
#CacheDir = /var/cache/pacman/pkg/
#LogFile = /var/log/pacman.log
#GPGDir = /etc/pacman.d/gnupg/
#HookDir = /etc/pacman.d/hooks/
HoldPkg = pacman glibc
#XferCommand = /usr/bin/curl -L -C - -f -o %o %u
#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u
#CleanMethod = KeepInstalled
Architecture = auto
# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup
#IgnorePkg =
#IgnoreGroup =
#NoUpgrade =
#NoExtract =
# Misc options
#UseSyslog
Color
TotalDownload
CheckSpace
VerbosePkgLists
ILoveCandy
SigLevel = Required DatabaseOptional
LocalFileSigLevel = Optional
#RemoteFileSigLevel = Required
#[testing]
#Include = /etc/pacman.d/mirrorlist
[core]
Include = /etc/pacman.d/mirrorlist
[extra]
Include = /etc/pacman.d/mirrorlist
#[community-testing]
#Include = /etc/pacman.d/mirrorlist
[community]
Include = /etc/pacman.d/mirrorlist
#[multilib-testing]
#Include = /etc/pacman.d/mirrorlist
[multilib]
Include = /etc/pacman.d/mirrorlist
EOF
}
set_makepkg() {
    # Tune makepkg.conf for this machine: parallel make jobs (cores + 1) and
    # multi-threaded xz compression.
    # $1 - optional makepkg.conf path (defaults to /etc/makepkg.conf, the path
    #      the original hard-coded).
    # Fix: the original had a stray bare 'numberofcores' line before the
    # assignment, which executed a nonexistent command on every run.
    local conf="${1:-/etc/makepkg.conf}"
    local numberofcores
    numberofcores=$(grep -c ^processor /proc/cpuinfo)
    sed -i "s/#MAKEFLAGS=\"-j2\"/MAKEFLAGS=\"-j$((numberofcores + 1))\"/g" "$conf"
    sed -i "s/COMPRESSXZ=(xz -c -z -)/COMPRESSXZ=(xz -c -T $numberofcores -z -)/g" "$conf"
}
setup() {
    # Phase 1, run from the live ISO: confirm the target disk, partition,
    # format and mount it, install the base system, then chroot into the new
    # system (which re-runs this script with the "chroot" argument).
    # Reads globals: DRIVE, BOOT_TYPE, PARTITIONING.
    greeter
    echo "Setting network"
    network
    if [ -e "$DRIVE" ]; then
        printf "%s :: Are you sure? This disk will be formatted: [y/n] " "$DRIVE"
        read -r choice
        # A declined confirmation is not an error: exit 0.
        [ ! "$choice" = "y" ] && exit
    else
        echo "$DRIVE :: Device doesn't exist!"
        exit 1
    fi
    mkdir -p /mnt
    if [ ! "$BOOT_TYPE" ]; then
        detect_boot_type
    elif [ "$BOOT_TYPE" != 'efi' ] && [ "$BOOT_TYPE" != 'legacy' ]; then
        echo "Wrong boot type: $BOOT_TYPE"
        echo "Set to efi or legacy"
        # Fix: an invalid BOOT_TYPE is an error; the original exited with 0
        # here, inconsistent with the 'exit 1' for a missing device above.
        exit 1
    fi
    if [ "$PARTITIONING" = auto ]; then
        auto_partition
    else
        manual_partition
    fi
    format_and_mount
    echo "Setting mirrorlist"
    set_mirrorlist
    echo "Installing base package"
    install_base
    echo "Chrooting to new system"
    arch_chroot
}
configure() {
    # Phase 2, run inside the chroot: localize, set up users, bootloader,
    # pacman/makepkg tuning, yay and extra packages, then self-destruct.
    # $1 - boot type: efi or legacy (forwarded to set_boot).
    # Any of LANG/TIMEZONE/HOSTNAME/ROOT_PASSWORD/USER_NAME/USER_PASSWORD
    # missing from the environment is prompted for interactively.
    BOOT_TYPE="$1"
    echo "Setting locale"
    set_locale "$LANG"
    echo "Setting time zone"
    # Fall back to an interactive picker when TIMEZONE is absent or invalid.
    [ ! -f "/usr/share/zoneinfo/$TIMEZONE" ] && TIMEZONE=$(tzselect)
    set_timezone "$TIMEZONE"
    echo "Setting hostname"
    [ ! "$HOSTNAME" ] && {
        printf "Enter the hostname: "
        read -r HOSTNAME
    }
    set_hostname "$HOSTNAME"
    echo "Setting hosts"
    set_hosts "$HOSTNAME" "$HOSTS_FILE_TYPE"
    echo "Setting keymap"
    # Fix: KEYMAP was the only unquoted argument among these set_* calls.
    set_keymap "$KEYMAP"
    echo 'Setting bootloader'
    set_boot "$BOOT_TYPE"
    echo 'Setting root password'
    [ ! "$ROOT_PASSWORD" ] && {
        printf "Enter the root password: "
        read -r ROOT_PASSWORD
    }
    set_root_password "$ROOT_PASSWORD"
    echo 'Creating initial user'
    [ ! "$USER_NAME" ] && {
        printf "Enter the user name: "
        read -r USER_NAME
    }
    [ ! "$USER_PASSWORD" ] && {
        printf "Enter the password for user %s: " "$USER_NAME"
        read -r USER_PASSWORD
    }
    create_user "$USER_NAME" "$USER_PASSWORD"
    echo 'Setting sudoers'
    set_sudoers
    echo "Setting pacman"
    set_pacman
    echo "Setting makepkg"
    set_makepkg
    echo 'Installing yay'
    install_yay
    echo 'Installing additional packages'
    install_packages
    echo 'Clearing package tarballs'
    clean_packages
    echo 'Updating pkgfile database'
    update_pkgfile
    echo 'Disabling PC speaker'
    disable_pc_speaker
    # The installer copied itself into the chroot; remove the copy when done.
    rm /setup.sh
}
# Entry point: when invoked with "chroot" we are already inside the new
# system (phase 2, $2 carries the boot type); otherwise run the live-ISO
# phase. An unset $1 expands empty and falls through to setup, exactly as
# the original if/else did.
case "$1" in
chroot) configure "$2" ;;
*) setup ;;
esac
| true |
f9149d9444e61767fecfeaa712409e565b609a7f | Shell | michaelilyin/env | /install-polybar.sh | UTF-8 | 401 | 2.828125 | 3 | [] | no_license | #!/usr/bin/zsh
# Build polybar $VER from the official release tarball and install it,
# then clean up the build tree and unused packages.
VER=3.4.2
mkdir -p /tmp/polybar && cd /tmp/polybar || exit
# Fix: -f/-rf so leftovers that do not exist yet (first run) do not emit
# spurious 'No such file or directory' errors.
rm -f "polybar-$VER.tar"
rm -rf "polybar-$VER"
mkdir -p "polybar-$VER"
wget "https://github.com/polybar/polybar/releases/download/$VER/polybar-$VER.tar"
tar -xvf "polybar-$VER.tar" -C "polybar-$VER"
cd "polybar-$VER/polybar" || exit
./build.sh --all-features
cd "$HOME" || exit
sudo rm -r /tmp/polybar
sudo apt autoremove -y
81e3faa472b5ed1ba9a961f5c8521df7527f12f0 | Shell | lijiunderstand/losha | /script/evaluate.sh | UTF-8 | 565 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | # only supports lines of triplets in the format of (srcId, dstId, dist)
# Evaluate LSH results: pull the triplet output off HDFS, build the
# evaluator against the ground-truth file, then run it.
# Fix: 'mkdir tmp' failed on re-runs (-p), backticks modernized to $(),
# and both 'cd' calls are now checked so later steps never run in the
# wrong directory.
mkdir -p tmp
hadoop dfs -cat /losha/output/* > tmp/output.txt
# lshbox_file="../data/idfvecs/audio/audio_groundtruth.lshbox"
lshbox_file="../gqr/data/audio/audio_groundtruth.lshbox"
triplets_file="tmp/output.txt"
cd ../build || exit 1
# cmake ../ -DCMAKE_BUILD_TYPE=Debug
cmake ../ -DCMAKE_BUILD_TYPE=Release
# NOTE(review): ${format} is never assigned in this script — presumably an
# optional environment variable; left unquoted so an empty value expands to
# no argument at all, as before.
make evaluate_triplets ${format} 2>&1 | tee ../script/log.txt
cd ../script || exit 1
# Abort quietly if the captured build log mentions an error.
log=$(grep error log.txt)
if [ "$log" != "" ]; then
    exit
fi
../build/evaluate_triplets "$lshbox_file" "$triplets_file"
2fb9fd0d18ac9137849e96a3a9716d03b4c664d0 | Shell | m9ffk4/konsul | /.githooks/pre-commit.d/pre-commit-01-ktlint | UTF-8 | 659 | 3.578125 | 4 | [] | no_license | #!/bin/sh
echo "Running ktlint check..."
# Run the ktlint auto-formatter over the project, capturing its output and
# exit status so a failure can be reported after re-staging.
OUTPUT=$(gradle ktlintFormat)
EXIT_CODE=$?
# Re-stage the files that were already staged (ktlintFormat may have
# rewritten them). Fix: NUL-delimited via -z/-0 so paths containing spaces
# survive; the old newline echo|xargs pipeline word-split them. '|| true'
# keeps the hook alive when nothing is staged.
git diff --name-only --cached --relative -z | xargs -0 git add -- || true
if [ $EXIT_CODE -ne 0 ]; then
    echo "$OUTPUT"
    echo "***********************************************"
    echo " ktlintFormat failed "
    echo " Please fix the above issues before committing "
    echo "***********************************************"
    exit $EXIT_CODE
else
    echo "ktlint completed successfully"
fi
| true |
d09690d75c3f40d93179f3df1a9c51ba3af9fbdd | Shell | etcd-io/etcd | /tools/rw-heatmaps/rw-benchmark.sh | UTF-8 | 5,684 | 3.609375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#set -x
# Benchmark sweep parameters: read/write ratios, value sizes and client
# counts are swept as powers of two over the ranges below.
RATIO_LIST="1/128 1/8 1/4 1/2 2/1 4/1 8/1 128/1"
VALUE_SIZE_POWER_RANGE="8 14"
CONN_CLI_COUNT_POWER_RANGE="5 11"
REPEAT_COUNT=5
RUN_COUNT=200000
KEY_SIZE=256
KEY_SPACE_SIZE=$((1024 * 64))
BACKEND_SIZE="$((20 * 1024 * 1024 * 1024))"
RANGE_RESULT_LIMIT=100
CLIENT_PORT="23790"
COMMIT=
# Paths are resolved relative to this script: repo root two levels up,
# with the etcd and benchmark binaries expected under bin/.
ETCD_ROOT_DIR="$(cd $(dirname $0) && pwd)/../.."
ETCD_BIN_DIR="${ETCD_ROOT_DIR}/bin"
ETCD_BIN="${ETCD_BIN_DIR}/etcd"
ETCD_BM_BIN="${ETCD_BIN_DIR}/tools/benchmark"
WORKING_DIR="$(mktemp -d)"
CURRENT_DIR="$(pwd -P)"
# Results CSV is written where the user launched the script, stamped to the minute.
OUTPUT_FILE="${CURRENT_DIR}/result-$(date '+%Y%m%d%H%M').csv"
# ctrl_c is defined below; trap handlers are resolved at signal time.
trap ctrl_c INT
CURRENT_ETCD_PID=
ctrl_c() {
  # SIGINT handler: stop the currently running etcd server, then leave
  # with a success status so a manual interrupt is not reported as a failure.
  echo "terminating..."
  kill_etcd_server ${CURRENT_ETCD_PID}
  exit 0
}
function quit() {
  # Tear down the etcd server (if one was started) and exit with status $1.
  # Fix: the original tested '[ ! -z ${CURRENT_ETCD_PID} ]' unquoted, which
  # only worked by accident when the variable was empty; quote both the test
  # and the exit argument.
  if [ -n "${CURRENT_ETCD_PID}" ]; then
    kill_etcd_server "${CURRENT_ETCD_PID}"
  fi
  exit "$1"
}
function check_prerequisite() {
  # Refuse to clobber an existing results file, record the git commit of the
  # checkout (or N/A), and write the CSV header rows.
  # check initial parameters
  if [ -f "${OUTPUT_FILE}" ]; then
    echo "file ${OUTPUT_FILE} already exists."
    exit 1
  fi
  pushd ${ETCD_ROOT_DIR} > /dev/null
  # $? here is the exit status of the command substitution.
  COMMIT=$(git log --pretty=format:'%h' -n 1)
  if [ $? -ne 0 ]; then
    COMMIT=N/A
  fi
  popd > /dev/null
  # Header row one names the columns (one iterN per repeat); row two records
  # the fixed parameters of this run. Both $(...) loops expand at write time.
  cat >"${OUTPUT_FILE}" <<EOF
type,ratio,conn_size,value_size$(for i in $(seq 1 ${REPEAT_COUNT});do echo -n ",iter$i"; done),comment
PARAM,,,$(for i in $(seq 1 ${REPEAT_COUNT});do echo -n ","; done),"key_size=${KEY_SIZE},key_space_size=${KEY_SPACE_SIZE},backend_size=${BACKEND_SIZE},range_limit=${RANGE_RESULT_LIMIT},commit=${COMMIT}"
EOF
}
function run_etcd_server() {
  # Launch a single etcd server in the background on CLIENT_PORT with a
  # fresh data directory. The PID of the background process is left in $!
  # for the caller (bash preserves $! across the function return).
  if [ ! -x "${ETCD_BIN}" ]; then
    echo "no etcd binary found at: ${ETCD_BIN}"
    exit 1
  fi
  # delete existing data directories
  [ -d "db" ] && rm -rf db
  [ -d "default.etcd" ] && rm -rf default.etcd/
  echo "start etcd server in the background"
  "${ETCD_BIN}" --quota-backend-bytes=${BACKEND_SIZE} \
    --log-level 'error' \
    --listen-client-urls http://0.0.0.0:${CLIENT_PORT} \
    --advertise-client-urls http://127.0.0.1:${CLIENT_PORT} \
    &>/dev/null &
  # Fix: the original ended with 'return $!', but 'return' truncates its
  # argument modulo 256, silently corrupting any real PID. Callers already
  # read the PID from $! instead.
  return 0
}
function init_etcd_db() {
  # Pre-populate the freshly started etcd with KEY_SPACE_SIZE sequential
  # keys so read benchmarks have data to hit.
  # Fix: quote the benchmark binary path in the test and the invocation.
  #initialize etcd database
  if [ ! -x "${ETCD_BM_BIN}" ]; then
    echo "no etcd benchmark binary found at: ${ETCD_BM_BIN}"
    quit -1
  fi
  echo "initialize etcd database..."
  "${ETCD_BM_BIN}" put --sequential-keys \
    --key-space-size=${KEY_SPACE_SIZE} \
    --val-size=${VALUE_SIZE} --key-size=${KEY_SIZE} \
    --endpoints http://127.0.0.1:${CLIENT_PORT} \
    --total=${KEY_SPACE_SIZE} \
    &>/dev/null
}
function kill_etcd_server() {
  # Force-kill the etcd server with the given PID, reap it, and give the OS
  # a moment to release the port/data directory.
  # $1 - PID of the etcd process started by run_etcd_server.
  ETCD_PID=$1
  # Fix: probe the exact PID with 'kill -0' instead of grepping 'ps aux'
  # for "etcd", which could match unrelated processes (or the grep itself).
  if ! kill -0 "${ETCD_PID}" 2>/dev/null; then
    echo "failed to find the etcd instance to kill: ${ETCD_PID}"
    return
  fi
  echo "kill etcd server instance"
  kill -9 ${ETCD_PID}
  wait ${ETCD_PID} 2>/dev/null
  sleep 5
}
while getopts ":w:c:p:l:vh" OPTION; do
case $OPTION in
h)
echo "usage: $(basename $0) [-h] [-w WORKING_DIR] [-c RUN_COUNT] [-p PORT] [-l RANGE_QUERY_LIMIT] [-v]" >&2
exit 1
;;
w)
WORKING_DIR="${OPTARG}"
;;
c)
RUN_COUNT="${OPTARG}"
;;
p)
CLIENT_PORT="${OPTARG}"
;;
v)
set -x
;;
l)
RANGE_RESULT_LIMIT="${OPTARG}"
;;
\?)
echo "usage: $(basename $0) [-h] [-w WORKING_DIR] [-c RUN_COUNT] [-p PORT] [-l RANGE_QUERY_LIMIT] [-v]" >&2
exit 1
;;
esac
done
shift "$((${OPTIND} - 1))"
check_prerequisite
pushd "${WORKING_DIR}" > /dev/null
# progress stats management
# Total iterations = |ratios| x |value-size powers| x |client-count powers|.
# 'wc | awk "{print \$2}"' extracts the word count of each list.
ITER_TOTAL=$(($(echo ${RATIO_LIST} | wc | awk "{print \$2}") * \
  $(seq ${VALUE_SIZE_POWER_RANGE} | wc | awk "{print \$2}") * \
  $(seq ${CONN_CLI_COUNT_POWER_RANGE} | wc | awk "{print \$2}")))
ITER_CURRENT=0
PERCENTAGE_LAST_PRINT=0
PERCENTAGE_PRINT_THRESHOLD=5
# Sweep every (ratio, value size, client count) combination; each
# combination gets a fresh etcd server, a seeded database, REPEAT_COUNT
# benchmark runs, and one CSV row appended to OUTPUT_FILE.
for RATIO_STR in ${RATIO_LIST}; do
  # bc turns the "a/b" ratio string into a decimal with 4-digit scale.
  RATIO=$(echo "scale=4; ${RATIO_STR}" | bc -l)
  for VALUE_SIZE_POWER in $(seq ${VALUE_SIZE_POWER_RANGE}); do
    VALUE_SIZE=$((2 ** ${VALUE_SIZE_POWER}))
    for CONN_CLI_COUNT_POWER in $(seq ${CONN_CLI_COUNT_POWER_RANGE}); do
      # progress stats management
      ITER_CURRENT=$((${ITER_CURRENT} + 1))
      PERCENTAGE_CURRENT=$(echo "scale=3; ${ITER_CURRENT}/${ITER_TOTAL}*100" | bc -l)
      # Only print progress every PERCENTAGE_PRINT_THRESHOLD percent.
      if [ "$(echo "${PERCENTAGE_CURRENT} - ${PERCENTAGE_LAST_PRINT} > ${PERCENTAGE_PRINT_THRESHOLD}" |
        bc -l)" -eq 1 ]; then
        PERCENTAGE_LAST_PRINT=${PERCENTAGE_CURRENT}
        echo "${PERCENTAGE_CURRENT}% completed"
      fi
      CONN_CLI_COUNT=$((2 ** ${CONN_CLI_COUNT_POWER}))
      run_etcd_server
      # run_etcd_server leaves the background server's PID in $!.
      CURRENT_ETCD_PID=$!
      sleep 5
      init_etcd_db
      START=$(date +%s)
      LINE="DATA,${RATIO},${CONN_CLI_COUNT},${VALUE_SIZE}"
      echo -n "run with setting [${LINE}]"
      for i in $(seq ${REPEAT_COUNT}); do
        echo -n "."
        # The benchmark prints two Requests/sec lines: read QPS then write QPS.
        QPS=$(${ETCD_BM_BIN} txn-mixed "" \
          --conns=${CONN_CLI_COUNT} --clients=${CONN_CLI_COUNT} \
          --total=${RUN_COUNT} \
          --endpoints "http://127.0.0.1:${CLIENT_PORT}" \
          --rw-ratio ${RATIO} --limit ${RANGE_RESULT_LIMIT} \
          --val-size ${VALUE_SIZE} \
          2>/dev/null | grep "Requests/sec" | awk "{print \$2}")
        if [ $? -ne 0 ]; then
          echo "benchmark command failed: $?"
          quit -1
        fi
        RD_QPS=$(echo -e "${QPS}" | sed -n '1 p')
        WR_QPS=$(echo -e "${QPS}" | sed -n '2 p')
        # A pure read or pure write run produces only one line; default the
        # missing side to 0.
        if [ -z "${RD_QPS}" ]; then
          RD_QPS=0
        fi
        if [ -z "${WR_QPS}" ]; then
          WR_QPS=0
        fi
        LINE="${LINE},${RD_QPS}:${WR_QPS}"
      done
      # Trailing comma leaves the 'comment' CSV column empty.
      LINE="${LINE},"
      END=$(date +%s)
      DIFF=$((${END} - ${START}))
      echo "took ${DIFF} seconds"
      cat >>"${OUTPUT_FILE}" <<EOF
${LINE}
EOF
      kill_etcd_server ${CURRENT_ETCD_PID}
    done
  done
done
popd > /dev/null
| true |
861ccfc47b68660d539e5d62afc69f28706ff4ba | Shell | nanyang2016/hids | /auto_update.sh | UTF-8 | 175 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# Commit everything in the repository with a timestamped message and push.
# $1 - free-form comment appended to the commit message.
time=$(date +"%Y%m%d %H:%M:%S")
content=$1
update_content="Update time:$time Update comment:$content"
# Fix: 'git add *' skipped dotfiles and relied on an unquoted glob;
# 'git add -A' stages every change, matching the script's intent.
git add -A
git commit -m "update $update_content"
git push
| true |
c494f5b6080e279eaf9cbdcfb480b48de6a9f258 | Shell | MartinPlantinga/Edrive | /ebox/scripts2/checkedrive.sh | UTF-8 | 722 | 3.65625 | 4 | [] | no_license | #!/bin/bash
# This script runs the startup scripts to:
# 1. check if Edrive has been inserted into another computer
# 2. check if ethernet port number is correct.
# LOG
# M. Plantinga (13 April 2016): script creation
filepath='/home/ebox/.scripts/eth_port' # obtaining eth name and mac address
LAST_STATUS=$(tail -1 $filepath/log.txt) #extract last line
LAST_STATUS=${LAST_STATUS##*eth} #extract last status
LAST_STATUS=$(echo $LAST_STATUS | cut -c3-40) #remove first 2 characters
# Fix: the original compared '[ $LAST_STATUS == "changed" ]' unquoted with
# the non-POSIX '=='; an empty status made 'test' fail with a syntax error.
if [ "$LAST_STATUS" = "changed" ]; then
    echo
    echo "YOU HAVE TO REBOOT YOUR COMPUTER BEFORE RUNNING MATLAB"
    echo
else
    echo
    echo "You can proceed to run Matlab by typing:"
    echo "sudo su"
    echo "matlab"
    echo
fi
exit 0
| true |
646ab54341246e18f2a3993631bbec34d70bcff9 | Shell | L4STeam/l4sdemo | /iproute2-addons/build.sh | UTF-8 | 1,064 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# Build an iproute2 tree with the L4S qdisc additions from this directory.
HERE=$(realpath $(dirname $0))
DEST="${HERE}/../iproute2-l4s"
TCDIR="${DEST}/tc"
echo "Building iproute2"
# Clone the iproute2 branch matching the running kernel's major.minor.
if [ ! -d "$DEST" ]; then
	git clone \
		--depth 1 --single-branch \
		-b $(uname -r | awk -F '.' '{ printf "v%d.%d.0", $1, $2 }') \
		git://git.kernel.org/pub/scm/network/iproute2/iproute2.git "$DEST"
fi
# Newer trees keep pkt_sched.h under include/uapi, older ones under include.
pktsch_h="${DEST}/include/uapi/linux/pkt_sched.h"
if [ ! -f "$pktsch_h" ]; then
	pktsch_h="${DEST}/include/linux/pkt_sched.h"
fi
# Reset include/ so the compat include below is appended exactly once per run.
pushd "$DEST"
git checkout "include/"
popd
# The unquoted EOF lets ${HERE} expand now, baking absolute paths into the header.
cat >> "$pktsch_h" << EOF
#ifndef DUALPI2_DEV
#include "${HERE}/../kernel_modules/sch_dualpi2/compat-pkt_sched.h"
#else
#include "${HERE}/../kernel_modules/sch_dualpi2_dev/compat-pkt_sched.h"
#endif
EOF
# Register each local qdisc .c file in tc's Makefile (if not already) and copy it in.
for qdisc in ${HERE}/*.c; do
	qdisc_o="$(basename $qdisc)"
	qdisc_o="${qdisc_o/%.c/.o}"
	if ! grep "TCMODULES +=${qdisc_o}" "${TCDIR}/Makefile"; then
		sed -i "/^TCMODULES :=/a TCMODULES += ${qdisc_o}" "${TCDIR}/Makefile"
	fi
	cp "$qdisc" "${TCDIR}"
done
"${HERE}/patch_fq_codel.sh"
pushd "${DEST}"
./configure
make -j$(nproc)
popd
| true |
46d2b916210f853a72bbeec7ed4959fe4f926ec6 | Shell | ahltorp/arlanohist | /scripts/run-tests.sh | UTF-8 | 2,446 | 3.921875 | 4 | [] | no_license | #!/bin/sh
#
# $Id: run-tests.sh,v 1.2 2002/06/03 16:59:18 lha Exp $
#
# This script will:
# - cd to the build directory and run the regression suite ther
# - build a summery of the test results
#
# Directory containing this script, used to locate test-config and helpers.
ppath=`dirname $0`
# Parse optional flags; unknown arguments are silently ignored (as before).
#   -no-start-arla : do not launch the arla AFS client
#   -no-run-tests  : skip the test runs, only rebuild the report
while test $# != 0; do
    # Fix: the original read 'case $1' without the mandatory 'in' keyword,
    # a shell syntax error that prevented the whole script from parsing.
    case "$1" in
    -no-start-arla) startarla=no ;;
    -no-run-tests) runtests=no ;;
    esac
    shift
done
# Optional per-site overrides (e.g. ADIR) sourced from alongside the script.
test -f ${ppath}/test-config && . ${ppath}/test-config
if [ X$ADIR = X ]; then
    ADIR=/nfsafs/e.kth.se/home/staff/lha/src/cvs/arla-0.35
fi
# Run the suite as an unprivileged user.
USER="-user nobody"
# Pull VERSION= straight out of configure.in into this shell.
eval `grep '^VERSION=' $ADIR/configure.in`
if [ X$VERSION = X ]; then
    echo "Failed to find version of arla"
    exit 1
fi
WORKDIR=/afs/e.kth.se/home/staff/lha/TEST
export VERSION ADIR WORKDIR
# Out-of-tree build directory matching this arla version.
OBJDIR=/usr/obj/arla-$VERSION
if [ ! -d $OBJDIR -o ! -d $OBJDIR/tests ] ; then
    echo "Failed to find \$OBJDIR or \$OBJDIR/tests"
    exit 1
fi
cd $OBJDIR/tests
# Refuse to start when the AFS cell is already reachable: an AFS client is
# evidently already running. Fix: the original tested '! -d', which
# contradicted both its own "already exists" message and the parallel
# post-start check below.
if [ -d /afs/stacken.kth.se ] ; then
    echo "/afs already exists, refusing to start"
    exit 1
fi
# Launch the arla AFS client unless -no-start-arla was given.
if [ X$startarla != Xno ]; then
    echo "Starting arla"
    /usr/arla/sbin/startarla
    sleep 10
else
    echo "Not starting arla"
fi
# After startup the cell must be mounted or the tests cannot run.
if [ ! -d /afs/stacken.kth.se ] ; then
    echo "/afs does not exists, refusing to run tests"
    exit 1
fi
# Run the fast and slow suites (unless -no-run-tests), timestamping each
# phase into rlog and capturing the detailed output into rlog-fast/rlog-slow.
if [ X$runtests != Xno ] ; then
    echo WORKDIR is $WORKDIR
    echo WORKDIR is $WORKDIR >> rlog
    echo "Running fast tests"
    echo "Running fast tests" >> rlog
    date >> rlog
    ./run-tests $USER -all -fast >> rlog-fast 2>&1
    echo "Running slow tests"
    echo "Running slow tests" >> rlog
    date >> rlog
    ./run-tests $USER -all >> rlog-slow 2>&1
    date >> rlog
fi
# Assemble rlog-report: header, timings, per-suite summaries, then the full logs.
echo Creating report
cat > rlog-report <<EOF
. Test report for arla-$VERSION
EOF
uname -a | sed 's/^/ /' >> rlog-report
echo " Summery created:" >> rlog-report
TZ=UTC date | sed 's/^/ /' >> rlog-report
cat >> rlog-report <<EOF
. Result times
EOF
cat rlog | sed 's/^/ /' >> rlog-report
cat >> rlog-report <<EOF
. Fast tests - summery
EOF
${ppath}/extract-result.sh rlog-fast >> rlog-report
# Fix: this section summarises rlog-slow, but the original header read
# ". Fast tests - summery" (copy/paste slip).
cat >> rlog-report <<EOF
. Slow tests - summery
EOF
${ppath}/extract-result.sh rlog-slow >> rlog-report 2>&1
cat >> rlog-report <<EOF
. Report log
EOF
sed 's/^/ /' < rlog >> rlog-report
cat >> rlog-report <<EOF
. Complete tests below
+ Fast tests
EOF
sed 's/^/ /' < rlog-fast >> rlog-report
cat >> rlog-report <<EOF
+ Slow tests
EOF
sed 's/^/ /' < rlog-slow >> rlog-report
exit 0
| true |
29834be84ea30597879cbaca78dd4edd49b296cf | Shell | jjgomezcadenas/PESOA | /PETALOS/job/pde/createjobs.sh | UTF-8 | 606 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Split the PDE job files into PBS submission scripts, 5 jobs per script.
N=0
FILENUMBER=0
# Fix: glob directly instead of parsing `ls` output (word-splitting hazard).
for i in /home/jmbenlloch/next/petalo/PESOA/PETALOS/job/pde/jobs/*
do
    # Start a new PBS script (with its header) every 5 jobs.
    if [ $(($N % 5)) = 0 ]; then
        FILENUMBER=$(($FILENUMBER + 1))
        FILE="scripts/$FILENUMBER.sh"
        echo "#!/bin/bash" >> $FILE
        echo "#PBS -N petalo"$N >> $FILE
        echo "#PBS -q short" >> $FILE
        echo "#PBS -M jmbenlloch@ific.uv.es" >> $FILE
#        echo "#PBS -m bae" >> $FILE
        echo "source /data4/NEXT/sw/Releases/NEXT_v0_05_05/setup.sh" >> $FILE
    fi
    FILE="scripts/$FILENUMBER.sh"
    N=$(($N+1))
    # Fix: the original extracted field 10 of the path with 'cut -d/ -f 10',
    # which is the literal directory "jobs", not the job filename, so every
    # job logged to the same file. Use the basename instead.
    NAME=${i##*/}
    echo "runCNTjob $i >> /home/jmbenlloch/next/petalo/work/log/pde/$NAME.log" >> $FILE
done
| true |
6a75f840abf9cb10c879e576477ff3582ac3457e | Shell | gwindlord/uber-saber | /aosp_august_2016.sh | UTF-8 | 8,777 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# patching android-5.1.1_r37 with Google August 2016 security fixes wherever possible
# Require exactly one argument: the path of the local AOSP checkout.
LOCAL_REPO="$1"
if (( $# != 1 )); then
  echo "usage: $0 LOCAL_REPO" >&2
  exit 1
fi
# errors on
# Abort immediately if any cherry-pick/fetch fails, so a half-applied
# patch set is never silently produced.
set -e
# Bump the advertised security patch level in the build system.
pushd "$LOCAL_REPO/build"
sed -i 's#PLATFORM_SECURITY_PATCH := 2016-07-01#PLATFORM_SECURITY_PATCH := 2016-08-01#' core/version_defaults.mk
git add $(git status -s | awk '{print $2}') && git commit -m "Updating security string patch to 2016-08-01"
popd
# frameworks/av: cherry-pick upstream AOSP fixes, then fetch the
# CyanogenMod/LineageOS backports that have no AOSP equivalent for 5.1.
pushd "$LOCAL_REPO/frameworks/av"
[ $(git remote | egrep \^aosp) ] && git remote rm aosp
git remote add aosp https://android.googlesource.com/platform/frameworks/av/
git fetch aosp
git cherry-pick 590d1729883f700ab905cdc9ad850f3ddd7e1f56
git cherry-pick 42a25c46b844518ff0d0b920c20c519e1417be69
#git cherry-pick 1f24c730ab6ca5aff1e3137b340b8aeaeda4bdbc || git add $(git status -s | awk '{print $2}') && git cherry-pick --continue
git cherry-pick 9cd8c3289c91254b3955bd7347cf605d6fa032c6
git cherry-pick 8e438e153f661e9df8db0ac41d587e940352df06
git remote rm aosp
# DO NOT MERGE: Camera: Adjust pointers to ANW buffers to avoid infoleak (http://review.cyanogenmod.org/#/c/155621/) (https://review.lineageos.org/#/c/62642/)
#git fetch http://review.cyanogenmod.org/CyanogenMod/android_frameworks_av refs/changes/21/155621/2 && git cherry-pick FETCH_HEAD
git fetch https://review.lineageos.org/LineageOS/android_frameworks_av refs/changes/42/62642/2 && git cherry-pick FETCH_HEAD
# DO NOT MERGE omx: check buffer port before using (http://review.cyanogenmod.org/#/c/155622/) (https://review.lineageos.org/#/c/62641/)
#git fetch http://review.cyanogenmod.org/CyanogenMod/android_frameworks_av refs/changes/22/155622/2 && git cherry-pick FETCH_HEAD
git fetch https://review.lineageos.org/LineageOS/android_frameworks_av refs/changes/41/62641/2 && git cherry-pick FETCH_HEAD
popd
# frameworks/base: all four fixes come from LineageOS review changes
# (the CyanogenMod URLs and direct AOSP hashes are kept commented for reference).
pushd "$LOCAL_REPO/frameworks/base"
#git remote add aosp https://android.googlesource.com/platform/frameworks/base/
#git fetch aosp
# Don't trust callers to supply app info to bindBackupAgent() (http://review.cyanogenmod.org/#/c/155624/) (https://review.lineageos.org/#/c/65880/)
#git fetch http://review.cyanogenmod.org/CyanogenMod/android_frameworks_base refs/changes/24/155624/2 && git cherry-pick FETCH_HEAD
git fetch https://review.lineageos.org/LineageOS/android_frameworks_base refs/changes/80/65880/2 && git cherry-pick FETCH_HEAD
#git cherry-pick e7cf91a198de995c7440b3b64352effd2e309906 || git add $(git status -s | awk '{print $2}') && git cherry-pick --continue
# DO NOT MERGE: Reduce shell power over user management. (http://review.cyanogenmod.org/#/c/155625/) (https://review.lineageos.org/#/c/65879/)
#git fetch http://review.cyanogenmod.org/CyanogenMod/android_frameworks_base refs/changes/25/155625/2 && git cherry-pick FETCH_HEAD
git fetch https://review.lineageos.org/LineageOS/android_frameworks_base refs/changes/79/65879/2 && git cherry-pick FETCH_HEAD
#git cherry-pick 01875b0274e74f97edf6b0d5c92de822e0555d03 || git add $(git status -s | awk '{print $2}') && git cherry-pick --continue
# DO NOT MERGE Fix intent filter priorities (http://review.cyanogenmod.org/#/c/155623/) (https://review.lineageos.org/#/c/65881/)
#git fetch http://review.cyanogenmod.org/CyanogenMod/android_frameworks_base refs/changes/23/155623/1 && git cherry-pick FETCH_HEAD
git fetch https://review.lineageos.org/LineageOS/android_frameworks_base refs/changes/81/65881/1 && git cherry-pick FETCH_HEAD
#git cherry-pick a75537b496e9df71c74c1d045ba5569631a16298
# DO NOT MERGE: Add pm operation to set user restrictions. (http://review.cyanogenmod.org/#/c/155626/) (https://review.lineageos.org/#/c/65878/)
#git fetch http://review.cyanogenmod.org/CyanogenMod/android_frameworks_base refs/changes/26/155626/2 && git cherry-pick FETCH_HEAD
git fetch https://review.lineageos.org/LineageOS/android_frameworks_base refs/changes/78/65878/2 && git cherry-pick FETCH_HEAD
#git cherry-pick 4e4743a354e26467318b437892a9980eb9b8328a || git add $(git status -s | awk '{print $2}') && git cherry-pick --continue
#git remote rm aosp
popd
# frameworks/native: one AOSP cherry-pick applies; the rest are kept
# commented (they did not apply cleanly or were not needed here).
pushd "$LOCAL_REPO/frameworks/native"
[ $(git remote | egrep \^aosp) ] && git remote rm aosp
git remote add aosp https://android.googlesource.com/platform/frameworks/native
git fetch aosp
git cherry-pick 3bcf0caa8cca9143443814b36676b3bae33a4368
#git cherry-pick 9f590df0b73d14e0c30e970098f2369403eb2617
#git cherry-pick 3454f123d0a10bd0ce0760828996aa26c80a8fd4 || git add $(git status -s | awk '{print $2}') && git cherry-pick --continue
#git cherry-pick a8c2454d52d3c23bd53b4a172eff8e5f4af30168
#git cherry-pick d910f3cf78ae878b1b86ead7ca837004c3a25aaa
git remote rm aosp
popd
# frameworks/opt/telephony: single AOSP cherry-pick.
pushd "$LOCAL_REPO/frameworks/opt/telephony"
[ $(git remote | egrep \^aosp) ] && git remote rm aosp
git remote add aosp https://android.googlesource.com/platform/frameworks/opt/telephony
git fetch aosp
git cherry-pick f47bc301ccbc5e6d8110afab5a1e9bac1d4ef058
git remote rm aosp
popd
# frameworks/opt/net/wifi: single AOSP cherry-pick.
pushd "$LOCAL_REPO/frameworks/opt/net/wifi"
[ $(git remote | egrep \^aosp) ] && git remote rm aosp
git remote add aosp https://android.googlesource.com/platform/frameworks/opt/net/wifi
git fetch aosp
git cherry-pick a209ff12ba9617c10550678ff93d01fb72a33399
git remote rm aosp
popd
# Kernel: mainline picks are commented out (already merged downstream per
# the note); apply the remaining CAF (CodeAurora) fix.
pushd "$LOCAL_REPO/kernel/oneplus/msm8974/"
# Sultanxda merged it
#[ $(git remote | egrep \^linux) ] && git remote rm linux
#git remote add linux https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
#git fetch linux
# net: fix infoleak in rtnetlink
#git cherry-pick 5f8e44741f9f216e33736ea4ec65ca9ac03036e6 || git add $(git status -s | awk '{print $2}') && git cherry-pick --continue
# ALSA: seq: Fix race at timer setup and close
#git cherry-pick 3567eb6af614dac436c4b16a8d426f9faed639b3
#git remote rm linux
[ $(git remote | egrep \^CAF) ] && git remote rm CAF
git remote add CAF https://source.codeaurora.org/quic/la/kernel/msm
git fetch CAF
# [media] media: Init the reserved fields of struct media_link_desc
#git cherry-pick cc4b26575602e492efd986e9a6ffc4278cee53b5 || git add $(git status -s | awk '{print $2}') && git cherry-pick --continue
# msm: vidc: Check validity of userspace address
git cherry-pick f2a3f5e63e15e97a66e8f5a300457378bcb89d9c
# msm: camera: Check stats index MAX in ISP driver
#git cherry-pick 8d1f7531ff379befc129a6447642061e87562bca || git add $(git status -s | awk '{print $2}') && git cherry-pick --continue
git remote rm CAF
popd
# system/netd: two CAF fixes; the first falls back to staging + continue
# when the pick conflicts.
pushd "$LOCAL_REPO/system/netd"
[ $(git remote | egrep \^CAF) ] && git remote rm CAF
git remote add CAF https://source.codeaurora.org/quic/la/platform/system/netd
git fetch CAF
# Close the opened pipe correctly
git cherry-pick cc2853e6cec8ca2cf92430ad9a83358b131fc417 || git add $(git status -s | awk '{print $2}') && git cherry-pick --continue
# Add isIfaceName check to addUpstreamInterface
git cherry-pick e9925f5acb4401588e23ea8a27c3e318f71b5cf8
git remote rm CAF
popd
exit 0
# NOTE: everything below this point is intentionally unreachable (the
# 'exit 0' above); it is retained as a scratchpad of candidate fixes that
# were disabled or not applicable to this tree.
# added by CM
pushd "$LOCAL_REPO/external/bluetooth/bluedroid"
# DO NOT MERGE Fix potential DoS caused by delivering signal to BT process (http://review.cyanogenmod.org/#/c/155612/)
#git fetch http://review.cyanogenmod.org/CyanogenMod/android_external_bluetooth_bluedroid refs/changes/12/155612/1 && git cherry-pick FETCH_HEAD
popd
pushd "$LOCAL_REPO/external/conscrypt"
git remote add aosp https://android.googlesource.com/platform/external/conscrypt && git fetch aosp
git cherry-pick 5af5e93463f4333187e7e35f3bd2b846654aa214 || git add $(git status -s | awk '{print $2}') && git cherry-pick --continue
git remote rm aosp
popd
pushd "$LOCAL_REPO/external/jhead/"
git remote add aosp https://android.googlesource.com/platform/external/jhead && git fetch aosp
git cherry-pick bae671597d47b9e5955c4cb742e468cebfd7ca6b
git remote rm aosp
popd
# have to take this one from https://git.openssl.org/?p=openssl.git;a=commit;h=578b956fe741bf8e84055547b1e83c28dd902c73
# because 6.0.1 has BoringSSL instead of OpenSSL and there is no OpenSSL fix from Google
pushd "$LOCAL_REPO/external/openssl"
# Fix memory issues in BIO_*printf functions (http://review.cyanogenmod.org/#/c/155616/)
#git fetch http://review.cyanogenmod.org/CyanogenMod/android_external_openssl refs/changes/16/155616/1 && git cherry-pick FETCH_HEAD
#git apply $HOME/uber-saber/patches/CVE-2016-2842.patch
#git add $(git status -s | awk '{print $2}') && git commit -m "Fix memory issues in BIO_*printf functions"
popd
#https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c (external/bluetooth/bluedroid)
#https://source.codeaurora.org/quic/la/platform/system/netd/commit/?h=LA.BR.1&id=568ef402f6d5a7a50c126aafc78c4edf59abba1c
| true |
8a6d2698709079347b789a46d649c1e1d567d1fd | Shell | Azure/azure-quickstart-templates | /application-workloads/postgre/postgresql-standalone-server-ubuntu/scripts/install_postgresql.sh | UTF-8 | 1,804 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Build PostgreSQL 9.3.5 from source into /opt, create the 'postgres' user,
# initialize the cluster on port 1999, install an init script, and start it.
sudo apt-get update
#install the necessary tools
sudo apt-get install gcc make libreadline6-dev zlib1g-dev -y
#download postgresql source code
wget https://ftp.postgresql.org/pub/source/v9.3.5/postgresql-9.3.5.tar.bz2 -P /tmp 2>/dev/null
cd /tmp
tar jxvf postgresql-9.3.5.tar.bz2
cd postgresql-9.3.5
#install postgresql
./configure --prefix=/opt/postgresql-9.3.5
sudo make install-world 2> /dev/null
#create postgres user for postgresql
sudo ln -s /opt/postgresql-9.3.5 /opt/pgsql
sudo mkdir -p /opt/pgsql_data
sudo useradd -m postgres
sudo chown -R postgres.postgres /opt/pgsql_data
#/tmp/postgres.sh contains the steps of setup the user postgres' environment, initialize the database
sudo touch /home/postgres/.bash_profile
sudo chown postgres.postgres /home/postgres/.bash_profile
# Generate a helper script. The outer unquoted EOF expands nothing here, but
# the inner EOFF heredoc escapes \$ (as \\\$) so PGPORT/PATH etc. are written
# literally into .bash_profile and only expand when postgres logs in.
cat >> /tmp/postgres.sh <<EOF
cat >> /home/postgres/.bash_profile <<EOFF
export PGPORT=1999
export PGDATA=/opt/pgsql_data
export LANG=en_US.utf8
export PGHOME=/opt/pgsql
export PATH=\\\$PATH:\\\$PGHOME/bin
export MANPATH=\\\$MANPATH:\\\$PGHOME/share/man
export DATA=\`date +"%Y%m%d%H%M"\`
export PGUSER=postgres
alias rm='rm -i'
alias ll='ls -lh'
EOFF
source /home/postgres/.bash_profile
#initialize the database
initdb -D \$PGDATA -E UTF8 --locale=C -U postgres 2> /dev/null
EOF
#su to postgres to execute /tmp/postgres.sh
sudo su - postgres -s /bin/bash /tmp/postgres.sh
#instead we can use sudo su - postgres -c "initdb -D \$PGDATA -E UTF8 --locale=C -U postgres"
#postgresql configuration
# Install the contrib start script and point it at /opt paths (lines 32/35
# of the script hold PREFIX and PGDATA).
cd /tmp/postgresql-9.3.5/contrib/start-scripts
sudo cp linux /etc/init.d/postgresql
sudo sed -i '32s#usr/local#opt#' /etc/init.d/postgresql
sudo sed -i '35s#usr/local/pgsql/data#opt/pgsql_data#' /etc/init.d/postgresql
sudo chmod +x /etc/init.d/postgresql
#start postgresql
sudo /etc/init.d/postgresql start
18e1ba3eebf5909a6fcfbe4db2081cb869a025d3 | Shell | mikeintoch/insurance-demo | /frontend-insurance/src/main/webapp/vendor/angular-patternfly/scripts/publish.sh | UTF-8 | 1,111 | 3.671875 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #/bin/sh
set -o errexit -o nounset
# Publish the Travis build output: commit dist/, then push it to the
# BRANCH-dist branch (master builds only; tag builds stop after committing).
# Requires AUTH_TOKEN, TRAVIS_REPO_SLUG, TRAVIS_BRANCH, TRAVIS_TAG in the env.
# User info
git config user.name "patternfly-build"
git config user.email "patternfly-build@redhat.com"
git config --global push.default simple
# Add upstream authentication token
git remote add upstream https://$AUTH_TOKEN@github.com/$TRAVIS_REPO_SLUG.git
# Commit generated files
git add dist --force
git commit -m "Added files generated by Travis build"
# Must commit changes for pushing to OpenShift, but
# only deploy to the dist branch for tags.
# Fix: use two tests joined by && instead of the obsolescent '[ ... -a ... ]'.
if [ -z "$TRAVIS_TAG" ] && [ "$TRAVIS_BRANCH" != "master" ]
then
  echo "This commit was made against $TRAVIS_BRANCH and not the master or tag! Do not deploy!"
  exit 0
fi
# Push to dist branch
# Fix: $( ) instead of legacy backticks.
EXISTING=$(git ls-remote --heads https://github.com/"$TRAVIS_REPO_SLUG".git "$TRAVIS_BRANCH"-dist)
if [ -z "$TRAVIS_TAG" ]
then
  # When the dist branch already exists, merge the source branch into it
  # (preferring its content) before force-pushing; otherwise create it.
  if [ -n "$EXISTING" ]
  then
    git fetch upstream $TRAVIS_BRANCH-dist:$TRAVIS_BRANCH-dist
    git checkout $TRAVIS_BRANCH-dist
    git merge -Xtheirs $TRAVIS_BRANCH --no-edit --ff
    git push upstream $TRAVIS_BRANCH-dist --force -v
  else
    git push upstream $TRAVIS_BRANCH:$TRAVIS_BRANCH-dist --force -v
  fi
fi
| true |
2ad61ddd6b006122493ec00a8791a39a61479200 | Shell | luiseduardohdbackup/mtk | /asd2nb/asdbless.sh | UTF-8 | 1,263 | 3.625 | 4 | [
"WTFPL"
] | permissive | #!/bin/bash
# asdbless.sh
# Pick correct ASD image for this machine
# @author Filipp Lepalaan <filipp@mcare.fi>
# @package mtk
SERVER_IP=192.168.1.10 # The IP of the NetBoot server
SERVER_URL="http://example.com/mtk/asd2nb/server.php" # URL of the server-side script
NBI_PATH="/data/nb" # Path to the ASD image repository
ASD_ROOT="/asd" #
MODEL=$(/usr/sbin/sysctl -n hw.model)
MACHINE=$(/usr/sbin/sysctl -n hw.machine)
# Ask the server which ASD image matches this model; reply is "ASD/DMG".
RESULT=$(/usr/bin/curl -s ${SERVER_URL} -d m=${MODEL})
if [[ -z ${RESULT} ]]; then
    # Fix: the original used '2>&1', which is a no-op here; '>&2' sends the
    # diagnostic to stderr as intended.
    echo "${MODEL} not found on server, exiting" >&2
    exit 1;
fi
ASD=$(echo $RESULT | awk 'BEGIN { FS = "/" } ; { print $1 }')
# NOTE(review): DMG is parsed but never used below — kept for reference.
DMG=$(echo $RESULT | awk 'BEGIN { FS = "/" } ; { print $2 }')
# Bless the NetBoot image for the next boot only; EFI machines use the
# simpler boot.efi path, everything else gets booter + kernel + NFS root.
if [[ $1 != "efi" ]]; then
    /usr/sbin/bless --netboot \
    --booter tftp://${SERVER_IP}${ASD_ROOT}/${ASD}/${MACHINE}/booter \
    --kernel tftp://${SERVER_IP}${ASD_ROOT}/${ASD}/${MACHINE}/mach.macosx \
    --options "rp=nfs:${SERVER_IP}:${NBI_PATH}:${ASD_ROOT}/${RESULT}" \
    --nextonly
else
    /usr/sbin/bless --netboot \
    --booter tftp://${SERVER_IP}${ASD_ROOT}/${ASD}/efi/boot.efi \
    --nextonly
fi
echo "Boot volume set to ${ASD}"
exit 0
| true |
c4ae896a5a346c8902844c934ce18cc8b0f8e866 | Shell | khm915/www.renfei.net | /build_push.sh | UTF-8 | 2,325 | 3.59375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#########################################
## Maven build, Docker image build, and push to the Docker registry.
## Author RenFei(i@renfei.net)
## Public endpoint:  registry.cn-hangzhou.aliyuncs.com/privately/renfei
## VPC endpoint:     registry-vpc.cn-hangzhou.aliyuncs.com/privately/renfei
## Classic network:  registry-internal.cn-hangzhou.aliyuncs.com/privately/renfei
#########################################
# $1 - registry password; prompted for interactively when omitted.
PASSWORD=$1
# Ask Maven for the project version (quiet exec of 'echo ${project.version}').
PROJECT_VERSION=$(mvn -Dexec.executable='echo' -Dexec.args='${project.version}' --non-recursive exec:exec -q)
REGISTRY=registry.cn-hangzhou.aliyuncs.com
NAMESPACES=privately
REPOSITORIES=www.renfei.net
#########################################
echo "#########################################"
echo "# RENFEI.NET 编译构建生产环境Docker镜像文件"
echo "# Build Version: $REPOSITORIES:$PROJECT_VERSION"
echo "#########################################"
if [ "$PASSWORD" == "" ]; then
    # Fix: read -r so backslashes in the password are taken literally.
    read -r -p "请输入 Docker 仓库密码:" PASSWORD
fi
mvn clean package -Dmaven.test.skip=true -P prod
echo "#########################################"
echo "# Docker 构建开始 >>>>"
echo "#########################################"
docker build -t $REPOSITORIES:"$PROJECT_VERSION" .
IMAGEID=$(docker images -q --filter reference=$REPOSITORIES:"$PROJECT_VERSION")
echo "构建完成 >>>> IMAGE ID:$IMAGEID"
echo "#########################################"
echo "# 登陆 Docker 仓库 >>>>"
echo "#########################################"
docker login --username=i@renfei.net --password="$PASSWORD" $REGISTRY
docker tag "$IMAGEID" $REGISTRY/$NAMESPACES/$REPOSITORIES:"$PROJECT_VERSION"
echo "#########################################"
echo "# 开始推送 Docker 镜像到仓库 >>>>"
echo "#########################################"
docker push $REGISTRY/$NAMESPACES/$REPOSITORIES:"$PROJECT_VERSION"
echo "#########################################"
echo "# 删除本地 Docker 镜像到仓库 >>>>"
echo "#########################################"
docker rmi "$IMAGEID"
echo "#########################################"
echo "# 全部构建完成! Version: $REPOSITORIES:$PROJECT_VERSION"
echo "# 镜像地址: $REGISTRY/$NAMESPACES/$REPOSITORIES:$PROJECT_VERSION"
echo "# 镜像地址: registry-vpc.cn-hangzhou.aliyuncs.com/$NAMESPACES/$REPOSITORIES:$PROJECT_VERSION"
echo "#########################################"
| true |
de118cd07b156fbaabfc3c5d1e29b407f2b19218 | Shell | gchamp20/home | /bin/_gc-sync-home | UTF-8 | 188 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Sync the live dotfiles back into the repository: for every file tracked
# under skel/, copy the corresponding file from $HOME over it.
CONF_FOLDER="$(dirname "$0")/../skel"
# Fix: iterate NUL-delimited find output instead of word-splitting
# $(find ...), so paths containing spaces survive; the unused 'dirname'
# variable was removed and the cp arguments are quoted.
while IFS= read -r -d '' dst
do
    src=${dst/$CONF_FOLDER/$HOME}
    cp --verbose "${src}" "${dst}"
done < <(find "${CONF_FOLDER}" -type f -print0)
| true |
530d70faf2afb7679ae6bb79a08db476c97eddf3 | Shell | Spottybadrabbit/blobio | /pgsql/fs-xor-service | UTF-8 | 1,725 | 3.625 | 4 | [] | no_license | #!/bin/bash
#
# Synopsis:
#	Report on symmetric difference between the file system and service view.
# Usage:
#	fs-xor-service >fs-xor-service.log 2>&1 &
#	32 4 * * Sun sbin/fs-xor-service >>support/fs-xor-service/fsc.out 2>&1
# Note:
#	Would be nice to query service table and file system for timestamps,
#	so blobs created during the scan could be quickly identified.
#
#	Be wary of the LC_COLLATE environment variable.  Setting value to 'C'
#	forces sorting case sensitive ascii, I (jmscott) think.
#
# Script name, available for log prefixes.
PROG=$(basename $0)
#
# Force sort command to honor case.
#
export LC_COLLATE=C
# Print a timestamped, PID-tagged log line built from all arguments.
# Uses "$*" rather than "$@": inside a quoted string the arguments must be
# joined into a single word (ShellCheck SC2145); the printed output is the
# same, but the expansion is now well-defined.
log()
{
	echo "$(date +'%Y/%m/%d %H:%M:%S'): #$$: $*"
}
# Report a fatal, ERROR-prefixed message on stderr via log(), then abort
# the whole script with a non-zero status.
die() {
	log "ERROR: $@" >&2
	exit 1
}
# Trap handler for EXIT/INT/QUIT/TERM: emit one final farewell log line so
# every termination path is visible in the log.
leave() {
	log 'good bye, cruel world'
}
# Announce startup and guarantee leave() runs on every exit path.
log hello, world
trap leave EXIT INT QUIT TERM
# Sanity-check the install root and load its profile (PG* connection vars).
test -n "$BLOBIO_ROOT" || die 'BLOBIO_ROOT environment variable not defined'
cd $BLOBIO_ROOT || die "cd $BLOBIO_ROOT failed"
log "BLOBIO_ROOT=$BLOBIO_ROOT"
test -f etc/profile || die "expected file $BLOBIO_ROOT/etc/profile"
. etc/profile
# Record the PostgreSQL connection parameters picked up from etc/profile.
log "PGHOST=$PGHOST"
log "PGPORT=$PGPORT"
log "PGUSER=$PGUSER"
log "PGDATABASE=$PGDATABASE"
log "starting scan of $(pwd)/data/ ..."
log "file system only in column <, service only in column >"
# Symmetric difference: comm -3 suppresses lines common to both inputs,
# leaving file-system-only blobs (column 1) and service-only blobs (column 2).
comm -3 <(
	# Normalize data/*_fs paths to "algorithm:digest" form, sorted for comm.
	cd data || die "cd $(pwd)/data failed"
	find *_fs -follow -type f -print |
	fgrep -v '/tmp/' |
	fgrep -v '.DS_Store' |
	sed 's/\.\///' |
	sed 's/_fs\//:/' | sed 's/\///g' |
	sort
	#
	#  Need to check error condition!!!
	#
) <(
	# The service table's view of known blobs, one "algorithm:digest" per row.
	psql --no-align --tuples-only --command '
select
	blob::text
  from
  	blobio.service
  order by
  	1 asc
') |
	# Tag comm output: a leading tab means column 2 (service-only, ">"),
	# anything else is column 1 (file-system-only, "<").
	sed 's/^\t/> /' |
	sed 's/^\([^>]\)/< \1/' |
	while read LINE; do
		log $LINE
	done
log 'done with xor of file system and service table'
| true |
06bf511491930581e8f09e2e7983eda362e641e9 | Shell | ATB-UQ/gromos2amber_tests | /make_imd | UTF-8 | 339 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Fill an IMD template read on stdin with atom/solvent counts taken from a
# GROMOS coordinate (.cnf) file.
#   $1 - path to the .cnf file
#   $2 - number of atoms per solvent molecule
#
# Emit the POSITION..END block of "$1" with '#' comment lines removed.
position_block() {
	grep -v '#' < "$1" | sed -n '/POSITION/,/END/p'
}
# Atom count: every line of the POSITION block minus the two delimiter lines.
count_atoms() {
	echo $(( $(position_block "$1" | wc -l) - 2 ))
}
# Solvent count: SOL lines in the POSITION block, divided by atoms per
# molecule ($2).  grep -c replaces the original "grep SOL | wc -l".
count_solvents() {
	echo $(( $(position_block "$1" | grep -c SOL) / $2 ))
}
# Substitute {{num_atoms}} and {{num_solvent}} in the template on stdin.
render_imd() {
	local numatoms numsolv
	numatoms=$(count_atoms "$1")
	numsolv=$(count_solvents "$1" "$2")
	sed "s/{{num_atoms}}/${numatoms}/g" | sed "s/{{num_solvent}}/${numsolv}/g"
}
# Guard: without both arguments the original choked on an empty redirect and
# a malformed arithmetic expression while still consuming stdin; skip instead.
if [ $# -eq 2 ]; then
	render_imd "$1" "$2"
fi
| true |
14b58c851d6137ce74900c2f4ac5548f210771be | Shell | luapower/luapower-all | /csrc/nginx/get-it.sh | UTF-8 | 789 | 3.015625 | 3 | [] | no_license | NGINX=release-1.17.4
# Pinned versions of the Lua-nginx toolchain components
# (NGINX=release-1.17.4 is set just above this block).
NGX_DEVEL_KIT=v0.3.1
LUA_NGINX_MODULE=v0.10.15
# Clone each repository only once; re-runs reuse the existing checkouts.
[ -d src ] || git clone https://github.com/nginx/nginx --depth 1 --branch $NGINX src
[ -d openresty ] || git clone https://github.com/openresty/openresty --depth 1
[ -d ndk ] || git clone https://github.com/simplresty/ngx_devel_kit --depth 1 --branch $NGX_DEVEL_KIT ndk
[ -d lua ] || git clone https://github.com/openresty/lua-nginx-module --depth 1 --branch $LUA_NGINX_MODULE lua
# Pin nginx to the tagged release, then apply the local patch plus
# OpenResty's SSL patches (patch -N skips already-applied hunks).
(cd src && {
	git reset --hard $NGINX
	git apply ../src-*.patch
	cat ../openresty/patches/nginx-1.17.4-ssl_*.patch | patch -N -p1
})
# Reset the remaining checkouts to their pinned revisions.
(cd openresty && git reset --hard)
(cd ndk && git reset --hard $NGX_DEVEL_KIT)
# lua-nginx-module gets its own local patch on top of the pinned tag.
(cd lua && {
	git reset --hard $LUA_NGINX_MODULE
	git apply ../lua-*.patch
})
| true |
c9a3e50b7112bbf87e994f6e36c7d50f3a71dfe8 | Shell | poppabear8883/UNIT3D-INSTALLER | /install.sh | UTF-8 | 1,718 | 4.21875 | 4 | [] | no_license | #!/usr/bin/env bash
# Load the color variables ($Red, $Purple, $Color_Off, ...) used below.
source tools/colors.sh
# Detect OS
case $(head -n1 /etc/issue | cut -f 1 -d ' ') in
    Ubuntu) type="ubuntu" ;;
    *) type='' ;;
esac
# Unable to detect OS Properly
# Note: OVH and other providers remove the contents of /etc/issue in their OS templates
# so we need to ask the user manually to tell us what the OS is as a Fallback
# Ref: https://github.com/ServNX/UNIT3D-INSTALLER/issues/8
if [ "$type" = '' ]; then
    echo -e "\n$Red We was unable to automatically determine your OS! $Color_Off"
    echo -e "\n$Purple This can happen if you are using an OS template from a provider like OVH amongst others. $Color_Off\n"
    PS3='Please select the # for your OS: '
    options=("Ubuntu 22.04" "Ubuntu 20.04" "Ubuntu 18.04" "Ubuntu 16.04" "Quit")
    # Interactive fallback: each choice rewrites /etc/issue so later runs
    # (and other tools) can auto-detect the release.
    select opt in "${options[@]}"
    do
        case $opt in
            "Ubuntu 22.04")
                echo 'Ubuntu 22.04 LTS \n \l' > /etc/issue
                type='ubuntu'
                break
                ;;
            "Ubuntu 20.04")
                echo 'Ubuntu 20.04 LTS \n \l' > /etc/issue
                type='ubuntu'
                break
                ;;
            "Ubuntu 18.04")
                echo 'Ubuntu 18.04 LTS \n \l' > /etc/issue
                type='ubuntu'
                break
                ;;
            "Ubuntu 16.04")
                echo 'Ubuntu 16.04 LTS \n \l' > /etc/issue
                type='ubuntu'
                break
                ;;
            "Quit")
                exit 0
                ;;
            *)
                echo -e "$Red Invalid Option $REPLY $Color_Off"
                ;;
        esac
    done
fi
# Hand off to the per-OS installer (e.g. ubuntu.sh) if it exists.
if [ -e $type.sh ]; then
    bash ./$type.sh
fi
803d5b3c58bca2818105cbd1591c51140e0aeca3 | Shell | jibe-b/uc-tdm-AS-D | /plans/run_WoS_abstracts_batch.sh | UTF-8 | 504 | 2.609375 | 3 | [] | no_license | #!/bin/bash
batch=$1        # number of the batch to process
CONFIG_FILE=$2  # configuration file to use
CORPUSNAME=WoS-ref_gene_marker_wheat-janv-17
RAM=30g         # JVM heap ceiling handed to alvisnlp via -J
# import variables -- assumed to define ALVISDIR, CORPUSDIR, PLANDIR,
# SOFTWAREDIR, OUTPUTDIR and ONTOLOGY_NAME (TODO confirm against the config)
. "$CONFIG_FILE"
# run the pipeline.  Every expansion is quoted so paths with spaces survive,
# and the heap size now honors $RAM instead of a hard-coded -Xmx30g.
"$ALVISDIR/bin/alvisnlp" -J "-Xmx$RAM" -verbose \
	-entity inputfile "$CORPUSDIR/$CORPUSNAME-part$batch" \
	-inputDir "$PLANDIR" -inputDir "$SOFTWAREDIR" \
	-outputDir "$OUTPUTDIR/part$batch" \
	-entity outdir "$OUTPUTDIR/part$batch" \
	-entity ontology "$ONTOLOGY_NAME" \
	plans/tag_and_index_WoS_abstracts.plan
| true |
07ab785f4bd68dec7be183d92a82abb3aff9c733 | Shell | alerque/aur | /picosvg/PKGBUILD | UTF-8 | 836 | 2.78125 | 3 | [] | no_license | # Maintainer: Caleb Maclennan <caleb@alerque.com>
# Arch Linux PKGBUILD for picosvg (PEP 517 build via python-build/installer).
pkgname=picosvg
pkgver=0.22.1
pkgrel=1
pkgdesc='CLI tool to simplify SVG files, intended for use as part of a font build'
arch=(any)
url="https://github.com/googlefonts/$pkgname"
license=(Apache)
# Python deps listed bare here, prefixed with "python-" below.
_py_deps=(lxml
          skia-pathops)
depends=(absl-py
         python
         "${_py_deps[@]/#/python-}")
makedepends=(python-{build,installer,wheel}
             python-setuptools-scm)
checkdepends=(python-pytest)
_archive="$pkgname-$pkgver"
# ${pkgname::1} = first letter, per PyPI's sdist directory layout.
source=("https://files.pythonhosted.org/packages/source/${pkgname::1}/$pkgname/$_archive.tar.gz")
sha256sums=('d9d7d9ecbdef53cab1493f283545163b32844dd8d2ed9e1471671c7ce817618d')
# Build a wheel (-w) without isolation (-n) inside the unpacked sdist.
build() {
	cd "$_archive"
	python -m build -wn
}
# Run the test suite against the in-tree sources.
check() {
	cd "$_archive"
	PYTHONPATH=src pytest tests
}
# Install the wheel built above into the package staging dir.
package() {
	cd "$_archive"
	python -m installer -d "$pkgdir" dist/*.whl
}
| true |
fab5122f918a1edaed8175037fe0c3b8f3c6923d | Shell | BismarkCT/SOE | /1_Respostas/Shell_Ex/Meu_Ex1.sh | UTF-8 | 276 | 3.765625 | 4 | [] | no_license | #!/bin/bash
# Teaching script: report the script name, the argument count, and each
# argument on its own line (user-facing messages kept in the original
# Portuguese).  Logic lives in main() so it can be exercised with any
# argument list.
main() {
	echo "O nome desse script é $0"
	echo "$# argumentos passados pelo usuário"
	if [ $# -ge 1 ] ; then
		echo 'Os parâmetros de entrada foram:'
		# "$@" keeps each argument intact, even ones containing spaces;
		# the original unquoted $@/$* re-split them on whitespace.
		echo "$@"
		echo 'Em particular...'
		for i in "$@"; do
			echo "Arg = $i"
		done
	else
		echo 'Não tem nenhum argumento'
	fi
}
main "$@"
| true |
5229cb6f20262dc59f8211c9235de265e265b4a3 | Shell | ericwu1997/ShellScript | /Filesharing/Smb-apache-auto-setup/executable/install-script.sh | UTF-8 | 806 | 3.84375 | 4 | [] | no_license | # installation script
# Menu of pre-selected packages; index in this array = menu number.
package=(
  httpd # apache
  nfs-utils # NFS
  samba # SAMBA
)
# Interactive loop: redraw the menu, act on one choice, wait for Enter.
while true; do
  clear
  # List each package with its index ("${!package[@]}" expands the indices).
  for i in "${!package[@]}"; do
    printf "$i)..... ${package[$i]}\n"
  done
  printf "${#package[@]})..... other\n"
  printf "q)..... quit\n"
  printf 'Press number for choice, then Return\n'
  # 'rest' soaks up any extra words typed after the choice.
  read -p " > " ltr rest
  case ${ltr} in
    [0-2])
      printf "installing ${package[${ltr}]}...\n"
      dnf install -y ${package[${ltr}]}
      ;;
    [3])
      # "other": prompt for an arbitrary package name.
      printf "enter name of the package you would like to install\n"
      read -p " > " name
      printf "installing ${name}...\n"
      dnf install -y ${name}
      ;;
    [Qq])
      exit
      ;;
    *)
      echo Unrecognized choice: ${ltr}
      ;;
  esac
  echo
  echo -n ' Press Enter to continue.....'
  read rest
done
| true |
efdcd2751059efc3b164019f3621900c2d2e8eb7 | Shell | maniaabdi/system-config | /bin/windows/run-locate.sh | UTF-8 | 246 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# Cache wrapper around locateEmacs.sh: results for query "$1" are stored in
# ~/.cache/.run-locate/<query> and reused until any locate database under
# ~/.cache/locate becomes newer than the cached file.
mkdir -p ~/.cache/.run-locate
file=~/.cache/.run-locate/"$1"
# Re-run when no cache exists yet, or when any locate db is newer (stale).
if ! test -f "$file" || find ~/.cache/locate -type f -newer "$file" | grep -q .; then
    # Quote "$1" so a multi-word query reaches locateEmacs.sh as a single
    # argument (the original unquoted $1 word-split it); each result line is
    # wrapped in double quotes before being cached and echoed via tee.
    locateEmacs.sh -i "$1" | sed -e 's/^/"/; s/$/"/' | tee "$file"
else
    cat "$file"
fi
| true |
e6d7734ccd03b67a5bc25073b0f886ea8c717186 | Shell | pratikkuyate96/EmailValidationProblem | /EmailValidation.sh | UTF-8 | 415 | 3.703125 | 4 | [] | no_license | #!/bin/bash
#Function to check email validation
# Pure predicate: returns 0 when $1 matches EMAIL_PATTERN, 1 otherwise.
# NOTE(review): the pattern is kept byte-for-byte from the original; the
# "[a-z0-1]" domain class and the "[_+-.]" bracket range look like typos
# for "[a-z0-9]" and "[_+.-]" -- confirm before tightening.
is_valid_email() {
    local EMAIL_PATTERN="^([a-z]{1,}[0-9a-z]{0,}([_+-.]{0,1}[a-z0-9]{1,}){0,1}[@]{1}[a-z0-1]{1,}[.]{1}[a-z]{2,4}([.]{0,1}[a-z]{2}){0,1})$"
    [[ $1 =~ $EMAIL_PATTERN ]]
}
# Interactive wrapper: prompt for an address on stdin and report the verdict.
function emailValidation() {
    read -p "Enter Email Id : " emailId
    if is_valid_email "$emailId"
    then
        echo "$emailId is a valid Email address"
    else
        echo "$emailId is not a valid Email address"
    fi
}
#Main
emailValidation
| true |
1109492b19cf43ec2082c3fe63fea948726da29d | Shell | davejachimiak/fm | /asm/tests/data_section | UTF-8 | 672 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env bash
# End-to-end test for the fm assembler/VM data section: assemble a small
# program into a temp file, run it, and compare stdout to the expectation.
asm_file=$(mktemp)
program=$(mktemp)
# The assembly source (data section after '---' defines the constants used
# by the loadd/mkstringd instructions above it).
echo -e \
'    mkstringd write_flag
    mkstringd stdout
    fopen
    mkstringd message
    loadd float
    f64tos 1
    appends
    loadd integer
    itos 1
    appends
    fputs
    loadc 0
    halt
    ---
    write_flag = "w"
    stdout = "/dev/stdout"
    float = 3.3
    integer = 10
    message = "<<<theedge>>>"
' > "$asm_file"
# Assemble, then execute the resulting bytecode and capture its output.
./build/fm-asm "$asm_file" > "$program"
#cat "$program" | hexdump -e '9/1 " %02X" "\n"'
#./build/fm "$program - 1"
result=$(./build/fm "$program")
expected="103.3<<<theedge>>>"
if [[ "$result" != "$expected" ]]; then
    echo "asm/tests/labels failure: expected \"$result\" to equal \"$expected\""
    exit 1
fi
| true |
d12d125142e02098e475522736021fc42aa8e9a3 | Shell | brygga-dev/workdir2 | /server/base/backup/restore.sh | UTF-8 | 2,209 | 3.734375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Passing an argument will clone a certain
# commit, and set it up as the contents of
# the `repo` folder, then run the mysqldump
# from the commit
# (lib.sh provides wait_for_lock_file, ensure_git_repo, restore_db, push_git
# and the LOCK_FILE/VAR_DIR/GIT_REPO/MYSQLDUMP_FILE variables used below.)
. /opt/lib.sh
# Serialize against other backup/restore runs via the lock file.
wait_for_lock_file
touch "$LOCK_FILE"
ensure_git_repo
# Cloning repo, this adds the checkout as a new
# commit, but there should be better ways to do this
# probably..
# Not sure git reset --hard is good as keeping the history
# can be an advantage
echo "Cloning repo"
git clone $GIT_REPO "$VAR_DIR/restore_repo"
cd "$VAR_DIR/restore_repo"
# Check if a hash was provided as argument
if [ ! -z "$1" ]; then
    git checkout $1
fi
# Drop git metadata; the checkout becomes plain file content from here on.
rm -rf .git
# Just replacing all files. Possibly some option to "merge",
# but doesn't always make sense, there wouldn't be database
# entries any more to other files.
# todo, possibly could do a stash, pull, pop, push for
# any last changes. Would be nice with code sharing
echo "Restoring files from git"
# Move .git out so we can simply move repository file
#if [ -d "$VAR_DIR/repo/.git" ]; then
#    echo "Moving .git out of /repo/"
#    mv "$VAR_DIR/repo/.git" "$VAR_DIR/.git"
#fi
# In dev mode, uploads is currently mounted, so
# its delete gives errors.
# Deleting current files in `repo`
echo "Current uploads"
ls "$VAR_DIR/repo/uploads"
rm -rf "$VAR_DIR/repo/uploads/"*
echo "Removed uploads"
ls "$VAR_DIR/repo/uploads"
# Moving the cloned/restored files to `repo` folder
if [ -d "uploads" ]; then
    echo "Moving uploads"
    mv uploads/* "$VAR_DIR/repo/uploads/"
    echo "After move"
    ls "$VAR_DIR/repo/uploads"
fi
if [ -f "mysqldump.sql" ]; then
    echo "Moving mysqldump"
    rm -f "$MYSQLDUMP_FILE"
    mv mysqldump.sql "$MYSQLDUMP_FILE"
fi
# Permissions
ensure_repo_permissions
# Then moving the .git in again
#if [ -d "$VAR_DIR/.git" ]; then
#    echo "Moving .git back to /repo/"
#    mv "$VAR_DIR/.git" "$VAR_DIR/repo/.git"
#fi
# Now deleting the empty restore folder
cd ..
rm -rf restore_repo
cd "$VAR_DIR/repo"
# Now run mysqldump file
restore_db
# TODO: This could be from an earlier wp version, so we should run wp upgrade/migrate
# Quick commit, todo: modularize to some functions
push_git "Restore to $1"
# Release the lock taken at the top of the script.
rm "$LOCK_FILE"
echo "Restored from commit $1" | true |
a18fe3601611c8b0aadfe43f297365e5d007e5d7 | Shell | bmzhao/CS431OperatingSystemSetup | /install.sh | UTF-8 | 4,314 | 3.359375 | 3 | [] | no_license | #!/bin/bash
#this script assumes you have already downloaded all the files from Nima's blackboard
#and that they are all located in ~/Downloads
# Pause briefly, then print a separator banner followed by the message in $1
# and two blank lines.  "$1" is quoted so multi-word messages keep their
# spacing and glob characters are not expanded (the original unquoted $1
# word-split the message, ShellCheck SC2086).
printMsg(){
	sleep 3
	echo '---------------------------------'
	echo "$1"
	echo
	echo
}
# --- binutils -------------------------------------------------------------
cd ~/Downloads
printMsg 'Attempting to unpack binutils'
tar -zxf 'os161-binutils.gz'
printMsg 'Finished untarring binutils'
cd *2.0.1
printMsg "Now in directory: `pwd`"
printMsg 'Configuring binutils'
./configure --nfp --disable-werror --target=mips-harvard-os161 --prefix=$HOME/sys161/tools
# Bug fix: this line read "pringtMsg" -- a nonexistent command, so the
# status message silently failed with "command not found".
printMsg 'Making binutils'
make
printMsg 'Running make install....'
make install
printMsg 'Making directory for toolchain binaries'
mkdir $HOME/sys161/bin
printMsg 'moving to homedirectory'
cd
pwd
printMsg 'editing path within bashrc and profile'
echo 'export PATH="$PATH:$HOME/sys161/bin:$HOME/sys161/tools/bin"' >> .bashrc
echo 'export PATH="$PATH:$HOME/sys161/bin:$HOME/sys161/tools/bin"' >> .profile
export PATH="$PATH:$HOME/sys161/bin:$HOME/sys161/tools/bin"
# --- gcc cross-compiler ---------------------------------------------------
cd ~/Downloads
printMsg 'Unpacking gcc'
tar -zxf os161-gcc.gz
cd gcc-4.1.2+os161-2.0
printMsg 'Configuring GCC'
./configure -nfp --disable-shared --disable-threads --disable-libmudflap --disable-libssp --target=mips-harvard-os161 --prefix=$HOME/sys161/tools
# Bug fix: quote the -name pattern so the shell cannot expand *.o against
# files in the current directory before find sees it (ShellCheck SC2061).
find . -name '*.o'
find . -name '*.o' -exec rm -rf {} \;
printMsg 'Making'
make
printMsg 'Installing'
make install
# Second pass: clean objects and rebuild (per the original procedure).
find . -name '*.o'
find . -name '*.o' -exec rm -rf {} \;
printMsg 'Making'
make
printMsg 'Installing'
make install
# --- gdb ------------------------------------------------------------------
cd ~/Downloads
printMsg 'Unpacking gdb'
tar -zxf os161-gdb.gz
cd gdb-6.6+os161-2.0
printMsg 'Now configuring gdb'
./configure --target=mips-harvard-os161 --prefix=$HOME/sys161/tools --disable-werror
printMsg 'Making'
make
printMsg 'Installing'
make install
# --- bmake ----------------------------------------------------------------
cd ~/Downloads
printMsg 'Unpacking bmake'
tar -zxf os161-bmake.gz
printMsg 'Entering bmake directory'
cd bmake
pwd
printMsg 'unpacking mk within bmake directory'
tar -zxf ../os161-mk.gz
printMsg 'Running bmake bootstrap script'
./boot-strap --prefix=$HOME/sys161/tools | tee bootstrapOutput.txt
printMsg 'Obtaining list of commands to run after bootstrap script'
tail -7 bootstrapOutput.txt | tee bootstrapCommandsScript.sh
chmod 755 bootstrapCommandsScript.sh
printMsg 'executing commands after bootstrap script'
./bootstrapCommandsScript.sh
printMsg 'Copied over myscript.sh'
cd $HOME/sys161/tools/bin
cp ~/Downloads/myscript.sh .
chmod 755 myscript.sh
mkdir $HOME/sys161/bin
printMsg 'Executing myscript.sh'
./myscript.sh
printMsg 'Do not worry if you see two error messages saying FILE EXISTS, thats OK!!!'
# --- sys161 simulator -----------------------------------------------------
cd ~/Downloads
printMsg 'Untarring sys161'
tar -xzf sys161.gz
printMsg 'Moving into directory...'
cd sys161-1.99.06
pwd
printMsg 'Configuring sys161...'
./configure --prefix=$HOME/sys161 mipseb
printMsg 'Making'
make
printMsg 'Make Installing'
make install
printMsg 'Creating symlink...'
cd $HOME/sys161
ln -s share/examples/sys161/sys161.conf.sample sys161.conf
# --- OS/161 sources -------------------------------------------------------
printMsg 'Intalling OS/161 under proper directory...'
cd
mkdir cs431-os161
cd Downloads
mv os161.gz $HOME/cs431-os161
cd ~/cs431-os161
tar -xzf os161.gz
echo 'Directory Installed To: '
DIRECTORY=`pwd`
pwd
echo 'contents of that directory: '
ls -la $DIRECTORY
printMsg 'editing configure file'
cd $HOME/cs431-os161/os161-1.99
cp ~/Downloads/configure .
chmod 775 configure
printMsg 'ASST0...'
./configure --ostree=$HOME/cs431-os161/root --toolprefix=cs431-
cd kern/conf
./config ASST0
cd ../compile/ASST0
bmake depend
bmake
bmake install
printMsg 'Building OS/161 User programs'
cd $HOME/cs431-os161/os161-1.99
bmake
bmake install
printMsg 'Copying configuration file'
cd $HOME/cs431-os161/root
cp $HOME/sys161/sys161.conf sys161.conf
echo 'YOU MUST QUIT THE TERMINAL AND REOPEN THE TERMINAL FOR THE PATH TO BE MODIFIED'
echo 'LITERALLY QUIT ALL TERMINALS BY CLICKING THE X BUTTON IN THE TOP RIGHT CORNER'
echo 'Then reopen the terminal program and run the next setup script'
printMsg 'CONGRATULATIONS YOU FINISHED THE INSTALLATION PROCESS!!'
echo 'Close the terminal, then reopen it, then...'
echo 'To check if everything worked, execute the following two commands: '
echo 'cd $HOME/cs431-os161/root'
echo 'sys161 kernel-ASST0'
echo 'You should see a terminal pop up, if so, whoo-hoo it worked!'
echo 'If not, better luck next time!'
| true |
74465a3a53ecd40382320578ef6d22066e173863 | Shell | Viviane788/bin | /color.sh | UTF-8 | 1,618 | 4.0625 | 4 | [] | no_license | #!/usr/bin/env bash
#
# Dependencies: wal
#
# This is a script which allows me to change the background color
# of kitty terminal and my notification daemon, dunst
# depending on the current wallpaper. This script is used
# in my wall.sh script which I run when I change my wallpaper
# from ranger.
# The single image inside this directory is the current wallpaper.
WALLPAPER="$HOME/Pictures/current/"
# pywal writes the generated palette here, one color per line.
COLOR_FILE="$HOME/.cache/wal/colors"
# Generate the colors from current wallpaper
wal -e -n -s -q -i "$WALLPAPER"*
# First line of the palette file is the background color.
BG_COLOR=$(sed '1q;d' "$COLOR_FILE")
# just for debugging...
#echo "using $WALLPAPER as wallpaper"
#echo "bg $BG_COLOR"
# Rewrite kitty's configured background (and the BG_COLOR line in .bashrc),
# then push the new background to every open terminal via the OSC 11 escape.
set_kitty() {
    KITTY_CONFIG="$HOME/.config/kitty/kitty.conf"
    BASHRC="$HOME/.bashrc"
    # Set the color
    sed -i s/"^background.*/background $BG_COLOR"/g $KITTY_CONFIG
    sed -i "s/BG_COLOR=\".*/BG_COLOR=\"$BG_COLOR\"/g" $BASHRC
    # Write the OSC 11 (set background) sequence to each writable pty.
    for tty in /dev/pts/[0-9]*; do
        [[ -w $tty ]] &&
            printf "\\e]11;${BG_COLOR}\\e\\\\" > "$tty" &
    done
}
# Patch dunst's config in place: the fixed-width dot patterns match the old
# color values byte-for-byte (sed -z treats the file as one NUL-delimited
# record so the /1 and /2 occurrence counts span the whole file).
set_dunst(){
    # The regex is really bad, but it works :)
    # Path to dunst config file
    DUNSTRC="$HOME/.config/dunst/dunstrc"
    # Change the background of urgency low
    sed -z -i "s/    background............/    background = \"$BG_COLOR\"/1" $DUNSTRC
    # Change the background of urgency normal
    sed -z -i "s/    background............/    background = \"$BG_COLOR\"/2" $DUNSTRC
    # Change the border color of both urgncy low and normal
    sed -z -i "s/    frame_color = ........./    frame_color = \"$BG_COLOR\"/1" $DUNSTRC
    # Kill dunst so that it restart and use the updated config file
    killall dunst &> /dev/null
}
# Apply the freshly generated background color everywhere.
set_kitty
set_dunst
| true |
1436a3ee043abaf16511a0ca079dbeb8825659bc | Shell | diffblue/cbmc | /scripts/format_classpath.sh | UTF-8 | 258 | 3.21875 | 3 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-4-Clause"
] | permissive | #!/bin/bash
# Join all arguments into a Java classpath: ';' as the separator on
# Windows-flavoured environments (Cygwin/MinGW/MSYS), ':' elsewhere.
unameOut="$(uname -s)"
case "${unameOut}" in
    CYGWIN*|MINGW*|MSYS*|Windows*) separator=";";;
    *) separator=":";;
esac
# Print the remaining arguments joined by $1, with no trailing newline.
# The subshell keeps the IFS change local, and printf '%s' replaces the
# original unquoted `echo -n \`...\``, whose word-splitting re-joined the
# result with single spaces and so collapsed runs of spaces inside paths.
join_paths() {
    local sep=$1
    shift
    (IFS=$sep; printf '%s' "$*")
}
join_paths "$separator" "$@"
| true |
5d98208fd50dc1967be7177f1dd8404f0aea9752 | Shell | iBLISSLabs/letsencryptrenew | /certautorenew.sh | UTF-8 | 405 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Renew this host's Let's Encrypt certificate, then reload the webserver.
lepath="/usr/bin/letsencrypt"; #Change Letsencrypt path
email="email@example.com"; #Change email
domain="$(/bin/hostname)"; #Change domain
webservice="nginx"; #Change the webserver
# Update the cert
echo "Starting renewal script..."
# Bug fix: the original expanded the undefined $le_path, so the letsencrypt
# binary was never invoked; use the $lepath variable defined above.
"$lepath" certonly -n --renew-by-default --nginx --agree-tos -m "$email" -d "$domain"
# Reload nginx
# Bug fix: $web_service was also undefined; report the real $webservice.
echo "Reloading $webservice"
/usr/sbin/service "$webservice" reload
| true |
dd108c23404112c73a0e018dfc82f0978386843d | Shell | Jreilly8/checks | /checks.sh | UTF-8 | 936 | 3.515625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Navigate to backups, list dates of directories, list selected files, and output to /share/MD0_DATA/server_backups/checks.
# For faster human checking of current backups. - 12/17/2013
# refactored 02/03/2014
MAIN=/path/to/backups/server_backups
CHECKS=/path/to/backups/checks
TIMFS=filesystem
TIMSQL=mysql
# Step 1 list filesystem backup folders (make sure the dates are correct)
# Step 2 list contents of a folder in latest filesystem backup (make sure content exists)
# Step 3 list mysql backup folders (make sure date is correct)
# Step 4 list contents of latest mysql backup (make sure backups exist)
ls $MAIN/fermat.$TIMFS/ > $CHECKS/check_files_dir_list.txt
ls $MAIN/fermat.$TIMFS/current/var/www/sites/path/htdocs/ > $CHECKS/check_files_list.txt
ls $MAIN/fermat.$TIMSQL/ > $CHECKS/check_mysql_dir_list.txt
# Newest mysql backup dir = first entry of ls sorted by mtime (-t), newest first.
# NOTE(review): this line uses "backup.${TIMSQL}" while step 3 above lists
# "fermat.$TIMSQL" -- confirm which prefix is the real directory name.
ls "$(\ls -1dt ${MAIN}/backup.${TIMSQL}/* | head -n 1)" > $CHECKS/check_mysql_list.txt
| true |
9a12530905b537e8da83f51ec3621f2d311b81d4 | Shell | thomasgsmth/dotfiles | /.bashrc | UTF-8 | 697 | 3.25 | 3 | [
"MIT"
] | permissive | # if not an interactive shell, skip
# Non-interactive shells have no prompt set; skip all interactive setup.
if [ -z "$PS1" ]; then
    return
fi
# Mac OS X specific configuration
if [ $(uname -s) = "Darwin" ]; then
    source ~/.darwin_bashrc
fi
# Aliases
alias ls='ls -F'
alias ll='ls -AFlth'
alias sha1='openssl sha1'
alias grep='grep --color=auto'
# Bash history
# Append to (rather than overwrite) the history file; write after each
# command via PROMPT_COMMAND so concurrent shells share history.
shopt -s histappend
export HISTSIZE=5000
export HISTFILESIZE=250000
export HISTCONTROL=ignoredups
shopt -s cmdhist
PROMPT_COMMAND='history -a'
# Python development
# pip should only run if there is a virtualenv currently activated
export PIP_REQUIRE_VIRTUALENV=true
# Functions
# Function to run pip to install or upgrade global (python) package
# (temporarily clears PIP_REQUIRE_VIRTUALENV for this one invocation)
gpip(){
    PIP_REQUIRE_VIRTUALENV="" pip "$@"
}
| true |
f7f5efe6478bf6bef7d32bc6b9e46b956e811536 | Shell | hayduke19us/dotfiles | /configs/tab.sh | UTF-8 | 328 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Opens a new tab in iterm (iTerm2) for each argument and types it into the
# new session as a command.
# Bug fix: iterate with "$@" instead of the unquoted $*, which word-split
# multi-word commands (e.g. `tab "ls -l"` opened one tab per word).
# NOTE(review): "$i" is interpolated into the AppleScript source -- arguments
# containing double quotes would break the script; confirm if that matters.
function tab () {
  for i in "$@"; do
  osascript <<EOS
  tell application "iTerm2"
    tell current window
      create tab with default profile
      tell current session of current tab
        write text "$i"
      end tell
    end tell
  end tell
EOS
  done
}
| true |
96ca01c27f06f5e447877d8ca3ae8e0dd85e9f24 | Shell | matlow/easybib-cookbooks | /loggly/templates/default/loggly.sh.erb | UTF-8 | 1,589 | 3.484375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh -e
### BEGIN INIT INFO
# Provides: loggly integration
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Register this server with loggly.
# Description: Makes a HTTP call (via curl) to register
# this server as a device on loggly.
### END INIT INFO
# Deployment parameters; the <%= ... %> markers are Chef/ERB placeholders
# rendered at template time, not shell syntax.
CUSTOMER="<%=node[:loggly][:domain]%>"
INPUT=<%=node[:loggly][:input]%>
USER="<%=node[:loggly][:user]%>"
PASS="<%=node[:loggly][:pass]%>"
MASCHINE="<%=@instance[:hostname]%>"
IP="<%=@instance[:ip]%>"
ENDPOINT="https://${CUSTOMER}.loggly.com/api/devices/"
CURL=`which curl`
# Shared curl options: discard the body, follow redirects, print only the
# HTTP status code, authenticate, and post this host's ip/input id.
CURL_OPT="-o /dev/null -L -w \"%{http_code}\" -u ${USER}:${PASS}"
CURL_OPT="${CURL_OPT} -d ip=${IP} -d input_id=${INPUT}"
CURL_OPT="${CURL_OPT} -S -s"
# Register this machine as a loggly device (POST to the devices endpoint).
loggly_start () {
  local call="${CURL} -X POST ${CURL_OPT} -d name=${MASCHINE} ${ENDPOINT}"
  loggly_exec "$call"
  echo "Registered '${MASCHINE}' with '${IP}' in loggly."
}
# Look up this machine's loggly device id, then DELETE it from the API.
loggly_stop () {
  ID=`/usr/local/bin/deviceid ${USER} ${PASS} ${MASCHINE} ${CUSTOMER}`
  local call="${CURL} -X DELETE ${CURL_OPT} ${ENDPOINT}${ID}"
  loggly_exec "$call"
  echo "Unregistered '${MASCHINE}' in loggly."
}
# Run the command line given in $1 (word-split on purpose) and exit the
# script if the argument is missing or the command fails.
loggly_exec () {
  if [ "x$1" = "x" ]; then
    echo "Missing param."
    exit 1
  fi
  # Bug fix: 'local resp=$($1)' made $? the status of 'local' (always 0),
  # so the failure branch below could never fire (ShellCheck SC2155).
  # Declaring first and assigning separately preserves the command status.
  local resp
  resp=$($1)
  if [ "$?" -gt 0 ]; then
    echo "Errorz! ${resp}"
    exit 1
  fi
}
# init-script dispatch: "start"/"stop" maps directly onto loggly_start/stop.
case "$1" in
  start|stop)
    loggly_$1
    ;;
  *)
    echo "Usage: $0 {start|stop}"
    exit 1
    ;;
esac
exit 0
| true |
2b23d7ec01488b7ddfeefe48bda333e54959c0eb | Shell | smartgamer/BG-4j | /BG-4j_installer.sh | UTF-8 | 3,055 | 3.46875 | 3 | [] | no_license | #check latest dataset against current if == then !
# Scrape the current BioGRID build number from the statistics wiki page;
# the build id is the parenthesized text of the first matching line.
wget -q -O Stats.txt https://wiki.thebiogrid.org/doku.php/statistics
grep -i "Current Build Statistics (" Stats.txt | head -1 > tbuild
rm Stats.txt
grep -o -P '(?<=\().*(?=\))' tbuild > build
rm tbuild
#download latest dataset
echo Downloading latest build...
BUILD=$(<build)
mkdir BioGRID-build-"$BUILD"
cd BioGRID-build-"$BUILD"
wget -q "https://thebiogrid.org/downloads/archives/Release%20Archive/BIOGRID-$BUILD/BIOGRID-ORGANISM-$BUILD.tab2.zip"
echo Unzipping build...
unzip -u -qq "BIOGRID-ORGANISM-$BUILD.tab2.zip"
printf "Updated build unzipped.\nPreparing files for import\n"
#prep files for import
CURR=BIOGRID-ORGANISM-Homo_sapiens-"$BUILD".tab2.txt
cp $CURR BioGRID-relations-"$BUILD".tab2.txt
#create nodes and relations
# Random hex names for the two intermediate column extracts.
n=$(hexdump -n 16 -e '4/4 "%08X"' /dev/random)
m=$(hexdump -n 16 -e '4/4 "%08X"' /dev/random)
rfile1="$n".txt
rfile2="$m".txt
# Interactor A columns (with header) and interactor B columns (header dropped),
# concatenated into a single node list.
cut -f 2,4,6,8,10,16 $CURR > $rfile1
cut -f 3,5,7,9,11,17 $CURR | tail -n +2 > $rfile2
cat $rfile1 $rfile2 > BioGRID-nodes-"$BUILD".tab2.txt
NODES=BioGRID-nodes-"$BUILD".tab2.txt
rm $rfile1 $rfile2
# Deduplicate the node rows while keeping the header line first.
(head -n 1 $NODES && tail -n +2 $NODES | sort -u ) > temp
mv temp $NODES
#prep headers, labels & types
# Append the :Label column value and install the neo4j-import header row.
sed -i 's/$/\tProtein/' $NODES
tail -n +2 $NODES > temp
sed -i '1i nodeid:ID\tBioGRID ID Interactor A\tSystematic Name Interactor A\tOfficial Symbol Interactor A\tSynonyms Interactor A\tOrganism Interactor A\t:Label' temp
mv temp $NODES
RELAS=BioGRID-relations-"$BUILD".tab2.txt
# Append the :TYPE column ("pp") and install the relationships header row.
sed -i 's/$/\tpp/' $RELAS
tail -n +2 $RELAS > temp
sed -i '1i BioGRID Interaction ID\t:START_ID\t:END_ID\tBioGRID ID Interactor A\tBioGRID ID Interactor B\tSystematic Name Interactor A\tSystematic Name Interactor B\tOfficial Symbol Interactor A\tOfficial Symbol Interactor B\tSynonyms Interactor A\tSynonyms Interactor B\tExperimental System\tExperimental System Type\tAuthor\tPubmed ID\tOrganism Interactor A\tOrganism Interactor \tThroughput\tScore\tModification\tPhenotypes\tQualifications\tTags\tSource Database\t:TYPE' temp
mv temp $RELAS
#pass nodes and relations to cypher-shell or neo4j-import
neo4j stop
#neo4j-admin dump --database=BioGRID.db --to=/root/Research/Neo4j/Backup #Dump current db
#neo4j-admin import --database="BioGRID-$BUILD.db" --mode=csv --delimiter="TAB" --nodes:ID="$NODES" --relationships:TYPE="$RELAS"
echo "Importing data to /var/lib/neo4j/data/databases/BioGRID-"$BUILD".db"
mkdir /var/lib/neo4j/data/databases/BioGRID-$BUILD.db
cp "$NODES" /var/lib/neo4j/data/databases/BioGRID-$BUILD.db
cp "$RELAS" /var/lib/neo4j/data/databases/BioGRID-$BUILD.db
sudo neo4j-import --into /var/lib/neo4j/data/databases/BioGRID-$BUILD.db/ --delimiter 'TAB' --nodes "$NODES" --relationships "$RELAS"
echo 'BioGRID 4 Neo4j successfully installed'
echo 'NOTE: you will now need to change your active database within the neo4j.conf file to load from the updated database (default path: "/etc/neo4j/neo4j.conf") i.e. redefine "dbms.active_database=graph.db" to "dbms.active_database=BioGRID-'"$BUILD"'" before starting neo4j.'
exit
| true |
8cfea6fae59f8196c1d5ed149f779d5c2558371b | Shell | seanbechhofer/etree | /scripts/grab-all-files.sh | UTF-8 | 629 | 3.375 | 3 | [] | no_license | #!/bin/sh
# For every index argument, walk ./files/<index>/* and, per performance id
# listed in performances.txt, fetch the archive.org _meta.xml/_files.xml
# descriptors that are not already present.
# NOTE(review): the shebang is #!/bin/sh but this uses [[ =~ ]] and
# pushd/popd, which are bash features -- confirm it is actually run by bash.
for index in $*
do
    for x in ./files/$index/*
    do
	echo $x
	pushd "$x"
	echo `wc performances.txt`
	cat performances.txt | while read LINE
	do
	    # "Artist: " header lines are echoed, not fetched.
	    if [[ $LINE =~ "Artist: " ]]; then
		echo $LINE
	    else
		# -s: skip when the meta file exists AND is non-empty.
		if [ -s ${LINE}_meta.xml ]; then
		    echo ${LINE}_meta.xml exists
		else
		    wget http://www.archive.org/download/${LINE}/${LINE}_meta.xml
		fi
		if [ -f ${LINE}_files.xml ]; then
		    echo ${LINE}_files.xml exists
		else
		    wget http://www.archive.org/download/${LINE}/${LINE}_files.xml
		fi
	    fi
	done
	popd
    done
done
| true |
43f93e0c06ec8eec563fb3da07712b2fe70c6a61 | Shell | dczhu/cxpgrep | /test_cases/ch_tests_automated.sh | UTF-8 | 1,081 | 3.59375 | 4 | [
"MIT"
] | permissive | ch_tests()
{
    # Zero-pad a test index to three digits.
    # NOTE(review): values >= 100 fall through both branches and print
    # nothing -- confirm test files never exceed 99 cases.
    pad()
    {
	if [ "$1" -lt 10 ]; then
	    echo "00""$1"
	elif [ "$1" -lt 100 ]; then
	    echo "0""$1"
	fi
    }
    [ ! -f "$1" ] && { echo "File \"$1\" doesn't exist"! ; echo "Usage: $FUNCNAME \$test_case_file"; return 1; }
    echo "-------------------------------------------------------------------------------"
    local LINE LINEP idx=0 pidx
    # Use raw mode of the built-in read.
    while read -r LINE; do
	# Blank lines and comment lines are echoed but not executed.
	if [ "${LINE//[ ]/}" == "" ] || [ "$(echo "${LINE}" | sed -e "s/^[ \t]*#//")" != "${LINE}" ]; then
	    echo "     === $LINE"
	    continue
	fi
	let idx++
	pidx=$(pad $idx)
	echo "$pidx === $LINE"
	# Execute the test case line as-is.
	eval "$LINE"
	# To test h, replace ' ' with | and substitute grep -P for h.
	LINEP="$LINE"
	LINEP="${LINEP//| h/| /bin/grep -P --color=always}"
	LINEP="${LINEP//| cxpgrep?/| /bin/grep -P --color=always }"
	LINEP="${LINEP//\' \'/|}"
	LINEP="${LINEP//\" \"/|}"
	# Run the grep -P equivalent only when the rewrite changed something.
	if [ "$LINEP" != "$LINE" ]; then
	    echo "     === $LINEP"
	    eval "$LINEP"
	fi
	echo "-------------------------------------------------------------------------------"
    done <$1
}
ch_tests $1
| true |
431d24c0d7c1d4baa315f6bd8f7a1304f7b0be72 | Shell | jellytronics/rfidquizgame | /setup/depreciated/ssh_setup.sh | UTF-8 | 578 | 3.125 | 3 | [
"MIT"
] | permissive | #!/bin.bash
# Enable sshd and make sure this account has an RSA key pair, then load it
# into ssh-agent and print the public key for pasting into GitHub.
systemctl start sshd
systemctl enable sshd
systemctl is-enabled sshd
# Bug fix: the original 'if cat 2> /dev/null' ran cat with no operand, which
# blocks reading stdin instead of testing anything; check for the key files
# that the else-branch below backs up and regenerates.
if [ -f ~/.ssh/id_rsa ] && [ -f ~/.ssh/id_rsa.pub ]
then
    echo "ssh keys available"
else
    echo "ssh keys unavailable. making one now"
    cp ~/.ssh/id_rsa ~/.ssh/id_rsa.bu
    cp ~/.ssh/id_rsa.pub ~/.ssh/id_rsa.pub.bu
    ssh-keygen -q -f ~/.ssh/id_rsa -P ""
fi
echo "Bringing ssh-agent to sys env"
eval $(ssh-agent)
chmod 0600 /root/.ssh/id_rsa.pub
echo "Please enter "'""'" as passphrase in next line."
# NOTE(review): ssh-add normally expects the *private* key; passing
# id_rsa.pub looks unintended -- confirm whether ~/.ssh/id_rsa was meant.
ssh-add ~/.ssh/id_rsa.pub
echo "copy what you are about to see into the ssh-keys of your github account"
cat ~/.ssh/id_rsa.pub
| true |
58bcabeda727c233120e36933d7fc79f4bdce937 | Shell | harshal2802/mybinaries | /ImageMagick/build_script.sh | UTF-8 | 832 | 3.1875 | 3 | [
"Apache-2.0"
] | permissive | sudo yum update
# Build ImageMagick from source on a yum-based system.
# NOTE: [version_number] and <install-path> below are placeholders the user
# must substitute before running.
#install development tools
sudo yum groupinstall "Development tools"
#Install dependencies for jpeg, tiff, png, freetype fontconfig bzip2 xml X11 Xt ghostscript
sudo yum install libjpeg-devel libtiff-devel libpng-devel \
freetype-devel fontconfig-devel bzip2-devel \
libxml2-devel libX11-devel libXt-devel ghostscript
#Download ImageMagick
wget https://www.imagemagick.org/download/ImageMagick.tar.gz
#Untar Imagemagick
tar xvzf ImageMagick.tar.gz
#Access the working directory
cd ImageMagick-[version_number]
#Configure and make sure to assign prefix as install path
./configure --prefix=<install-path>
#Make
make
#Install to install path
make install
#Check install
# Bug fix: make targets are case-sensitive; ImageMagick's test target is
# 'check', so the original 'make Check' always failed with "No rule to make
# target 'Check'".
make check
#create backup of install as tar file
cd <install-path>/..
tar -cvzf magick.tar.gz magick/ | true |
5f95d4e23add38b8f7f52385afe3afa3d232d6d7 | Shell | maxheadroom2/contador_bash_linux_pruebas | /zipeador.sh | UTF-8 | 1,892 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# -*- ENCODING: UTF-8 -*-
# Author: Victor Ruben Farias Rolon
# creado 26 febrero 2016
# Modificado 6 de noviembre 2018
# rev-1
###############################################################################
# descripcion: Script para relizar una compresion de archivos, en el cual reliza por medio de un for "bucle" compresion y rescribe los metadatos de fecha de modificacion al original #
# #
###############################################################################
# LISTADO DE PROCESOS -----------------------------------------------s-----------
#
################################################################################
# notas
## funciones
function BORRADO(){ # Esta función ejecuta un scritp hijo en el cual hace una funcion de comprimir los archivos
#statements
xterm -e bash /home/maxheadroom/Scripts/Control_de_pruebas/borrado.sh;
}
RUTA1=/home/maxheadroom/Documentos/RAW/series_sin_com/series/
WHILE=0
while [ $CONTROL=0 ] ; do
VERF=$(ls -l $RUTA1 | wc -l)
VERF1=${VERF/.*} # lo hacemos un entero ya que puede estar con decimales :(
if [ $VERF1 -gt 1 ];
then
notify-send -i applications-engineering "Aviso" "Se ha encontrado que existen $VERF1 archivos los cuales se zipearan"
echo "si"
echo $VERF
sleep 1s
notify-send -i goterminal "Aviso" "Inicia Script de zipeador" && sleep 1s &&
cd /home/maxheadroom/Documentos/RAW/series_sin_com/series/;
for file in *; do zip -r ${file%.*}.zip $file && touch -d "$(date -R -r $file)" ${file%.*}.zip && rm -r ${file%.*}; done &&
notify-send -i guake "Aviso" "Ha terminado el sistema de zipear los archivos de manera correcta" ;
sleep 3s && notify-send -i cs-cat-admin "Aviso" "Se cerrara el Script automaticamente" && BORRADO; kill -9 $PPID
else
echo "no"
sleep 1s
echo $VERF
fi
done
| true |
a3026853831f6727765c03e2da5e83d86661f09d | Shell | TuxShadow/allinone | /campur.sh | UTF-8 | 3,369 | 2.859375 | 3 | [] | no_license | #!/bin/bash
#rdl
# Terminal colors via tput (Indonesian names): merah=red, ijo=green,
# dasar=reset, biru=blue.
merah=$(tput setaf 1)
ijo=$(tput setaf 2)
dasar=$(tput sgr0)
biru=$(tput setaf 4)
# Prebuilt xterm command prefixes (persistent -hold window, monaco font,
# black background) wrapping nmap / wpscan / whatweb.  They are expanded
# unquoted later on purpose so word-splitting rebuilds the argument vector.
xterm='xterm -hold -fa monaco -fs 10 -bg black -e nmap'
xterma='xterm -hold -fa monaco -fs 10 -bg black -e wpscan'
xtermb='xterm -hold -fa monaco -fs 10 -bg black -e whatweb'
# ASCII-art banner.
echo -e "${biru} 0000_____________0000________0000000000000000__000000000000000000+\n 00000000_________00000000______000000000000000__0000000000000000000+\n 000____000_______000____000_____000_______0000__00______0+\n 000______000_____000______000_____________0000___00______0+\n 0000______0000___0000______0000___________0000_____0_____0+\n 0000______0000___0000______0000__________0000___________0+\n 0000______0000___0000______0000_________000___0000000000+\n 0000______0000___0000______0000________0000+\n 000______000_____000______000________0000+\n 000____000_______000____000_______00000+\n 00000000_________00000000_______0000000+\n 0000_____________0000________000000007\n Created By Aku;${dasar}"
# Main menu (Indonesian): base64, base32, SQLi scan, WordPress user
# enumeration, web fingerprinting.
echo -e "${ijo}1. Base 64 ${dasar}"
echo -e "${ijo}2. Base 32${dasar}"
echo -e "${ijo}3. SQLi scanner ${dasar}"
echo -e "${ijo}4. Enumerate User Wordpress ${dasar}"
echo -e "${ijo}5. Information Gathering Web ${dasar}"
# Prompt for the menu choice ("Enter choice: ").
echo -n "Masukan Pilihan : "
read pilihan
# Option 1: Base64 decode/encode a user-supplied string.
if [[ $pilihan = "1" ]]; then
        echo "${biru}1.Decode ${dasar}"
        echo "${merah}2.Encode ${dasar}"
        echo "Anda akan apa ?"
        read -r akan
        if [[ $akan -eq 1 ]]; then
                echo "Masukan yang akan di Decode : "
                # -r keeps backslashes literal; quoting "$decode" prevents
                # word-splitting and glob expansion of the input.
                read -r decode
                echo ${ijo} && echo "$decode" | base64 --decode && echo ${dasar}
        elif [[ $akan -eq 2 ]]; then
                echo "Masukan yang akan di encode : "
                read -r encode
                clear
                # echo (not printf) is kept so the trailing newline is still
                # included in the encoded payload, as before.
                echo ${ijo} && echo "$encode" | base64 && echo ${dasar}
        fi
fi
# Option 2: Base32 decode/encode a user-supplied string.
if [[ $pilihan = "2" ]]; then
        echo "${biru}1.Decode ${dasar}"
        echo "${merah}2.Encode ${dasar}"
        echo "Anda akan apa ?"
        read -r akan
        if [[ $akan -eq 1 ]]; then
                echo "Masukan yang akan di Decode : "
                # -r keeps backslashes literal; quoting "$decode" prevents
                # word-splitting and glob expansion of the input.
                read -r decode
                echo ${ijo} && echo "$decode" | base32 --decode && echo ${dasar}
        elif [[ $akan -eq 2 ]]; then
                echo "Masukan yang akan di encode : "
                read -r encode
                clear
                # echo (not printf) is kept so the trailing newline is still
                # included in the encoded payload, as before.
                echo ${ijo} && echo "$encode" | base32 && echo ${dasar}
        fi
fi
# Option 3: run nmap's http-sql-injection NSE script against a target,
# installing nmap first when it is missing.
if [[ $pilihan = "3" ]]; then
        # `command -v` is the portable existence check (replaces `which`
        # followed by a separate "$?" inspection).
        if command -v nmap > /dev/null 2>&1; then
                echo Nmap di temukan;
                clear
                echo "Masukan Target : "
                # Quote the user-supplied target; $xterm stays unquoted on
                # purpose so it word-splits into the xterm command line.
                read -r inject
                $xterm -sV --script=http-sql-injection "$inject"
        else
                echo Nmap Tidak Ditemukan ;
                echo ""
                echo Harap Tunggu ;
                apt-get update
                apt-get install nmap
                echo ""
                sleep 2
                exit
        fi
fi
# Option 4: enumerate WordPress users with wpscan, installing it if missing.
if [[ $pilihan = "4" ]]; then
        if command -v wpscan > /dev/null 2>&1; then
                echo "Wpscan ditemukan"
                sleep 2
                clear
                echo "Masukan target : "
                # Quote the user-supplied site; $xterma stays unquoted so it
                # word-splits into the xterm command line.
                read -r site
                $xterma -u "$site" --enumerate u
        else
                echo Harap Tunggu....;
                echo ""
                apt-get update
                apt-get install wpscan
                echo ""
                sleep 2
                exit
        fi
fi
# Option 5: fingerprint a web site with whatweb, installing it (from the
# pi-rho/security PPA) when missing.
if [[ $pilihan = "5" ]]; then
        if command -v whatweb > /dev/null 2>&1; then
                echo "Whatweb Ditemukan"
                sleep 2
                clear
                echo "Masukan Target : "
                # Quote the user-supplied site; $xtermb stays unquoted so it
                # word-splits into the xterm command line.
                read -r situs
                $xtermb -v "$situs"
        else
                echo Harap Tunggu....;
                echo ""
                add-apt-repository ppa:pi-rho/security
                apt-get update
                apt-get install whatweb
                echo ""
                sleep 2
                exit
        fi
fi
| true |
e4a8e77a53a1e23820ce002e51c9e00612fbe5ba | Shell | shatll-s/SobakaOS | /netdd | UTF-8 | 17,767 | 3.625 | 4 | [] | no_license | #!/usr/bin/env bash
#screen -dmS netdd bash -c "wget -O /tmp/netdd https://os.dog/downloads/other/netdd && chmod 700 /tmp/netdd && /tmp/netdd -y -l"
# ^ convenience one-liner for fetching and running this installer in screen.
export PATH="/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin:$PATH"
bname=`basename $0`
# prevent running from pipe
# ($0 is "sh"/"bash" when piped into an interpreter; a real file path is
# required because the script later re-execs itself via sudo/chroot.)
if [ $0 = "sh" ] || [ $0 = "bash" ]; then
    sleep 1
    echo "$bname does not work from pipe!"
    exit 1
fi
SCRIPT_PATH=`dirname $0`
SCRIPT_NAME=`basename $0`
cd $SCRIPT_PATH
mydir=`pwd`
myself="$mydir/$SCRIPT_NAME"
#config partition options
# skip/count select the config-partition region (dd block units) inside the
# image when it is extracted in write().
skip=2048
count=45056
#LATEST_URL="https://os.dog/downloads/iso/dog-1.47.zip"
# download timeout
TIMEOUT=20
TMPDIR="/tmp/dog"
# Globals filled in by the command-line parsing loop at the bottom.
url=
yes=
sshPass=
LIST=
LATEST=
STABLE=
# ANSI color codes used throughout the installer output.
RED="\e[31m"
WHITE="\e[0m"
GREEN="\e[32m"
BROWN="\e[33m"
BLUE="\e[34m"
PURPLE="\e[35m"
CYAN="\e[36m"
GRAY="\e[37m"
LIGHTRED="\e[1;31m" #good for AMD
YELLOW="\e[1;33m"
LIGHTGREEN="\e[1;32m" #good for Nvidia
LIGHTBLUE="\e[1;34m"
LIGHTPURPLE="\e[1;35m" #comments?
LIGHTCYAN="\e[1;36m" #very bright
LIGHTWHITE="\e[1;37m" #bold white
# /dog/boot exists only on an installed os.dog system; thisDog=1 makes the
# installer stop os.dog-specific services before writing the disk.
[[ -f /dog/boot ]] && thisDog=1
mkdir -p $TMPDIR
#Get root
# Re-exec under sudo when not root.  NOTE(review): $* loses argument quoting
# ("$@" would preserve it) -- confirm before changing behavior.
if [[ $(id -u) -ne 0 ]]; then
    echo "Root privileges required"
    sudo chmod +x $0
    exec sudo $myself $*
fi
#function checkIp () {
# echo $1 | awk 'BEGIN { FS = "." }; /^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$/ { if (($1 <= 255) && ($2 <= 255) && ($3 <= 255) && ($4 <= 255)) print("1") } '
#}
#checkIp 192.168.88.255
#exit
# Print the colorized CLI usage/help text to stdout.
function showHelp {
    echo -e "${CYAN}Usage: netdd [option] <Path or URL to ZIP file with os.dog image>${WHITE}"
    echo -e
    echo -e "Common options:"
    echo -e "${GREEN}  -y|--yes			${WHITE}Do not ask for confirmation, answer yes to all questions"
    echo -e "${GREEN}  --list			${WHITE}Show images list"
    echo -e "${GREEN}  -l|--latest			${WHITE}Select latest image"
    echo -e "${GREEN}  -s|--stable			${WHITE}Select stable image"
    echo -e "${GREEN}  -h|--help			${WHITE}Show this message"
    echo -e "Image download options:"
    echo -e "${GREEN}  -sshpass|--sshpassword	${WHITE}Specify password for download via ssh"
    echo -e "Rig configuration options:"
    echo -e "${GREEN}  -id|--rigid			${WHITE}Specify rig id"
    echo -e "${GREEN}  -password|--rigpassword	${WHITE}Specify rig password"
    echo -e "${GREEN}  -host|--righost		${WHITE}Specify rig host"
    echo -e "\nExamples:"
    echo -e "${GREEN}  netdd -y --latest${WHITE}"
    echo -e "${GREEN}  netdd -y --rigid 118 --rigpassword qwerty --sshpassword 1 user@192.168.88.105:/dog/dog-1.39.zip${WHITE}"
}
# Install any missing apt packages the installer depends on.  Runs at most
# once per invocation (guarded by the global $checked flag).
function check_packages {
    [[ $checked -eq 1 ]] && return
    # (duplicate "unzip" entry from the original list dropped)
    local need_install=(lsof curl wget unzip gdisk libpcre2-8-0)
    # BUGFIX: was `[[ $sshPass -eq 1 ]]`, but $sshPass holds the ssh
    # *password string* (see --sshpassword parsing), so the numeric test only
    # matched a literal "1" and `expect` was usually never installed even
    # though get() needs it.  Use the same non-empty test get() uses.
    [[ -n $sshPass ]] && need_install+=(expect)
    for idx in "${!need_install[@]}"; do
        dpkg -s "${need_install[$idx]}" > /dev/null 2>&1 && unset 'need_install[idx]'
    done
    # BUGFIX: `[[ ! -z "${need_install[@]}" ]]` raises a conditional-
    # expression error whenever two or more packages are missing; test the
    # element count instead.
    if (( ${#need_install[@]} > 0 )); then
        echo -e "${CYAN}> Installing required packages: ${WHITE}${need_install[@]}${WHITE}"
        apt update
        apt install -y "${need_install[@]}"
        [[ $? -ne 0 ]] && echo -e "${RED}Failed to install required packages${WHITE}" && exit
        echo -e "${GREEN}> Installed required packages successfully${WHITE}\n"
    fi
    checked=1
}
# Gets/downloads image
# Fetch the image ZIP named by the global $url into $TMPDIR (scp when
# $sshPass is set, wget for http/ftp URLs, plain copy for local paths) and
# verify the archive.  Sets the global $archname; returns non-zero on error.
# (The old, never-used $basename assignment was removed.)
function get {
    cd "$TMPDIR"
    rm -f "$TMPDIR"/*dog*
    archname=`basename "$url"`
    # stop miner here to free some memory and prevent rig from rebooting on errors
    [[ $thisDog -eq 1 ]] && systemctl stop mining && systemctl stop wd && systemctl stop wd-load
    #Copy|download image to tmpfs
    echo -e "\n${CYAN}> Downloading image file${WHITE}"
    if [[ ! -z $sshPass ]]; then #need to scp
        # NOTE(review): $url/$sshPass are interpolated straight into the
        # expect program, so whitespace in either will break the spawn line.
        expect -c "
        set timeout 1200
        spawn scp $url $TMPDIR/$archname
        while { true } {
            expect {
                -re \".*you sure you want to continue connecting.*\" { send \"yes\r\" }
                -re \".*ermission denied*\" { exit 1 }
                -re \".*assword*\" { send \"${sshPass}\r\" }
            }
        }
        "
        exitcode=$?
        echo ""
        [[ $exitcode -ne 0 ]] && echo -e "${RED}> Something wrong while downloading image via ssh${WHITE}" && return 1
    elif [[ "$url" == *http* || "$url" == *ftp* ]]; then
        echo ""
        wget -t 0 -T $TIMEOUT "$url"
        [[ $? -ne 0 ]] && echo -e "${RED}> Image download failed. Check url${WHITE}" && return 1
    else
        [[ ! -f $url ]] && echo -e "${RED}> Image not found. Check path${WHITE}" && return 1
        cp -v "$url" "$TMPDIR"
        [[ $? -ne 0 ]] && echo -e "${RED}> Copy image failed${WHITE}" && return 1
    fi
    #check zip integrity
    echo -e "\n${CYAN}> Checking ZIP file integrity${WHITE}"
    unzip -t "$TMPDIR/$archname" > /dev/null
    [[ $? -ne 0 ]] && echo -e "${RED}> ZIP file is damaged${WHITE}" && return 1
    echo -e "${GREEN}> Image is ready${WHITE}"
    return 0
}
# Starting actions
# Stage 1 of the installer: sanity-check the environment and target disk,
# stop everything holding the disk, build a minimal tmpfs root, then chroot
# into it and re-exec this script (which then runs write(), because the
# /replace.conf written below sets wr=1 inside the tmpfs root).
function prepare {
    # Refuse to run from an X terminal: the disk gets remounted read-only
    # and X would be killed mid-flight.
    pstree -As $$ | grep -q "xinit" && echo -e "${RED}$bname does not work in X server console! Run it from text local or remote shell${WHITE}" && exit 1
    pstree -As $$ | grep -q -P "(ssh|shellinabox|hive-console)" || (echo -e "${YELLOW}$bname does not work in X server console! Make sure it is not running from it${WHITE}"; sleep 5)
    # Total RAM in MB; the image is staged in a 3000m tmpfs, so ~4 GB is
    # required.
    mem=$(free -m | awk 'NR == 2{print$2}')
    echo -e "${CYAN}***********************************${BROWN}
 .--~~,__
:-....,-------`~~`._.'
 `-,,,  ,_      ;'~U'
  _,-' ,'`-__; \'--.
 (_/'~~      ''''(;"
    echo -e "${YELLOW}	  OS.dog Image Installation${WHITE}"
    echo -e "${CYAN}***********************************${WHITE}"
    echo
    echo -e "${GREEN}$url${WHITE}"
    echo "Total RAM=$mem Mb"
    echo
    [[ $mem -lt 3600 ]] && echo -e "${YELLOW}4 GB RAM is required, exiting${WHITE}" && exit 1
    # Resolve the disk the running system booted from via the root UUID on
    # the kernel command line.
    PART_UUID=`cat /proc/cmdline | tr " " "\n" | grep "UUID" | sed 's/\(^root=UUID=\)//'`
    DISK_PART=`blkid | grep -m1 $PART_UUID | awk '{ print $1 }' | sed 's/://' | sed 's/\(^\/dev\/\)//'`
    DISK_NAME=`echo ${DISK_PART} | sed 's/\([[:digit:]]\)//'`
    DISK_SIZE=`cat /proc/partitions | grep -w "${DISK_NAME}" | awk '{ printf("%.f",$3/1024)}'`
    PART_SIZE=`cat /proc/partitions | grep -w "${DISK_PART}" | awk '{ printf("%.f",$3/1024)}'`
    echo -e "Current OS booted from ${LIGHTPURPLE}$DISK_NAME${WHITE} and whole drive size is ${CYAN}$DISK_SIZE${WHITE} Mb"
    [[ $DISK_SIZE -lt 7300 ]] && echo -e "\n${BROWN}The minimum disk size for OS.dog is 8 Gb${WHITE}" && exit
    # Confirmation prompt unless -y/--yes was given (90 s timeout).
    if [[ $yes -ne 1 ]]; then
        echo -e "${RED}Warning: After os.dog installation, all your data on ${LIGHTPURPLE}$DISK_NAME ${RED}will be lost!${WHITE}"
        echo -en "\nType ${GREEN}\"yes\"${WHITE} if you want to install os.dog on ${LIGHTPURPLE}$DISK_NAME${WHITE}: "
        read -t 90 answer
        [[ $answer != "yes" ]] && echo -e "${YELLOW}\nBye, bye!${WHITE}" && exit
    fi
    #check programs
    check_packages
    #create tmpfs
    cat /proc/mounts | grep $TMPDIR > /dev/null 2>&1
    if [[ $? -ne 0 ]]; then
        mkdir $TMPDIR > /dev/null 2>&1
        mount none $TMPDIR -t tmpfs -o size=3000m
    fi
    #get old config
    # Preserve the existing rig config so write() can re-apply it to the
    # freshly written image.
    mkdir -p $TMPDIR/old-config
    if [[ $thisDog -eq 1 ]]; then
        [[ -f /dog/cfg/rig.cfg ]] && cp /dog/cfg/rig.cfg ${TMPDIR}/old-config/
        [[ -f /dog/cfg/network.txt ]] && cp /dog/cfg/network.txt ${TMPDIR}/old-config/
        sync
    fi
    # A rig config passed on the command line overrides the preserved one.
    if [[ ! -z $RIG_ID && ! -z $PASSWD ]]; then
        [[ -z $HOST ]]&& HOST="https://os.dog/message.php"
        cfg="#Rig configuration file created by netdd at `date -R`\n"
        cfg+="RIG_ID=\"$RIG_ID\"\n"
        cfg+="PASSWD=\"$PASSWD\"\n"
        cfg+="HOST=\"$HOST\"\n"
        echo -e $cfg > ${TMPDIR}/old-config/rig.cfg
        sync
    fi
    #get image zip
    get || exit
    #Disable kernel message to tty
    echo 0 > /proc/sys/kernel/printk
    echo 1 > /proc/sys/kernel/sysrq
    echo 0 > /proc/sysrq-trigger
    #stop services and remount RO
    swapoff -a
    if [[ $thisDog -eq 1 ]]; then
        systemctl stop dogx 2>&1
        systemctl stop mining 2>&1
        systemctl stop wd 2>&1
        systemctl stop wd-la 2>&1
        systemctl stop wd-load 2>&1
        systemctl stop wd-temp 2>&1
        systemctl stop af 2>&1
        systemctl stop mb-af 2>&1
        systemctl stop remotessh 2>&1
        [[ `screen-check asw` -gt 0 ]] && screen-kill asw
        pkill -9 xinit > /dev/null 2>&1
    else
        echo -e "\n${CYAN}> Stopping services${WHITE}"
    fi
    # Keep the controlling tty's own getty (and ssh/vpn paths) alive while
    # stopping everything else.
    term=`tty | grep -oP "\Ktty[0-9]+"`
    [[ ! -z $term ]] && term="|$term"
    for service in `initctl list 2>/dev/null | grep "/running" | grep -v -P "network|ssh|shellinabox|openvpn|remotessh$term" | awk '{print $1}'`; do
        initctl stop $service > /dev/null 2>&1 && echo "> stop $service";
        sleep 0.1
    done
    for service in `systemctl list-units 2>/dev/null | grep -oP "\K[^\s]+\.(service|socket)" | grep -v -P "ssh|openvpn|shellinabox|remotessh|network|ifup|user|hive$term"`; do
        systemctl stop $service > /dev/null 2>&1 && echo "> stop $service";
        sleep 0.1
    done
    # Three passes of killing whatever still has files open on / so the
    # root filesystem can be remounted read-only.
    for i in {1..3}; do
        for pid in `lsof / | grep -v -P "^COMMAND|$SCRIPT_NAME| (mem|txt|rtd|cwd) |network|telec|hssh|watchdog|hl340|srrv2|hive-console|remotessh" | awk '{print $2}'`; do
            cmd=`ps -p $pid -o args | tail -n 1`
            kill -9 $pid 2>/dev/null && echo -e "> kill $pid: $cmd" && sleep 0.1
        done
        sleep 1
    done
    # Readonly remount
    for MOUNTS in `cat /proc/mounts | grep $DISK_NAME | awk '{print $2}'`; do
        mount -n -o remount,ro $MOUNTS > /dev/null 2>&1 || echo "Remounting $MOUNTS failed"
    done
    #create temp root
    # Copy just enough binaries/libraries into the tmpfs to run this script
    # and its tools after the chroot.
    echo -e "\n${CYAN}> Creating temporary root filesystem${WHITE}"
    mkdir -p $TMPDIR/{proc,sys,run,dev,usr,var,oldroot,bin,sbin,lib,tmp,usr/lib,usr/share,usr/lib/x86_64-linux-gnu,lib/lsb}
    cp -aR /{bin,sbin,etc} $TMPDIR > /dev/null 2>&1
    cp -aR /usr/{bin,sbin} $TMPDIR/usr > /dev/null 2>&1
    cp -aR /lib/x86_64-linux-gnu $TMPDIR/lib > /dev/null 2>&1
    cp -aR /lib64 $TMPDIR > /dev/null 2>&1
    cp -aR /usr/lib/sudo $TMPDIR/usr/lib > /dev/null 2>&1
    cp -a /usr/lib/x86_64-linux-gnu/libmpfr* $TMPDIR/usr/lib/x86_64-linux-gnu > /dev/null 2>&1
    cp -a /usr/lib/x86_64-linux-gnu/libsigsegv* $TMPDIR/usr/lib/x86_64-linux-gnu > /dev/null 2>&1
    cp -a /usr/lib/x86_64-linux-gnu/libgmp* $TMPDIR/usr/lib/x86_64-linux-gnu > /dev/null 2>&1
    cp -a /usr/lib/x86_64-linux-gnu/libstdc++* $TMPDIR/usr/lib/x86_64-linux-gnu > /dev/null 2>&1
    cp -a /usr/lib/x86_64-linux-gnu/libpopt.so* $TMPDIR/usr/lib/x86_64-linux-gnu > /dev/null 2>&1
    cp -a /usr/lib/x86_64-linux-gnu/libicu*.so* $TMPDIR/usr/lib/x86_64-linux-gnu > /dev/null 2>&1
    cp -aR /dev $TMPDIR > /dev/null 2>&1
    cp $myself $TMPDIR > /dev/null 2>&1
    chmod +x $TMPDIR/$SCRIPT_NAME > /dev/null 2>&1
    mount --bind /proc $TMPDIR/proc
    mount --bind /sys $TMPDIR/sys
    mount --bind /dev $TMPDIR/dev
    #create conf for second part
    # /replace.conf inside the tmpfs flips wr=1 so start() dispatches to
    # write() after the re-exec below.
    echo "wr=1" > $TMPDIR/replace.conf
    echo "archname=$archname" >> $TMPDIR/replace.conf
    echo "root_dev=$DISK_NAME" >> $TMPDIR/replace.conf
    echo "conf_loop=$conf_loop" >> $TMPDIR/replace.conf
    #chroot to temp root
    # NOTE(review): proc/sys/dev are bind-mounted a second time here
    # (already done a few lines above) -- confirm whether that is intended.
    mount --bind /proc $TMPDIR/proc
    mount --bind /sys $TMPDIR/sys
    mount --bind /dev $TMPDIR/dev
    sync
    cd $TMPDIR
    exec chroot . /bin/bash /$SCRIPT_NAME
}
# Writing to disk
# Stage 2 (runs inside the tmpfs chroot): extract the config partition from
# the image, merge the preserved rig config into it, stream the whole image
# onto the target disk ($root_dev from /replace.conf), re-write the merged
# config partition, then hard-reboot via sysrq.
function write {
    cd /
    #Copy config partition
    if [[ -f old-config/rig.cfg ]]; then
        echo -e "\n${CYAN}> Copying os.dog config to file system${WHITE}"
        # Extract only the config-partition region (skip/count set in the
        # prologue) into a loop-mountable image file.
        unzip -p $archname | dd of=newConf.img skip=$skip count=$count
        LOOPDEV=$(losetup --find --show newConf.img)
        partprobe ${LOOPDEV}
        mkdir tmpdogcfg
        mount ${LOOPDEV} tmpdogcfg
        if [[ $? -eq 0 ]]; then
            cp -R old-config/* tmpdogcfg
            echo -e "Copying config to image"
            echo -e "${GREEN}Config was successfilly copied and will be written on ${LIGHTPURPLE}$root_dev${WHITE}${GREEN} after writing image${WHITE}"
            # ddTrigger=1 makes the post-write config dd below run.
            ddTrigger=1
            sync
        else
            echo -e "${RED}Config was not written file system${WHITE}"
        fi
    fi
    echo -e "\n${CYAN}> Writing OS.dog filesystem to ${LIGHTPURPLE}$root_dev${WHITE}"
    echo -e "Please wait, this can take long"
    echo -e "To prevent damage to your disk device\ndo not turn off your computer until the end of the process.\n"
    # Prefer dd's native progress reporting; otherwise poll with SIGUSR1
    # from a background loop and echo dd's stderr tail.
    if dd --help | grep -q "'progress'"; then
        unzip -p $archname | dd of=/dev/$root_dev bs=1M status=progress
        exitcode=$?
    else
        # show progress
        ( sleep 2; while true; do sleep 1; pkill -USR1 -f "dd of=/dev/" || break; echo -en "\r$(tail -n 1 ./progress 2>/dev/null ) "; done) &
        unzip -p $archname | dd of=/dev/$root_dev bs=1M 2>./progress
        exitcode=$?
        [[ $exitcode -eq 0 ]] && echo -e "\r$(tail -n 1 ./progress 2>/dev/null) "
    fi
    [[ $exitcode -ne 0 ]] && echo -e "${RED}Write image failed, exiting${WHITE}" && return 1
    # Fix GPT table
    # Move the backup GPT header to the end of the (possibly larger) disk.
    sgdisk -e /dev/$root_dev > /dev/null 2>&1
    sgdisk -C /dev/$root_dev > /dev/null 2>&1
    partprobe /dev/$root_dev > /dev/null 2>&1
    #Rewrite config partition
    if [[ $ddTrigger -eq 1 ]]; then
        echo -e "\n${CYAN}> Writing os.dog config to ${LIGHTPURPLE}$root_dev${WHITE}"
        dd if=/newConf.img of=/dev/${root_dev}1 bs=1M
        if [[ $? -eq 0 && -f old-config/rig.cfg ]]; then
            echo -e "${BROWN}Applying next config:${WHITE}"
            echo -e "${BROWN}=====================${WHITE}"
            cat old-config/rig.cfg
            echo -e "${BROWN}=====================${WHITE}"
        else
            echo -e "${RED}Config was not written to ${LIGHTPURPLE}$root_dev${WHITE}"
        fi
    fi
    sync
    echo -e "${GREEN}> Image writing to ${LIGHTPURPLE}$root_dev${GREEN} is successfull!${WHITE}"
    echo -e "Your rig is configured and ready to work under os.dog"
    echo -e "After reboot it should appear on the web."
    echo -e "${YELLOW}> Rebooting in 15 seconds${WHITE}"
    sleep 15
    #Reboot
    # sysrq 'b' reboots immediately without unmounting -- intentional, the
    # old root is gone at this point.
    echo 1 > /proc/sys/kernel/sysrq
    echo b > /proc/sysrq-trigger
}
# Fetch the list of available image versions from os.dog and resolve which
# one to install: --list asks the user interactively, --latest/--stable pick
# the newest matching entry.  On success the global $url is set to the
# chosen image's download URL.
function list () {
    echo -e "> Getting image versions"
    local versions_url="https://os.dog/downloads/iso/versions.php"
    local versions=`curl -sLk $versions_url | jq '.iso'`
    [[ -z $versions ]] && echo -e "${RED}Error downloading versions from $versions_url${WHITE}" && exit 1
    local header="${BROWN}#${WHITE} ${CYAN}Version${WHITE}\t\t"
    header+="${PURPLE}System${WHITE} | ${YELLOW}Kernel${WHITE} | ${RED}Drivers${WHITE}\n"
    header+="======================================================================="
    echo -e "$header"
    for (( i=0; i < `echo "$versions" | jq '. | length'`; i++ )); do
        local isoData=`echo "$versions" | jq ".[$i]"`
        for option in version name description kernel system amd nvidia stable latest; do
            if [[ `echo "$isoData" | jq ".$option"` != null ]]; then
                local option_value
                option_value=`echo "$isoData" | jq -r ".$option"`
                # SECURITY FIX: assign indirectly with printf -v instead of
                # eval -- jq output may contain quotes or $(...), which eval
                # would execute as shell code.
                local "$option"
                printf -v "$option" '%s' "$option_value"
            elif [[ $option == 'name' ]]; then
                local name=' '
            fi
        done
        local string="${BROWN}$i${WHITE} ${CYAN}$version $name${WHITE}\t"
        # ${#version} replaces the deprecated external `expr length`, which
        # also broke on versions containing whitespace.
        [[ ${#version} -lt 7 ]] && string+="\t"
        string+="${PURPLE}$system${WHITE}\t| ${YELLOW}$kernel${WHITE}\t | "
        string+="${RED}$amd${WHITE} | ${GREEN}$nvidia${WHITE}"
        # Padding keeps the stable/latest columns aligned.
        [[ $stable == true ]] && string+=" ${CYAN}stable${WHITE}" || string+="       "
        [[ $latest == true ]] && string+=" ${CYAN}latest${WHITE}" || string+="       "
        echo -e "$string"
    done
    if [[ $LIST -eq 1 ]]; then
        echo -e "Select version to install:"
        read -r i
        numberFormat='^[0-9]+$'
        if ! [[ $i =~ $numberFormat ]] ; then
            echo -e "${RED}Error: you need to enter number here${WHITE}"
            exit 1
        fi
        local newIso=`echo "$versions" | jq ".[$i]"`
    elif [[ $LATEST -eq 1 ]]; then
        local latestVersions=`echo "$versions" | jq "[.[] | select(.latest == true)]"`
        [[ $latestVersions == "[]" ]] && echo -e "${RED}Can\`t find compatible image version${WHITE}" && exit 1
        local newIso=`echo "$latestVersions" | jq .[-1]`
    elif [[ $STABLE -eq 1 ]]; then
        local stableVersions=`echo "$versions" | jq "[.[] | select(.stable == true)]"`
        [[ $stableVersions == "[]" ]] && echo -e "${RED}Can\`t find compatible image version${WHITE}" && exit 1
        local newIso=`echo "$stableVersions" | jq .[-1]`
    fi
    local version=`echo "$newIso" | jq -r ".version"`
    [[ `echo "$newIso" | jq ".name"` != null ]] && local selectedName=`echo "$newIso" | jq -r ".name"`
    local localUrl=`echo "$newIso" | jq -r ".url"`
    echo -e "Selected version: ${CYAN}${version} ${selectedName}${WHITE} (${GREEN}$localUrl${WHITE})"
    url=$localUrl
}
# Entry-point dispatcher.  On a normal run it resolves the image source and
# calls prepare(); when re-executed inside the tmpfs chroot (where
# /replace.conf exists and sets wr=1) it calls write() instead.
start() {
    wr=0
    if [[ -f "/replace.conf" ]]; then
        source "/replace.conf"
    fi
    if [[ $wr -eq 1 ]]; then
        # Stage 2: we are inside the temporary root created by prepare().
        write
        return
    fi
    # Stage 1: optionally resolve $url from the published version list.
    if [[ $LATEST -eq 1 || $STABLE -eq 1 || $LIST -eq 1 ]]; then
        list
    fi
    if [[ -z $url ]]; then
        echo -e "${BROWN}No path or URL to os.dog image provided${WHITE}\n"
        showHelp
        exit 1
    fi
    prepare
}
# Command-line parsing: walk the argument list and set the matching globals.
# Anything containing ".zip" is taken as the image path/URL; value-taking
# options consume the following argument.
while [ -n "$1" ]; do
    case "$1" in
        --help|-h)
            showHelp
            exit 0
            ;;
        --yes|-y)
            yes=1
            ;;
        --latest|-l)
            LATEST=1
            ;;
        --stable|-s)
            STABLE=1
            ;;
        --list)
            LIST=1
            ;;
        --rigid|-id)
            shift
            [[ -z $1 ]] && echo "Uncorrect argument value" && exit 1
            RIG_ID=$1
            ;;
        --rigpassword|-password)
            shift
            [[ -z $1 ]] && echo "Uncorrect argument value" && exit 1
            PASSWD=$1
            ;;
        --righost|-host)
            shift
            [[ -z $1 ]] && echo "Uncorrect argument value" && exit 1
            HOST=$1
            ;;
        --sshpassword|-sshpass)
            shift
            [[ -z $1 ]] && echo "Uncorrect argument value" && exit 1
            sshPass=$1
            ;;
        *.zip*)
            url=$1
            ;;
        *)
            echo "Uncorrect argument $1"
            ;;
    esac
    shift
done
start
| true |
ed65eb6be979cf05796810530992d7ea18e58188 | Shell | MageJohn/zsh-config | /.zshrc | UTF-8 | 2,520 | 2.78125 | 3 | [] | no_license | declare -A ZINIT
export ZINIT[HOME_DIR]="$HOME/Library/Zinit"
# NOTE(review): `export` on an associative-array element is unusual in zsh;
# zinit's own docs show a plain assignment -- confirm the export takes effect.

### Added by Zinit's installer
# Bootstrap: clone zinit into $ZINIT[HOME_DIR]/bin on first run, then load
# it and register its completion function.
if [[ ! -f $ZINIT[HOME_DIR]/bin/zinit.zsh ]]; then
    print -P "%F{33}▓▒░ %F{220}Installing %F{33}DHARMA%F{220} Initiative Plugin Manager (%F{33}zdharma/zinit%F{220})…%f"
    command mkdir -p "$ZINIT[HOME_DIR]" && command chmod g-rwX "$ZINIT[HOME_DIR]"
    command git clone https://github.com/zdharma/zinit "$ZINIT[HOME_DIR]/bin" && \
        print -P "%F{33}▓▒░ %F{34}Installation successful.%f%b" || \
        print -P "%F{160}▓▒░ The clone has failed.%f%b"
fi
source "$ZINIT[HOME_DIR]/bin/zinit.zsh"
autoload -Uz _zinit
(( ${+_comps} )) && _comps[zinit]=_zinit
### End of Zinit's installer chunk
## Homebrew
# Add Homebrew's zsh completion functions to fpath when brew is installed.
if type brew &>/dev/null; then
    FPATH=$(brew --prefix)/share/zsh/site-functions:$FPATH
fi

## Zinit annexes
# Extensions: bin-gem-node (binary/gem/node shims) and patch-dl (dl'' ice).
zinit light-mode for \
    zinit-zsh/z-a-bin-gem-node \
    zinit-zsh/z-a-patch-dl

## ZSH-z
# Directory-jumping plugin; keep its data file inside $ZDOTDIR.
export ZSHZ_OWNER=yuripieters
export ZSHZ_DATA="$ZDOTDIR/.z"
zinit wait lucid for agkozak/zsh-z

## ls
# Use exa in place of ls; EXA_COLORS recolors the date column.
export EXA_COLORS="da=38;5;214"
zinit wait pack"no-dir-color-swap" for ls_colors
alias ls="exa"

zstyle ':completion:*' menu select

## Prompt and keybindings
# apply substitution to the prompt
setopt promptsubst
# this function outputs the variable name, not it's value, and since
# promptsubst is on the value will be substituted when the prompt is rendered.
# the vim plugin then ensures the prompt is re-rendered.
function show_vim_mode() { echo '$MODE_INDICATOR_PROMPT' }

zinit load geometry-zsh/geometry
GEOMETRY_PROMPT=(geometry_echo geometry_status geometry_path show_vim_mode)
GEOMETRY_RPROMPT=(geometry_exec_time geometry_exitcode geometry_git geometry_hg geometry_echo)

# ansi is a function from geometry
# One colored indicator per vim editing mode, shown via show_vim_mode above.
MODE_INDICATOR_VIINS=$(ansi 10 '>>')
MODE_INDICATOR_VICMD=$(ansi 9 '<<')
MODE_INDICATOR_REPLACE=$(ansi 11 '>>')
MODE_INDICATOR_SEARCH=$(ansi 12 '<<')
MODE_INDICATOR_VISUAL=$(ansi 14 '<<')
MODE_INDICATOR_VLINE=$(ansi 8 '<<')
MODE_CURSOR_VIINS="block"
MODE_CURSOR_VICMD="underline"

setopt autocd

# Deferred plugin loads: autosuggestions, syntax highlighting (with deferred
# compinit + replay), and extra completions.
zinit wait lucid for \
    atload"_zsh_autosuggest_start" \
        zsh-users/zsh-autosuggestions \
    atinit"zicompinit; zicdreplay" \
        zdharma/fast-syntax-highlighting \
    blockf atpull'zinit creinstall -q .' \
        zsh-users/zsh-completions

## vim bindings
zinit wait lucid for softmoth/zsh-vim-mode

## FZF
zinit wait pack"bgn-binary+keys" for fzf

## iTerm integration
source "$ZDOTDIR/.iterm2_shell_integration.zsh"

## fnm
eval "$(fnm env)"

## Git
alias g=git
7e5ea49b8a8354f9889029c051ced27af154817c | Shell | ChaoyiHuang/trio2o | /tricircle/tempestplugin/post_test_hook.sh | UTF-8 | 3,335 | 3.1875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -xe
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is executed inside post_test_hook function in devstack gate.
export DEST=$BASE/new
export DEVSTACK_DIR=$DEST/devstack
export TRICIRCLE_DIR=$DEST/tricircle
export TRICIRCLE_DEVSTACK_PLUGIN_DIR=$TRICIRCLE_DIR/devstack
export TRICIRCLE_TEMPEST_PLUGIN_DIR=$TRICIRCLE_DIR/tricircle/tempestplugin
export TEMPEST_DIR=$DEST/tempest
export TEMPEST_CONF=$TEMPEST_DIR/etc/tempest.conf
# use admin role to create Tricircle top Pod and Pod1
source $DEVSTACK_DIR/openrc admin admin
token=$(openstack token issue | awk 'NR==5 {print $4}')
echo $token
curl -X POST http://127.0.0.1:19999/v1.0/pods \
-H "Content-Type: application/json" \
-H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "RegionOne"}}'
curl -X POST http://127.0.0.1:19999/v1.0/pods \
-H "Content-Type: application/json" \
-H "X-Auth-Token: $token" \
-d '{"pod": {"pod_name": "Pod1", "az_name": "az1"}}'
# the usage of "nova flavor-create":
# nova flavor-create [--ephemeral <ephemeral>] [--swap <swap>]
# [--rxtx-factor <factor>] [--is-public <is-public>]
# <name> <id> <ram> <disk> <vcpus>
# the following command is to create a flavor wih name='test',
# id=1, ram=1024MB, disk=10GB, vcpu=1
nova flavor-create test 1 1024 10 1
image_id=$(openstack image list | awk 'NR==4 {print $2}')
# preparation for the tests
cd $TEMPEST_DIR
if [ -d .testrepository ]; then
sudo rm -r .testrepository
fi
sudo chown -R jenkins:stack $DEST/tempest
# sudo chown -R jenkins:stack $BASE/data/tempest
# change the tempest configruation to test Tricircle
env | grep OS_
# import functions needed for the below workaround
source $DEVSTACK_DIR/functions
# designate is a good example how to config TEMPEST_CONF
iniset $TEMPEST_CONF auth admin_username ${ADMIN_USERNAME:-"admin"}
iniset $TEMPEST_CONF auth admin_project_name admin
iniset $TEMPEST_CONF auth admin_password $OS_PASSWORD
iniset $TEMPEST_CONF identity uri $OS_AUTH_URL
iniset $TEMPEST_CONF identity-feature-enabled api_v3 false
iniset $TEMPEST_CONF compute region RegionOne
iniset $TEMPEST_CONF compute image_ref $image_id
iniset $TEMPEST_CONF compute image_ref_alt $image_id
iniset $TEMPEST_CONF volume region RegionOne
iniset $TEMPEST_CONF volume catalog_type volumev2
iniset $TEMPEST_CONF volume endpoint_type publicURL
iniset $TEMPEST_CONF volume-feature-enabled api_v1 false
# Run the Compute Tempest tests
cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
sudo BASE=$BASE ./tempest_compute.sh
# Run the Volume Tempest tests
cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
sudo BASE=$BASE ./tempest_volume.sh
# Run the Network Tempest tests
cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
sudo BASE=$BASE ./tempest_network.sh
# Run the Scenario Tempest tests
# cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
# sudo BASE=$BASE ./tempest_scenario.sh
| true |
a9daa1762060580ad8a3609c806ffad67f97c30c | Shell | bbxyard/bbxyard | /yard/grammar/shell/trap/trap_debug.sh | UTF-8 | 361 | 3.203125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# trap debug
# DEBUG-trap handler: report the line number about to execute together with
# the current values of the demo counters a, b and c.
function debug_handler() {
    printf 'before execute line: %s,a=%s,b=%s,c=%s\n' "$LINENO" "$a" "$b" "$c"
}
# Demo workload for the DEBUG trap: step three global counters (a up by 2,
# b doubled, c down by 10) until a reaches 10, echoing the triple after each
# step.  Globals are intentional so the trap handler can observe them.
function test_function() {
    a=0
    b=2
    c=100
    # Same iterations as the original `while : ... if/break` loop, written
    # as a direct loop condition.
    while (( a < 10 )); do
        a=$(( a + 2 ))
        b=$(( b * 2 ))
        c=$(( c - 10 ))
        echo "*****:$a,$b,$c"
    done
}
# Registry
# Install debug_handler as the DEBUG trap: it fires before every simple
# command, printing the upcoming line number and the current a/b/c values.
trap debug_handler DEBUG
# Run
test_function
| true |
b30c82f7048e2a3e725c74b9f96a04dd4a3944e5 | Shell | servicenowcmf/shellscripts | /installwebsite2.sh | UTF-8 | 614 | 2.921875 | 3 | [] | no_license | #!/bin/sh -e
#install Apache Web Server on Ubuntu 16 LTS
# Ensure errexit also applies when invoked as `sh script` (shebang options
# are lost in that case).
set -e
#update repositories on server
sudo apt-get -y update
#install git
sudo apt-get -y install git-core
#install apache webserver
sudo apt-get -y install apache2
#set permissions on the html folder so files can be copied
# BUGFIX: the path was relative ("var/www/html/"), so chmod acted on a
# nonexistent path instead of the Apache docroot.
# NOTE(review): 0777 on a docroot is overly permissive; 0755 plus sudo for
# the copies below would be safer.
sudo chmod 777 /var/www/html/
#download website files from git repository
# BUGFIX: clone into a fixed absolute location; the original cloned into the
# current directory but then moved files from /website2, which only matched
# when the script happened to run from /.
sudo git clone https://github.com/servicenowcmf/website2 /website2
#copy the downloaded content to the Apache www directory
# (sudo added: the clone above is root-owned, so an unprivileged mv failed)
sudo mv /website2/*.* /var/www/html
sudo mv /website2/css /var/www/html
sudo mv /website2/img /var/www/html
#restart the webserver
sudo systemctl start apache2.service
51372f169f1f18fbeb1b34820e21f8db062dba56 | Shell | marques-work/PeerToPeer | /Sample/Windows/build_x64_PPCS.sh | UTF-8 | 216 | 2.515625 | 3 | [] | no_license | #!/bin/sh
OS=Windows
BOARD=x64
P2P=PPCS
CC=x86_64-w64-mingw32-gcc
CXX=x86_64-w64-mingw32-g++
STRIP=x86_64-w64-mingw32-strip
make -f ./Makefile OS=${OS} BOARD=${BOARD} P2P=${P2P} CC=${CC} CXX=${CXX} STRIP=${STRIP} | true |
2d6cd40226984e08ea2856c1f70d9d32e41cb73f | Shell | dewanz/Fabric101Workshop | /start.sh | UTF-8 | 2,363 | 2.8125 | 3 | [] | no_license | #!/bin/bash
#
#
# Copyright IBM Corp All Rights Reserved
#
# SPDX-License-Identifier: Apache-2.0
#
# Exit on first error, print all commands.
set -ev
# Directory this script lives in; doubles as the Fabric config root.
export FABRIC_CFG_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# don't rewrite paths for Windows Git Bash users
export MSYS_NO_PATHCONV=1
export COMPOSE_PROJECT_NAME=fabric101
# Recreate the Fabric network from scratch.
docker-compose -f ${FABRIC_CFG_PATH}/docker-compose.yml down
docker-compose -f ${FABRIC_CFG_PATH}/docker-compose.yml up -d
docker ps -a
# wait for Hyperledger Fabric to start
# incase of errors when running later commands, issue export FABRIC_START_TIMEOUT=<larger number>
export FABRIC_START_TIMEOUT=10
#echo ${FABRIC_START_TIMEOUT}
sleep ${FABRIC_START_TIMEOUT}
# Create the channel
# (run as Org1 admin inside peer0.org1; TLS against the orderer CA cert)
docker exec -e "CORE_PEER_LOCALMSPID=Org1MSP" -e "CORE_PEER_MSPCONFIGPATH=/etc/hyperledger/msp/users/Admin@org1.example.com/msp" peer0.org1.example.com peer channel create -o orderer.example.com:7050 --tls --cafile /etc/hyperledger/fabric/orderer/orderer-ca.crt -c mychannel -f /etc/hyperledger/configtx/channel.tx --outputBlock /etc/hyperledger/configtx/mychannel.block
# Join peer0.org1.example.com to the channel.
docker exec -e "CORE_PEER_LOCALMSPID=Org1MSP" -e "CORE_PEER_MSPCONFIGPATH=/etc/hyperledger/msp/users/Admin@org1.example.com/msp" peer0.org1.example.com peer channel join --tls --cafile /etc/hyperledger/fabric/orderer/orderer-ca.crt -b /etc/hyperledger/configtx/mychannel.block
# Join peer0.org2.example.com to the channel.
docker exec -e "CORE_PEER_LOCALMSPID=Org2MSP" -e "CORE_PEER_MSPCONFIGPATH=/etc/hyperledger/msp/users/Admin@org2.example.com/msp" peer0.org2.example.com peer channel join --tls --cafile /etc/hyperledger/fabric/orderer/orderer-ca.crt -b /etc/hyperledger/configtx/mychannel.block
# Update each org's anchor peer definition on the channel.
docker exec -e "CORE_PEER_LOCALMSPID=Org1MSP" -e "CORE_PEER_MSPCONFIGPATH=/etc/hyperledger/msp/users/Admin@org1.example.com/msp" peer0.org1.example.com peer channel update -o orderer.example.com:7050 -c mychannel -f /etc/hyperledger/configtx/Org1MSPanchors.tx --tls --cafile /etc/hyperledger/fabric/orderer/orderer-ca.crt
docker exec -e "CORE_PEER_LOCALMSPID=Org2MSP" -e "CORE_PEER_MSPCONFIGPATH=/etc/hyperledger/msp/users/Admin@org2.example.com/msp" peer0.org2.example.com peer channel update -o orderer.example.com:7050 -c mychannel -f /etc/hyperledger/configtx/Org2MSPanchors.tx --tls --cafile /etc/hyperledger/fabric/orderer/orderer-ca.crt
90c0d9b062bd791dd0e115716915ff27836ca37d | Shell | TileDB-Inc/TileDB-R | /tools/ci/valgrind/buildTileDB.sh | UTF-8 | 1,323 | 3.890625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -u
## check for argument
if [ $# -ne 1 ]; then
    echo "Need version argument. Exiting."
    exit 1
fi
## cmdline arg gives us desired release version, most recent could be read off GitHub Releases tag too
version="$1"
## check if version is of the form release-*; use grep here to not use bash [[ ]] notation
echo "${version}" | grep -q "^release-" -
isrelease=$?
## fetch appropriate sources
# All ${version} expansions are quoted now: the old unquoted forms broke on
# (or silently mis-handled) versions containing whitespace or glob chars.
echo "::group::Setup sources"
if [ "${isrelease}" -eq 0 ]; then
    git clone --single-branch --branch "${version}" https://github.com/TileDB-Inc/TileDB.git "TileDB-${version}"
    # NOTE(review): this git log runs in the *current* directory, not in the
    # fresh clone -- confirm whether a `cd` was intended first.
    git log --graph --pretty=format:'%h - %d %s (%cr) <%an>' --abbrev-commit | head
elif [ "${version}" = "dev" ]; then
    wget "https://github.com/TileDB-Inc/TileDB/archive/refs/heads/${version}.zip"
    unzip "${version}.zip"
    rm "${version}.zip"
else
    wget "https://github.com/TileDB-Inc/TileDB/archive/${version}.tar.gz"
    tar xaf "${version}.tar.gz"
    rm "${version}.tar.gz"
fi
mv "TileDB-${version}" tiledb
cd tiledb
echo "::endgroup::"
## standard build off source, enabling s3 and serialization
echo "::group::Build from source"
mkdir build
cd build
export AWSSDK_ROOT_DIR=/usr
../bootstrap --prefix=/usr/local --enable-s3 --enable-serialization
make -j 8
make -C tiledb install
ldconfig
cd ../..
rm -rf tiledb
echo "::endgroup::"
| true |
7b231ad0c0e9cf24fe2ce2200b6024147a6ac28b | Shell | Sword-Smith/EggsML | /concieggs/cmds/dirch | UTF-8 | 1,846 | 3.140625 | 3 | [] | no_license | #!/bin/sh
#
# Jeg... jeg ka' osse sige ligesom en prut. IuuuuuuuuuUUH. Brug: dirch [ANTAL ORD]
antal_ord="$1"
if ! [ "$antal_ord" ]; then
antal_ord=30;
fi
tmpfile=$(mktemp)
case $(random 0 2) in
0)
markov "$antal_ord" dirch-monolog \
"$CONCIEGGS_DB_DIR/dirchpasser/monologer.txt" > $tmpfile
;;
1)
markov "$antal_ord" dirch-sketch \
"$CONCIEGGS_DB_DIR/dirchpasser/sketcher.txt" > $tmpfile
;;
2)
markov "$antal_ord" dirch-vise \
"$CONCIEGGS_DB_DIR/dirchpasser/viser.txt" > $tmpfile
;;
esac
python3 <<EOF
with open(1, 'wb') as out:
with open('$tmpfile', 'rb') as f:
prev_was_token = False
for token in f.read().strip().split():
if token.startswith(b'w'):
if prev_was_token:
out.write(b' ')
out.write(token[1:])
prev_was_token = True
else:
prev_was_token = False
if token == b',':
out.write(b', ')
elif token == b'..':
out.write(b'.\n') # Not optimal
elif token == b'.':
out.write(b'. ')
elif token == b'...':
out.write(b'...\n') # Not optimal
elif token == b':':
out.write(b': ')
elif token == b"'<":
out.write(b" '")
elif token == b"'>":
out.write(b"' ")
elif token == b'"<':
out.write(b' "')
elif token == b'">':
out.write(b'" ')
elif token == b'n':
out.write(b'\n')
else:
out.write(token)
EOF
rm $tmpfile
| true |
0daaff656ee8ea17f32cec0c759d5c75e186b39e | Shell | gilesgamon/ecs-camel-sqs | /jks_generate | UTF-8 | 1,244 | 2.765625 | 3 | [] | no_license | #!/bin/bash
rm -f client.jks server-chain.jks
cp `find /Library/Java/JavaVirtualMachines/jdk1.8.0_241.jdk /etc -name cacerts -type f -print 2>/dev/null` server-chain.jks ; chmod +w server-chain.jks
echo "Adding intermediate certificate to server-chain.jks"
keytool -import -file intermediate.pem -alias intermediate -trustcacerts -keystore server-chain.jks -storepass changeit -noprompt
echo "Adding root certificate to server-chain.jks"
keytool -import -file caCert.pem -alias root -trustcacerts -keystore server-chain.jks -storepass changeit -noprompt
# echo "Adding mq_server certificate to server-chain.jks"
# keytool -import -file mq_server.pem -alias mq_server -trustcacerts -keystore server-chain.jks -storepass changeit -noprompt
openssl pkcs12 -export -in client.crt -inkey client.key -certfile client.crt -out clientKeyStore.p12 -passout pass:changeit
echo "Adding user's private key to client.jks"
keytool -importkeystore -srckeystore clientKeyStore.p12 -srcstoretype pkcs12 -destkeystore client.jks -deststoretype JKS -noprompt -storepass changeit -srcstorepass changeit
keytool -changealias -keystore client.jks -alias 1 -destalias client -storepass changeit
mv *jks src/main/resources
rm -f *pem *crt *key *p12
echo "Good to GO!" | true |
5c59ad54beae9204b1fdbe8ebb80dea4f9874068 | Shell | PsychoAdmin/YunixShells | /04-Yunix-FindSUID/FindSUID.sh | UTF-8 | 459 | 3.046875 | 3 | [] | no_license | #!/bin/bash
#Thanks go to for the SUID part: http://www.wangproducts.net/article.php?id=33
N=0
find / -type f \( -perm -4000 -o -perm -2000 \) -exec ls {} \; | while read LINE ; do
N=$((N+1))
strings $LINE | grep /bin | while read WOAH ; do
echo "FOUND containing /bin ===> $LINE contains $WOAH"
done
done
N=0
find / -type f \( -perm -4000 -o -perm -2000 \) -exec ls {} \; | while read LINE ; do
N=$((N+1))
echo "SUID FOUND ===> $LINE"
done
| true |
39e09cee92f36b8e328e5e86564b6d92c733dc2c | Shell | pmem/pmdk | /src/test/pmempool_sync/TEST12 | UTF-8 | 1,094 | 2.890625 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017-2019, Intel Corporation
#
#
# pmempool_sync/TEST12 -- test for checking pmempool sync;
# a case with incorrect part's uuid and correct checksum
#
. ../unittest/unittest.sh
require_test_type medium
require_fs_type any
setup
LOG=out${UNITTEST_NUM}.log
LOG_TEMP=out${UNITTEST_NUM}_part.log
rm -f $LOG && touch $LOG
rm -f $LOG_TEMP && touch $LOG_TEMP
LAYOUT=OBJ_LAYOUT
POOLSET=$DIR/pool0.set
# Create poolset file
create_poolset $POOLSET \
20M:$DIR/testfile1:x \
20M:$DIR/testfile2:x \
21M:$DIR/testfile3:x \
R \
40M:$DIR/testfile4:x \
20M:$DIR/testfile5:x
# Create poolset
expect_normal_exit $PMEMPOOL$EXESUFFIX create --layout=$LAYOUT\
obj $POOLSET
cat $LOG >> $LOG_TEMP
# Corrupt metadata in primary replica, recalculate checksum
expect_normal_exit $PMEMSPOIL $DIR/testfile1 pool_hdr.uuid=0000000000000000\
"pool_hdr.f:checksum_gen" >> $LOG_TEMP
# Try to synchronize replicas
expect_abnormal_exit $PMEMPOOL$EXESUFFIX sync $POOLSET &>> $LOG_TEMP
mv $LOG_TEMP $LOG
check
pass
| true |
7d63cdbe83e9bc16d333a3538be7214287918deb | Shell | juehv/oref0 | /bin/mm-format-ns-glucose.sh | UTF-8 | 1,254 | 3.46875 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
# Author: Ben West @bewest
# Maintainer: Chris Oattes @cjo20
# Written for decocare v0.0.17. Will need updating the the decocare json format changes.
source $(dirname $0)/oref0-bash-common-functions.sh || (echo "ERROR: Failed to run oref0-bash-common-functions.sh. Is oref0 correctly installed?"; exit 1)
usage "$@" <<EOT
Usage: $self [--oref0] <medtronic-glucose.json>
Format Medtronic glucose data into something acceptable to Nightscout.
EOT
NSONLY=""
test "$1" = "--oref0" && NSONLY="| .glucose = .sgv" && shift
HISTORY=${1-glucosehistory.json}
OUTPUT=${2-/dev/fd/1}
#TZ=${3-$(date +%z)}
cat $HISTORY | \
jq '[ .[]
| if ._type then .medtronic = ._type else . end
| if ( ( .dateString | not ) and ( .date | tostring | test(":") ) ) then
.dateString = ( [ ( .date | tostring), "'$(date +%z)'" ] | join("") ) else . end
| ( .dateString | sub("Z"; "") | split(".") )[0] as $time
| ( ( .dateString | sub("Z"; "") | split(".") )[1] | tonumber ) as $msec
| .date = ( ( [ $time, "Z" ] | join("") ) | fromdateiso8601 ) * 1000 + $msec
| .type = if .name and (.name | test("GlucoseSensorData")) then "sgv" else "pumpdata" end
| .device = "openaps://medtronic/pump/cgm"
'"$NSONLY"' ]' > $OUTPUT
| true |
e0773985de02190f278355d3984291d21d245357 | Shell | klolos/dotfiles | /make/qtest | UTF-8 | 340 | 3.328125 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/bin/bash
set -e
BUILD_PREFIX="/path/to/build/directory"
for dir in "$@"
do
qmake src/$dir
qmake test/$dir
dir="$BUILD_PREFIX/test/$dir"
cd $dir
echo "Testing $dir ..."
if ctest -C test-all -V; then
echo "Tests for $dir PASSED!"
else
echo "Tests for $dir FAILED!"
exit 1
fi
done
| true |
259178eb3e7598f40c9fd58199eba753577ea8a1 | Shell | dscrane/fullstackTemplate | /fullstack-template.sh | UTF-8 | 1,703 | 3.40625 | 3 | [] | no_license | #!/usr/bin/env bash
outputDir=$(pwd)
cd "$outputDir" || return
# MAKE SERVER DIRECTORY
mkdir server && cd server || return
# MAKE SRC DIRECTORY FOR SERVER
mkdir src && cd src || return
# MAKE THE COMMON SERVER DIRECTORIES & FILES
echo -e "\e[1;33mCreating boiler plate server directories and files\e[0m"
mkdir routes database middleware models public
touch index.js
echo 'Full stack server template README' >> README.md
echo 'node_modules' >> .gitignore
# INITIALIZE GENERAL NPM PROJECT
echo -e "\e[1;33mInitializing npm project\e[0m"
npm init -y;
# INSTALL GENERAL NPM PACKAGES
echo -e "\e[1;33mInstalling dependencies\e[0m"
npm i --silent express cors body-parser dotenv;
echo -e "\e[1;33mInstalling development dependencies\e[0m"
npm i -D --silent nodemon;
echo -e "\e[1;33mSERVER TEMPLATE CREATED\e[0m"
cd ../../
# CREATE THE REACT APP
echo -e "\e[1;33mInstalling Create React App\e[0m"
npx create-react-app client;
cd client || return
# REMOVE BOILERPLATE FROM CREATE REACT APP
echo -e "\e[1;33mRemoving unused create-react-app boiler plate files\e[0m"
rm -r "$(pwd)"/src
mkdir src && cd src || return
# MAKE THE COMMON FRONT END DIRECTORIES
echo -e "\e[1;33mAdding general React App directories"
mkdir components utils pages styles
# ADD THE BOILERPLATE CODE TO index.js AND App.js
echo -e "\e[1;33mAdding boiler plate code to index.js and App.js\e[0m"
printf "import React from 'react'\n import ReactDOM from 'react-dom'\n import App from 'App.js'\n\n ReactDOM.render(<App />, document.querySelector('#root'));" > index.js
printf "import React from 'React'\n\n export const App = () => {\n return(\n<div>From Template</div>\n\n " > App.js
echo -e "\e[1;33mCLIENT TEMPLATE CREATED\e[0m"
| true |
32e9b45879f4e97c3c8d263b12adef3d29b52b60 | Shell | negibokken/sandbox | /atcoder/abc/abc084/questionD/test.sh | UTF-8 | 524 | 3.390625 | 3 | [] | no_license | #!/bin/bash
try() {
expected="$@"
input=`cat -`
actual=`echo $input | ./main`
if [ "$actual" = "$expected" ]; then
# echo "$input => $actual"
echo "OK"
else
echo "==="
echo "$input => $expected expected, but got $actual"
exit 1
fi
}
## test case 1
cat << EOF | try 2
1
3 7
EOF
## test case 2
cat << EOF | try "1 0 0 1"
4
13 13
7 11
7 11
2017 2017
EOF
## test case 2
cat << EOF | try "4 4 1 1 1 2"
6
1 53
13 91
37 55
19 51
73 91
13 49
EOF
## test case 2
cat << EOF | try "2"
1
5 13
EOF
| true |
63ff152c8c3edff1fbd750baf7e9c3bff2f03d36 | Shell | DerekRoberts/gateway | /util/sample10/import.sh | UTF-8 | 306 | 2.890625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Exit on errors or unitialized variables
#
set -o nounset
# Change to script directory
#
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
cd ${DIR}
# Import sample 10 data
#
mongoimport --host database --db query_gateway_development --collection records sample.json | grep imported
| true |
5f1032a307f5c21a4137c79fbc0ccbc7788ffc5f | Shell | oskarvid/slurm-germline | /steps/01-fastqtosam.sh | UTF-8 | 1,085 | 3.1875 | 3 | [] | no_license | #!/bin/bash
source `pwd`/configuration.sh
OUTPUTDIR=`pwd`/01-FastqToSam/
mkdir -p $OUTPUTDIR
readarray INPUT < <(cat `pwd`/input.tsv)
let i=$1-1
FC[$i]+=$(echo "${INPUT[$i]}" | awk '{ print $1 }')
SM[$i]+=$(echo "${INPUT[$i]}" | awk '{ print $2 }')
LB[$i]+=$(echo "${INPUT[$i]}" | awk '{ print $3 }')
LN[$i]+=$(echo "${INPUT[$i]}" | awk '{ print $4 }')
SAMPLES1[$i]+=$(echo "${INPUT[$i]}" | awk '{ print $5 }')
SAMPLES2[$i]+=$(echo "${INPUT[$i]}" | awk '{ print $6 }')
ID[$i]+=$(echo "${FC[$i]}"."${SM[$i]}"."${LN[$i]}")
READGROUP[$i]+="@RG\tID:"${ID[$i]}"\tSM:"${SM[$i]}"\tLB:"${LB[$i]}"\tPL:ILLUMINA\tPU:NotDefined"
echo "Starting FastqToSam with input files "${SAMPLES1[$i]}" and "${SAMPLES2[$i]}""
gatk --java-options -Djava.io.tempdir=`pwd`/tmp \
FastqToSam \
--FASTQ "${SAMPLES1[$i]}" \
--FASTQ2 "${SAMPLES2[$i]}" \
-O $OUTPUTDIR/FastqToSam_"${ID[$i]}".bam \
--SAMPLE_NAME "${SM[$i]}" \
--READ_GROUP_NAME "${ID[$i]}" \
--LIBRARY_NAME "${LB[$i]}" \
--PLATFORM ILLUMINA \
--TMP_DIR "${OUTPUTDIR}"/FastqToSam-"$i"_TMP
rm -r "${OUTPUTDIR}"/FastqToSam-"$i"_TMP
| true |
443dbbb6338c2d90da73087e91c050a6273e5a9a | Shell | kleopatra999/autobin | /dataserv-client/build-osx-binary.sh | UTF-8 | 7,909 | 3.25 | 3 | [
"MIT"
] | permissive | #!/bin/bash
apiurl=https://api.github.com/repos/Storj/dataserv-client
repository=$(curl -H "Accept: application/json" -H "Authorization: token $gh_token" $apiurl)
repositoryname=$(echo $repository | jq --raw-output ".name")
repositoryurl=$(echo $repository | jq --raw-output ".html_url")
releasesurl=$(echo $repository | jq --raw-output ".releases_url")
releasesurl=${releasesurl//\{\/id\}/}
pullurl=$(echo $repository | jq --raw-output ".pulls_url")
pullurl=${pullurl//\{\/number\}/}
tagurl=$(echo $repository | jq --raw-output ".tags_url")
#get releases and pull requests from github
releases=$(curl -H "Accept: application/json" -H "Authorization: token $gh_token" $releasesurl)
pulls=$(curl -H "Accept: application/json" -H "Authorization: token $gh_token" $pullurl)
tags=$(curl -H "Accept: application/json" -H "Authorization: token $gh_token" $tagurl)
#build binary for pull request
for ((i=0; i < $(echo $pulls | jq ". | length"); i++)); do
pullnumber=$(echo $pulls | jq --raw-output ".[$i].number")
pullsha=$(echo $pulls | jq --raw-output ".[$i].merge_commit_sha")
pullrepository=$(echo $pulls | jq --raw-output ".[$i].head.repo.html_url")
pullbranch=$(echo $pulls | jq --raw-output ".[$i].head.ref")
releasefound=false
assetfound=false
for ((j=0; j < $(echo $releases | jq ". | length"); j++)); do
releasename=$(echo $releases | jq --raw-output ".[$j].name")
if [ "$releasename" = "autobin pull request $pullnumber" ]; then
releasefound=true
uploadurl=$(echo $releases | jq --raw-output ".[$j].upload_url")
uploadurl=${uploadurl//\{?name,label\}/}
asseturl=$(echo $releases | jq --raw-output ".[$j].assets_url")
assets=$(curl -H "Accept: application/json" -H "Authorization: token $gh_token" $asseturl)
for ((k=0; k < $(echo $assets | jq ". | length"); k++)); do
assetlabel=$(echo $assets | jq --raw-output ".[$k].label")
assetname=$(echo $assets | jq --raw-output ".[$k].name")
if [ "${assetname: -10}" = ".osx64.zip" ]; then
assetstate=$(echo $assets | jq --raw-output ".[$k].state")
if [ "$assetlabel" = "$pullsha.osx64.zip" ] && [ "$assetstate" != "new" ]; then
assetfound=true
else
binaryurl=$(echo $assets | jq --raw-output ".[$k].url")
curl -X DELETE -H "Authorization: token $gh_token" $binaryurl
fi
fi
done
fi
done
if [ $releasefound = false ]; then
echo create release autobin pull request $pullnumber
uploadurl=$(curl -H "Accept: application/json" -H "Content-Type: application/json" -H "Authorization: token $gh_token" -X POST -d "{\"tag_name\":\"\",\"name\":\"autobin pull request $pullnumber\",\"draft\":true}" $releasesurl | jq --raw-output ".upload_url")
uploadurl=${uploadurl//\{?name,label\}/}
fi
if [ $assetfound = false ]; then
mkdir repos
cd repos
rm -rf $repositoryname
echo create and upload binary $pullrepository $pullbranch
git clone $pullrepository -b $pullbranch
cd $repositoryname
virtualenv -p python2.7 pythonenv
source pythonenv/bin/activate
pip2 install py2app
# workaround for http://stackoverflow.com/questions/25394320/py2app-modulegraph-missing-scan-code
sed -i '' 's/scan_code/_scan_code/g' pythonenv/lib/python2.7/site-packages/py2app/recipes/virtualenv.py
sed -i '' 's/load_module/_load_module/g' pythonenv/lib/python2.7/site-packages/py2app/recipes/virtualenv.py
pip2 install -r requirements.txt
python2 setup.py install
rm -r dist
python2 setup.py py2app
deactivate
# workaround for lib2to3 issue (https://github.com/Storj/storjnode/issues/102)
cd dist/$repositoryname.app/Contents/Resources/lib/python2.7/
mv site-packages.zip unzipme.zip
mkdir site-packages.zip
mv unzipme.zip site-packages.zip/
cd site-packages.zip/
unzip unzipme.zip
rm unzipme.zip
cd ../../../../../../
zip -r -9 $repositoryname.osx64.zip $repositoryname.app
filename=$repositoryname.osx64.zip
curl -H "Accept: application/json" -H "Content-Type: application/octet-stream" -H "Authorization: token $gh_token" --data-binary "@$filename" "$uploadurl?name=$filename&label=$pullsha.osx64.zip"
fi
done
for ((j=0; j < $(echo $releases | jq ". | length"); j++)); do
releasename=$(echo $releases | jq --raw-output ".[$j].name")
if [ "$releasename" = "autobin draft release" ]; then
assetfound=false
asseturl=$(echo $releases | jq --raw-output ".[$j].assets_url")
assets=$(curl -H "Accept: application/json" -H "Authorization: token $gh_token" $asseturl)
for ((k=0; k < $(echo $assets | jq ". | length"); k++)); do
assetname=$(echo $assets | jq --raw-output ".[$k].name")
if [ "${assetname: -10}" = ".osx64.zip" ]; then
assetstate=$(echo $assets | jq --raw-output ".[$k].state")
if [ "$assetstate" = "new" ]; then
binaryurl=$(echo $assets | jq --raw-output ".[$k].url")
curl -X DELETE -H "Authorization: token $gh_token" $binaryurl
else
assetfound=true
fi
fi
done
if [ $assetfound = false ]; then
uploadurl=$(echo $releases | jq --raw-output ".[$j].upload_url")
uploadurl=${uploadurl//\{?name,label\}/}
# existing build tag or branch
targetbranch=$(echo $releases | jq --raw-output ".[$j].target_commitish")
targettag=$(echo $releases | jq --raw-output ".[$j].tag_name")
if [ "$targettag" != "null" ]; then
for ((l=0; l < $(echo $tags | jq ". | length"); l++)); do
tag=$(echo $tags | jq --raw-output ".[$l].name")
if [ "$targettag" = "$tag" ]; then
targetbranch=$targettag
fi
done
fi
mkdir repos
cd repos
rm -rf $repositoryname
echo create and upload binary $repositoryurl $targetbranch
git clone $repositoryurl -b $targetbranch
cd $repositoryname
virtualenv -p python2.7 pythonenv
source pythonenv/bin/activate
pip2 install py2app
# workaround for http://stackoverflow.com/questions/25394320/py2app-modulegraph-missing-scan-code
sed -i '' 's/scan_code/_scan_code/g' pythonenv/lib/python2.7/site-packages/py2app/recipes/virtualenv.py
sed -i '' 's/load_module/_load_module/g' pythonenv/lib/python2.7/site-packages/py2app/recipes/virtualenv.py
pip2 install -r requirements.txt
python2 setup.py install
rm -r dist
python2 setup.py py2app
deactivate
# workaround for lib2to3 issue (https://github.com/Storj/storjnode/issues/102)
cd dist/$repositoryname.app/Contents/Resources/lib/python2.7/
mv site-packages.zip unzipme.zip
mkdir site-packages.zip
mv unzipme.zip site-packages.zip/
cd site-packages.zip/
unzip unzipme.zip
rm unzipme.zip
cd ../../../../../../
zip -r -9 $repositoryname.osx64.zip $repositoryname.app
filename=$repositoryname.osx64.zip
curl -H "Accept: application/json" -H "Content-Type: application/octet-stream" -H "Authorization: token $gh_token" --data-binary "@$filename" "$uploadurl?name=$filename"
fi
fi
done
| true |
68658689f5dfc2594bb5ada1a513db1b0fa305f9 | Shell | maniaabdi/system-config | /bin/switch-to-vga | UTF-8 | 2,661 | 3.125 | 3 | [] | no_license | #!/bin/bash
## start generated code
TEMP=$(getopt -o r: --long "res:" -n $(basename $0) -- "$@")
res=
eval set -- "$TEMP"
while true; do
case "$1" in
-r|--res)
res=$2
shift 2
;;
--)
shift
break
;;
*)
die "internal error"
;;
esac
done
## end generated code
xrandr_out=$(xrandr)
lvds=$(echo "$xrandr_out" | grep '^LVDS' | pn 1)
vga=$(echo "$xrandr_out" | grep '^VGA' | pn 1)
hdmi=$(echo "$xrandr_out" | grep '^HDMI' | pn 1)
if echo "$xrandr_out" | grep '^VGA\S* disconnected' -P; then
vga=$hdmi
fi
if test $(basename $0) = switch-to-vga; then
xrandr --output $lvds --auto --below $vga
xrandr --output $vga --auto
elif test $(basename $0) = switch-to-vga-alone; then
xrandr --output $lvds --off
xrandr --output $hdmi --off
xrandr --output $vga --auto
elif test $(basename $0) = switch-to-hdmi-and-laptop; then
touch /tmp/bhj-notify-not-working
xrandr --output $lvds --auto --below $hdmi
xrandr --output $hdmi --auto
xrandr --output $lvds --auto --below $hdmi
xrandr --output $hdmi --auto
elif test $(basename $0) = switch-to-hdmi; then
xrandr --output $lvds --off
xrandr --output $vga --off
xrandr --output $hdmi --auto
elif test $(basename $0) = switch-to-laptop; then
xrandr --output $lvds --auto
xrandr --output $hdmi --off
xrandr --output $vga --off
else
(
set +x
newmode=$(cvt 1360 768 | perl -ne 's/"//g; print $'\'' if m/^Modeline /')
xrandr --newmode $newmode
xrandr --addmode HDMI1 ${newmode%% *}
)
res=$(
xrandr |
pn 1 |
uniq |
grep -P '^\d'|
sort -n -r |
perl -e '
@x = <>;
$last = 0;
for $res (@x) {
$res =~ s/_.*//;
if ($res eq $last) {
print $last;
last;
}
$last = $res
}'
)
xrandr --output $lvds --mode $(xrandr |perl -ne "print if m/\Q$lvds\E.*connected/ .. (m/connected/ and not m/\Q$lvds\E/)" |pn 1 | grep $res|head -n 1) --same-as $vga
xrandr --output $vga --mode $(xrandr |perl -ne "print if m/\Q$vga\E.*connected/ .. (m/connected/ and not m/\Q$vga\E/)" |pn 1 | grep $res|head -n 1)
fi
if test $(basename $0) = switch-to-both-display; then
sawfish-client -e '(bind-keys window-keymap "Super-F1" '\''(system "sawfish-force-maximize&"))'
else
sawfish-client -e '(bind-keys window-keymap "Super-F1" '\''maximize-window-toggle)'
fi
sawfish-re-maximize
| true |
81cfeb9adcf5450bb7c21ec36a25df99ec1493bb | Shell | quattor/configuration-modules-grid | /ncm-glitestartup/src/main/templates/gLite.template | UTF-8 | 2,613 | 3.640625 | 4 | [
"LicenseRef-scancode-unknown-license-reference",
"EUDatagrid"
] | permissive | #!/bin/bash
###############################################################################
# IMPORTANT:
# - This script is managed by ncm-glitestartup configuration module: DO NOT EDIT
# - The template is written in ncm-template syntax: be sure to escape \< and \>
#
# chkconfig: 345 96 96
# description: gLite startup script used by WMS and LB services
#
# processname: gLite
#
# Based on script once provided by EGEE.
# Copyright (c) Members of the EGEE Collaboration. 2004
# See http://eu-egee.org/partners/ for details on the copyright holders
# For license conditions see the license file or http://eu-egee.org/license.html
# Original author(s): Marian ZUREK \<Marian.ZUREK@cern.ch\>
# Nuno SILVA \<Nuno.Orestes.Vaz.da.Silva@cern.ch\>
#
###############################################################################
<FOREACH: $script restartEnv>
. <restartEnv/<$script>>;
<ENDFOR>
GLITE_STARTUP_SCRIPT_DIR=${EMI_LOCATION}/etc/init.d/
if [ ! -d ${GLITE_STARTUP_SCRIPT_DIR} ]
then
echo "Directory not found: ( ${GLITE_STARTUP_SCRIPT_DIR}) ... exiting!"
exit 1
fi
GLITE_STARTUP_FILE=${EMI_LOCATION}/etc/gLiteservices
if [ ! -r ${GLITE_STARTUP_FILE} ]
then
echo "File ${GLITE_STARTUP_FILE} not created (or not readable) ... exiting!"
exit 1
fi
case "$1" in
start) SERVICE_LIST=`cat $GLITE_STARTUP_FILE`
for s in `echo ${SERVICE_LIST}`
do
echo "*** `basename ${s}`:";
${s} start
echo""
done;;
stop) SERVICE_LIST=`cat $GLITE_STARTUP_FILE | sort `
for s in `echo ${SERVICE_LIST}`
do
echo "*** `basename ${s}`:";
${s} stop
echo""
done;;
restart) echo "STOPPING SERVICES"; $0 stop;
echo "STARTING SERVICES"; $0 start;;
status) SERVICE_LIST=`cat $GLITE_STARTUP_FILE`
for s in `echo ${SERVICE_LIST}`
do
echo "*** `basename ${s}`:";
${s} status
echo""
done;;
version) echo -n "glite version: "; rpm -q --qf %{V} glite-version; echo
echo -n "yaim version: "; rpm -q --qf %{V} glite-yaim-core; echo ;;
*) echo "====================================================================="
echo "===== Usage: gLite { start | stop | restart | status | version} ====="
echo "====================================================================="
exit 1;;
esac
exit 0
| true |
727bf9dcc8d37bffd0568523ff62579ce6a81a54 | Shell | uk-gov-mirror/UKHomeOffice.kube-hocs-frontend | /deploy.sh | UTF-8 | 2,268 | 3.03125 | 3 | [] | no_license | #!/bin/bash
set -euo pipefail
export IP_WHITELIST=${POISE_WHITELIST}
export KUBE_NAMESPACE=${ENVIRONMENT}
export KUBE_TOKEN=${KUBE_TOKEN}
export VERSION=${VERSION}
export DOMAIN="cs"
if [ ${KUBE_NAMESPACE%-*} == "wcs" ]; then
export DOMAIN="wcs"
fi
if [[ ${KUBE_NAMESPACE} == *prod ]]
then
export MIN_REPLICAS="2"
export MAX_REPLICAS="6"
export KUBE_SERVER=https://kube-api-prod.prod.acp.homeoffice.gov.uk
else
export MIN_REPLICAS="1"
export MAX_REPLICAS="2"
export KC_REALM=https://sso-dev.notprod.homeoffice.gov.uk/auth/realms/hocs-notprod
export KUBE_SERVER=https://kube-api-notprod.notprod.acp.homeoffice.gov.uk
fi
if [[ "${KUBE_NAMESPACE}" == "wcs-prod" ]] ; then
export DNS_PREFIX=www.wcs
export KC_REALM=https://sso.digital.homeoffice.gov.uk/auth/realms/HOCS
elif [[ "${KUBE_NAMESPACE}" == "cs-prod" ]] ; then
export DNS_PREFIX=www.cs
export KC_REALM=https://sso.digital.homeoffice.gov.uk/auth/realms/hocs-prod
elif [[ "${KUBE_NAMESPACE}" == "cs-dev" ]] ; then
export DNS_PREFIX=dev.internal.cs-notprod
elif [[ "${KUBE_NAMESPACE}" == "wcs-dev" ]] ; then
export DNS_PREFIX=dev.internal.wcs-notprod
elif [[ "${KUBE_NAMESPACE}" == "cs-qa" ]] ; then
export DNS_PREFIX=qa.internal.cs-notprod
elif [[ "${KUBE_NAMESPACE}" == "wcs-qa" ]] ; then
export DNS_PREFIX=qa.internal.wcs-notprod
elif [[ "${KUBE_NAMESPACE}" == "cs-demo" ]] ; then
export DNS_PREFIX=demo.cs-notprod
elif [[ "${KUBE_NAMESPACE}" == "wcs-demo" ]] ; then
export DNS_PREFIX=demo.wcs-notprod
elif [[ "${KUBE_NAMESPACE}" == "hocs-qax" ]] ; then
export DNS_PREFIX=qax.internal.cs-notprod
else
export DNS_PREFIX=${ENVIRONMENT}.internal.${DOMAIN}-notprod
fi
export DNS_SUFFIX=.homeoffice.gov.uk
export DOMAIN_NAME=${DNS_PREFIX}${DNS_SUFFIX}
export INGRESS_TYPE="external"
if [[ $DNS_PREFIX == *"internal"* ]]; then
export INGRESS_TYPE="internal"
fi
echo
echo "Deploying hocs-frontend to ${ENVIRONMENT}"
echo "Keycloak realm: ${KC_REALM}"
echo "${INGRESS_TYPE} domain: ${DOMAIN_NAME}"
echo
cd kd
kd --insecure-skip-tls-verify \
--timeout 10m \
-f ingress-${INGRESS_TYPE}.yaml \
-f converter-configmap.yaml \
-f configmap.yaml \
-f deployment.yaml \
-f service.yaml \
-f autoscale.yaml
| true |
3a8813e47abd0975532378e0379bf16859ac170c | Shell | AscendNTNU/scripts | /camera/start_stream.sh | UTF-8 | 2,660 | 2.890625 | 3 | [] | no_license | #!/bin/bash
FRONT_PORT=12000
LEFT_PORT=12001
BACK_PORT=12002
RIGHT_PORT=12003
FISHEYE_PORT=12004
RECEIVER_IP="192.168.1.${1:-151}"
cameras=(/dev/videoFront /dev/videoLeft /dev/videoRight /dev/videoBack /dev/videoFisheye)
echo "Receiver IP:$RECEIVER_IP"
for entry in "/dev/"video*; do
if [ "$entry" = "/dev/videoFront" ]; then
echo "Starting stream from $entry"
gst-launch-1.0 v4l2src device=/dev/videoFront ! image/jpeg, width=1920, height=1080, framerate=30/1 ! queue ! rtpjpegpay ! udpsink host=$RECEIVER_IP port=$FRONT_PORT &
delete=(/dev/videoFront)
cameras=( "${cameras[@]/$delete}" )
v4l2-ctl --device=$entry -c power_line_frequency=1
v4l2-ctl --device=$entry -c focus_auto=0
v4l2-ctl --device=$entry -c focus_absolute=0
elif [ "$entry" = "/dev/videoLeft" ]; then
echo "Starting stream from $entry"
gst-launch-1.0 v4l2src device=/dev/videoFront ! image/jpeg, width=1920, height=1080, framerate=30/1 ! queue ! rtpjpegpay ! udpsink host=$RECEIVER_IP port=$LEFT_PORT &
delete=(/dev/videoLeft)
cameras=( "${cameras[@]/$delete}" )
v4l2-ctl --device=$entry -c power_line_frequency=1
v4l2-ctl --device=$entry -c focus_auto=0
v4l2-ctl --device=$entry -c focus_absolute=0
elif [ "$entry" = "/dev/videoRight" ]; then
echo "Starting stream from $entry"
gst-launch-1.0 v4l2src device=/dev/videoFront ! image/jpeg, width=1920, height=1080, framerate=30/1 ! queue ! rtpjpegpay ! udpsink host=$RECEIVER_IP port=$RIGHT_PORT &
delete=(/dev/videoRight)
cameras=( "${cameras[@]/$delete}" )
v4l2-ctl --device=$entry -c power_line_frequency=1
v4l2-ctl --device=$entry -c focus_auto=0
v4l2-ctl --device=$entry -c focus_absolute=0
elif [ "$entry" = "/dev/videoBack" ]; then
echo "Starting stream from $entry"
gst-launch-1.0 v4l2src device=/dev/videoFront ! image/jpeg, width=1920, height=1080, framerate=30/1 ! queue ! rtpjpegpay ! udpsink host=$RECEIVER_IP port=$BACK_PORT &
delete=(/dev/videoBack)
cameras=( "${cameras[@]/$delete}" )
v4l2-ctl --device=$entry -c power_line_frequency=1
v4l2-ctl --device=$entry -c focus_auto=0
v4l2-ctl --device=$entry -c focus_absolute=0
elif [ "$entry" = "/dev/videoFisheye" ]; then
echo "Starting stream from $entry"
gst-launch-1.0 v4l2src device=/dev/videoFront ! image/jpeg, width=1280, height=720, framerate=60/1 ! tee name=fisheyeTee \
fisheyeTee. ! queue ! rtpjpegpay ! udpsink host=localhost port=$FISHEYE_PORT \
fisheyeTee. ! queue ! rtpjpegpay ! udpsink host=$RECEIVER_IP port=$FISHEYE_PORT &
delete=(/dev/videoFisheye)
cameras=( "${cameras[@]/$delete}" )
fi
done
#Warn about missing cameras
for i in ${cameras[@]}; do
echo "$i not started"
done
wait
| true |
14e6a4788a3e61b72896ed19135851ed75c414cf | Shell | petronny/aur3-mirror | /ffdecsawrapper-git-lts/ffdecsawrapper.rc | UTF-8 | 1,354 | 3.53125 | 4 | [] | no_license | #!/bin/bash
. /etc/rc.conf
. /etc/rc.d/functions
[ -f /etc/conf.d/ffdecsawrapper ] && . /etc/conf.d/ffdecsawrapper
PID=$(pidof -o %PPID /usr/bin/ffdecsawrapper)
case $1 in
start)
stat_busy "Loading dvbloopback kernel module"
[[ -z $DVBLOOPBACK_ARGS ]] && stat_die 1
modprobe dvbloopback $DVBLOOPBACK_ARGS
sleep 2
stat_done
stat_busy "Starting FFdecsaWrapper daemon"
[[ -z $FFDECSAWRAPPER_ARGS ]] && stat_die 2
[[ -z $CAMDIR ]] && stat_die 3
[[ -z $LOGFILE ]] && stat_die 4
[[ -z $PID ]] && /usr/bin/ffdecsawrapper -D $FFDECSAWRAPPER_ARGS --cam-dir=$CAMDIR -l $LOGFILE
if [ $? -gt 0 ]; then
stat_die 5
else
add_daemon ffdecsawrapper
stat_done
fi
;;
stop)
stat_busy "Stoping FFdecsaWrapper daemon"
[[ ! -z $PID ]] && kill $PID &> /dev/null
if [ $? -gt 0 ]; then
stat_die 6
else
rm_daemon ffdecsawrapper
stat_done
fi
stat_busy "Unloading dvbloopback kernel module"
sleep 2
modprobe -r dvbloopback
stat_done
;;
restart)
$0 stop
sleep 2
$0 start
;;
*)
echo "usage: $0 {start|stop|restart}" >&2
exit 1
esac
| true |
2b8b643b90a46a01fac5f44bec19f2fc332345e3 | Shell | swflynn/DGB_QMC | /scripts/cluster/Analysis/clean_eig.sh | UTF-8 | 1,141 | 2.796875 | 3 | [] | no_license | #=============================================================================80
# Convergence wrt. Sobol Points
#==============================================================================!
# Discussion:
#Generally only care about the t lowest eigenvalues (not all NG eigenvalues)
#For water ~3000cm-1 above the ground state corresponds to vibrations
#uses xmgrace to plot n eigenvalues for each alpha0 as a function of N-Sobol
#==============================================================================!
# Modified:
# 4 April 2019
# Author:
# Shane Flynn
#==============================================================================!
#eigenvalues.dat ==> Nsobol E0 E1 E2 .... Et ...... NG
#eigs.dat ==> Nsobol E0 E1 E2 .... Et
#t ==> remove all eigenvalues from (t+1) to NG
#==============================================================================!
for d in `find . -type d`
do ( cd "$d"
if test ! -f eigenvalues.dat; then continue; fi
awk -v f=1 -v t=100 '{for(i=f;i<=t;i++) printf("%s%s",$i,(i==t)?"\n":OFS)}' eigenvalues.dat > eigs.dat
) done
| true |
2b91ec184a7eab81bbea3e0a86164845229edcc9 | Shell | beignetbytes/tsxlib-rs | /code_cov.sh | UTF-8 | 1,458 | 3.59375 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!/bin/bash
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color
EXCLUDE="/.cargo,/examples,/usr/lib"
TARGET="target/cov"
echo -e "${GREEN}*** Set up kcov ***${NC}"
wget https://github.com/SimonKagstrom/kcov/archive/master.tar.gz &&
tar xzf master.tar.gz &&
cd kcov-master &&
mkdir build &&
cd build &&
cmake .. &&
make &&
make install DESTDIR=../../kcov-build &&
cd ../.. &&
rm -rf kcov-master &&
TSXLIB_UNIT_TESTS="target/debug/deps/tsxlib-"
export RUSTFLAGS="-C link-dead-code"
echo -e "${GREEN}*** Clean previous coverage results and executables ***${NC}"
rm -rf "$TARGET"
rm -f "$TSXLIB_UNIT_TESTS"*
echo -e "${GREEN}*** Rebuilding tests ***${NC}"
cargo clean
cargo test --no-run
echo -e "${GREEN}*** Run coverage on tsxlib unit tests ***${NC}"
for test_file in `ls "$TSXLIB_UNIT_TESTS"*`
do
if [[ ! -x "$test_file" ]]; then
echo -e "${YELLOW}*** skipping non executable $test_file ***${NC}"
continue
fi
echo -e "${GREEN}*** Running $test_file ***${NC}"
mkdir -p "target/cov/$(basename $test_file)"
./kcov-build/usr/local/bin/kcov --exclude-pattern=$EXCLUDE --verify "target/cov/$(basename $test_file)" "$test_file";
if [ "$?" != "0" ]; then
echo -e "${RED}*** Failure during unit test converage ***${NC}"
exit 1
fi
bash <(curl -s https://codecov.io/bash)
done
echo -e "${GREEN}*** Coverage completed and uploaded successfully ***${NC}"
| true |
20592ecd5c95dbfab4d4e771ef12da2018c96d86 | Shell | malipio/bash-ci | /nginx_cgi/cgi-bin/github_event_dispatcher.sh | UTF-8 | 1,006 | 3.375 | 3 | [] | no_license | #!/usr/bin/env bash
# CGI response: emit the Content-type header, then a blank line to separate
# headers from the body, then the plain-text body.
echo "Content-type: text/plain"
echo
echo Github Event dispatcher
# Everything inside this subshell is both returned to the caller and appended
# to the log file by the tee at the end.
(
 # HTTP_X_GITHUB_EVENT is set by the web server from the X-GitHub-Event header.
 case $HTTP_X_GITHUB_EVENT in
 check_suite) echo "Check Suite event"
  echo "Not Implemented yet"
  ;;
 push ) echo "Push event received"
  # Read the request body once from stdin, mirror it to the log, and keep only
  # the fields the build runner needs.
  JSON=$(cat - | tee -a /var/log/nginx/web/bash-ci.log | jq '{ref: .ref, ssh_url: .repository.ssh_url, commit_sha: .after, repo_name: .repository.name}')
  echo "$JSON"
  # Exported so the forked build_runner.sh inherits them via its environment.
  export SSH_URL=$(echo "$JSON" | jq -r '.ssh_url')
  export COMMIT_SHA=$(echo "$JSON" | jq -r '.commit_sha')
  export REPO_NAME=$(echo "$JSON" | jq -r '.repo_name')
  echo -n "Forking build runner..."
  # nohup + background + disown detaches the runner so the CGI response
  # returns immediately instead of waiting for the build.
  nohup /opt/bash-ci/bin/build_runner.sh > /dev/null 2>&1 & disown && echo " Build runner forked"
  ;;
 *) echo "Unsupported event type: $HTTP_X_GITHUB_EVENT"
  # Diagnostic fallback: dump identity, environment, and the raw request body.
  echo Running as $(whoami)
  env
  echo Echoing body...
  cat -
  echo
  echo "OK. Should be of length = ${CONTENT_LENGTH:=0}"
 esac
 echo "Event received. All done."
) | tee -a /var/log/nginx/web/bash-ci.log
| true |
772ea84e636d9a742b5026c12d2791025805327e | Shell | liuzikai/lc3-feedback-system-artifact | /klc3/klc3-manual/examples/zjui_ece220_fa20/mp2/run_klc3.sh | UTF-8 | 1,018 | 3.34375 | 3 | [
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Guard clause: exactly one argument (the test asm file) is required.
if [[ $# -ne 1 ]]; then
    echo "Usage: run_klc3.sh <test asm file>, using examples/mp2_gold.asm as gold"
    exit 1
fi

# The test file must exist before klc3 is invoked.
if [[ ! -f "$1" ]]; then
    echo "File \"$1\" not found"
    exit 1
fi

# Invoke klc3 with the standard MP2 configuration:
#   --use-forked-solver=false   turn off the forked solver
#   replay.sh                   copied alongside the generated tests
#   step / output-length limits bound symbolic execution
#   mem_alloc_.asm              75 memory slots starting from x3800 for the
#                               student (trailing "_" means it is not copied)
#   test_data.asm               symbolic input space
#   --test / --gold             student code vs. reference solution
klc3 \
    --use-forked-solver=false \
    --copy-additional-file ../../../asserts/replay.sh \
    --max-lc3-step-count=100000 \
    --max-lc3-out-length=1100 \
    mem_alloc_.asm \
    test_data.asm \
    --test "$1" \
    --gold examples/mp2_gold.asm

echo "Note: default output directory is klc3-out-* under the same directory as the last loaded test asm file"
6ae2f363429da43a9f38ca826399a2622a57e480 | Shell | nellore/rail | /eval/run_single_sample_rail_sim.sh | UTF-8 | 3,522 | 3.578125 | 4 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# $1: number of cores
# $2: output directory -- SPECIFY FULL PATH
# $3: where to find sample fastqs from generate_bioreps.py
# $4: sample name; this is the prefix of "_sim.fastq"
# Ex: taskset -c 0,1,2,3 sh run_single_sample_rail_sim.sh 4 ./myoutput NA11829_male_CEU_UU_6-1-1 /tmp
# See generate_bioreps.py for how sample data was generated.
# Specify number of parallel processes for each program
CORES=$1
# Specify FULL PATH to output directory
MAINOUTPUT=$2
mkdir -p ${MAINOUTPUT}
# Specify data directory; fastqs should be of the form [SAMPLE NAME]_sim.fastq; Flux beds should be
# of form [SAMPLE_NAME]_sim.bed
DATADIR=$3
# Specify sample name at command line
SAMPLE=$4
# Temp dir
SCRATCH=$5
mkdir -p ${SCRATCH}
## Specify locations of executables
# Used version 0.1.8 of Rail-RNA, but wrapped version 2.2.4 of Bowtie2 and version 1.1.1 of Bowtie
# Specify Python executable/loc of get_junctions.py; PyPy 2.5.0 was used
PYTHON=/home/student/anellor1/raildotbio/pypy-2.5-linux_x86_64-portable/bin/pypy
RAILHOME=/home/student/anellor1/rail
RAILRNA=rail-rna
# Samtools v1.2 was used
SAMTOOLS=/home/student/anellor1/raildotbio/samtools-1.2/samtools
# Specify log filename for recording times
TIMELOG=${MAINOUTPUT}/small_data_times.log
## Specify locations of reference-related files
## See create_indexes.sh for index creation script
# Bowtie indexes
BOWTIE1IDX=/dcl01/leek/data/railsims/indexes_for_paper/genome
BOWTIE2IDX=/dcl01/leek/data/railsims/indexes_for_paper/genome
# Generic name of file measuring performance of a given alignment strategy
# Performance is computed with spliced_read_recovery_performance.py; refer to that file for details
PERFORMANCE=perform
echo 'Splitting Flux FASTQs...'
awk '(NR-1) % 8 < 4' $DATADIR/${SAMPLE}_sim.fastq >${SCRATCH}/${SAMPLE}_sim_left.fastq
awk '(NR-1) % 8 >= 4' $DATADIR/${SAMPLE}_sim.fastq >${SCRATCH}/${SAMPLE}_sim_right.fastq
cd $SCRATCH
mkdir -p ${SAMPLE}
cd ${SAMPLE}
mkdir -p rail
cd $MAINOUTPUT
mkdir -p ${SAMPLE}
SAMPLEOUTPUT=${MAINOUTPUT}/${SAMPLE}
# Run simulations
OUTPUT=$SCRATCH/${SAMPLE}
echo 'Running Rail-RNA on sample '${SAMPLE}'...'
echo '#'${SAMPLE}' Rail-RNA' >>$TIMELOG
# Write manifest file
echo -e ${SCRATCH}/${SAMPLE}_sim_left.fastq'\t0\t'${SCRATCH}/${SAMPLE}_sim_right.fastq'\t0\t'${SAMPLE} >${SCRATCH}/${SAMPLE}.manifest
time ($RAILRNA go local -p $CORES -m ${SCRATCH}/${SAMPLE}.manifest -o $OUTPUT/rail --log $OUTPUT/rail.log -x $BOWTIE1IDX,$BOWTIE2IDX -f -d bam >/dev/null 2>&1) 2>>$TIMELOG
echo 'Computing precision and recall...'
(for i in $OUTPUT/rail/alignments/*.bam; do $SAMTOOLS view $i; done | $PYTHON $RAILHOME/eval/spliced_read_recovery_performance.py -t $DATADIR/${SAMPLE}_sim.bed >$OUTPUT/rail/$PERFORMANCE 2>$OUTPUT/rail/${PERFORMANCE}_summary) &
(for i in $OUTPUT/rail/alignments/*.bam; do $SAMTOOLS view $i; done | $PYTHON $RAILHOME/eval/intron_recovery_performance.py -t $DATADIR/${SAMPLE}_sim.bed >$OUTPUT/rail/${PERFORMANCE}_intron_recovery_summary) &
(for i in $OUTPUT/rail/alignments/*.bam; do $SAMTOOLS view $i; done | $PYTHON $RAILHOME/eval/mapping_accuracy.py -t $DATADIR/${SAMPLE}_sim.bed >$OUTPUT/rail/${PERFORMANCE}_mapping_accuracy_summary) &
(for i in $OUTPUT/rail/alignments/*.bam; do $SAMTOOLS view $i; done | $PYTHON $RAILHOME/eval/mapping_accuracy.py -t $DATADIR/${SAMPLE}_sim.bed -c 0.1 >$OUTPUT/rail/${PERFORMANCE}_mapping_accuracy_SC_summary) &
wait
# Move rail results to final destination
rm -rf ${SAMPLEOUTPUT}/rail
cp -r ${OUTPUT}/rail $SAMPLEOUTPUT
rm -rf ${OUTPUT}/rail
| true |
09416cb17840c188e46539511fd907ab07519ba0 | Shell | barmintor/ldpd-hyacinth | /extras/bagit-create.sh | UTF-8 | 4,304 | 3.890625 | 4 | [] | no_license | # Run this script INSIDE of the directory that you want to bag (your "bag directory").
# There must be a "data" directory inside of the "bag directory". This "data" directory holds all of the content that you want to bag.
echo "Bag the content in `pwd` ? [yes]:"
echo "(Typing anything other than 'yes' will exit.)"
echo -ne "> "
read input_variable
if [[ "$input_variable" != "yes" ]]; then
echo "The BagIt process has been canceled."
exit 0
fi
echo ""
# Check for 'data' directory. This is required.
if [[ ! -d "data" ]]; then
echo "Error: Could not find 'data' directory directly under `pwd`. This directory is required, and is where your main bag content must be located."
exit 1
fi
echo "Counting number of files in 'data' directory (and following symlinks)..."
TOTAL_NUMBER_OF_FILES=`find -L data | wc -l`
echo "Total: $TOTAL_NUMBER_OF_FILES"
#TODO: Eventually, it would be nice to use this number to show a checksum progress indicator (i.e. How many checksums have been calculated so far and how many are left? Percentage?)
START_TIME=`date`
START_UNIX_TIMESTAMP_IN_SECONDS="$(date +%s)"
echo "Starting BagIt process..."
echo "Start Data/Time: $START_TIME"
echo ""
echo "Calculating checksums for files in `pwd`/data..."
perform_step=0
if [[ -e "manifest-sha1.txt" ]]; then
echo ""
echo "There is already a manifest-sha1.txt file present. Do you want to regenerate it? [yes]:"
echo "(Typing anything other than 'yes' will skip new checksum generation and will use the existing manifest-sha1.txt file for upcoming bag creation steps.)"
echo -ne "> "
read input_variable
if [[ "$input_variable" == "yes" ]]; then
perform_step=1
echo "Recalculating checksums for files in `pwd`/data..."
fi
else
perform_step=1
fi
if [[ $perform_step -eq 1 ]]; then
#payload-manifest.sh
find data -type f -follow | while read f; do sha1sum "$f"; done > manifest-sha1.txt
fi
echo "Calculating Payload-Oxum: The 'octetstream sum' of the payload in `pwd`/data..."
#perform_step=0
#if [[ -e "oxum.txt" ]]; then
# echo ""
# echo "There is already an oxum.txt file present. Do you want to regenerate it? [yes]:"
# echo "(Typing anything other than 'yes' will skip new oxum generation and will use the existing oxum.txt file for upcoming bag creation steps.)"
# echo -ne "> "
# read input_variable
# if [[ "$input_variable" == "yes" ]]; then
# perform_step=1
# echo "Recalculating Payload-Oxum for files in `pwd`/data..."
# fi
#else
# perform_step=1
#fi
#if [[ $perform_step -eq 1 ]]; then
# #oxum-space-safe.sh
# find data -type f -follow -exec wc -lc "{}" > oxum.txt \;
#fi
TOTAL_OXUM_BYTE_SUM=0
SAVEIFS=$IFS # Save original, default $IFS file separator value
IFS=$(echo -en "\n\b") # Temporarily set file separator value to something that wouldn't show up in our files
files=$(find data -type f -follow)
for f in $files
do
BYTE_SIZE=$(wc -c < "$f")
TOTAL_OXUM_BYTE_SUM=$(($TOTAL_OXUM_BYTE_SUM + $BYTE_SIZE))
done
# restore $IFS default file separator
IFS=$SAVEIFS
echo "Total Oxum byte sum: $TOTAL_OXUM_BYTE_SUM"
# Now that we have the TOTAL_OXUM_BYTE_SUM, we can create bag-info.txt
BAGGING_DATE=`date +%Y-%m-%d`
echo "Bagging-Date: $BAGGING_DATE" > bag-info.txt
echo "Payload-Oxum: $TOTAL_OXUM_BYTE_SUM.$TOTAL_NUMBER_OF_FILES" >> bag-info.txt
# Create bagit.txt file, based on the spec and encoding that we're using
echo 'BagIt-version: 0.97' > bagit.txt
echo 'Tag-File-Character-Encoding: UTF-8' >> bagit.txt
# Create tagmanifest-sha1.txt. LAST STEP. Contains hashes of all non-data-directory files in the bag.
echo "Generating tag manifest..."
# Remove existing tagmanifest-sha1.txt if present
if [[ -e "tagmanifest-sha1.txt" ]]; then
rm tagmanifest-sha1.txt
fi
# Calculate and write out new tagmanifest-sha1.txt
touch tagmanifest-sha1.txt.tmp # We're using .tmp at the end so that this file isn't included in the checksum generation
TAG_MANIFEST_LINES=$(find . -maxdepth 1 -name "*.txt" | while read f; do sha1sum "$f" >> tagmanifest-sha1.txt.tmp; done)
mv tagmanifest-sha1.txt.tmp tagmanifest-sha1.txt
TOTAL_TIME_IN_SECONDS="$(($(date +%s)-START_UNIX_TIMESTAMP_IN_SECONDS))"
echo "BagIt process complete!"
echo "Total script execution time: $TOTAL_TIME_IN_SECONDS seconds"
echo "Enjoy your bag!" | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.