blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
0a603e84551e5b2da4c88c478a498ed674f3d516
|
Shell
|
justone/docker-tmux-static
|
/build.sh
|
UTF-8
| 772
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build a statically linked tmux (version $VERSION) from source, together
# with its two build-time dependencies (ncurses, libevent), all installed
# under $HOME/local.
set -e
set -x
# tmux release tag to check out and build
VERSION=2.6
# based on: https://gist.github.com/pistol/5069697
# ncurses
wget http://ftp.gnu.org/pub/gnu/ncurses/ncurses-6.1.tar.gz
tar xvzf ncurses-6.1.tar.gz
cd ncurses-6.1
./configure --prefix=$HOME/local
make -j8
make install
cd ..
# libevent
git clone git://github.com/libevent/libevent.git
cd libevent
git checkout release-2.1.8-stable
./autogen.sh
./configure --prefix=$HOME/local
make -j8
make install
cd ..
# tmux
git clone https://github.com/tmux/tmux.git tmux-src
cd tmux-src
git checkout $VERSION
./autogen.sh
# NOTE(review): the -L flags pointing at include/ directories look like
# copy-paste noise — only -L$HOME/local/lib is a real library path; confirm.
./configure --prefix=$HOME/local CPPFLAGS="-I$HOME/local/include -I$HOME/local/include/ncurses" LDFLAGS="-static -L$HOME/local/include -L$HOME/local/include/ncurses -L$HOME/local/lib"
make -j8
make install
# Drop the freshly built static binary one level up, next to this script.
cp tmux ..
| true
|
3808ae264b824209a3d8a30ea6bc872b8a003116
|
Shell
|
Ericmas001/hq-hvac-api-docker
|
/entrypoint.sh
|
UTF-8
| 406
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Container entrypoint: print diagnostics, regenerate /exec/const.py from
# the KEY/PIN environment variables, seed the config file on first run,
# then start the service.
echo ""
echo ">>> VERSION"
uname -a
echo ""
echo ">>> USER GROUPS"
groups
# Rebuild const.py on every start from the container environment.
# NOTE(review): PIN is written unquoted, so it must already be a valid
# Python literal (e.g. a number); an empty PIN yields invalid Python —
# confirm upstream always sets it.
echo "" > /exec/const.py && \
echo "KEY = \"$KEY\"" >> /exec/const.py && \
echo "PIN = $PIN" >> /exec/const.py
# cp -n: keep an existing last_config.json; only seed it on first run.
cp -n /default_config.json /config/last_config.json
echo ""
echo ">>> CONST"
cat /exec/const.py
echo ""
echo ">>> CONFIG"
cat /config/last_config.json
echo ""
echo ">>> LET'S RUN"
python /exec/hvacws.py
| true
|
214cecc3e339fbeb864e78bab15360fe456dccc6
|
Shell
|
jbalsamo/upLoadLymph
|
/uploadLymphocyteHm.sh
|
UTF-8
| 1,508
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# bash uploadLymphHm.sh <options>
# Authors: Alina Jasniewski, Joseph Balsamo
# Functions
# function: usage(brief)
# Print usage help to stdout.
#   $1 - brief flag: "false" prints the full option listing,
#        anything else prints only the one-line synopsis.
function usage() {
  echo "Usage: $ ./uploadLymphHm.sh [options] -h <host> -f <filename>"
  # BUG FIX: quote $1 (an empty/absent argument made the original
  # `[ $1 == false ]` a syntax error) and use the POSIX "=" comparison.
  if [ "$1" = false ]
  then
    echo " Options:"
    echo " -f <filename>: filename of the data to be loaded (this parameter required)"
    echo " -h <host>: ip or hostname of database (this parameter required)"
    echo " -d <database name> (default: quip)"
    echo " -p <database port> (default: 27017)"
    echo ""
    echo " --help Display full help usage."
    echo " Notes: requires mongoDB client tools installed on running server"
    echo " Notes: If '-f' parameter is *, it must be in quotes."
  fi
}
# end functions
# Set default variables.
database="quip"
port="27017"
FILE=""
HOST=""

# Parse command-line arguments.
while [ -n "$1" ]
do
  case "$1" in
    -h) HOST="$2"
      shift;;
    -p) port="$2"
      shift ;;
    -f) FILE=${2}
      shift;;
    -d) database=${2}
      shift;;
    --help)
      usage false
      exit 0 ;;
    *) usage true ;;
  esac
  shift
done

# Both -h and -f are required.
if [ -z "${HOST}" ] || [ -z "${FILE}" ]
then
  echo "Missing required parameters"
  usage true
  exit 1
fi

TYPE=${database}

# NOTE(review): FILE is used here as a *directory* containing heatmap_*.json
# and meta_*.json files even though the usage text calls it a filename —
# confirm against callers.
for filename in "${FILE}"/heatmap_*.json ; do
  # BUG FIX: the original line ended in the garbled "$(unknown)"; importing
  # the matched file is the only sensible reading, since $filename was
  # otherwise unused.
  mongoimport --port "${port}" --host "${HOST}" -d "${TYPE}" -c objects --file "${filename}"
done
for filename in "${FILE}"/meta_*.json ; do
  mongoimport --port "${port}" --host "${HOST}" -d "${TYPE}" -c metadata --file "${filename}"
done
exit 0
| true
|
157c1f2539691d96c080bd837cda6313ac5c34c1
|
Shell
|
GeorgePatsias/Remote-User-Creation
|
/add_user.sh
|
UTF-8
| 429
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Usage: ./add_user.sh <Domain> <User Name> <Default Password> <SSH Key Path or String in quotes>
# Example: ./add_user.sh example.com john Password123 "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA879BJGYlPTLIuc9/R5MYiN4yc/YiCLcdBpSdzgK9Dt0Bkfe3rSz5cPm4wmehdE7GkVFXrBJ2YHqPLuM1yx1AUxIebpwlIl9f/aUHOts9eVnVh4NztPy0iSU/Sv0b2ODQQvcy2vYcujlorscl8JjAgfWsO3W4iGEe6QwBpVomcME8IU35v5VbylM9ORQa6wvZMVrPECBvwItTY8cPWH3MGZiK/74eHbSLKA4PY3gM4GHI450Nie16yggEg2aTQfWA1rry9JYWEoHS9pJ1dnLqZU3k/8OWgqJrilwSoC5rGjgp93iu0H8T6+mEHGRQe84Nk1y5lESSWIbn6P636Bl3uQ== your@email.com"
# Creates a sudo-capable user on the remote host $1 in one SSH call:
# adduser with empty GECOS and no interactive password, set the initial
# password via chpasswd, force a password change at first login (chage -d0),
# install the supplied public key, and add the user to the sudo group.
# NOTE(review): all arguments are interpolated unquoted/unescaped into the
# remote command line — run only with trusted input.
ssh root@$1 "adduser $2 --gecos '' --disabled-password && echo '$2:$3' | chpasswd && chage -d0 $2 && mkdir /home/$2/.ssh/ && echo $4 > /home/$2/.ssh/authorized_keys && usermod -aG sudo $2"
| true
|
0a4f7daad0beb1ad48cc0fbd40d902467bbddccc
|
Shell
|
florent-engineering/anemomind
|
/www2/synctest/refresh_boxid.sh
|
UTF-8
| 398
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Fetch the box's wlan0 MAC address over SSH and store it, colon-stripped,
# as the box id in ./boxid.txt.
echo "When running this script, make sure that the box is either on your local network or you are connected to the local network of the box."
echo "If 'ssh box' does not work, then this script won't work."
echo "Fetching box id..."
# Remotely extract the MAC address from ifconfig output, then strip colons.
ssh box "ifconfig wlan0 | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}' | sed 's/[:\s]//g'" > boxid.txt
echo "The new box id is $(cat boxid.txt)"
| true
|
d0960456a569d92db4cbeea1da6f1bdb6654f312
|
Shell
|
rancher/image-build-base
|
/scripts/go-build-static.sh
|
UTF-8
| 342
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Build a statically linked Go binary with the BoringCrypto toolchain.
# CGO must be enabled for goboring; default it on, but honour an explicit
# caller override so misconfiguration fails loudly instead of silently.
export CGO_ENABLED=${CGO_ENABLED:-1}
export GOEXPERIMENT=boringcrypto

case "${CGO_ENABLED}" in
1)
    ;;
*)
    echo "CGO_ENABLED=${CGO_ENABLED}, should be set to 1 for static goboring compilation" >&2
    exit 1
    ;;
esac

# Trace the final build command, then replace this shell with it.
set -x
exec go build -ldflags "-linkmode=external -extldflags \"-static -Wl,--fatal-warnings\" ${GO_LDFLAGS}" "${@}"
| true
|
08ed809f69a57ba187134f299a6aacdfd820f3bd
|
Shell
|
pgreze/dotfiles
|
/sh/gnome.sh
|
UTF-8
| 2,996
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
###
### Gnome configuration
###
# Interactive cheat-sheet kept in a variable for quick reference.
gnome_how_to="
List installed schemas:
> gsettings list-schemas
Explore gsettings paths:
> dconf list /org/gnome/terminal/legacy/
"
# https://unix.stackexchange.com/a/297660
# We need to define both a schema and a path:
# ```
# GSETTINGS_SCHEMA=org.gnome.Terminal.Legacy.Keybindings
# GSETTINGS_PATH=/org/gnome/terminal/legacy/keybindings/
# SCHEMA_PATH=$GSETTINGS_SCHEMA:$GSETTINGS_PATH
# ```
# gsettings list-recursively org.gnome.Terminal.Legacy.Settings
# Set one terminal keybinding: gsettings_set_terminal_keybinding <key> <value>
alias gsettings_set_terminal_keybinding="gsettings set 'org.gnome.Terminal.Legacy.Keybindings:/org/gnome/terminal/legacy/keybindings/'"
# Alternative: dconf write /org/gnome/terminal/legacy/keybindings/$key $value
# Profile https://askubuntu.com/a/733202
# Print the UUID of the default GNOME Terminal profile.
gsettings_terminal_profile() {
  # Split declaration from assignment so a gsettings failure isn't masked.
  local profile
  profile=$(gsettings get org.gnome.Terminal.ProfilesList default)
  echo "${profile:1:-1}" # remove leading and trailing single quotes
}
# Show every available profile key, then the keys overridden in dconf for
# the default profile together with their current values.
gsettings_terminal_profile_list() {
  local profile
  profile=$(gsettings_terminal_profile)
  echo ">> Available keys:"
  gsettings list-keys "org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$profile/"
  # BUG FIX: user-facing message said "Overwriten" (typo).
  printf "\n>> Overwritten keys for /org/gnome/terminal/legacy/profiles:/:$profile/:\n"
  dconf list "/org/gnome/terminal/legacy/profiles:/:$profile/" | while read -r key; do
    printf -- "- %s: " "$key"
    dconf read "/org/gnome/terminal/legacy/profiles:/:$profile/$key"
  done
}
# Set one key on the default terminal profile:
#   gsettings_terminal_profile_set <key> <value>
alias gsettings_terminal_profile_set='gsettings set "org.gnome.Terminal.Legacy.Profile:/org/gnome/terminal/legacy/profiles:/:$(gsettings_terminal_profile)/"'
##
## Custom config
##
# Apply my preferred GNOME Terminal settings, keybindings and profile
# tweaks in one shot.
gsettings_my_terminal() {
  gsettings set org.gnome.Terminal.Legacy.Settings confirm-close false
  gsettings set org.gnome.Terminal.Legacy.Settings new-tab-position 'last'
  gsettings set org.gnome.Terminal.Legacy.Settings new-terminal-mode 'tab'
  gsettings_set_terminal_keybinding new-tab '<Super>t'
  gsettings_set_terminal_keybinding close-tab '<Super>w'
  gsettings_set_terminal_keybinding find '<Super>F'
  gsettings_set_terminal_keybinding copy '<Super>c'
  gsettings_set_terminal_keybinding paste '<Super>v'
  gsettings_set_terminal_keybinding zoom-in '<Super>plus'
  gsettings_set_terminal_keybinding zoom-normal '<Super>0'
  gsettings_set_terminal_keybinding zoom-out '<Super>minus'
  gsettings_set_terminal_keybinding move-tab-left '<Super>braceleft'
  gsettings_set_terminal_keybinding move-tab-right '<Super>braceright'
  gsettings_set_terminal_keybinding prev-tab '<Super>bracketleft'
  gsettings_set_terminal_keybinding next-tab '<Super>bracketright'
  # Super+1..8 jump straight to that tab; Super+9 jumps to the last tab.
  for i in $(seq 1 8); do
    gsettings_set_terminal_keybinding "switch-to-tab-$i" "<Super>$i"
  done
  gsettings_set_terminal_keybinding switch-to-tab-last '<Super>9'
  gsettings_terminal_profile_set use-theme-transparency true
  gsettings_terminal_profile_set use-transparent-background true
  gsettings_terminal_profile_set background-transparency-percent 10
}
| true
|
7dc62063e24f5fccd5d021fe02df3cae558247f7
|
Shell
|
aaronjwood/ts_sdk
|
/tools/scripts/check-c-style
|
UTF-8
| 430
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright (C) 2015 Verizon. All rights reserved.
# Run the checkpatch.pl script against all the (A)dded and (M)odified
# .c, .h and .ino files in the current repo.
gittop=$(git rev-parse --show-toplevel)
checkparse=$gittop/tools/scripts/checkpatch.pl
# BUG FIX: the original pattern used an unescaped dot in ".ino$", which
# also matched names like "Xino"; egrep is deprecated in favor of grep -E.
git status -s -uno | sed 's/^.. //' | grep -E '\.(c|h|ino)$' | (
while read -r fn; do
  echo "Checking $fn:"
  "$checkparse" --file --terse --no-tree "$fn"
done )
| true
|
9df3870637a6035c129b65d2842d3ed92fc236a8
|
Shell
|
jinbinhu/CAPS-Mininet
|
/sender.sh
|
UTF-8
| 853
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactively collect network parameters, patch them into
# sender/codemod.c, then build and load the kernel module.
echo "Please enter ETH name: e.g. eth0"
read -r ETH
echo "Please enter DST_IP address: e.g. 192.168.188.133"
read -r DSTIP
echo "Please enter SRC_IP address: e.g. 192.168.188.136"
read -r SRCIP
echo "Please enter SRC_MAC address: e.g. 0x00,0x0C,0x29,0x7E,0xB1,0x35"
read -r SMAC
echo "Please enter DST_MAC address: e.g. 0x00,0x0C,0x29,0x7E,0xB1,0x35"
read -r DMAC
# Replacement lines written into codemod.c.
eth="#define ETH \"$ETH\""
dstip="#define DST_IP \"$DSTIP\""
srcip="#define SRC_IP \"$SRCIP\""
smac="unsigned char SMAC[ETH_ALEN] = {$SMAC};"
dmac="unsigned char DMAC[ETH_ALEN] = {$DMAC};"
cd sender || exit 1
sed -i "/#define ETH/c\\$eth" codemod.c
sed -i "/#define DST_IP/c\\$dstip" codemod.c
sed -i "/#define SRC_IP/c\\$srcip" codemod.c
# BUG FIX: the brackets must be escaped so sed matches the literal text
# "SMAC[ETH_ALEN]" (an unescaped [...] is a character class and never
# matched these lines), and the DMAC line must be replaced with $dmac —
# the original wrote $smac into both.
sed -i "/unsigned char SMAC\[ETH_ALEN\]/c\\$smac" codemod.c
sed -i "/unsigned char DMAC\[ETH_ALEN\]/c\\$dmac" codemod.c
sudo make
sudo insmod codemod.ko
| true
|
54503d5ee19ea39dcfc14d43da03ef423cb58889
|
Shell
|
wesbragagt/YoutubeMP3Downloader
|
/install
|
UTF-8
| 195
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# Build youtube-dw from source and install it onto the PATH.
set -e
# Clean install: drop node_modules, reinstall dependencies, build, then
# copy the bundled binary into /usr/local/bin.
rm -rf node_modules &&
npm install &&
npm run build &&
cp dist/youtube-dw /usr/local/bin/ &&
echo "Success installing youtube-dw. Run youtube-dw <url> <file_name>"
| true
|
744bbe0fea407acff1a4c7d582eb1b87cd54a292
|
Shell
|
blackb1rd/dotfiles
|
/shells/source/transmission.sh
|
UTF-8
| 783
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/sh
# Convenience wrappers around transmission-remote and the
# transmission-daemon service.
# TR_OPTS holds the auth option ("-n user:password") and must word-split
# into two separate arguments, so it is deliberately expanded unquoted
# below (the original quoted it, which passed option and argument as a
# single word that transmission-remote cannot parse).
TR_OPTS="-n transmission:transmission"
tsmdaemonstart() { sudo service transmission-daemon start;}
tsmdaemonstop() { sudo service transmission-daemon stop;}
tsmdaemonreload() { sudo service transmission-daemon reload;}
# Start torrent $1.
tsmstart() { transmission-remote ${TR_OPTS} -t "$1" -s;}
# Stop torrent $1.
# BUG FIX: the original used -s (start), making tsmstop identical to
# tsmstart; -S is transmission-remote's stop flag.
tsmstop() { transmission-remote ${TR_OPTS} -t "$1" -S;}
# Add a torrent by file/magnet/URL.
tsmadd() { transmission-remote ${TR_OPTS} -a "$1";}
# Remove each torrent id given as an argument.
tsmremove() {
  for number in "$@"
  do
    transmission-remote ${TR_OPTS} -t "$number" -r;
  done
}
tsmlist() { transmission-remote ${TR_OPTS} -l;}
tsminfo() { transmission-remote ${TR_OPTS} -t "$1" -i;}
tsmbasicstats() { transmission-remote ${TR_OPTS} -st;}
tsmfullstats() { transmission-remote ${TR_OPTS} -si;}
| true
|
ea89167c4e38ab7c3f284c40c73c3929d18bdce7
|
Shell
|
freebsd/freebsd-ports
|
/x11/budgie/files/xprofile.in
|
UTF-8
| 2,444
| 3.09375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Session profile for the Budgie desktop (FreeBSD port template; %%PREFIX%%
# is substituted at package build time). Establishes the XDG environment,
# creates the per-user XDG directories, and runs xdg-user-dirs-update.
# Use the Xorg backend
# NOTE(review): GDK_BACKEND is assigned but never exported here, and
# XDG_SESSION_TYPE is exported but never assigned — confirm both are
# handled by the surrounding session scripts.
if test "x$GDK_BACKEND" = "x"; then
  GDK_BACKEND="x11"
fi
export XDG_SESSION_TYPE
# Set $XDG_MENU_PREFIX
if test "x$XDG_MENU_PREFIX" = "x" ; then
  XDG_MENU_PREFIX="gnome-"
fi
export XDG_MENU_PREFIX
# Set $DESKTOP_SESSION
if test "x$DESKTOP_SESSION" = "x" ; then
  DESKTOP_SESSION="budgie-desktop"
fi
export DESKTOP_SESSION
# Set $XDG_CURRENT_DESKTOP
if test "x$XDG_CURRENT_DESKTOP" = "x" ; then
  XDG_CURRENT_DESKTOP="Budgie:GNOME"
fi
export XDG_CURRENT_DESKTOP
# Suppress warning about accessibility bus
#export NO_AT_BRIDGE=1
# Set $XDG_CONFIG_DIRS
if test "x$XDG_CONFIG_DIRS" = "x" ; then
  XDG_CONFIG_DIRS="%%PREFIX%%/etc/xdg:/etc/xdg"
fi
export XDG_CONFIG_DIRS
# Set $XDG_DATA_DIRS
if test "x$XDG_DATA_DIRS" = "x" ; then
  XDG_DATA_DIRS="%%PREFIX%%/share/gnome:%%PREFIX%%/share:/usr/share"
fi
export XDG_DATA_DIRS
# $XDG_CONFIG_HOME defines the base directory relative to which user-specific
# configuration files should be stored. If $XDG_CONFIG_HOME is either not set
# or empty, a default equal to $HOME/.config should be used.
if test "x$XDG_CONFIG_HOME" = "x" ; then
  XDG_CONFIG_HOME=$HOME/.config
fi
[ -d "$XDG_CONFIG_HOME" ] || mkdir "$XDG_CONFIG_HOME"
# $XDG_CACHE_HOME defines the base directory relative to which user-specific
# non-essential data files should be stored. If $XDG_CACHE_HOME is either not
# set or empty, a default equal to $HOME/.cache should be used.
if test "x$XDG_CACHE_HOME" = "x" ; then
  XDG_CACHE_HOME=$HOME/.cache
fi
[ -d "$XDG_CACHE_HOME" ] || mkdir "$XDG_CACHE_HOME"
# $XDG_DATA_HOME defines the base directory relative to which user-specific
# data files should be stored.
# NOTE(review): the two mkdir calls below use -p while the two above do
# not — probably fine since those defaults are directly under $HOME, but
# the inconsistency looks accidental.
if test "x$XDG_DATA_HOME" = "x" ; then
  XDG_DATA_HOME=$HOME/.local/share
fi
[ -d "$XDG_DATA_HOME" ] || mkdir -p "$XDG_DATA_HOME"
# $XDG_STATE_HOME defines the base directory relative to which user-specific
# state files should be stored.
if test "x$XDG_STATE_HOME" = "x" ; then
  XDG_STATE_HOME=$HOME/.local/state
fi
[ -d "$XDG_STATE_HOME" ] || mkdir -p "$XDG_STATE_HOME"
# Unlock gnome-keyring
if test -n "$DESKTOP_SESSION" ; then
  # If .xinitrc is used, uncomment the next line
  #eval $(gnome-keyring-daemon --start --components=pkc11,secrets,ssh)
  export SSH_AUTH_SOCK
fi
# Set up XDG user directores. See
# https://freedesktop.org/wiki/Software/xdg-user-dirs
if command -v xdg-user-dirs-update >/dev/null 2>&1 ; then
  xdg-user-dirs-update
fi
| true
|
2b389f258e9d84b8bf74c9728bd650f73b743015
|
Shell
|
devlights/try-linux
|
/basic/convert/bin_to_hex.sh
|
UTF-8
| 771
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Convert a binary literal to hexadecimal, demonstrated two ways.
#
# REFERENCES:
#   - https://stackoverflow.com/questions/34159451/splitting-big-block-of-binary-into-8-bit-chunks-in-bash
#   - https://unix.stackexchange.com/questions/65280/binary-to-hexadecimal-and-decimal-in-a-shell-script
#   - https://unix.stackexchange.com/questions/464808/bc-why-does-ibase-16-obase-10-ff-returns-ff-and-not-255
#   - https://www.shell-tips.com/bash/math-arithmetic-calculation/
#
# The binary string to convert (0b10101010 == 0xAA).
target=10101010
echo "TARGET: ${target}"
# Convert binary to hexadecimal with the bc command (obase must be set
# before ibase — see the third reference above).
echo '--------- with bc command ---------'
echo "obase=16; ibase=2; ${target}" | bc
# Convert binary to hexadecimal with bash arithmetic expansion: the 2#
# prefix parses the value as base 2, %X prints it as hex.
echo '--------- with bash arithmetic expasion ---------'
printf "%X\n" "$((2#${target}))"
| true
|
02ae006343064723f069ae418f4f031feed61503
|
Shell
|
doytsujin/tmux-autoreload
|
/tmux-autoreload.tmux
|
UTF-8
| 6,571
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Automatically reloads your tmux configuration files on change.
#
# Copyright 2021 Maddison Hellstrom <github.com/b0o>, MIT License.
set -Eeuo pipefail
# inherit_errexit (bash >= 4.4): command substitutions inherit set -e.
if [[ ${BASH_VERSINFO[0]} -ge 5 || (${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -ge 4) ]]; then
  shopt -s inherit_errexit
fi
# Format a copyright year span: "FROM" when FROM == TO, else "FROM-TO".
# TO defaults to the current year when omitted.
function years_from() {
  local start="$1"
  local end="${2:-$(date +%Y)}"
  if [[ "$start" == "$end" ]]; then
    printf '%s\n' "$start"
  else
    printf '%s\n' "$start-$end"
  fi
}
# Script identity: absolute path, file name, and plugin name (sans .tmux).
declare -g self prog name
self="$(realpath -e "${BASH_SOURCE[0]}")"
prog="$(basename "$self")"
name="${prog%.tmux}"
# Version / attribution constants consumed by usage() and usage_version().
declare -gr version="0.0.1"
declare -gr authors=("$(years_from 2021) Maddison Hellstrom <github.com/b0o>")
declare -gr repo_short="b0o/$name"
declare -gr repository="https://github.com/$repo_short"
declare -gr issues="https://github.com/$repo_short/issues"
declare -gr license="MIT"
declare -gr license_url="https://mit-license.org"
# Print the full help text (options, installation, configuration) to stdout.
function usage() {
  cat <<EOF
Usage: $prog [-f] [OPT...]
Automatically reloads your tmux configuration files on change.
Options
-h Display usage information.
-v Display $name version and copyright information.
-f Run in foreground (do not fork).
-k Kill the running $name instance.
-s Show $name status
Installation
To install $name with TPM (https://github.com/tmux-plugins/tpm), add the
following line to the end of your tmux configuration file:
set-option -g @plugin '$repo_short'
If you don't use a plugin manager, git clone $name to the location of your
choice and run it directly:
run-shell "/path/to/$name/$prog"
Once installed, you should be good to go unless you use non-standard
configuration file paths or want to customize how $name behaves.
Configuration file paths
If your config file is at a non-standard location or if you have multiple,
specify them in @$name-configs, separated by commas:
set-option -g @$name-configs '/path/to/configs/a.conf,/path/to/configs/b.conf'
Entrypoints
Normally, $name will source whichever file changed. If you wish to
source a specific set of files when any configuration file changes, use
@$name-entrypoints:
set-option -g @$name-entrypoints '/path/to/entrypoint.conf'
You can specify multiple entrypoints separated by commas. All entrypoints
will be sourced when any watched file changes.
Set @$name-entrypoints to 1 to use the standard tmux configuration
files as entrypoints, usually /etc/tmux.conf and ~/.tmux.conf. You can see
these files with:
tmux display-message -p "#{config_files}".
Entrypoint Notes:
- If entrypoints are configured, a changed file itself will not necessarily
be reloaded unless it's an entrypoint or is sourced by an entrypoint.
- Entrypoints will not be watched unless they're a standard tmux
configuration file like ~/.tmux.conf or are included in @$name-configs.
Other Options
@$name-quiet 0|1 (Default: 0)
If set to 1, $name will not display messages
EOF
}
# Print version, project links, license, and copyright (one line per author).
function usage_version() {
  cat <<EOF
$name v$version
Repository: $repository
Issues: $issues
License: $license ($license_url)
Copyright: ${authors[0]}
EOF
  # Additional authors are indented under the Copyright line.
  if [[ ${#authors[@]} -gt 1 ]]; then
    printf ' %s\n' "${authors[@]:1}"
  fi
}
# Show a message in every attached tmux client, unless @<name>-quiet is 1.
# All arguments are passed through to `tmux display-message`.
function display_message() {
  if [[ "$(tmux show-option -gv "@$name-quiet")" == "1" ]]; then
    return 0
  fi
  # `tmux display-message -c` is broken in v3.2a
  # https://github.com/tmux/tmux/issues/2737#issuecomment-898861216
  if [[ "$(tmux display-message -p "#{version}")" == "3.2a" ]]; then
    tmux display-message "$@"
  else
    # Target each connected client explicitly.
    while read -r client; do
      tmux display-message -c "$client" "$@"
    done < <(tmux list-clients -F '#{client_name}')
  fi
}
# List tmux's standard config files (e.g. /etc/tmux.conf, ~/.tmux.conf),
# one per line, deduplicated.
function get_base_configs() {
  tmux display-message -p "#{config_files}" | tr ',' '\n' | sort -u
}
# List the user-specified config files from @<name>-configs, one per line.
function get_user_configs() {
  tmux show-option -gv "@$name-configs" | tr ',' '\n' | sort -u
}
# Print the configured entrypoint files, one per line:
#   unset/"0" -> nothing; "1" -> tmux's standard config files;
#   anything else -> treated as a comma-separated list of paths.
function get_entrypoints() {
  local entrypoints
  entrypoints="$(tmux show-option -gv "@$name-entrypoints")"
  if [[ -z "$entrypoints" || "$entrypoints" == "0" ]]; then
    return 0
  fi
  if [[ "$entrypoints" == "1" ]]; then
    get_base_configs
  else
    echo "$entrypoints" | tr ',' '\n'
  fi
}
# Print the PID of a live instance (recorded in @<name>-pid); return 1 when
# no instance is running or the recorded PID is stale.
function get_instance() {
  local -i instance_pid
  instance_pid="$(tmux show-options -gv "@$name-pid")"
  # The ps check guards against a leftover option pointing at a dead PID.
  if [[ "$instance_pid" -gt 0 ]] && ps "$instance_pid" &>/dev/null; then
    echo "$instance_pid"
    return 0
  fi
  return 1
}
# Source the configured entrypoints — or, when none are configured, the
# files passed as arguments (the changed file) — and report the outcome
# in the tmux status line.
function reload() {
  local -a entrypoints
  mapfile -t entrypoints < <(get_entrypoints)
  if [[ ${#entrypoints[@]} -eq 0 ]]; then
    entrypoints=("$@")
  fi
  if msg="$(tmux source-file "${entrypoints[@]}")"; then
    # Success: list the reloaded files as "a.conf, b.conf".
    display_message "Reloaded $(
      printf '%s\n' "${entrypoints[@]}" | xargs -n1 basename | tr '\n' ',' | sed 's/,$/\n/; s/,/, /g'
    )"
  else
    # Failure: show the tmux error persistently (-d 0) and highlighted.
    display_message -d 0 "#[fg=white,bg=red,bold]ERROR: $msg"
  fi
}
# EXIT trap: report the exit code, kill the entr watcher (PID passed as $1
# when the trap was installed), and clear @<name>-pid. Preserves the
# original exit code.
function onexit() {
  local -i code=$?
  local -i entr_pid=$1
  display_message "$name exited with code $code" || true
  if [[ -v entr_pid && $entr_pid -gt 1 ]] && ps "$entr_pid" &>/dev/null; then
    kill "$entr_pid" || true
  fi
  # Backgrounded so a slow/absent tmux server cannot block shutdown.
  tmux set-option -gu "@$name-pid" &
  return "$code"
}
# Kill the running instance, if any; -k handler.
function kill_instance() {
  local -i instance_pid
  if instance_pid="$(get_instance)"; then
    kill "$instance_pid"
    return $?
  fi
  echo "$name -k: kill failed: no instance found" >&2
  return 1
}
# Print whether an instance is running (and its PID); -s handler.
function get_status() {
  local -i instance_pid
  if instance_pid="$(get_instance)"; then
    echo "running: $instance_pid"
    return 0
  fi
  echo "not running"
  return 1
}
# Entry point. Invoked without a recognized single flag, the script
# re-execs itself in the background with -f (daemonize); with -f it stays
# in the foreground and starts the entr watcher.
function main() {
  if ! [[ "${1:-}" =~ ^-[hvfksr]$ ]]; then
    "$self" -f "$@" &>/dev/null &
    disown
    exit $?
  fi
  local opt OPTARG
  local -i OPTIND
  while getopts "hvfksr:" opt "$@"; do
    case "$opt" in
      h)
        usage
        return 0
        ;;
      v)
        usage_version
        return 0
        ;;
      f)
        # Silently ignore -f
        ;;
      k)
        kill_instance
        return
        ;;
      s)
        get_status
        return
        ;;
      r)
        # Internal: entr re-runs the script as "$self -r <changed-file>".
        reload "$OPTARG"
        return
        ;;
      \?)
        return 1
        ;;
    esac
  done
  shift $((OPTIND - 1))
  # Only one watcher per tmux server.
  if get_instance &>/dev/null; then
    return 0
  fi
  command -v entr &>/dev/null || {
    echo "Command not found: entr" >&2
    display_message -d 0 "Failed to start $name: Command not found: entr"
    return 1
  }
  # Record this instance's PID so -k/-s (and the duplicate check) work.
  tmux set-option -g "@$name-pid" $$
  # Watch base + user config files; on any change entr runs "$self -r $file"
  # (-n non-interactive, -p postpone until the first change).
  # shellcheck disable=2016
  entr -np sh -c '"$0" -r "$1"' "$self" /_ <<<"$(printf '%s\n' "$(get_base_configs)" "$(get_user_configs)")" &
  # shellcheck disable=2064
  trap "onexit $!" EXIT
  wait
}
main "$@"
| true
|
12b630cd018669b137fcbd9d37ac2cc43cb040d1
|
Shell
|
AGMahmoud/wally
|
/Project/toolbox/src/mergepdfs.sh
|
UTF-8
| 1,319
| 4.21875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Function: Merge PDF files with outlines
# Input:
# 1. PATH: path that contains PDF files which should be name like "1.2 Test Tile"
# Output: generate a merged PDF file with outline
# Input check
if [ $# -lt 1 ] || [ $# -gt 2 ]
then
  echo "Error: wrong number of arguments"
  echo "Usage: merger_pdf_with_outline.sh path"
  # BUG FIX: signal the usage error (the bare `exit` returned 0).
  exit 1
fi
path=$1
cd "$path" || exit 1
# Replace whiteblank in the filename with _, This should not be considered here.
# Print the number of pages of a PDF file (empty output for non-PDFs).
pdf_pages(){
  file=$1
  extension=${file##*\.}
  if [ "$extension" == "pdf" ]
  then
    pdf_info=$(pdfinfo ./"$file" | grep Pages)
    num=${pdf_info##*\ }
    echo "$num"
  fi
}
# Generate outline: one "<level> <page> <chapter> <headline>" line per PDF.
declare -i page=1  # first page of the current file within the merged PDF
declare -i level=1 # current headline level
# BUG FIX: truncate instead of `touch` — a stale outline file from a
# previous run would otherwise have new entries appended to it.
: > outline
# BUG FIX: iterate a glob instead of parsing `ls | grep` (breaks on
# whitespace/special characters).
for file in [0-9]*.[0-9]_*.pdf
do
  chapter=${file%%_*}
  # "x.0" chapters are top-level headings; everything else is level 2.
  if [ "${chapter##*\.}" == 0 ]
  then
    level=1
  else
    level=2
  fi
  headline=${file#*_}
  echo "$level $page $chapter $headline" >> outline
  page_num=$(pdf_pages "$file")
  # Default to 0 so a pdfinfo failure cannot break the arithmetic.
  page=$((page + ${page_num:-0}))
done
# merge using gs
gs -q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=out.pdf *.pdf
# generate outline for the merged PDF file
pdfoutline out.pdf outline output.pdf
| true
|
791b1500f36aafa76d6d5744b856611ef5555d48
|
Shell
|
tiansen/kuryr-libnetwork
|
/contrib/vagrant/vagrant.sh
|
UTF-8
| 225
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Vagrant provisioning entry: run the devstack setup, then wire up the
# kuryr environment for the vagrant user.
export OS_USER=vagrant
export OS_HOST_IP=172.68.5.10
# run script
bash /vagrant/devstack.sh "$1"
#set environment variables for kuryr
su "$OS_USER" -c "echo 'source /vagrant/config/kuryr_rc' >> ~/.bash_profile"
| true
|
5b5945f70a2759d206cf5ab6a227368c6f709a42
|
Shell
|
repus/dot
|
/bundles/dsouza@work/autorandr/dist/.bash.d/bin/autorandr.sh
|
UTF-8
| 482
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# Poll the HDMI1 output every 2.5 seconds and reconfigure the displays
# with xrandr whenever its connection state changes.
# True when xrandr reports HDMI1 as connected.
hdmi1_enabled () {
  xrandr -q | grep HDMI1 | grep -qv disconnected
}
# Dual-head layout: laptop panel (eDP1) to the left of HDMI1.
hdmi1_connect () {
  xrandr --output eDP1 --auto --left-of HDMI1 --output HDMI1 --auto
}
# Laptop panel only; switch HDMI1 off.
hdmi1_disconnect () {
  xrandr --output eDP1 --auto --output HDMI1 --off
}
# 0 = connected, 1 = disconnected, -1 = unknown (forces an initial
# configuration on the first poll).
hdmi1_state=-1
while sleep 2.5
do
  if hdmi1_enabled
  then
    # Only reconfigure on a state transition, not on every poll.
    if [ "$hdmi1_state" -ne 0 ]
    then hdmi1_connect; fi
    hdmi1_state=0
  else
    if [ "$hdmi1_state" -ne 1 ]
    then hdmi1_disconnect; fi
    hdmi1_state=1
  fi
done
| true
|
856980d19e4a941dd0c3868758de377747dbac4f
|
Shell
|
andresvanegas19/AirBnB_clone_v2
|
/0-setup_web_static.sh
|
UTF-8
| 601
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# sets up your web servers for the deployment of web_static
# BUG FIX: the original tested `which nginex` (typo), which never succeeds,
# so nginx was reinstalled on every run; command -v is the portable check.
if ! command -v nginx > /dev/null 2>&1
then
  apt-get update && apt-get install -y nginx
fi
# Directory skeleton plus a test release pointed at by the "current" link.
mkdir -p /data/web_static/shared/
mkdir -p /data/web_static/releases/test/
rm -rf /data/web_static/current
echo "test Nginx configuration" > /data/web_static/releases/test/index.html
ln -s /data/web_static/releases/test/ /data/web_static/current
chown -R ubuntu:ubuntu /data/
# Serve /hbnb_static from the current release: insert a location block at
# line 40 of the default nginx site.
location="\\n\tlocation \/hbnb_static/ {\n\t\talias /data/web_static/current/;\n\t}\n"
sed -i "40i\ $location" /etc/nginx/sites-available/default
sudo /etc/init.d/nginx start
| true
|
28b8734032b56c327c0576d14dd2b9356ff9d4ea
|
Shell
|
NelsonJyostna/EmpWage
|
/employeecheck.sh
|
UTF-8
| 167
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Simulate a daily attendance check: a coin flip (RANDOM % 2) is compared
# against isPresent (1) to decide whether the employee turned up.
isPresent=1
randomCheck=$(( RANDOM%2 ));
if [ "$isPresent" -eq "$randomCheck" ];
then
  echo "Employee present"
else
  # BUG FIX: both branches printed a "not present" message (the else branch
  # was the truncated " not present"), so the present case was never
  # reported.
  echo "Employee not present"
fi
| true
|
fae3b301ed7bff16150801d5a993a3dd6f834da6
|
Shell
|
dugrema/millegrilles.consignation.grosfichiers
|
/docker-run.sh
|
UTF-8
| 970
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the grosfichiers consignation container with dev certificates and
# MQ connection settings for a local MilleGrilles dev environment.
# Provides REPO/NAME/VERSION used to build the image tag.
source image_info.txt
ARCH=`uname -m`
# Override version (e.g. to use x86_64_...)
# VERSION=x86_64_1.29.3
IMAGE_DOCKER=$REPO/${NAME}:${ARCH}_${VERSION}
echo Image docker : $IMAGE_DOCKER
# MQ
export HOST=mg-dev4.maple.maceroc.com
export HOST_MQ=mg-dev4
CERT_FOLDER=/home/mathieu/mgdev/certs
# TLS material paths as seen *inside* the container (mounted from
# $CERT_FOLDER below).
export MG_MQ_CAFILE=/certs/pki.millegrille.cert
export MG_MQ_CERTFILE=/certs/pki.fichiers.cert
export MG_MQ_KEYFILE=/certs/pki.fichiers.key
export MG_MQ_URL=amqps://$HOST_MQ:5673
export PORT=3021
# export DEBUG=millegrilles:fichiers:transformationsVideo,millegrilles:messages:*,millegrilles:fichiers:traitementMedia
export DEBUG=millegrilles:fichiers:*
# Interactive, auto-removing container on the host network; certificates
# and the consignation data directory are bind-mounted, config via -e.
docker run --rm -it \
  --network host \
  -v /home/mathieu/mgdev/certs:/certs \
  -v /var/opt/millegrilles/consignation/grosfichiers:/var/opt/millegrilles/consignation/grosfichiers \
  -e MG_MQ_CAFILE -e MG_MQ_CERTFILE -e MG_MQ_KEYFILE \
  -e MG_MQ_URL -e HOST -e PORT \
  -e DEBUG \
  $IMAGE_DOCKER
| true
|
b062346bc4f204ce44e55beda59f276d072627db
|
Shell
|
jeeb/MPGDec
|
/config.sh
|
UTF-8
| 527
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/sh
# Generate src/config.h: define DGMPGDEC_GIT_VERSION from `git describe`
# when building from a tagged git checkout, otherwise undefine it.
# With --clean, always emit the #undef.
version=""
clean="no"
config_h="./src/config.h"
# Check options
for opt do
  case "$opt" in
    --clean)
      clean="yes"
      ;;
  esac
done
# Output config.h
if test "$clean" = "yes" ; then
  cat > "$config_h" << EOF
#undef DGMPGDEC_GIT_VERSION
EOF
else
  if [ -d ".git" ] && [ -n "$(git tag)" ]; then
    version="$(git describe --tags)"
    echo "$version"
    # BUG FIX: the original interpolated the misspelled variable
    # "$verision", which always expanded empty, so the version macro was
    # defined as "".
    echo "#define DGMPGDEC_GIT_VERSION \"$version\"" > "$config_h"
  else
    echo "#undef DGMPGDEC_GIT_VERSION" > "$config_h"
  fi
fi
| true
|
16e140de53895874f5122dd7aa664e44d208c478
|
Shell
|
cmccandless/ExercismSolutions-bash
|
/bob/bob.sh
|
UTF-8
| 623
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -o errexit
set -o nounset

# Emit the marker '!Y' when the phrase is shouted: it contains at least
# one letter and is entirely upper-case.
yelling()
{
  if [[ "$1" =~ [[:alpha:]] && "$1" == "${1^^}" ]]; then
    echo '!Y'
  fi
}

# Emit the marker '!A' when the phrase ends with a question mark.
asking()
{
  if [[ "$1" == *\? ]]; then
    echo '!A'
  fi
}

# Bob replies according to how he is addressed: prefix the input with the
# yelling/asking markers and dispatch on the leading markers.
main() {
  local phrase="${1:-}"
  # Strip trailing whitespace before classifying.
  phrase="${phrase%"${phrase##*[![:space:]]}"}"
  local tagged
  tagged="$(yelling "$phrase")$(asking "$phrase")$phrase"
  if [[ "$tagged" == '!Y!A'* ]]; then
    echo "Calm down, I know what I'm doing!"
  elif [[ "$tagged" == '!Y'* ]]; then
    echo "Whoa, chill out!"
  elif [[ "$tagged" == '!A'* ]]; then
    echo "Sure."
  elif [[ -z "$tagged" ]]; then
    echo "Fine. Be that way!"
  else
    echo "Whatever."
  fi
}
main "$@"
| true
|
464f3f1c1d7cb15d771d4fd7461e59bbaae6712b
|
Shell
|
matiasozdy/randomscripts
|
/check_solr_replica.sh
|
UTF-8
| 488
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
#
### Check status replicas.
#
# Nagios-style check: compare the master ("Replicatable") and slave index
# versions from the Solr replication admin page on $1:$2; exit 0 when in
# sync, 2 with the version difference otherwise.
HOST="$1"
PORT="$2"
URL="http://${HOST}:${PORT}/solr/admin/replication/index.jsp"
# NOTE(review): if curl fails or the page layout changes, MASTER/SLAVE end
# up empty, the numeric -eq test errors out and the else branch runs —
# confirm exiting 2 is the intended behavior in that case.
MASTER=`curl -s ${URL} | grep "Replicatable Index Version" | awk {'print $5'}`
SLAVE=`curl -s ${URL} | grep "Index Version" | grep -v '<td>' | awk {'print $5'}`
#printf "Master ${MASTER}\n"
#printf "Slave ${SLAVE}\n"
if [ "${MASTER}" -eq "${SLAVE}" ]; then
  echo "Replicacion OK"
  exit 0
else
  echo "Diff de replicacion `expr ${MASTER} - ${SLAVE}`"
  exit 2
fi
| true
|
113f93eae77746e7b2b0c7e4846d34eae4722f0c
|
Shell
|
bendavieshe3/mac-scripts
|
/fby.sh
|
UTF-8
| 971
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# fby — "file by year": move files whose names contain a likely 4-digit
# year into a sibling folder named after that year.
#required for processing files with spaces
#change default file separator
SAVEIFS=$IFS
IFS=$(echo -en "\n\b")
# Optional argument: a path (or glob) to process; default is the cwd.
if [ "$#" -gt 0 ]; then
  fbypath="$1"
  files="$1"
else
  fbypath="./"
  files="./*"
fi
# Matches 10xx/19xx/20xx/29xx — close enough to "a likely year".
regex=[12]{1}[09]{1}[0-9]{2}
for f in $files
do
  #check to see if filename contains a likely year
  if [[ $f =~ $regex ]]; then
    #find year and determine the year folder
    year=$BASH_REMATCH
    yearFolder=$fbypath/$year
    filename=${f##*/}
    echo "Processing $f - found $year"
    #make sure this is not a year filing directory
    if [ ! -d "$f" ] || [[ ! $filename =~ ^$regex ]]; then
      echo '-- not a file directory'
      if [ ! -d "$yearFolder" ]; then
        echo '-- making folder'
        mkdir "$yearFolder"
      fi
      # BUG FIX: the existence check used "$yearFolder/$f", but $f can
      # carry a leading directory path; the actual move target is
      # "$yearFolder/$filename". Also use -e instead of the obsolete -a.
      if [ ! -e "$yearFolder/$filename" ]; then
        echo "-- moving $filename"
        mv "$f" "$yearFolder/$filename"
      fi
    else
      echo "-- is a directory"
    fi
  else
    echo "Processing $f - no match"
  fi
done
#restore default file separator
IFS=$SAVEIFS
| true
|
790d2842c21cfa7e91c90f57875470d366d0942d
|
Shell
|
cha63506/ATP
|
/scripts/localservers.sh
|
UTF-8
| 290
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Spawn local tcphub instances detached from the terminal: one "text" hub
# per port in the first range, one "atp" hub per port in the second.

TEXT_FIRST=1298
TEXT_LAST=1300
ATP_FIRST=1301
ATP_LAST=1305

for (( port = TEXT_FIRST; port <= TEXT_LAST; port++ )); do
  setsid tcphub "$port" text </dev/null >/dev/null 2>&1 &
done

for (( port = ATP_FIRST; port <= ATP_LAST; port++ )); do
  setsid tcphub "$port" atp </dev/null >/dev/null 2>&1 &
done
| true
|
c08c08cc871559b817d5dfe7e7f88717ce9ece25
|
Shell
|
mikerovers/handydockercommands
|
/bash/dockerremovecontainers
|
UTF-8
| 761
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Remove every Docker container on this host. If some containers are still
# running, offer to stop them all first (delegated to a sibling script).

running=$(docker ps -q | wc -l)
total=$(docker ps -aq | wc -l)
choices=("1: yes" "2: no")

# Nothing to do when no containers exist at all.
if [ "$total" -eq 0 ]; then
  echo "There are no containers to remove."
  exit
fi

if [ "$running" -gt 0 ]; then
  echo "There are still containers running."
  echo "Do you want to stop all running containers?"
  select answer in "${choices[@]}"; do
    case $answer in
      "1: yes")
        ./dockerstopallcontainers
        break
        ;;
      "2: no")
        exit 0
        ;;
      *)
        echo "Invalid options choose yes or no"
        ;;
    esac
  done
fi

echo "Removing all docker containers."
docker rm $(docker ps -aq)
exit 0
| true
|
e9e197e7c7c1023bff6a85ea99ef694c855373d6
|
Shell
|
neobuddy89/NX-initramfs
|
/sbin/ext/post-init.sh
|
UTF-8
| 11,123
| 3.28125
| 3
|
[] |
no_license
|
#!/sbin/busybox sh
# post-init.sh — runs late in boot to tune the system, load optional kernel
# modules, and apply the user's STweaks/NXTweaks profile settings.
BB=/sbin/busybox
# Remount root and /system read-write so the tweaks below can modify them.
$BB mount -t rootfs -o remount,rw rootfs;
$BB mount -o remount,rw /system;
# first mod the partitions then boot
$BB sh /sbin/ext/system_tune_on_init.sh;
# Profile/settings storage used by the STweaks app.
if [ ! -d /data/.siyah ]; then
$BB mkdir -p /data/.siyah;
fi;
# reset stale config-backup-restore marker from a previous boot
if [ -f /data/.siyah/restore_running ]; then
rm -f /data/.siyah/restore_running;
fi;
# set sysrq to 2 = enable control of console logging level
echo "2" > /proc/sys/kernel/sysrq;
# If the bundled config definition changed (checksum mismatch with the one
# recorded last boot), drop all saved profiles so they are reseeded below.
ccxmlsum=`md5sum /res/customconfig/customconfig.xml | awk '{print $1}'`
if [ "a$ccxmlsum" != "a`cat /data/.siyah/.ccxmlsum`" ]; then
rm -f /data/.siyah/*.profile;
echo "$ccxmlsum" > /data/.siyah/.ccxmlsum;
fi;
# Seed any missing profiles from the bundled defaults.
[ ! -f /data/.siyah/default.profile ] && cp -a /res/customconfig/default.profile /data/.siyah/default.profile;
[ ! -f /data/.siyah/battery.profile ] && cp -a /res/customconfig/battery.profile /data/.siyah/battery.profile;
[ ! -f /data/.siyah/performance.profile ] && cp -a /res/customconfig/performance.profile /data/.siyah/performance.profile;
[ ! -f /data/.siyah/extreme_performance.profile ] && cp -a /res/customconfig/extreme_performance.profile /data/.siyah/extreme_performance.profile;
[ ! -f /data/.siyah/extreme_battery.profile ] && cp -a /res/customconfig/extreme_battery.profile /data/.siyah/extreme_battery.profile;
$BB chmod -R 0777 /data/.siyah/;
# Source helper functions, then load default + user settings into shell
# variables ($mdniemod, $scenario, the *_module switches, etc.).
. /res/customconfig/customconfig-helper;
read_defaults;
read_config;
# HACK: we have problem on boot with stuck service GoogleBackupTransport if many apps installed
# here i will rename the GoogleBackupTransport.apk to boot without it and then restore to prevent
# system not responding popup on after boot.
if [ -e /data/dalvik-cache/not_first_boot ]; then
mount -o remount,rw /system;
mv /system/app/GoogleBackupTransport.apk /system/app/GoogleBackupTransport.apk.off
fi;
(
# mdnie sharpness tweak
# Bug fix: `[ "$a" == "on" && "$b" == "on" ]` is invalid test syntax —
# `&&` cannot appear inside single brackets, so the condition always
# failed and the tweak never ran. Use two tests joined by a shell-level
# AND instead.
if [ "$mdniemod" == "on" ] && [ "$hook_intercept" == "on" ]; then
  $BB sh /sbin/ext/mdnie-sharpness-tweak.sh;
  # Marker file records that the tweak is active (holds the timestamp).
  touch /data/.nx_mdniemodon;
  date > /data/.nx_mdniemodon;
else
  # Tweak disabled: remove the marker if left over from a previous boot.
  if [ -e /data/.nx_mdniemodon ]; then
    rm /data/.nx_mdniemodon;
  fi
fi;
# Give the display stack time to come up before applying mDNIe settings.
sleep 30;
echo "$scenario" > /sys/class/mdnie/mdnie/scenario;
echo "$mode" > /sys/class/mdnie/mdnie/mode;
)&
# dual core hotplug: enable hotplug control of the second core instead of
# forcing it permanently on.
echo "on" > /sys/devices/virtual/misc/second_core/hotplug_on;
echo "off" > /sys/devices/virtual/misc/second_core/second_core_on;
######################################
# Loading Modules
######################################
# Module files must be readable before the insmod calls below.
$BB chmod -R 755 /lib;
(
# Optional kernel modules are loaded in two passes roughly 50s apart:
# pass 1 shortly after boot, pass 2 as a retry for anything that failed
# while the system was still settling. Both passes were previously
# duplicated inline; they now share one helper. Load order matters, so the
# helper preserves the original sequence exactly.
load_optional_modules() {
  if [ "$j4fs_module" == "on" ]; then
    echo "Loading J4FS Module" >> /data/nx_modules.log;
    $BB insmod /lib/modules/j4fs.ko >> /data/nx_modules.log 2>&1;
    $BB mount -t j4fs /dev/block/mmcblk0p4 /mnt/.lfs
  fi;
  echo "Loading Si4709 Module" >> /data/nx_modules.log;
  $BB insmod /lib/modules/Si4709_driver.ko >> /data/nx_modules.log 2>&1;
  if [ "$usbserial_module" == "on" ]; then
    echo "Loading USB Serial Module" >> /data/nx_modules.log;
    $BB insmod /lib/modules/usbserial.ko >> /data/nx_modules.log 2>&1;
    $BB insmod /lib/modules/ftdi_sio.ko >> /data/nx_modules.log 2>&1;
    $BB insmod /lib/modules/pl2303.ko >> /data/nx_modules.log 2>&1;
  fi;
  if [ "$usbnet_module" == "on" ]; then
    echo "Loading USB Net Module" >> /data/nx_modules.log;
    $BB insmod /lib/modules/usbnet.ko >> /data/nx_modules.log 2>&1;
    $BB insmod /lib/modules/asix.ko >> /data/nx_modules.log 2>&1;
  fi;
  if [ "$cifs_module" == "on" ]; then
    echo "Loading CIFS Module" >> /data/nx_modules.log;
    $BB insmod /lib/modules/cifs.ko >> /data/nx_modules.log 2>&1;
  fi;
  if [ "$eds_module" == "on" ]; then
    echo "Loading EDS Module" >> /data/nx_modules.log;
    $BB insmod /lib/modules/eds.ko >> /data/nx_modules.log 2>&1;
  fi;
  if [ "$xpad_module" == "on" ]; then
    echo "Loading XPAD Module" >> /data/nx_modules.log;
    $BB insmod /lib/modules/ff-memless.ko >> /data/nx_modules.log 2>&1;
    $BB insmod /lib/modules/xpad.ko >> /data/nx_modules.log 2>&1;
  fi;
}
sleep 50;
$BB date > /data/nx_modules.log
echo " " >> /data/nx_modules.log;
# order of modules load is important
load_optional_modules;
# frandom is only attempted on the first pass: it replaces /dev/random and
# /dev/urandom with links to the fast generators, which must not be redone.
if [ "$frandom_module" == "on" ]; then
  echo "Loading FRANDOM Module" >> /data/nx_modules.log;
  $BB insmod /lib/modules/frandom.ko >> /data/nx_modules.log 2>&1;
  $BB chmod 644 /dev/frandom >> /data/nx_modules.log 2>&1;
  $BB chmod 644 /dev/erandom >> /data/nx_modules.log 2>&1;
  mv /dev/random /dev/random.ori >> /data/nx_modules.log 2>&1;
  mv /dev/urandom /dev/urandom.ori >> /data/nx_modules.log 2>&1;
  ln /dev/frandom /dev/random >> /data/nx_modules.log 2>&1;
  $BB chmod 644 /dev/random >> /data/nx_modules.log 2>&1;
  ln /dev/erandom /dev/urandom >> /data/nx_modules.log 2>&1;
  $BB chmod 644 /dev/urandom >> /data/nx_modules.log 2>&1;
fi;
echo "First Run completed ...." >> /data/nx_modules.log;
echo " *******************" >> /data/nx_modules.log;
sleep 50;
echo " " >> /data/nx_modules.log;
echo "Second Run starts ...." >> /data/nx_modules.log;
# order of modules load is important
load_optional_modules;
date >> /data/nx_modules.log;
echo " " >> /data/nx_modules.log;
echo " " >> /data/nx_modules.log;
echo "Loaded Modules on boot:" >> /data/nx_modules.log;
echo " " >> /data/nx_modules.log;
$BB lsmod >> /data/nx_modules.log
)&
# enable kmem interface for everyone (unrestricted kernel pointer output)
echo "0" > /proc/sys/kernel/kptr_restrict;
# Cortex parent should be ROOT/INIT and not NXTweaks
nohup /sbin/ext/cortexbrain-tune.sh;
(
# Run init.d scripts if chosen
if [ $init_d == on ]; then
$BB sh /sbin/ext/run-init-scripts.sh;
fi;
)&
# disable debugging on some modules when logging is switched off in config
if [ "$logger" == "off" ]; then
echo "0" > /sys/module/ump/parameters/ump_debug_level;
echo "0" > /sys/module/mali/parameters/mali_debug_level;
echo "0" > /sys/module/kernel/parameters/initcall_debug;
# echo "0" > /sys/module/lowmemorykiller/parameters/debug_level;
echo "0" > /sys/module/cpuidle_exynos4/parameters/log_en;
echo "0" > /sys/module/earlysuspend/parameters/debug_mask;
echo "0" > /sys/module/alarm/parameters/debug_mask;
echo "0" > /sys/module/alarm_dev/parameters/debug_mask;
echo "0" > /sys/module/binder/parameters/debug_mask;
echo "0" > /sys/module/xt_qtaguid/parameters/debug_mask;
fi;
# for ntfs automounting (world-writable tmpfs as the mount staging area)
mount -t tmpfs -o mode=0777,gid=1000 tmpfs /mnt/ntfs >> /data/.nx_ntfs 2>&1;
(
# Apps Install
$BB sh /sbin/ext/install.sh;
# EFS Backup
$BB sh /sbin/ext/efs-backup.sh;
)&
# Flag consumed by the boot-booster loop: flipped to "1" once the STweaks
# restore subshell finishes applying settings.
echo "0" > /tmp/uci_done;
chmod 666 /tmp/uci_done;
(
# custom boot booster: wait (max ~100s) for the STweaks restore to finish,
# then restore normal CPU/IO settings and warm the caches.
COUNTER=0;
while [ "`cat /tmp/uci_done`" != "1" ]; do
if [ "$COUNTER" -ge "10" ]; then
break;
fi;
# Keep the STweaks app from interfering while settings are applied.
pkill -f "com.gokhanmoral.stweaks.app";
echo "Waiting For UCI to finish";
sleep 10;
COUNTER=$(($COUNTER+1));
done;
# restore normal freq and cpu core values
echo "$scaling_min_freq" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq;
echo "$scaling_max_freq" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq;
echo "$scaling_governor" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor;
echo "$scheduler" > /sys/block/mmcblk0/queue/scheduler;
# HACK: restore GoogleBackupTransport.apk after boot (renamed earlier).
if [ -e /data/dalvik-cache/not_first_boot ]; then
mv /system/app/GoogleBackupTransport.apk.off /system/app/GoogleBackupTransport.apk
else
# First boot ever: fix permissions and drop the first-boot marker.
/sbin/fix_permissions -l -r -v > /dev/null 2>&1;
chmod 777 -R /system/etc/init.d
touch /data/dalvik-cache/not_first_boot;
chmod 777 /data/dalvik-cache/not_first_boot;
fi;
# ROOTBOX fix notification_wallpaper
if [ -e /data/data/com.aokp.romcontrol/files/notification_wallpaper.jpg ]; then
chmod 777 /data/data/com.aokp.romcontrol/files/notification_wallpaper.jpg
fi;
# Wait until the 1-minute load average drops below 3.50 before dropping
# caches, so the flush does not compete with boot work.
while [ ! `cat /proc/loadavg | cut -c1-4` \< "3.50" ]; do
echo "Waiting For CPU to cool down";
sleep 10;
done;
sync;
sysctl -w vm.drop_caches=3
sync;
sysctl -w vm.drop_caches=1
sync;
)&
(
# stop uci.sh from running all the PUSH Buttons in stweaks on boot:
# temporarily move the push-action scripts aside, run the restore, then
# move them back.
$BB mount -o remount,rw rootfs;
$BB chown -R root:system /res/customconfig/actions/;
$BB chmod -R 6755 /res/customconfig/actions/;
$BB mv /res/customconfig/actions/push-actions/* /res/no-push-on-boot/;
$BB chmod 6755 /res/no-push-on-boot/*;
# change USB mode MTP or Mass Storage
$BB sh /res/uci.sh usb-mode ${usb_mode};
# apply NXTweaks settings ("booting" marker guards against re-entry)
echo "booting" > /data/.siyah/booting;
chmod 777 /data/.siyah/booting;
pkill -f "com.gokhanmoral.stweaks.app";
nohup $BB sh /res/uci.sh restore;
# Signal the boot-booster loop that settings have been applied.
echo "1" > /tmp/uci_done;
# restore all the PUSH Button Actions back to their location
$BB mount -o remount,rw rootfs;
$BB mv /res/no-push-on-boot/* /res/customconfig/actions/push-actions/;
pkill -f "com.gokhanmoral.stweaks.app";
# update cpu tuning after profiles load (currently disabled)
# $BB sh /sbin/ext/cortexbrain-tune.sh apply_cpu update > /dev/null;
$BB rm -f /data/.siyah/booting;
# Tune every loop device queue: non-rotational, no iostats, deep queue.
DM=`ls -d /sys/block/loop*`;
for i in ${DM}; do
if [ -e $i/queue/rotational ]; then
echo "0" > ${i}/queue/rotational;
fi;
if [ -e $i/queue/iostats ]; then
echo "0" > ${i}/queue/iostats;
fi;
if [ -e $i/queue/nr_requests ]; then
echo "1024" > ${i}/queue/nr_requests;
fi;
done;
mount -o remount,rw /system;
mount -o remount,rw /;
# UI smoothness properties (scrolling cache, event rate, fling velocity).
setprop persist.sys.scrollingcache 3
setprop windowsmgr.max_events_per_sec 300
setprop ro.max.fling_velocity 12000
setprop ro.min.fling_velocity 8000
# correct oom tuning, if changed by apps/rom
$BB sh /res/uci.sh oom_config_screen_on $oom_config_screen_on;
$BB sh /res/uci.sh oom_config_screen_off $oom_config_screen_off;
# correct mDNIe mode and scenario
pkill -f "com.cyanogenmod.settings.device";
echo "$scenario" > /sys/class/mdnie/mdnie/scenario;
echo "$mode" > /sys/class/mdnie/mdnie/mode;
echo "$pwm_val" > /sys/vibrator/pwm_val;
# Boot-completion breadcrumb for debugging.
echo "Done Booting" > /data/nx-boot-check;
date >> /data/nx-boot-check;
)&
| true
|
798694341c0277e248805533805aeca2c340ec08
|
Shell
|
maxsu/antergos-packages
|
/antergos/antergos-wallpapers/PKGBUILD
|
UTF-8
| 976
| 2.8125
| 3
|
[] |
no_license
|
# Maintainer: Alexandre Filgueira <faidoc@gmail.com>
# PKGBUILD for the default Antergos wallpaper set. makepkg sources this
# file and provides $srcdir and $pkgdir to the package() function.
pkgname=antergos-wallpapers
pkgver=0.7
pkgrel=1
pkgdesc='The default wallpapers for Antergos'
arch=('any')
url='http://www.antergos.com'
license=('CCPL:by-nc-sa')
source=("http://antergos.com/${pkgname}-${pkgver}.zip")
md5sums=('cd09fe9d1dd72af28ba334f9e63e6b95')
# Install the wallpaper images plus the GNOME and Cinnamon background
# descriptor XML files into the package staging directory.
package() {
cd "${srcdir}/${pkgname}-${pkgver}"
install -d "${pkgdir}/usr/share/antergos/wallpapers"
install -m644 *.jpg "${pkgdir}/usr/share/antergos/wallpapers/"
install -m644 *.png "${pkgdir}/usr/share/antergos/wallpapers/"
install -d "${pkgdir}/usr/share/gnome-background-properties"
install -m644 antergos-backgrounds-4-3.xml "${pkgdir}/usr/share/gnome-background-properties/"
# Cinnamon: reuse the GNOME descriptor with its wp-list element renamed.
_bg_path="${pkgdir}/usr/share/cinnamon-background-properties/antergos.xml"
install -d "${pkgdir}/usr/share/cinnamon-background-properties"
cp --no-preserve=ownership antergos-backgrounds-4-3.xml "${_bg_path}"
sed -i 's|gnome-wp-list|cinnamon-wp-list|g' "${_bg_path}"
}
| true
|
bf5da2e0005bae8311e09dce0a001d35dd6da718
|
Shell
|
forty/play.it
|
/play.it-2/src/icons.sh
|
UTF-8
| 3,005
| 3.859375
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
# extract .png or .ico icon resources out of each given file, keyed on its
# file extension (.exe via wrestool, .ico via icotool, .bmp via convert)
# USAGE: extract_icon_from $file[…]
# NEEDED VARS: PLAYIT_WORKDIR (WRESTOOL_NAME)
# CALLS: liberror
extract_icon_from() {
	for file in "$@"; do
		local destination="$PLAYIT_WORKDIR/icons"
		mkdir --parents "$destination"
		case "${file##*.}" in
			exe)
				# Restrict wrestool to a single named resource when requested.
				[ "$WRESTOOL_NAME" ] && WRESTOOL_OPTIONS="--name=$WRESTOOL_NAME"
				wrestool --extract --type=14 $WRESTOOL_OPTIONS --output="$destination" "$file"
				;;
			ico)
				icotool --extract --output="$destination" "$file" 2>/dev/null
				;;
			bmp)
				local filename="${file##*/}"
				convert "$file" "$destination/${filename%.bmp}.png"
				;;
			*)
				liberror 'file extension' 'extract_icon_from'
				;;
		esac
	done
}
# create icons layout: move extracted PNGs from the work directory into the
# package's hicolor-style ${res}x${res}/apps tree, renamed to the app id
# USAGE: sort_icons $app[…]
# NEEDED VARS: APP_ICON_RES (APP_ID) GAME_ID PKG PKG_PATH
sort_icons() {
for app in $@; do
testvar "$app" 'APP' || liberror 'app' 'sort_icons'
# Indirect read of ${app}_ID via eval; fall back to $GAME_ID when unset.
local app_id
if [ -n "$(eval printf -- '%b' \"\$${app}_ID\")" ]; then
app_id="$(eval printf -- '%b' \"\$${app}_ID\")"
else
app_id="$GAME_ID"
fi
# ${app}_ICON_RES holds a whitespace-separated list of icon resolutions.
local icon_res="$(eval printf -- '%b' \"\$${app}_ICON_RES\")"
local pkg_path="$(eval printf -- '%b' \"\$${PKG}_PATH\")"
for res in $icon_res; do
path_icon="$PATH_ICON_BASE/${res}x${res}/apps"
mkdir --parents "${pkg_path}${path_icon}"
# Extracted files embed the resolution in their names (…WxHx<depth>…).
for file in "$PLAYIT_WORKDIR"/icons/*${res}x${res}x*.png; do
mv "$file" "${pkg_path}${path_icon}/${app_id}.png"
done
done
done
}
# extract and sort icons from given .ico or .exe file: resolves the icon
# source per app (optionally archive-specific), extracts it, and lays the
# results out in the package tree via sort_icons
# USAGE: extract_and_sort_icons_from $app[…]
# NEEDED VARS: APP_ICON APP_ICON_RES (APP_ID) GAME_ID PKG PKG_PATH PLAYIT_WORKDIR
# CALLS: extract_icon_from liberror sort_icons
extract_and_sort_icons_from() {
local app_icon
local pkg_path="$(eval printf -- '%b' \"\$${PKG}_PATH\")"
for app in $@; do
testvar "$app" 'APP' || liberror 'app' 'sort_icons'
# Prefer an archive-specific icon (${app}_ICON_<ARCHIVE>) when defined,
# exporting it as the app's icon; otherwise use the generic ${app}_ICON.
if [ "$ARCHIVE" ] && [ -n "$(eval printf -- '%b' \"\$${app}_ICON_${ARCHIVE#ARCHIVE_}\")" ]; then
app_icon="$(eval printf -- '%b' \"\$${app}_ICON_${ARCHIVE#ARCHIVE_}\")"
export ${app}_ICON="$app_icon"
else
app_icon="$(eval printf -- '%b' \"\$${app}_ICON\")"
fi
# ${app}_ICON_ID selects a specific .exe resource for wrestool, unless a
# name was already forced by the caller.
if [ ! "$WRESTOOL_NAME" ] && [ -n "$(eval printf -- '%b' \"\$${app}_ICON_ID\")" ]; then
WRESTOOL_NAME="$(eval printf -- '%b' \"\$${app}_ICON_ID\")"
fi
extract_icon_from "${pkg_path}${PATH_GAME}/$app_icon"
unset WRESTOOL_NAME
# .exe extraction yields .ico files that need a second extraction pass.
if [ "${app_icon##*.}" = 'exe' ]; then
extract_icon_from "$PLAYIT_WORKDIR/icons"/*.ico
fi
sort_icons "$app"
# Clean the scratch area so the next app starts fresh.
rm --recursive "$PLAYIT_WORKDIR/icons"
done
}
# move icons to the target package: hard-link the icon tree from the
# current package ($PKG) into $1's path, then delete the originals and any
# directories left empty
# USAGE: move_icons_to $pkg
# NEEDED VARS: PATH_ICON_BASE PKG
move_icons_to() {
	local origin
	local target
	origin="$(eval printf -- '%b' \"\$${PKG}_PATH\")"
	target="$(eval printf -- '%b' \"\$${1}_PATH\")"
	(
		cd "$origin"
		cp --link --parents --recursive "./$PATH_ICON_BASE" "$target"
		rm --recursive "./$PATH_ICON_BASE"
		rmdir --ignore-fail-on-non-empty --parents "./${PATH_ICON_BASE%/*}"
	)
}
| true
|
b31565dcc0fb1693ab72a450e3e6535a7e6b8ce2
|
Shell
|
Vanders/borg
|
/script/install.sh
|
UTF-8
| 6,468
| 3.171875
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
#
# ___________
# /-/_'/-/_/-/| B O R G
# /'-/-_'/-_//|| << https://github.com/agh/borg >>
# /__________/|/|
# |'|_'='-]:+|/|| Sit back and relax, human. Resistance is futile.
# |-+-|.|_'-'||// This should be a painless process and before long this
# |['.[:!+-'=|// system will be a functional part of the collective.
# |='!+|-:]|-|/
# ----------
#
# Author: Alex Howells <alex@howells.me>
# Copyright (c) 2014, Aeode Ltd <hello@aeode.com>
# Please see the LICENSE file in the repository root for further information.
#
# Print the given message in bold; degrades to plain text when the
# terminal has no attribute support (tput produces no output).
function bold() {
  local on off
  on=$(tput bold)
  off=$(tput sgr0)
  printf '%s%s%s\n' "${on}" "$1" "${off}"
}
# Print a top-level progress message, prefixed with "==> ".
function notify() {
  printf '==> %s\n' "$1"
}
# Print an indented sub-step message (four leading spaces).
function subnotify() {
  printf '    %s\n' "$1"
}
# Banner (heredoc content is data — tput substitutions colorize it).
cat << __EOF__
$(tput setaf 4) ___________
$(tput setaf 4) /-/_'/-/_/-/| $(tput setaf 1) B O R G
$(tput setaf 4) /'-/-_'/-_//|| $(tput setaf 1) << https://github.com/agh/borg >>
$(tput setaf 4) /__________/|/| $(tput setaf 2)
$(tput setaf 4) |'|_'='-]:+|/|| $(tput setaf 2) Sit back and relax, human. Resistance is futile.
$(tput setaf 4) |-+-|.|_'-'||// $(tput setaf 2) This should be a painless process and before long this
$(tput setaf 4) |['.[:!+-'=|// $(tput setaf 2) system will be a functional part of the collective.
$(tput setaf 4) |='!+|-:]|-|/
$(tput setaf 4) ----------
$(tput sgr0)
__EOF__
bold " Ready to get started? We'll have you ready to assimilate in just a tick."
bold " We need your password so packages that require sudo can be installed."
# Ensure that sudo isn't going to prompt for a password in a weird place.
sudo -k && sudo -p " Password: " -l 2>&1 >/dev/null && echo "" || exit 1
#
# Extend sudo tickets to last for 30 minutes.
# The marker file /private/etc/sudoers.borg records that the sudoers tweak
# was already appended, so re-runs don't duplicate it.
#
if [ -f /private/etc/sudoers.borg ]; then
notify '👍 Looks like sudo is already hot to trot with Borg!'
else
notify 'Extending the sudo timeout, or installs will error out.'
sudo cat << __EOF__ | sudo tee -a /private/etc/sudoers >/dev/null 2>&1

# Borg - Facilitates installs without sudo ticket expiring and causing errors.
Defaults !fqdn,insults,!lecture,timestamp_timeout=30
__EOF__
# Making sure we have a ticket good for 30 minutes.
sudo -v
sudo touch /private/etc/sudoers.borg
fi
#
# Xcode Command Line Tools
# Poll for up to 30 minutes (360 * 5s) while the GUI installer runs.
#
if ! /usr/bin/xcode-select -p >/dev/null 2>&1; then
notify 'Installing the Xcode Command Line Tools. When this completes we shall continue.'
xcode-select --install
for i in `seq 1 360`; do
if ! /usr/bin/xcode-select -p >/dev/null 2>&1; then
sleep 5
else
break
fi
done
else
notify '👍 Found the Xcode Command Line Tools!'
fi
# Sanity check on CLT install completion...
sleep 2
/usr/bin/xcode-select -p >/dev/null 2>&1 || exit 17
#
# ChefDK
# Install/upgrade only when chef-solo is missing or reports the wrong
# version; otherwise the nested tests short-circuit to the "found" notice.
#
chefdk_version="0.3.2-1"
(test -x /opt/chefdk/bin/chef-solo &&
(/opt/chefdk/bin/chef --version | grep 0.3.2 2>&1 >/dev/null &&
(notify "👍 Found the Chef Development Kit!"))) || {
# If we got this far, then we need to install or upgrade ChefDK.
notify "Installing the Chef Development Kit (${chefdk_version})"
# This naively assumes we are running on OSX.
platform="mac_os_x"
release="10.8"
# Perform the download.
fname="chefdk-${chefdk_version}.dmg"
url="https://opscode-omnibus-packages.s3.amazonaws.com/$platform/$release/x86_64/$fname"
subnotify "Downloading ${url}..."
cd /tmp && curl -L -O $url
# Perform the install: mount the dmg, run the pkg installer, detach and
# remove the downloaded image.
test "$platform" = "mac_os_x" && {
hdiutil attach /tmp/$fname 2>&1 >/dev/null && echo "mounted"
sudo installer -package "/Volumes/Chef Development Kit/chefdk-${chefdk_version}.pkg" -target /
hdiutil detach "/Volumes/Chef Development Kit" 2>&1 >/dev/null && rm /tmp/$fname
}
}
#
# Homebrew
# A .git directory under /usr/local is taken as evidence of an install.
#
if [ ! -d /usr/local/.git ]; then
notify "🚄 Doesn't look like you have Homebrew. I'll clone that down for you!"
sudo rm -rf /usr/local
sudo git clone https://github.com/mxcl/homebrew /usr/local
else
notify "👍 Looks like you already have Homebrew installed!"
fi
if [ ! -d /opt/homebrew-cask ]; then
notify "👎 I can't find the necessary directories for Homebrew. Gonna help you create them."
sudo mkdir -p /Library/Caches/Homebrew /opt/homebrew-cask
else
notify "👍 Looks like all the extra directories for Homebrew exist!"
fi
# Homebrew expects the invoking user (not root) to own these trees.
if [ $(stat -f "%Su" /usr/local) != "$USER" ]; then
notify "😠 Permissions on /usr/local look wrong. Gonna help you fix that."
sudo chown -R $USER:admin /usr/local /Library/Caches/Homebrew /opt/homebrew-cask
sudo chmod 775 /usr/local /Library/Caches/Homebrew /opt/homebrew-cask
else
notify "👍 Permissions on necessary directories all look good!"
fi
#
# Borg
#
# Bug fix: BORG_ROOT is needed by the symlink loop below on EVERY run, but
# it was previously only assigned inside the first-install branch — so
# re-running the installer with ~/.borg already present (and a missing
# /usr/local/bin/borg) created symlinks pointing at "/src/script/borg".
# Define it unconditionally.
BORG_ROOT="${HOME}/.borg"

if [ ! -d "$BORG_ROOT" ]; then
  notify "👎 Looks like we need to clone the rest of Borg. I'll do that now!"

  # Let's make some directories to get the basics installed.
  BORG_DIRECTORIES="logs src"
  for directory in $BORG_DIRECTORIES; do
    mkdir -p "$BORG_ROOT/$directory"
  done

  # Pull down source from GitHub for the first time.
  git clone https://github.com/agh/borg "$BORG_ROOT/src"
else
  notify "👍 I've found Borg and thus you can 'borg update' to get newer code."
fi

# Symlinks so that the Borg tools appear in PATH.
BORG_TOOLS="borg"
for tool in $BORG_TOOLS; do
  if [ ! -f "/usr/local/bin/$tool" ]; then
    ln -sf "$BORG_ROOT/src/script/$tool" "/usr/local/bin/$tool"
  fi
done
#
# Validation
# Also known as 'checks and balances', or making sure everything worked!
# Each check exits non-zero so callers can detect a broken install.
#
type chef-solo >/dev/null 2>&1 || {
echo >&2 "Expected to find chef-solo but it is missing. Bummer!"
exit 1
}
type berks >/dev/null 2>&1 || {
echo >&2 "Expected to find Berkshelf but it is missing. Bummer!"
exit 1
}
# Homebrew directories must be owned by the current (non-root) user.
if [ $(stat -f "%Su" /usr/local) != "$USER" ]; then
echo >&2 "Expected your user to own /usr/local, and you need to fix this!"
exit 1
fi
if [ ! -d /opt/homebrew-cask ] || [ $(stat -f "%Su" /opt/homebrew-cask) != "$USER" ]; then
echo >&2 "Expected your user to own /opt/homebrew-cask, and you need to fix this!"
exit 1
fi
type borg >/dev/null 2>&1 || {
echo >&2 "Expected to find Borg but it is missing. Bummer!"
exit 1
}
# Final success banner (heredoc content is data).
cat << __EOF__

$(bold "System is now prepared for assimilation. Type 'borg help' for info.")

__EOF__
| true
|
534e98258eeaa6fb6a41035b579523c1382d6980
|
Shell
|
OpenMandrivaSoftware/docker-abf
|
/abf-genhdlists/entrypoint.sh
|
UTF-8
| 2,308
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/sh
# Modified by tpgxyz@gmail.com
# Modified by bero@lindev.ch
# Call this script with path where createrepo_c will run
# and call with regenerate paramater to build repodata
# from scratch.
# run_createrepo REPOSITORY [regenerate]
#   Rebuild urpmi media_info metadata for one repository directory.
#   With the literal second argument "regenerate" — or when no media_info
#   exists yet — metadata is rebuilt from scratch; otherwise it is merged/
#   updated, and a failed update falls back to a from-scratch rebuild.
#   Exits the whole script (status 1) when REPOSITORY does not exist.
run_createrepo() {
  REPOSITORY="$1"
  REGENERATE="$2"
  [ ! -d "${REPOSITORY}" ] && printf '%s\n' "Directory ${REPOSITORY} does not exist. Exiting." && exit 1
  printf '%s\n' "Starting regenerating repodata in ${REPOSITORY}"
  # Consistency fix: test the captured ${REGENERATE} instead of raw "$2".
  if [ ! -e "${REPOSITORY}"/media_info ] || [ "${REGENERATE}" = 'regenerate' ]; then
    printf '%s\n' "Regenerating repodata from scratch in ${REPOSITORY}"
    rm -rf "${REPOSITORY}"/media_info
    /usr/bin/genhdlist2 -v --clean --nolock --allow-empty-media --versioned --xml-info --xml-info-filter='.lzma:lzma -0 --text' --no-hdlist "${REPOSITORY}"
    rc=$?
  else
    printf '%s\n' "Regenerating and updating media_info in ${REPOSITORY}"
    XZ_OPT="-7 -T0" /usr/bin/genhdlist2 -v --nolock --allow-empty-media --versioned --synthesis-filter='.cz:xz -7 -T0' --xml-info --xml-info-filter='.lzma:xz -7 -T0' --no-hdlist --merge --no-bad-rpm "${REPOSITORY}"
    rc=$?
    if [ "${rc}" != '0' ]; then
      printf '%s\n' "Failed updating repodata in ${REPOSITORY}, trying regeneration from scratch"
      run_createrepo "${REPOSITORY}" "regenerate"
      return
    fi
  fi
  if [ "${rc}" != '0' ]; then
    printf '%s\n' "Failed regenerating repodata in ${REPOSITORY}"
  else
    printf '%s\n' "Finished regenerating repodata in ${REPOSITORY}"
  fi
  # Normalize ownership/permissions of the generated metadata.
  # Fix: quote the $(stat ...) results — an empty result (stat failure)
  # previously produced a broken unary test.
  if [ -e "${REPOSITORY}"/media_info ]; then
    [ "$(stat -c "%U" "${REPOSITORY}"/media_info)" != 'root' ] && chown root:root "${REPOSITORY}"/media_info
    [ "$(stat -c "%a" "${REPOSITORY}"/media_info)" != '755' ] && chmod 0755 "${REPOSITORY}"/media_info
  fi
}
# With arguments: regenerate a single repository ($1, optional "regenerate"
# mode in $2). Without arguments: walk the full 3.0 platform tree —
# every arch x section x channel combination.
if [ -n "$1" ]; then
run_createrepo "$1" "$2"
else
REPOSITORY="/share/platforms/3.0/repository"
[ ! -d "${REPOSITORY}" ] && printf '%s\n' "Directory ${REPOSITORY} does not exist. Exiting." && exit 1
for i in i586 x86_64 SRPMS; do
for j in main contrib non-free restricted debug_main debug_contrib debug_non-free debug_restricted; do
for k in release updates testing; do
run_createrepo "${REPOSITORY}/${i}/${j}/${k}"
done
done
done
fi
exit 0
| true
|
8467c1fd24a033556957e6a1520c9d82be99def3
|
Shell
|
dtlnor/COMP3211
|
/run.sh
|
UTF-8
| 1,034
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Fault-localization harness: generate test cases, run an instrumented
# buggy implementation and a reference implementation on each case, and
# collect per-case gcov coverage for later Tarantula/CrossTab scoring.

# gen_gcov A B C D E — run both implementations on one 5-field test case
# and record coverage of the buggy one. Arguments are passed unquoted on
# purpose (empty fields collapse, matching the original CSV handling).
gen_gcov() {
gcc bugAlgo.c -fprofile-arcs -ftest-coverage -o bugAlgo
./bugAlgo $1 $2 $3 $4 $5 > bugAlgoFile.csv
bugVar=$(cat bugAlgoFile.csv)
gcc trueAlgo.c -o trueAlgo
./trueAlgo $1 $2 $3 $4 $5 > trueAlgoFile.csv
trueVar=$(cat trueAlgoFile.csv)
# Emit bugAlgo.c.gcov for this run, then fold pass/fail + coverage into
# the accumulated result files via the gen_coverage helper.
gcov bugAlgo.c
gcc gen_coverage.c -o gen_coverage
./gen_coverage "${bugVar[@]}" "${trueVar[@]}"
#nano bugAlgo.c.gcov
}
#main
# Start from a clean slate: remove artifacts from any previous run.
rm -rf testcase.csv bugAlgoResult.csv trueAlgoResult.csv coverageResult.csv TarantulaResult.csv CrossTabResult.csv
#gen testcase
gcc gen_testCase.c -o gen_testCase
./gen_testCase
input="./testcase.csv"
# Read one comma-separated test case per line and feed it to gen_gcov.
while IFS= read -r var
do
IFS=',' # , is set as delimiter
read -ra ADDR <<< "$var" # var is read into an array as tokens separated by IFS
gen_gcov "${ADDR[0]}" "${ADDR[1]}" "${ADDR[2]}" "${ADDR[3]}" "${ADDR[4]}"
done < "$input"
# Drop intermediate binaries/files, then run the scoring + archive steps.
rm -rf bugAlgo trueAlgo gen_testCase bugAlgo.gcda bugAlgo.gcno trueAlgoFile.csv bugAlgoFile.csv gen_coverage #bugAlgo.c.gcov
bash runDebug.sh
bash moveFile.sh
echo end
| true
|
69f0ba4ec5d547b90c6d9bd658fae184c764591d
|
Shell
|
dolsem/shell-collection
|
/oh-my-zsh/install.zsh
|
UTF-8
| 1,070
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env zsh
###########################################################################
# Script Name : install.zsh
# Description : Installs Oh My Zsh and its plugins.
# Author : Denis Semenenko
# Email : dols3m@gmail.com
# Date written: January 2019
#
# Distributed under MIT license
# Copyright (c) 2019 Denis Semenenko
###########################################################################
# ${0:a:h} is zsh for "absolute path of this script's directory".
OHMYZSH_DIR=${0:a:h}
PROJECT_DIR="${OHMYZSH_DIR}/.."
SCRIPT_URL="https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh"
# Helpers: escape_fslash (string.bash) and dim (term.bash).
source ${PROJECT_DIR}/utils/string.bash
source ${PROJECT_DIR}/utils/term.bash
# Decide what the freshly-started zsh should run: either hand control back
# to the main installer (--return-control) or check/install plugins.
# Slashes are escaped because the command is spliced into a sed pattern.
if [[ $1 == "--return-control" ]]; then
ZSH_EXEC_ON_START=`escape_fslash "${PROJECT_DIR}/install.bash \"$2\" --installed-oh-my-zsh"`
else
ZSH_EXEC_ON_START=`escape_fslash "${OHMYZSH_DIR}/install_plugins.zsh --check"`
fi
# Replacement for the installer's final "env zsh -l": first append a hook
# to ~/.zshrc that evals $ZSH_EXEC_ON_START, then start zsh with it set.
RUN_CMD="echo 'if [[ -n \$ZSH_EXEC_ON_START ]]; then eval \$ZSH_EXEC_ON_START; fi' >> ~\/.zshrc \&\& ZSH_EXEC_ON_START='$ZSH_EXEC_ON_START' env zsh -l"
# Download the official installer, patch its final exec via sed, run it,
# and dim its output.
sh -c "$(curl -fsSL $SCRIPT_URL | sed "s/env zsh -l/$RUN_CMD/g")" | dim
| true
|
13d01c97db2800d4e614e9fa6ecd62a52661db91
|
Shell
|
steveyken/bash-dot-files
|
/prompt-nodejs/main.sh
|
UTF-8
| 270
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
# Node.js / npm environment for user-local "global" packages.
#
# Bug fix: the original used NPM_PACKAGES="~/.npm-packages/" — tilde is
# NOT expanded inside quotes, so NODE_PATH/PATH/MANPATH all contained a
# literal "~". Use $HOME, which does expand inside double quotes.
NPM_PACKAGES="$HOME/.npm-packages"
NODE_PATH="$NPM_PACKAGES/lib/node_modules:$NODE_PATH"
PATH="$NPM_PACKAGES/bin:$PATH"

# Rebuild MANPATH so npm-installed man pages are found ahead of the
# system defaults reported by manpath(1).
unset MANPATH
MANPATH="$NPM_PACKAGES/share/man:$(manpath)"

# Snapshot PATH so build_node_path can re-prefix it idempotently instead
# of growing PATH on every invocation.
ORIGINAL_PATH=$PATH

# Prepend the current project's npm bin dir (node_modules/.bin) to PATH.
function build_node_path {
  export PATH=$(npm bin):$ORIGINAL_PATH
}
| true
|
2eb167a5475ed970bb4de199c1cc7a37d9768bb6
|
Shell
|
vova557/cliff
|
/integration-tests/openstackclient-tip.sh
|
UTF-8
| 599
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh -x
# Run the python-openstackclient test suite against the library source that
# pip checked out into the given tox environment directory.
set -e

envdir=$1

# tools/tox_install.sh places the checkout in one of two locations
# depending on whether zuul-cloner was used, so try each in turn.
cd $envdir/src/python-openstackclient/ ||
  cd $envdir/src/openstack/python-openstackclient/

pip install -r test-requirements.txt

# Pin the hash seed: tox otherwise randomizes it, which can cause sorting
# differences inside the test suite.
PYTHONHASHSEED=0
export PYTHONHASHSEED

python setup.py testr
| true
|
4881baa02fe2fcf3bb3b96b9d06c29b51ecd54a0
|
Shell
|
jmmarzolino/CCXXIRAD
|
/statistics/count003_trim.sh
|
UTF-8
| 1,694
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash -l
#SBATCH --output=/rhome/jmarz001/bigdata/CCXXIRAD/Scripts/retained.stdout
#SBATCH --mail-user=jmarz001@ucr.edu
#SBATCH --mail-type=ALL
#SBATCH --job-name='retained'
#SBATCH --ntasks=1
#SBATCH --mem-per-cpu=10G
#SBATCH --time=1:00:00
#SBATCH -p intel

# Get the number of reads from before and after trimming.
# The BEFORE and AFTER passes were previously duplicated inline; they now
# share one helper.

# summarize_fastqc_counts BASE OUT
#   Scan every *fastqc/ report directory directly under BASE and append
#   each report's "Filename" and "Total Sequences" fields (tab separated)
#   to OUT, which is first created with a header line.
summarize_fastqc_counts() {
  local base="$1"
  local out="$2"
  cd "$base" || return 1
  printf "Filename \t Total Sequences \n" > "$out"
  for report_dir in *fastqc/; do
    # Skip the unexpanded literal pattern when no report dirs exist.
    [ -d "$report_dir" ] || continue
    cd "$report_dir" || continue
    local fname total
    fname=$(grep "Filename" fastqc_data.txt | cut -f2)
    total=$(grep "Total Sequences" fastqc_data.txt | cut -f2)
    printf '%s\t%s\n' "$fname" "$total" >> "$out"
    cd ..
  done
}

### BEFORE trimming
PRE=/rhome/jmarz001/bigdata/CCXXIRAD/barcode/quality
summarize_fastqc_counts "$PRE" "$PRE/pre_fileseqs.txt"

### AFTER trimming
POST=/rhome/jmarz001/bigdata/CCXXIRAD/trim/fastqc
summarize_fastqc_counts "$POST" "$POST/post_fileseqs.txt"

#Alternative idea
#strip.dirNAME=name
#cp fastqc_data.txt > ../name_data.txt
# take any column and paste them as columns into
#paste < (cut -f1,5,7 file)
#paste < (cut -f5 file) < (cut -f50 file) > outfile
| true
|
ccddcf1bf1a51b994e8f9d661d4a4268633cc735
|
Shell
|
scruwys/dotfiles
|
/install/pyenv.sh
|
UTF-8
| 493
| 3.296875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install pyenv via the official installer, wire it into ~/.zshrc, then
# install and select Python versions. Intended to be sourced by a dotfiles
# runner (hence `return`, not `exit`, on the guard below).

# Bug fix: the original `! is-executable curl -o ! is-executable git`
# passed "-o" and the rest as literal arguments to is-executable; use a
# proper shell-level OR of two separate checks.
if ! is-executable curl || ! is-executable git; then
  echo "Skipped: pyenv (missing: curl and/or git)"
  return
fi

curl -L https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-installer | bash

# Bug fix: quote the heredoc delimiter so $HOME/$PATH/$(pyenv init -) are
# written literally into .zshrc and evaluated at every shell startup —
# the unquoted form expanded them NOW, baking the current PATH and the
# installer-time pyenv output into the rc file.
cat <<'EOT' >> ~/.zshrc

# pyenv configuration
export PATH="$HOME/.pyenv/shims:$PATH"
export PATH="$HOME/.pyenv/bin:$PATH"
eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
EOT

versions=(
  "2.7.1"
  "3.6.5"
)

# Install each version individually (older pyenv releases only honor the
# first argument when several are passed at once).
for version in "${versions[@]}"; do
  pyenv install "$version"
done
pyenv global 3.6.5
| true
|
e5b400a5effe5fbb571c2ac5dc6dd3cd5629a892
|
Shell
|
guadalinex-archive/guadalinex-2005
|
/trunk/generation_system/live/initramfs-tools/docs/example_script
|
UTF-8
| 2,704
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/sh
#
# This script is run inside of the initramfs environment during the
# system boot process. It is installed there by 'mkinitramfs'. The
# package that owns it may opt to install it in either an appropriate
# location under "/usr/share/initramfs-tools/scripts/", or a similar
# location under "/etc/mkinitramfs/scripts/", depending upon whether
# it should be considered to be a user modifiable conffile or not.
#
# TODO: How do we deal with the case where the package that installed
# this has been removed but not purged, if we always arbitrarily
# copy all of these scripts into the initramfs?
#
# * The available toolset is limited inside this environment...
#
# TODO: document that toolset in the man page.
#
# * /dev, /proc, and /sys are already mounted. / is a ?? ro/rw
# filesystem... etc. more documentation.
#
# * It is expected that /proc and /sys will be umounted before
# changing over to the real root file system, so you must not keep
# any files open on them beyond these scripts.
#
# * You may like to strip these documentation comments from this
# example if you take it for a template, to save a little space in
# the initramfs, since nobody will ever read it from inside of
# there anyhow.
#
#
# The environment contains at least the following variables:
#
# TODO: Decide what environment variables are meaningful and defined
# in this context, then document them as part of the interface.
#
# Because this script will be run as a full separate process, rather
# than sourced inside the context of the driver script, if it needs to
# pass information to another script that may run after it, it must do
# so by writing data to a file location known to both scripts. Simply
# setting an environment variable will not work.
#
#
# List the soft prerequisites here. This is a space separated list of
# names, of scripts that are in the same directory as this one, that
# must be run before this one can be.
#
PREREQ=""
# Print the prerequisite list; the driver calls the script with
# "prereqs" as $1 to collect this before ordering the hook scripts.
prereqs()
{
echo "$PREREQ"
}
case $1 in
# get pre-requisites
prereqs)
prereqs
exit 0
;;
esac
# Do the work here.
echo "Got here!"
# Handle an error:
# NOTE: $an_error_occured is never set in this template — this branch
# only demonstrates the recommended error-reporting shape.
if [ -n "$an_error_occured" ];
then
#
# TODO: Do we need 'warn()', 'error()', and/or 'fatal()' for this?
# I think we ultimately do, and that they need to be in their own
# well-documented location so that an overlay can override them.
# Think 'usplash' progress updates.
#
echo "An error occured in $0: $an_error_occured" >&2
exit 1
#
# TODO: Decide if different error codes are meaningful, what they
# mean, and what the semantics of them are wrt 'init' pass
# or panic. Consider naming the error values with mnemonic
# symbols rather than magic numbers.
#
fi
exit 0
| true
|
0107c41d08bd4fe09705ce897f4c1bf476f43942
|
Shell
|
suban244/dotfiles
|
/setup.sh
|
UTF-8
| 262
| 2.828125
| 3
|
[] |
no_license
|
#! /bin/zsh
# Link dotfiles from this repository into the home directory.
# Run from the repository root.
#
# Ubuntu
# sudo apt-get install vim tmux nvim
# sudo snap install nvim
# Arch
# sudo pacman -S vim tmux nvim

# BUG FIX: the original used `ln -s $file "~/${file}"` — tilde is NOT
# expanded inside quotes, so links landed in a literal "./~" directory,
# and the relative source would have made $HOME/.vimrc point at itself.
# Use $HOME and an absolute source path instead.
for file in .vimrc .zshrc .tmux.conf
do
  ln -s "$PWD/$file" "$HOME/$file"
done

# Same for whole configuration directories under ~/.config.
for folder in nvim
do
  ln -s "$PWD/$folder" "$HOME/.config/$folder"
done
| true
|
c102ac787ac5949d8563a16e5ff86c9c98930fd9
|
Shell
|
hikouki/private-docker-images
|
/ubuntu/apache-php/run.sh
|
UTF-8
| 530
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Docker entrypoint for an Apache/PHP container: fix ownership, apply
# optional ALLOW_OVERRIDE / DOCUMENT_ROOT env overrides, then run
# Apache in the foreground (PID 1).
chown www-data:www-data /app -R
# Align www-data's UID with the typical host user so bind-mounted
# files remain writable.
usermod -u 1000 www-data
# "**False**" is the sentinel the image uses for "not set by the user".
if [ "$ALLOW_OVERRIDE" = "**False**" ]; then
unset ALLOW_OVERRIDE
else
sed -i "s/AllowOverride None/AllowOverride All/g" /etc/apache2/apache2.conf
a2enmod rewrite
fi
if [ "$DOCUMENT_ROOT" = "**False**" ]; then
unset DOCUMENT_ROOT
else
# sed uses ' as its delimiter here because DOCUMENT_ROOT contains "/".
sed -i "s'DocumentRoot /var/www/html'DocumentRoot ${DOCUMENT_ROOT}'g" /etc/apache2/sites-available/000-default.conf
fi
source /etc/apache2/envvars
# Stream Apache logs to the container's stdout while Apache runs.
tail -F /var/log/apache2/* &
exec apache2 -D FOREGROUND
| true
|
5aa855f5e09fbcbd65e4561695f4bf6b85e470f5
|
Shell
|
ValeryEMalakhov/spark-nginx-logs-handler
|
/injection/cron/rename_and_move_logs.sh
|
UTF-8
| 472
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Rotate the nginx access log: move it into nginx_logs_archive/ under a
# unique <timestamp>_<sha256>.log name, signal nginx (in the
# docker-nginx container) to reopen its log file, then gzip the archive.
#
# $1 — directory containing access.log and nginx_logs_archive/
LOG_PATH=$1
TIMESTAMP=$(date -u '+%s%N');
# 32 random alphanumeric characters used as hash input (UUOC removed:
# redirect /dev/urandom straight into tr).
RAND=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 32 | head -n 1);
# BUG FIX: sha256sum prints "<hash>  -"; the old `${SHA%" -"}` strip
# left a trailing SPACE inside the filename. Keep only the hash field.
SHA=$(echo "$RAND" | sha256sum | awk '{print $1}');
NEW_FILE_NAME=${TIMESTAMP}_${SHA}.log
#Rename and move log file
mv $LOG_PATH/access.log $LOG_PATH/nginx_logs_archive/${NEW_FILE_NAME}
#Re-run logs: USR1 makes nginx reopen its log files
docker exec -i docker-nginx bash -c 'kill -USR1 `cat /var/run/nginx.pid`'
#Archive the log file
cd $LOG_PATH/nginx_logs_archive
gzip -rm ${NEW_FILE_NAME}
| true
|
b3568dce0aa74325e8302bb0ef0c45cc1f72db9c
|
Shell
|
code4tots/jj
|
/x
|
UTF-8
| 1,435
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# source this from your bashrc.
### CUSTOMIZABLE VARIABLES
# Checkout of the jj language repository (grammar, runtime, scripts).
PATH_TO_JJ=~/git/hub/jj
# ANTLR4 jar used to generate the Python lexer/parser from Jj.g4.
PATH_TO_ANTLR4=/usr/local/lib/antlr-4.5-complete.jar
# Android SDK root, used to create/build the generated app project.
PATH_TO_ANDROID_SDK=~/git/pkg/android-sdk-linux
### END CUSTOMIZATIONS
# Upper-case the first character of $1, leaving the rest unchanged.
# BUG FIX: the result is now quoted — the original `echo $( … )` let
# the shell word-split and glob-expand the value before printing it.
capitalize() {
  printf '%s\n' "$(tr '[:lower:]' '[:upper:]' <<< "${1:0:1}")${1:1}"
}
# Lower-case every character of $1.
# BUG FIX: $1 is now quoted — the original `echo $1` collapsed internal
# whitespace and glob-expanded arguments such as "*".
lowercase() {
  printf '%s\n' "$1" | tr '[:upper:]' '[:lower:]'
}
# Generate the Python3 lexer/parser from the Jj.g4 grammar with ANTLR4.
jj_build() {
(cd $PATH_TO_JJ && java -jar $PATH_TO_ANTLR4 -Dlanguage=Python3 Jj.g4)
}
# Remove every generated artifact (ANTLR output, caches, class files).
jj_clean() {
(cd $PATH_TO_JJ && rm -rf \
__pycache__ \
JjLexer.py JjLexer.tokens JjListener.py JjParser.py Jj.tokens \
*.class)
}
# Create a fresh Android project named $1 targeting android-22.
create_android_project() {
$PATH_TO_ANDROID_SDK/tools/android create project \
--target "android-22" \
--path ./"$1" \
--package com.mtots.jj \
--activity MainActivity
}
# Ensure project dir $1 exists (creating it if needed) and copy the jj
# runtime, activity and manifest into it.
create_project() {
([ -d "$1" ] || create_android_project "$1") && \
cp $PATH_TO_JJ/JjRuntime.java "$1"/src/com/mtots/jj/ && \
cp $PATH_TO_JJ/MainActivity.java "$1"/src/com/mtots/jj/ && \
cp $PATH_TO_JJ/AndroidManifest.xml "$1"/
}
# Compile and install a .jj script: translate it via jj.py, splice the
# result into Script.java's %s placeholder, then build with ant.
jj_run() {
if ! [[ "$1" = *.jj ]]; then
echo "filename must end in .jj"
return 1
fi
local base=$(basename "$1" .jj) && \
local name=$(capitalize $base) && \
(cd $PATH_TO_JJ && ([ -f JjParser.py ] || jj_build)) && \
create_project $name && \
cat $PATH_TO_JJ/Script.java | sed s/%s/"$(cat "$1" | (cd $PATH_TO_JJ && python3 jj.py simp))"/ > \
"$name"/src/com/mtots/jj/Script.java && \
(cd $name && ant debug && ant -d installd)
}
| true
|
9891e77268b27c31d9390cf52b79928fce501291
|
Shell
|
a1kaid/hunter
|
/docker/update_hunter.sh
|
UTF-8
| 1,320
| 3.84375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#删除正在运行的其他容器,redis,rabbitmq,mysql容器不动,因为里面保存了数据
# contains STR LIST — return 1 when STR occurs as a word in the
# whitespace-separated LIST, 0 otherwise.  NOTE the inverted
# convention: callers treat `$? == 0` as "not in the keep-list, safe
# to delete".
contains(){
    local str=$1
    local item
    # $2 is word-split on purpose: callers pass "${array[*]}".
    for item in $2; do
        # Quote both operands so empty strings and glob characters
        # compare safely (the original `[ $str == "$item" ]` broke on
        # values containing spaces or wildcards).
        if [ "$str" == "$item" ]; then
            return 1
        fi
    done
    return 0
}
# Stop and remove all running "hunter" containers EXCEPT the data
# containers (mysql / rabbitmq / redis), which hold persistent state.
removeContainers(){
dataContainers=("/hunter-mysql" "/hunter-rabbitmq" "/hunter-redis")
containers=`docker inspect -f '{{.Name}}' $(docker ps -aq)|grep "hunter"`
for container in $containers;do
# contains returns 0 when $container is NOT in the keep-list.
contains $container "${dataContainers[*]}"
if [ $? == 0 ];then
docker stop $container > /dev/null 2>&1
docker rm $container > /dev/null 2>&1
echo "删除容器"$container"成功"
fi
done
}
# Remove all "hunter" images EXCEPT the data-service images
# (mysql / rabbitmq / redis versions listed below).
removeImages(){
dataImages=("bsmali4/hunter-mysql:2.0" "bsmali4/hunter-rabbitmq:2.0" "bsmali4/hunter-redis:2.0")
images=`docker images|grep "hunter"|awk {'print $1":"$2'}`
for image in $images;do
# contains returns 0 when $image is NOT in the keep-list.
contains $image "${dataImages[*]}"
if [ $? == 0 ];then
docker rmi $image > /dev/null 2>&1
echo "删除镜像"$image"成功"
fi
done
}
# Entry point: clean containers first, then their images.
removeContainers
removeImages
| true
|
803b119a54323b511c83d4f7ac41c91cc9740aa7
|
Shell
|
ksowmya/perfscripts
|
/ACS40/advanced_zone_rvr/createUserAccount.sh
|
UTF-8
| 1,351
| 2.640625
| 3
|
[] |
no_license
|
#
# Copyright (C) 2011 Cloud.com, Inc. All rights reserved.
#
# Bulk-create CloudStack test accounts (test1..testN) and one guest
# network per account by sending raw HTTP GETs to the management
# server's unauthenticated API port (8096) via netcat.
#
# $1 — management server host; $2 — number of accounts to create
mgmtServer=$1
count=$2
# Network offering and zone UUIDs for this test environment.
noId="9384dab9-d19d-4b2d-bf28-d545787dcaeb"
zoneid="ec7df848-5880-4b81-8e45-e2d8b8382414"
#count=$(($count+4004))
echo $count
for name in `seq 1 $count`
#for name in `seq 4001 $count`
do
account_query="GET http://$mgmtServer/client/?command=createAccount&accounttype=0&email=simulator%40simulator.com&username=test$name&firstname=first$name&lastname=last$name&password=5f4dcc3b5aa765d61d8327deb882cf99&account=test$name&domainid=1 HTTP/1.1\n\n"
echo -e $account_query | nc -v -w 120 $mgmtServer 8096
#createNetwork_query="GET http://$mgmtServer/client/?command=createNetwork&networkOfferingId=$noId&name=test$name&displayText=test$name&zoneId=1&&account=test$name&domainid=1 HTTP/1.0\n\n"
createNetwork_query="GET http://$mgmtServer/client/?command=createNetwork&networkOfferingId=$noId&name=test$name&displayText=test$name&zoneId=$zoneid&account=test$name&domainid=1 HTTP/1.0\n\n"
createNetwork_out=$(echo -e $createNetwork_query | nc -v -w 120 $mgmtServer 8096)
# Pull the <id> element out of the XML response.
network_id=$(echo $createNetwork_out | sed 's/\(.*<id>\)\(.*\)\(<\/id>.*\)/\2/g')
echo $network_id
#updateResource_query="GET http://$mgmtServer/client/?command=updateResourceLimit&domainid=1&account=test$name&resourceType=0&max=50 HTTP/1.1\n\n"
#echo -e $updateResource_query | nc -v -w 120 $mgmtServer 8096
done
| true
|
91efdcf351c27224fef0ed99274610031e8e3a1d
|
Shell
|
alphaemmeo/private2public
|
/pre-commit
|
UTF-8
| 533
| 3.546875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# pre-commit hook: on the "public_release" branch, remove every
# directory listed in desert.gitignore from both the index and the
# working tree, so private material never reaches the public branch.
# NOTE(review): assumes git sets GIT_DIR when invoking hooks — confirm
# for the git versions in use.

current_branch="$(git branch | grep -e "^*" | sed -e 's/^* //')"

if [ ! -f "${GIT_DIR}/../desert.gitignore" ]; then
    echo "ERROR: desert.gitignore not found!"
    exit 1
fi
# One directory name per whitespace-separated token.
DIR_LIST_FROM_FILE=$(cat "${GIT_DIR}/../desert.gitignore")

if [ "${current_branch}" = "public_release" ]; then
    for dir in ${DIR_LIST_FROM_FILE}; do
        if [ -e "${dir}" ]; then
            # BUG FIX: the option was misspelled "--cach", silently
            # relying on git's prefix matching; spell out --cached.
            git rm -r --cached "${dir}"
            rm -rf "${dir}"
        else
            echo "-- ${dir} not found!!"
        fi
    done
    echo "------"
fi
exit 0
| true
|
b9d1a94fd662b60ebd8dcbd322798d5896464642
|
Shell
|
gentoo/gentoo
|
/dev-util/nvidia-cuda-toolkit/files/cuda-config.in
|
UTF-8
| 407
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# cuda-config: report which gcc versions the installed CUDA toolkit
# supports. CUDA_SUPPORTED_GCC below is presumably substituted at
# package-install time (this is a .in template) — TODO confirm.
SUPPORT_GCC_VERSIONS_BY_CUDA="CUDA_SUPPORTED_GCC"
# Print the usage text (heredoc body is runtime output; <<- strips
# leading tabs).
_print_help() {
cat <<- EOF
Usage:
$(basename $0) [options]
-s | --supported Returns by current CUDA supported gcc versions
-h | --help Shows this help
EOF
}
case ${1} in
-s|--supported)
echo "${SUPPORT_GCC_VERSIONS_BY_CUDA}"
exit 0
;;
-h|--help)
_print_help
exit 255
;;
*)
# Unknown/missing option: show help, signal error.
_print_help
exit 1
;;
esac
| true
|
6707ea0f073b79cd1311b0598d310e4dcf003342
|
Shell
|
DexterTheDragon/dotfiles
|
/bin/passbootstrap
|
UTF-8
| 592
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Start Phusion Passenger for the current project if a .passenger_port
# file exists; otherwise (incomplete) try to find a free port.
if [ -f .passenger_port ]; then
if [ ! `which passenger` ]; then
echo 'passenger not installed'
gem install passenger
fi
# Non-zero grep status means the status output did NOT say
# "not running", i.e. passenger is already up.
passenger status -p $(cat .passenger_port) | grep 'not running' > /dev/null
if [ $? == 0 ]; then
passenger start -p $(cat .passenger_port) -d
else
echo 'Passenger already running'
fi
else
echo 'No port file'
# Ports already used by running passenger instances.
list=`netstat -lpn --inet 2>/dev/null | grep passenger | awk '{print $4}' | awk -F: '{print $2}'`
# NOTE(review): this loop is a no-op — it looks like an unfinished
# attempt to pick the first port in 8001-8009 not present in $list.
# Confirm intent before "fixing".
for i in `seq 8001 8009`; do
echo -n ''
done
fi
# netstat -lpn --inet
| true
|
2c8c63305f07e0e38b56f356053a574f410e2e59
|
Shell
|
KrivoKu/OSLabs
|
/calc.sh
|
UTF-8
| 661
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# calc ACTION A B — integer calculator.
#   ACTION: sum | sub | mul | div
# Prints the integer result on stdout; on bad input prints an error
# message and exits the shell with the original (negative) codes:
#   -2 unknown action, -1 non-integer operand, -5 division by zero.
calc(){
    if [[ "$1" != "sum" && "$1" != "sub" && "$1" != "mul" && "$1" != "div" ]]
    then
        echo "Error: Wrong action tried"
        exit -2
    elif ! [[ $2 =~ ^[+-]?[0-9]+$ && $3 =~ ^[+-]?[0-9]+$ ]]
    then
        echo "Error: You need two integer numbers for program to work"
        exit -1
    elif [[ $1 == "div" && $3 =~ ^[+-]?0+$ ]]
    then
        # Collapses the original three separate "0"/"-0"/"+0" checks
        # and ALSO catches multi-digit zeros like "00", which the
        # original let through into a runtime divide-by-zero error.
        echo "Error: You can't divide by 0"
        exit -5
    else
        case "$1" in
            sum) echo $(($2+$3)) ;;
            sub) echo $(($2-$3)) ;;
            mul) echo $(($2*$3)) ;;
            div) echo $(($2/$3)) ;;  # integer (truncating) division
        esac
    fi
}
| true
|
9d2d3497a2bdef63f4bbaacbf6306b9ef1b0ba9e
|
Shell
|
firewalker06/dotfiles
|
/bin/git-refresh
|
UTF-8
| 139
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Discard local changes on the current branch and make it match its
# remote counterpart: update from the remote, then hard-reset to
# origin/<current-branch>.
current=$(git rev-parse --abbrev-ref HEAD)
git pull && git reset --hard "origin/${current}"
| true
|
39cc5581deba276a4b3f6a895b9702f681a42d76
|
Shell
|
Travis990/LiveClient
|
/android/Tools/download_crash_log.sh
|
UTF-8
| 1,503
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh
# App crash log file download and unzip script
# Author: Max.Chiu
#
# Scrapes the crash-report listing ($1) across $2 pages, extracts the
# .zip links, downloads each archive into $3/zip and unpacks it into
# $3/crash. NOTE(review): `sed ... -i ""` is the BSD/macOS in-place
# form — this script presumably targets macOS; confirm before running
# on GNU sed, where -i takes no separate "" argument.
PARAM_URL=""
if [ ! "$1" == "" ]
then
PARAM_URL="$1"
else
echo "Usage: ./download_crash_log.sh \"http://mobile.asiame.com/other/crash_file_view/usertype/?date1=2019-01-01&date2=2019-02-12&manid=&devicetype=31&deviceid=&versioncode=136&search=+%E6%90%9C+%E7%B4%A2+&__hash__=14b802a461d11e2cea5307d1f86a9fff_07201d6c31a21b8435a1b3b19951e8d0\" 2 output"
exit 1
fi
PARAM_PAGE=""
if [ ! "$2" == "" ]
then
PARAM_PAGE="$2"
else
echo "Usage: ./download_crash_log.sh \"http://mobile.asiame.com/other/crash_file_view/usertype/?date1=2019-01-01&date2=2019-02-12&manid=&devicetype=31&deviceid=&versioncode=136&search=+%E6%90%9C+%E7%B4%A2+&__hash__=14b802a461d11e2cea5307d1f86a9fff_07201d6c31a21b8435a1b3b19951e8d0\" 2 output"
exit 1
fi
# Optional output directory ($3), default: current directory.
DIR=./
if [ ! "$3" == "" ]
then
DIR=./"$3"
fi
rm -rf link.txt
# Fetch each listing page and collect one .zip link per line.
for ((i=1; i<=$PARAM_PAGE; i++))
do
URL="$PARAM_URL&p=$i"
RESULT=`curl "$URL"`
# Break the HTML after every zip" so each link sits on its own line.
echo $RESULT | sed -e $'s/zip"/zip"\\\n/g' >> tmp.txt
# Strip everything before the href target; drop the trailing line.
sed -e 's/.*a\ href="\(.*zip\)"/\1/g' -i "" tmp.txt
sed -e '$d' -i "" tmp.txt
cat tmp.txt >> link.txt
rm -rf tmp.txt
done
mkdir -p $DIR
mkdir -p $DIR/zip
mkdir -p $DIR/crash
DOMAIN="http://mobile.asiame.com"
# Download each archive and unpack it into its own directory.
cat link.txt | while read line
do
URL=$DOMAIN$line
FILE_NAME=`echo $line | awk -F '/' '{print $NF}'`
DIR_NAME=`echo $FILE_NAME | awk -F '.' '{print $1}'`
curl -o $DIR/zip/$FILE_NAME "$URL"
unzip -o -P Qpid_Dating $DIR/zip/$FILE_NAME -d $DIR/crash/$DIR_NAME
done
rm -rf link.txt
| true
|
4dee5bc24c845538b3a0e840796c1ccb3637fad0
|
Shell
|
kaixinguo360/ShellScript
|
/cloud/owncloud.sh
|
UTF-8
| 3,446
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Install and configure ownCloud on Ubuntu 16.04 behind nginx, with an
# optional dedicated MySQL database/user. Must be run as root.
export PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
# Require root
[ $(id -u) != "0" ] && { echo "Error: You must be root to run this script"; exit 1; }
# Check the OS release (only warns — the exit is commented out)
if [ ! -z "`cat /etc/issue | grep 'Ubuntu 16'`" ];
then
OS='Ubuntu'
else
echo "Not support OS(Ubuntu 16), Please reinstall OS and retry!"
#exit 1
fi
## Initialize installation parameters ##
# Static settings: ownCloud apt repo, nginx config locations, and the
# helper script used to create a new nginx site.
APT_KEY='https://download.owncloud.org/download/repositories/production/Ubuntu_16.04/Release.key'
APT_SOURCE='http://download.owncloud.org/download/repositories/production/Ubuntu_16.04/'
NGINX_CONF='/etc/nginx/sites-enabled/'
SITE_CONF='https://raw.githubusercontent.com/kaixinguo360/ShellScript/master/cloud/nginx_site_conf'
NEW_SITE_URL="https://raw.githubusercontent.com/kaixinguo360/ShellScript/master/other/new_site.sh"
# Read user input: site domain, and whether to create a MySQL user
read -p '您的网站域名: ' SERVER_NAME
while true :
do
read -r -p "创建新的MySQL用户? [Y/n] " input
case $input in
[yY][eE][sS]|[yY])
CREATE_USER='1'
break
;;
[nN][oO]|[nN])
break
;;
*)
echo "Invalid input..."
;;
esac
done
# If requested, collect MySQL root password, new user name and its
# password (asked twice until both entries match).
if [ -n "${CREATE_USER}" ];then
read -p '请输入MySQL根用户密码(!务必正确!): ' MYSQL_PW
read -p '请设置新的MySQL用户名: ' MYSQL_USER
while true :
do
read -s -p "请设置MySQL用户 ${MYSQL_USER} 的密码: " MYSQL_PASSWORD_1
echo ''
read -s -p '再输一遍: ' MYSQL_PASSWORD_2
echo ''
if [ "${MYSQL_PASSWORD_1}" = "${MYSQL_PASSWORD_2}" ]; then
MYSQL_PW=${MYSQL_PASSWORD_1}
break
else
echo -e "两次输入密码不一致!\n"
fi
done
fi
## Installation proper ##
# Add the ownCloud apt signing key
wget -nv ${APT_KEY} -O Release.key
apt-key add - < Release.key
rm -rf Release.key
# Register the repo and refresh apt
echo "deb ${APT_SOURCE} /" > /etc/apt/sources.list.d/owncloud.list
apt-get update
# Install ownCloud (files only; webserver configured separately)
apt-get install owncloud-files -y
# Create the data directory owned by the web user
mkdir -p /var/cloud/data
chown -R www-data:www-data /var/cloud/data
# Install PHP extensions ownCloud requires
apt-get install php-curl php-gd php-mbstring php-mcrypt php-xml php-xmlrpc php-zip php-intl -y
systemctl restart php7.0-fpm
## Configure nginx ##
# Create the new site by driving new_site.sh through expect
# (heredoc content — including prompts — is runtime data).
wget -O new_site.sh ${NEW_SITE_URL}
chmod +x new_site.sh
expect << HERE
spawn ./new_site.sh
expect "*本地配置文件名*"
send "owncloud\r"
expect "*默认根目录*"
send "y\r"
expect "*域名*"
send "${SERVER_NAME}\r"
expect "*启用SSL*"
send "y\r"
expect eof
HERE
rm -rf new_site.sh
# Download the site config template
wget -O ${NGINX_CONF}owncloud ${SITE_CONF}
# Fill in the template placeholders
sed -i "s/TMP_SERVER_NAME/${SERVER_NAME}/g" ${NGINX_CONF}owncloud
sed -i "s/SITE_NAME/owncloud/g" ${NGINX_CONF}owncloud
# Restart nginx to pick up the new site
service nginx restart
## Create the MySQL database and user ##
if [ -n "${CREATE_USER}" ];then
# Shortcut: run one SQL statement as root
MYSQL_SHORTCUT="mysql -u root -p${MYSQL_PW} -e"
# Create the ownCloud database
${MYSQL_SHORTCUT} "CREATE DATABASE oc DEFAULT CHARACTER SET utf8 COLLATE utf8_unicode_ci;"
# Create the application user with full rights on it
${MYSQL_SHORTCUT} "GRANT ALL ON oc.* TO '${MYSQL_USER}'@'localhost' IDENTIFIED BY '${MYSQL_PW}';"
# Flush privileges so MySQL picks up the change:
${MYSQL_SHORTCUT} "FLUSH PRIVILEGES;"
fi
echo -e "\n ## OwnCloud安装完成 ##"
echo -e " 您可以通过 http://${SERVER_NAME}/ 访问OwnCloud\n"
| true
|
876474b8e290e3a7a2e0ae1a5201e118a54f6fb2
|
Shell
|
roche-emmanuel/singularity
|
/scripts/deps/boost.sh
|
UTF-8
| 2,448
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
# Build static release Boost (minus MPI/Python) with bjam for one of
# several gcc flavors (linux32/64, mingw32/64), installing into
# $DEPS_DIR/$flavor, then graft in extra headers (endian, floating
# point utilities, boost.atomic). Relies on tools.sh for
# buildStdPackage and the $dep_* / $DEPS_DIR / $BUILD_DIR variables.
. `pwd`/tools.sh
flavor=$1
PACKAGENAME=$dep_boost
# VERBOSE=1
CLEAN=0
os="linux"
threadingapi=""
# linkmode=$3
# linkmode=shared
linkmode=static
# mode=$2
mode=release
# bjam -s switches pointing Boost.Iostreams at our bzip2/zlib builds.
comp_1="-sNO_BZIP2=0 -sNO_COMPRESSION=0 -sBZIP2_BINARY=bz2 -sBZIP2_INCLUDE=$DEPS_DIR/$flavor/$dep_bzip2/include -sBZIP2_LIBPATH=$DEPS_DIR/$flavor/$dep_bzip2/lib"
comp_2="-sNO_ZLIB=0 -sZLIB_BINARY=z -sZLIB_INCLUDE=$DEPS_DIR/$flavor/$dep_zlib/include -sZLIB_LIBPATH=$DEPS_DIR/$flavor/$dep_zlib/lib"
added=" : <compileflags>-fPIC ;"
bjam_src_dir="$BUILD_DIR/$PACKAGENAME/tools/build/v2/engine"
bjam="$BUILD_DIR/$PACKAGENAME/tools/build/v2/engine/bin.linuxx86_64/bjam"
# Infos on bjam gcc config:
# http://www.boost.org/boost-build2/doc/html/bbv2/reference/tools.html#bbv2.reference.tools.compiler.gcc
# Per-flavor compiler, target OS, thread API and extra flags.
if [ "$flavor" == "linux64" ]; then
compiler=g++
elif [ "$flavor" == "linux32" ]; then
compiler=g++
elif [ "$flavor" == "mingw32" ]; then
compiler="$PATH_MINGW32/${HOST_MINGW32}-g++"
os="windows"
threadingapi="threadapi=win32"
added=" : <compileflags>-DBOOST_USE_WINDOWS_H <compileflags>-mthreads <linkflags>-mthreads ;"
elif [ "$flavor" == "mingw64" ]; then
compiler="$PATH_MINGW32/${HOST_MINGW32}-g++"
os="windows"
threadingapi="threadapi=win32"
added=" : <compileflags>-DBOOST_USE_WINDOWS_H <compileflags>-DBUILDING_BOOST_MINGW64 <compileflags>-mthreads <linkflags>-lkernel32 <linkflags>-mthreads ;"
else
echo "Unknown compilation platform."
exit 1
fi
# Write a boost-config.jam toolset entry, then build bjam itself.
PRECONFIG_CMD="echo \"using gcc : $flavor : $compiler $added\" > boost-config.jam && cd $bjam_src_dir && ./build.sh"
MAKE_CMD="cd $BUILD_DIR/$PACKAGENAME/ && $bjam -d+2 --user-config=boost-config.jam variant=$mode link=$linkmode threading=multi --without-mpi --without-python toolset=gcc-$flavor target-os=$os $threadingapi --prefix=$DEPS_DIR/$flavor/$PACKAGENAME $comp_1 $comp_2 install && cp $bjam $DEPS_DIR/$flavor/$PACKAGENAME"
DO_MAKE_INSTALL=0
buildStdPackage
# post installation additions: merge extra header-only libraries into
# the installed Boost include tree.
cd $BUILD_DIR
unzip $DEPS_DIR/sources/endian-0.8.zip
mv -i endian-0.8/boost/integer/* $DEPS_DIR/$flavor/$PACKAGENAME/include/boost/integer/
rm -Rf endian-0.8/
unzip $DEPS_DIR/sources/floating_point_utilities_v3.zip
mv -i floating_point_utilities_v3/boost/math/* $DEPS_DIR/$flavor/$PACKAGENAME/include/boost/math/
rm -Rf floating_point_utilities_v3/
tar xvzf $DEPS_DIR/sources/boost.atomic.tar.gz
mv -i boost.atomic/boost/* $DEPS_DIR/$flavor/$PACKAGENAME/include/boost/
rm -Rf boost.atomic/
| true
|
768139fb1f0dbf39b91407a30c33e8622f5aaf1a
|
Shell
|
alfpedraza-aws-devops/infrastructure
|
/modules/environment/scripts/metadata.sh
|
UTF-8
| 1,242
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Helpers that read AWS EC2 instance metadata (169.254.169.254) and
# query EC2 for the cluster-master instance and its S3 bucket tag.

# Print this instance's hostname from the metadata service.
function get_host_name() {
curl http://169.254.169.254/latest/meta-data/hostname
}
# Print this instance's private IPv4 address.
function get_node_ip() {
curl http://169.254.169.254/latest/meta-data/local-ipv4
}
# Print the AWS account id from the instance identity document.
function get_account_id() {
curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | awk -F'"' '/\"accountId\"/ { print $4 }'
}
# Print the region this instance runs in.
function get_region_name() {
curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | awk -F'"' '/\"region\"/ { print $4 }'
}
# Print this instance's EC2 instance id.
function get_instance_id() {
curl http://169.254.169.254/latest/meta-data/instance-id
}
# Print the instance id of the running master node, found by its
# Name tag ($GLOBAL_MASTER_NAME must be set by the caller).
function get_master_instance_id() {
aws ec2 describe-instances \
--filters \
"Name=tag:Name,Values=$GLOBAL_MASTER_NAME" \
"Name=instance-state-name,Values=running" \
--query "Reservations[*].Instances[*].[InstanceId]" \
--output text
}
# Print the BUCKET_NAME tag of instance $1 (empty if the tag is absent;
# error checking is disabled around grep so a missing tag isn't fatal).
function get_master_bucket_name() {
local INSTANCE_ID=$1
local TAGS="$(aws ec2 describe-tags \
--filters "Name=resource-id,Values=$INSTANCE_ID" \
--output text)"
set +exuo pipefail; #Disable error checking
local BUCKET_NAME=$(echo "$TAGS" | grep BUCKET_NAME | cut -f5)
set -exuo pipefail; #Enable error checking
echo $BUCKET_NAME
}
| true
|
f332c195f59e4c0beca3944bbe41f22c153240bc
|
Shell
|
angry-tony/ksql-examples
|
/ccloud/stop-docker.sh
|
UTF-8
| 1,218
| 3.265625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Tear down the ccloud demo: delete demo Kafka topics, stop the
# docker-compose stack, prune dangling volumes, and optionally delete
# the demo subjects from Confluent Cloud Schema Registry.
# Source library
. ../utils/helper.sh
. ./config.sh
check_ccloud_config $CONFIG_FILE || exit
./ccloud-generate-cp-configs.sh $CONFIG_FILE
source delta_configs/env.delta
topics_to_delete="pageviews pageviews.replica users pageviews_enriched_r8_r9 PAGEVIEWS_FEMALE PAGEVIEWS_REGIONS"
for topic in $topics_to_delete
do
# Only attempt deletion when --describe shows the topic exists.
if [[ $(docker-compose exec connect-cloud kafka-topics --bootstrap-server $BOOTSTRAP_SERVERS --command-config /tmp/ak-tools-ccloud.delta --describe --topic $topic) =~ "Topic:${topic}"$'\t' ]]; then
echo "Deleting $topic"
# NOTE(review): "-delete" (single dash) looks like a typo for
# "--delete" — confirm against the kafka-topics CLI in use.
docker-compose exec connect-cloud kafka-topics --bootstrap-server $BOOTSTRAP_SERVERS --command-config /tmp/ak-tools-ccloud.delta -delete --topic $topic 2>/dev/null
fi
done
docker-compose down
# Remove volumes left dangling by the stack.
for v in $(docker volume ls -q --filter="dangling=true"); do
docker volume rm "$v"
done
# Delete subjects from Confluent Cloud Schema Registry
if [[ "${USE_CONFLUENT_CLOUD_SCHEMA_REGISTRY}" == true ]]; then
schema_registry_subjects_to_delete="users-value pageviews-value"
for subject in $schema_registry_subjects_to_delete
do
curl -X DELETE --silent -u $SCHEMA_REGISTRY_BASIC_AUTH_USER_INFO $SCHEMA_REGISTRY_URL/subjects/$subject
done
fi
| true
|
188569b7c471b06277fd61392c0c89455c6d7073
|
Shell
|
syndicate-storage/syndicate
|
/old/google-maps-demo/google-map/demo.sh
|
UTF-8
| 223
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Mount a Syndicate volume for SITE and start polling a file on it.
# Usage: demo.sh site mountpoint
#
# BUG FIX: the original `if [[ $# < 2 ]]` was both a bashism under
# #!/bin/sh and a lexicographic comparison ("10" < "2" is true);
# replaced with the POSIX numeric test.
if [ "$#" -lt 2 ]; then
    echo "Usage: $0 site mountpoint"
    exit 1
fi
SITE=$1
MOUNT=$2
# Mount in the background, silencing FUSE output.
syndicatefs -f -c syndicate-Internet2-$SITE.conf $MOUNT > /dev/null 2>/dev/null &
./poll-file.py $SITE.coord $MOUNT/hello &
| true
|
b47b69060cbef7df556ea4457fc289c671671dbd
|
Shell
|
josemarkz/create_users.sh
|
/create_users.sh
|
UTF-8
| 1,014
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Create five users and two groups (contabilidad, administrador), then
# assign group memberships.  Must be run as root.
#
# BUG FIX: the original used typographic quotes (“…”).  The shell
# treats those as ordinary characters, so every username and group
# name literally contained the quote marks.  Replaced with ASCII
# quotes throughout; the truncated final echo message for group 1 is
# also completed.
USER1="elena"
USER2="jose"
USER3="luis"
USER4="carlos"
USER5="carmen"
GROUP1="contabilidad"
GROUP2="administrador"
echo "Iniciando la creacion de usuarios"
# create user elena
useradd -m -d /home/$USER1 -s /bin/bash $USER1
# create user jose
useradd -m -d /home/$USER2 -s /bin/bash $USER2
# create user luis
useradd -m -d /home/$USER3 -s /bin/bash $USER3
# create user carlos
useradd -m -d /home/$USER4 -s /bin/bash $USER4
# create user carmen
useradd -m -d /home/$USER5 -s /bin/bash $USER5
# create group contabilidad
groupadd $GROUP1
# create group administrador
groupadd $GROUP2
# add members to group contabilidad
usermod -a -G $GROUP1 $USER1
usermod -a -G $GROUP1 $USER2
usermod -a -G $GROUP1 $USER3
echo "usuarios ${USER1} ${USER2} y ${USER3} y grupo creado con exito"
# add members to group administrador
usermod -a -G $GROUP2 $USER4
usermod -a -G $GROUP2 $USER5
echo "usuarios ${USER4} y ${USER5} y grupos creados con exito"
| true
|
0b23b92648e6fb1fdf93ba73711871aa3bd42b5d
|
Shell
|
monash-merc/ansible_cluster_in_a_box
|
/roles/mellanox_drivers/files/mlnx_install.sh
|
UTF-8
| 1,167
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# A CRUDE Script to install Mellanox OFED drivers
# Philip.Chan@monash.edu
#
# TODO: check if MLNX_OFED is already installed!
# TODO: check kernel...
# Refuse to run on any kernel other than the one the driver build
# was validated against.
KERN=`uname -r`
if [ "$KERN" != "3.10.0-229.14.1.el7.x86_64" ]
then
echo "Oops! Did you forget to reboot?"
echo "Kernel version has to be 3.10.0-229.14.1.el7.x86_64"
exit 1
fi
# Dependencies required by the Mellanox installer.
sudo yum install -y pciutils gcc-gfortran libxml2-python tcsh libnl lsof tcl tk perl
sudo yum install -y gtk2 atk cairo
# Unpack and run the OFED installer (quiet mode).
tar xzvf MLNX_OFED_LINUX-3.1-1.0.3-rhel7.1-x86_64-ext.tgz
cd MLNX_OFED_LINUX-3.1-1.0.3-rhel7.1-x86_64-ext
sudo ./mlnxofedinstall -q
cd ..
# Generate the interface config for the IB port and install it as
# ifcfg-ens6 unless one already exists.
tmpfile="/tmp/ifcfg.pc"
rm -f $tmpfile
./set_ifcfg.pl $tmpfile
if [ -f $tmpfile ]
then
echo "Attempting to install ifcfg-ens6"
if [ -f /etc/sysconfig/network-scripts/ifcfg-ens6 ]
then
echo "/etc/sysconfig/network-scripts/ifcfg-ens6 already exists!"
grep IP /etc/sysconfig/network-scripts/ifcfg-ens6
echo "bailing!"
else
sudo cp -ip $tmpfile /etc/sysconfig/network-scripts/ifcfg-ens6
sudo chown root:root /etc/sysconfig/network-scripts/ifcfg-ens6
cd /etc/sysconfig/network-scripts
sudo ./ifup ens6
# Smoke-test connectivity to the IB gateway.
ping -c 1 172.16.228.1
fi
fi
exit 0
| true
|
6a9567dd71bba1d24e0894322f72b21810071913
|
Shell
|
tspannhw/nifi-addons
|
/Operations/nifi-ops/core/src/test/resources/azure-gateway-init.sh
|
UTF-8
| 512
| 2.5625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Test fixture: gateway-node cloud-init for Azure Resource Manager.
# Exports the environment user-data-helper.sh expects, then runs it.
set -x
# warming up yum in the background
# yum -y makecache &
export CLOUD_PLATFORM="AZURE_RM"
export START_LABEL=98
export PLATFORM_DISK_PREFIX=sd
export LAZY_FORMAT_DISK_LIMIT=12
export IS_GATEWAY=true
export TMP_SSH_KEY="ssh-rsa test"
export PUBLIC_SSH_KEY="ssh-rsa public"
export RELOCATE_DOCKER=true
export SSH_USER=cloudbreak
# Record when this init ran.
date >> /tmp/time.txt
# Patch the bundled os_family tables in place (amazon2015 -> amazon2016).
find /usr/lib -name "os_family.json*" | xargs sed -i 's/amazon2015/amazon2016/g'
/usr/bin/user-data-helper.sh "$@" &> /var/log/user-data.log
| true
|
9174bf89c73ea9e04888a61ec21880b59b3a73a6
|
Shell
|
garlicbulb-puzhuo/ggo
|
/bin/application_log.sh
|
UTF-8
| 1,286
| 4.09375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Pull training results for the most recent YARN application mentioned
# in <workdir>/train.log and write them to <workdir>/results.csv.
set -e
set -u
# current program name (resolves symlinks to this script)
progname="$(basename "$(test -L "$0" && readlink "$0" || echo "$0")")"
working_dir=
working_dir_arg=0
# Print usage and exit non-zero.
function usage {
echo "usage: ${progname} -w directory [-h]"
echo " -w Working directory"
echo " [-h] Display usage"
exit 1
}
while getopts ":w:h" opt; do
case $opt in
w)
working_dir=$OPTARG
working_dir_arg=1
;;
h)
usage
;;
\?)
echo "${progname}: Invalid option: -$OPTARG"
usage
;;
:)
echo "${progname}: Option -$OPTARG requires an argument"
usage
;;
esac
done
# -w is mandatory.
if [ ${working_dir_arg} -eq 0 ]; then
echo "${progname}: Missing working output directory argument"
usage
fi
echo "Start pulling results"
# YARN application ids look like application_<cluster-ts>_<seq>.
regex="application_[0-9]+_[0-9]+"
application_id=$(grep -Ei " $regex " ${working_dir}/train.log | head -1 | grep -oEi $regex)
echo "Found application ${application_id}"
# Extract the "history and metadata values" lines from the aggregated
# YARN logs, keep the value part, and strip the [ ] brackets.
hdfs dfs -cat /var/log/hadoop-yarn/apps/${USER}/logs/${application_id}/* \
| grep -a "history and metadata values" \
| awk -F: '{print $2}' \
| sed -e 's/[[]//g' -e 's/[]]//g' \
>${working_dir}/results.csv
echo "Done pulling results"
| true
|
4f26ee9e6a2bdf4e734b0d3e301d4d02174de094
|
Shell
|
paulomach/tool
|
/bin/oracle_wrapper
|
UTF-8
| 271
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Wrap (obfuscate) every Oracle procedure (.prc) and function (.fnc)
# source in the current directory with Oracle's `wrap` tool, then
# collect the generated .plb files under wrapped_objects/.
if [[ $1 = "-h" ]]; then
    echo "Wrap/obfuscated Oracle procedures and functions"
    echo "Call in the source dir"
    exit
fi
# Iterate the glob directly instead of parsing `ls` output, which
# word-splits on whitespace in filenames; skip a pattern that matched
# nothing (it would otherwise be passed literally to wrap).
for i in *.prc *.fnc; do
    [ -e "$i" ] || continue
    wrap iname="$i" &> /dev/null
done
rm -rf wrapped_objects
mkdir wrapped_objects
mv *.plb wrapped_objects
| true
|
9acd6469ce537bb1dc8d4e371810fe9f9853cf78
|
Shell
|
olgn/helpers
|
/squirt/squirt-bash.sh
|
UTF-8
| 1,217
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactively pick a drive and project directory, then print the
# shell command to cd there (optionally also starting the UI when
# invoked with "ui").
#output
OUTPUT=""
# Determine directory prefixes -
# different in git bash vs ubuntu
if [ -d "/mnt/c" ]
then
PREFIX_C="/mnt/c"
PREFIX_D="/mnt/d"
else
PREFIX_C="/c"
PREFIX_D="/d"
fi
# Interactive prompt for directory selection
PS3="Please select a directory: "
options=("C" "D" "Quit")
select opt in "${options[@]}"
do
case $opt in
"C")
DIR_STRING="${PREFIX_C}/Users/User/Documents/Coordinate Solutions/Code"
OUTPUT="${OUTPUT}/${DIR_STRING}"
cd "${DIR_STRING}"
break
;;
"D")
DIR_STRING="${PREFIX_D}/cs"
cd "${DIR_STRING}"
OUTPUT="${OUTPUT}/${DIR_STRING}"
break
;;
"Quit")
exit
;;
esac
done
# Second menu: one entry per item in the chosen directory.
# NOTE(review): parsing `ls -1` breaks on names with spaces; $prompt
# is never set, so PS3 becomes a single space — presumably intended.
options=( $(ls -1) )
PS3="$prompt "
select opt in "${options[@]}" "Quit" ; do
if (( REPLY == 1 + ${#options[@]} )) ; then
exit
elif (( REPLY > 0 && REPLY <= ${#options[@]} )) ; then
OUTPUT="${OUTPUT}/$opt"
# "ui" mode also chains the commands to start the frontend.
if [ "$1" == ui ] ; then
echo "cd \"${OUTPUT}\" && cd ui && nvm use && yarn start"
else
echo "cd \"${OUTPUT}\""
fi
exit
fi
done
| true
|
638a362b24439e55a4777565c51862c1dca99c6f
|
Shell
|
rchytas/dabblelab-youtube-channel
|
/2017-tutorials/2017-07-28-sessions-in-dynamodb/publish.sh
|
UTF-8
| 433
| 3.828125
| 4
|
[] |
no_license
|
# Package the skill directory $1 into a timestamped zip, upload it to
# the same-named AWS Lambda function, and archive the zip in builds/.
SKILL_NAME=$1
NOW=$(date +%s)
ZIP_FILE="$SKILL_NAME-$NOW.zip"
# Only build & deploy when this exact zip doesn't already exist.
if [ ! -f "./$ZIP_FILE" ]; then
    echo "$ZIP_FILE not found. Creating file..."
    chmod -R +rw "$SKILL_NAME"
    cd "$SKILL_NAME"
    zip -r "../$ZIP_FILE" * -x "*.DS_Store"
    echo "$ZIP_FILE created."
    cd ..
    aws lambda update-function-code --function-name "$SKILL_NAME" --zip-file "fileb://$ZIP_FILE"
    # BUG FIX: the original tested `! -f ./builds` — -f is a
    # regular-file test, so it was always true for the builds
    # DIRECTORY and mkdir ran (and errored) on every publish.
    if [ ! -d ./builds ]; then
        mkdir builds
    fi
    mv "$ZIP_FILE" ./builds/
fi
| true
|
882afd7524546d46bc4b4d78760954a77446378a
|
Shell
|
seanbermejo/dotfiles
|
/.scripts/timezones
|
UTF-8
| 940
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Show current time of timezones I care about
# called on click from dwm_time
# no prompt
PS1=
# Print "$*" horizontally centered on the current terminal row, using
# a cursor-position query (ESC[6n) to find that row.
print_center(){
local x
local y
text="$*"
x=$(( ($(tput cols) - ${#text}) / 2))
echo -ne "\E[6n";read -sdR y; y=$(echo -ne "${y#*[}" | cut -d';' -f1)
echo -ne "\033[${y};${x}f$*"
}
#echo -e "\e[?1000;1006;1015h" # Enable mouse tracking
#bind "\e[<0;3;21M" "exit"
# Separator line across the full terminal width.
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
# Local timezone first, then the fixed ones.
print_center " $(timedatectl | awk '/Time zone:/ {print $3}'):\t$(date +'%H:%M')\n"
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
print_center " Europe/Vienna:\t$(TZ=':Europe/Vienna' date +'%H:%M')\n"
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
print_center " Europe/London:\t$(TZ=':Europe/London' date +'%H:%M')\n"
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
# exit on mouse click or enter
echo -e "\e[?1000h"
while read -n 6; do clear;exit; done
| true
|
7ea247ba668c8f31cb2275614c0db1c7c4263cf3
|
Shell
|
van-namdinh/noxim-dev
|
/noxim_run_v8_gnuplot.sh
|
UTF-8
| 3,633
| 2.515625
| 3
|
[] |
no_license
|
# -----------------------------------------------------------------------------
# Project name :
# File name : gnuplot_v8_latency.sh
# Created date : Th11 28 2019
# Author : Van-Nam DINH
# Last modified : Th11 28 2019 18:32
# Guide : feeds a single gnuplot heredoc that renders latency, energy
#         and throughput EPS comparison plots for six NoC routing
#         algorithms from Noxim v8 result files.
# NOTE(review): the "#!/bin/bash" below is not on line 1, so it is an
# ordinary comment, not an effective shebang.
# -----------------------------------------------------------------------------
#!/bin/bash
#for proj_name in default_conf_testXY default_conf_testDYADONE default_conf_testDYADTWO
#do
# Everything between EOF markers is gnuplot input (runtime data).
gnuplot << EOF
set term postscript eps color enhanced "" 24
set output "multiple_files_v8_latency.eps"
set title "Average latency (cycles)"
set xlabel "PIR %"
set xrange [0:200]
set ylabel "cycles"
plot \
"/home/vnd/WORK/noxim/bin/DYAD_algorithmv8/DYAD_results/outoffinal_DYAD_latency.dat" \
title "DYAD" lt rgb "brown", \
"/home/vnd/WORK/noxim/bin/NEGATIVE_FIRST_algorithmv8/NEGATIVE_FIRST_results/outoffinal_NEGATIVE_FIRST_latency.dat" \
title "NEGATIVE FIRST" lt rgb "red", \
"/home/vnd/WORK/noxim/bin/NORTH_LAST_algorithmv8/NORTH_LAST_results/outoffinal_NORTH_LAST_latency.dat" \
title "NORTH LAST" lt rgb "orange", \
"/home/vnd/WORK/noxim/bin/ODD_EVEN_algorithmv8/ODD_EVEN_results/outoffinal_ODD_EVEN_latency.dat" \
title "ODD EVEN" lt rgb "yellow", \
"/home/vnd/WORK/noxim/bin/WEST_FIRST_algorithmv8/WEST_FIRST_results/outoffinal_WEST_FIRST_latency.dat" \
title "WEST FIRST" lt rgb "green", \
"/home/vnd/WORK/noxim/bin/XY_algorithmv8/XY_results/outoffinal_XY_latency.dat" \
title "XY" lt rgb "blue"
#plot energy
set term postscript eps color enhanced "" 24
set output "multiple_files_v8_energy.eps"
set title "Average Energy (J)"
set xlabel "PIR %"
set xrange [0:200]
set ylabel "Total Power Energy (J)"
plot \
"/home/vnd/WORK/noxim/bin/DYAD_algorithmv8/DYAD_results/outoffinal_DYAD_energy.dat" \
title "DYAD" lt rgb "brown", \
"/home/vnd/WORK/noxim/bin/NEGATIVE_FIRST_algorithmv8/NEGATIVE_FIRST_results/outoffinal_NEGATIVE_FIRST_energy.dat" \
title "NEGATIVE FIRST" lt rgb "red", \
"/home/vnd/WORK/noxim/bin/NORTH_LAST_algorithmv8/NORTH_LAST_results/outoffinal_NORTH_LAST_energy.dat" \
title "NORTH LAST" lt rgb "orange", \
"/home/vnd/WORK/noxim/bin/ODD_EVEN_algorithmv8/ODD_EVEN_results/outoffinal_ODD_EVEN_energy.dat" \
title "ODD EVEN" lt rgb "yellow", \
"/home/vnd/WORK/noxim/bin/WEST_FIRST_algorithmv8/WEST_FIRST_results/outoffinal_WEST_FIRST_energy.dat" \
title "WEST FIRST" lt rgb "green", \
"/home/vnd/WORK/noxim/bin/XY_algorithmv8/XY_results/outoffinal_XY_energy.dat" \
title "XY" lt rgb "blue"
#plot throughput
set term postscript eps color enhanced "" 24
set output "multiple_files_v8_throughput.eps"
set title "Average Throughput (flits/cycle/IP)"
set xlabel "PIR %"
set xrange [0:200]
set ylabel "Average Throughput"
plot \
"/home/vnd/WORK/noxim/bin/DYAD_algorithmv8/DYAD_results/outoffinal_DYAD_throughput.dat" \
title "DYAD" lt rgb "brown", \
"/home/vnd/WORK/noxim/bin/NEGATIVE_FIRST_algorithmv8/NEGATIVE_FIRST_results/outoffinal_NEGATIVE_FIRST_throughput.dat" \
title "NEGATIVE FIRST" lt rgb "red", \
"/home/vnd/WORK/noxim/bin/NORTH_LAST_algorithmv8/NORTH_LAST_results/outoffinal_NORTH_LAST_throughput.dat" \
title "NORTH LAST" lt rgb "orange", \
"/home/vnd/WORK/noxim/bin/ODD_EVEN_algorithmv8/ODD_EVEN_results/outoffinal_ODD_EVEN_throughput.dat" \
title "ODD EVEN" lt rgb "yellow", \
"/home/vnd/WORK/noxim/bin/WEST_FIRST_algorithmv8/WEST_FIRST_results/outoffinal_WEST_FIRST_throughput.dat" \
title "WEST FIRST" lt rgb "green", \
"/home/vnd/WORK/noxim/bin/XY_algorithmv8/XY_results/outoffinal_XY_throughput.dat" \
title "XY" lt rgb "blue"
set key center ##legend placement
quit
EOF
#done
| true
|
43a42192f15a87aca777412def9426b17355894a
|
Shell
|
thaabit/dotfiles
|
/setup/ldap.sh
|
UTF-8
| 3,977
| 3.28125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Install and configure an OpenLDAP server (slapd) on CentOS 7.
# Based on: https://www.server-world.info/en/note?os=CentOS_7&p=openldap
#
# The base DN (dc=<host>,dc=<tld>) is derived from `hostname`, which must
# consist of exactly two lowercase labels ("host.tld").
db_config="/var/lib/ldap/DB_CONFIG"
if [ ! -e "$db_config" ]; then
    sudo cp /usr/share/openldap-servers/DB_CONFIG.example "$db_config"
    sudo chown ldap. /var/lib/ldap/DB_CONFIG
fi

firewall-cmd --add-service=ldap --permanent > /dev/null
firewall-cmd --reload > /dev/null

sudo systemctl start slapd
sudo systemctl enable slapd

# get host and tld from hostname
hostname=$(hostname)
if [[ $hostname =~ ^([a-z]+)\.([a-z]+)$ ]]; then
    host=${BASH_REMATCH[1]}
    tld=${BASH_REMATCH[2]}
else
    echo "Hostname not set or invalid: $hostname"
    exit
fi

# cert
read -p "Do you want to create a new cert (y/[n])? " -n 1 -r
echo
cert_path="/etc/pki/tls/certs/$host${tld}_ldap_cert.pem"
priv_path="/etc/pki/tls/certs/$host${tld}_ldap_priv.pem"
if [[ $REPLY =~ ^[Yy]$ ]]; then
    sudo openssl req -new -x509 -nodes -out $cert_path -keyout $priv_path -days 365
    sudo chown ldap:ldap $cert_path
    sudo chown ldap:ldap $priv_path
    sudo chmod 600 $priv_path
    echo "new cert at $cert_path"
    echo "new private key at $priv_path"
fi
echo

# root password
read -p "Do you want to create a new root pw (y/[n])? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
    echo "Enter root ldap password"
    ssha=$(slappasswd)
    # BUG FIX: this used to be `echo="dn: ..."` (a plain variable assignment),
    # which fed *nothing* to ldapadd.  A here-document delivers the LDIF.
    sudo ldapadd -Y EXTERNAL -H ldapi:/// <<EOF
dn: olcDatabase={0}config,cn=config
changetype: modify
replace: olcRootPW
olcRootPW: $ssha
EOF
fi
echo

#schemas
read -p "Do you want to copy the default schemas (y/[n])? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
    # BUG FIX: `if ! -e file` is not a valid test; it needs [ ... ].
    if [ ! -e /etc/openldap/schema/mozillaAbPersonAlpha.ldif ]; then
        dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
        sudo cp $dir/mozillaAbPersonAlpha.ldif /etc/openldap/schema
    fi
    schemas="cosine nis core inetorgperson mozillaAbPersonAlpha"
    for schema in $schemas; do
        echo "enabling $schema schema"
        sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/$schema.ldif
    done
fi
echo

read -p "Do you want to create the directory manager's pw (y/[n])? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
    echo "Enter directory manager's password"
    ssha=$(slappasswd)
    # Here-document keeps the embedded double quotes (required inside the
    # olcAccess values) intact; with the old `echo "..."` the shell stripped
    # them and produced corrupt ACLs.  LDIF entries are blank-line separated,
    # and continuation lines start with a single space.
    sudo ldapadd -Y EXTERNAL -H ldapi:/// <<EOF
dn: olcDatabase={1}monitor,cn=config
changetype: modify
replace: olcAccess
olcAccess: {0}to * by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth"
 read by dn.base="cn=Manager,dc=$host,dc=$tld" read by * none

dn: olcDatabase={2}hdb,cn=config
changetype: modify
replace: olcSuffix
olcSuffix: dc=$host,dc=$tld

dn: olcDatabase={2}hdb,cn=config
changetype: modify
replace: olcRootDN
olcRootDN: cn=Manager,dc=$host,dc=$tld

dn: olcDatabase={2}hdb,cn=config
changetype: modify
replace: olcRootPW
olcRootPW: $ssha

dn: olcDatabase={2}hdb,cn=config
changetype: modify
replace: olcAccess
olcAccess: {0}to attrs=userPassword,shadowLastChange by
 dn="cn=Manager,dc=$host,dc=$tld" write by anonymous auth by self write by * none
olcAccess: {1}to dn.base="" by * read
olcAccess: {2}to * by dn="cn=Manager,dc=$host,dc=$tld" write by * read
EOF
fi
echo

read -p "Do you want to create the basic OUs (y/[n])? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
    ldapadd -x -D cn=Manager,dc=$host,dc=$tld -W <<EOF
dn: dc=$host,dc=$tld
objectClass: top
objectClass: dcObject
objectclass: organization
o: $hostname
dc: $host

dn: cn=Manager,dc=$host,dc=$tld
objectClass: organizationalRole
cn: Manager
description: Directory Manager

dn: ou=People,dc=$host,dc=$tld
objectClass: organizationalUnit
ou: People

dn: ou=Contacts,dc=$host,dc=$tld
objectClass: organizationalUnit
ou: Mail

dn: ou=Group,dc=$host,dc=$tld
objectClass: organizationalUnit
ou: Group
EOF
fi
echo

sudo systemctl restart slapd
echo "finishing installing openldap"
| true
|
c965956fc5039c8f6b7c589379c546226bd8a761
|
Shell
|
isudnop/util-shell-script
|
/one_script.sh
|
UTF-8
| 3,363
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
###output function
# Echo the first argument wrapped in green colour codes obtained from tput.
# (If tput cannot resolve the terminal, the colour variables stay empty and
# the text is printed plain.)
echo_green () {
    green=$(tput setaf 2)
    reset=$(tput sgr0)
    word=$1
    echo $green $word $reset
}
# Echo the first argument wrapped in red colour codes obtained from tput.
echo_red () {
    red=$(tput setaf 1)
    reset=$(tput sgr0)
    word=$1
    echo $red $word $reset
}
# --- main installation sequence -------------------------------------------
echo_green "### One Script Shall Install Them All ###"
echo_green "### Start Add PPA and do upgrade first###"
sudo apt-get update
sudo add-apt-repository ppa:ondrej/php -y
sudo add-apt-repository ppa:webupd8team/java -y
##sudo apt-get -y upgrade
echo_green "### Update ###"
sudo apt-get update
echo_green "### Install zsh ###"
sudo apt-get -qy install terminator
sudo apt-get -qy install zsh
sudo chsh -s $(which zsh)
echo_green "### Install CURL & Oh My Zsh! ###"
sudo apt-get -y install curl wget
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
echo_green "### Install PHP ###"
# BUG FIX: the last package must NOT end with a line continuation — the
# trailing backslash after php-pdo made the following echo_green line part
# of the apt-get command.
sudo apt-get -qy install php \
    php-mysql \
    php-soap \
    php-mbstring \
    php-mcrypt \
    php-mongodb \
    php-cli \
    php-xml \
    php-zip \
    php-tokenizer \
    php-pdo
echo_green "### Install Composer ###"
EXPECTED_SIGNATURE=$(wget -q -O - https://composer.github.io/installer.sig)
php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
ACTUAL_SIGNATURE=$(php -r "echo hash_file('SHA384', 'composer-setup.php');")
if [ "$EXPECTED_SIGNATURE" != "$ACTUAL_SIGNATURE" ]
then
    >&2 echo_red "ERROR: Invalid installer signature"
    rm composer-setup.php
    # BUG FIX: was `echo 1`, which carried on and ran the (just deleted)
    # installer; abort with a non-zero status instead.
    exit 1
fi
php composer-setup.php --quiet
RESULT=$?
rm composer-setup.php
echo $RESULT
echo_green "### Install GIT ###"
sudo apt-get -y install git
echo_green "### Install python & python3 ###"
sudo apt-get -y install python-pip python-dev python3-pip python3-dev python-software-properties
echo_green "### Install debconf-utils ###"
sudo apt-get -y install debconf-utils
echo_green "### Install thefuck ###"
sudo pip install psutil thefuck
echo_green "### Install htop ###"
sudo apt-get -y install htop
echo_green "### Install NodeJs ###"
sudo apt-get -y install nodejs
echo_green "### Install VIM ###"
sudo apt-get -y install vim
echo_green "### Install JAVA 8 ###"
# Pre-accept the Oracle licence so the installer runs non-interactively.
echo "oracle-java8-installer shared/accepted-oracle-license-v1-1 select true" | sudo debconf-set-selections
sudo apt-get install -y oracle-java8-installer
echo_green "### Install Angular CLI ###"
sudo npm install -g @angular/cli
echo_green "### Install Docker ###"
sudo apt-get -y install \
    apt-transport-https \
    ca-certificates \
    software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
sudo add-apt-repository \
   "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
   $(lsb_release -cs) \
   stable"
sudo apt-get update
sudo apt-get -y install docker-ce
echo_green "### Install Docker Compose 1.16.0 ###"
# BUG FIX: with `sudo curl ... > file` the redirection runs in the
# unprivileged shell and fails with "Permission denied"; write the file
# through `sudo tee` instead (this replaces the old manual workaround hints).
curl -L https://github.com/docker/compose/releases/download/1.16.0-rc1/docker-compose-`uname -s`-`uname -m` | sudo tee /usr/local/bin/docker-compose > /dev/null
sudo chmod +x /usr/local/bin/docker-compose
| true
|
0c13e24cdcbb07be454536cba093696c24ba1674
|
Shell
|
lestrot/shell_linux
|
/compt.sh
|
UTF-8
| 64
| 3.03125
| 3
|
[] |
no_license
|
# Count from 0 to 5 inclusive, printing each value on its own line.
a=0
while [ "$a" -lt 6 ]
do
    echo "$a"
    # Use builtin shell arithmetic instead of forking `expr` per iteration.
    a=$((a + 1))
done
| true
|
6a092f9f128da7ab1643e05d18cd10f4bf56a235
|
Shell
|
federbenjamin/Autodownloader
|
/downloader.sh
|
UTF-8
| 3,991
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# downloader.sh — download a media file with axel, then sort it into the
# Movies or TV-show library based on filename patterns.
#
#   $1  DL_LINK             URL to download
#   $2  NOTIFICATION_EMAIL  optional; when set, progress/result messages are
#                           sent via the sendFinishedMessage AppleScript
#
# NOTE(review): appears to target a macOS host (osascript) with axel and
# unar installed — confirm before reuse.
DL_LINK=$1
NOTIFICATION_EMAIL=$2
TEMP_DIR=~/"Movies/Incomplete/"
MOVIE_DIR=~/"Movies/Movies/"
TV_DIR="/Volumes/TVShowHDD/TV_Shows/"
DL_LOG="logs/downloads/"
mkdir $TEMP_DIR $DL_LOG $MOVIE_DIR $TV_DIR
# Extract filename from download link
FILENAME=$(echo $1 | sed 's/%2F/\//g' | egrep -oe '[^\/]*$.*')
EXTENSION=$(echo $FILENAME | egrep -oe '.(zip|avi|mp4|mkv)')
# Links without a recognised media extension are treated as zip archives.
if [[ ! "$EXTENSION" ]]; then
FILENAME=$(echo "$FILENAME.zip")
EXTENSION='.zip'
fi
# If download is not a zip file, then it was manually downloaded, if email was provided send a starting message
if [[ "$EXTENSION" != '.zip' && "$NOTIFICATION_EMAIL" ]]; then
osascript sendFinishedMessage.applescript $NOTIFICATION_EMAIL "Starting download: $FILENAME ...."
fi
# Download file and store output in variable
axel -a -n 30 -s 20000000 -o "$TEMP_DIR$FILENAME" "$DL_LINK" &>"$DL_LOG$FILENAME.txt"
AXELOUTPUT=$(tail -3 "$DL_LOG$FILENAME.txt")
FINISHTIME=$(date +"%r")
# Success is detected by "100%" appearing in axel's final progress lines.
AXELSUCCESS=$(echo $AXELOUTPUT | grep "100%")
# Download failed
if [[ ! "$AXELSUCCESS" ]]; then
# Log error accordingly
LINE="-------------------------------------------------------------------------"
echo -e "\n$FINISHTIME:\n$LINE\nLink:\n$DL_LINK\nfailed to download\n" >> logs/basherror.log
echo -e "Axel output found at:\n$DL_LOG$FILENAME.txt\n$LINE\n" >> logs/basherror.log
echo -e $FINISHTIME"\nError Downloading File: $FILENAME\n\n" >> logs/bashapplication.log
if [[ "$NOTIFICATION_EMAIL" ]]; then
osascript sendFinishedMessage.applescript $NOTIFICATION_EMAIL "$FILENAME failed to download."
fi
#exit 1
# Download was successful
else
echo -e $FINISHTIME"\n$FILENAME Download Complete\n\n" >> logs/bashapplication.log
# Replace non-space characters between words with spaces in the filename
FILENAMEWITHSPACES=$(echo $FILENAME | tr ._ ' ' | sed 's/%[12][0-9A-F]/ /g')
# Determine filename based on if file fits a TV Show name format (US or UK or Daily Show)
# The patterns below are tried IN ORDER; CHARTOREMOVE records how many
# trailing characters of the match must later be stripped to recover the
# bare show name, so each assignment is paired with its pattern.
# Standard US
DOWNLOADNAME=$(echo $FILENAMEWITHSPACES | egrep -oe '.*?[sS][0-3][0-9][eE][0-3][0-9]')
CHARTOREMOVE=8
# UK
if [[ "$DOWNLOADNAME" = '' ]]; then
DOWNLOADNAME=$(echo $FILENAMEWITHSPACES | egrep -oe '.*?1?[0-9]x[0-3][0-9]')
CHARTOREMOVE=6
fi
# Daily Show
if [[ "$DOWNLOADNAME" = '' ]]; then
DOWNLOADNAME=$(echo $FILENAMEWITHSPACES | egrep -oe '.*?20([[:digit:]]{2} ){3}')
CHARTOREMOVE=12
fi
# Full Season
if [[ "$DOWNLOADNAME" = '' ]]; then
DOWNLOADNAME=$(echo $FILENAMEWITHSPACES | egrep -oie '.*(season |s)[0-9]{1,2}')
CHARTOREMOVE=1
fi
# Full Series
if [[ "$DOWNLOADNAME" = '' ]]; then
DOWNLOADNAME=$(echo $FILENAMEWITHSPACES | egrep -oie '.*(complete.*series|series.*complete)')
CHARTOREMOVE=0
fi
# Process Movies
if [[ "$DOWNLOADNAME" = '' ]]; then
DOWNLOADNAME=$(echo $FILENAMEWITHSPACES | egrep -oe '.*?(480p|720p|1080p)')
if [[ "$DOWNLOADNAME" != '' ]]; then
mv "$TEMP_DIR$FILENAME" "$MOVIE_DIR$DOWNLOADNAME$EXTENSION"
fi
# Process TV Shows
else
# CHARTOREMOVE==1 marks the "Full Season" case, which needs the perl
# lookahead below to isolate the show name; all other cases just strip
# the trailing CHARTOREMOVE characters.
if [[ "$CHARTOREMOVE" != '1' ]]; then
SHOWNAME=$(echo $DOWNLOADNAME | rev | cut -c $CHARTOREMOVE- | rev)
else
SHOWNAME=$(echo $DOWNLOADNAME | \
perl -nle'print $& if m{^[a-zA-Z0-9 &]+?(?=[^a-zA-Z0-9]*?([Ss]eason|SEASON|[Ss][\d]{1,2}))}' \
| rev | cut -c $CHARTOREMOVE- | rev )
fi
SHOWNAME=$(echo $SHOWNAME | tr '[:upper:]' '[:lower:]')
mkdir "$TV_DIR$SHOWNAME"
# Unzip if file is compressed, otherwise do nothing, then sort
if [[ "$EXTENSION" = '.zip' ]]; then
ZIPSUCCESS=$(unar -o "$TV_DIR$SHOWNAME" "$TEMP_DIR$FILENAME")
if [[ "$ZIPSUCCESS" ]]; then
rm "$TEMP_DIR$FILENAME"
fi
else
mv "$TEMP_DIR$FILENAME" "$TV_DIR$SHOWNAME"
fi
fi
rm "$DL_LOG$FILENAME.txt"
# Send success message
if [[ "$NOTIFICATION_EMAIL" ]]; then
if [[ "$DOWNLOADNAME" != '' ]]; then
osascript sendFinishedMessage.applescript $NOTIFICATION_EMAIL "$DOWNLOADNAME has been downloaded."
else
osascript sendFinishedMessage.applescript $NOTIFICATION_EMAIL "$FILENAME has been downloaded, but requires manual sorting."
fi
fi
fi
| true
|
0d594d9079a35ef7f89808624c15b9978e6f1e87
|
Shell
|
kubovy/oh-my-zsh
|
/themes/poterion-lite.zsh-theme
|
UTF-8
| 4,060
| 3.109375
| 3
|
[] |
no_license
|
# ------------------------------------------------------------------------
# Juan G. Hurtado oh-my-zsh theme
# (Needs Git plugin for current_branch method)
# ------------------------------------------------------------------------
# Color shortcuts
# NOTE(review): $fg/$fg_bold/$reset_color come from zsh's `colors` module,
# which oh-my-zsh loads — confirm when using this theme standalone.
RED=$fg[red]
GREEN=$fg[green]
BLUE=$fg[blue]
CYAN=$fg[cyan]
MAGENTA=$fg[magenta]
YELLOW=$fg[yellow]
WHITE=$fg[white]
RED_BOLD=$fg_bold[red]
GREEN_BOLD=$fg_bold[green]
BLUE_BOLD=$fg_bold[blue]
CYAN_BOLD=$fg_bold[cyan]
YELLOW_BOLD=$fg_bold[yellow]
MAGENTA_BOLD=$fg_bold[magenta]
WHITE_BOLD=$fg_bold[white]
RESET_COLOR=$reset_color
# Format for git_prompt_info()
# The %{...%} markers tell zsh the enclosed escape codes occupy no width.
ZSH_THEME_GIT_PROMPT_PREFIX="%{$MAGENTA%}"
ZSH_THEME_GIT_PROMPT_SUFFIX=""
# Format for parse_git_dirty()
ZSH_THEME_GIT_PROMPT_DIRTY="%{$RED%}*"
ZSH_THEME_GIT_PROMPT_CLEAN="%{$GREEN%}√"
# Format for git_prompt_status()
ZSH_THEME_GIT_PROMPT_UNMERGED="%{$RED%}≈"
ZSH_THEME_GIT_PROMPT_DELETED="%{$RED%}-"
ZSH_THEME_GIT_PROMPT_RENAMED="%{$YELLOW%}ρ"
ZSH_THEME_GIT_PROMPT_MODIFIED="%{$YELLOW%}~"
ZSH_THEME_GIT_PROMPT_ADDED="%{$GREEN%}+"
ZSH_THEME_GIT_PROMPT_UNTRACKED="%{$WHITE%}?"
# Format for git_prompt_ahead()
ZSH_THEME_GIT_PROMPT_AHEAD="%{$RED%}!"
# Format for git_remote_status()
ZSH_THEME_GIT_PROMPT_BEHIND_REMOTE="%{$RED%}!"
ZSH_THEME_GIT_PROMPT_AHEAD_REMOTE="%{$MAGENTA%}!"
ZSH_THEME_GIT_PROMPT_DIVERGED_REMOTE="%{$YELLOW%}!"
# Format for git_prompt_long_sha() and git_prompt_short_sha()
ZSH_THEME_GIT_PROMPT_SHA_BEFORE="%{$WHITE%}sha:%{$YELLOW%}"
ZSH_THEME_GIT_PROMPT_SHA_AFTER=" "
# Memory limits (percent of total RAM) used by rprompt_mem's colouring.
MEMORY_WARNING=75
MEMORY_CRITICAL=95
# Emit a "[<branch info><remote status>] " prompt segment, but only when the
# working directory is inside a git repository (probed with `git status`).
prompt_git() {
  if git status &> /dev/null; then
    echo -n "%{$WHITE%}[$(git_prompt_info)$(git_remote_status)%{$WHITE%}] "
  fi
}
# Show "user@host" normally, or just the host in red when running as root.
prompt_user() {
  # BUG FIX: UID is a readonly special shell parameter; the old
  # `UID=`id -u`` assignment printed "read-only variable" on every prompt
  # render.  Just read the builtin value instead.
  if [[ $UID -eq 0 ]]; then
    echo -n "%{$RED%}%m"
  else
    echo -n "%{$GREEN_BOLD%}%n@%m"
  fi
}
# Smiley reflecting the last command's exit status (global RETVAL,
# captured by build_prompt): ":)" for success, ":(" otherwise.
prompt_result() {
  case "$RETVAL" in
    0) echo -n "%{$GREEN%}:) " ;;
    *) echo -n "%{$RED%}:( " ;;
  esac
}
# Show the number of background jobs in parentheses; print nothing when the
# job table is empty.
prompt_jobs() {
  local JOBS
  JOBS=$(jobs -l | wc -l)
  if (( JOBS > 0 )); then
    echo -n "%{$YELLOW%}(%{$JOBS%}) "
  fi
}
# Logged-in user count segment — currently disabled; emits nothing.
rprompt_logged_users() {
  : # intentional no-op; re-enable with: users | wc -w
}
# Right-prompt segment: 1/5/15-minute load averages normalised per CPU core,
# coloured green (<1), yellow (==1) or red (>1).
# NOTE(review): reads /proc/cpuinfo, so this segment is Linux-only — confirm.
rprompt_load() {
up=`uptime | awk -F"load average:" '{print $2}' | tr -d "[:blank:]"`
local L1=$(echo $up | cut -d"," -f1)
local L5=$(echo $up | cut -d"," -f2)
local L15=$(echo $up | cut -d"," -f3)
local CORES=$(grep 'model name' /proc/cpuinfo | wc -l)
# Per-core load; bc with scale=2 emits values such as ".53".
local LC1=$(echo "scale=2;$L1/$CORES" | bc)
local LC5=$(echo "scale=2;$L5/$CORES" | bc)
local LC15=$(echo "scale=2;$L15/$CORES" | bc)
echo -n "%{$WHITE%}l:"
# NOTE(review): -eq/-lt/-gt are *integer* comparisons; bc's decimal output
# (e.g. ".53") is not a valid integer, so these branches look fragile for
# fractional loads — verify behaviour on the target shell.
if [[ $LC1 -eq 0 ]]; then
echo -n "%{$GREEN%}0.0$LC1 "
elif [[ $LC1 -lt 1 ]]; then
echo -n "%{$GREEN%}0$LC1 "
elif [[ $LC1 -eq 1 ]]; then
echo -n "%{$YELLOW%}$LC1 "
elif [[ $LC1 -gt 1 ]]; then
echo -n "%{$RED%}$LC1 "
fi
if [[ $LC5 -eq 0 ]]; then
echo -n "%{$GREEN%}0.0$LC5 "
elif [[ $LC5 -lt 1 ]]; then
echo -n "%{$GREEN%}0$LC5 "
elif [[ $LC5 -eq 1 ]]; then
echo -n "%{$YELLOW%}$LC5 "
elif [[ $LC5 -gt 1 ]]; then
echo -n "%{$RED%}$LC5 "
fi
if [[ $LC15 -eq 0 ]]; then
echo -n "%{$GREEN%}0.0$LC15"
elif [[ $LC15 -lt 1 ]]; then
echo -n "%{$GREEN%}0$LC15"
elif [[ $LC15 -eq 1 ]]; then
echo -n "%{$YELLOW%}$LC15"
elif [[ $LC15 -gt 1 ]]; then
echo -n "%{$RED%}$LC15"
fi
echo -n " "
}
# Memory-usage segment: "m:<pct>%" coloured green/yellow/red against the
# MEMORY_WARNING / MEMORY_CRITICAL thresholds defined at the top of the file.
rprompt_mem() {
  local USED=$(echo "scale=1;$(free | grep "Mem:" | awk '{print $3}')/$(free | grep "Mem:" | awk '{print $2}')*100" | bc | cut -d"." -f1)
  echo -n "%{$WHITE%}m:"
  # BUG FIX: the critical check must run before the warning check; previously
  # any usage above MEMORY_CRITICAL was already caught by the (lower)
  # MEMORY_WARNING branch, so the red state was unreachable.
  if [[ $USED -gt $MEMORY_CRITICAL ]]; then
    echo -n "%{$RED%}"
  elif [[ $USED -gt $MEMORY_WARNING ]]; then
    echo -n "%{$YELLOW%}"
  else
    echo -n "%{$GREEN%}"
  fi
  echo -n "$USED%%"
}
# Assemble the left prompt: user@host, working dir, git info, job count and
# a status smiley, ending with a "»" separator.
build_prompt() {
  RETVAL=$?   # capture the last exit status before any command clobbers it
  prompt_user
  printf '%s' "%{$WHITE%}:%{$BLUE_BOLD%}%~%u "
  prompt_git
  prompt_jobs
  prompt_result
  printf '%s' "%{$BLUE_BOLD%}»%{$RESET_COLOR%}"
}
# Prompt format » ►
# NOTE(review): the single-quoted $(build_prompt) is re-evaluated at display
# time only when the prompt_subst option is enabled (oh-my-zsh sets it) —
# confirm before using this theme outside oh-my-zsh.
PROMPT='%{%f%b%k%}$(build_prompt) '
#RPROMPT='%{$GREEN_BOLD%}%{$WHITE%}[$(git_prompt_short_sha)$(rprompt_logged_users) $(rprompt_load) $(rprompt_mem)%{$WHITE%}]%{$RESET_COLOR%}'
| true
|
3c5e26cb9af2240918e45234a1a8469ec99c0263
|
Shell
|
zakariakov/elokab
|
/etc/skel/.xprofile
|
UTF-8
| 151
| 2.640625
| 3
|
[] |
no_license
|
# Session-wide environment: default editor, plus Qt theming for non-KDE
# desktops (KDE themes Qt applications itself).
export EDITOR=/usr/bin/nano
case "$XDG_CURRENT_DESKTOP" in
  KDE)
    echo $XDG_CURRENT_DESKTOP
    ;;
  *)
    export QT_QPA_PLATFORMTHEME="qt5ct"
    ;;
esac
| true
|
9ef2e9ac84481a1be6f92f3c2ae7ba8e5226a001
|
Shell
|
trigrass2/grisp2-rtems-toolchain
|
/debug/openocd/start-openocd.sh
|
UTF-8
| 427
| 3.234375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Launch OpenOCD for the GRiSP2 board using the toolchain bundled under
# ../../rtems/5, initialising the target and halting it after reset.
# Any extra arguments are forwarded to openocd (e.g. more -c commands).
# be more verbose
set -x
# exit on wrong command and undefined variables
set -e -u
# find out own directory
SCRIPTPATH=$(readlink -- "$0" || echo "$0")
SCRIPTDIR=$(CDPATH= cd -- "$(dirname -- "$SCRIPTPATH")" && pwd)
PREFIX="${SCRIPTDIR}/../../rtems/5"
# Prefer the toolchain's own binaries (including openocd) over system ones.
export PATH="${PREFIX}/bin:${PATH}"
openocd -s "${PREFIX}/share/openocd/scripts/" \
-f "${SCRIPTDIR}/openocd-grisp2.cfg" \
-c "init" \
-c "reset halt" \
"$@"
| true
|
10cac573eb6b396486cb4e97c9c16a45172a66dd
|
Shell
|
krwmilz/citrun
|
/tt/ccitrunrun.sh
|
UTF-8
| 1,852
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh -u
#
# Test that citrun run on itself works and the resulting binaries run.
#
# NOTE(review): the unconditional `exit 0` below short-circuits the whole
# test, leaving everything after it dead — confirm this is a deliberate
# temporary disable.
exit 0
if [ `uname` = "OpenBSD" ]; then
rm -rf /usr/ports/devel/ccitrunrun
cp -R distrib/openbsd/ccitrunrun /usr/ports/devel/
fi
# plan/ok/enter_tmpdir/pkg_* helpers come from this shared test library.
. tt/package.subr "devel/ccitrunrun"
plan 13
enter_tmpdir
export NO_CHECKSUM=1
pkg_check_deps
pkg_clean
pkg_build
# Expected instrumentation summary of the package build.
cat <<EOF > check.good
Summary:
24 Source files used as input
2 Application link commands
14 Rewrite parse warnings
24 Rewrite successes
23 Rewritten source compile successes
1 Rewritten source compile failures
Totals:
4907 Lines of source code
222 Function definitions
179 If statements
24 For loops
5 While loops
3 Switch statements
100 Return statement values
985 Call expressions
15568 Total statements
1475 Binary operators
5 Errors rewriting source
EOF
pkg_check
# Expected translation-unit list produced by running the GL demo binary.
cat <<EOF > tu_list.good
src/demo-atlas.cc 149
src/demo-font.cc 253
src/demo-glstate.cc 153
src/demo-shader.cc 210
src/gl_buffer.cc 192
src/gl_main.cc 216
src/gl_view.cc 526
src/glyphy/glyphy-arcs.cc 321
src/glyphy/glyphy-blob.cc 329
src/glyphy/glyphy-extents.cc 90
src/glyphy/glyphy-outline.cc 328
src/glyphy/glyphy-sdf.cc 92
src/glyphy/glyphy-shaders.cc 40
src/matrix4x4.c 399
src/process_dir.cc 47
src/process_file.cc 177
src/trackball.c 338
EOF
$workdir/src/ccitrunrun-gl < /dev/null
ok "is write_tus.pl exit code 0" \
perl -I$treedir $treedir/tt/write_tus.pl ${CITRUN_PROCDIR}ccitrunrun-gl_*
pkg_check_manifest
# Expected translation-unit list for the instrumentation binary itself.
cat <<EOF > tu_list.good
src/inst_action.cc 118
src/inst_frontend.cc 262
src/inst_main.cc 145
src/inst_visitor.cc 188
EOF
$workdir/src/ccitrunrun-inst
ok "is write_tus.pl exit code 0" \
perl -I$treedir $treedir/tt/write_tus.pl ${CITRUN_PROCDIR}ccitrunrun-inst*
pkg_check_manifest
pkg_clean
| true
|
fcef3a1f26b20da2989b325fdf0131fa8eb01346
|
Shell
|
BM1880-BIRD/bm1880-system-sdk
|
/ramdisk/target/overlay/bm1880/etc/step3.sh
|
UTF-8
| 1,673
| 3.3125
| 3
|
[] |
no_license
|
# eMMC upgrade, step 3: verify free space, unpack the upgrade archive,
# flash the boot/recovery images, and replace the root filesystem.
cd /data
# /data needs roughly twice the archive size free, because the tarball and
# its extracted contents coexist during the upgrade.
DATA_FREE_SPACE=$(df -k /data | tail -n1 | awk '{print $4}')
DATA_FREE_SPACE_MB=$((DATA_FREE_SPACE / 1024))
echo "Data partition free space(MB):"
echo $DATA_FREE_SPACE_MB
EMMC_TAR_GZ_SIZE_MB=$(du -m emmc.tar.gz | awk '{print $1}')
echo "emmc.tar.gz size(MB)"
echo $EMMC_TAR_GZ_SIZE_MB
DATA_NEED_SPACE_MB=$((EMMC_TAR_GZ_SIZE_MB * 2))
echo "Data partition need space(MB)"
echo $DATA_NEED_SPACE_MB
if [ $DATA_NEED_SPACE_MB -lt $DATA_FREE_SPACE_MB ]; then
    echo "It is able to upgrade"
else
    NEED_SPACE=$((DATA_NEED_SPACE_MB - DATA_FREE_SPACE_MB))
    echo "No enough free space, stop upgrade"
    echo "Need more free space(MB)"
    echo $NEED_SPACE
    # BUG FIX: abort with a non-zero status so callers can detect the failure.
    exit 1
fi
tar zxvf emmc.tar.gz
cd emmc_nnmc_pkg
# Same free-space check for the root filesystem partition.
ROOTFS_FREE_SPACE=$(df -k /rootfs | tail -n1 | awk '{print $4}')
ROOTFS_FREE_SPACE_MB=$((ROOTFS_FREE_SPACE / 1024))
echo "rootfs partition free space(MB) :"
echo $ROOTFS_FREE_SPACE_MB
ROOTFS_TAR_GZ_SIZE_MB=$(du -m rootfs.tar.gz | awk '{print $1}')
echo "rootfs.tar.gz size(MB)"
echo $ROOTFS_TAR_GZ_SIZE_MB
ROOTFS_NEED_SPACE_MB=$((ROOTFS_TAR_GZ_SIZE_MB * 2))
echo "rootfs partition need space(MB)"
echo $ROOTFS_NEED_SPACE_MB
if [ $ROOTFS_NEED_SPACE_MB -lt $ROOTFS_FREE_SPACE_MB ]; then
    echo "It is able to upgrade"
else
    NEED_SPACE=$((ROOTFS_NEED_SPACE_MB - ROOTFS_FREE_SPACE_MB))
    echo "No enough free space, stop upgrade rootfs partition"
    echo "Need more free space(MB)"
    echo $NEED_SPACE
    exit 1
fi
# Flash the boot and recovery images onto the raw eMMC partitions.
dd if=emmcboot.itb of=/dev/mmcblk0p1
dd if=recovery.itb of=/dev/mmcblk0p2
ls /
mount
cp rootfs.tar.gz /rootfs/
cd /rootfs
tar zxvf rootfs.tar.gz --strip-components=1
rm /rootfs/rootfs.tar.gz
rm -rf /data/emmc.tar.gz /data/emmc_nnmc_pkg
sync
| true
|
45304d16f1f3b54369d0ba824433fc147dc3cc90
|
Shell
|
MatteoGioioso/aws-tools
|
/periodicCostsNotification/deploy.sh
|
UTF-8
| 528
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Package and deploy the periodic-costs-notification CloudFormation stack.
#
# Usage: deploy.sh <notification-email>
#
# The stack name doubles as the S3 deployment bucket name.

# ROBUSTNESS FIX: fail early with a usage hint when the e-mail argument is
# missing; previously an empty EMAIL parameter was silently passed through.
if [ -z "${1:-}" ]; then
    echo "usage: $0 <notification-email>" >&2
    exit 2
fi

export EMAIL=${1}
export STACK_NAME=periodic-costs-notification
export BUCKET=${STACK_NAME}

# make the deployment bucket in case it doesn't exist
aws s3 mb s3://"${BUCKET}"

aws cloudformation package \
    --template-file template.yaml \
    --output-template-file output.yaml \
    --s3-bucket "${BUCKET}"

# the actual deployment step
aws cloudformation deploy \
    --template-file output.yaml \
    --stack-name "${STACK_NAME}" \
    --capabilities CAPABILITY_IAM \
    --parameter-overrides \
    EMAIL="${EMAIL}"
| true
|
2c0e16701070ebf5282e0d65b24fcb42b803af96
|
Shell
|
IAmBullsaw/liu-utils
|
/scripts/liu-search-person.sh
|
UTF-8
| 1,263
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Look up a person in the LiU people search and print selected fields
# (email and/or phone) extracted from the JSON response.

function usage() {
    echo -e "usage:\n$0 [-flags] name\nExample '$0 [-flags] marco kuhlman'"
}

function help() {
    echo "Help for bash/liu-search-person.sh"
    echo ""
    echo "DESCRIPTION
    Fetches given parameters by searching the web with the persons name"
    echo "USAGE
    `usage`"
    echo ""
    echo "OPTIONS
    -h  Print this 'help'
    -m  Print email"
    echo ""
    echo "EXAMPLE
    $0 -m 'marco kuhlman'
    $0 -mt 'marco'"
    echo ""
}

### Functions
# search <field-regex> <name...>
# Queries the LiU people-search REST endpoint and prints every JSON value
# whose field name matches <field-regex>.
function search() {
    regex=$1
    shift
    # Percent-encode spaces so multi-word names form one valid query URL.
    searchterm=$(printf '%s' "$*" | sed 's/ /%20/g')
    url="https://search.liu.se/jellyfish/rest/apps/LiU/searchers/sitecore2_people_only?query=$searchterm&lang=sv"
    awk -F"[:\"]" '/'"$regex"'/{printf("%s\n", $5)}' <(curl -L "$url" 2> /dev/null)
}

### Main program flow
[ $# -gt 0 ] || { usage; exit 1; }

# Get options and call functions accordingly
regex=''
while getopts 'mht' flag; do
  case "${flag}" in
    h) HELP=true ;;
    m) regex+='email_strict|' ;;
    t) regex+='phone_strict|' ;;
    # BUG FIX: this arm used to call the undefined function `error` with the
    # command substitution $(flag); report the bad option on stderr instead.
    *) echo "Unexpected option: -${OPTARG}" >&2; exit 2 ;;
  esac
done
shift $((OPTIND-1)) # Eat options!

# Print help
if [ -n "${HELP:-}" ]; then
    help
    exit 0
fi

# BUG FIX: ${regex::-1} raises "substring expression < 0" when no field flag
# was given; require at least one flag and strip the trailing '|' safely.
if [ -z "$regex" ]; then
    usage
    exit 1
fi

searchterm=$@
search "${regex%|}" $searchterm
| true
|
869daa0ce0125e0340e1ce7959500ecd261eab90
|
Shell
|
kirinnee/nix-ci
|
/scripts/ci/docker-cd.sh
|
UTF-8
| 2,806
| 3.59375
| 4
|
[] |
no_license
|
#! /bin/sh
#
# CI/CD: build the project's Docker image (reusing the previously published
# image as a layer cache), tag it with the release tag, and push it to both
# the GitHub Package Registry and an external registry.
#
# NOTE(review): the tag normalisation below shells out to `sd`; confirm it
# is installed on the CI image.
# check for necessary env vars
[ "${DOMAIN}" = '' ] && echo "'DOMAIN' env var not set" && exit 1
[ "${GITHUB_REPO_REF}" = '' ] && echo "'GITHUB_REPO_REF' env var not set" && exit 1
[ "${GITHUB_TAG}" = '' ] && echo "'GITHUB_TAG' env var not set" && exit 1
[ "${CI_DOCKER_IMAGE}" = '' ] && echo "'CI_DOCKER_IMAGE' env var not set" && exit 1
[ "${CI_DOCKER_CONTEXT}" = '' ] && echo "'CI_DOCKER_CONTEXT' env var not set" && exit 1
[ "${CI_DOCKERFILE}" = '' ] && echo "'CI_DOCKERFILE' env var not set" && exit 1
[ "${EXTERNAL_IMAGE_NAME}" = '' ] && echo "'EXTERNAL_IMAGE_NAME' env var not set" && exit 1
[ "${GITHUB_DOCKER_PASSWORD}" = '' ] && echo "'GITHUB_DOCKER_PASSWORD' env var not set" && exit 1
[ "${GITHUB_DOCKER_USER}" = '' ] && echo "'GITHUB_DOCKER_USER' env var not set" && exit 1
[ "${EXTERNAL_DOCKER_PASSWORD}" = '' ] && echo "'EXTERNAL_DOCKER_PASSWORD' env var not set" && exit 1
[ "${EXTERNAL_DOCKER_USER}" = '' ] && echo "'EXTERNAL_DOCKER_USER' env var not set" && exit 1
# Login to GitHub Registry
echo "${GITHUB_DOCKER_PASSWORD}" | docker login docker.pkg.github.com -u "${GITHUB_DOCKER_USER}" --password-stdin
# Obtain image
IMAGE_ID="${DOMAIN}/${GITHUB_REPO_REF}/${CI_DOCKER_IMAGE}"
IMAGE_ID=$(echo "${IMAGE_ID}" | tr '[:upper:]' '[:lower:]') # convert to lower case
# Fix Tag
# Extract "1.2.3" from a git ref such as "refs/tags/v1.2.3".
# shellcheck disable=SC2016
GITHUB_TAG="$(echo "${GITHUB_TAG}" | sd '^.*/v([^/]*)$' '$1')"
# Generate image references
LATEST_IMAGE_REF="${IMAGE_ID}:latest"
CACHED_IMAGE_REF="${IMAGE_ID}:cache"
TAGGED_IMAGE_REF="${IMAGE_ID}:${GITHUB_TAG}"
EXTERNAL_TAGGED_IMAGE_REF="${EXTERNAL_IMAGE_NAME}:${GITHUB_TAG}"
EXTERNAL_LATEST_IMAGE_REF="${EXTERNAL_IMAGE_NAME}:latest"
# Print out reference for debug
echo Latest Image Ref: "${LATEST_IMAGE_REF}"
echo Cached Image Ref: "${CACHED_IMAGE_REF}"
echo Tagged Image Ref: "${TAGGED_IMAGE_REF}"
echo
echo ==============================
echo
echo External Tagged Image Ref: "${EXTERNAL_TAGGED_IMAGE_REF}"
echo External Latest Image Ref: "${EXTERNAL_LATEST_IMAGE_REF}"
# pull cache and tag it as cache
# `|| true` keeps the very first build (no published image yet) going.
docker pull "${LATEST_IMAGE_REF}" || true
docker tag "${LATEST_IMAGE_REF}" "${CACHED_IMAGE_REF}" || true
# build image
docker build "${CI_DOCKER_CONTEXT}" -f "${CI_DOCKERFILE}" --tag "${CI_DOCKER_IMAGE}" --cache-from="${CACHED_IMAGE_REF}"
# tag built images
docker tag "${CI_DOCKER_IMAGE}" "${TAGGED_IMAGE_REF}"
docker tag "${CI_DOCKER_IMAGE}" "${EXTERNAL_TAGGED_IMAGE_REF}"
docker tag "${CI_DOCKER_IMAGE}" "${EXTERNAL_LATEST_IMAGE_REF}"
# docker push to Github Registry
docker push "${TAGGED_IMAGE_REF}"
# Login to External registry
echo "${EXTERNAL_DOCKER_PASSWORD}" | docker login -u "${EXTERNAL_DOCKER_USER}" --password-stdin
# docker push to external repository
docker push "${EXTERNAL_TAGGED_IMAGE_REF}"
docker push "${EXTERNAL_LATEST_IMAGE_REF}"
| true
|
3baa3d3f847b269f398ebc44fd4eea6df27de5a3
|
Shell
|
onzyone/k8s-tilt
|
/charts/stable/ambassador-chart/ci/providers/kubernaut.sh
|
UTF-8
| 1,577
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Helper for the ambassador-chart CI: manage a disposable Kubernetes cluster
# claimed via kubernaut.  Sub-commands: setup | create | delete | get-kubeconfig.
CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
[ -d "$CURR_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; }
TOP_DIR="$CURR_DIR/../.."
# NOTE(review): `info` and `abort` appear to come from this sourced helper —
# confirm they are defined there.
# shellcheck source=../common.sh
source "$CURR_DIR/../common.sh"
#########################################################################################
# One unique claim per user/invocation; the claim name is persisted to
# CLAIM_FILENAME so later `delete` / `get-kubeconfig` calls can find it.
CLAIM_NAME="ambassador-chart-${USER}-$(uuidgen)"
CLAIM_FILENAME="$HOME/kubernaut-claim.txt"
DEV_KUBECONFIG="$HOME/.kube/${CLAIM_NAME}.yaml"
KUBERNAUT_CONF="$CURR_DIR/kconf.b64"
#########################################################################################
[ -f "$KUBERNAUT_CONF" ] || abort "no kubernaut conf file found at $KUBERNAUT_CONF"
case $1 in
setup)
info "Creating kubernaut config..."
base64 -d < "$KUBERNAUT_CONF" | ( cd ~ ; tar xzf - )
echo "$CLAIM_NAME" > "$CLAIM_FILENAME"
;;
create)
info "Removing any previous claim for $CLAIM_NAME..."
kubernaut claims delete "$CLAIM_NAME"
info "Creating a kubernaut cluster for $CLAIM_NAME..."
kubernaut claims create --name "$CLAIM_NAME" --cluster-group main || abort "could not claim $CLAIM_NAME"
info "Doing a quick sanity check on that cluster..."
kubectl --kubeconfig "$DEV_KUBECONFIG" -n default get service kubernetes || \
abort "kubernaut was not able to create a valid kubernetes cluster"
info "kubernaut cluster created"
;;
delete)
info "Releasing kubernaut claim..."
kubernaut claims delete "$(cat $CLAIM_FILENAME)"
;;
get-kubeconfig)
echo "$HOME/.kube/$(cat $CLAIM_FILENAME).yaml"
;;
esac
| true
|
50fe234c9f938dfa19d2820ac4a9107f583053b5
|
Shell
|
zorangagic/awsinfo
|
/scripts/commands/ec2/security-groups.bash
|
UTF-8
| 998
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
# Render a table of EC2 security groups, optionally filtered by positional
# arguments (matched against tag name, group id/name and VPC id) and
# optionally including ingress/egress permission details with -p.
#
# NOTE(review): relies on helpers/vars provided by the surrounding awsinfo
# framework (filter_query, join, awscli, TAG_NAME) — confirm this file is
# only ever sourced by it.
FILTER_QUERY=""
PERMISSIONS_QUERY=""
# JMESPath fragment describing one permission entry (ports, protocol, CIDRs).
PERMISSION="{\"P1.FromPort\":FromPort||'',\"P2.ToPort\":ToPort||'',\"P3.IpProtocol\":IpProtocol||'',\"P4.Ipv4Ranges\":IpRanges[].CidrIp, \"P5.Ipv6Ranges\":Ipv6Ranges[].CidrIp,\"P6.UserIdGroupPairs\":UserIdGroupPairs}"
while getopts "p" opt;
do
case "$opt" in
# NOTE(review): when -p is set this fragment is appended directly after
# "Description" below without a leading comma — verify the resulting
# JMESPath is valid.
p) PERMISSIONS_QUERY="\"6.Permissions\":{In:IpPermissions[].$PERMISSION,Out:IpPermissionsEgress[].$PERMISSION}" ;;
esac
done
shift $(($OPTIND-1))
# Positional args become OR-ed filters across the four identifying fields.
if [[ $# -gt 0 ]]; then
FILTER_NAME+=$(filter_query "$TAG_NAME" $@)
FILTER_ID+=$(filter_query "GroupId" $@)
FILTER_GROUP_NAME+=$(filter_query "GroupName" $@)
FILTER_VPC_ID+=$(filter_query "VpcId" $@)
FILTER_QUERY="?$(join "||" $FILTER_NAME $FILTER_ID $FILTER_GROUP_NAME $FILTER_VPC_ID)"
fi
awscli ec2 describe-security-groups --output table --query "SecurityGroups[$FILTER_QUERY].{\"1.Name\":$TAG_NAME,\"2.GroupId\":GroupId,\"3.GroupName\":GroupName,\"4.VpcId\":VpcId,\"5.Description\":Description$PERMISSIONS_QUERY}"
| true
|
269ba945fdd2da7c292e00549209a02536ae3dea
|
Shell
|
scil/ansible-ambari-manager
|
/scripts/python/async-python-command.sh
|
UTF-8
| 789
| 2.828125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Launch a Python script detached (nohup, background) and record its PID.
# All five environment variables below are mandatory; each ${VAR:?...}
# expansion aborts the script with the given message when the variable is
# unset or empty.
: ${PYTHON_COMMAND_FILE:?"Please set the PYTHON_COMMAND_FILE variable!"}
: ${PYTHON_COMMAND_FILE_FOLDER:?"Please set the PYTHON_COMMAND_FILE_FOLDER variable!"}
: ${PYTHON_COMMAND_FILE_PARAMS:?"Please set the PYTHON_COMMAND_FILE_PARAMS variable!"}
: ${PYTHON_COMMAND_PID_FILENAME:?"Please set the PYTHON_COMMAND_PID_FILENAME variable!"}
: ${PYTHON_COMMAND_PID_FOLDER:?"Please set the PYTHON_COMMAND_PID_FOLDER variable!"}
cd $PYTHON_COMMAND_FILE_FOLDER
echo "Run: nohup /usr/bin/python $PYTHON_COMMAND_FILE $PYTHON_COMMAND_FILE_PARAMS > /dev/null 2>&1 & echo $! > $PYTHON_COMMAND_PID_FOLDER/$PYTHON_COMMAND_PID_FILENAME"
# $! is the PID of the background job just started; persist it so callers
# can later monitor or kill the process.
nohup /usr/bin/python $PYTHON_COMMAND_FILE $PYTHON_COMMAND_FILE_PARAMS > /dev/null 2>&1 & echo $! > $PYTHON_COMMAND_PID_FOLDER/$PYTHON_COMMAND_PID_FILENAME
| true
|
f7dda43e409a8e536036fd8beb6a6aa2f7c4c0bf
|
Shell
|
cloudstead/cloudos
|
/dev_bootstrap_ubuntu.sh
|
UTF-8
| 2,359
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# One-shot dev bootstrap for Ubuntu: installs base packages, creates the
# dev/test PostgreSQL databases and users, installs and starts Kestrel,
# and installs the bcrypt CLI. Re-execs itself under sudo when not root.

# die <message> - print an error to stderr and abort.
function die () {
  # BUG FIX: was `echo 2>&1 "${1}"`, which redirects stderr to stdout and
  # therefore never wrote the message to stderr; intent was `>&2`.
  echo "${1}" >&2
  exit 1
}

# Re-run this script under sudo if we are not already root.
if [ $(whoami) != "root" ] ; then
  sudo "$0" "$@" || die "Not run as root or cannot sudo to root"
  exit $?
fi

apt-get install -y memcached redis-server postgresql daemon unzip npm

# Create dev/test databases and users, set passwords
for user in $(whoami) cloudos cloudos_dns wizard_form ${CLOUDSTEAD_ADDITIONAL_DBS} ; do
  sudo -u postgres -H createuser ${user}
  for name in ${user} ${user}_test ; do
    sudo -u postgres -H createdb ${name}
  done
  # Password intentionally equals the user name (dev/test environments only).
  sudo -u postgres -H bash -c "echo \"alter user ${user} with password '${user}'\" | psql -U postgres"
done

# Set passwords for db users (needed to generate schemas).
# If you changed the password generation above, then update the values below to match your passwords
echo "export CLOUDOS_DB_PASS=cloudos" >> ~/.cloudos-test.env
echo "export CLOUDOS_DNS_DB_PASS=cloudos_dns" >> ~/.cloudos-dns-test.env

KESTREL_HOME=/usr/local/kestrel
sudo useradd -d ${KESTREL_HOME} -s /usr/sbin/nologin kestrel
for dir in /usr/local /var/log /var/run /var/spool ; do
  sudo mkdir -p ${dir}/kestrel
done

wget -O /tmp/kestrel-2.4.1.zip http://robey.github.com/kestrel/download/kestrel-2.4.1.zip

# Unpack Kestrel, link the versioned install as "current", then locate the
# main jar (excluding javadoc/sources/test artifacts) and start the dev server.
cd ${KESTREL_HOME} && \
  sudo rm -rf ./* && \
  sudo unzip /tmp/kestrel-2.4.1.zip && \
  sudo ln -s kestrel-2.4.1 current && \
  sudo chmod +x current/scripts/* && \
  sudo mkdir -p ${KESTREL_HOME}/logs && \
  sudo mkdir -p ${KESTREL_HOME}/target && \
  cd ${KESTREL_HOME}/target && \
  KESTREL_JAR=$(find ../current/ -type f -name "kestrel*.jar" | grep -v javadoc | grep -v sources | grep -v test) && \
  if [ -z "${KESTREL_JAR}" ] ; then
    echo "Kestrel jar not found"
    exit 1
  fi && \
  sudo ln -s ${KESTREL_JAR} kestrel-$(basename ${KESTREL_JAR}) && \
  sudo ln -s ../current/config && \
  sudo chown -R kestrel ${KESTREL_HOME} && \
  echo "Kestrel successfully installed." && \
  sudo _JAVA_OPTIONS=-Djava.net.preferIPv4Stack=true ${KESTREL_HOME}/current/scripts/devel.sh & \
  sleep 2s && echo "Kestrel successfully started: $(ps auxwww | grep kestrel_ | grep -v grep)"

# Install bcrypt
npm install -g bcryptjs
BCRYPT=/usr/local/lib/node_modules/bcryptjs/bin/bcrypt
TMP=$(mktemp /tmp/bcrypt.XXXXXX) || die "Error creating temp file"
# Strip CR line endings from the shipped script so its shebang works.
cat ${BCRYPT} | tr -d '\r' > ${TMP}
cat ${TMP} > ${BCRYPT}
chmod a+rx ${BCRYPT}
ln -s ${BCRYPT} /usr/local/bin/bcrypt
| true
|
1c2b53bd24883b3beef623c177959821c0aa7258
|
Shell
|
mosszhaodphil/QUASAR_ANALSER
|
/estimate_cbf_quasar.sh
|
UTF-8
| 2,573
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# QUASAR ASL perfusion analysis. Runs two analyses on the same ASL
# tag-control data and rescales the outputs:
#   1. model-based: Fabber model fit (CBF, aBV, flow-suppression angles)
#   2. model-free:  AIF inference via Fabber + deconvolution via asl_mfree
#
# Arguments:
#   $1 - ASL tag-control data file
#   $2 - Fabber options file for the model-based analysis
#   $3 - Fabber options file for the model-free AIF inference
#
# Requires FSL tools on PATH (fabber, fslmaths, asl_file, asl_mfree) and a
# mask image named "mask" in the current directory.
# All outputs carry an HHMMSS timestamp so repeated runs do not collide.

# Assign arguments
file_data=$1 # ASL tag - control file
file_option=$2 # Model based analysis options file
file_aif_option=$3 # Model free analysis AIF options file
echo "Begin model based analysis..."
home_dir=`pwd`
# Get current timestamp as output directory name
current_time=$(date +%H%M%S)
out_dir_mbased="$current_time"_mbased
out_cbf_mbased=perfusion_"$current_time"_mbased
out_abv_mbased=abv_"$current_time"_mbased
out_phi_mbased=phi_"$current_time"_mbased
out_the_mbased=the_"$current_time"_mbased
# Model fitting
fabber --data=$file_data --data-order=singlefile --output=$out_dir_mbased -@ $file_option
# Rescale CBF to ml/100g/min unit
fslmaths $out_dir_mbased/mean_ftiss -mul 6000 $out_cbf_mbased
# Rescale ABV to percentage
fslmaths $out_dir_mbased/mean_fblood -mul 100 $out_abv_mbased
# Rescale estimated flow suppression angles
fslmaths $out_dir_mbased/mean_phiblood -mul 1000000 $out_phi_mbased
fslmaths $out_dir_mbased/mean_thblood -mul 1000000 $out_the_mbased
echo "Model based analysis finished."
echo "Begin model free analysis"
out_dir_mfree="$current_time"_mfree
out_cbf_mfree=perfusion_"$current_time"_mfree
# Split files
mkdir $out_dir_mfree
cd $out_dir_mfree
# Split QUASAR ASL (differenced) sequence into six phases
asl_file --data=$home_dir/$file_data --ntis=6 --ibf=tis --iaf=diff --split=asldata_ph
# 3rd and 6th phases are noncrushed signal
fslmaths asldata_ph002 -add asldata_ph005 -mul 0.5 asl_noncrush
# 1st, 2nd, 4th, and 5th phases are crushed signals
# Average them to get tissue signal
fslmaths asldata_ph000 -add asldata_ph001 -add asldata_ph003 -add asldata_ph004 -mul 0.25 asl_tissue
# Blood signal = noncruseshed - tissue
fslmaths asl_noncrush -sub asl_tissue asl_blood
# Inference AIF
fabber --data=asl_blood --mask=$home_dir/mask --output=aif --data-order=singlefile -@ $home_dir/$file_aif_option
# Get AIF shape
# (fabber appends a suffix; aif_latest is the most recent output directory)
fslmaths aif_latest/modelfit -div aif_latest/mean_fblood aif_shape
# Model free analysis (deconvolution) without arrival time correction
# asl_mfree --data=$home_dir/signal_tissue --mask=$home_dir/mask --out=modfree --aif=$home_dir/signal_aif --dt=0.3
# asl_mfree --data=$home_dir/asl_tissue --mask=$home_dir/mask --out=modfree --aif=$home_dirsignal_aif --dt=0.3
asl_mfree --data=asl_tissue --mask=$home_dir/mask --out=modfree --aif=aif_shape --dt=0.3
cd $home_dir
# Rescaling
#fslmaths $out_dir_mfree/modfree_magntiude -mul 6000 -div 2 -div 0.9 $out_cbf_mfree
fslmaths $out_dir_mfree/modfree_magntiude -mul 6000 $out_cbf_mfree
echo "Model free analysis finished."
echo "Complete"
| true
|
1967ca8ec868cbb8f657c440148279abd363abad
|
Shell
|
Dejvino/pinephone-arch-install
|
/00-sanity_check.sh
|
UTF-8
| 2,981
| 3.84375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Installation Scripts Sanity Check
#
# Checks the current environment before doing any real work.
#
echo "Sanity check starting..."
# common.sh provides printInfo/failure/stageFinished and, presumably via
# config.sh, the SD_CARD_* variables — the failure text below mentions
# config.sh; confirm there if the variables look wrong.
[ -f common.sh ] \
&& echo "common.sh ... found" \
|| { echo "File 'common.sh' not found in this directory. Change directory and try again." ; exit 1; }
source common.sh
printInfo
printInfo "Checking the environment:"
# df -P reports 1024-byte blocks, so 4000000 blocks ~= 4 GB.
FREE_SPACE=`df -P . | tail -1 | awk '{print $4}'`
[ $FREE_SPACE -gt 4000000 ] \
&& printInfo "free disk space > 4 GB ... OK" \
|| failure "Not enough disk space. Have at least 4 GB."
# Single ping as a cheap connectivity probe.
ping -c 1 github.com >> /dev/null
[ $? -eq 0 ] \
&& printInfo "Internet connection ... OK" \
|| failure "Internet connection failed."
# Create-and-delete round trip proves write permission in this directory.
rm -rf test.txt && touch test.txt && rm test.txt
[ $? -eq 0 ] \
&& printInfo "Local directory Read/Write permissions ... OK" \
|| failure "Could not read or write to this directory. Check your permissions."
[ -e $SD_CARD_DEVICE ] \
&& printInfo "SD Card device '$SD_CARD_DEVICE' exists ... OK" \
|| failure "SD Card device '$SD_CARD_DEVICE' not found. Check that it is connected or update the config.sh file."
# Refuse to continue while either SD card partition is still mounted.
if mount | grep $SD_CARD_ROOT > /dev/null; then
failure "SD Card root partition '$SD_CARD_ROOT' is mounted. Unmount it first!"
fi
if mount | grep $SD_CARD_BOOT > /dev/null; then
failure "SD Card boot partition '$SD_CARD_BOOT' is mounted. Unmount it first!"
fi
printInfo
printInfo "Checking commands:"
# Verify every external tool the later install scripts rely on.
command -v bash >> /dev/null \
&& printInfo "bash exists ... OK" \
|| failure "bash not found."
command -v dd >> /dev/null \
&& printInfo "dd exists ... OK" \
|| failure "dd not found. All hope is lost."
command -v blockdev >> /dev/null \
&& printInfo "blockdev exists ... OK" \
|| failure "blockdev not found. All hope is lost."
command -v mkimage >> /dev/null \
&& printInfo "mkimage exists ... OK" \
|| failure "mkimage not found. Install uboot-tools package."
command -v sudo >> /dev/null \
&& printInfo "sudo exists ... OK" \
|| failure "sudo not found. Install sudo package."
command -v sync >> /dev/null \
&& printInfo "sync exists ... OK" \
|| failure "sync not found."
command -v lsblk >> /dev/null \
&& printInfo "lsblk exists ... OK" \
|| failure "lsblk not found."
command -v bsdtar >> /dev/null \
&& printInfo "bsdtar exists ... OK" \
|| failure "bsdtar not found."
command -v wget >> /dev/null \
&& printInfo "wget exists ... OK" \
|| failure "wget not found. Install wget package."
command -v mkfs.vfat >> /dev/null \
&& printInfo "mkfs.vfat exists ... OK" \
|| failure "mkfs.vfat not found. Install dosfstools package."
command -v mkfs.f2fs >> /dev/null \
&& printInfo "mkfs.f2fs exists ... OK" \
|| failure "mkfs.f2fs not found. Install f2fs-tools package."
printInfo
printInfo "All tests passed. You're ready to go!"
stageFinished "Execute the first script (starting with '01') and follow the instructions."
| true
|
a5679ab08cf43db701e59f61e6c59ce30b972a2c
|
Shell
|
jmrolsson/SUSY_EWK_Truth
|
/scripts/make_filelists.sh
|
UTF-8
| 927
| 2.625
| 3
|
[] |
no_license
|
# Truth studies for RunII SUSY EWK
# Script for creating separate file lists for each sample
# Joakim Olsson <joakim.olsson@cern.ch>
# 2017-02-07
#
# For every TRUTH1 xAOD matching the pattern below, extract a sample tag
# from the directory name and append the file path to a per-sample list
# under $ROOTCOREBIN/../SUSY_EWK_Truth/filelists/.
## February truth samples
#jo_version=feb2017
#tag=20170204_30k_1
## New truth samples, same generator settings as the "February samples" just different mass points, in order to validate the job options (JOs) for the official request
#jo_version=march2017
#tag=20170506
#tag=20170514
#tag=20170516
## Testing official Wh JOs (https://its.cern.ch/jira/browse/ATLMCPROD-4979)
jo_version=aug2017
tag=20170814
pattern=/share/t3data2/jolsson/EWK/ewk_truth_xaod/*TRUTH1.${tag}*/*.root
# NOTE(review): $(ls ...) word-splits the output; paths here contain no
# spaces, but iterating the glob directly would be safer.
for path in $(ls ${pattern})
do
dir=$(dirname "${path}")
file=$(basename "${path}") # NOTE(review): `file` is computed but never used below
if [ -d $dir ]; then
# Sample tag = text between "...A14N23LO_" and ".TRUTH" in the directory name.
sample_tag=$(echo ${dir} | sed -r "s/^.*user.*A14N23LO_//g" | sed -r "s/\.TRUTH.*//g")
echo $path >> $ROOTCOREBIN/../SUSY_EWK_Truth/filelists/mc15_13TeV_${sample_tag}_${jo_version}.txt;
fi
done;
| true
|
5d3353636825aede8a6331f9bd77a78bbe78b2da
|
Shell
|
uc-cdis/ohif-orthanc-app-example
|
/orthanc/entrypoint.sh
|
UTF-8
| 373
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Container entrypoint for Orthanc: wait for the PostgreSQL backend and the
# /data/ volume to become available, then run Orthanc with the given args.

# Block until the "postgres" host accepts queries against the orthanc db.
while ! psql -h postgres -U postgres orthanc -c 'select now()'; do sleep 1; done
COUNT=0
#
# in gen3 /data/ presents as a mount point,
# but in docker-compose it just looks like a folder
#
# Wait up to ~20 seconds for the /data/ mount to appear, then proceed anyway.
while ! mount | grep '/data/' && [[ "$COUNT" -lt 20 ]]; do
sleep 1
COUNT=$((COUNT + 1))
done
# Non-fatal: warn when /data/ is missing entirely (docker-compose folder case).
if [[ ! -d /data/ ]]; then
echo "WARNING: no /data/?"
fi
Orthanc "$@"
| true
|
b70b4e442a12dc48bc0e24b0d76a2a48d7f172b5
|
Shell
|
sergey-platonov/electrol
|
/earth_bg.sh
|
UTF-8
| 2,796
| 3.78125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Download the most recent ELEKTRO-L 2 satellite image from the NTSOMZ FTP
# server, stamp it with its capture time, resize it, and set it as the
# desktop wallpaper for XFCE or KDE Plasma.

resolution="1920x1080"
monitor=0
data_dir=$HOME/.local/share/electrol/
de="kde"

usage=\
"Usage: $(basename "$0") [-d <pictures dir>] [-r <resolution>] [-m <monitor>] [-e <desktop environment>]\n\
\tscript updates XFCE wallpaper on specified monitro\n
params:\n
\tpictures dir\t - directory where images are downloaded (default value: $HOME/.local/share/electrol/)\n\
\tresolution\t\t - resolution to which image is beeing resized (default value: 1920x1080)\n\
\tmonitor\t\t - number of monitor (default value: 0)\n
\tdesktop environment\t - desktop environment, supported values are kde, xfce (default)"

while getopts "hr:m:d:e:" opt; do
  case $opt in
    h)
      # BUG FIX: print quoted with -e so the \n/\t escapes render instead
      # of being printed literally after word-splitting.
      echo -e "$usage"
      exit 1
      ;;
    r)
      resolution=$OPTARG
      ;;
    m)
      monitor=$OPTARG
      ;;
    d)
      data_dir=$OPTARG
      ;;
    e)
      de=$OPTARG
      ;;
    \?)
      echo -e "$usage"
      # BUG FIX: bare `exit` returned the status of getopts (0); an invalid
      # option should exit non-zero.
      exit 1
      ;;
  esac
done

export LC_ALL=en_US.UTF-8

# ELEKTRO-L server
ftp_site=ftp://electro:electro@ntsomz.gptl.ru:2121/ELECTRO_L_2/

# generating date information; images lag, so look 25 minutes back
year=$(date +%Y)
year00=$(date +%y)
month=$(date +%B)
month00=$(date +%m)
day=$(date +%d)
hour=$(date +%H -d "25 min ago")
minute=$(date +%M -d "25 min ago")

# minutes can be only only 00 or 30 (images are published on the half hour)
if [ "$minute" -ge 30 ]; then
  minute="30"
else
  minute="00"
fi

# generate file name
file_name=$year00$month00$day"_"$hour$minute"_original_RGB.jpg"
# generate url of image
image_url=$ftp_site/$year/$month/$day/$hour$minute/$file_name

if [ ! -d "$data_dir" ]; then
  mkdir -p "$data_dir"
fi
cd "$data_dir" || exit 1

# download image
background="$year00$month00$day$hour$minute.jpg"
echo "$background"
wget "$image_url" -O "$file_name"

# add time and resize, unless a non-empty result already exists
if [ ! -s "$background" ]; then
  convert -font Courier "$file_name" -pointsize 200 -draw "gravity SouthWest fill grey text 0,0 '$hour:$minute $day.$month00.$year' " -resize "$resolution" "$background"
fi

# update background
if [ "x$de" == "xxfce" ]; then
  xfconf-query -c xfce4-desktop -p "/backdrop/screen0/monitor$monitor/image-path" -s "$data_dir/$background"
fi

if [ "x$de" == "xkde" ]; then
  # Plasma has no CLI wallpaper switch; evaluate a small script via D-Bus.
  script_string="string:
var Desktops = desktops();
for (i=0;i<Desktops.length;i++) {
  d = Desktops[i];
  d.wallpaperPlugin = \"org.kde.image\";
  d.currentConfigGroup = Array(\"Wallpaper\",
                               \"org.kde.image\",
                               \"General\");
  d.writeConfig(\"Image\", \"file://$data_dir/$background\");
}"
  dbus-send --session --dest=org.kde.plasmashell --type=method_call /PlasmaShell org.kde.PlasmaShell.evaluateScript "$script_string"
fi

# remove original image (~5M)
rm -f "$file_name"
| true
|
dbfe8c1d1e9a290a573b12d9140758c1f0e7b046
|
Shell
|
magurotuna/dotfiles
|
/install.sh
|
UTF-8
| 1,753
| 4.625
| 5
|
[] |
no_license
|
#!/bin/bash
# You have to execute this script ONLY ONCE.
# This script does the following things:
#     1. Fetch this repo
#     2. Install basic dependencies
#
# NOTE: shebang changed from /bin/sh to bash — the script relies on
# bash-only features (`function`, `[[ ]]`, `&>`).

set -eu

DOTPATH=${HOME}/dotfiles
REPO_URL=https://github.com/magurotuna/dotfiles.git
REPO_TARBALL=https://github.com/magurotuna/dotfiles/archive/master.tar.gz

# has <name> - succeed if <name> is an available command/builtin/alias.
function has() {
  type "${1:?too few arguments}" &>/dev/null
}

# If DOTPATH already exists, remove this.
if [ -d "${DOTPATH}" ]; then
  cd "${HOME}"
  rm -rf "${DOTPATH}"
fi

echo "Start fetching..."
# Prefer git; fall back to curl or wget with the tarball snapshot.
if has "git"; then
  git clone --recursive ${REPO_URL} ${DOTPATH}
elif has "curl"; then
  curl -L ${REPO_TARBALL} | tar zxv
  mv -f dotfiles-master ${DOTPATH}
elif has "wget"; then
  wget -O - ${REPO_TARBALL} | tar zxv
  mv -f dotfiles-master ${DOTPATH}
else
  echo "At least one of git, curl, wget is required."
  exit 1
fi
echo "fetch done."

# BUG FIX: the pattern was quoted ("linux*"), which makes [[ == ]] compare
# the literal string, so this branch never ran ($OSTYPE is e.g. "linux-gnu").
if [[ $OSTYPE == linux* ]]; then
  if has "yum" || has "apt"; then
    # One package name per line in basic_deps.txt; join with spaces.
    # DEPS stays unquoted below on purpose so it word-splits into arguments.
    DEPS=$(cat ${DOTPATH}/basic_deps.txt | tr "\n" " ")
    if has "yum"; then
      if ! has "sudo"; then
        yum update -y && yum install -y sudo
      fi
      echo "Install basic dependencies by using yum..."
      sudo yum update -y && sudo yum install -y ${DEPS}
    elif has "apt"; then
      if ! has "sudo"; then
        apt update -y && apt install -y sudo
      fi
      echo "Install basic dependencies by using apt..."
      sudo apt update -y && sudo apt install -y ${DEPS}
    else
      echo "Neither yum nor apt is installed, so deps installation skips."
    fi
  fi
fi

echo "Installation successfully finished!"
echo "Next step:"
echo "$ cd ~/dotfiles"
echo "$ make deploy"
echo "$ make init"
| true
|
123d0977765931e55d25b1256dbdd9d83b3ac20d
|
Shell
|
goober2186/sm
|
/functions/getops_check.sh
|
UTF-8
| 775
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash

# Validate that the required option variables were supplied via getopts.
# Reports the first missing flag (HOST, then TOKEN, then FILENAME) with a
# usage line and terminates via exit_abnormal; returns 0 when all are set.
#
# BUG FIX / simplification: the original wrapped the elif chain in an outer
# compound `if [ -z ... ] || ...` that duplicated every inner condition;
# the inner chain alone is equivalent.
getopts_validate() {
  local usage="summon.sh -h <url> -t <closedstack api token> -f <path to server list file>"
  if [ -z "$HOST" ]; then
    echo "Missing -h <closedstack url>"
    echo "$usage"
    exit_abnormal
  elif [ -z "$TOKEN" ]; then
    echo "Missing -t <token>"
    echo "$usage"
    exit_abnormal
  elif [ -z "$FILENAME" ]; then
    echo "Missing -f <path to server list file>"
    echo "$usage"
    exit_abnormal
  fi
}

# Exits on getops error
exit_abnormal() {
  exit 1
}
| true
|
5a1200909609f64b481c624e7c1d37f776c75feb
|
Shell
|
micha3lbrown/.dotfiles
|
/zsh/functions/confirm.zsh
|
UTF-8
| 230
| 3.421875
| 3
|
[] |
no_license
|
# Prompt with the given message and loop until the user answers yes or no.
# Returns 0 for y/Y, 1 for n/N; any other input prints a hint and re-prompts.
confirm()
{
  while true; do
    echo -n "$* [y/n]"
    read ans
    case "$ans" in
      [yY]) return 0 ;;
      [nN]) return 1 ;;
      *) echo "Please enter y or n" ;;
    esac
  done
}
| true
|
18fb28691d534e2e2b84e184954e91341836cb2a
|
Shell
|
xmorera/SolrNet
|
/pack.sh
|
UTF-8
| 1,625
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env nix-shell
#! nix-shell -i sh
#
# Pack the NuGet packages for the current nbgv-derived version and push
# them to GitHub Packages (when GITHUB_TOKEN is set) and to nuget.org
# (when NUGET_API_KEY is set).

set -e
set -o pipefail

dotnet tool restore
version_js="$(dotnet tool run nbgv get-version --format json)"

export Version=$(echo $version_js | jq -r '.AssemblyInformationalVersion')
echo Tag: $Version
export PackageVersion=$(echo $version_js | jq -r '.NuGetPackageVersion')

public_tag=$(git tag --points-at HEAD)
if [ -n "$public_tag" ]; then
    # A tag on HEAD marks a public release: use the simple x.y.z version.
    export PackageVersion=$(echo $version_js | jq -r '.SimpleVersion')
    echo "Public release $public_tag"
fi

dotnet pack

if [ -n "$GITHUB_TOKEN" ]; then
    dotnet nuget add source --username mausch --password "$GITHUB_TOKEN" --store-password-in-clear-text --name github "https://nuget.pkg.github.com/SolrNet/index.json"
    # BUG FIX: the substitution was quoted ("$(find | rg nupkg)"), so the
    # loop ran exactly once with the whole newline-joined list as a single
    # word; it must word-split into one iteration per package file.
    for nupkg in $(find | rg nupkg); do
        echo "Publishing $nupkg to github"
        dotnet nuget push --source "github" "$nupkg" --skip-duplicate &
    done
    wait
    if [ -n "$GITHUB_ENV" ]; then
        echo "VersionTag=$Version" >> $GITHUB_ENV # for github actions
    fi
else
    echo "GITHUB_TOKEN not defined, won't push packages to github"
fi

if [ -n "$NUGET_API_KEY" ]; then
    # Push the main packages first ('[^s]nupkg' excludes .snupkg symbol
    # packages), then everything; --skip-duplicate makes the overlap harmless.
    for nupkg in $(find | rg '[^s]nupkg'); do
        echo "Publishing $nupkg to nuget.org"
        dotnet nuget push "$nupkg" --api-key $NUGET_API_KEY --source https://api.nuget.org/v3/index.json --skip-duplicate &
    done
    wait
    for nupkg in $(find | rg nupkg); do
        echo "Publishing $nupkg to nuget.org"
        dotnet nuget push "$nupkg" --api-key $NUGET_API_KEY --source https://api.nuget.org/v3/index.json --skip-duplicate &
    done
    wait
else
    echo "NUGET_API_KEY not defined, won't push packages to nuget.org"
fi

echo $version_js
echo Tag: $Version
| true
|
c2df705dad424b28c91889c320e192bdf89cb69c
|
Shell
|
Sid2-1971/spaghetti-scripts
|
/issue_brander/addip2issue
|
UTF-8
| 321
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh -e
#
# ifupdown hook: rebuild /etc/issue from a pristine copy and append the
# machine's current IP address so it is shown on the console login prompt.
# Install at /etc/network/if-up.d/addip2issue
# Skip the loopback interface.
if [ "$METHOD" = loopback ]; then
exit 0
fi
# Only run from ifup.
if [ "$MODE" != start ]; then
exit 0
fi
# Restore the pristine banner, then append the address reported by the
# get-ip-address helper and a trailing blank line.
cat /etc/issue-original > /etc/issue
/usr/local/bin/get-ip-address >> /etc/issue
echo "" >> /etc/issue
| true
|
b0b2895029e2740b4f2f5444e371945d775f09e6
|
Shell
|
duaraghav8/solidityparser
|
/scripts/generate-solidity-parser.sh
|
UTF-8
| 413
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Generate the Go parser for Solidity from the ANTLR4 grammar and move the
# generated sources into the repository root.
set -e

# This setup assumes you have antlr4 installed on your system
# as described at https://github.com/antlr/antlr4/blob/master/doc/getting-started.md

# Run the ANTLR4 tool targeting Go, emitting package "solidityparser"
# into ./lib (requires the complete jar under /usr/local/lib).
java \
-Xmx500M \
-cp "/usr/local/lib/antlr-4.8-complete.jar:$CLASSPATH" \
org.antlr.v4.Tool \
-Dlanguage=Go solidity-antlr4/Solidity.g4 \
-package solidityparser \
-o lib

# Promote the generated sources to the repo root.
mv lib/solidity-antlr4/* .

# Cleanup
rm -rf lib
| true
|
c5995e57ecbcf0d852410d83f44391e07b2d022e
|
Shell
|
rmtrane/ccdprobs_771
|
/scripts/shell/find_project_folder.sh
|
UTF-8
| 303
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Walk upward from the current directory until a directory containing a
# ".project_folder" marker file is found; print that directory, or "/"
# when no marker exists anywhere on the path.
#
# BUG FIX: quote $path/$found/$tmp — the unquoted [ $path != / ] tests and
# echo $tmp broke for directories whose path contains whitespace.

path=$(pwd)
found="F"

while [ "$path" != / ] && [ "$found" != T ]; do
  tmp=$(find "$path" -maxdepth 1 -mindepth 1 -name ".project_folder")
  n_found=$(echo "$tmp" | wc -w)

  if [ "$n_found" -gt 0 ]; then
    found="T"
  else
    path="$(dirname "$path")"
  fi
done

echo "$path"
| true
|
f7460b8e00aa614605de513014ff4b87d62fb6e2
|
Shell
|
ncuesta/tesis-md
|
/update-readme.sh
|
UTF-8
| 363
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash

# Regenerate README.md with links to every Markdown file in the project,
# for easy access.

# Derive the repo's web URL from the fetch remote, dropping the ".git" suffix.
# ($() replaces the original backticks.)
REPO_URL="$(git remote -v | grep fetch | tr "\t" ' ' | tr -s ' ' | cut -d ' ' -f 2 | sed 's/\.git$//')/blob/master"

echo -e "# Tesina\n\nArchivos:\n" > README.md

# BUG FIX: read the find output line by line instead of word-splitting it,
# so file names containing spaces produce a single correct link.
find * -type f -name '*.md' | while IFS= read -r file; do
    echo "* [$file]($REPO_URL/$file)" >> README.md
done
| true
|
f1ce261ce654222ca80476e5517e0189281a4b66
|
Shell
|
drnic/cf-genesis-kit
|
/hooks/subkit
|
UTF-8
| 451
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Map a list of genesis subkit names to the full set of subkits to deploy:
# every requested kit is echoed through, and when "shield" is combined with
# a BOSH-managed database or blobstore, the matching shield backup subkits
# are appended.
#
# FIX: "$@"/"$kit" are quoted (were unquoted $@/$kit), and the if-chain is
# collapsed into a case statement.

for kit in "$@"; do
  case "$kit" in
    shield)               SHIELD_ENABLED=1 ;;
    blobstore-webdav)     BOSH_MANAGED_BLOBSTORE=1 ;;
    db-internal-postgres) BOSH_MANAGED_POSTGRES=1 ;;
  esac
  echo "$kit"
done

if [[ $BOSH_MANAGED_POSTGRES == 1 && $SHIELD_ENABLED == 1 ]]; then
  echo shield-dbs
fi

if [[ $BOSH_MANAGED_BLOBSTORE == 1 && $SHIELD_ENABLED == 1 ]]; then
  echo shield-blobstore
fi
| true
|
8a9a222ad1fa839d1372b3ca6f65326c90bd5b22
|
Shell
|
GsDevKit/GsDevKit_home
|
/bin/private/gsDevKitTodeCommandLine
|
UTF-8
| 579
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
#=========================================================================
# Copyright (c) 2015, 2016 GemTalk Systems, LLC <dhenrich@gemtalksystems.com>.
#
# MIT license: https://github.com/GsDevKit/GsDevKit_home/blob/master/license.txt
#=========================================================================
# forward all command invocations to devKitCommandLine .. preserve unique
# identity of gsDevKitTodeCommandLine to make it clear when reading scripts
# that the callers of this script expect tODE to be installed in the server
#
# BUG FIX: forward with "$@" instead of unquoted $*, so arguments that
# contain spaces reach devKitCommandLine as the same words received here.
"$GS_HOME/bin/devKitCommandLine" "$@"
| true
|
e0aa0a2d6b9c315224063c364d2bb56e838fdcdc
|
Shell
|
Karkanius/MIECT
|
/Ano_2/LFA/antlr4-bin-v5/bin/antlr4-run
|
UTF-8
| 344
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Find the single compiled *Main class under the current directory and run
# it with assertions enabled, feeding it the concatenation of the given
# input files (stdin when no files are given).

IFS=$'\n'

count=$(find . -name '*Main.class' | wc -l)
if ((count == 0)); then
    echo "Main file not found!"
    exit 1
elif ((count > 1)); then
    echo "Too many main files!"
    find . -name '*Main.java' -printf " %p\n"
    exit 1
fi

# Strip the ".class" suffix and the leading "./" to get the class name.
# BUG FIX: anchor the suffix pattern — 's/.class//g' also deleted any
# "Xclass" substring elsewhere in the path.
main=$(find . -name '*Main.class' | sed 's/\.class$//' | sed 's/^\.\///1')

# BUG FIX: "$@" (was unquoted $*) keeps file names with spaces intact;
# with no arguments cat still reads stdin, as before.
cat -- "$@" | java -ea "$main"
| true
|
e2ddcb856961b07268893b1d628038b47f11ca55
|
Shell
|
FauxFaux/debian-control
|
/c/cvsd/cvsd_1.0.24_amd64/postinst
|
UTF-8
| 9,727
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/sh
set -e
# source debconf library.
. /usr/share/debconf/confmodule
# location of the configfile
configfile="/etc/cvsd/cvsd.conf"
# location of the file that will be edited
# only when editing is complete will $configfile be overwritten
tmpconfigfile=`mktemp`
# location of the file that will be used temporarily
tmpfile=`mktemp`
# create cvsd group if it's not there yet
getent group cvsd >/dev/null || \
addgroup --system cvsd
# create cvsd user if it's not there yet
getent passwd cvsd >/dev/null || \
adduser --system --ingroup cvsd --home /var/lib/cvsd cvsd
# correct settings for cvsd user
(getent passwd cvsd | grep -q "^cvsd:.*:cvs pserver daemon:") || usermod -c "cvs pserver daemon" cvsd
gid=`getent group cvsd | sed 's/^cvsd:[^:]*:\([0-9]*\):.*$/\1/'`
(getent passwd cvsd | grep -q "^cvsd:.*:$gid:") || usermod -g "cvsd" cvsd
(getent passwd cvsd | grep -q ":/bin/false$") || usermod -s "/bin/false" cvsd
# sed expressions to escape character so vars may be used in regexps
RE_ESCAPE="s/[\\\\\/.+*()\[^$]/\\\\&/g"
RP_ESCAPE="s/[\\\\\/&]/\\\\&/g"
# function to do modification of an option
# modifications are to $tmpconfigfile
#
# thingamagiggy <setting> <value>
#   Ensure $tmpconfigfile contains the line "<setting> <value>":
#     - if the exact setting/value pair is already present, do nothing;
#     - if the setting exists with a different value, rewrite the value in
#       place via sed (preserving surrounding whitespace);
#     - otherwise append "<setting> <value>" at the end.
#   Uses $RE_ESCAPE / $RP_ESCAPE (defined above) to escape the value for a
#   sed pattern and a sed replacement respectively, and $tmpfile as scratch.
thingamagiggy () {
setting="$1"
value="$2"
# Escape the value for regex matching (value_re) and for use in the sed
# replacement text (value_rp).
value_re=`echo "$value" | sed "$RE_ESCAPE"`
value_rp=`echo "$value" | sed "$RP_ESCAPE"`
# if config already contains this setting we're done
grep \
'^[[:space:]]*'"$setting"'[[:space:]][[:space:]]*'"$value_re"'[[:space:]]*$' \
< "$tmpconfigfile" > /dev/null 2>&1 \
&& return 0
# check if a setting should be modified or added
if grep \
'^[[:space:]]*'"$setting"'[[:space:]][[:space:]]*[^[:space:]]*[[:space:]]*$' \
< "$tmpconfigfile" > /dev/null 2>&1
then
# setting is present with another value, modify
sed \
's/^\([[:space:]]*'"$setting"'[[:space:]][[:space:]]*\)[^[:space:]]*\([[:space:]]*\)$/\1'"$value_rp"'\2/' \
< "$tmpconfigfile" > "$tmpfile"
cat "$tmpfile" > "$tmpconfigfile"
else
# setting is not yet present, add
echo "$setting $value" >> "$tmpconfigfile"
fi
}
# install a new configfile if there is no existing one or if it
# is an old generated one, otherwise modify current configfile
if ! [ -e "$configfile" ]
then
# create a new configuration file
cat > "$tmpconfigfile" << EOM
# This is the configuration file for cvsd.
# See the manual page cvsd.conf(5) for more information.
#
# You can also use 'dpkg-reconfigure cvsd' to modify these
# settings.
#
# See the "Password authentication server"
# section in the cvs texinfo for more information
# about running a pserver.
# RootJail <path>
# This is the location of the chroot jail
# cvs should be run in.
# Specify 'none' (without quotes) to not use
# a chroot jail.
# This directory should be initialized with
# the cvsd-buildroot script.
RootJail /var/lib/cvsd
# Uid <uid>
# This specifies which user id cvs should be
# run as. This can be a numerical id or
# a symbolic value.
Uid cvsd
# Gid <gid>
# This specifies which group id cvs should be
# run as. This can be a numerical id or
# a symbolic value.
Gid cvsd
# CvsCommand <path>
# If you define this it should point to the cvs
# command to execute. Otherwise "/bin/cvs" will
# be used if a RootJail is configured and the
# cvs command found at compiletime otherwise.
# The path should be relative to the specified
# RootJail and should start with a '/'.
# CvsArgs <arg>...
# Additional arguments to pass to the cvs command.
# For example, to enable read-only access to the
# repository, pass the -R option.
# Nice <num>
# This specifies the nice value (on most systems
# ranging from -20 to 20) where the smaller the number
# (more negative) the higher the priority.
Nice 1
# Umask <mask>
# This specifies a umask used by the cvs pserver when
# creating files. Specify as an octal value.
Umask 027
# Limit <resource> <value>
# <resource> can be one of: coredumpsize, cputime, datasize, filesize,
# memorylocked, openfiles, maxproc, memoryuse, stacksize or virtmem.
# <value> is the maximum value for the given resource. For size values
# a suffix of 'b', 'k' or 'm' can be specified ('k' is default). Time
# values can be formatted as 'mm:ss' or have 'm' or 's' suffixes
# ('s' is default).
Limit coredumpsize 0
# PidFile <file>
# This specifies the location the process id of the
# daemon is written.
PidFile /var/run/cvsd.pid
# Listen <address> <port>
# The addresses and ports to listen on for connections.
#Listen * 2401
# MaxConnections <num>
# The maximum number of connections that will
# be handled simultaneously. 0 is unlimited.
MaxConnections 10
# Log <scheme/file> [<loglevel>]
# The way logging is done. Either none, syslog or a
# filename may be specified, followed by an optional
# loglevel. Loglevel may be one of:
# crit, error, warning, notice, info (default) or debug.
# This option can be supplied multiple times.
# If this option is not specified syslog info is assumed.
Log syslog info
#Log /var/log/cvsd.log debug
# Repos <path>
# This option specifies which repositories
# can be used. The value is passed as a
# --allow-root=<path> parameter to cvs.
# The path should be relative to the specified
# RootJail and should start with a '/'.
# This option can be supplied multiple times.
Repos /test
Repos /coolsoftware
EOM
else
# use the existing configfile
cat "$configfile" > "$tmpconfigfile"
fi
# Location of Chroot jail
db_get cvsd/rootjail
[ "$RET" = "" ] || [ "$RET" = "/" ] && RET="none"
thingamagiggy "RootJail" "$RET"
if [ "$RET" = "none" ]; then
(getent passwd cvsd | grep -q "^cvsd:.*:/:") || usermod -d "/" cvsd
else
(getent passwd cvsd | grep -q "^cvsd:.*:$RET:") || usermod -d "$RET" cvsd
fi
rootjail="$RET"
# Maximum number of connections
db_get cvsd/maxconnections
[ -n "$RET" ] || RET=0
thingamagiggy "MaxConnections" "$RET"
# Nice value to run at
db_get cvsd/nice
[ -n "$RET" ] || RET=1
thingamagiggy "Nice" "$RET"
# Umask to use
db_get cvsd/umask
[ -n "$RET" ] || RET="027"
thingamagiggy "Umask" "$RET"
# Address-Port combinations to listen on
sed 's/^\([[:space:]]*\)Listen/\1x-Listen/' < "$tmpconfigfile" > "$tmpfile"
cat "$tmpfile" > "$tmpconfigfile"
# check current listen settings
db_get cvsd/listen
if [ -n "$RET" ]
then
echo "$RET" | tr ' ' '\n' | while read addr
do
read port
# if current config already contains this addr/port combination
# remove the x-, otherwise add a new option
addr_re=`echo "$addr" | sed "$RE_ESCAPE"`
port_re=`echo "$port" | sed "$RE_ESCAPE"`
if grep \
'^[[:space:]]*x-Listen[[:space:]][[:space:]]*'"$addr_re"'[[:space:]][[:space:]]*'"$port_re"'[[:space:]]*$' \
< "$tmpconfigfile" > /dev/null 2>&1
then
sed \
's/^\([[:space:]]*\)x-\(Listen[[:space:]][[:space:]]*'"$addr_re"'[[:space:]][[:space:]]*'"$port_re"'[[:space:]]*\)$/\1\2/' \
"$tmpconfigfile" > "$tmpfile"
cat "$tmpfile" > "$tmpconfigfile"
else
echo "Listen $addr $port" >> "$tmpconfigfile"
fi
done
fi
# remove remaining x-Listen options
grep -v '^[[:space:]]*x-Listen' < "$tmpconfigfile" > "$tmpfile" || true
cat "$tmpfile" > "$tmpconfigfile"
# Repositories to have
sed 's/^\([[:space:]]*\)Repos/\1x-Repos/' < "$tmpconfigfile" > "$tmpfile"
cat "$tmpfile" > "$tmpconfigfile"
# check current repository settings
db_get cvsd/repositories
# remove extra repositories
OLDIFS="$IFS"
IFS=':'
for repos in $RET
do
IFS="$OLDIFS"
repos_re=`echo "$repos" | sed "$RE_ESCAPE"`
if grep \
'^[[:space:]]*x-Repos[[:space:]][[:space:]]*'"$repos_re"'[[:space:]]*$' \
< "$tmpconfigfile" > /dev/null 2>&1
then
sed \
's/^\([[:space:]]*\)x-\(Repos[[:space:]][[:space:]]*'"$repos_re"'[[:space:]]*\)$/\1\2/' \
"$tmpconfigfile" > "$tmpfile"
cat "$tmpfile" > "$tmpconfigfile"
else
echo "Repos $repos" >> "$tmpconfigfile"
fi
done
# remove remaining x-Repos options
grep -v '^[[:space:]]*x-Repos' < "$tmpconfigfile" > "$tmpfile" || true
cat "$tmpfile" > "$tmpconfigfile"
# Limits
sed 's/^\([[:space:]]*\)Limit/\1x-Limit/' < "$tmpconfigfile" > "$tmpfile"
cat "$tmpfile" > "$tmpconfigfile"
# check current limits
db_get cvsd/limits
for limit in `echo "$RET" | sed 's/,//g'`
do
db_get "cvsd/limit_$limit"
if [ "x$RET" != "x" ]
then
value="$RET"
value_rp=`echo "$value" | sed "$RP_ESCAPE"`
if grep \
'^[[:space:]]*x-Limit[[:space:]][[:space:]]*'"$limit"'[[:space:]]' \
< "$tmpconfigfile" > /dev/null 2>&1
then
sed \
's/^\([[:space:]]*\)x-\(Limit[[:space:]][[:space:]]*'"$limit"'[[:space:]]*\)[^[:space:]]*\([[:space:]]*\)$/\1\2'"$value_rp"'\3/' \
"$tmpconfigfile" > "$tmpfile"
cat "$tmpfile" > "$tmpconfigfile"
else
echo "Limit $limit $value" >> "$tmpconfigfile"
fi
fi
done
# remove remaining x-Limit options
grep -v '^[[:space:]]*x-Limit' < "$tmpconfigfile" > "$tmpfile" || true
cat "$tmpfile" > "$tmpconfigfile"
# acutually save (and make a backup) of the configfile
# only save if there are any differences
if ! diff "$tmpconfigfile" "$configfile" > /dev/null 2>&1
then
if [ -e "$configfile" ]
then
# make backup
[ -e "$configfile.old" ] || ( touch "$configfile.old"; chmod 644 "$configfile.old")
cat "$configfile" > "$configfile.old"
else
# create configfile
touch "$configfile"
chmod 644 "$configfile"
fi
cat "$tmpconfigfile" > "$configfile"
fi
rm "$tmpconfigfile"
rm "$tmpfile"
# rebuild the rootjail
if [ "$rootjail" != "none" ]
then
cvsd-buildroot "$rootjail"
fi
# Automatically added by dh_installinit
if [ -x "/etc/init.d/cvsd" ]; then
update-rc.d cvsd defaults >/dev/null
invoke-rc.d cvsd start || exit $?
fi
# End automatically added section
db_stop
exit 0
| true
|
897537ab5ab29233dc0eb0945164551875f132c3
|
Shell
|
jakejarvis/dotfiles
|
/npm/install_globals.sh
|
UTF-8
| 1,068
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -euo pipefail

# Install Volta (Node toolchain manager) if missing, pin default
# Node/npm/yarn versions, and install a fixed set of global npm packages
# through it.

# fetch and install Volta (better nvm)
if ! command -v volta &>/dev/null; then
  # curl https://get.volta.sh | bash -s -- --skip-setup
  brew install volta
fi

volta install node@latest # remove when LTS officially supports arm64
volta install npm@8
volta install yarn@1
volta fetch node@lts # not native on M1 but good to have
volta list node
volta list npm
volta list yarn

# The list below was originally generated with:
# npm ls --global --parseable --depth=0 | awk '{gsub(/\/.*\//,"",$1); print}' | sort -u
packages=(
@babel/cli
@babel/core
@lhci/cli
@vercel/ncc
autoprefixer
cross-env
depcheck
dtslint
dts-gen
esbuild
eslint
gzip-size-cli
json-server
markdownlint-cli2
netlify-cli
nodemon
np
npm-check-updates
npm-name-cli
pm2
postcss
postcss-cli
prettier
release-it
rollup
stylelint
superstatic
svgo
terser
typescript
typesync
vercel
webpack
webpack-cli
)

# Best-effort install: the `|| echo` keeps a single missing package from
# aborting the whole run under `set -e`.
for p in "${packages[@]}"; do
volta run --no-yarn -- npm install --global "$p" || echo "$p not found"
done
unset p packages

volta list all
| true
|
e75d360ea0117f0fb9b6a701a1ecec0c47ee8d04
|
Shell
|
preym17/csit
|
/resources/tools/disk-image-builder/centos/build.sh
|
UTF-8
| 7,943
| 3.6875
| 4
|
[
"CC-BY-4.0",
"Apache-2.0",
"LicenseRef-scancode-dco-1.1"
] |
permissive
|
#!/bin/bash -e
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Purpose of this script is to build a VirtualBox or QCOW2 disk image
# for use with CSIT testing.
#
# As input parameters, the script takes:
#
# - A base Linux distribution (currently, only "ubuntu-14.04.4" is
# supported),
# - A release timestamp in the format "YYYY-MM-DD" eg. "2016-05-21".
# This timestamp MUST reference to a list of packages (APT, PIP)
# that was previously generated by the "listmaker" VM and script.
# - A path to the nested VM image.
#
# The bullk of the work is done by packer,
# while this script does some of the pre- and post-processing.
# Steps executed are:
#
# 1.) Determine if building a QCOW or VirtualBox image. Currently,
# we build a QCOW image if VIRL_* environment variables are
# present (and we therefore assume we are building for VIRL),
# or VirtualBox otherwise.
#
# 2.) Download packer, if not already installed.
#
# 3.) Download APT and PIP packages as required for the image.
# We're downloading them here, rather than letting the VM
# download them off the public internet, for two reasons:
# a.) This allows us to keep and cache packets between runs
# and download them only once,
# b.) This means only the build host needs a working proxy
# configuration and we do not need to worry about setting
# a proxy inside the VM.
#
# 4.) Link the nested VM image into the main VM's temp directory
#
# 5.) Create Version and Changelog files
#
# 6.) Run Packer. Packer, in turn, will:
# 6.1.) Download the Operating System ISO image,
# 6.2.) Start a VM using the selected hypervisor (VirtualBox or Qemu),
# 6.3.) Boot the VM using the ISO image and send initial keystrokes
# to initiate installation using a Preseed or Kickstart file,
# 6.4.) Drive the installation using until the point the VM is reachable
# over SSH,
# 6.5.) Copy the temporary directory populated in steps 3-5 above to the VM,
# 6.6.) Run a script on the VM that performs post-installation:
# 6.6.1.) Install selected .deb packages
# 6.6.2.) Install PIP packages as per requirements.txt file
# 6.6.3.) Install nested VM image and VERSION/CHANGELOG files
# 6.7.) Run a script on VM that creates a Vagrant user (VirtualBox only)
# 6.8.) Run a script on VM that performs clean-up:
# 6.8.1.) Remove any temporary files created,
# 6.8.2.) Remove SSH host-keys
# 6.8.3.) Remove root user password
# 6.8.4.) Shut down the VM
# 6.9.) [TODO]: Upload the image to VIRL (QCOW only), -or-
# Convert the image into a Vagrant box (VirtualBox only), and
# [TODO/FIX]: Upload the Vagrant box to Atlas (VirtualBox only)
#
# 7.) Clean up
###
### 0. Set constants and verify parameters.
###
cd $(dirname $0)
BUILD_DIR="$(pwd)/build"
PACKER_DIR="${BUILD_DIR}/packer"
RPM_CACHE_DIR="${BUILD_DIR}/cache/rpm"
PIP_CACHE_DIR="${BUILD_DIR}/cache/pip"
PACKER_TEMPLATE="$1.json"
LISTS_DIR="$(dirname $0)/lists"
#######################################
# Print usage information and terminate.
# Outputs: usage text to stdout
# Returns: exits 1 (never returns)
#######################################
function syntax {
  # NOTE: the first line must use double quotes so that $0 expands to the
  # actual script name; with single quotes the user saw a literal "$0".
  echo "Syntax: $0 <Operating System> <Release> <Nested VM image>"
  echo
  echo '<Operating System>: Base distro, eg. ubuntu-14.04.4'
  echo '<Release>: Release timestamp, eg. 2016-05-21'
  echo '<Nested VM image>: Path to nested VM image'
  exit 1
}
## Parse command line options
OS=$1
RELDATE=$2
NESTED_IMG=$3

# All three arguments are mandatory; an empty third argument implies the
# earlier ones may be missing as well.
if [ "$3" = "" ]
then
    syntax
fi

## Identify version by looking at topmost version statement in CHANGELOG.
## An entry looks like "## [1.0] - 2016-05-20"; the bracketed token is the
## version. (grep reads the file directly; no need to pipe through cat.)
VERSION=$(grep '^## ' "$(dirname "$0")/CHANGELOG" | head -1 | \
          sed -e 's/.*\[\(.*\)\].*/\1/')

if [ "${VERSION}" = "" ]
then
    echo "Unable to determine build version from CHANGELOG file. Make sure"
    echo "that there is an entry for the most recent version in CHANGELOG,"
    echo "and that the entry is formated like"
    echo
    echo "## [1.0] - 2016-05-20"
    exit 1
fi

# Derived names and paths for this release build.
RELEASE="csit-${OS}_${RELDATE}_${VERSION}"
OUTPUT_DIR="${BUILD_DIR}/output/${RELEASE}"
LIST="${LISTS_DIR}/${OS}_${RELDATE}_${VERSION}"

if [ ! -d "${LIST}" ]
then
    echo "${LIST} not found"
    syntax
    exit 1
fi

if [ ! -f "${NESTED_IMG}" ]
then
    echo "Nested image $NESTED_IMG not found"
    syntax
    exit 1
fi

# Atlas (Vagrant cloud) version string: release date without dashes,
# suffixed with the CHANGELOG version.
ATLAS_RELDATE=${RELDATE//-}
ATLAS_VERSION="${ATLAS_RELDATE}.${VERSION}"

# Find an MD5 checksum utility ("md5sum" on Linux, "md5" on macOS/BSD).
# "command -v" is the portable replacement for "which", and testing the
# assignments directly avoids relying on a stale $?.
if ! MD5UTIL=$(command -v md5sum) && ! MD5UTIL=$(command -v md5)
then
    echo "No MD5 utility found."
    echo "Please make sure you either have \"md5sum\" or \"md5\" installed."
    exit 1
fi
###
### 1. Determine build target.
###
# A QEMU image is built only when VIRL credentials are present (it is
# presumably uploaded to VIRL later — see step 6.9 in the header);
# otherwise a VirtualBox/Vagrant image is produced.
if [ -n "$VIRL_USER" ] && [ -n "$VIRL_PASSWORD" ]
then
    OUTPUT_PROVIDER="qemu"
else
    OUTPUT_PROVIDER="virtualbox"
fi

echo "Building version $VERSION for ${OUTPUT_PROVIDER}"
echo "Release ${RELEASE}"
echo "Using Nested VM image: ${NESTED_IMG}"
echo
###
### 2. Download and extract packer, if not already installed
###
# Pick the packer release matching the build host's OS. Abort explicitly
# on anything other than macOS or Linux; previously packer_url stayed
# empty and wget failed with a confusing "missing URL" error.
packer_os=$(uname -s)
if [ "$packer_os" = "Darwin" ]
then
    packer_url="https://releases.hashicorp.com/packer/0.10.1/packer_0.10.1_darwin_amd64.zip"
elif [ "$packer_os" = "Linux" ]
then
    packer_url="https://releases.hashicorp.com/packer/0.10.1/packer_0.10.1_linux_amd64.zip"
else
    echo "Unsupported build host OS: $packer_os" >&2
    exit 1
fi

mkdir -p "$BUILD_DIR"
# -N: only re-download if newer; -n: never overwrite an extracted file.
wget -P "${PACKER_DIR}" -N "${packer_url}"
unzip -n "${PACKER_DIR}"/packer*zip -d "${PACKER_DIR}"
###
### 3. Download RPM and PIP packages, and cache them
###    Verify downloaded RPM packages.
###    Link required packages into a temp directory for the VM image.
###
# Start each build from a clean per-release output directory; the cache
# directories persist across runs so packages are downloaded only once.
rm -fr "${OUTPUT_DIR}"
mkdir -p "${OUTPUT_DIR}/temp/rpm"
mkdir -p "${RPM_CACHE_DIR}"

RPM_FILE="${LIST}/rpm-packages.txt"
REPO_FILE="${LIST}/Centos-Vault.repo"

###
### Copy rpm package list to cache dir because we are going to use yum on the image
###
echo cp "${RPM_FILE}" "${RPM_CACHE_DIR}"
cp "${RPM_FILE}" "${RPM_CACHE_DIR}"

# The Vault repo file is optional; hard-link it into the VM staging area
# when present.
if [ -e "${REPO_FILE}" ] ; then
    echo cp "${REPO_FILE}" "${RPM_CACHE_DIR}"
    cp "${REPO_FILE}" "${RPM_CACHE_DIR}"
    ln "${RPM_CACHE_DIR}/Centos-Vault.repo" "${OUTPUT_DIR}/temp/rpm/Centos-Vault.repo"
fi
ln "${RPM_CACHE_DIR}/rpm-packages.txt" "${OUTPUT_DIR}/temp/rpm/rpm-packages.txt"

## PIP
mkdir -p "${PIP_CACHE_DIR}"

# Let PIP do the work of downloading and verifying packages
pip download -d "${PIP_CACHE_DIR}" -r "${LIST}/pip-requirements.txt"

# Link packages and requirements file into VM's temp directory
mkdir -p "${OUTPUT_DIR}/temp/pip"
ln "${PIP_CACHE_DIR}"/* "${OUTPUT_DIR}/temp/pip/"
ln "${LIST}/pip-requirements.txt" "${OUTPUT_DIR}/temp/requirements.txt"

###
### 4. Link the nested VM image
###
rm -fr "${OUTPUT_DIR}/temp/nested-vm"
mkdir "${OUTPUT_DIR}/temp/nested-vm"
ln "${NESTED_IMG}" "${OUTPUT_DIR}/temp/nested-vm/"

###
### 5. Create Version and Changelog files
###
echo "${RELEASE}" > "${OUTPUT_DIR}/temp/VERSION"
ln CHANGELOG "${OUTPUT_DIR}/temp/CHANGELOG"
###
### 6. Run packer
###
# Full packer debug log is written next to the other build artifacts.
export PACKER_LOG=1
export PACKER_LOG_PATH="${OUTPUT_DIR}/packer.log"

# -only selects the builder matching the provider chosen in step 1;
# -force overwrites any previous output for this release.
"${PACKER_DIR}/packer" build -var "release=${RELEASE}" \
    -only "${RELEASE}-${OUTPUT_PROVIDER}" \
    -var "output_dir=${OUTPUT_DIR}/packer" \
    -var "temp_dir=${OUTPUT_DIR}/temp" \
    -var "atlas_version=${ATLAS_VERSION}" \
    -force \
    -machine-readable "${PACKER_TEMPLATE}"

# TODO: Need to figure out "packer push" later. Currently keeps failing.
# ${PACKER_DIR}/packer push -name ckoester/test123 -var "release=${RELEASE}" \
#     -var "output_dir=${OUTPUT_DIR}/packer" \
#     -var "temp_dir=${OUTPUT_DIR}/temp" \
#     ${PACKER_TEMPLATE}

###
### 7. Clean up
###
# Remove the staging directory; only the packer output remains.
rm -fr "${OUTPUT_DIR}/temp"

echo "Done."
echo "Artifacts:"
echo
ls -lR "${OUTPUT_DIR}"
| true
|
2bd8163f5a1fcd759e6c2f1fb253eede47193ac9
|
Shell
|
Weltraumschaf/vagrant
|
/_scripts/install-minikube.sh
|
UTF-8
| 1,772
| 3.75
| 4
|
[] |
no_license
|
#!/usr/bin/env bash

# Provisioning script for the Vagrant guest: installs kubectl, helm and
# minikube.

set -euo pipefail

# @see: http://wiki.bash-hackers.org/syntax/shellvars
[ -z "${SCRIPT_DIRECTORY:-}" ] \
    && SCRIPT_DIRECTORY="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"

# Inside the guest the project is always mounted at /vagrant, so the
# script directory is taken from there. (A previous assignment derived
# from SCRIPT_DIRECTORY was immediately overwritten and has been removed
# as dead code.)
PROJECT_DIR="/vagrant"
SCRIPT_DIR="${PROJECT_DIR}/_scripts"

# shellcheck source=lib.sh
source "${SCRIPT_DIR}/lib.sh"

log "Executing '${0}' in ${SCRIPT_DIRECTORY} ..."
log "Installing Minikube ..."
# Remove the downloaded .deb; registered to run both on normal exit and
# on interruption so no partial download is left behind.
cleanup() {
    rm -rfv "${MINIKUBE_DEB_PATH}"
}
trap cleanup EXIT INT
# Register the upstream Kubernetes APT repository so kubectl can be
# installed through the package manager.
# (https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-using-native-package-management)
add_kubectl_apt_source() {
    local key_url="https://packages.cloud.google.com/apt/doc/apt-key.gpg"
    local repo_line="deb https://apt.kubernetes.io/ kubernetes-xenial main"

    add_apt_key "${key_url}"
    add_apt_source "${repo_line}" "kubernetes"
}
# Register the upstream Helm APT repository.
# (https://helm.sh/docs/intro/install/)
add_helm_apt_source() {
    local key_url="https://baltocdn.com/helm/signing.asc"
    local repo_line="deb https://baltocdn.com/helm/stable/debian/ all main"

    add_apt_key "${key_url}"
    add_apt_source "${repo_line}" "helm"
}
# Fetch a repository signing key and register it with apt-key.
#   $1 - URL of the GPG key
add_apt_key() {
    local key_url
    key_url="${1}"
    curl -fsSL "${key_url}" | apt-key add -
}
# Write a single APT source entry into its own list file.
#   $1 - complete source line (e.g. "deb https://... main")
#   $2 - basename of the file under /etc/apt/sources.list.d (".list" added)
add_apt_source() {
    local src="${1}"
    local destination="${2}"
    # printf is safe for arbitrary data; echo would misinterpret content
    # starting with "-n"/"-e" (and, in some shells, backslash escapes).
    printf '%s\n' "${src}" > "/etc/apt/sources.list.d/${destination}.list"
}
# Install minikube (https://minikube.sigs.k8s.io/docs/start/)
# Download the latest minikube .deb from the official release bucket and
# install it with dpkg. Relies on MINIKUBE_DEB_PATH / MINIKUBE_DEB_FILE
# being set at the top of this script.
download_and_install_minikube() {
    curl -sSLo "${MINIKUBE_DEB_PATH}" "https://storage.googleapis.com/minikube/releases/latest/${MINIKUBE_DEB_FILE}"
    dpkg -i "${MINIKUBE_DEB_PATH}"
}

# Backward-compatible alias: keeps the original (misspelled) name so
# existing callers continue to work.
donwload_and_install_minikube() {
    download_and_install_minikube "$@"
}
# Register the third-party repositories first, then refresh the package
# index so the new repos are visible to apt-get.
add_kubectl_apt_source
add_helm_apt_source
apt-get update
# kubectl and helm come from the repositories registered above.
apt-get install -y \
    kubectl \
    helm
# minikube ships as a standalone .deb rather than through a repository.
donwload_and_install_minikube
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.