blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
fbea7e2a21f521b9cfc5a97da709f63a3c33ae09 | Shell | rldofilhog/ShellScript | /lista31/lista3/questao5/questao5.sh | UTF-8 | 539 | 3.640625 | 4 | [] | no_license | #! /bin/bash
# Interactive menu: loop forever, prompting for an option and running the
# matching file-listing command against the current directory.
while true; do
echo "Digite uma das opções abaixo!"
echo
echo "Listar apenas os diretórios (-a)"
echo "Listar apenas os arquivos executáveis (-b)"
echo "Listar apenas os links simbólicos (-c)"
echo "Listar apenas os scripts (-d)"
echo "Listar apenas os arquivos com tamanho menor que 200 Bytes (-e)"
echo
# '|| break' stops the loop on EOF; without it the script spins forever
# when stdin is exhausted (e.g. when input is piped in).
read -p "Digite uma das opções: " opc || break
case $opc in
# 'ls -ld' listed only '.' itself; 'ls -d -- */' lists the directories.
"-a") ls -d -- */ ;;
"-b") find . -executable ;;
"-c") find . -type l -ls ;;
# The old 'find | grep .sh' matched any name containing "Xsh" (unescaped
# dot); match real *.sh names instead.
"-d") find . -name '*.sh' ;;
"-e") find . -type f -size -200c ;;
esac
done
| true |
1fbe343723f763acf99b8e56e9c92230d6736f62 | Shell | jcap/docker-elk | /logstash/gantry | UTF-8 | 1,121 | 3.515625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Wrapper around the docker CLI for building/running the logstash image,
# its data-only container, and for backup/restore of the data volume.
LOGSTASH_IMAGE="jcap/logstash"
LOGSTASH_CONTAINER="logstash"
LOGSTASH_DATA="logstash-data"
BASEIMAGE="alpine:latest"
# Quote "$1"/"$2" throughout: an unquoted archive path with spaces previously
# split into multiple words and broke the backup/restore redirections.
case "$1" in
	build-image)
		docker build --no-cache --pull -t "${LOGSTASH_IMAGE}" .
		;;
	create-data)
		# Data-only container holding the elasticsearch data volume.
		docker create --name "${LOGSTASH_DATA}" -v /usr/share/elasticsearch/data -u elasticsearch "${LOGSTASH_IMAGE}" /bin/true
		;;
	start-logstash)
		docker run --name "${LOGSTASH_CONTAINER}" -d \
		--link="elasticsearch:elasticsearch" \
		--restart=always \
		"${LOGSTASH_IMAGE}"
		;;
	stop-logstash)
		docker stop "${LOGSTASH_CONTAINER}"
		;;
	shell)
		# Throwaway root shell inside the image, for debugging.
		docker run --rm --name "${LOGSTASH_CONTAINER}-shell" --link="elasticsearch:elasticsearch" -i -u root -t "${LOGSTASH_IMAGE}" /bin/bash
		;;
	backup)
		# Stream a tar of the data volume through xz into the file named by $2.
		docker run --volumes-from "${LOGSTASH_DATA}" --rm "${BASEIMAGE}" tar -cvf - -C /usr/share/elasticsearch data | xz > "$2"
		;;
	restore)
		# Inverse of backup: recreate the data container from the archive in $2.
		xzcat "$2" | docker run --name "${LOGSTASH_DATA}" -v /usr/share/elasticsearch/data -i "${BASEIMAGE}" tar -xvf - -C /usr/share/elasticsearch
		;;
	*)
		echo "./gantry {build-image, create-data, start-logstash, stop-logstash, shell, backup, restore}"
		;;
esac
| true |
bee091bc198c53f968046bbf3b554dceca6662ed | Shell | Gerhut/DLWorkspace | /src/ClusterBootstrap/scripts/pscp_role.sh | UTF-8 | 918 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Usage: pscp_role.sh <role> <local-file> <remote-path>
# Copies <local-file> to <remote-path> on every cluster node that has the
# given role, using parallel-scp with the deployment ssh key.
role=$1
file=$2
remote=$3
# Resolve the directory this script lives in so relative paths work from anywhere.
SCRIPT=`realpath $0`
SCRIPTPATH=`dirname $SCRIPT`
mkdir -p $SCRIPTPATH/../ContinuousIntegration
# Cluster name from cluster.yaml, lower-cased to match node-name prefixes.
CLUSTER_NAME=$(grep cluster_name $SCRIPTPATH/../cluster.yaml | awk '{print $2}' | tr '[:upper:]' '[:lower:]')
# Build (and cache) the per-role host list: take the 3 lines preceding each
# "role: <role>" entry in cluster.yaml, keep the <cluster>-prefixed node names,
# strip colons/comments, append the cluster's domain suffix, and sort.
if [ ! -e $SCRIPTPATH/../ContinuousIntegration/${role}_list ];then
grep -B3 "role: ${role}" $SCRIPTPATH/../cluster.yaml | grep ${CLUSTER_NAME}- | sed 's/://g' | awk '{print $1}' | grep -v "#" | awk '{printf("%s.%s\n", $1, DOMAIN_SUFFIX)}' DOMAIN_SUFFIX=$(grep domain $SCRIPTPATH/../cluster.yaml | awk '{print $2}') | sort > $SCRIPTPATH/../ContinuousIntegration/${role}_list
fi;
# One parallel transfer per host; -t 0 disables the per-host timeout.
NUM_role=$(cat $SCRIPTPATH/../ContinuousIntegration/${role}_list | wc -l)
parallel-scp -t 0 -p ${NUM_role} -h $SCRIPTPATH/../ContinuousIntegration/${role}_list -x "-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -i $SCRIPTPATH/../deploy/sshkey/id_rsa" -l core $file $remote
ff084346a6bffa9cba1cce0fd4fac6940795d64a | Shell | igorarouca/liferay-hackathon | /gradle/liferay-workspace-ee/ospackage/templates/pkg_scripts/install-liferay-bundle.sh | UTF-8 | 8,059 | 4 | 4 | [] | no_license | #!/bin/sh
set -e
## WARNING: make sure Liferay bundle is stopped _BEFORE_ running this script
##
## This script is intended to be invoked by Jenkins server, over SSH as user 'root' (or using sudo).
## It will take Liferay bundle archive as produced by Liferay Workspace's Gradle build (see LIFERAY_BUNDLE)
## and install it into known Liferay portal home (see LIFERAY_HOME).
##
## 'LIFERAY_HOME_DATA_DIR_NAME' and 'portal-setup-wizard.properties' are backed up and restored after
## installation of the new build, to preserve their content.
##
@UTILS_FILE_CONTENT@
##
## Constants
##
## NOTE: every '@...@' token below is a placeholder substituted by the Gradle
## build before this script is deployed; the check further down verifies that.
# The location of the Liferay bundle archive to install.
LIFERAY_BUNDLE_ARCHIVE='@LIFERAY_BUNDLE_ARCHIVE@'
# The name of OS user to own all installed bundle's files.
LIFERAY_USER='@LIFERAY_USER@'
# The name of OS group to own all installed bundle's files.
LIFERAY_GROUP='@LIFERAY_GROUP@'
# The Liferay bundle home, new bundle will be extracted there.
LIFERAY_HOME='@LIFERAY_HOME@'
# The name of data directory inside LIFERAY_HOME, typically 'data'.
# It will _not_ be deleted when removing the bundle files.
LIFERAY_HOME_DATA_DIR_NAME='@LIFERAY_HOME_DATA_DIR_NAME@'
# The name of the directory inside Liferay bundle home with the app server.
# For Tomcat server, this will most likely be like 'tomcat-8.0.32'. This is used
# to locate the validation file and set up app server, if necessary.
LIFERAY_HOME_APP_SERVER_DIR_NAME='@LIFERAY_HOME_APP_SERVER_DIR_NAME@'
# The file inside the app server (relative to the LIFERAY_HOME_APP_SERVER_DIR_NAME)
# which can be used to verify the bundle was installed correctly. Typically some script,
# like 'bin/catalina.sh' for Tomcat.
APP_SERVER_VALIDATION_FILE_PATH='@APP_SERVER_VALIDATION_FILE_PATH@'
# Make sure our custom 'die' command can be used safely
# ('die' is expected to come from the @UTILS_FILE_CONTENT@ inclusion above).
if ! command -v die >/dev/null 2>&1; then
echo "==> ERROR: util command 'die' not found, was utils.sh included by Gradle?" && exit 127
fi
# Fail fast if any placeholder was left unsubstituted or substituted empty.
for const in \
"$LIFERAY_BUNDLE_ARCHIVE" \
"$LIFERAY_USER" \
"$LIFERAY_GROUP" \
"$LIFERAY_HOME" \
"$LIFERAY_HOME_DATA_DIR_NAME" \
"$LIFERAY_HOME_APP_SERVER_DIR_NAME" \
"$APP_SERVER_VALIDATION_FILE_PATH"; do
case "$const" in
@*@) die "One of the constants was not replaced by Gradle (starts and ends with '@').";;
'') die "One of the constants has an empty value";;
esac
done
##
## Computed Constants
##
# The full path of the app server (like Tomcat) inside the bundle.
APP_SERVER_HOME="${LIFERAY_HOME}/${LIFERAY_HOME_APP_SERVER_DIR_NAME}"
# The working directory when the script was started. The zip file may be relative to this path,
# so we need to remember this path and use ('cd' into) it before unzipping the bundle
SCRIPT_PWD=`pwd`
# The timestamp
TIMESTAMP=`date +%s`
# The directory where important files from previous build will be backed before
# installing new build and up and restored.
TMP_DATA_BACKUP_DIR="/tmp/liferay.data.backup.$TIMESTAMP"
# Top-level workflow: back up user data, wipe and re-extract the bundle,
# restore the data, fix ownership/permissions, then sanity-check the result.
# Must run as root (enforced first); aborts on any step failing (set -e).
install_liferay_bundle() {
echo "================================"
echo " Installation of Liferay bundle "
echo "================================"
enforce_root_user
echo "==> Installing Liferay bundle '$LIFERAY_BUNDLE_ARCHIVE' into '$LIFERAY_HOME'..."
backup_previous_data_and_remove_bundle
extract_new_bundle
restore_previous_data
set_bundle_files_owner_and_permissions
verify_bundle_installed
echo "==> New Liferay bundle '$LIFERAY_BUNDLE_ARCHIVE' was installed into '$LIFERAY_HOME' successfully"
}
# Abort unless running as root: the install chowns/chmods files under
# LIFERAY_HOME, which requires superuser privileges.
enforce_root_user() {
local current_user
current_user=$(whoami)
[ "$current_user" = "root" ] || die "==> ERROR: Only 'root' (or sudo) is allowed to run this script, sorry. You are '$current_user'."
}
# Preserve the previous bundle's user data before wiping LIFERAY_HOME:
# deletes everything under LIFERAY_HOME except the data dir and
# portal-setup-wizard.properties, rsyncs the survivors into
# TMP_DATA_BACKUP_DIR, then removes the whole bundle directory.
backup_previous_data_and_remove_bundle() {
if [ ! -d "$LIFERAY_HOME" ]; then
echo "==> Nothing to backup, Liferay bundle's directory '$LIFERAY_HOME' does not exist"
return
fi
echo "==> Backing up data of previous bundle ('$LIFERAY_HOME')"
# create clean directory for backup
rm -rf $TMP_DATA_BACKUP_DIR
mkdir -p $TMP_DATA_BACKUP_DIR
# Based on http://unix.stackexchange.com/a/153863
# remove all files / directories except the ones in the list
cd $LIFERAY_HOME
find . -maxdepth 1 ! -path . \
! \( -name "$LIFERAY_HOME_DATA_DIR_NAME" -o -name 'portal-setup-wizard.properties' \) -exec rm -rf {} +
# Backup data-related files and directories which were excluded from delete above
# (1) [bundle]/$LIFERAY_HOME_DATA_DIR_NAME/* (typically, LIFERAY_HOME_DATA_DIR_NAME == 'data')
# (2) [bundle]/portal-setup-wizard.properties
#
# it's important to have '/' at the end of source and target directory
# based on: https://stackoverflow.com/questions/20300971/rsync-copy-directory-contents-but-not-directory-itself
rsync -a $LIFERAY_HOME/ $TMP_DATA_BACKUP_DIR/
echo "==> Finished backing up data from previous bundle"
echo "==> Contents of '$TMP_DATA_BACKUP_DIR':"
ls -l $TMP_DATA_BACKUP_DIR
du -h -d 1 $TMP_DATA_BACKUP_DIR
# remove old bundle
rm -rf $LIFERAY_HOME
echo "==> Previous bundle removed from '$LIFERAY_HOME'"
}
# Extract the bundle archive (.zip or .tar.gz) into a fresh LIFERAY_HOME.
# The archive path may be relative to the original working directory, so we
# cd back to SCRIPT_PWD before extracting.
extract_new_bundle () {
echo "==> Extract new bundle into '$LIFERAY_HOME'"
mkdir -p "$LIFERAY_HOME"
cd "$SCRIPT_PWD"
# Unzip / untar Liferay bundle into target Liferay bundle home
case "$LIFERAY_BUNDLE_ARCHIVE" in
*.zip)
# -qq ~ very quiet = do not output anything
# -o ~ overwrite files = since we add overrides (from configs/ on top of e.g. tomcat-8.0.32),
# its very likely there will be duplicate entries in the archive;
# we want to overwrite without prompting
# -d ~ extract into given directory
unzip -qq -o "$LIFERAY_BUNDLE_ARCHIVE" -d "$LIFERAY_HOME"
;;
*.tar.gz)
# x ~ extract
# z ~ use GZip (un)compression
# f ~ extract from file
# --directory ~ extract into given directory
# .tar (unlike .zip above) automatically overwrites during extracting and
# does not prompt for confirmation of overwriting duplicate entries present in the archive
tar xzf "$LIFERAY_BUNDLE_ARCHIVE" --directory "$LIFERAY_HOME"
;;
*)
die "==> ERROR: Unknown format of Liferay bundle file ($LIFERAY_BUNDLE_ARCHIVE). Only .zip or .tar.gz archives are supported"
esac
# For some odd reason, Tomcat cannot create [tomcat]/logs/ directory (if missing)
# and fails to start. So create it manually.
# Use -p: a plain 'mkdir' aborts the whole script (set -e) when the archive
# already ships a logs/ directory.
mkdir -p "$APP_SERVER_HOME/logs"
echo "==> New bundle extracted into '$LIFERAY_HOME':"
ls -lah "$LIFERAY_HOME"
}
# Copy the data preserved by backup_previous_data_and_remove_bundle back
# on top of the freshly extracted bundle, then delete the backup dir.
# A missing backup dir is normal on first install.
restore_previous_data () {
if [ ! -d "$TMP_DATA_BACKUP_DIR" ]; then
echo "==> Nothing to restore, backup directory '$TMP_DATA_BACKUP_DIR' does not exist"
return
fi
echo "==> Restoring previous bundle's data"
echo "==> Contents of '$TMP_DATA_BACKUP_DIR':"
ls -l $TMP_DATA_BACKUP_DIR
du -h -d 1 $TMP_DATA_BACKUP_DIR
# it's important to have '/' at the end of source and target directory
# based on: https://stackoverflow.com/questions/20300971/rsync-copy-directory-contents-but-not-directory-itself
rsync -a $TMP_DATA_BACKUP_DIR/ $LIFERAY_HOME/
rm -rf $TMP_DATA_BACKUP_DIR
echo "==> Restored previous bundle's data from '$TMP_DATA_BACKUP_DIR' into '$LIFERAY_HOME'"
echo "==> New contents of '$LIFERAY_HOME':"
ls -lah $LIFERAY_HOME
}
# Hand ownership of the installed bundle to the Liferay service account and
# lock down property files that may contain credentials.
set_bundle_files_owner_and_permissions () {
# Quote the expansions: an unquoted LIFERAY_HOME containing spaces would
# word-split and chown/chmod the wrong paths.
chown --recursive "$LIFERAY_USER:$LIFERAY_GROUP" "$LIFERAY_HOME"
# The bundle files might contain passwords to DB or remote systems, which
# should be visible only to user 'liferay'.
# the properties in Liferay portal home may contain credentials to other systems
chmod 600 "$LIFERAY_HOME"/*.properties
echo "==> New bundle's file permissions / ownership set in '$LIFERAY_HOME':"
ls -lah "$LIFERAY_HOME"
}
# Sanity check: after extraction a well-known app-server file (e.g. Tomcat's
# bin/catalina.sh) must exist, otherwise the install is considered failed.
verify_bundle_installed () {
app_server_validation_file="$APP_SERVER_HOME/$APP_SERVER_VALIDATION_FILE_PATH"
if [ ! -f "$app_server_validation_file" ]; then
die "==> ERROR: Liferay bundle was not installed, expected app server file '$app_server_validation_file' does not exist"
fi
echo "==> Liferay bundle was successfully installed, expected app server file '$app_server_validation_file' exists"
}
# Entry point: run the installation workflow defined above.
install_liferay_bundle
2bc3d340cb1f0b2d8d90141819b8c6d0d7770737 | Shell | xircon/Scripts-dots | /wake-up.sh | UTF-8 | 698 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env bash
#
# Ensure script is running as root. rtcwake needs sudo access
if (( EUID != 0 )); then
echo "This script cannot function if it is not run as root"
exit 1
fi
# Program the RTC alarm for the next 07:30 wake-up: on weekdays wake up
# tomorrow morning, on the weekend wake up on Monday morning.
# (date +%A is locale-dependent; non-English weekday names match no arm,
# same as the original substring checks.)
today=$(date +%A)
case "$today" in
Monday|Tuesday|Wednesday|Thursday|Friday)
rtcwake -m no -l -t "$(date -d 'tomorrow 07:30:00' '+%s')"
;;
Saturday|Sunday)
rtcwake -m no -l -t "$(date -d 'Monday 07:30:00' '+%s')"
;;
esac
# Example provided for Banshee. This will provide you a GUI to stop the music. Edit for the CLI to the player you use
# If you don't care about a GUI, play would work fine.
banshee --play /path/to/file_or_playlist
| true |
34c765a07e346f5af1ed44dd370b5740b9bcd43a | Shell | tomchaplin/glpr | /glpr | UTF-8 | 3,746 | 3.9375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Global state for the dialog-based print UI.
# Enabled printer names, one per line (2nd column of 'lpstat -p' lines).
PRINTER_LIST=$(lpstat -p | grep enabled | awk '{print $2}')
# System default printer (4th word of 'lpstat -d' output).
DEFAULT_PRINTER=$(lpstat -d | awk '{print $4}')
# Printer currently selected in the UI; starts as the system default.
WORKING_PRINTER="$DEFAULT_PRINTER"
DIALOG_TITLE="glpr"
COPIES=1
PAGE_RANGE=""
PAGES_PER_SHEET=1
# Per-printer option defaults/selections, keyed by PPD option name
# (PageSize, OutputMode, Duplex).
declare -A PRINTER_OPTIONS
# Load WORKING_PRINTER's current defaults into PRINTER_OPTIONS.
# 'lpoptions -l' marks the active choice with a leading '*'
# (e.g. "PageSize/Media Size: *A4 Letter ..."); the grep -Po pulls the
# starred token and 'cut -c 2-' strips the asterisk.
get_printer_defaults() {
PRINTER_OPTIONS[PageSize]=$(lpoptions -p $WORKING_PRINTER -l | grep 'PageSize.*:' | grep -Po '\*.*?\s' | cut -c 2-)
PRINTER_OPTIONS[OutputMode]=$(lpoptions -p $WORKING_PRINTER -l | grep 'OutputMode.*:' | grep -Po '\*.*?\s' | cut -c 2-)
PRINTER_OPTIONS[Duplex]=$(lpoptions -p $WORKING_PRINTER -l | grep 'Duplex.*:' | grep -Po '\*.*?\s' | cut -c 2-)
}
# Main menu: show all current print settings and dispatch to the matching
# sub-dialog. Each sub-dialog re-invokes this menu, so the UI is a loop of
# mutually recursive calls until the user picks "Accept".
display_main_dialog() {
# dialog draws on the tty and writes the selection to stderr, hence the
# '2>&1 >/dev/tty' redirection to capture the choice.
choice=$(dialog --title "$DIALOG_TITLE" \
--ok-label "Change/Accept" \
--menu "Print Options" 20 70 8 \
"Printer" "$WORKING_PRINTER" \
"Page Size" "${PRINTER_OPTIONS[PageSize]}" \
"Quality" "${PRINTER_OPTIONS[OutputMode]}" \
"Duplex" "${PRINTER_OPTIONS[Duplex]}" \
"Page Range" "$PAGE_RANGE" \
"Copies" "$COPIES" \
"Pages per sheet" "$PAGES_PER_SHEET" \
"Accept" "" 2>&1 >/dev/tty)
case $choice in
Printer)
display_printer_dialog
;;
"Page Size")
display_options_dialog "PageSize" "Page Size:"
;;
Quality)
display_options_dialog "OutputMode" "Print Quality:"
;;
Duplex)
display_options_dialog "Duplex" "Duplex Options:"
;;
Copies)
display_input_dialog "Copies:"
;;
"Page Range")
display_input_dialog "Page Range:"
;;
"Pages per sheet")
pages_per_sheet_dialog
;;
Accept)
build_print_command
;;
*)
;;
esac
}
# Let the user pick one of the enabled printers, then reload that printer's
# defaults and return to the main menu.
display_printer_dialog() {
dialog_cmd=(dialog --title "$DIALOG_TITLE" --ok-label "Select" --nocancel --menu "Enabled Printers:" 20 70 8 )
options=()
# dialog menus want tag/description pairs; descriptions are left empty.
while IFS= read -r printer
do
options+=("$printer" "")
done <<< "$PRINTER_LIST"
WORKING_PRINTER=$("${dialog_cmd[@]}" "${options[@]}" 2>&1 >/dev/tty)
get_printer_defaults
display_main_dialog
}
# Generic chooser for a PPD option. $1 = option key (e.g. "PageSize"),
# $2 = dialog prompt. Parses the option's choice list out of 'lpoptions -l'
# and stores the user's pick in PRINTER_OPTIONS[$1].
display_options_dialog() {
dialog_cmd=(dialog --title "$DIALOG_TITLE" --ok-label "Select" --nocancel --menu "$2" 20 70 12 )
# Everything after the ':' on the matching lpoptions line, one choice per line.
possible_sizes=$(lpoptions -p $WORKING_PRINTER -l | grep "$1.*:" | cut -d':' -f2- | cut -c2- | tr " " "\n" )
options=()
while IFS= read -r option
do
# The default option has an asterisk at the start, we need to remove this
if [[ $option == \** ]]; then
option="${option:1}"
fi
options+=("$option" "")
done <<< "$possible_sizes"
PRINTER_OPTIONS["$1"]=$("${dialog_cmd[@]}" "${options[@]}" 2>&1 >/dev/tty)
display_main_dialog
}
# Free-form input box for the two text settings; "$1" is the prompt and
# selects which global ("Copies:" -> COPIES, otherwise PAGE_RANGE) is edited.
display_input_dialog() {
local label=$1
local current
case "$label" in
"Copies:") current=$COPIES ;;
*) current=$PAGE_RANGE ;;
esac
local entered
entered=$(dialog --title "$DIALOG_TITLE" --nocancel --inputbox "$label" 20 70 "$current" 2>&1 >/dev/tty)
case "$label" in
"Copies:") COPIES=$entered ;;
*) PAGE_RANGE=$entered ;;
esac
display_main_dialog
}
# Fixed menu of n-up values accepted by CUPS' number-up option.
pages_per_sheet_dialog() {
# Fix: the original passed '--title ="$DIALOG_TITLE"' (stray '='), which made
# the window title the literal string '=glpr' instead of the configured title.
PAGES_PER_SHEET=$(dialog --title "$DIALOG_TITLE" --nocancel --ok-label "Select"\
--menu "Pages per sheet:" 20 70 6\
1 "" 2 "" 4 "" 6 "" 9 "" 16 "" 2>&1 >/dev/tty)
display_main_dialog
}
# Assemble the lpr invocation as an array (so each option and the filename
# survive as single arguments), confirm with the user, then run it.
build_print_command() {
print_cmd=(lpr -P "$WORKING_PRINTER")
if [[ -n "$PAGE_RANGE" ]]; then
print_cmd+=(-o page-ranges="$PAGE_RANGE")
fi
print_cmd+=(-o PageSize="${PRINTER_OPTIONS[PageSize]}")
print_cmd+=(-o OutputMode="${PRINTER_OPTIONS[OutputMode]}")
print_cmd+=(-o Duplex="${PRINTER_OPTIONS[Duplex]}")
print_cmd+=(-o number-up="$PAGES_PER_SHEET")
print_cmd+=(-\#"$COPIES")
print_cmd+=("$FILENAME")
# Space-joined rendering of the command, for display only.
expanded_cmd="${print_cmd[@]}"
if dialog --stdout --title "$DIALOG_TITLE"\
--yesno "Running the following print command:\n\n$expanded_cmd" 20 70; then
# Execute the array directly. The previous 'eval "${print_cmd[@]}"'
# re-parsed the joined string, so filenames containing spaces or shell
# metacharacters were word-split and glob-expanded again.
"${print_cmd[@]}"
else
exit 1
fi
}
# Entry point: require the file to print as $1, then start the menu loop.
if [[ -z $1 ]]; then
echo "Please specify a filename"
exit 1
else
FILENAME=$1
fi
get_printer_defaults
display_main_dialog
| true |
4f86efe615f7aee02e73d6681ca2f1f2c757b12e | Shell | pensandoodireito/sislegis-ambiente-centos | /instalar | UTF-8 | 2,690 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Author: Paulo Jeronimo (email: paulojeronimo@gmail.com, twitter: @paulojeronimo)
set +x
# Directory containing this script; config/defaults are sourced relative to it.
BASEDIR=`cd $(dirname "$0"); echo -n $PWD`
source "$BASEDIR"/config || exit 1
source "$BASEDIR"/defaults || exit 1
# Flag forwarded to the sub-installers: download installers or not.
BAIXA_ARQUIVO=`$BAIXA_INSTALADORES && echo -n --baixa-arquivo || echo -n --nao-baixa-arquivo`
# "true"/"false" string: is an X server (xhost) available on this machine?
X_INSTALADO=`type xhost &> /dev/null && echo -n true || echo -n false`
# Switch the wheel group to passwordless sudo (comment the plain rule,
# uncomment the NOPASSWD rule).
sudo sed -i '
s/^\(%wheel.*ALL\)/#\1/
s/^#.*\(%wheel.*NOPASSWD.*\)/\1/
' /etc/sudoers
# Outside Vagrant: create the dedicated sislegis user and, when X is present,
# grant it access to the local display. Inside Vagrant: reuse the vagrant
# user and enable yum package caching.
if [ $USER != vagrant ]
then
sudo useradd -m -s /bin/bash -G wheel $SISLEGIS_USER &> /dev/null || {
echo "Falha ao criar usuário $SISLEGIS_USER."
echo "O ambiente já não está instalado?"
exit 1
}
$X_INSTALADO && {
cmd="xhost +si:localuser:$SISLEGIS_USER"; $cmd
f=~/.bashrc; grep -q "xhost.*$SISLEGIS_USER" $f || { echo "$cmd" >> $f; }
}
else
SISLEGIS_USER=vagrant
sudo sed -i 's/\(keepcache=\).*/\11/' /etc/yum.conf
fi
"$BASEDIR"/restaurar
# Everything below runs as $SISLEGIS_USER via a heredoc fed to a login bash.
# Unescaped $VARS are expanded *here* (by this outer script); \$VARS are
# expanded later inside the inner shell.
sudo -i -u $SISLEGIS_USER bash <<EOF
executar_e_inserir_no_bashrc() {
local cmd="\$@"
eval \$cmd; echo \$cmd >> ~/.bashrc
}
$X_INSTALADO && echo 'export DISPLAY=:0' >> ~/.bashrc
if $USA_PROXY
then
f="$BASEDIR"/.proxy
sudo cp "\$f" ~/
sudo chown $SISLEGIS_USER: ~/.proxy
executar_e_inserir_no_bashrc "source ~/.proxy"
executar_e_inserir_no_bashrc "export INSTALA_OPCS+=\" --usa-proxy\""
fi
executar_e_inserir_no_bashrc "export INSTALA_OPCS+=\" $BAIXA_ARQUIVO\""
sudo yum -y update --exclude='kernel*'
if type git &> /dev/null
then
if ! grep -q 2.0.5 <(git --version)
then
sudo yum -y remove git
"$BASEDIR"/instalar-git; source /etc/profile.d/git.sh
fi
else
"$BASEDIR"/instalar-git; source /etc/profile.d/git.sh
fi
sudo yum -y install rsync tar unzip wget tree dos2unix lsof redhat-lsb-core vim java-1.8.0-openjdk-devel
git clone $GITHUB_SISLEGIS_DOTFILES
${GITHUB_SISLEGIS_DOTFILES##*/}/install
git config --global user.name '$GITHUB_NAME'
git config --global user.email $GITHUB_EMAIL
git clone $GITHUB_SISLEGIS_AMBIENTE
[ \$USER = vagrant ] && {
echo 'export PROJETOS_DIR=~/projetos' > ${GITHUB_SISLEGIS_AMBIENTE##*/}/ambiente.config
}
cat <<_EOF >> ${GITHUB_SISLEGIS_AMBIENTE##*/}/ambiente.config
APP_AMBIENTE=$APP_AMBIENTE
APP_HOST=$APP_HOST
APP_IP=$APP_IP
_EOF
cat <<_EOF > ${GITHUB_SISLEGIS_AMBIENTE##*/}/.projetos
app=$GITHUB_SISLEGIS_APP
app_frontend=$GITHUB_SISLEGIS_APP_FRONTEND
site=$GITHUB_SISLEGIS_SITE
_EOF
${GITHUB_SISLEGIS_AMBIENTE##*/}/instalar
source .ambiente
app_baixar
app_remote_add_upstream
app_update_and_deploy
app_createdb
app_frontend_baixar
app_frontend_remote_add_upstream
jboss_start
EOF
# vim: set ts=4 sw=4 expandtab:
| true |
7b18c20ddd439496531c755c38db72c8ebbbf52a | Shell | BinaryArtists/scripts-help-a-lot | /dev-ios/image/duplicated-pngs.sh | UTF-8 | 497 | 3.484375 | 3 | [] | no_license | #!/bin/bash
############################
## Change to the directory containing this script, then to its parent
## (the project root), so the find below scans the whole project tree.
############################
if [ "$(echo "$0" | grep -c "/")" -gt 0 ]; then
cd "${0%/*}"
fi
cd ..
# Print the basenames (extension stripped) of .png files that occur more
# than once anywhere in the tree, most-duplicated first.
# Streaming through one pipeline replaces the original quadratic
# string-concatenation loop, and -print0/read -d '' keeps filenames with
# spaces or newlines intact.
# NOTE: the PROJ1/PROJ2/PROJ variables (two extra find passes over .xib and
# .[hm] files) were computed but never used, so they were dropped.
find . -name '*.png' -print0 |
while IFS= read -r -d '' png; do
basename -s .png "$png"
done | sort | uniq -c | sort -r | awk '$1 > 1 {print $2}' | more
| true |
4fd684182ec31b3d79fa9de9ca70f4f1b62c8a5e | Shell | talexie/openinfoman-dhis-at | /resources/scripts/publish_to_ilr.sh | UTF-8 | 6,388 | 4 | 4 | [] | no_license | #!/bin/bash
#configuration options in publish_to_ilr.cfg or in another file specified with the -c option
CONFIG=publish_to_ilr.cfg
########################################################################
# Dependencies:
# sudo apt-get install libxml2-utils jshon
#
#
# DO NOT EDIT BELOW HERE
#
# set some external programs
########################################################################
set -e
# Absolute paths to external tools, so the script does not depend on PATH.
CURL=/usr/bin/curl
PRINTF=/usr/bin/printf
XMLLINT=/usr/bin/xmllint
GREP=/bin/grep
JSHON=/usr/bin/jshon
#########################################################################
# Actual work is below
#########################################################################
# Print usage/help text to stdout.
show_help() {
cat <<EOF
Usage: ${0##*/} [-vhfrd -c <FILE> ]
Publish DHIS2 metadata to the ILR
-h Display this help and exit
-r Reset the last exported time
-f Publish the full DHIS2 metadata (ignore the last exported time)
-d Debug mode
-e Empty the CSD document before publishing
-c <FILE> Specify configuration file for DHIS2 publication options. Defaults to $CONFIG
EOF
}
# Delete the stored last-export timestamp from the DHIS2 data store so the
# next run performs a full publish. Accepts HTTP 200 (deleted) or 404
# (never set) as success.
reset_time() {
source_config
echo "Resetting time on $DHIS2_URL"
$CURL -sv -o /dev/null -w "%{http_code}" -X DELETE $DHIS2_AUTH $DHIS2_URL/api/dataStore/CSD-Loader-Last-Export/$ILR_DOC | $GREP -cs '200\|404'
}
# Load the configuration file and derive the curl auth/TLS argument strings
# for both DHIS2 and the ILR. ILR_USER=false means anonymous ILR access.
# Note: a single IGNORECERTS flag adds '-k' to both endpoints.
source_config() {
echo "Loading configuration options from $CONFIG"
source $CONFIG
#setup DHIS2 and ILR authorization
DHIS2_AUTH="-u $DHIS2_USER:$DHIS2_PASS"
if [ "$IGNORECERTS" = true ]; then
DHIS2_AUTH=" -k $DHIS2_AUTH"
fi
if [ "$ILR_USER" = false ]; then
ILR_AUTH=""
else
ILR_AUTH="-u $ILR_USER:$ILR_PASS"
fi
if [ "$IGNORECERTS" = true ]; then
ILR_AUTH=" -k $ILR_AUTH"
fi
}
#Read in some run time arguments
# Two getopts passes over the same option string: the first records -c/-f/-d/-e
# so that a -c given *after* -h/-r on the command line still takes effect
# before the second pass acts on -h (help) or -r (reset time) and exits.
FULL=false
EMPTY=false
OPTS="edhrfc:"
OPTIND=1
while getopts "$OPTS" OPT; do
case "$OPT" in
c) CONFIG=$OPTARG
;;
f) FULL=true
;;
d) set -x
;;
e) EMPTY=true
;;
esac
done
OPTIND=1
while getopts "$OPTS" OPT; do
case "$OPT" in
h) show_help
exit 0
;;
r) reset_time
exit 0
;;
esac
done
#perform default action
source_config
#check if LastExported key is in CSD-Loader namespace for DHIS2 data store
echo "Checking CSD-Loader data stored contents"
# Temporarily drop -e: a missing key (non-2xx) must not abort the script.
# NOTHASKEY == 1 means the last-export key does not exist yet.
set +e
NOTHASKEYOUT="`$CURL -sv -o /dev/null -w \"%{http_code}\" $DHIS2_AUTH -H \"Accept: application/json\" $DHIS2_URL/api/dataStore/CSD-Loader-Last-Export/$ILR_DOC | $GREP -qs \"200\|201\"`"
NOTHASKEY=$?
set -e
#create destination document (if it doesn't exist)
echo "Creating $ILR_DOC on ILR at $ILR_URL (if it doesn't exist)"
$CURL -sv -o /dev/null -w "%{http_code}" -d "directory=$ILR_DOC" -X POST $ILR_AUTH $ILR_URL/createDirectory | $GREP -qcs '200\|302'
# -e: wipe the ILR document first and forget the last-export time.
if [ "$EMPTY" = true ]; then
$CURL -sv -o /dev/null -w "%{http_code}" $ILR_AUTH $ILR_URL/emptyDirectory/$ILR_DOC | $GREP -qcs '200\|302'
reset_time
fi
#setup request variables to extract DXF from DHIS2
# LASTUPDATE=false means "full publish"; otherwise it holds the yyyy-mm-dd
# date of the previous successful export, read from the DHIS2 data store.
if [ "$FULL" = true ]; then
echo "Doing full publish"
LASTUPDATE=false
elif [ "$NOTHASKEY" = "1" ]; then
echo "Doing full publish"
LASTUPDATE=false
else
echo "Getting last export time from $DHIS2_URL"
LASTUPDATE=`$CURL -sv $DHIS2_AUTH -H 'Accept: application/json' $DHIS2_URL/api/dataStore/CSD-Loader-Last-Export/$ILR_DOC | $JSHON -e value`
#strip any beginning / ending quotes
LASTUPDATE="${LASTUPDATE%\"}"
LASTUPDATE="${LASTUPDATE#\"}"
LASTUPDATE="${LASTUPDATE%\'}"
LASTUPDATE="${LASTUPDATE#\'}"
echo "Last export performed succesfully at $LASTUPDATE"
#convert to yyyy-mm-dd format (dropping time as it is ignored by DHIS2)
LASTUPDATE=$(date --date="$LASTUPDATE" +%F)
fi
# UFLAG/SFLAG are "true"/"false" strings for the metadata query parameters;
# UVAL/SVAL are the 1/0 equivalents embedded in the CSD request later.
# NOTE(review): the config key is spelled DOSUERS (sic) — it must match the
# variable name set in the sourced config file; verify before "fixing".
if [ "$DOSUERS" = true ]; then
UFLAG="true"
UVAL="1"
else
UFLAG="false"
UVAL="0"
fi
if [ "$DOSERVICES" = true ]; then
SFLAG="true"
SVAL=1
else
SFLAG="false"
SVAL="0"
fi
# Build the metadata-export query string: org units always, services
# (data elements & co.) and users only when the respective flag is set.
VAR=(
'assumeTrue=false'
'organisationUnits=true'
'organisationUnitGroups=true'
'organisationUnitLevels=true'
'organisationUnitGroupSets=true'
"categoryOptions=$SFLAG"
"optionSets=$SFLAG"
"dataElementGroupSets=$SFLAG"
"categoryOptionGroupSets=$SFLAG"
"categoryCombos=$SFLAG"
"options=$SFLAG"
"categoryOptionCombos=$SFLAG"
"dataSets=$SFLAG"
"dataElementGroups=$SFLAG"
"dataElements=$SFLAG"
"categoryOptionGroups=$SFLAG"
"categories=$SFLAG"
"users=$UFLAG"
"userGroups=$UFLAG"
"userRoles=$UFLAG"
)
# Join the array as "&k=v&k=v..."; the leading '&' is dropped below (${VAR:1}).
VAR=$($PRINTF "&%s" "${VAR[@]}")
VAR="$VAR$UPDATES"
if [ "$LASTUPDATE" = false ]; then
echo "Publishing all data"
else
echo "Publishing changes since $LASTUPDATE"
VAR="$VAR&lastUpdated=$LASTUPDATE"
fi
#extract data from DHIS2
echo "Extracting DXF from DHIS2 at $DHIS2_URL"
DXF=`$CURL -sv $DHIS2_AUTH -H 'Accept: application/xml' "$DHIS2_URL/api/24/metadata?${VAR:1}" `
# Capture the export's 'created' timestamp, then canonicalize the XML.
EXPORTED=`echo $DXF | $XMLLINT --xpath 'string((/*[local-name()="metaData"])[1]/@created)' -`
DXF=`echo $DXF | $XMLLINT --c14n -`
#Create Care Services Request Parameteres
# Expand the GROUPCODES / LEVELS config arrays into repeated XML elements.
GROUPCODES=$($PRINTF "<group>%s</group>" "${GROUPCODES[@]}")
LEVELS=$($PRINTF "<level>%s</level>" "${LEVELS[@]}")
CSR="<csd:requestParams xmlns:csd='urn:ihe:iti:csd:2013'>
<dxf>$DXF</dxf>
<groupCodes>$GROUPCODES</groupCodes>
<levels>$LEVELS</levels>
<URL>$DHIS2_EXT_URL</URL>
<usersAreHealthWorkers>$UVAL</usersAreHealthWorkers>
<dataelementsAreServices>$SVAL</dataelementsAreServices>
</csd:requestParams>"
#publish to ILR
echo "Publishing to $ILR_DOC on $ILR_URL"
echo $CSR | $CURL -sv --data-binary @- -X POST -H 'Content-Type: text/xml' $ILR_AUTH $ILR_URL/csr/$ILR_DOC/careServicesRequest/update/urn:dhis.org:extract_from_dxf:v2.19
#update last exported
echo "Updating export time in CSD-Loader data store to $EXPORTED"
# POST creates the key on first run; PUT updates it afterwards.
if [ "$NOTHASKEY" = "1" ]; then
METHOD="POST"
else
METHOD="PUT"
fi
# Re-format as ISO-8601 with a colon in the zone offset (+0100 -> +01:00).
EXPORTED=$(date --date="$EXPORTED" +%FT%T%z | sed 's/.\{2\}$/:&/')
echo "Publishing to ILR in $ILR_DOC at $ILR_URL"
PAYLOAD="{ \"value\" : \"$EXPORTED\"}"
echo $PAYLOAD | $CURL -sv -o /dev/null -w "%{http_code}" --data-binary @- $DHIS2_AUTH -X $METHOD -H 'Content-Type: application/json' $DHIS2_URL/api/dataStore/CSD-Loader-Last-Export/$ILR_DOC | $GREP -cs '200\|201'
echo "Successfully published to ILR"
exit 0
| true |
4db408a13012e367166ff8889cd64f4629cb7a98 | Shell | mazzer2009/Python-Scripts | /dump.sh | UTF-8 | 134 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Capture rotating 30-second packet dumps from enp0s31f6, one numbered
# output file per interval, until interrupted.
i=0
while true
do
sudo tcpdump -i enp0s31f6 -w "myfile_$i" &
pid=$!
sleep 30
# Signal via sudo: tcpdump runs as root, so a plain 'kill' from an
# unprivileged user fails with "Operation not permitted" and the capture
# never stops (captures would pile up and overlap).
sudo kill "$pid"
i=$((i + 1))
echo "$pid"
done
| true |
90d4ce5c66338c548b70f4e8221b37c477da5b3e | Shell | acj/aports | /community/libu2f-host/libu2f-host.post-upgrade | UTF-8 | 203 | 3.03125 | 3 | [] | no_license | #!/bin/sh
# apk post-upgrade hook: $2 is the previously installed package version.
ver_old=$2
# 'apk version -t a b' prints '<' when a sorts before b; warn users crossing
# the 1.1.10-r4 boundary that the u2f-host binary moved to its own package.
if [ "$(apk version -t "$ver_old" '1.1.10-r4')" = '<' ]; then
cat >&2 <<-EOF
*
* /usr/bin/u2f-host has been moved to package u2f-host - install it if needed.
*
EOF
fi
exit 0
| true |
5ea8f1077f1a50b684a251e03988989884956f75 | Shell | jorgen/build_script | /bin/pull.sh | UTF-8 | 5,074 | 3.859375 | 4 | [] | no_license | #!/bin/bash
#**************************************************************************************************
# Copyright (c) 2012 Jørgen Lind
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#**************************************************************************************************/
# Resolve the script's real location (following one level of symlink) so the
# build_meta directory can be found relative to it.
REAL_SCRIPT_FILE=${BASH_SOURCE[0]}
if [ -L ${BASH_SOURCE[0]} ]; then
REAL_SCRIPT_FILE=$(readlink ${BASH_SOURCE[0]})
fi
BASE_SCRIPT_DIR="$( dirname "$( cd "$( dirname "$REAL_SCRIPT_FILE" )" && pwd )")"
BUILD_META_DIR=$BASE_SCRIPT_DIR/build_meta
# NOTE(review): "ODER" looks like a typo for "ORDER"; the variable is not
# referenced in this file — confirm against other scripts before renaming.
BUILD_ODER_FILE=$BUILD_META_DIR/build_order
BUILDSET_FILE=""
SYNC_TO_SHA="no"
# Print usage and exit 1 (also used as the error path by the argument
# handlers below).
# NOTE(review): "-s, --src" duplicates "-s, --src-dir" and "-s" is listed
# twice — looks like stale help text; confirm intended flag names.
function print_usage {
echo "Usage for $0"
echo " $0 [options] -s directory"
echo ""
echo "Options:"
echo "-s, --src-dir Source dir (REQUIRED)"
echo "-f, --buildset Buildset file"
echo " Defaults to default_buildset"
echo "-s, --src Source dir"
echo " --sync sync to sha1 specified in buildset"
exit 1
}
# Report a flag ($1) that was given without its required value, then show
# usage (print_usage exits the script).
print_missing_argument() {
printf '\nMissing argument for %s\n\n' "$1"
print_usage
}
# Report an unrecognized command-line argument ($1), then show usage
# (print_usage exits the script).
print_unknown_argument() {
printf '\nUnknown argument: %s\n\n' "$1"
print_usage
}
# Parse command-line flags into the globals BASE_SRC_DIR, BUILDSET_FILE and
# SYNC_TO_SHA. Options taking a value consume two positions (shift 2); any
# error path ends in print_usage, which exits.
function process_arguments {
while [ ! -z $1 ]; do
case "$1" in
-s|--src-dir)
if [ -z $2 ]; then
print_missing_argument $1
fi
BASE_SRC_DIR=$2
shift 2
;;
-f|--buildset)
if [ -z $2 ]; then
print_missing_argument $1
fi
BUILDSET_FILE=$2
shift 2
;;
--sync)
SYNC_TO_SHA="yes"
shift
;;
-h|--help)
print_usage
shift
;;
*)
print_unknown_argument $1
shift
;;
esac
done
}
# Validate BASE_SRC_DIR (required, must exist), make it absolute, and resolve
# the buildset file via the helper sourced from build_meta.
function set_global_variables {
if [ -z "$BASE_SRC_DIR" ]; then
echo ""
echo "********************************"
echo "Please specify a src directory"
echo "********************************"
echo ""
print_usage
elif [ ! -e "$BASE_SRC_DIR" ]; then
echo ""
# Fix: the message interpolated the undefined $BASE_BUILD_DIR (always
# printing an empty path) and misspelled "src-dir".
echo "Specified src-dir '$BASE_SRC_DIR' does not exist"
print_usage
fi
# Normalize to an absolute path.
BASE_SRC_DIR="$( cd "$BASE_SRC_DIR" && pwd)"
source "$BUILD_META_DIR/functions/find_buildset_file.sh"
BUILDSET_FILE=$(resolve_buildset_file $BASE_SCRIPT_DIR $BUILDSET_FILE)
echo "Using buildset $BUILDSET_FILE"
}
# Synchronise every project listed in the buildset file.  Each
# non-comment line has the form: <project_name> <git_url> <sha1>.
# Existing clones are pulled (and optionally reset to <sha1> when
# --sync was given); missing ones are cloned.
function main {
    while read line; do
        # Skip comment lines.
        if [[ $line == \#* ]]; then
            continue
        fi
        # Re-split the buildset line into positional parameters.
        set -- $line
        local project_name=$1
        local project_url=$2
        local project_sha=$3
        cd $BASE_SRC_DIR
        # No local checkout and no URL to clone from: nothing to do.
        if [ ! -d $project_name ] && [ -z $project_url ]; then
            echo "Continuing for $project_name"
            continue
        fi
        echo "Processing $project_name"
        if [ -e $project_name ]; then
            if [ ! -d $project_name ]; then
                # A plain file is squatting on the clone target.
                echo "File $project_name exists and conflicts with git clone target"
                exit 1
            else
                cd $project_name
                if [ -e .git ]; then
                    # Existing clone: update, optionally pin to the sha.
                    git pull --rebase
                    if [[ $SYNC_TO_SHA == "yes" ]]; then
                        git reset --hard $project_sha
                    fi
                else
                    echo "Found project directory but its not a git repository"
                    exit 1
                fi
            fi
        else
            # Fresh clone, optionally pinned to the sha from the buildset.
            git clone $project_url $project_name
            if [[ $SYNC_TO_SHA == "yes" ]]; then
                cd $project_name
                git reset --hard $project_sha
            fi
        fi
    done < $BUILDSET_FILE
    # Remember which buildset was used last.
    ln -sf $BUILDSET_FILE $BASE_SCRIPT_DIR/current_buildset
}
# Entry point: parse CLI options, validate/derive globals, then sync.
# Bug fix: "$@" must be quoted so arguments containing spaces survive.
process_arguments "$@"
set_global_variables
main
| true |
2248d7ff18510fb8afa96c7ea12b68e319e7dbb2 | Shell | davilla/xbmc-port-depends | /base/base/portmgr/dmg/preflight | UTF-8 | 7,997 | 2.953125 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
#
# Copyright (c) 2007 Juan Manuel Palacios <jmpp@macports.org>, The MacPorts Project.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of MacPorts Team nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
# dp2mp-move upgrading facility for users installing MacPorts through the pkg installer in the dmg.
# This script is meant to run as the preflight of the installer, to upgrade an existing MacPorts
# installation to the MacPorts namespace in the same way the "upgrade" target in base/Makefile
# does for users installing from source or selfupdate.
# preflight
# $Id: preflight 41907 2008-11-12 04:42:24Z ryandesign@macports.org $
###
PATH=/bin:/sbin:/usr/bin:/usr/sbin
# Marker file: its presence means the dp->mp rename was already performed.
UPGRADECHECK=/opt/local/var/macports/.mprename
# Remove obsolete DarwinPorts Tcl packages and shared data.
[ ! -d /Library/Tcl/darwinports1.0 ] || rm -rf /Library/Tcl/darwinports1.0
[ ! -d /opt/local/share/darwinports ] || rm -rf /opt/local/share/darwinports
[ ! -f /opt/local/etc/ports/dp_version ] || rm -vf /opt/local/etc/ports/dp_version
[ ! -f /opt/local/share/man/man5/ports.conf.5 ] || rm -vf /opt/local/share/man/man5/ports.conf.5
# Rename the configuration directory to the MacPorts namespace.
[ ! -d /opt/local/etc/ports ] || mv -v /opt/local/etc/ports /opt/local/etc/macports
[ -d /opt/local/var/macports ] || mkdir -vp /opt/local/var/macports
# Move the state subdirectories from the old db/dports tree.
for dir in distfiles packages receipts software; do
    [ ! -d /opt/local/var/db/dports/${dir} ] || mv -v /opt/local/var/db/dports/${dir} /opt/local/var/macports
done
# Relocate the old rsync ports source under the new rsync.macports.org path.
[ ! -d /opt/local/var/db/dports/sources/rsync.rsync.darwinports.org_dpupdate_dports ] || {
    mkdir -vp /opt/local/var/macports/sources/rsync.macports.org/release && mv -v \
    /opt/local/var/db/dports/sources/rsync.rsync.darwinports.org_dpupdate_dports /opt/local/var/macports/sources/rsync.macports.org/release/ports
}
# Rewrite every port receipt: keep a .mpsaved backup, then patch the old
# db/dports paths inside the compressed receipt.
for receipt in /opt/local/var/macports/receipts/*/*/receipt.bz2; do
    [ ! \( -f ${receipt} -a ! -f ${receipt}.mpsaved \) ] || { cp -v ${receipt} ${receipt}.mpsaved && {
    bzip2 -q -dc ${receipt} | sed 's/db\/dports/macports/g' | bzip2 -q -zf > ${receipt}.new
    } && mv -v ${receipt}.new ${receipt}
}; done
# Convert the system ports.conf to macports.conf (only on first upgrade):
# back it up, then apply one sed pass per rename/cleanup rule.
[ ! \( -f /opt/local/etc/macports/ports.conf -a ! -f ${UPGRADECHECK} \) ] || {
    mv -v /opt/local/etc/macports/ports.conf /opt/local/etc/macports/macports.conf.mpsaved
    sed 's/etc\/ports/etc\/macports/g' /opt/local/etc/macports/macports.conf.mpsaved > /opt/local/etc/macports/macports.conf.tmp && \
    mv -v /opt/local/etc/macports/macports.conf.tmp /opt/local/etc/macports/macports.conf
    sed 's/db\/dports/macports/g' /opt/local/etc/macports/macports.conf > /opt/local/etc//macports/macports.conf.tmp && \
    mv -v /opt/local/etc//macports/macports.conf.tmp /opt/local/etc/macports/macports.conf
    sed 's/darwinports/macports/g' /opt/local/etc/macports/macports.conf > /opt/local/etc/macports/macports.conf.tmp && \
    mv -v /opt/local/etc/macports/macports.conf.tmp /opt/local/etc/macports/macports.conf
    sed 's/dpupdate1\/base/release\/base/g' /opt/local/etc/macports/macports.conf > /opt/local/etc/macports/macports.conf.tmp && \
    mv -v /opt/local/etc/macports/macports.conf.tmp /opt/local/etc/macports/macports.conf
    sed 's/dpupdate\/base\/\{0,1\}/trunk\/base\//g' /opt/local/etc/macports/macports.conf > /opt/local/etc/macports/macports.conf.tmp && \
    mv -v /opt/local/etc/macports/macports.conf.tmp /opt/local/etc/macports/macports.conf
    sed '/^rsync_options/s/"\(.*\)"/\1/' /opt/local/etc/macports/macports.conf > /opt/local/etc/macports/macports.conf.tmp && \
    mv -v /opt/local/etc/macports/macports.conf.tmp /opt/local/etc/macports/macports.conf
    sed 's/ --delete / /' /opt/local/etc/macports/macports.conf > /opt/local/etc/macports/macports.conf.tmp && \
    mv -v /opt/local/etc/macports/macports.conf.tmp /opt/local/etc/macports/macports.conf
    sed 's/ ports.conf(5)/ macports.conf(5)/g' /opt/local/etc/macports/macports.conf > /opt/local/etc/macports/macports.conf.tmp && \
    mv -v /opt/local/etc/macports/macports.conf.tmp /opt/local/etc/macports/macports.conf
}
# Same treatment for sources.conf: backup, then rename rules.
[ ! \( -f /opt/local/etc/macports/sources.conf -a ! -f ${UPGRADECHECK} \) ] || {
    cp -v /opt/local/etc/macports/sources.conf /opt/local/etc/macports/sources.conf.mpsaved
    sed 's/darwinports/macports/g' /opt/local/etc/macports/sources.conf > /opt/local/etc/macports/sources.conf.tmp && \
    mv -v /opt/local/etc/macports/sources.conf.tmp /opt/local/etc/macports/sources.conf
    sed 's/dpupdate\/dports/release\/ports\//g' /opt/local/etc/macports/sources.conf > /opt/local/etc/macports/sources.conf.tmp && \
    mv -v /opt/local/etc/macports/sources.conf.tmp /opt/local/etc/macports/sources.conf
}
# And again for the per-user ~/.macports/ports.conf, if present.
[ ! \( -f "${HOME}/.macports/ports.conf" -a ! -f ${UPGRADECHECK} \) ] || {
    mv -v "${HOME}/.macports/ports.conf" "${HOME}/.macports/macports.conf.mpsaved"
    sed 's/etc\/ports/etc\/macports/g' "${HOME}/.macports/macports.conf.mpsaved" > "${HOME}/.macports/macports.conf.tmp" && \
    mv -v "${HOME}/.macports/macports.conf.tmp" "${HOME}/.macports/macports.conf"
    sed 's/db\/dports/macports/g' "${HOME}/.macports/macports.conf" > "${HOME}/.macports/macports.conf.tmp" && \
    mv -v "${HOME}/.macports/macports.conf.tmp" "${HOME}/.macports/macports.conf"
    sed 's/darwinports/macports/g' "${HOME}/.macports/macports.conf" > "${HOME}/.macports/macports.conf.tmp" && \
    mv -v "${HOME}/.macports/macports.conf.tmp" "${HOME}/.macports/macports.conf"
    sed 's/dpupdate1\/base/release\/base/g' "${HOME}/.macports/macports.conf" > "${HOME}/.macports/macports.conf.tmp" && \
    mv -v "${HOME}/.macports/macports.conf.tmp" "${HOME}/.macports/macports.conf"
    sed 's/dpupdate\/base\/\{0,1\}/trunk\/base\//g' "${HOME}/.macports/macports.conf" > "${HOME}/.macports/macports.conf.tmp" && \
    mv -v "${HOME}/.macports/macports.conf.tmp" "${HOME}/.macports/macports.conf"
    sed '/^rsync_options/s/"\(.*\)"/\1/' "${HOME}/.macports/macports.conf" > "${HOME}/.macports/macports.conf.tmp" && \
    mv -v "${HOME}/.macports/macports.conf.tmp" "${HOME}/.macports/macports.conf"
    sed 's/ --delete / /' "${HOME}/.macports/macports.conf" > "${HOME}/.macports/macports.conf.tmp" && \
    mv -v "${HOME}/.macports/macports.conf.tmp" "${HOME}/.macports/macports.conf"
    sed 's/ ports.conf(5)/ macports.conf(5)/g' "${HOME}/.macports/macports.conf" > "${HOME}/.macports/macports.conf.tmp" && \
    mv -v "${HOME}/.macports/macports.conf.tmp" "${HOME}/.macports/macports.conf"
}
[ ! -f /opt/local/etc/macports/mp_version ] || rm -vf /opt/local/etc/macports/mp_version
# First run only: announce success and drop the marker file so subsequent
# installer runs skip the conversion steps above.
[ -f ${UPGRADECHECK} ] || {
    echo -e "\nMacPorts installation successfully upgraded from the old DarwinPorts namespace!\n"
    echo "MacPorts rename update done!" > ${UPGRADECHECK}
}
| true |
8a05a359cc692cb70b729e7adebf30471aaceac3 | Shell | shouldwhat/linux-script | /redis/cluster.sh | UTF-8 | 864 | 2.9375 | 3 | [] | no_license | #
# https://redis.io/topics/cluster-tutorial
# http://blog.leekyoungil.com/?p=206
#
# Build dependencies installed via yum ("DEPEDENCY" spelling kept — the
# name is referenced by install_dependencies below).
DEPEDENCY_PACKAGES=(rubygem ruby-devel)
# Path to redis-trib.rb (placeholder path — adjust before use).
REDIS_CLUSTER_MANAGE_SCRIPT=/root/~~~~/redis-trib.rb
# Bug fix: these values contain spaces and MUST be quoted.  The original
# unquoted assignments were parsed as "VAR=word command ...", so the shell
# tried to execute "192.168.121.69:6379" (and "create") as commands and
# the variables were never set.
REDIS_CLUSTER_HOSTS="192.168.121.68:6379 192.168.121.69:6379 192.168.121.68:6389 192.168.121.69:6389"
# One replica per master.
REDIS_CLUSTER_REPLICASE=1
REDIS_CLUSTER_CREATION_CMD="$REDIS_CLUSTER_MANAGE_SCRIPT create --replicas $REDIS_CLUSTER_REPLICASE $REDIS_CLUSTER_HOSTS"
function install_dependencies {
    # Install each required build dependency with yum, one package per
    # invocation so a single failure is easy to spot in the output.
    for package in "${DEPEDENCY_PACKAGES[@]}"; do
        yum install -y "$package"
    done
}
# Install/refresh rubygems itself (redis-trib.rb is a Ruby script).
function install_rubygems {
    gem install rubygems-update
    update_rubygems
}
# Install the redis gem at the version redis-trib.rb expects.
function install_redisgem {
    gem install redis -v 3.3.5
}
# Replace this shell with the cluster-creation command; relies on
# word-splitting of the command string built above.
function create_redis_cluster {
    exec $REDIS_CLUSTER_CREATION_CMD
}
# Main sequence: dependencies first, then the cluster itself.
install_dependencies
install_rubygems
install_redisgem
create_redis_cluster
| true |
ea86991551111a0c4f172ef5b5c60b37ea1c21e9 | Shell | harukat/pgsql-cluster-setups | /inmem-3pgsql-2pgpool.ansible/files/pgpool_remote_start | UTF-8 | 569 | 3.4375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# This script is run to start slave node after recovery.
# Trace every command and send all output to a fixed log file; stdin is
# detached so the ssh call below cannot consume it.
set -o xtrace
exec &> /tmp/pgpool_remote_start < /dev/null
# {{pgver}} is substituted by the Ansible template at deploy time.
PGHOME=/usr/pgsql-{{pgver}}
DEST_HOST="$1"          # host to start PostgreSQL on
DEST_HOST_PGDATA="$2"   # PGDATA directory on that host
date
echo start: remote start PostgreSQL@$DEST_HOST
# Start slave node over ssh; -w waits until the server is actually up.
ssh -T -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null postgres@$DEST_HOST $PGHOME/bin/pg_ctl -l $DEST_HOST_PGDATA/log/pg_ctl.log -w -D $DEST_HOST_PGDATA start
if [[ $? -ne 0 ]]; then
    echo $DEST_HOST start failed.
    exit 1
fi
echo end: $DEST_HOST PostgreSQL started successfully.
| true |
617de505c1df6656dbef7c913c01b64ed111445d | Shell | Irishsmurf/config | /.zshrc | UTF-8 | 1,420 | 2.90625 | 3 | [] | no_license | # metc/zshrc: system-wide .zshrc file for zsh(1).
#
# This file is sourced only for interactive shells. It
# should contain commands to set up aliases, functions,
# options, key bindings, etc.
#
# Global Order: zshenv, zprofile, zshrc, zlogin
#
# This configuration file is managed by the redbrick-shell-env
# package. CHANGES MADE HERE WILL NOT BE PRESERVED.
#
# Stoopid terminals!
#
# Shell options: menu completion, auto-cd into directory names, spelling
# correction, no clobbering existing files with '>', extended globbing,
# no duplicate history entries, file-type markers in completion lists.
setopt automenu
setopt autocd
unsetopt cdablevars
setopt correct
setopt noclobber
setopt extendedglob
setopt histignoredups
setopt listtypes
# Helper to define an alias from a name/value pair.
_alias() {
alias $1="$2"
}
# SSH shortcuts and misc zsh-specific aliases.
alias paris='ssh paddez@paris.paddez.com'
alias rb='ssh paddez@redbrick.dcu.ie'
alias pu='pushd'
alias po='popd'
# Switch line editing between emacs and vi keymaps.
alias se='bindkey -e; MODE=emacs'
alias sv='bindkey -v; MODE=vi'
# Login/message watching and terminal-write toggles.
alias won='watch=(notme)'
alias woff='unset watch'
alias pon='won; mesg y'
alias poff='woff; mesg n'
alias ls='ls --color'
# Prompts: right prompt shows the time; left prompt shows user@host and a
# truncated current directory, all colourised with ANSI escapes.
RPS1="$(print '%{\e[1;31m%}[%{\e[0;36m%}%T%{\e[1;31m%}]%{\e[0m%}')"
PS1="$(print '%{\e[0;36m%}%n%{\e[0;37m%}@%{\e[1;34m%}%m%{\e[1;31m%} (%{\e[0m%}%25<..<%~%{\e[1;31m%})%{\e[1;33m%} -> %{\e[0m%}')"
# Fix Delete/Home/End keys across terminals (terminfo-driven where known).
bindkey "^[[3~" delete-char
bindkey "^[3;5~" delete-char
[[ -z "$terminfo[kend]" ]] || bindkey -M emacs "$terminfo[kend]" end-of-line
[[ -z "$terminfo[khome]" ]] || bindkey -M emacs "$terminfo[khome]" beginning-of-line
| true |
97dd90fbd044b4767b3ec4c9b9a49909da229cdc | Shell | rockandska/fzf-obc | /plugins/kill/__fzf_obc_finish_kill.sh | UTF-8 | 334 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env bash
# fzf-obc finish hook for the kill completion: when the active plugin is
# kill/process, reduce each selected "USER PID ..." line in COMPREPLY to
# just the PID (field 2), NUL-delimited, and feed it back through
# __fzf_compreply so `kill` receives plain PIDs.
__fzf_obc_finish_kill() {
  if [[ "${current_plugin:-}" == "kill/process" ]];then
    ##########################
    # Processes fuzzy finder #
    ##########################
    if [[ "${#COMPREPLY[@]}" -gt 0 ]];then
      # awk extracts the PID column; tr converts newlines to NULs.
      __fzf_compreply < <(tr '\n' '\0' < <(awk '{print $2}' <(printf '%s\n' "${COMPREPLY[@]}")))
    fi
  fi
}
| true |
c2294a76ae362ea9f2957f619606b3f903596e75 | Shell | wangkexiong/trystack | /apps/abase.sh | UTF-8 | 2,441 | 3.75 | 4 | [] | no_license | #!/bin/sh
# Current hour/minute derived from the epoch (UTC arithmetic).
TIME=`date +%s`
HOUR=`expr $TIME / 3600 % 24`
MINUTE=`expr $TIME / 60 % 60`
# At 20:00 tear down the named instances in both trystack tenants
# (credentials come from the sourced keystone rc files).
if [ $HOUR -eq 20 ]; then
  . $PWD/keystone_trystack1 && nova delete beijing hangzhou huhhot
  . $PWD/keystone_trystack2 && nova delete master1-k8s worker1-k8s worker2-k8s
fi
# Redirect stdio to LOGFILE
if [ "$LOGPATH" != "" ]; then
  exec 1>"$LOGPATH"
fi
# Repeat if SSH connection fail
REPEAT=${SS_REPEAT:-2}
SLEEP_TIME=${SS_SLEEP:-3}
# Fewer than 2 lines from the inventory script means the instances are
# missing — (re)create them, retrying the playbook up to REPEAT times.
if [ `python scripts/openstack.py --host beijing | wc -l` -lt 2 ]; then
  # Create the necessary instances
  #
  for i in `seq $REPEAT`
  do
    ansible-playbook instances-create.yml
    # ansible leaves a .retry file when some hosts failed; remove it and
    # try again, otherwise we are done.
    if [ -f instances-create.retry ]
    then
      rm -rf instances-create.retry
    else
      break
    fi
    sleep $SLEEP_TIME
  done
fi
# Not enough public IPs, use SSH bastion for private IP connections
chmod 400 roles/infrastructure/files/ansible_id*
chmod +x scripts/*.py
SSHFILE=~/.ssh/config
SSHFILESIZE=0
# Flag set to the string "True" once a non-empty ssh config was written.
SSHFILEOK=False
for i in `seq $REPEAT`
do
  echo "Prepare SSH bastion configuration for $i time ..."
  # bastion.py regenerates ~/.ssh/config with ProxyCommand entries.
  python scripts/bastion.py --ucl --sshkey roles/infrastructure/files/ansible_id --refresh
  if [ -f $SSHFILE ]; then
    SSHFILESIZE=`stat -c%s $SSHFILE`
    if [ $SSHFILESIZE -gt 0 ]; then
      SSHFILEOK="True"
      break
    fi
  fi
  sleep $SLEEP_TIME
done
# Proceed only when the SSH bastion configuration was actually written.
# Bug fix: the original test '[ "$SSHFILEOK"=="True" ]' had no spaces
# around the operator, making it a single non-empty-string test that was
# ALWAYS true, so the failure branch could never run.
if [ "$SSHFILEOK" = "True" ]; then
  # Install the ansible keypair as the default SSH identity.
  rm -rf ~/.ssh/id_rsa && ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
  cp roles/infrastructure/files/ansible_id ~/.ssh/id_rsa
  cp roles/infrastructure/files/ansible_id.pub ~/.ssh/id_rsa.pub
  # If ddclient on the bastion already ends with our domain, setup was
  # done on a previous run and can be skipped.
  SSH_CHECK=`ssh beijing -l ubuntu sudo tail -1 /etc/ddclient.conf`
  if [ "$SSH_CHECK" != "$BEIJING_DOMAIN" ]; then
    # Prepare the instances for ansible working
    # - if no python installed, just DO IT
    #
    for i in `seq $REPEAT`
    do
      ansible-playbook -i scripts/openstack.py instances-prepare.yml -T ${SSH_TIMEOUT:-60}
      # Retry while ansible leaves a .retry file behind.
      if [ -f instances-prepare.retry ]; then
        rm -rf instances-prepare.retry
      else
        break
      fi
      sleep $SLEEP_TIME
    done
    # Setup instances with playbook
    #
    for i in `seq $REPEAT`
    do
      ansible-playbook -i scripts/openstack.py instances-setup_abase.yml -T ${SSH_TIMEOUT:-60}
      if [ -f instances-setup_abase.retry ]; then
        rm -rf instances-setup_abase.retry
      else
        break
      fi
      sleep $SLEEP_TIME
    done
  fi
else
  echo "SSH bastion configuration failed ..."
fi
# Restore std output (undo the LOGPATH redirection set up earlier).
exec 1>&2
| true |
0d9644848f6de48a15a98fd5b5a84bca64ab1be2 | Shell | rubixlinux/rubixlinux | /k/kernel26-source/PKGBUILD | UTF-8 | 3,563 | 2.765625 | 3 | [] | no_license | # Maintainer: Joshua Rubin <joshua@rubixlinux.org>
## DONT FORGET TO UPDATE THE INSTALL SCRIPT!!!!
# Package metadata for the Rubix Linux kernel source package.
pkgname=kernel26-source
pkgver=2.6.16.20
pkgrel=1
url="http://www.kernel.org"
pkgdesc="kernel26-source (Linux kernel source) Source code for Linus Torvalds' Linux kernel. This is the complete and unmodified source code for the Linux kernel."
install=$pkgname.install
# Upstream tarball plus local boot logo, Rubix patch and kernel config.
source=(http://kernel.org/pub/linux/kernel/v2.6/linux-$pkgver.tar.bz2 \
        logo_linux_clut224.ppm \
        rubix.diff \
        config )
md5sums=('382aa4178ff79d58925622a8a97561eb' \
         '8cabdaa35412908edda7b851e4bb1632' \
         '2be65f36f61948f5c46f09e62d405551' \
         '46410e3bfc4a5dd45e923925cc18d34e' )
## Todo:
## None
## Notes:
## None
## Changelog:
## rel1: upgraded to 2.6.16.20 2006.06.07
## rel1: upgraded to 2.6.16.19 2006.06.01
## rel1: upgraded to 2.6.16.18 2006.05.22
## rel1: upgraded to 2.6.16.16 2006.05.10
## rel1: upgraded to 2.6.16.11 2006.04.27
## rel1: upgraded to 2.6.16.9 2006.04.20
## rel1: upgraded to 2.6.15.5 2006.03.03
## rel1: upgraded to 2.6.15.4 2006.02.12
## rel1: upgraded to 2.6.15.3 2006.02.06
## rel1: upgraded to 2.6.15.2 2006.01.31
## rel1: upgraded to 2.6.15.1 2006.01.17
## rel3: upgraded to 2.6.15 (rel1, rel2 were only used for internal development) 2006.01.10
## rel1: upgraded to 2.6.14 2005.10.27
## rel2: removed ieee80211.h 2005.09.21
## rel1: upgraded to 2.6.13 2005.09.01
## rel1: upgraded to 2.6.12.1 2005.06.23
## rel2: rebuilt with new config 2005.06.22
## rel1: upgraded to 2.6.12, added make modules so we have Module.symvers 2005.06.18
## rel2: This is now the default Rubix kernel and /usr/src/linux will
## point to it 2005.05.16
## rel1: Upgraded to 2.6.11.9 2005.05.15
## rel1: Upgraded to 2.6.11.7 2005.04.13
## rel1: Upgraded to 2.6.11
## rel1: Initial rubix release
# Build the kernel once (to produce Module.symvers and vmlinux), then
# install a cleaned source tree under /usr/src as the Rubix default.
build() {
  cd $startdir/src/linux-$pkgver
  # Apply the Rubix patch and drop in the config and boot logo.
  patch -p1 < $startdir/src/rubix.diff || return 1
  cp $startdir/src/config .config
  cp $startdir/src/logo_linux_clut224.ppm drivers/video/logo
  make silentoldconfig
  make bzImage || return 1
  # 'make modules' is run so the tree contains Module.symvers.
  make modules || return 1
  # Stash vmlinux before cleaning, restore it into the tree below.
  cp vmlinux $startdir/pkg
  make clean
  rm .version
  mkdir -p $startdir/pkg/usr/src
  cd $startdir/pkg/usr/src
  cp -a $startdir/src/linux-$pkgver linux-$pkgver-Rubix
  # Normalise group-writable permissions.
  find $startdir/pkg -perm 0664 -exec chmod 0644 {} \;
  find $startdir/pkg -perm 0775 -exec chmod 0755 {} \;
  cd linux-$pkgver-Rubix
  mv $startdir/pkg/vmlinux .
  ## provided by the ieee80211 package
  rm $startdir/pkg/usr/src/linux-$pkgver-Rubix/net/ieee80211/ieee80211_module.c
  rm $startdir/pkg/usr/src/linux-$pkgver-Rubix/net/ieee80211/ieee80211_crypt_ccmp.c
  rm $startdir/pkg/usr/src/linux-$pkgver-Rubix/net/ieee80211/ieee80211_geo.c
  rm $startdir/pkg/usr/src/linux-$pkgver-Rubix/net/ieee80211/ieee80211_crypt_tkip.c
  rm $startdir/pkg/usr/src/linux-$pkgver-Rubix/net/ieee80211/ieee80211_rx.c
  rm $startdir/pkg/usr/src/linux-$pkgver-Rubix/net/ieee80211/ieee80211_tx.c
  rm $startdir/pkg/usr/src/linux-$pkgver-Rubix/net/ieee80211/ieee80211_wx.c
  rm $startdir/pkg/usr/src/linux-$pkgver-Rubix/net/ieee80211/ieee80211_crypt.c
  rm $startdir/pkg/usr/src/linux-$pkgver-Rubix/net/ieee80211/ieee80211_crypt_wep.c
  rm $startdir/pkg/usr/src/linux-$pkgver-Rubix/include/net/ieee80211_radiotap.h
  rm $startdir/pkg/usr/src/linux-$pkgver-Rubix/include/net/ieee80211_crypt.h
  rm $startdir/pkg/usr/src/linux-$pkgver-Rubix/include/net/ieee80211.h
  # /usr/src/linux26 points at the installed tree.
  ln -s linux-$pkgver-Rubix $startdir/pkg/usr/src/linux26
  # Final permission sweep.
  find $startdir/pkg -perm 777 -exec chmod 755 {} \;
  find $startdir/pkg -perm 666 -exec chmod 644 {} \;
}
| true |
75ad12d25dbfa4da861aab37de2a7f0e67ec8e73 | Shell | hre-i/hremap | /systemd/hremap.sh.vbox-kinesis | UTF-8 | 397 | 3.3125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
### Keyboard devices
# Keyboard event device to remap (stable by-path name for the PS/2 port).
DEV=/dev/input/by-path/platform-i8042-serio-0-event-kbd
# Remapping options passed to hremap.
opt="--ctrl-map --f11-to-henkan --f12-to-katakana --katakana-map --henkan-map"
if [ ! -z "$DEV" ]; then
    # Kill any already-running hremap instance before starting a new one;
    # the x-prefix keeps the pattern match safe when pidof prints nothing.
    case x"`pidof hremap`" in
        x[0-9]*) sudo killall hremap ;;
        *) ;;
    esac
    # Supervise hremap forever: restart it 2s after it exits.
    while true; do
        /usr/local/bin/hremap ${opt} $DEV
        sleep 2
    done
else
    echo "NO $DEV"
    exit 1
fi
| true |
cef2a2d6cc78df5d562486c429197c1346b47859 | Shell | devopsdemo-in/Devops | /installAnt.sh | UTF-8 | 884 | 3.015625 | 3 | [] | no_license | #!/bin/bash
#title : Install Ant Build Tool
#description : Execute this script as root user
#author : Mthun Technologies
#date : 08112012
#version : 1.0
#usage : sh antInstallation.sh
#CopyRights : Mithun Technologies
#Contact : 9980923226 | devopstrainingblr@gmail.com
echo 'Ant Installation started.'
echo '-------------------------'
# Tools needed to download and unpack the Ant distribution.
yum install wget zip unzip -y
cd /opt
# Fetch and unpack Apache Ant under /opt.
wget http://apache.osuosl.org//ant/binaries/apache-ant-1.10.5-bin.zip -O apache-ant-1.10.5.zip
unzip apache-ant-1.10.5.zip
# Persist ANT_HOME and PATH for the current (root) user.
echo "export ANT_HOME=/opt/apache-ant-1.10.5" >> ~/.bash_profile
#To set the varibales in System wide environment , use below file
#echo "export ANT_HOME=/opt/apache-ant-1.10.5" >> /etc/profile
echo 'export PATH=$PATH:$ANT_HOME/bin' >> ~/.bash_profile
source ~/.bash_profile
# Smoke test: print the installed Ant version.
ant -version
echo 'Ant Installation done.'
echo '----------------------'
| true |
c9677ce01d0c5cb67c9646d3b6385ecb28c73cca | Shell | jcbelanger/starter | /certbot/certonly.sh | UTF-8 | 570 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env bash
# Abort on the first failing command.
set -e
# Project root (one level above this script's directory).
PARENT_DIR=$(dirname "$(readlink -f "$0")")/../
# Loads DOMAIN and ADMIN_EMAIL used below.
source ${PARENT_DIR}/.env
# Run certbot in webroot mode against the shared challenge volume;
# certificates land in the starter_certbot-certs volume.  Extra arguments
# to this script (e.g. --dry-run) are forwarded via "$@".
docker run \
    --interactive \
    --tty \
    --rm \
    --volume starter_certbot-certs:/etc/letsencrypt \
    --volume starter_certbot-challenges:/data/letsencrypt \
    certbot/certbot certonly \
    --webroot \
    --webroot-path /data/letsencrypt \
    --agree-tos \
    --non-interactive \
    --cert-name ${DOMAIN} \
    --email ${ADMIN_EMAIL} \
    --domain ${DOMAIN} \
    --domain www.${DOMAIN} \
    --domain mail.${DOMAIN} \
    "$@"
# SIGHUP makes nginx reload and pick up the renewed certificates.
cd $PARENT_DIR && docker-compose kill -s SIGHUP nginx
78ad2bc4b2c83a2b5106c3a2d2bb675359fb0f84 | Shell | neuros/neuroslink-installcd | /scripts/buildandmd5.sh | UTF-8 | 691 | 2.65625 | 3 | [] | no_license | BUILD=/srv/projects/neuros-cd/cd-image
APTCONF=/srv/projects/neuros-cd/apt-ftparchive/release.conf
DISTNAME=intrepid
pushd $BUILD
apt-ftparchive -c $APTCONF generate /srv/projects/neuros-cd/apt-ftparchive/apt-ftparchive-deb.conf
apt-ftparchive -c $APTCONF generate /srv/projects/neuros-cd/apt-ftparchive/apt-ftparchive-udeb.conf
apt-ftparchive -c $APTCONF generate /srv/projects/neuros-cd/apt-ftparchive/apt-ftparchive-extras.conf
apt-ftparchive -c $APTCONF release $BUILD/dists/$DISTNAME > $BUILD/dists/$DISTNAME/Release
gpg --default-key "7794ADB8" --output $BUILD/dists/$DISTNAME/Release.gpg -ba $BUILD/dists/$DISTNAME/Release
find . -type f -print0 | xargs -0 md5sum > md5sum.txt
popd
| true |
3617cd8e3a9a635c6facac5429d4361b41942a5b | Shell | alanvictorjp/liberabloqueia | /liberabloqueia.sh | UTF-8 | 3,774 | 3.84375 | 4 | [] | no_license | #!/bin/bash
# Script: liberabloqueia.sh
# Revisao: 0.3
# Desc: Ativa e detativa diretorio web via url, necessário o uso de SSL.
# Autor: Alan Victor, alanvictorjp gmail com
# Criado: 08/06/2019
# Modificado: 29/08/2019
# Locais
################################################################################
# URL path that grants access [ https://eae.com/qwert123 ]
open='farsaon'
# URL path that revokes access [ https://eae.com/qwert321 ]
close='farsaoff'
# Apache access log for the vhost being watched.
log="/var/log/apache/eae.com.log"
# Apache vhost configuration file whose "Require ip" line is edited.
conf="/etc/apache2/sites-available/eae.com-ssl.conf"
# Local IPs always allowed (the baseline of the Require ip line).
whitelist="192.168.200.2 192.168.200.3"
################################################################################
# Constants derived from the script name.
named=$(basename $0)
logfile=/var/log/$named
null=/dev/null
pidfile=/var/run/$(basename $0).pid
# ANSI colour escape fragments used by the eco_* helpers.
codigo="\033["
vermelhoClaro="1;31m";
verdeClaro="1;32m";
finall="\033[0m"
################################################################################
# Ensure the log file exists; clean up the daemon on termination signals.
[ ! -f $logfile ] && { touch $logfile ; }
trap saindo SIGINT SIGTERM SIGKILL
################################################################################
# funcoes
################################################################################
# Signal handler: stop the daemon; if that fails, at least drop the pidfile.
saindo() { _stop || rm -rf $pidfile &> $null ; }
# Print the arguments in light green / light red (no trailing newline).
eco_verde_claro() { echo -ne "${codigo}${verdeClaro}$*${finall}"; }
eco_vermelho_claro() { echo -ne "${codigo}${vermelhoClaro}$*${finall}"; }
# Placeholder help text.
_help() { eco_verde_claro "\n ajuda!\n\n"; }
# Daemon is considered running iff the pidfile exists.
_is_running() { [[ -f $pidfile ]] && { return 0 ; } || { return 1 ; } }
_restart() { _stop ; _start; }
# Stop the daemon: kill the recorded PID and remove the pidfile.
_stop() {
    _is_running && {
        kill -9 $(cat $pidfile) &> $null;
        sleep 0.5
        rm -rf $pidfile &> $null && {
            eco_verde_claro "\n $named parado!\n\n";
            return 0;
        } || {
            eco_vermelho_claro "\n PIDfile nao encontrado!\n\n"
            return 1;
        }
    } || {
        eco_vermelho_claro "\n $named nao estava rodando!\n\n";
        return 1;
    }
}
# Start the daemon unless it is already running.
_start() {
    _is_running && {
        eco_vermelho_claro "\n $named estava rodando!\n\n";
        return 1;
    } || {
        _daemon;
        sleep 0.5
        eco_verde_claro "\n $named iniciado!\n\n";
        return 0;
    }
}
# Report whether the daemon is running (and its PID).
_status() {
    _is_running && {
        eco_verde_claro "\n $named esta rodando!\n";
        eco_verde_claro " PID: $(cat $pidfile)\n\n";
    } || {
        eco_vermelho_claro "\n $named nao esta rodando!\n\n";
    }
}
################################################################################
# daemon
################################################################################
# Background worker: every 2s, scan the tail of the Apache access log for
# hits on the enable/disable URLs within the current 10-second window and
# edit the vhost's "Require ip" line accordingly, reloading Apache on change.
_daemon() {
    # C locale so the log's %b month abbreviation matches date's output.
    export LC_ALL=C
    while : ; do
        # Timestamp truncated to a 10-second bucket (last digit of the
        # seconds is stripped) to match recent log lines.
        atual0=$(date "+%d/%b/%Y:%H:%M")
        seg=$(date "+%S" | sed 's/.$//')
        atual="$atual0:$seg"
        # Client IP of a recent request to the "enable" URL (first match).
        ip_libera=$(tail -n2 $log | grep -i "${atual}.*${open}" | sed 's/ - - .*//' | sed '1!d')
        if [[ $ip_libera ]] ; then
            # Append the IP to the Require line only if not already there.
            cat $conf | grep -q "^Require ip $whitelist ${ip_libera}$" || {
                sed -i "s/\(Require ip $whitelist\)/\1 $ip_libera/" $conf ;
                service apache2 reload &> $null;
            }
        fi
        # Client IP of a recent request to the "disable" URL.
        ip_bloqueia=$(tail -n2 $log | grep -i "${atual}.*${close}" | sed 's/ - - .*//' | sed '1!d')
        if [[ $ip_bloqueia ]] ; then
            # Strip any extra IP back down to the whitelist baseline.
            cat $conf | grep -q "^Require ip $whitelist ${ip_bloqueia}$" && {
                sed -i "s/\(Require ip $whitelist\).*/\1/" $conf ;
                service apache2 reload &> $null;
            }
        fi
        sleep 2
    done &
    # Record the background loop's PID for _stop/_status.
    echo $! > $pidfile
}
################################################################################
# Command dispatcher: start|stop|restart|status, anything else shows help.
case $1 in
    start) _start ;;
    stop) _stop ;;
    restart) _restart ;;
    status) _status ;;
    *) _help ;;
esac
################################################################################
| true |
39c6f9d0be041d0cb43858eb5035cfe6a6abc2f7 | Shell | charleshenryhugo/lpmx | /install.sh | UTF-8 | 2,223 | 3.734375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
get_binary(){
if [ $1 = "GNU/Linux" ];then
echo "installment script will create folder named lpmx in current directory"
ROOT=lpmx
mkdir -p $ROOT
wget -N $3/$2/libevent.so -P $ROOT
wget -N $3/$2/libfakechroot.so -P $ROOT
wget -N $3/$2/lpmx -P $ROOT
wget -N $3/$2/memcached -P $ROOT
wget -N $3/$2/patchelf -P $ROOT
chmod 755 $ROOT/lpmx $ROOT/memcached $ROOT/patchelf
fi
}
install(){
get_binary $ARCH_OS $ARCH_PLAT $SRC/linux
}
get_terminal(){
if [ -d "lpmx" ];then
ROOT=lpmx/examples/$1/terminal
mkdir -p $ROOT
wget -N $2/examples/$1/terminal/getpid.so -P $ROOT
wget -N $2/examples/$1/terminal/pid -P $ROOT
wget -N $2/examples/$1/terminal/readme.md -P $ROOT
wget -N $2/examples/$1/terminal/setting.yml -P $ROOT
wget -N $2/examples/$1/terminal/run.sh -P $ROOT
chmod 755 $ROOT/pid $ROOT/run.sh
else
echo "sorry, i can't find lpmx directory, seems the installment encountered
errors!"
exit 1
fi
}
get_rpc(){
if [ -d "lpmx" ];then
ROOT=lpmx/examples/$1/rpc
mkdir -p $ROOT
wget -N $2/examples/$1/rpc/readme.md -P $ROOT
wget -N $2/examples/$1/rpc/run.sh -P $ROOT
wget -N $2/examples/$1/rpc/loop1 -P $ROOT
wget -N $2/examples/$1/rpc/loop2 -P $ROOT
wget -N $2/examples/$1/rpc/setting.yml -P $ROOT
chmod 755 $ROOT/loop1 $ROOT/loop2 $ROOT/run.sh
else
echo "sorry, i can't find lpmx directory, seems the installment encountered
errors!"
exit 1
fi
}
download_example(){
get_terminal $ARCH_PLAT $SRC
get_rpc $ARCH_PLAT $SRC
}
SRC="https://raw.githubusercontent.com/JasonYangShadow/lpmx/master/build"
if [ -f "/usr/bin/uname" ] || [ -f "/bin/uname" ]; then
ARCH_OS=`uname -o`
ARCH_PLAT=`uname -m`
else
echo "your os doesn't have uname, it may not be compatible with this
installment script"
exit 1
fi
if [ ! -f "/usr/bin/wget" ];then
echo "wget dees not exist in your os, please install wget"
exit 1
fi
if [ -f "/usr/bin/tar" ] || [ -f "/bin/tar" ];then
FILE="lpmx_$ARCH_PLAT.tar.gz"
wget $SRC/$FILE
tar -xzvf $FILE
mkdir -p lpmx
mv linux/$ARCH_PLAT/* lpmx/
mv examples lpmx/
rm -rf linux $FILE
else
install
download_example
fi
| true |
1a6a4d2ba289295741bd945571d5cb061638a491 | Shell | ilventu/aur-mirror | /canorus-svn/PKGBUILD | UTF-8 | 940 | 3 | 3 | [] | no_license | # Contributor: Coenraad van der Westhuizen <chwesthuizen@gmail.com>
# Package metadata for the SVN snapshot build of Canorus.
pkgname=canorus-svn
pkgver=810
pkgrel=1
pkgdesc="A free music score editor"
arch=(i686)
url="https://canorus.berlios.de"
license=('GPL')
depends=('alsa-lib' 'poppler-qt' 'qt>=4.3.0')
makedepends=(cmake swig ruby python)
# No static sources: the tree is fetched from SVN in build().
source=()
md5sums=()
# SVN checkout location and local module name.
_svntrunk=svn://svn.berlios.de/canorus/trunk
_svnmod=canorus
# Check out (or update) the pinned SVN revision, configure with cmake
# (scripting bindings disabled) and install into the package root.
build() {
  cd $startdir/src
  # Reuse an existing working copy when present, otherwise fresh checkout.
  if [ -d $_svnmod/.svn ]; then
    (cd $_svnmod && svn up -r $pkgver)
  else
    svn co $_svntrunk --config-dir ./ -r $pkgver $_svnmod
  fi
  msg "SVN checkout done or server timeout"
  cd $_svnmod
  # Scripting disabled. As of 9/10/08, will not compile w/ ruby enabled. Will not run stably with python/swig enabled.
  cmake . -DCMAKE_INSTALL_PREFIX=/usr -DSWIG_DIR=/usr/share/swig -DQT_MOC_EXECUTABLE=/usr/bin/moc \
    -DQT_UIC_EXECUTABLE=/usr/bin/uic -Wno-dev -DNO_PYTHON=true -DNO_RUBY=true -DNO_SWIG=true
  make
  make DESTDIR=$startdir/pkg install || return 1
}
| true |
aa3cd764c66990159ff75a35c7fb9bd6f8fde671 | Shell | davehughes/dotfiles | /tmux/bin/safe-reattach-to-user-namespace.sh | UTF-8 | 138 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env bash
# Kernel name is the first whitespace-separated field of `uname -a`.
OS=$(uname -a | cut -d" " -f 1)
case "$OS" in
    Darwin)
        # macOS: wrap the command so it can reach the user namespace
        # (pasteboard access from inside tmux).
        reattach-to-user-namespace $@
        ;;
    *)
        # Everywhere else the wrapper is unnecessary; replace this shell.
        exec "$@"
        ;;
esac
| true |
fb46d857299075808b954fb75ed82657c2e8042a | Shell | delkyd/alfheim_linux-PKGBUILDS | /incursion/PKGBUILD | UTF-8 | 1,183 | 2.96875 | 3 | [] | no_license | # Contributor: Kyle Keen <keenerd@gmail.com>
# Package metadata for the closed-source Incursion roguelike.
pkgname=incursion
pkgver=0.6.9H4L
#pkgver=0.6.5BL # if H4L does not work for you
pkgrel=1
pkgdesc="A freeware roguelike with very detailed characters."
arch=(i686 x86_64)
url="http://www.incursion-roguelike.net/"
license=('unknown') # closed source, none given
depends=('gcc-libs' 'libxau' 'libxfixes' 'libxrender' 'libxcursor' 'libxpm')
source=("http://www.incursion-roguelike.org/Incursion%20$pkgver.tar.gz")
md5sums=('32073f76643b3046dde068cbb6330860')
# The binary is 32-bit only: swap in lib32 dependencies on x86_64.
if [[ $CARCH == x86_64 ]]; then
  depends=('lib32-gcc-libs' 'lib32-libxau' 'lib32-libxfixes' \
           'lib32-libxpm' 'lib32-libxrender' 'lib32-libxcursor')
fi
# Install the prebuilt game tree under /opt with games-group-writable
# state directories, plus a launcher symlink in /usr/bin.
package() {
  # for now, it needs /opt
  cd "$srcdir/Incursion $pkgver"
  install -d "$pkgdir/opt/$pkgname"
  install -d "$pkgdir/usr/bin"
  # should be using 'install', but so many files
  # Strip execute bits everywhere, then restore them on the binary only.
  chmod -R -x *
  chmod +x incursion
  # Writable state dirs/files for the 'games' group.
  chmod 0775 logs mod save
  chmod 0664 Incursion.cfg keyboard.dat Options.Dat
  cp -ra ./ "$pkgdir/opt/$pkgname/"
  chown -R root:games "$pkgdir/opt/$pkgname/"
  chown root:root "$pkgdir/opt/$pkgname/incursion"
  # launch link
  ln -s /opt/incursion/incursion "$pkgdir/usr/bin/incursion"
}
| true |
d7022609023c3366c64f06323494c1e7483dd590 | Shell | rwx788/scripts | /i3-xephyr.sh | UTF-8 | 1,309 | 4.1875 | 4 | [] | no_license | #!/usr/bin/bash
# Print the command-line help for this wrapper to stdout.
function usage() {
cat <<EOF
USAGE i3-xephyr start|stop|restart|run [options]
start Start nested i3 in xephyr
stop Stop xephyr
restart reload i3 in xephyr
run run command in nested i3
options:
-c|--config=<PATH> Path to custom i3 configuration file
EOF
}
# Print the first PID of a running i3 process (prints nothing when
# i3 is not running).
i3_pid() {
    local pids
    pids="$(/bin/pidof i3)"
    printf '%s' "$pids" | cut -d ' ' -f 1
}

# Print the first PID of a running Xephyr process (prints nothing when
# Xephyr is not running).
xephyr_pid() {
    local pids
    pids="$(/bin/pidof Xephyr)"
    printf '%s' "$pids" | cut -d ' ' -f 1
}
# At least one argument (the command) is required.
[ $# -lt 1 ] && usage
# Scan all arguments: the last command word wins; -c=/--config= selects a
# custom i3 config file; anything else just prints usage.
for i in "$@"
do
    case $i in
        start|stop|restart|run)
            COMMAND="$i"
            ;;
        -c=*|--config=*)
            # Keep only the part after '='.
            I3CONFIG="${i#*=}"
            ;;
        *)
            usage
            ;;
    esac
done
# Locate the required executables and abort with a clear error when either
# is missing.  Bug fix: the original '{echo "..."}' (no space after '{',
# no terminating ';') was parsed as a command literally named '{echo', so
# the message was never printed and the script carried on with an empty
# path; the braces are now well-formed and the script exits on failure.
I3=`which i3`
XEPHYR=`which Xephyr`
test -x "$I3" || { echo "i3 executable not found." >&2; exit 1; }
test -x "$XEPHYR" || { echo "Xephyr executable not found." >&2; exit 1; }
# Execute the selected command.
case "$COMMAND" in
    start)
        # Launch Xephyr on display :1, then i3 inside it (optionally with
        # the custom config selected above).
        $XEPHYR -ac -br -noreset -screen 1280x720 :1 &
        sleep 1
        if [ -z "$I3CONFIG" ]; then
            DISPLAY=:1.0 $I3 &
        else
            DISPLAY=:1.0 $I3 -c $I3CONFIG &
        fi
        sleep 1
        echo I3 ready for tests. PID is $(i3_pid)
        ;;
    stop)
        # Killing Xephyr tears down the nested i3 with it.
        echo -n "Stopping nested i3..."
        if [ -z $(xephyr_pid) ]; then
            echo "Not running: not stopped :)"
            exit 0
        else
            kill $(xephyr_pid)
            echo "Done."
        fi
        ;;
    restart)
        # SIGHUP makes the nested session reload.
        echo -n "Restarting i3..."
        kill -s SIGHUP $(xephyr_pid)
        ;;
    run)
        # Run the remaining arguments as a command inside the nested display.
        shift
        DISPLAY=:1.0 "$@" &
        ;;
    *)
        usage
        ;;
esac
| true |
80c88a969429e8e2c042b2e32374761d55c82410 | Shell | fjsuarez/shell_basics | /parametros.sh | UTF-8 | 215 | 3.328125 | 3 | [] | no_license | #!/bin/sh
# $0 nombre del script
# $1 arg1
# $2 arg2
#ARCHIVO=$1
# Treat every command-line argument as a file whose contents are printed.
ARCHIVOS=$@
for ARCHIVO in $ARCHIVOS; do
    echo "Ejecutando el script $0"
    echo "Imprimiendo los contenidos de $ARCHIVO"
    cat $ARCHIVO
done
| true |
3feeef0212ea5c8a2da67cbf446ce885d3a83355 | Shell | amalik99/amitdockerazure | /deployazurevms.sh | UTF-8 | 2,353 | 3.609375 | 4 | [] | no_license | ###This will deploy the required infrastructure on Azure for this workshop##########
### Login to Azure Subscription ####
az login
###Non-Fixed Parameter#####
# Interactive sizing: naming prefix, student count, VMs per student, region.
echo "Please enter naming convention prefix - charators only)"
read envprefix
echo "Please enter number of students for whom resources to be deployed in azure - integer only"
read numberofstudent
echo "Please enter number of docker host(azureVM's) to be created for each student"
read dockerhostperstudent
echo "Please enter azure region resources to be deployed to e.g eastus , westus"
read location
###Fixed Parameter#####
# Shared credentials / name fragments used for every student environment.
# NOTE(review): one hard-coded admin password for all VMs is a security risk —
# consider generating per-student secrets.
adminusername='localadmin'
adminpassword='microsoft@123'
rg='rg'
storage='strgacc'
vnet='dockervnet'
diskname='osdisk'
ipname='pip'
###Creating resources##
##Starting Outer for loop: this will create ResourceGroup, Network, and storage account##
for ((n=1;n<=$numberofstudent;n++))
do
std=std$n
###Creating Resource Group for Each Student###
# NOTE(review): "az resource group create" is the pre-2018 CLI spelling;
# newer CLIs use "az group create" — confirm the target CLI version.
az resource group create -l $location -n $envprefix$std$rg
###Creating storage account for each Student###
az storage account create -g $envprefix$std$rg -n $envprefix$std$storage -l $location --sku Standard_LRS
###Creating Virtual network for each Student###
az network vnet create -g $envprefix$std$rg --name $envprefix$std$vnet --address-prefix 10.0.0.0/16 -l $location --subnet-name subnet --subnet-prefix 10.0.0.0/24
###Creating Virtual Machine(Ubuntu) with Docker for each user###
###Starting Inner For Loop for creating virtual machines for each student###
for ((i=1;i<=$dockerhostperstudent;i++))
do
vm=vm$i
# Output of "az vm create" (including the public IP) is captured per VM.
az vm create -g $envprefix$std$rg --image UbuntuLTS --name $envprefix$std$vm --authentication-type password --admin-username $adminusername --admin-password $adminpassword -l $location --nsg '' --os-disk-name $envprefix$std$vm$diskname --public-ip-address $envprefix$std$vm$ipname --size Standard_D1 --storage-account $envprefix$std$storage --vnet $envprefix$std$vnet --subnet-name Subnet > $envprefix$std$vm.txt
# Install the Docker VM extension on the freshly created host.
az vm extension set -n DockerExtension --publisher Microsoft.Azure.Extensions --version 1.2.0 --vm-name $envprefix$std$vm --resource-group $envprefix$std$rg
done
### Inner Loop Done###
done
### Outer Loop Done ###
####Finished Deployment#######
echo Deployment finished successfully, Check Current directory for text file containing IP details for each student. Thanks
| true |
67cbf506a8265250c6aca5efc19c5f4346306266 | Shell | yourant/BigData-FlieLibrary | /Shopee项目/Shopee_Operations/任务调度/调度脚本/tj_use_time.sh | UTF-8 | 2,290 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env bash
########################################################################
# Description : tracking-event ("burying point") log data --- operations
#               backoffice, product-visit analysis (data kept ~6 months)
# Script :tj_use_time.sh
# Version :1.0
# Date :2020-11-10
# Author :koray
# Company :menglar
#########################################################
################ import the shared helper functions #################
source ./function_shopee.sh
#################################################
# Run date: defaults to today, may be overridden by $1 (YYYY-MM-DD).
if [ $# -lt 1 ];then
DT=$(date +%Y-%m-%d -d -0day)
else
DT=$1
fi
# DT becomes an array: DT[0] is the run date, DT[1..180] the preceding days.
for ((i= 1;i<=180;i++))
do
DT[$i]=$(date -d "${DT} -$i days" "+%Y-%m-%d")
done
###############################################################################################
# MySQL stats table --- delete first, then insert (keeps a rolling half year:
# drops the row that ages out, DT[180], and today's row before re-insert).
sql="delete from tj_use_time where timest='${DT[180]}' or timest='${DT[0]}'"
echo '-----------删除语句是'
echo ${sql}
ExecuteMysql "$sql"
################################################################################################
# Burying-point log data ---- operations backoffice (data kept ~6 months).
# Per (site si, product pi) for yesterday (DT[1]): distinct-visitor count (uv),
# total dwell time and average dwell per visitor; per-event gaps >= 1800s are
# treated as session breaks and excluded.
hql="
select
concat_ws('_',t1.si,t1.pi,'${DT[0]}') as id
,t1.si
,t3.sys_role_id
,t1.pi
,nvl(t2.sum_duration,0)
,nvl(t2.sum_duration/t1.uv,0)
,t1.uv
,'${DT[0]}'
from
(select
si
,pi
,count(distinct(uid)) as uv
from
burying_point_log
where
dt='${DT[1]}'
and
uid!='NULL'
and
pi!='NULL'
and
pi!='0'
group by
si
,pi
) as t1
left join
(select
si
,pi
,sum(duration) as sum_duration
from
(select
si
,pi
,et as atime
,lead(et) over (partition by uid order by et asc) as etime
,cast(lead(et) over (partition by uid order by et asc) as bigint)-cast(et as bigint) as duration
from
burying_point_log
where
dt='${DT[1]}'
and
pi!='NULL'
and
uid!='NULL'
and
pi!='0'
) as b
where
duration is not null
and duration<1800
group by
si
,pi
) as t2
on t1.si=t2.si
and t1.pi=t2.pi
join
sys_product_type as t3
on t1.si=t3.app_id
and t1.pi=t3.sys_product_id
and t3.dt='${DT[1]}'
"
#ExportToMySQLByHQL "$hql" "${DT[0]}" "tj_use_time" "库"
# hql=$1  dt=$2 mysql_table=$3  hive_database=$4
# Export from Hive to the MySQL table via sqoop.
#SqoopDirectExportToMySQLByHQL hive_db=$1 sql=$2 dir_dt=$3 mysql_table=$4
SqoopDirectExportToMySQLByHQL "shopee_operations" "$hql" "${DT[0]}" "tj_use_time"
# echo "$hql"
| true |
82128d265ba8f28b6685d1089159bcb5f73909ea | Shell | LatorreDev/recalbox | /scripts/release/generate_external_installer_assets.sh | UTF-8 | 5,991 | 3.96875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
## FUNCTIONS ##
# Print CLI help and terminate with EX_USAGE (64).
function exitWithUsage {
  echo "Usage: $0 --images-dir <images_dir> --destination-dir <destination_dir>"
  echo
  echo "This script generates assets for external OS installers (imagers)"
  echo "such as NOOBS, PINN or Raspberry Pi Imager."
  echo
  echo "  --images-dir <images_dir>             path to Recalbox images"
  echo "  --destination-dir <destination_dir>   path where assets will be generated"
  echo
  echo "The <images_dir> expects the following file hierarchy:"
  echo
  echo "  <images_dir>/"
  echo "  ├─ rpi1/"
  echo "  │  ├─ recalbox-rpi1.img.xz"
  echo "  │  └─ boot.tar.xz"
  echo "  ├─ rpi2/"
  echo "  │  ├─ recalbox-rpi2.img.xz"
  echo "  │  └─ boot.tar.xz"
  echo "  ├─ rpi3/"
  echo "  │  ├─ recalbox-rpi3.img.xz"
  echo "  │  └─ boot.tar.xz"
  echo "  └─ rpi4/"
  echo "     ├─ recalbox-rpi4.img.xz"
  echo "     └─ boot.tar.xz"
  echo
  exit 64
}
# Render the NOOBS os.json/partitions.json templates with version, date,
# tarball size (MiB) and download URL for every Raspberry Pi architecture.
function generateNoobsAssets {
  local templateDir="$(dirname $(readlink -f $0))/templates/noobs"
  local destinationDir="${params[destinationDir]}/noobs"
  declare -A metadata
  echo ">>> Generating assets for NOOBS (in ${destinationDir})"
  # Fetch generic metadata
  metadata[version]=${CI_COMMIT_REF_NAME}
  metadata[releaseDate]=$(date +%Y-%m-%d)
  # Fetch tarball metadata
  for arch in rpi1 rpi2 rpi3 rpi4; do
    local tarball="${params[imagesDir]}/${arch}/recalbox-${arch}.tar.xz"
    # Column 5 of "xz --robot --list" totals row = uncompressed size in bytes.
    local uncompressedTarballSizeInBytes=$(xz --robot --list "${tarball}" | tail -1 | cut -f 5)
    metadata["${arch}UncompressedTarballSize"]=$((${uncompressedTarballSizeInBytes} / 1024 / 1024))
    # Url is rewritten to https://upgrade.recalbox.com/latest/${arch}/recalbox-${arch}.tar.xz in proxies
    metadata["${arch}TarballUrl"]="https://upgrade.recalbox.com/latest/noobs/${arch}/recalbox-${arch}.tar.xz"
  done
  # Create assets in destination directory
  mkdir -p "${destinationDir}"
  cp "${templateDir}/recalbox.png" "${destinationDir}/recalbox.png"
  cp "${templateDir}/marketing.tar" "${destinationDir}/marketing.tar"
  cp "${templateDir}/marketing-kubii.tar" "${destinationDir}/marketing-kubii.tar"
  cp "${templateDir}/os_list_kubii.json" "${destinationDir}/os_list_kubii.json"
  cat "${templateDir}/os.json" \
    | sed -e "s|{{version}}|${metadata[version]}|" \
          -e "s|{{releaseDate}}|${metadata[releaseDate]}|" \
    > "${destinationDir}/os.json"
  for arch in rpi1 rpi2 rpi3 rpi4; do
    mkdir -p "${destinationDir}/${arch}"
    cat "${templateDir}/partitions.json" \
      | sed -e "s|{{uncompressedTarballSize}}|${metadata["${arch}UncompressedTarballSize"]}|" \
            -e "s|{{tarballUrl}}|${metadata["${arch}TarballUrl"]}|" \
      > "${destinationDir}/${arch}/partitions.json"
  done
}
# Render the Raspberry Pi Imager JSON: download size/sha256 of the .img.xz
# plus size/sha256 of the decompressed image for each architecture.
function generateRaspberryPiImagerAssets {
  local templateDir="$(dirname $(readlink -f $0))/templates/rpi_imager"
  local destinationDir="${params[destinationDir]}/rpi-imager"
  declare -A metadata
  echo ">>> Generating assets for Raspberry Pi Imager (in ${destinationDir})"
  # Gather required information from images directory
  metadata[version]=${CI_COMMIT_REF_NAME}
  metadata[releaseDate]=$(date +%Y-%m-%d)
  for arch in rpi1 rpi2 rpi3 rpi4; do
    local imageFile="${params[imagesDir]}/${arch}/recalbox-${arch}.img.xz"
    # Fetch info regarding image downloads (XZ-compressed Recalbox image)
    metadata["${arch}ImageDownloadSize"]=$(stat -c '%s' "${imageFile}")
    metadata["${arch}ImageDownloadSha256"]=$(sha256sum "${imageFile}" | cut -d' ' -f1)
    # Fetch info regarding extracted images (raw Recalbox image, after XZ decompression)
    metadata["${arch}ExtractSize"]=$(xz --robot --list "${imageFile}" | tail -1 | cut -f 5)
    metadata["${arch}ExtractSha256"]=$(xz --decompress --keep --to-stdout "${imageFile}" | sha256sum - | cut -d' ' -f1)
  done
  # Create assets in destination directory
  mkdir -p ${destinationDir}
  cat "${templateDir}/os_list_imagingutility_recalbox.json" \
    | sed -e "s|{{version}}|${metadata[version]}|" \
          -e "s|{{releaseDate}}|${metadata[releaseDate]}|" \
          -e "s|{{rpi1ExtractSize}}|${metadata[rpi1ExtractSize]}|" \
          -e "s|{{rpi2ExtractSize}}|${metadata[rpi2ExtractSize]}|" \
          -e "s|{{rpi3ExtractSize}}|${metadata[rpi3ExtractSize]}|" \
          -e "s|{{rpi4ExtractSize}}|${metadata[rpi4ExtractSize]}|" \
          -e "s|{{rpi1ExtractSha256}}|${metadata[rpi1ExtractSha256]}|" \
          -e "s|{{rpi2ExtractSha256}}|${metadata[rpi2ExtractSha256]}|" \
          -e "s|{{rpi3ExtractSha256}}|${metadata[rpi3ExtractSha256]}|" \
          -e "s|{{rpi4ExtractSha256}}|${metadata[rpi4ExtractSha256]}|" \
          -e "s|{{rpi1ImageDownloadSize}}|${metadata[rpi1ImageDownloadSize]}|" \
          -e "s|{{rpi2ImageDownloadSize}}|${metadata[rpi2ImageDownloadSize]}|" \
          -e "s|{{rpi3ImageDownloadSize}}|${metadata[rpi3ImageDownloadSize]}|" \
          -e "s|{{rpi4ImageDownloadSize}}|${metadata[rpi4ImageDownloadSize]}|" \
          -e "s|{{rpi1ImageDownloadSha256}}|${metadata[rpi1ImageDownloadSha256]}|" \
          -e "s|{{rpi2ImageDownloadSha256}}|${metadata[rpi2ImageDownloadSha256]}|" \
          -e "s|{{rpi3ImageDownloadSha256}}|${metadata[rpi3ImageDownloadSha256]}|" \
          -e "s|{{rpi4ImageDownloadSha256}}|${metadata[rpi4ImageDownloadSha256]}|" \
    > "${destinationDir}/os_list_imagingutility_recalbox.json"
  cp "${templateDir}/recalbox.png" "${destinationDir}/recalbox.png"
}
## PARAMETERS PARSING ##
declare -A params
while [ -n "$1" ]; do
  case "$1" in
    --images-dir)
      shift
      [ -n "$1" ] && params[imagesDir]=$(readlink -f "$1") || exitWithUsage
      ;;
    --destination-dir)
      shift
      [ -n "$1" ] && params[destinationDir]=$(readlink -f "$1") || exitWithUsage
      ;;
    *)
      exitWithUsage
      ;;
  esac
  shift
done
# Both directories must already exist.
if [[ ! -d ${params[imagesDir]} || ! -d ${params[destinationDir]} ]]; then
  exitWithUsage
fi
## MAIN ##
generateNoobsAssets
generateRaspberryPiImagerAssets
67fdfd78bb1b2c73aa1cb6d2f65ade195d33870c | Shell | MathiasLorenz/Large_scale_project | /Testing/shell/submit.sh | UTF-8 | 2,246 | 3.984375 | 4 | [] | no_license | #!/bin/sh
# submit.sh TESTNAME [TESTNAME ...]
#
# This script works as a function which compiles and submit tests for the
# jacobiSolver implementation defined in the Poisson folder of the project.
#
# The <TESTNAME> variable refers to the appendix of the submitTESTNAME.sh files
# located in the folder of this file. These submitTESTNAME.sh files contain the
# LSF based jobscripts defining the test which should be run.
#
# Note: Jobs with identical jobnames will be terminated to avoid resubmition of
# old tests.
# Define all needed folders relative to project head.
EPATH=Poisson
DPATH=Testing/data
FPATH=Testing/figures
LPATH=Testing/logs
SPATH=Testing/shell
# Define which shell script will be executed
if [ -z "$1" ] ; then
echo "==================================================================="
echo "ERROR in submit.sh: (No test)"
echo " Please specify which test to run."
echo " Jobs are specified by the extension after \"submit\"."
echo " Possible files can be seen in $SPATH:"
echo ""
ls $SPATH
echo "==================================================================="
exit
else
TEST="$@"
fi
# Make sure the excecutable is up to date
module load cuda/9.2 mpi/3.1.1-gcc-6.4.0-cuda-9.2-without-mxm
cd $EPATH; make realclean; make -s;
if [ -f jacobiSolver.bin ]
then
cd ../
else
echo "==================================================================="
echo "ERROR in submit.sh: (No executable)"
echo " jacobiSolver.bin not found. Aborting tests."
echo " Please attempt a manual compilation of the jacobiSolver in the "
echo " folder Poisson."
echo "==================================================================="
exit
fi
# Define files needed by the execution in all tests
EXEC="$EPATH/jacobiSolver.bin"
# Create all needed folders
mkdir -p $DPATH $FPATH $LPATH
echo Submitting the following tests:
echo ' '
echo $TEST
echo ' '
for test in $TEST
do
# Make sure the test is not already running
bkill -J $test
# Create the folder needed
rm -fr $LPATH/$test
mkdir -p $LPATH/$test
# Clean and copy all files needed
cp -ft $LPATH/$test $SPATH/submit$test.sh $EXEC
# Move to the directory submit the code and return
cd $LPATH/$test
bsub < submit$test.sh
cd ../../../
done
| true |
74800a8d7f99d74709ff69d95bd0be135647c416 | Shell | manharjotkaur/B-sh_Script | /whileloop.sh | UTF-8 | 88 | 3.046875 | 3 | [
"MIT"
] | permissive |
#!/bin/bash
# Print each line of the file named by $1, prefixed with a label.
FILE=$1
# IFS= keeps leading/trailing whitespace, -r keeps backslashes literal, and
# quoting "$FILE" lets filenames containing spaces work.
while IFS= read -r LINE
do
echo " The line is : $LINE"
done < "$FILE"
| true |
fb3c994823ba4b32268d9a9760eaa8baa60eab3a | Shell | RaviHindocha/tpc-di_benchmark | /datagen_scripts/load_data.sh | UTF-8 | 276 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env sh
# Usage: ./load_data.sh $BATCH_ID
# Upload one generated TPC-DI batch directory to Cloud Storage.
BATCH_ID=$1
BATCH_PATH="${HOME}/data/Batch${BATCH_ID}"
# Batch 1 is the historical load; every later batch is incremental.
if [ "${BATCH_ID}" -eq 1 ]; then
    DEST="gs://tpc-di_data/historical"
else
    DEST="gs://tpc-di_data/incremental"
fi
gsutil -m cp -r "${BATCH_PATH}" "${DEST}"
| true |
a1db61972fe13b5a9ee78eeef5cb4ecf701af9cd | Shell | Niarch/initialize-host | /init.sh | UTF-8 | 6,868 | 3.875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
declare -a PACKAGES
declare -a ADDITIONAL_PPA
declare -a ADDITIONAL_PACKAGES
declare -a REPOSITORIES
# Base packages available from the stock Ubuntu archives.
PACKAGES=(
    git
    curl
    neovim
    tmux
    zsh
    libsecret-1-dev
    gnome-keyring
    virtualenv
    openconnect
    network-manager-openconnect
    tlp
    acpi-call-dkms
    openssh-server
    apt-transport-https
    ca-certificates
    gnupg
    lsb-release
    libappindicator3-1
    libc++1
    gconf2
    python
    python3
)
# Extra apt sources registered by update_ppa before the second install pass.
ADDITIONAL_PPA=(
    kelleyk/emacs
    https://cli.github.com/packages
)
# Packages only available after the PPAs/third-party repos are added.
ADDITIONAL_PACKAGES=(
    emacs27
    spotify-client
    docker-ce
    docker-ce-cli
    containerd.io
    gh
)
# NOTE(review): REPOSITORIES appears unused in this script (clone_to_configure
# hard-codes the dotfiles URL) — confirm before removing.
REPOSITORIES=(
    https://github.com/Niarch/dotfiles.git
)
# Print command-line help for this initialization script.
function show_usage(){
    echo "Usage : This is initialization script to install required packages"
    echo "Options: "
    echo "  -l|--linux [linux variant], This will select package manager " \
        "based on linux variant, Available options 'ubuntu', 'manjaro' "
}
# Print an informational message to stdout, green with an "[I] " prefix.
#   $1 - the message text
function print_info(){
    # Ansi color code variable
    # Fix: declare the color codes local so they no longer leak into the
    # caller's environment as globals.
    local green="\e[0;92m"
    local reset="\e[0m"
    echo -e "${green}[I] $1${reset}"
}
# Print an error message to stdout, red with an "[E] " prefix.
#   $1 - the message text
function print_error(){
    # Ansi color code variable
    # Fix: declare the color codes local so they no longer leak into the
    # caller's environment as globals.
    local red="\e[0;91m"
    local reset="\e[0m"
    echo -e "${red}[E] $1${reset}"
}
# Install every package named in the arguments, one "sudo apt install" each.
#   $@ - package names
function install_via_package_manager(){
    local packages=("$@")
    local package
    # Quote the array expansion so unusual names cannot word-split or glob.
    for package in "${packages[@]}"
    do
        print_info "Installing package $package"
        # TODO : Check how the below command can be generalized based on PKG_MAN
        sudo apt install -y "$package"
    done
}
# Check whether Debian package $1 is installed.
# Echoes the grep exit status (0 = installed); callers compare the echoed
# string, e.g.  [ "$(is_installed zsh)" = 0 ].
function is_installed(){
    # Quote $1 so the package name reaches dpkg as a single argument.
    dpkg -s "$1" | grep 'Status: install ok installed' >> /dev/null
    exit_code=$?
    echo $exit_code
}
# Register the PPA named in $1 (e.g. "kelleyk/emacs") with apt.
function add_ppa(){
    print_info "Adding ppa:$1"
    sudo apt-add-repository -y ppa:$1
}
# Import Spotify's signing key and register its apt repository; must run
# before "spotify-client" can be installed.
function pre_spotify_install(){
    print_info "Adding Spotify Deb package to source.list"
    curl -sS https://download.spotify.com/debian/pubkey_0D811D58.gpg \
        | sudo apt-key add -
    echo "deb http://repository.spotify.com stable non-free" \
        | sudo tee /etc/apt/sources.list.d/spotify.list
}
# Install Docker's GPG keyring and register the upstream apt repository for
# the current Ubuntu release; must run before docker-ce can be installed.
function pre_docker_install(){
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
    echo \
    "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
}
# Post-install defaults: neovim as editor, zsh as login shell, and add the
# current user to the docker group (only if the packages made it on).
function configuring_default(){
    # Package : neovim
    status="$(is_installed neovim)"
    if [ $status = 0 ]; then
        # set neovim as default editor
        print_info "Making neovim the default editor"
        sudo update-alternatives --set editor /usr/bin/nvim
    fi
    # Package : zsh
    status="$(is_installed zsh)"
    if [ $status = 0 ]; then
        # Set zsh as default shell for logged in user
        print_info "Changing default shell to be zsh"
        chsh -s $(which zsh)
    fi
    # Remove sudo access for docker commands
    # (group membership takes effect on next login).
    print_info "Adding $USER to docker group"
    sudo usermod -aG docker $USER
}
# Register all third-party apt sources: Spotify, Docker, the GitHub CLI key
# and every PPA listed in ADDITIONAL_PPA.
function update_ppa(){
    pre_spotify_install
    pre_docker_install
    # Add key to github cli ppa
    print_info "Adding keys required for Github CLI"
    sudo apt-key adv --keyserver \
        keyserver.ubuntu.com --recv-key C99B11DEB97541F0
    for package in ${ADDITIONAL_PPA[@]}
    do
        add_ppa $package
    done
}
# Fetch and stage user configuration: Oh My Zsh, Doom emacs, vim-plug,
# personal dotfiles (nvim + tmux) and the Iosevka fonts.
function clone_to_configure(){
    # OhMyZsh
    print_info "Installing Oh My Zsh"
    sh -c "$(curl -fsSL \
    https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
    # TODO Need to make my own zshrc config evnetually
    # Doom-emacs
    # (wipes any existing ~/.emacs.d before the shallow clone)
    print_info "Installing Doom emacs"
    rm -rf ~/.emacs.d
    git clone --depth 1 https://github.com/hlissner/doom-emacs ~/.emacs.d
    ~/.emacs.d/bin/doom install
    # Vim and Tmux conf
    # Install Vim-plug first
    print_info "Installing Vim-Plug"
    curl -fLo ~/.local/share/nvim/site/autoload/plug.vim \
        --create-dirs \
        https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
    print_info "User needs to manually run 'PluginInstall' after init.vim is ready"
    # Clone dotfiles repository
    print_info "Cloning Dotfiles from personal repo"
    git clone https://github.com/Niarch/dotfiles.git /tmp/dotfiles
    # vim config
    mkdir $HOME/.config/nvim
    mv /tmp/dotfiles/.config/nvim/init.vim $HOME/.config/nvim/init.vim
    print_info "Setup neovim config completed"
    # tmux config
    mv /tmp/dotfiles/.tmux.conf $HOME/.tmux.conf
    print_info "Setup tmux config completed"
    # TODO Run above commands with gh cli commands
    # Package: Iosevka fonts
    # (gh auth login is interactive; the release download needs it)
    gh auth login
    gh release download --repo be5invis/Iosevka \
        --pattern 'ttf-iosevka-slab-*' --dir /tmp
    # TODO need to make this cleaner
    print_info "Extracting and staging Iosevka fonts to ~/.fonts"
    cd /tmp && unzip *.zip
    mkdir ~/.fonts
    mv /tmp/*.ttf ~/.fonts/
}
# Ask the user to pre-download third-party .deb packages into ~/Downloads,
# then install every .deb found there with dpkg. Keeps asking until the
# answer starts with Y/y (install) or N/n (skip and exit).
function prompt_install_deb_package(){
    #prompt user to Proceed once all the deb packages are available in /tmp dir
    print_info "Please Download following deb packages under ~/Downloads folder \n 1) Slack (https://slack.com/intl/en-in/downloads/linux) \n 2) outlook (https://github.com/tomlm/electron-outlook/releases) \n 3) mailspring (https://getmailspring.com/download) \n 4) code (https://code.visualstudio.com/Download) \n 5) discord (https://discord.com/download) \n 6) virtualbox (https://www.virtualbox.org/wiki/Linux_Downloads) \n 7) steam (https://store.steampowered.com/about/) \n 8) Dropbox (https://www.dropbox.com/install-linux)"
    # TODO Need to also add links to download page
    local answer
    while true
    do
        # Fix: the original read the answer once *before* the loop, so any
        # input other than Y*/N* spun forever. Re-prompt on every pass.
        read -p "Proceed [Y/N]" answer
        case $answer in
            [Y]* ) print_info "Proceeding with installing deb packages"
                for file in ~/Downloads/*.deb
                do
                    print_info "Install deb $file"
                    sudo dpkg -i "$file"
                done
                break;;
            [N]* ) print_error "You have choosen to skip deb installation, please refer to script for pending task"
                exit;;
        esac
    done
}
# Final reminder of the applications that still need an interactive login.
# Note: echo joins all these arguments into ONE space-separated line.
function post_script_message(){
    echo "Please proceed with using the packages/softwares installed and configured"\
    "Following software require user login"\
    "Firefox"\
    "Dropbox - Registrationa and sync"\
    "Mailspring"\
    "VPN"\
    "Microsoft Teams"\
    "Outlook Office"\
    "Spotify"\
    "Discord"\
    "Steam"
}
# TODO Choose OS from CLI arg and proceed
# Orchestrates the full setup: base packages, third-party repos, extra
# packages, defaults, config cloning, deb prompts, final message.
function main(){
    # Install packages via package manager
    print_info "Installing packages via Package manager"
    install_via_package_manager ${PACKAGES[@]}
    update_ppa
    install_via_package_manager ${ADDITIONAL_PACKAGES[@]}
    configuring_default
    clone_to_configure
    prompt_install_deb_package
    post_script_message
}
# Calling main function
main
| true |
3206e4a313c223675f08b2597a112e95d08088e2 | Shell | Dmfama20/moodle_scripts | /checkaddons.sh | UTF-8 | 259 | 2.859375 | 3 | [] | no_license | #!/bin/bash
#!/usr/bin/php
#
# Dump the installed Moodle add-on names, then check each one against the
# moosh plugin list (excluding 3.11 entries).
/usr/bin/php get_plugin_names.php > addons.txt;
echo 'Add-on listing: ';
cat ./addons.txt;
echo '---------------------';
# Read one add-on name per line instead of word-splitting `cat` output,
# and quote it (with --) so grep sees exactly one literal-ish pattern.
while IFS= read -r i
do
#	echo "Addon in que: $i";
	moosh -n plugin-list |grep -- "$i"|grep -v 3.11
done < ./addons.txt
| true |
86a9e3ef2e6e7ccbabd82ed6d2b91762dbd3ff73 | Shell | killarbyte/dotfiles | /install.sh | UTF-8 | 593 | 2.5625 | 3 | [] | no_license | #!/bin/sh
### Dynamic Data ###
# First logged-in user from who(1); files are staged into that user's home.
currentuser=$(who | awk 'NR==1{print $1}')
### Copy Files (do not remove "\") ###
# The leading backslash bypasses any cp alias (e.g. cp -i).
\cp -r .bash_aliases /home/$currentuser/
\cp -r .streamlinkrc /home/$currentuser/
### install livestreamers ###
# https://github.com/begs/livestreamers.git
mkdir -p /home/$currentuser/scripts/livestreamers
\cp -r scripts /home/$currentuser/
chmod +x /home/$currentuser/scripts/livestreamers/streams.sh
### irssi
# mkdir -p /home/$currentuser/.irssi/scripts/autorun
### Other stuff ###
# tmux theme :colorscheme night
### enable stuff ###
# source /home/$currentuser/.bashrc
c3b333c2d60730485b500c0356b1ed64524ba6a0 | Shell | zachomedia/cert-manager-webhook-pdns | /scripts/setup-tests.sh | UTF-8 | 976 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
cat <<EOF > _out/openssl.conf
[ req ]
distinguished_name = subject
x509_extensions = cert_ext
[ subject ]
commonName = Common Name (e.g. server FQDN or YOUR name)
commonName_max = 64
[ cert_ext ]
subjectAltName = @alternate_names
[ alternate_names ]
DNS.1 = localhost
DNS.2 = web
IP.1 = 127.0.0.1
IP.2 = ::1
EOF
openssl req -x509 -config _out/openssl.conf -newkey rsa:4096 -keyout _out/key.pem -out _out/cert.pem -sha256 -days 30 -nodes -subj '/CN=localhost'
for suite in tls tls-with-proxy tls-auth-hdr; do
mkdir -p _out/testdata/${suite}
cp testdata/pdns/test/${suite}/apikey.yml _out/testdata/${suite}/apikey.yml
sed "s#__CERT__#$(base64 -w0 _out/cert.pem)#g" testdata/pdns/test/${suite}/config.json > _out/testdata/${suite}/config.json
done
# No TLS
for suite in no-tls no-tls-with-proxy no-tls-auth-hdr; do
mkdir -p _out/testdata/${suite}
cp testdata/pdns/test/${suite}/{config.json,apikey.yml} _out/testdata/${suite}
done
| true |
50fa848be0d348be0e3eb8702f77b29f8424a623 | Shell | sjz123321/mailx-bash-shell-1 | /install.sh | UTF-8 | 2,631 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Interactive mailx auto-configuration (Chinese UI strings are user-facing
# and kept verbatim). Detects the distro, installs mailx, backs up / restores
# the rc file, writes SMTP settings, installs send_mail.sh and sends a test.
echo "欢迎使用mailx自动配置脚本 ver1.3.0 by SJZ 2018-8-23(增加邮件发送脚本)"
echo "请确认在使用本脚本前没有使用过其他类似的脚本"
echo "否则请重置配置文件再运行本脚本 按回车键继续"
read temp
# Distro detection via /etc/issue; picks the matching mailx rc filename.
sver1=`cat /etc/issue | grep CentOS`
sver2=`cat /etc/issue | grep Debian`
sver3=`cat /etc/issue | grep Ubuntu`
if [ "$sver1" != "" ] ; then
echo "CentOS"
sys=1
dir=mail.rc
elif [ "$sver2" != "" ] ; then
echo "Debian"
sys=2
dir=nail.rc
elif [ "$sver3" != "" ] ; then
echo "Ubuntu"
sys=3
dir=s-nail.rc
else
echo "unknow"
sys=0
exit
fi
# Install mailx, then restore from a previous backup if one exists,
# otherwise create the backup of the pristine rc file.
case $sys in
1) yum -y install mailx && cd /
bak1=`find | grep ./mail.rc.bak`
if [ "$bak1" != "" ] ; then
cp /etc/mail.rc.bak /etc/mail.rc
else
cp /etc/mail.rc /etc/mail.rc.bak
fi
;;
2) apt-get install heirloom-mailx -y && cd /
bak2=`find | grep ./nail.rc.bak`
if [ "$bak2" != "" ] ; then
cp /etc/nail.rc.bak /etc/nail.rc
else
cp /etc/nail.rc /etc/nail.rc.bak
fi
;;
3) apt-get install heirloom-mailx -y && cd /
bak3=`find | grep ./s-nail.rc.bak`
if [ "$bak3" != "" ] ; then
cp /etc/s-nail.rc.bak /etc/s-nail.rc
else
cp /etc/s-nail.rc /etc/s-nail.rc.bak
fi
;;
esac
echo "mailx 安装完成 现在进行mailx配置"
echo "请选择您所使用的邮箱"
echo ""
echo "1.新浪邮箱(推荐)"
echo ""
echo "2.163邮箱(推荐)"
echo ""
echo "3.gmail(国内邮箱时常收不到消息请选择此,注意开启gmail smtp服务)"
echo ""
echo "4.qq邮箱(不推荐,smtp登陆密码不等于qq登陆密码注意!!)"
read temp
# Append the chosen SMTP server to the rc file selected above.
case $temp in
1) echo "set smtp=smtp.sina.com" >> /etc/$dir
;;
2) echo "set smtp=smtp.163.com" >> /etc/$dir
;;
3) echo "set smtp=smtp.gmail.com" >> /etc/$dir
;;
4) echo "set smtp=smtp.qq.com" >> /etc/$dir
;;
esac
echo "请输入邮箱账号"
read temp
echo $temp >> /etc/mail_addr.conf
echo "set from=$temp" >> /etc/$dir
echo "set smtp-auth-user=$temp" >> /etc/$dir
# NOTE(review): the password is stored in plain text in /etc/$dir and is
# visible while typing — consider "read -s" and tightening permissions.
echo "请输入邮箱密码"
read temp
echo "set smtp-auth-password=$temp" >> /etc/$dir
echo "set smtp-auth=login" >> /etc/$dir
echo "邮箱配置完成,下面发送测试邮件"
echo "请输入要接收测试邮件的邮箱地址"
read temp
cd /var && echo "收到这封邮件说明mailx配置成功" > attach_test.txt
echo "正在安装邮件发送脚本 以后发送邮件只需在命令行中输入 send_mail.sh 即可"
cp send_mail.sh /usr/bin
echo "收到这封邮件说明mailx配置成功" | mail -s "这是一封测试邮件" -a /var/attach_test.txt $temp
echo "请检查是否收到测试邮件 谢谢使用 bye"
| true |
331d289451195965136dee1013812dfd3aca0b9d | Shell | jayakrishnaambavarapu/CM-L | /prometheus/scripts/install_node_exporter.sh | UTF-8 | 858 | 3.375 | 3 | [] | no_license | #!/bin/bash
set -eufx
#sudo yum install -y curl
NODE_EXPORTER_VERSION="1.1.2"
sudo useradd -rs /bin/false node_exporter
cd /tmp
curl -LO https://github.com/prometheus/node_exporter/releases/download/v"$NODE_EXPORTER_VERSION"/node_exporter-"$NODE_EXPORTER_VERSION".linux-amd64.tar.gz
tar -xvf node_exporter-"$NODE_EXPORTER_VERSION".linux-amd64.tar.gz
sudo mv node_exporter-"$NODE_EXPORTER_VERSION".linux-amd64/node_exporter /usr/local/bin/
sudo cat << HERE > node_exporter.service
[Unit]
Description=Node Exporter
After=network.target
[Service]
User=node_exporter
Group=node_exporter
Type=simple
ExecStart=/usr/local/bin/node_exporter
[Install]
WantedBy=multi-user.target
HERE
sudo mv /tmp/node_exporter.service /etc/systemd/system/node_exporter.service
sudo systemctl daemon-reload
sudo systemctl start node_exporter
sudo systemctl enable node_exporter | true |
e23d4c48da71950d7c1b1b999db3cde9e3af01e2 | Shell | htrc/HTRC-Solr-EF-Ingester | /SOLR12-TEST-Solr8/PING-COL.sh | UTF-8 | 587 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Check whether the Solr collection named in _setcol.sh exists, by listing
# collections through the admin API and testing membership in Python.
source ./_setcol.sh
echo "****"
echo "* Pinging collection:    $solrcol"
echo "* Through Solr endpoint: $solradminurl"
echo "****"
solr_cmd="$solradminurl/collections?action=list"
# NOTE(review): the inline "print '...'" is Python 2 syntax — this breaks
# under a python3-only environment; confirm the interpreter on the host.
col_exists=`wget $opt_authenticate -q "$solr_cmd" -O - \
    | python -c "import sys, json; cols=json.load(sys.stdin)['collections']; print '$solrcol' in cols" `
if [ "x$col_exists" != "x" ] ; then
    # running command produced a result
    if [ "$col_exists" = "True" ] ; then
	echo "# Exists!"
    else
	echo "# Does NOT exist!"
    fi
else
    echo "# Does NOT exist!"
fi
echo "****"
440b78c7a00a6225f65ea7aa445514be197c634e | Shell | eCrimeLabs/Pineapple-MK4 | /2.8.1/mk4-module-interceptor-1.0/interceptor/stop_interceptor.sh | UTF-8 | 267 | 3.03125 | 3 | [] | no_license | #!/bin/sh
# Tear down the interceptor bridge and give eth0 back to the pineapple's
# br-lan bridge. Progress is logged next to this script, then removed.
MYPATH="$(dirname $0)/"
LOG=${MYPATH}log
BRINT=br-interceptor
echo -e "Stopping Interceptor..." > ${LOG}
ifconfig ${BRINT} down >> ${LOG}
brctl delbr ${BRINT} >> ${LOG}
# Bring back eth0 to pineapple br-lan
brctl addif br-lan eth0 >> ${LOG}
rm ${LOG}
| true |
cf250c61b20d6ff59c223bd7c689d74b022a2d70 | Shell | PercivalZhang/AccountFactory | /deploy.sh | UTF-8 | 500 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Restart one pm2-managed module ($1, default account-factory) against a
# network environment ($2, default private): stop, reset logs, start.
if [ ! -n "$1" ] ;then
	module=account-factory
else
	module=$1
fi
if [ ! -n "$2" ] ;then
	network=private
else
	network=$2
fi
echo $network
echo "starting module - $module ..."
# stop all node app
echo "pm2 stop pm2-$module.json"
pm2 stop pm2-$module.json
# Recreate the module's log directory from scratch.
echo "rm -rf log/$module"
rm -rf log/$module
echo "mkdir log/$module"
mkdir log/$module
# start all node apps in development mode
echo "pm2 start pm2-$module.json --env $network"
pm2 start pm2-$module.json --env $network
117c82f3cd8f1ad1e924b2ef7da1a400aa9ac4ea | Shell | joshua-newhouse/SE6387-blockchain | /util/iterator.sh | UTF-8 | 439 | 4.03125 | 4 | [] | no_license | function ForEachElement() {
local thisArray="${1}[@]"
local action="${2}"; shift 2
local actionArgs="$*"
thisArray=("${!thisArray}")
local retCode=0
for element in "${thisArray[@]}"; do
echo "Performing ${action} on ${element} with ${actionArgs}"
$action "${element}" ${actionArgs}
[[ $? -ne 0 ]] && echo "Failed element: ${element}" && ((retCode++))
done
return "${retCode}"
}
| true |
7e98e95dd577c19159157640834f41e9a962ca0c | Shell | avati/mnist-plots | /create-csv.sh | UTF-8 | 700 | 2.6875 | 3 | [] | no_license | #!/bin/sh
# Collect the "Best" accuracy from each training log into per-model CSVs;
# every row is "<data_frac>, <accuracy>".
for hdim_nlayers in 16_1 16_2 16_3 32_1 32_2 32_3 64_1 64_2 64_3 128_1 128_2 128_3; do
    # Split "HDIM_NLAYERS" with parameter expansion instead of echo|cut.
    hdim=${hdim_nlayers%_*};
    nlayers=${hdim_nlayers#*_};
    > "hdim-$hdim-nlayers-$nlayers.csv"
    for data_frac in `seq 0.001 0.001 0.009; seq 0.01 0.01 0.09; seq 0.1 0.1 0.9`; do
	perf=`grep Best "log-hdim-$hdim-nlayers-$nlayers-data_frac-$data_frac.log" | cut -f3 -d' '`
	echo "$data_frac, $perf" >> "hdim-$hdim-nlayers-$nlayers.csv"
    done
done
# Baseline softmax model, same fractions.
> softmax.csv
for data_frac in `seq 0.001 0.001 0.009; seq 0.01 0.01 0.09; seq 0.1 0.1 0.9`; do
    perf=`grep Best "log-softmax-data_frac-$data_frac.log" | cut -f3 -d' '`
    echo "$data_frac, $perf" >> softmax.csv
done
d22e1f8c8ba2a6908a4251cf441005050d32f4d7 | Shell | yt-zgl/bubichain | /test/env/4peers-with-slave/start.sh | UTF-8 | 158 | 2.8125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Reset and (re)start every peer directory sitting next to this script.
PEER_DIR="$( cd "$( dirname "$0" )" && pwd )"
# Quote the directory prefix so paths containing spaces survive; the glob
# suffix itself must stay unquoted to expand.
for i in "$PEER_DIR"/peer*;
do
	rm -rf "$i"/log/*
	"$i"/update.sh;
	"$i"/dropdb.sh;
	"$i"/start.sh;
done
| true |
94072f126a8aab66b75cf076e8ce04083a76eef6 | Shell | vektorcloud/mesos | /entrypoint.sh | UTF-8 | 490 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/dumb-init /bin/bash
set -e
# Libprocess MUST be able to resolve our hostname
# Some environments such as runc don't automatically
# specify this like Docker does. It can also be used
# with the --discover-ip flag
# Install a helper that prints the IP of the first interface in state UP.
# Note: the <<- form strips leading TAB characters from the heredoc body.
cat > /sbin/discover-ip <<-__EOF__
	#!/bin/sh
	ip addr |grep 'state UP' -A2 |tail -n1 | awk '{print \$2}' | sed 's/\/.*//'
__EOF__
chmod +x /sbin/discover-ip
# Only when DISCOVER_IP is set: wait for networking, then map the detected
# IP to this hostname in /etc/hosts.
[ -n "$DISCOVER_IP" ] && {
  sleep 10
  echo "$(discover-ip) $(hostname)" >> /etc/hosts
}
# Hand off to the container's real command (PID preserved via exec).
exec "$@"
| true |
b1acf964e5f4adf01e840812c84d49e0805f4a15 | Shell | Alex-lubo/career_online | /frontend/depoly.sh | UTF-8 | 424 | 2.5625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env sh
# Abort on the first command that fails.
set -e
# Build the static files.
npm run build
# Enter the generated output directory.
cd dist
# If deploying to a custom domain:
# echo 'www.example.com' > CNAME
# Publish the build as a fresh repo, force-pushed to the gh-pages branch.
git init
git add -A
git commit -m 'depoly脚本更新'
git remote add origin https://github.com/wangtunan/vue-mooc.git
git config user.name 'wangtunan'
git config user.email 'why583440138@gmail.com'
git push -f origin master:gh-pages
9a61fc3d25181cbcad7fee69186a007fbd6131ea | Shell | rexkerr/home_bin | /bin/run_gtest_num | UTF-8 | 734 | 3.9375 | 4 | [] | no_license | #!/usr/bin/env bash
# Run a single googletest case, selected by its 1-based position in the
# binary's --gtest_list_tests output.
TEST_EXEC=$1
TEST_NUM=$2

Usage() {
    echo ""
    echo "$(basename $0) <TestExec> <TestNum>"
    exit 0
}

# Fix: "$1" must be quoted — with a missing argument the unquoted form
# parsed as the string test "[ ! -e ]" (always false) and slipped through.
if [ ! -e "$1" ]; then
    echo "The test executable does not exist: $TEST_EXEC"
    Usage
fi

IS_NUMBER='^[0-9]+$'
if ! [[ $TEST_NUM =~ $IS_NUMBER ]] ; then
    # invalid, so next block will show the valid range
    TEST_NUM=0
fi

# Count the indented (test case) lines of the listing.
NUM_TESTS=$("$TEST_EXEC" --gtest_list_tests | grep -e "^  .*" | sed -e 's/\s*//g' | wc -l)
if [ "$TEST_NUM" -lt 1 ] || [ "$TEST_NUM" -gt "$NUM_TESTS" ]; then
    echo "Test number must be between 1 and $NUM_TESTS"
    Usage
fi

# Pick the N-th case name and run only that test.
TEST_NAME=$("$TEST_EXEC" --gtest_list_tests | grep -e "^  .*" | sed -e 's/\s*//g' | head -n "$TEST_NUM" | tail -1)
"$TEST_EXEC" --gtest_filter="*$TEST_NAME*"
b3c98d753baa4de7846949bf2590a7a7c33937a7 | Shell | alunux/nekorpm | /nekorpm | UTF-8 | 3,338 | 3.5625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# MIT License
#
# Copyright (c) 2016 La Ode Muh. Fadlun Akbar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# "temp_repo" must be declared in the main script
# and assigned with specific directory
temp_repo="/tmp/nekorpm"
# import function here
# (rm_temp, package_path_info, package_corrupt_status, extract_*,
#  print_package_info, install_* all come from these sourced files)
. /usr/share/nekorpm/functions/cleanup
. /usr/share/nekorpm/functions/check_package_corrupt
. /usr/share/nekorpm/functions/check_location
. /usr/share/nekorpm/functions/package_info
. /usr/share/nekorpm/functions/install_process
# Menu option 2: show a package's metadata, then offer to inspect another
# (recurses into itself on Y / empty answer).
see_package_info ()
{
    rm_temp "$temp_repo"
    local location=''
    local check_corrupt=''
    package_path_info location
    if [[ "$location" != '' ]]; then
        package_corrupt_status check_corrupt "$location"
        extract_package_info "$location" $check_corrupt "$temp_repo"
        print_package_info
        rm_temp "$temp_repo"
        local back
        while true; do
            read -p "Cari nekorpm lain? [Y/n]: " back
            case "$back" in
                [Yy]|"" )
                    see_package_info
                    break
                    ;;
                [Nn] )
                    clear
                    break
                    ;;
            esac
        done
    fi
}
# Menu option 1: extract a package into temp_repo, confirm and install it.
installation ()
{
    rm_temp "$temp_repo"
    local location=''
    local check_corrupt=''
    local user_confirm=''
    package_path_info location
    if [[ "$location" != '' ]]; then
        package_corrupt_status check_corrupt "$location"
        rm_temp "$temp_repo"
        install_confirm user_confirm
        extract_package_all $check_corrupt "$location" $user_confirm "$temp_repo"
        print_package_info "$temp_repo"
        install_process
        rm_temp "$temp_repo"
        ask_install_process_again installation
    fi
}
# Draw the menu and dispatch one choice (Indonesian UI strings kept as-is).
main ()
{
    echo "--------------------------------------"
    echo "-              nekorpm               -"
    echo "-      SINGE OFFLINE INSTALLER       -"
    echo "--------------------------------------"
    echo "-                                    -"
    echo "- 1. Install                         -"
    echo "- 2. Lihat Keterangan Suatu nekorpm  -"
    echo "- 3. Exit                            -"
    echo "-                                    -"
    echo -e "--------------------------------------\n"
    read -p "Pilihan [1-3]: " menu
    case "$menu" in
        1 )
            installation
            ;;
        2 )
            see_package_info
            ;;
        3 )
            rm_temp "$temp_repo"
            clear
            exit 0
            ;;
        * )
            clear
            ;;
    esac
}
# Loop the menu forever; option 3 is the only exit path.
clear
while true; do
    main
done
| true |
87cdfee770572826dc0a6e7817cd90e39b3a65b4 | Shell | sonata-nfv/son-qual | /qual-stress-monitoring/tests/stress_test.sh | UTF-8 | 276 | 2.890625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Usage: stress_test.sh <requests> <concurrency> <log-prefix> <url>
# Stress a MonMan API endpoint with ApacheBench, append results to the log.
n=$1
c=$2
logf=$3.log
url=$4
#Stress MonMan API Level
#-n requests     Number of requests to perform
#-c concurrency  Number of multiple requests to make at a time
# Quote all expansions so a URL containing '&' or spaces cannot break the
# command line.
ab -n "$n" -c "$c" -g "$logf" -q "$url" >res.out
cat res.out >> results.log
printf "%s" "$(<res.out)"
| true |
eb3a89dd297b181da2df010713ec2665c01f939a | Shell | godarch/distributions | /void/dracut/run-darch-hooks.sh | UTF-8 | 637 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env bash
# Load some common helper commands.
type getarg >/dev/null 2>&1 || . /lib/dracut-lib.sh
command -v unpack_archive >/dev/null || . /lib/img-lib.sh
# darch_dir may already be set by the environment; otherwise read it from
# the kernel command line (darch_dir=...).
[ -z "$darch_dir" ] && darch_dir=$(getarg darch_dir=)
# Keep only the part after the last ':' (e.g. device:path syntax).
darch_dir_path=`echo $darch_dir | sed 's/.*\://g'`
darch_hooks_dir="/run/initramfs/live${darch_dir_path}/hooks"
if [ -e "$darch_hooks_dir" ]; then
for hook_dir in $darch_hooks_dir/*; do
hook_name=`basename ${hook_dir}`
# Each hook directory ships a "hook" file defining run(); source it in a
# fresh sh with the rootfs location and the hook's directory exported.
export DARCH_ROOT_FS="/run/rootfsbase"
export DARCH_HOOK_DIR="${darch_hooks_dir}/${hook_name}"
/usr/bin/env sh -c ". $DARCH_HOOK_DIR/hook && run"
done
fi | true |
529e2a03a482144097acc34b558d5e671d56776d | Shell | MaillPierre/SWKrimpSim | /scripts/convertTables.sh | UTF-8 | 471 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Convert the Krimp code table of every run found under a results directory.
# $1 is the directory where all the *-output run directories are stored
# $2 is the index file
# Iterate the glob directly instead of parsing `ls` output (SC2045), and
# quote every expansion so paths containing spaces survive.
for i in "$1"/*-output; do
    [ -e "$i" ] || continue  # no match: the glob stays literal, skip it
    echo "Processing $i"
    CT_FILE=$i/ct-latest.ct
    DATASET_FILE=$(basename "$i")
    # Strip the trailing "-output" to recover the dataset name.
    DATASET_FILE=${DATASET_FILE%-output}
    DB_FILE=$i/$DATASET_FILE.db.analysis.txt
    OUT_FILE=$i/$DATASET_FILE-CT.dat
    java -cp krimpRDF-bigdata.jar com.irisa.swpatterns.data.utils.TableFormatConverter -CT "$CT_FILE" -DBAnalysis "$DB_FILE" -index "$2" -outputCT "$OUT_FILE"
done
23ccd240b17d1a2d5d12dc96e8f8d66bfde6dc17 | Shell | norcams/himlarcli | /bin/enddate-enter-quarantine.sh | UTF-8 | 526 | 3.4375 | 3 | [] | no_license | #!/bin/bash
#
# DESCRIPTION: Put projects into quarantine, if they have reached
#              their end date
#
# AUTHOR: trondham@uio.no
#

# Set proper path
PATH=/usr/bin
export PATH

# Set LC_TIME
LC_TIME=en_US.UTF-8
export LC_TIME

# Activate himlarcli virtualenv
source /opt/himlarcli/bin/activate

# Quarantine projects. Read one project id per line (second column of the
# report) instead of word-splitting an unquoted command substitution, and
# quote the id when passing it on.
/opt/himlarcli/report.py enddate --days 0 --list | awk '{print $2}' | \
while read -r project; do
    /opt/himlarcli/project.py quarantine --reason enddate -m --template notify/notify_enddate_after.txt "$project"
done
| true |
9d3c5116ceba63a5af57fc902a54d1309a34aae6 | Shell | VanirAOSP/vendor_vanir | /proprietary/common/bin/71-calendar.sh | UTF-8 | 1,401 | 3.59375 | 4 | [] | no_license | #!/sbin/sh
#
# /system/addon.d/71-calendar.sh
# Automagically fix duplicate calendars by removing AOSP calendar from /system when Google Calendar is installed from the play store
# Discover the recovery updater's output file descriptor so messages can be
# shown on the recovery console via ui_print; fall back to fd 1 when the
# updater process cannot be found.
export OUTFD=$(ps | grep -v "grep" | grep -o -E "update_binary(.*)" | cut -d " " -f 3);
[ ! $OUTFD ] && export OUTFD=$(ps | grep -v "grep" | grep -o -E "/tmp/updater(.*)" | cut -d " " -f 3);
[ ! $OUTFD ] && export OUTFD=1 #prevent syntax error that would be caused if OUTFD were unset, but we won't be outputting to the recovery console :-(
# Provides $S (system mount point) and the addon.d hook phases.
. /tmp/backuptool.functions
case "$1" in
post-restore)
echo "Checking for duplicate (AOSP/Google) calendars..."
if [ ! -d /data/app ]; then
if mount /data; then
# IMOUNTED marks "we mounted /data ourselves". Only being SET matters:
# [ $IMOUNTED ] below is true for any non-empty value, even "0".
IMOUNTED=0
echo "Mounted /data..."
else
echo "mount /data failed! plz inform nuclearmistake"
fi
else
echo "/data already mounted..."
fi
# Google Calendar installed from the Play store lives under /data/app as
# com.google.android.calendar-*; if present, drop the AOSP copy.
if [ -d /data/app ] && ls /data/app | grep -q com.google.android.calendar-; then
echo "Auto-removing AOSP calendar from /system because you have installed Google calendar from the Play store"
rm -Rf $S/app/Calendar
fi
# Unmount only if this script mounted /data in the first place.
if [ $IMOUNTED ]; then
if umount /data; then
echo "Unmounted /data..."
else
echo "umount /data failed! plz inform nuclearmistake"
fi
fi
;;
*)
# no-op
;;
# Prefix every output line with ui_print and send it to the updater fd so
# it appears on the recovery screen.
esac 2>&1 | while read LINE; do echo ui_print ${LINE} 1>&${OUTFD}; done
| true |
039cd5bd051c2f9ece4bffcc066ce1064506f0c2 | Shell | ZYH1120/ServerlessBench | /Testcase4-Application-breakdown/online-compiling/examples/make/scripts/run.sh | UTF-8 | 1,297 | 3.0625 | 3 | [
"LicenseRef-scancode-mulanpsl-1.0-en",
"MulanPSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
#
# Copyright (c) 2020 Institution of Parallel and Distributed System, Shanghai Jiao Tong University
# ServerlessBench is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
#
if [ -z "$SERVERLESSBENCH_HOME" ]; then
echo "$0: ERROR: SERVERLESSBENCH_HOME environment variable not set"
exit
fi
set -a
source $SERVERLESSBENCH_HOME/local.env
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd $SCRIPTS_DIR/../
USAGE="$0 <JOBS-COUNT>"
JOBS_COUNT=${1?$USAGE}
echo GG_MODELPATH=$GG_MODELPATH
echo GG_STORAGE_URI=$GG_STORAGE_URI
printf "1. Clear workspace\n"
$SCRIPTS_DIR/clear.sh
cd src
printf "2. Initialize gg\n"
gg init
printf "3. Create thunks for building hello\n"
gg infer make hellomake
printf "4. Build hello\n"
gg force --jobs=$JOBS_COUNT --engine=openwhisk hellomake
#gg force --jobs=$JOBS_COUNT --engine=local bin/llvm-tblgen
| true |
20806f2a3f64388bfba2b2e6987ff890fec0663c | Shell | TDehaene/mlops_workshop | /local/build_trainer_image.sh | UTF-8 | 916 | 2.921875 | 3 | [] | no_license | # Set global variables
# NOTE(review): REGION is defined but never used in this script — confirm
# whether it should be passed to the build (e.g. a --region flag) or removed.
REGION="europe-west1"
PROJECT_ID="gothic-parsec-308513"
# Build trainer image
IMAGE_NAME="trainer_image"
IMAGE_TAG="latest"
# eu.gcr.io prefix: the image is pushed to the EU Container Registry host.
IMAGE_URI="eu.gcr.io/$PROJECT_ID/$IMAGE_NAME:$IMAGE_TAG"
# Use cloud build to build the image
# This command will take all the files in the current directory (and subdirectories) and send them over to GCP
# It is therefore recommended to make sure there are no unneccesary files in the current directory (or subdirectories),
# since they also get uploaded to GCP.
# You can also use a .gcloudignore file (like a .gitignore file) to prevent file from being uploaded.
# See: https://cloud.google.com/cloud-build/docs/speeding-up-builds
# Note that because we are using a GCR address as tag, the container image will automatically be available
# in GCR and can be readily used by e.g. AI Platform Training or KFP
gcloud builds submit --tag $IMAGE_URI
| true |
3f1a531de7e6d7c837540353fe7b3cac76edba04 | Shell | spiralofhope/shell-random | /live/sh/scripts/scheduler.sh | UTF-8 | 496 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env sh
# As root:
# If not running as root yet, prompt for the root password and re-run this
# same script via su; the root invocation then takes the else-branch.
if ! [ "$USER" = 'root' ]; then
\echo "enter root password"
/bin/su --command "$0"
else
# Print the current I/O scheduler for a block device and then select a new
# one by writing to sysfs.
#   $1 - device name (e.g. sda), $2 - scheduler name.
# The leading backslashes bypass any shell aliases for echo/cat.
set_scheduler() {
drive="$1"
scheduler="$2"
\echo 'The scheduler is currently'
\cat "/sys/block/$drive/queue/scheduler"
\echo "$scheduler" > "/sys/block/$drive/queue/scheduler"
}
#set_scheduler sda noop
#set_scheduler sda deadline
# 2018-04-09 - The default for Devuan-1.0.0-jessie-i386-DVD
set_scheduler sda cfq
fi # The above is run as root
| true |
bfc9fd77c10b9ad18fb5ff810c265ba9bfe6dabf | Shell | dlux/vagrant-manual-openstack | /apt/ceilometer-controller.sh | UTF-8 | 2,476 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# 0. Post-installation
/root/shared/proxy.sh
source /root/shared/hostnames.sh
echo "source /root/shared/openstackrc" >> /root/.bashrc
# 1. Install OpenStack Telemetry Controller Service and dependencies
apt-get install -y ubuntu-cloud-keyring
echo "deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main" >> /etc/apt/sources.list.d/juno.list
apt-get update && apt-get dist-upgrade
apt-get install -y ceilometer-api ceilometer-collector ceilometer-agent-central ceilometer-agent-notification ceilometer-alarm-evaluator ceilometer-alarm-notifier python-ceilometerclient
# 2. Configure database connection
sed -i "s/#connection=<None>/connection = mongodb:\/\/ceilometer:secure@nosql-database:27017\/ceilometer/g" /etc/ceilometer/ceilometer.conf
# 3. Configure message broker connection
sed -i "s/#rpc_backend=rabbit/rpc_backend = rabbit/g" /etc/ceilometer/ceilometer.conf
sed -i "s/#rabbit_host=localhost/rabbit_host = message-broker/g" /etc/ceilometer/ceilometer.conf
sed -i "s/#rabbit_password=guest/rabbit_password = secure/g" /etc/ceilometer/ceilometer.conf
# 4. Configure OpenStack Identity service
sed -i "s/#auth_uri=<None>/auth_uri = http:\/\/identity:5000\/v2.0/g" /etc/ceilometer/ceilometer.conf
sed -i "s/#identity_uri=<None>/identity_uri = http:\/\/identity:35357\/v2.0/g" /etc/ceilometer/ceilometer.conf
sed -i "s/#admin_tenant_name=admin/admin_tenant_name = service/g" /etc/ceilometer/ceilometer.conf
sed -i "s/#admin_user=<None>/admin_user = ceilometer/g" /etc/ceilometer/ceilometer.conf
sed -i "s/#admin_password=<None>/admin_password = secure/g" /etc/ceilometer/ceilometer.conf
# 5. Configure service
sed -i "s/#os_auth_url=http:\/\/localhost:5000\/v2.0/os_auth_url=http:\/\/identity:5000\/v2.0/g" /etc/ceilometer/ceilometer.conf
sed -i "s/#os_username=ceilometer/os_username = ceilometer/g" /etc/ceilometer/ceilometer.conf
sed -i "s/#os_tenant_name=admin/os_tenant_name = admin/g" /etc/ceilometer/ceilometer.conf
sed -i "s/#os_password=admin/os_password = secure/g" /etc/ceilometer/ceilometer.conf
token=`openssl rand -hex 10`
sed -i "s/#metering_secret=change this or be hacked/metering_secret = ${token}/g" /etc/ceilometer/ceilometer.conf
# 6. Restart service
service ceilometer-agent-central restart
service ceilometer-agent-notification restart
service ceilometer-api restart
service ceilometer-collector restart
service ceilometer-alarm-evaluator restart
service ceilometer-alarm-notifier restart
| true |
e7cfbe8d73e05922b8026dcc532e0a33ea2ab345 | Shell | chenbk85/mySoft | /benchmark/file/perf.sh | UTF-8 | 914 | 3.046875 | 3 | [] | no_license | ( iostat -xm 30 & while true; do date; sleep 30; done ) >iostat.log 2>&1 &
iopid=$!
( vmstat 30 & while true; do date; sleep 30; done ) >vmstat.log 2>&1 &
vmpid=$!
FILE="/data/test.data"
PDISK="/dev/mapper/vg_data-lv_data"
BC=1047586
BS=65536
for i in 0 1 2; do
date
echo "do free mem for dd input[$i]..."
./freemem
date
echo "do dd (writing) [$i]..."
dd if=/dev/zero of=$FILE bs=$BS count=$BC
date
echo "do free mem for dd output[$i]..."
./freemem
date
echo "do dd (reading) [$i]..."
dd of=/dev/null if=$FILE bs=$BS count=$BC
done
date
echo "do free mem for file-random I/O..."
./freemem
TC=`expr $BC \* $BC / 4096`
BS=4096
date
echo "do file-random I/O ..."
./file_perf_test -r 0 -t 2 -b $BS -R -D -T $TC -f $FILE
for i in 0 1 2 3; do
date
echo "do hdparam -tT[$i] ..."
/sbin/hdparm -tT $PDISK
done
sleep 30
kill $iopid $vmpid
sleep 1
kill $iopid $vmpid
sleep 1
kill -9 $iopid $vmpid
| true |
94cde15623215bf714a4810eadc7420f40393716 | Shell | dev-osrose/osIROSE-new | /cmake/scripts/patch.sh | UTF-8 | 1,565 | 4.03125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Idempotently (re)apply a patch file ($1) to the current source tree.
# Strategy: dry-run the patch first; if it fails — typically because an
# older version of the same patch is already applied — revert using the
# backed-up copy, then apply the new one. A copy of the last successfully
# applied patch is kept as bak_<name> in the working directory.
# check if we have given a patch file
if [ ! -f "$1" ]; then
echo "=== no patch file specified ===" >&2
exit 1
fi
# define a location for the patch file backup
PATCH_BAK=$(pwd)/bak_$(basename "$1")
# creating a backup if not exist
if [ ! -f "$PATCH_BAK" ]; then
echo "=== creating patch backup to $PATCH_BAK ==="
cp "$1" "$PATCH_BAK"
fi
# first we try to merge the given patch
patch -p0 -N --merge --dry-run -i "$1"
TRY_PATCH_STATUS=$?
echo "=== fake patch step returns with $TRY_PATCH_STATUS ==="
# status 1: some hunks failed, presumably an older version is applied —
# we revert the patched sources with the backed up file
if [ $TRY_PATCH_STATUS = "1" ]; then
echo "=== try to revert patch ==="
patch -p0 -N -R --dry-run -i "$PATCH_BAK"
TRY_REVERT_PATCH_STATUS=$?
echo "=== fake revert patch returns with $TRY_REVERT_PATCH_STATUS ==="
if [ $TRY_REVERT_PATCH_STATUS = "0" ]; then
echo "=== revert patch ==="
patch -p0 -N -R -i "$PATCH_BAK"
echo "=== reapplying patch ==="
patch -p0 -N -i "$1"
# if reverting goes wrong we print a message
else
echo "=== reverting patch failed, please remove all files from this subproject ===" >&2
exit 1
fi
# everything was okay in the test run, we will apply this patch
elif [ $TRY_PATCH_STATUS = "0" ]; then
echo "=== test succeeded. Apply patch ==="
patch -p0 -N --merge -i "$1"
# if everything goes wrong, we print a message
else
echo "=== patch step failed, please remove all files from this subproject ===" >&2
exit 1
fi
# after everything is okay, we should backup the new patch file
echo "=== backing up new patch file ==="
cp "$1" "$PATCH_BAK"
| true |
b1ce22bfe4b1954d863589b4fc1bcb3a93917bb9 | Shell | ozitraveller/diy-devuan | /diy-source/config/hooks/normal/terminal_alternative.hook.chroot | UTF-8 | 567 | 2.53125 | 3 | [] | no_license | #!/bin/sh
# Register the default X terminal emulator via the alternatives system.
# NOTE(review): the original header said "xfce4-terminal", but the active
# line below registers lxterminal; the other candidates are kept commented
# out for easy switching — confirm which one is intended.
#############################################
set -e
echo DIY-LIVE-Hook: "$0"
echo ""
# Each candidate is registered with priority 99.
# update-alternatives --install /usr/bin/x-terminal-emulator x-terminal-emulator /usr/bin/xfce4-terminal 99
update-alternatives --install /usr/bin/x-terminal-emulator x-terminal-emulator /usr/bin/lxterminal 99
# update-alternatives --install /usr/bin/x-terminal-emulator x-terminal-emulator /usr/bin/rxvt-unicode 99
# update-alternatives --install /usr/bin/x-terminal-emulator x-terminal-emulator /usr/bin/xterm 99
| true |
0ea7caf726c3c61085e8ee5a533cca33e2417d44 | Shell | kvazimoda24/openmediavault | /deb/openmediavault/usr/share/openmediavault/mkconf/proftpd.d/mod_tls | UTF-8 | 3,107 | 2.765625 | 3 | [] | no_license | #!/bin/sh
#
# This file is part of OpenMediaVault.
#
# @license http://www.gnu.org/licenses/gpl.html GPL Version 3
# @author Volker Theile <volker.theile@openmediavault.org>
# @copyright Copyright (c) 2009-2018 Volker Theile
#
# OpenMediaVault is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# OpenMediaVault is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenMediaVault. If not, see <http://www.gnu.org/licenses/>.
# Documentation/Howto:
# http://wiki.ubuntuusers.de/ProFTPD
# http://www.proftpd.de/HowTo-Server-Config.42.0.html
# http://wiki.ubuntu-forum.de/index.php/ProFTPd
set -e
. /etc/default/openmediavault
. /usr/share/openmediavault/scripts/helper-functions
OMV_SSL_CERTIFICATE_DIR=${OMV_SSL_CERTIFICATE_DIR:-"/etc/ssl"}
OMV_SSL_CERTIFICATE_PREFIX=${OMV_SSL_CERTIFICATE_PREFIX:-"openmediavault-"}
OMV_PROFTPD_MODTLS_CONFIG=${OMV_PROFTPD_MODTLS_CONFIG:-"/etc/proftpd/tls.conf"}
OMV_PROFTPD_MODTLS_TLSLOG=${OMV_PROFTPD_MODTLS_TLSLOG:-"/var/log/proftpd/tls.log"}
OMV_PROFTPD_MODTLS_TLSPROTOCOL=${OMV_PROFTPD_MODTLS_TLSPROTOCOL:-"TLSv1.2"}
OMV_PROFTPD_MODTLS_TLSVERIFYCLIENT=${OMV_PROFTPD_MODTLS_TLSVERIFYCLIENT:-"off"}
OMV_PROFTPD_MODTLS_TLSRENEGOTIATE=${OMV_PROFTPD_MODTLS_TLSRENEGOTIATE:-"required off"}
# Create SSL/TLS certificates and configuration file
# Create a SSL certificate:
# openssl req -new -x509 -days 365 -nodes -out /etc/ssl/certs/proftpd.crt -keyout /etc/ssl/private/proftpd.key
xmlstarlet sel -t -m "//services/ftp/modules/mod_tls" \
-o "<IfModule mod_tls.c>" -n \
-i "enable[. = '0']" -o " TLSEngine off" -n -b \
-i "enable[. = '1']" -o " TLSEngine on" -n -b \
-o " TLSLog ${OMV_PROFTPD_MODTLS_TLSLOG}" -n \
-o " TLSProtocol ${OMV_PROFTPD_MODTLS_TLSPROTOCOL}" -n \
-i "nocertrequest[. = '1'] | nosessionreuserequired[. = '1'] | useimplicitssl[. = '1']" \
-o " TLSOptions " \
-i "nocertrequest[. = '1']" -o "NoCertRequest " -b \
-i "nosessionreuserequired[. = '1']" -o "NoSessionReuseRequired " -b \
-i "useimplicitssl[. = '1']" -o "UseImplicitSSL " -b \
-n \
-b \
-v "concat(' TLSRSACertificateFile ${OMV_SSL_CERTIFICATE_DIR}/certs/${OMV_SSL_CERTIFICATE_PREFIX}',sslcertificateref,'.crt')" -n \
-v "concat(' TLSRSACertificateKeyFile ${OMV_SSL_CERTIFICATE_DIR}/private/${OMV_SSL_CERTIFICATE_PREFIX}',sslcertificateref,'.key')" -n \
-o " TLSVerifyClient ${OMV_PROFTPD_MODTLS_TLSVERIFYCLIENT}" -n \
-o " TLSRenegotiate ${OMV_PROFTPD_MODTLS_TLSRENEGOTIATE}" -n \
-i "required[. = '0']" -o " TLSRequired off" -n -b \
-i "required[. = '1']" -o " TLSRequired on" -n -b \
-i "string-length(extraoptions) > 0" -v extraoptions -n -b \
-o "</IfModule>" -n \
${OMV_CONFIG_FILE} | xmlstarlet unesc > ${OMV_PROFTPD_MODTLS_CONFIG}
| true |
7e65e5cc58e0212b7195791b266da564d16a2203 | Shell | 0xecho/zhull | /subdomain_finder.sh | UTF-8 | 352 | 2.640625 | 3 | [] | no_license | # httprobe can also be used
# Enumerate subdomains for every host emitted by inputter.sh, then probe
# the results with httpx and collect the live ones.
#
# $1 - target identifier handed to ./inputter.sh; also the directory that
#      receives the aggregated "initial_subdomains" file.
./inputter.sh "$1" hosts | while IFS= read -r host; do
    echo "$host"
    # Sanitise the host into a filename: map ':', '/' and '\' to '_'.
    host_file="results_$(echo "$host" | tr ':/\\' _)"
    python3 /home/joeking/local/OneForAll/oneforall.py run --takeover False --port high --brute True --target "$host" --path "results/$host_file"
    # Probe the discovered subdomains and append the live ones.
    httpx -silent < "results/$host_file" >> "$1/initial_subdomains"
done
| true |
922741d1a9f1d2d19be674651485e4c22318f567 | Shell | damienmckenna/pantheon | /TGenshi/mercury/S050ec2.sh/template.newtxt.G00_centos | UTF-8 | 405 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Move mysql and varnish to /mnt
# TODO support for EBS and RDS
#fix /tmp
chmod 1777 /tmp
# Mysql: relocate the log file and data directory onto /mnt and leave
# symlinks behind so the stock paths keep working.
mv /var/log/mysqld.log /mnt/mysql/
ln -s /mnt/mysql/mysqld.log /var/log/mysqld.log
mv /var/lib/mysql /mnt/mysql/lib
ln -s /mnt/mysql/lib /var/lib/mysql
# Varnish: same relocate-and-symlink treatment for its state directory.
mv /var/lib/varnish /mnt/varnish/lib
ln -s /mnt/varnish/lib /var/lib/varnish
chown varnish:varnish /mnt/varnish/lib/pressflow/
| true |
9d23d4f8cc1b8df3ce3ae6801c3c43d1271d036e | Shell | lqmoonxiaobaibai/liuqiang-dailycode | /shellcode4/Countstring.sh | UTF-8 | 186 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Read a whitespace-separated list of words and print each word whose
# length is at most 6 characters.
read -p "please input your word:" string
[ -z "$string" ] && { echo "please input again!" ; exit 30;}
# Start the loop at field 1: awk's $0 is the whole record, so the original
# "i=0" start also printed the entire input line whenever its total length
# was <= 6 characters.
echo "$string" | awk '{i=1;while(i<=NF){if(length($i)<=6)print $i;i++}}'
| true |
38f55d662b26d7f8a083feec3106662d35ec69c2 | Shell | deepfryed/webserver-benchmarks | /bechmark.sh | UTF-8 | 1,196 | 2.96875 | 3 | [] | no_license | #!/bin/bash
iter=50000
function memcheck() {
ppid=$1
echo
echo "##########################################################################################"
echo "memory use (rss)"
echo "##########################################################################################"
echo
echo "parent:"
ps -o rss= --pid $ppid
echo "workers:"
ps -o rss= --ppid $ppid
echo
}
echo "##########################################################################################"
echo "testing puma"
echo "##########################################################################################"
./bin/puma -w 2 -q config.ru > /dev/null &
pid=$!
sleep 2
ab -n$iter -c10 -q -k http://127.0.0.1:9292/ | head -n24 | tail -n11
sleep 1
memcheck $pid
kill -TERM $pid
sleep 1
echo "##########################################################################################"
echo "testing thin (with pastry)"
echo "##########################################################################################"
./bin/pastry -p 9292 -R config.ru start > /dev/null &
pid=$!
sleep 2
ab -n$iter -c10 -q -k http://127.0.0.1:9292/ | head -n24 | tail -n11
sleep 1
memcheck $pid
kill -TERM $pid
| true |
c26600d8028fcef647989263bf48f0d21ca96086 | Shell | Vinotha16/WIN_ROLLBACK | /templates/linux_actualfacts/rhel8/nolegacyshadow_624_actual.fact | UTF-8 | 333 | 3.171875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# CIS-style fact: report legacy NIS "+" compatibility entries in
# /etc/shadow (lines starting with "+:"), which must not exist.
# grep -c counts matching lines; with an anchored pattern that matches at
# most once per line it equals the old `grep -o ... | wc -l` pipeline.
if [ "$(grep -c '^\+:' /etc/shadow)" -ne "0" ]; then
    # Comma-separated list of the offending account names, built only when
    # there is actually something to report.
    failed=$(grep '^\+:' /etc/shadow | sed 's/+://g' | cut -d: -f1 | paste -sd ",")
    echo "{ \"nolegacyshadow_624_actual\" : \"failed $failed\" }"
else
    echo "{ \"nolegacyshadow_624_actual\" : \"legacy "+" entries not exist in /etc/shadow\" }"
    exit 1
fi
| true |
fcec65833382864abd8254d2d96f020ad78dfe5e | Shell | AhnLab-OSS/mmlspark | /tools/tests/tags.sh | UTF-8 | 2,024 | 3.71875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Copyright (C) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in project root for information.
. "$(dirname "${BASH_SOURCE[0]}")/../../runme"
map deftag a b c
num=0 fail=0
# Run one tag-selection scenario.
#   $* - a single "FLAGS => EXPECTED" spec, e.g. "+a -c => { A B AB }"
# FLAGS is assigned to TESTS, parsed, and four probes (tags a, b, c and
# the combined a+b) report which suites would run. The joined result is
# compared against EXPECTED; mismatches are printed and tallied in the
# global num/fail counters.
try() {
local test="$*"
# Split the spec on the literal " => " separator.
local flags="${test%% => *}" expect="${test##* => }"
# Unquoted echo collapses the alignment whitespace used at the call sites.
flags=$(echo $flags)
# Evaluate in a subshell so TESTS/_test_info never leak between cases.
local res=$(TESTS="$flags"; unset _test_info; declare -A _test_info
_parse_TESTS
should test a && echo A
should test b && echo B
should test c && echo C
should test a b && echo AB)
# Join the reported tags into the "{ A B }" form used by EXPECTED.
res=$(echo { $res })
((num++))
if [[ "$expect" != "$res" ]]; then
((fail++))
echo "FAIL: TEST=\"$flags\": expected $expect, got $res"
fi
}
# Print a summary of the try() runs and exit 0 when every scenario
# passed, 1 otherwise.
report() {
if ((fail == 0)); then echo "All tests passed"; exit 0
else echo "$fail/$num tests failed"; exit 1; fi
}
# The following is an exhaustive list of all a/b/c options, verified with
# scalatest. To try it:
# import org.scalatest.{FunSuite, Tag}
# object A extends Tag("a"); object B extends Tag("b"); object C extends Tag("c")
# class ExampleSpec extends FunSuite {
# test("A", A) {}; test("B", B) {}; test("C", C) {}; test("AB", A, B) {}
# }
# and then in sbt use -n for + and -l for -, eg: test-only * -- -n a -n b -l c
try " => { A B C AB }"
try "+a +b +c => { A B C AB }"
try "+a +b => { A B AB }"
try "+a +b -c => { A B AB }"
try " -c => { A B AB }"
try "+a +c => { A C AB }"
try " +b +c => { B C AB }"
try "+a => { A AB }"
try "+a -c => { A AB }"
try " +b => { B AB }"
try " +b -c => { B AB }"
try "-a => { B C }"
try "-a +b +c => { B C }"
try " -b => { A C }"
try "+a -b +c => { A C }"
try "+a -b => { A }"
try " -b -c => { A }"
try "+a -b -c => { A }"
try "-a +b => { B }"
try "-a -c => { B }"
try "-a +b -c => { B }"
try "-a -b => { C }"
try " +c => { C }"
try "-a +c => { C }"
try " -b +c => { C }"
try "-a -b +c => { C }"
try "-a -b -c => { }"
report
| true |
a5a55f3921d64ddb21fee7437e5fa1c22891e43f | Shell | 417-72KI/Utilities4Development | /GitLab/build_machine_maintainer/install.sh | UTF-8 | 502 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Install maintenance_build_machine as a macOS LaunchDaemon: copy the
# script into PATH, install the .plist, and (re)load it with launchctl.
SHELL_DEST_FILE=/usr/local/bin/maintenance_build_machine
PLIST_DEST_FILE=/Library/LaunchDaemons/maintenance_build_machine.plist
# Unload and remove any previously installed daemon first.
if [ -e $PLIST_DEST_FILE ]; then
sudo launchctl unload $PLIST_DEST_FILE
sudo rm -f $PLIST_DEST_FILE
fi
sudo cp -r maintenance_build_machine.sh $SHELL_DEST_FILE
sudo chmod +x $SHELL_DEST_FILE
sudo cp -r maintenance_build_machine.plist $PLIST_DEST_FILE
# Sanity-check the property list syntax before handing it to launchd.
plutil -lint $PLIST_DEST_FILE
sudo chown root $PLIST_DEST_FILE
sudo launchctl load $PLIST_DEST_FILE
| true |
b50978cc11a6949e430fe6ea76d4a5398142f390 | Shell | BL-JaspreetSinghGill/Shell-Script-Programs | /ifelifProg/arithmeticMinMax.sh | UTF-8 | 857 | 3.890625 | 4 | [] | no_license | #!/bin/bash -x
minimum=0;
maximum=0;
read -p "ENTER THE FIRST NUMBER: " a
read -p "ENTER THE SECOND NUMBER: " b
read -p "ENTER THE THIRD NUMBER: " c
# Evaluate a+b*c using the global operands read at startup.
arithmeticOperation1 () {
    printf '%s\n' "$(( a + b * c ))"
}

# Evaluate a%b+c using the global operands.
arithmeticOperation2 () {
    printf '%s\n' "$(( a % b + c ))"
}

# Evaluate c+a/b (integer division) using the global operands.
arithmeticOperation3 () {
    printf '%s\n' "$(( c + a / b ))"
}

# Evaluate a*b+c using the global operands.
arithmeticOperation4 () {
    printf '%s\n' "$(( a * b + c ))"
}
# Fold one value into the global running extrema (minimum/maximum).
checkMinimumMaximum () {
    local candidate=$1
    # Arithmetic evaluation replaces the unquoted [ -lt/-gt ] tests.
    if (( candidate < minimum )); then
        minimum=$candidate
    fi
    if (( candidate > maximum )); then
        maximum=$candidate
    fi
}
# Seed the running extrema with the first expression's value; the first
# checkMinimumMaximum call is then a no-op but keeps the sequence uniform.
result=$(arithmeticOperation1);
minimum=$result;
maximum=$result;
checkMinimumMaximum $result;
result=$(arithmeticOperation2);
checkMinimumMaximum $result;
result=$(arithmeticOperation3);
checkMinimumMaximum $result;
result=$(arithmeticOperation4);
checkMinimumMaximum $result;
echo "MINIMUM : "$minimum;
echo "MAXIMUM : "$maximum;
| true |
10c76134d03de56290ec103cb4341d5a9d2e5991 | Shell | NeeharikaSompalli/Data_Pipeline | /run_scripts/install_kafka.sh | UTF-8 | 351 | 2.6875 | 3 | [] | no_license | #!/bin/bash
# Set up the DIC data-pipeline workspace: clone the project repository,
# fetch the Kafka 2.0.0 sources, and install the project's broker config.
# Abort on the first failed step so later steps never run on a broken tree.
set -e
mkdir -p DIC
cd DIC
echo "Git clone"
git clone https://github.ncsu.edu/araja2/DIC_Data_Pipeline.git
# This step downloads Kafka; the old message wrongly said "Nifi".
echo "Download Kafka"
wget http://apache.claz.org/kafka/2.0.0/kafka-2.0.0-src.tgz
tar -xzf kafka-2.0.0-src.tgz
# Both trees were just created under the current directory (DIC), so copy
# relative to it. The old BASEDIR=$(dirname "$0") pointed at the script's
# own location instead, which broke whenever the script was invoked via an
# absolute path.
cp DIC_Data_Pipeline/Kafka_Config/server.properties kafka-2.0.0-src/config/
| true |
e81fc9eab83a650ef822a64c2d1e41975a9ddbee | Shell | The-OMG/rclone-jacktheripper | /sub/rclone_drive-chuck-size.sh | UTF-8 | 373 | 3.375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Calculate the best rclone drive chunk size for transfer speed, keyed on
# the "available" memory column of `free -w`, in whole GB.
AvailableRam=$(free --giga -w | grep Mem | awk '{print $8}')
case "$AvailableRam" in
    [1-9][0-9][0-9][0-9]*) driveChunkSize="1G" ;;  # >= 1000 GB available
    [1-9][0-9] | [1-9][0-9][0-9]) driveChunkSize="1G" ;;
    [6-9]) driveChunkSize="512M" ;;
    5) driveChunkSize="256M" ;;
    4) driveChunkSize="128M" ;;
    3) driveChunkSize="64M" ;;
    2) driveChunkSize="32M" ;;
    [0-1]) driveChunkSize="8M" ;;
    # Previously any unmatched value (a failed/missing `free`, unexpected
    # output) left driveChunkSize unset; fall back to the safe minimum.
    *) driveChunkSize="8M" ;;
esac
| true |
63007599e243a311a4139d89a6d9ec44bf1f7280 | Shell | dutchmeister33/ownSysroot | /users_files/install_cc_rtl8188eu.sh | UTF-8 | 2,501 | 3.203125 | 3 | [
"WTFPL"
] | permissive | #!/bin/bash
show_current_task
#--- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --
exportdefvar rtl8188eu_GITURL "https://github.com/lwfinger"
exportdefvar rtl8188eu_GITREPO "rtl8188eu"
exportdefvar rtl8188eu_BRANCH "v5.2.2.4"
exportdefvar rtl8188eu_REVISION ""
exportdefvar rtl8188eu_RECOMPILE n
#--- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- -
# GET PACKAGES --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
if ! ( get_git_pkg "${rtl8188eu_GITURL}" "${rtl8188eu_GITREPO}" "${rtl8188eu_BRANCH}" "${rtl8188eu_REVISION}" ) ; then goto_exit 1 ; fi
#--- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- -
# INSTALL PACKAGES - --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- -
if ! pushd "${CACHE}/${rtl8188eu_GITREPO}-${rtl8188eu_BRANCH}" ; then goto_exit 2 ; fi
KVER=$(cd "${SYSROOT}/lib/" && ls "modules")
KERNEL_DIR=$(realpath -s "${SYSROOT}/lib/modules/${KVER}/build")
# MAKE
if ( [ "${rtl8188eu_RECOMPILE}" != "n" ] || ! [ -f "8188eu.ko" ] ) ; then
sed -i 's|KSRC :=|KSRC ?=|g' Makefile
sed -i 's|KERNEL_SRC :=|KERNEL_SRC ?=|g' Makefile
make clean
if ! make ARCH="${ARCH}" CROSS_COMPILE="${TOOLCHAIN_PREFIX}" KSRC="${KERNEL_DIR}" KERNEL_SRC="${KERNEL_DIR}" ${NJ} ; then exit 3 ; fi
fi
# INSTALL
preAuthRoot && sudo cp "8188eu.ko" "${SYSROOT}/opt/"
preAuthRoot && sudo mkdir -p "${SYSROOT}/lib/modules/${KVER}/kernel/drivers/net/wireless/"
if ! ( preAuthRoot && sudo chroot "${SYSROOT}" install -p -m 644 "/opt/8188eu.ko" "/lib/modules/${KVER}/kernel/drivers/net/wireless/" ) ; then exit 4 ; fi
if ! ( preAuthRoot && sudo chroot "${SYSROOT}" depmod -a "${KVER}" ) ; then exit 5 ; fi
preAuthRoot && sudo rm "${SYSROOT}/opt/8188eu.ko"
preAuthRoot && sudo mkdir -p "${SYSROOT}/lib/firmware/rtlwifi"
preAuthRoot && sudo cp "rtl8188eufw.bin" "${SYSROOT}/lib/firmware/rtlwifi/"
preAuthRoot && sudo chroot "${SYSROOT}" chmod -R +x "/lib/firmware/rtlwifi"
preAuthRoot && echo "blacklist r8188eu" | sudo tee "${SYSROOT}/etc/modprobe.d/50-8188eu.conf"
popd
#--- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- -
show_message "RTL8188EU WAS SUCCESSFULLY INSTALLED!"
| true |
fed5b1422623ef91eedca6cdb035b61939d2bb03 | Shell | jrsdead/dotfiles | /scripts/bin/startvpn.sh | UTF-8 | 3,307 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Prompt the user with a yes/no question.
#   $1 - question text
#   $2 - optional default ("Y" or "N") applied when the user just presses
#        Enter; with no default, empty input re-asks the question.
# Returns 0 for yes, 1 for no.
function ask {
  local question=$1 fallback=${2:-} choices answer
  # Pick the prompt hint once; any other value means "no default".
  case "$fallback" in
    Y) choices="Y/n" ;;
    N) choices="y/N" ;;
    *) choices="y/n"; fallback= ;;
  esac
  while true; do
    read -p "$question [$choices] " answer
    # Empty input falls back to the default (if any).
    [ -z "$answer" ] && answer=$fallback
    case "$answer" in
      Y*|y*) return 0 ;;
      N*|n*) return 1 ;;
    esac
  done
}
/usr/bin/osascript <<-EOF
tell application "Tunnelblick"
connect "strongvpn"
get state of first configuration where name = "strongvpn"
repeat until result = "CONNECTED"
delay 1
get state of first configuration where name = "strongvpn"
end repeat
end tell
EOF
echo "Your IP is now `lynx -dump -hiddenlinks=ignore -nolist http://checkip.dyndns.org:8245/ | awk '{ print $4 }' | sed '/^$/d; s/^[ ]*//g; s/[ ]*$//g'`"
if ask "Start Apps?" Y; then
if ask "Start General Apps?" Y; then
if ask "Start Textual IRC Client?" Y; then
echo "starting Textual"
/usr/bin/osascript <<-ENDRUNTEXTUAL
tell application "Textual 5"
activate
end tell
tell application "Terminal"
activate
end tell
ENDRUNTEXTUAL
fi
if ask "Start Chrome?" Y; then
echo "starting Chrome"
/usr/bin/osascript <<-ENDRUNCHROME
tell application "Google Chrome"
activate
set theUrl to "https://awesome-hd.net"
if (count every window) = 0 then
make new window
end if
set found to false
set theTabIndex to -1
repeat with theWindow in every window
set theTabIndex to 0
repeat with theTab in every tab of theWindow
set theTabIndex to theTabIndex + 1
if theTab's URL = theUrl then
set found to true
exit
end if
end repeat
if found then
exit repeat
end if
end repeat
if found then
tell theTab to reload
set theWindow's active tab index to theTabIndex
set index of theWindow to 1
else
tell window 1 to make new tab with properties {URL:theUrl}
end if
end tell
tell application "Terminal"
activate
end tell
ENDRUNCHROME
fi
fi
if ask "Start Dev Apps?" Y; then
if ask "Start Coda 2?" Y; then
echo "starting Coda 2"
/usr/bin/osascript <<-ENDCODA
tell application "Coda 2"
activate
end tell
tell application "Terminal"
activate
end tell
ENDCODA
fi
if ask "Start PHPStorm?" Y; then
echo "starting PHPStorm"
/usr/bin/osascript <<-ENDPHPSTORM
tell application "PhpStorm"
activate
end tell
tell application "Terminal"
activate
end tell
ENDPHPSTORM
fi
if ask "Start PyCharm?" Y; then
echo "starting PHPStorm"
/usr/bin/osascript <<-ENDPYCHARM
tell application "PyCharm"
activate
end tell
tell application "Terminal"
activate
end tell
ENDPYCHARM
fi
if ask "Start AppCode?" Y; then
echo "starting AppCode"
/usr/bin/osascript <<-ENDAPPCODE
tell application "AppCode"
activate
end tell
tell application "Terminal"
activate
end tell
ENDAPPCODE
fi
fi
else
echo "All Finished"
fi
| true |
d50320d5be47fd95f611cd6b4b016159311c753b | Shell | mjijeesh/pcie-ctu_can_fd | /scripts/test-ip-read | UTF-8 | 999 | 3.21875 | 3 | [] | no_license | #!/bin/sh
# Force a PCI bus rescan so hot-added devices appear.
echo 1 > /sys/bus/pci/rescan
# Find the target board: first any device with vendor id 0x1172
# (presumably the Altera FPGA — confirm), falling back to 1760:ff00.
pci_dev="$(lspci -n -d 1172: | sed -e 's/\([0-9:]\+\) .*$/\1/')"
if [ -z "$pci_dev" ] ; then
pci_dev="$(lspci -n -d 1760:ff00 | sed -e 's/\([0-9:]\+\) .*$/\1/')"
if [ -z "$pci_dev" ] ; then
echo "PCI device not found"
exit 1
fi
fi
echo "pci_dev=$pci_dev"
# Locate the device's sysfs directory by its bus:dev.fn suffix.
pci_dev_dir="$(find /sys/devices/ -name "*$pci_dev" | sed -n -e "s/\(\/sys\/devices\/pci.*\/[0:]*$pci_dev\)/\1/p" )"
if [ -z "$pci_dev_dir" ] ; then
echo "PCI device directory not found"
exit 1
fi
echo "pci_dev_dir=$pci_dev_dir"
# Remove the device, rescan the bus, show it, then re-enable it.
echo 1 >"$pci_dev_dir"/remove
echo 1 > /sys/bus/pci/rescan
lspci -nn -v -s "$pci_dev"
echo 1 >"$pci_dev_dir"/enable
# Second line of the sysfs "resource" file corresponds to BAR1; the first
# column is its start address.
pci_dev_bar1_addr="$(cat "$pci_dev_dir"/resource | sed -n -s '2s/^\(0x[0-9a-fA-Fx]\+\) .*$/\1/p')"
echo "pci_dev_bar1_addr=$pci_dev_bar1_addr"
if [ -z "$pci_dev_bar1_addr" ] ; then
echo "PCI device bar 1 address not found"
exit 1
fi
# Dump 0x100 bytes from BAR1 in 4-byte words (echo the command for the log).
echo rdwrmem -b 4 -s "$pci_dev_bar1_addr" -l 0x100 -m
rdwrmem -b 4 -s "$pci_dev_bar1_addr" -l 0x100 -m
| true |
6798fd43f10dd57eac4cfcacd7f73da2e60d0f1c | Shell | kekenalog/tgs | /install.sh | UTF-8 | 242 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Build and install the tgs package using a temporary GOPATH rooted at the
# repository itself (pre-modules Go workspace layout with a src/ tree).
if [ ! -f install.sh ]; then
echo 'install must be run within its container folder' 1>&2
exit 1
fi
CURDIR=`pwd`
# Swap GOPATH to the repo for the build, restoring the original afterwards.
OLDGOPATH="$GOPATH"
export GOPATH="$CURDIR"
gofmt -w src
go install tgs
export GOPATH="$OLDGOPATH"
echo 'finished'
| true |
021033dddea80c887a940b38852eca8ba1907d21 | Shell | William-Hill/esgf-docker | /data-node/scripts/change_data_node_password.sh | UTF-8 | 788 | 3.46875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# script to change the ESGF password used for TDS re-initialization
# Requires ESGF_PASSWORD (new clear-text password) and CATALINA_HOME
# (Tomcat install, used for its digest.sh helper) to be set.
if [ "${ESGF_PASSWORD}" = "" ] || [ "${CATALINA_HOME}" = "" ];
then
echo "All env variables: ESGF_PASSWORD, CATALINA_HOME must be set "
# NOTE(review): `exit -1` is out of the 0-255 range and is reported as 255.
exit -1
fi
# digest the user password
# digest.sh prints "<password>:<hash>"; keep only the SHA hash after ':'.
password_hash=$($CATALINA_HOME/bin/digest.sh -a SHA ${ESGF_PASSWORD} | cut -d ":" -f 2)
echo "Setting digested password=$password_hash"
# replace digested password in tomcat-users.xml
# (rewrites every password="..." attribute with the new digest)
sed -i -- 's/password=\"[^\"]*\"/password=\"'"${password_hash}"'\"/g' /esg/config/tomcat/tomcat-users.xml
# replace clear-text passwords in esg.ini
# NOTE(review): these sed programs splice ${ESGF_PASSWORD} into the
# replacement text -- a password containing '/', '&' or '\' would break them.
sed -i -- 's/dbsuper:changeit/dbsuper:'"${ESGF_PASSWORD}"'/g' /esg/config/esgcet/esg.ini
sed -i -- 's/thredds_password.*/thredds_password = '"${ESGF_PASSWORD}"'/g' /esg/config/esgcet/esg.ini
| true |
65541c043e729c73c772addd81e3f44d6732e281 | Shell | agounaris/terraform-aws-azure-devops-agent | /templates/provision | UTF-8 | 1,058 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Provision an Ubuntu EC2 instance as an Azure DevOps build agent:
# start SSM, install Docker CE from Docker's apt repo, then download,
# configure and start the VSTS/Azure DevOps agent as a service.
# ${azure_devops_agent_token} and ${project_name} are substituted by the
# Terraform template (this file lives under templates/).
set -e
snap start amazon-ssm-agent
apt-get update
apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
gnupg-agent \
software-properties-common
# Trust Docker's apt signing key and verify its fingerprint.
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
apt-key fingerprint 0EBFCD88
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
apt-get update
apt-get install -y docker-ce docker-ce-cli containerd.io unzip
# Let the default user run docker without sudo.
usermod -aG docker ubuntu
# Fetch and unpack the pinned agent release (-k skips TLS verification;
# presumably deliberate for this CDN -- confirm).
mkdir azure-devops-agent && cd azure-devops-agent
curl -fkSL -o vstsagent.tar.gz https://vstsagentpackage.azureedge.net/agent/2.154.1/vsts-agent-linux-x64-2.154.1.tar.gz;tar -zxvf vstsagent.tar.gz
./bin/installdependencies.sh
# The agent refuses to run as root unless this is set.
export AGENT_ALLOW_RUNASROOT=1
./config.sh --unattended --url https://dev.azure.com/MAT-OCS/ --auth pat --token ${azure_devops_agent_token} --projectname ${project_name} --runasservice --pool ${project_name}
chown ubuntu:ubuntu -R /azure-devops-agent
sudo ./svc.sh install
sudo ./svc.sh start
| true |
74565b9a73dc5f7fb05518a15c18fec4139acc96 | Shell | watson-developer-cloud/doc-tutorial-downloads | /discovery-data/latest/src/elastic-mt-migration.sh | UTF-8 | 3,109 | 3.953125 | 4 | [] | no_license | #!/usr/bin/env bash
set -euo pipefail

# Migrate the per-tenant Elasticsearch "notice" index from one tenant ID to
# another.  If tenant_<source>_notice exists it is cloned to
# tenant_<target>_notice and the source index is deleted; otherwise a fresh
# target index is created from --template.  Finally, every index whose
# index.tenant_id setting still names the source tenant is re-pointed at the
# target.  Requires ELASTIC_USER / ELASTIC_PASSWORD in the environment.

show_help(){
cat << EOS
Usage: $0 [options]
Options:
-h, --help Print help info
-s, --source Source name where create data
-t, --target Loop count to create data
EOS
}

replica=0
# Fix: initialize `action` -- it was only ever assigned inside the loop, so
# under `set -u` the first stray positional argument crashed with an
# "unbound variable" error instead of the intended usage message.
action=""
while (( $# > 0 )); do
    case "$1" in
        -h | --help )
            show_help
            exit 0
            ;;
        -s | --source )
            shift
            source="$1"
            ;;
        -t | --target )
            shift
            target="$1"
            ;;
        --template)
            shift
            template="$1"
            ;;
        --replica)
            shift
            replica="$1"
            ;;
        * )
            # First bare argument is accepted as "action"; a second one is an
            # error (same behavior as the original).
            if [[ -z "$action" ]]; then
                action="$1"
            else
                echo "Invalid argument."
                show_help
                exit 1
            fi
            ;;
    esac
    shift
done

if [ -z "${source+UNDEF}" ] ; then
    echo "Source tenant ID not defined"
    exit 1
fi
if [ -z "${target+UNDEF}" ] ; then
    echo "Target tenant ID not defined"
    exit 1
fi

# Common curl flags: insecure TLS (-k), silent, basic auth, JSON bodies.
ELASTIC_OPTIONS=(
    "-k"
    "-s"
    "-u"
    "${ELASTIC_USER}:${ELASTIC_PASSWORD}"
    "-H"
    "Content-Type: application/json"
)
ELASTIC_ENDPOINT=https://localhost:9200

source_index="tenant_${source}_notice"
target_index="tenant_${target}_notice"

# _clone requires the source index to be write-blocked; the second payload
# clears that block on the clone.
json_disable_read_write='{
"settings": {
"index.blocks.write": "true"
}
}'
json_clone_settings='{
"settings": {
"index.blocks.write": null
}
}'

indices=$(curl "${ELASTIC_OPTIONS[@]}" "${ELASTIC_ENDPOINT}/_cat/indices?h=index")

if echo "${indices}" | grep "${source_index}" > /dev/null ; then
    echo "Migrate ${source_index} to ${target_index}"
    curl "${ELASTIC_OPTIONS[@]}" -X PUT "${ELASTIC_ENDPOINT}/${source_index}/_settings" -d"${json_disable_read_write}"
    curl "${ELASTIC_OPTIONS[@]}" -X POST "${ELASTIC_ENDPOINT}/${source_index}/_clone/${target_index}" -d"${json_clone_settings}"
    MAX_RETRY_COUNT=5
    retry_count=0
    # Poll cluster health until the clone is at least yellow; every
    # MAX_RETRY_COUNT failed checks, ask ES to retry failed shard allocations.
    while :
    do
        curl "${ELASTIC_OPTIONS[@]}" "${ELASTIC_ENDPOINT}/_cluster/health/${target_index}?wait_for_status=green&timeout=30s" | grep -e "yellow" -e "green" && break
        # Fix: the original `((retry_count))` never incremented the counter,
        # and (returning non-zero for a zero value) aborted the script under
        # `set -e` on the very first failed health check.
        retry_count=$((retry_count + 1))
        if [ ${retry_count} -ge ${MAX_RETRY_COUNT} ] ; then
            curl "${ELASTIC_OPTIONS[@]}" -X POST "${ELASTIC_ENDPOINT}/_cluster/reroute?retry_failed=true"
            retry_count=0
        fi
    done
    curl "${ELASTIC_OPTIONS[@]}" -X DELETE "${ELASTIC_ENDPOINT}/${source_index}"
else
    echo "Source index ${source_index} not found. Create index for ${target_index}."
    # Give a clear error instead of a `set -u` crash when --template is missing.
    if [ -z "${template+UNDEF}" ] ; then
        echo "Template file not defined"
        exit 1
    fi
    request_file=$(mktemp)
    # Fix: the original ran two independent sed passes that each read the
    # template file, so the second pass silently overwrote the tenant-ID
    # substitution.  Apply both substitutions in a single pass, and use a
    # mktemp file instead of a predictable /tmp name.
    sed -e "s/#tenant_id#/${target}/g" -e "s/#replica_size#/${replica}/g" "${template}" > "${request_file}"
    curl "${ELASTIC_OPTIONS[@]}" -X PUT "${ELASTIC_ENDPOINT}/${target_index}" -d@"${request_file}"
    echo
    rm -f "${request_file}"
fi

# Re-point any remaining index whose index.tenant_id setting still names the
# source tenant.  ${indices} is intentionally unquoted: word splitting yields
# one index name per iteration.
for index in ${indices}
do
    tenant_id=$(curl "${ELASTIC_OPTIONS[@]}" "${ELASTIC_ENDPOINT}/${index}/_settings" | jq -r .\"${index}\".settings.index.tenant_id)
    if echo "${tenant_id}" | grep "${source}" > /dev/null ; then
        echo "Update tenant ID in ${index}"
        curl "${ELASTIC_OPTIONS[@]}" -XPUT "${ELASTIC_ENDPOINT}/${index}/_settings" --data-raw "{\"index.tenant_id\": \"${target}\"}"
        echo
    fi
done
55dda8b2a742d6f0dc1e81a469a3f26b1f79c843 | Shell | tzhenghao/DatPortal | /backup-files.sh | UTF-8 | 1,510 | 3.265625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Name: Zheng Hao Tan
# Email: tanzhao@umich.edu
#
# Mirrors a set of local folders (Dropbox, Google Drive, Box, Desktop) to a
# remote server over rsync/ssh.  Fill in the three placeholders below before
# running.

LOCAL_USERNAME=<your local home path>
REMOTE_SERVER_USERNAME=<your remote server username>
REMOTE_SERVER_IP_ADDRESS=<your ip address here>

# Local home directories, where these files/folders will be synced to the server.
LOCAL_DROPBOX_PATH="/Users/$LOCAL_USERNAME/Dropbox"
LOCAL_GOOGLE_DRIVE_PATH="/Users/$LOCAL_USERNAME/Google Drive"
LOCAL_BOX_SYNC_PATH="/Users/$LOCAL_USERNAME/Box Sync"
LOCAL_DESKTOP_PATH="/Users/$LOCAL_USERNAME/Desktop"

# Remote folders in which synced files will be located.
# You may change it to whichever path you like.
REMOTE_DROPBOX_PATH=/home/dropbox-backup
REMOTE_GOOGLE_DRIVE_PATH=/home/google-drive-backup
REMOTE_BOX_SYNC_PATH=/home/box-backup
REMOTE_DESKTOP_PATH=/home/desktop-backup

# backup_folder <label> <local path> <remote path>
# Mirrors one local folder to the remote server, logging start and finish.
backup_folder() {
    local label=$1
    local local_path=$2
    local remote_path=$3
    echo "Starting to back up $label files"
    # Fix: always quote the local path -- "Google Drive" and "Box Sync"
    # contain spaces, and the original left the Dropbox/Desktop expansions
    # unquoted (breaking if the home path ever contains whitespace).
    rsync -azP "$local_path" "$REMOTE_SERVER_USERNAME@$REMOTE_SERVER_IP_ADDRESS:$remote_path"
    echo "Done backing up $label files"
}

backup_folder "Dropbox"      "$LOCAL_DROPBOX_PATH"      "$REMOTE_DROPBOX_PATH"
backup_folder "Google Drive" "$LOCAL_GOOGLE_DRIVE_PATH" "$REMOTE_GOOGLE_DRIVE_PATH"
backup_folder "Box"          "$LOCAL_BOX_SYNC_PATH"     "$REMOTE_BOX_SYNC_PATH"
backup_folder "Desktop"      "$LOCAL_DESKTOP_PATH"      "$REMOTE_DESKTOP_PATH"
| true |
da5815a12ae256df46ff3b12c8d8c8de68ae8640 | Shell | SpragueLab/preproc_shFiles | /temporal_task.sh | UTF-8 | 2,770 | 3.34375 | 3 | [] | no_license | ##!/bin/bash
#
# temporal_task.sh
#
# GOALS:
# [assumes afni_proc.py already run, we're working w/ volreg files]
# This script:
# 1) resample to RAI orientation
# 2) saves out a voxel-wise mean
# 3) linear detrend
# 4) convert to PSC
# can process either "func" or "surf" files (4th arg), which have already been pre-processed
#
# Usage: DATAROOT=<root> temporal_task.sh <expt_dir> <subj> <sess> <func|surf>
# DATAROOT must come from the environment (the hard-coded default below is
# commented out).  Uses GNU parallel plus the AFNI tools 3dresample,
# 3dDetrend, 3dTstat and 3dcalc; one parallel job per run.
#DATAROOT="/deathstar/data"
EXPTDIR=$1
SUBJ=$2
SESS=$3
DATATYPE=$4 # surf or ${DATATYPE}
ROOT=$DATAROOT/$EXPTDIR
# get in the right directory - root directory of a given subj
cd $ROOT/$SUBJ
# figure out how many cores...
# (RUN = number of *_volreg runs for this session; also used as job count)
RUN=`ls -l $DATAROOT/$EXPTDIR/$SUBJ/$SESS/${DATATYPE}*_volreg.nii.gz | wc -l`
# Rebuild list.txt with zero-padded run numbers 01..RUN; parallel substitutes
# each entry for {} in the commands below.
rm ./list.txt; for ((i=1;i<=$RUN;i++)); do printf "%02.f\n" $i >> ./list.txt; done
CORES=$RUN
# convert to RAI
cat ./list.txt | parallel -P $CORES \
3dresample -prefix $SESS/${DATATYPE}{}_volreg.nii.gz \
-orient rai \
-overwrite \
-inset $SESS/${DATATYPE}{}_volreg.nii.gz
# Linear detrend (for mean estimation)
cat ./list.txt | parallel -P $CORES \
3dDetrend -prefix $SESS/${DATATYPE}_tmp_det{}.nii.gz \
-polort 1 $SESS/${DATATYPE}{}_volreg.nii.gz
# Per-voxel temporal mean of the raw run...
cat ./list.txt | parallel -P $CORES \
3dTstat -prefix $SESS/${DATATYPE}_tmp_mean{}.nii.gz \
-mean $SESS/${DATATYPE}{}_volreg.nii.gz
# ...and of the detrended run (3dDetrend leaves a residual mean to subtract).
cat ./list.txt | parallel -P $CORES \
3dTstat -prefix $SESS/${DATATYPE}_tmp_detMean{}.nii.gz \
-mean $SESS/${DATATYPE}_tmp_det{}.nii.gz
# add back in original mean
# detrend(a) + raw mean(b) - detrended mean(c): detrended data at original mean
cat ./list.txt | parallel -P $CORES \
3dcalc -prefix $SESS/${DATATYPE}_volreg_detrend{}.nii.gz \
-a $SESS/${DATATYPE}_tmp_det{}.nii.gz \
-b $SESS/${DATATYPE}_tmp_mean{}.nii.gz \
-c $SESS/${DATATYPE}_tmp_detMean{}.nii.gz \
-overwrite \
-expr "'a+b-c'"
rm $SESS/*${DATATYPE}_tmp*
## here, we're left with linear-detrended data at its original mean
## Voxel-wise mean over timeseries (this is identical tot he tmp-mean file removed above...)
cat ./list.txt | parallel -P $CORES \
3dTstat -prefix $SESS/${DATATYPE}_volreg_mean{}.nii.gz -overwrite \
-mean $SESS/${DATATYPE}_volreg_detrend{}.nii.gz
# ensure surfanat_brainmask_master.nii.gz is in RAI
3dresample -prefix $DATAROOT/$EXPTDIR/$SUBJ/surfanat_brainmask_master.nii.gz \
-inset $DATAROOT/$EXPTDIR/$SUBJ/surfanat_brainmask_master.nii.gz \
-overwrite -orient rai
# percent signal change - computed w/ detrended rather than hi-pass'd data; also remove 1
# Only inside the brain mask (c>0) and where the mean is positive (b>0).
cat ./list.txt | parallel -P $CORES \
3dcalc -prefix $SESS/${DATATYPE}_volreg_normPctDet{}.nii.gz \
-a $SESS/${DATATYPE}_volreg_detrend{}.nii.gz \
-b $SESS/${DATATYPE}_volreg_mean{}.nii.gz \
-c surfanat_brainmask_master.nii.gz \
-overwrite \
-expr "' and(ispositive(c),ispositive(b)) * ((a/b) - 1) * 100'"
1556a14b87ded1f8e138de90b737c225c35887f8 | Shell | PexMor/raspbian-rootfs | /30-bash.sh | UTF-8 | 266 | 3.125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Open an interactive shell inside the target root filesystem via chroot.
# ROOTFS and QEMU_PATH come from the shared `cfg` file.
source cfg
if [ "$ROOTFS" == "" ]; then
echo "No directory to install to given."
exit 1
fi
# chroot needs root: re-exec this script under sudo, preserving arguments.
if [ $EUID -ne 0 ]; then
echo "This tool must be run as root."
exec sudo /bin/bash "$0" "$@"
# exit 1
fi
# Run /bin/sh inside the rootfs through the qemu user-mode binary (the
# rootfs is for a foreign architecture; QEMU_PATH is relative to ROOTFS).
chroot $ROOTFS $QEMU_PATH /bin/sh
| true |
d868993c8c1e0a8b440ee8158cda29c20f783fe5 | Shell | docker-related/lxde-vnc-ubuntu | /run.sh | UTF-8 | 2,048 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Container entrypoint: on first run, creates a desktop user and prepares the
# system; on every run, starts LXDM/Xvfb/LXDE/x11vnc plus noVNC, then runs
# sshd in the foreground as PID-1 process.

# Kill any leftover desktop/VNC processes from a previous start.
stop_service()
{
# ps aux | grep "lxdm.*-d" | grep -v grep | awk {'print $2'} | while read i ; do kill -9 ${i}; done
for i in lxsession x11vnc Xvfb; do pkill ${i}; done
}
# One-time setup: strip the logout entry from the LXDE menu, neuter
# lxde-logout, create device nodes, create the desktop user and store the
# "user@password" string that x11vnc uses as its password file.
initialize()
{
# Delete the lxde-logout menu item and its surrounding lines from the
# Openbox menu (sed prints the matching line number; the adjacent lines are
# removed one by one -- note line numbers shift after each deletion).
num=$(sed -n '/lxde-logout/=' /usr/share/lxde/openbox/menu.xml)
for i in $(expr $num - 1) $num $(expr $num - 1)
do
sed -i "${i}d" /usr/share/lxde/openbox/menu.xml
done
# Replace the logout helper with a no-op so the session cannot be ended.
echo "# ban" > /usr/bin/lxde-logout
mkdir -p /var/run/sshd
mknod -m 600 /dev/console c 5 1
mknod /dev/tty0 c 4 0
# Validate the requested USER_NAME: if it already exists in /etc/passwd
# (or none was given), fall back to the default name "user".
if
[ -n "$USER_NAME" ]
then
result=0 && for name in $(cat /etc/passwd | cut -d ":" -f1)
do
[ "$USER_NAME" = "${name}" ] && result=$(expr $result + 1) && break
done
[ $result -ne 0 ] && USER_NAME=user
else
USER_NAME=user
fi
[ -n "$USER_PASSWORD" ] || USER_PASSWORD="pass"
useradd --create-home --shell /bin/bash --user-group --groups adm,sudo $USER_NAME
passwd $USER_NAME <<EOF >/dev/null 2>&1
$USER_PASSWORD
$USER_PASSWORD
EOF
stop_service
export DISPLAY=:1
export HOME="/home/$USER_NAME"
echo "$USER_NAME@$USER_PASSWORD" > /home/$USER_NAME/.vncpass
}
# A populated /home means initialization already happened on a previous run.
username=$(ls /home/ | sed -n 1p)
if
[ -n "$username" ]
then
USER_NAME="$username"
else
initialize
fi
# Start the display manager if it is not running yet.
ps aux | grep -q lxdm || start-stop-daemon --background --quiet --pidfile /var/run/lxdm.pid --background --exec /usr/sbin/lxdm -- -d
# As the desktop user: virtual framebuffer on :1, an LXDE session, and
# x11vnc exporting that display with the stored password file.
su $USER_NAME <<EOF
export DISPLAY=:1
export HOME="/home/$USER_NAME"
pidof /usr/bin/Xvfb || start-stop-daemon --start --background --pidfile /var/run/Xvfb.pid --background --exec /usr/bin/Xvfb -- :1 -screen 0 1024x640x16
pidof /usr/bin/lxsession || start-stop-daemon --start --background --pidfile /var/run/lxsession.pid --background --exec /usr/bin/lxsession -- -s LXDE -e LXDE
pidof /usr/bin/x11vnc || start-stop-daemon --start --background --pidfile /var/run/x11vnc.pid --background --exec /usr/bin/x11vnc -- -xkb -forever -display :1 -passwdfile /home/$USER_NAME/.vncpass
EOF
# noVNC websocket proxy, then sshd in the foreground keeps the container up.
ps aux | grep -v grep | grep -q "/noVNC/utils/launch.sh" || start-stop-daemon --start --quiet --pidfile /var/run/noVNC.pid --background --exec /noVNC/utils/launch.sh
exec /usr/sbin/sshd -D
| true |
96dece14a0bad0c62df205cf3f6cc549356993e4 | Shell | hhvm/packaging | /aws/ondemand/cleanup.sh | UTF-8 | 1,421 | 3.875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Idle-instance reaper: if the marker file has not been touched for three
# days, back up the home directory and all Docker containers to S3/ECR and
# then shut the instance down.

echo
echo ---------------------------------------------------------------------------
echo

set -ex
date

FILE=/home/ubuntu/.touchme
if [ ! -f "$FILE" ]; then
  echo "Missing $FILE"
  exit 1
fi

# Last access = newer of mtime (%Y) and ctime (%Z) of the marker file.
Y=$(stat -c %Y "$FILE")
Z=$(stat -c %Z "$FILE")
LAST_ACCESS=$(( Y > Z ? Y : Z ))
NOW=$(date +%s)
SEC_SINCE=$(( NOW - LAST_ACCESS ))

# 259200 s = 3 days.
if [ "$SEC_SINCE" -lt 259200 ]; then
  echo "Less than 3 days since last access, not cleaning up."
  exit
fi

# Provides GITHUB_USER (used to name the backups).
source /home/ubuntu/.ondemand/config.inc.sh
BACKUP_NAME="${GITHUB_USER}_$(date +%Y-%m-%d_%H-%M-%S)"
aws configure set default.region us-west-2

# backup home directory
tar cz /home/ubuntu | aws s3 cp - "s3://ondemand-backup/$BACKUP_NAME.tar.gz"

# backup Docker container(s)
# Each container is stopped, committed as an image tagged with the backup
# name, pushed to ECR, and the local image removed to save disk space.
if which docker; then
  $(aws ecr get-login --no-include-email)
  for CONTAINER in $(docker ps -aq); do
    REPO=$(docker inspect --format='{{.Config.Image}}' "$CONTAINER" | cut -d : -f 1)
    IMAGE="${REPO}:backup_${BACKUP_NAME}_${CONTAINER}"
    docker stop "$CONTAINER"
    docker commit "$CONTAINER" "$IMAGE"
    docker push "$IMAGE"
    # there may not be enough disk space for multiple images
    docker rmi "$IMAGE"
  done
fi

# backup was successful, kill the instance
/sbin/shutdown -h now
| true |
077d461e6c914504f3ad930e9ea29ce2a7467f3e | Shell | BDERT666/boxeehack-cigamit | /hack/xbmc.sh | UTF-8 | 857 | 3.125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Launcher/respawn loop for XBMC on external media: scans mounted volumes
# under /tmp/mnt for an xbmc.bin, runs it, and reacts to its exit code
# (restart, poweroff or reboot).  Bails out if an instance already runs.
if [ `ps -A | grep -c xbmc.bin` -eq 1 ]; then
exit
fi
# NOTE(review): if no mount ever contains xbmc.bin this loop spins without
# sleeping, and `ret` is unset on the first `case` evaluation (matching *).
while true
do
for m in /tmp/mnt/*; do
if [ -f ${m}/xbmc.bin ] || [ -f ${m}/xbmc/xbmc.bin ]; then
# If XBMC is in a folder called xbmc instead of the root
p=${m}
if [ -f ${m}/xbmc/xbmc.bin ]; then
p=${m}/xbmc
fi
cd ${p}
chmod +x ${p}/xbmc.bin
# Run XBMC with HOME and the bundled python/gconv paths pointed at the
# media, logging stderr; blocks until XBMC exits.
HOME=${p} GCONV_PATH=${p}/gconv AE_ENGINE=active PYTHONPATH=${p}/python2.7:${p}/python2.7/lib-dynload PYTHONHOME=${p}/python2.7:${p}/python2.7/lib-dynload XBMC_HOME=${p} ${p}/xbmc.bin --standalone -p -l /var/run/lirc/lircd 2>>/tmp/xbmc.log
ret=$?
break
fi
done
# Dispatch on XBMC's exit code.  `break 2` exceeds the single enclosing
# loop; bash then exits the outermost loop, ending the script.
case "${ret}" in
0 ) # Quit
;;
64 ) # Shutdown System
poweroff
break 2
;;
65 ) # Warm Reboot
;;
66 ) # Reboot System
reboot
break 2
;;
139 ) # Crashed so reboot
reboot
break 2
;;
* ) ;;
esac
done
| true |
f6443fa377b5beb480635c80d6357eb3d3631e9b | Shell | exasol/integration-test-docker-environment | /exasol_integration_test_docker_environment/certificate_resources/container/create_certificates.sh | UTF-8 | 1,254 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -euo pipefail

# Generate a self-signed root CA plus a server certificate in $2 whose CN and
# SAN are $1 (with "exasol-test-database" as an extra SAN entry).
# Usage: create_certificates.sh <subject-name> <output-dir>
#shellcheck disable=SC1083
NAME=$1
echo SubjectName: "$NAME"
#shellcheck disable=SC1083
certs_dir=$2
echo Certificate Dir: "$certs_dir"
if [ ! -d "$certs_dir" ]
then
mkdir -p "$certs_dir"
fi
pushd "$certs_dir"
# Create rootCA key and certificate
openssl genrsa -out rootCA.key 2048
openssl req -x509 -nodes -new -key rootCA.key -out rootCA.crt -subj "/C=US/ST=CA/O=Self-signed certificate/CN=$NAME"
# Create server key, certificate request and certificate
# (san.cnf carries the SubjectAltName entries for the CSR)
echo "[req]
default_bits = 2048
distinguished_name = req_distinguished_name
req_extensions = req_ext
x509_extensions = v3_req
prompt = no
[req_distinguished_name]
countryName = XX
stateOrProvinceName = N/A
localityName = N/A
organizationName = Self-signed certificate
commonName = $NAME
[req_ext]
subjectAltName = @alt_names
[v3_req]
subjectAltName = @alt_names
[alt_names]
DNS.1 = $NAME
DNS.2 = exasol-test-database
" > san.cnf
openssl genrsa -out cert.key 2048
openssl req -new -sha256 -key cert.key -out cert.csr -config san.cnf
# SANs must be repeated at signing time, since x509 -req ignores the CSR's
# extensions unless given via -extfile.
echo "
subjectAltName = DNS.1:$NAME, DNS.2:exasol-test-database
" > alt_names.ext
openssl x509 -req -in cert.csr -CA rootCA.crt -CAkey rootCA.key -CAcreateserial -out cert.crt -sha256 -extfile alt_names.ext
# Show what was produced.
ls "$certs_dir"
5650d685ad187cfc70f56b3fbf68f68b4f07bdb3 | Shell | Ooscaar/ADMINUX | /scripts/shellinfo.sh | UTF-8 | 402 | 4 | 4 | [] | no_license | #!/bin/bash
# AUTHOR: teacher
# DATE: 4/10/2011
# NAME: shellinfo.sh
# SYNOPSIS: shellinfo.sh [arg1 arg2 ... argN]
# DESCRIPTION: Prints the PID, script name, argument count and each
#              positional parameter of the current invocation.
# HISTORY: First version
echo "My PID is $$"
echo "The name of the script is $0"
echo "The number of parameters received is $#"
# Enumerate the positional parameters, numbering them from 1.  With no
# arguments the loop body never runs, matching the original guard.
idx=0
for arg in "$@"
do
idx=$((idx + 1))
echo "Parameter \$$idx is $arg"
done
| true |
e3698422c525435c3d6fb4624299d60bf9517eca | Shell | d367wang/fenum-inference | /infer.sh | UTF-8 | 804 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Run the Checker Framework Inference round-trip for the Fenum checker over
# the files given as arguments.  Expects a sibling checker-framework-inference
# checkout; runtime-env-setup.sh is assumed to define ROOT, CFI and
# DEBUG_CLASSPATH -- confirm against that script.
mydir="`dirname $BASH_SOURCE`"
cfDir="${mydir}"/../checker-framework-inference
. "${cfDir}"/scripts/runtime-env-setup.sh
CHECKER=fenum.FenumChecker
SOLVER=fenum.solver.FenumSolverEngine
IS_HACK=true
# Alternate debug solver; uncomment the SOLVER line below to use it.
DEBUG_SOLVER=checkers.inference.solver.DebugSolver
#SOLVER="$DEBUG_SOLVER"
# IS_HACK=false
# DEBUG_CLASSPATH=""
# Compiled Fenum checker classes must be on both classpaths.
FENUMPATH=$ROOT/fenum-inference/build/classes/java/main
export CLASSPATH=$FENUMPATH:$DEBUG_CLASSPATH:.
export external_checker_classpath=$FENUMPATH
# ROUNDTRIP inference, writing annotated output under ./debug.
$CFI/scripts/inference-dev --checker "$CHECKER" --solver "$SOLVER" --solverArgs="collectStatistics=true" --hacks="$IS_HACK" -m ROUNDTRIP -afud ./debug "$@"
# TYPE CHECKING
# $CFI/scripts/inference-dev --checker "$CHECKER" --solver "$SOLVER" --solverArgs="collectStatistics=true,solver=z3" --hacks="$IS_HACK" -m TYPECHECK "$@"
| true |
51788caf673698b8168592972de072e10158848f | Shell | sspbft/BFTList-client | /start_shell.sh | UTF-8 | 149 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Launch the BFTList client shell, passing the hosts file given as the single
# required argument to the process via the HOSTS_PATH environment variable.
[ "$#" -eq 1 ] || {
echo "Run as ./start_shell [abs_path_to_hosts_file]"
exit 1
}
HOSTS_PATH=$1 python3.7 client/shell.py
| true |
1f3856c8eb1ac7e0e1e1270b17cb8cda2046f6df | Shell | dantrevino/Bitcoin-Standup | /StandUp/Scripts/UpgradeBitcoin.command | UTF-8 | 1,285 | 3.65625 | 4 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | #!/bin/sh
#  UpgradeBitcoin.command
#  StandUp
#
#  Created by Peter on 19/11/19.
#  Copyright © 2019 Peter. All rights reserved.
#
#  Downloads the Bitcoin Core release described by the caller-supplied
#  environment ($VERSION, $SHA_URL, $MACOS_URL, $BINARY_NAME), checks the
#  binary's SHA-256 against the downloaded SHA256SUMS.asc list and unpacks it
#  under ~/StandUp/BitcoinCore.

echo "Updating to $VERSION"

# Start from a clean slate; -f keeps a first run (no ~/StandUp yet) quiet
# (the original `rm -R` printed an error in that case).
echo "Removing ~/StandUp"
rm -Rf ~/StandUp
mkdir -p ~/StandUp/BitcoinCore

echo "Downloading $SHA_URL"
curl "$SHA_URL" -o ~/StandUp/BitcoinCore/SHA256SUMS.asc -s
echo "Saved to ~/StandUp/BitcoinCore/SHA256SUMS.asc"

echo "Downloading Laanwj PGP signature from https://bitcoin.org/laanwj-releases.asc..."
curl https://bitcoin.org/laanwj-releases.asc -o ~/StandUp/BitcoinCore/laanwj-releases.asc -s
echo "Saved to ~/StandUp/BitcoinCore/laanwj-releases.asc"

echo "Downloading Bitcoin Core $VERSION from $MACOS_URL"
cd ~/StandUp/BitcoinCore
curl "$MACOS_URL" -o ~/StandUp/BitcoinCore/"$BINARY_NAME" --progress-bar

echo "Checking sha256 checksums $BINARY_NAME against SHA256SUMS.asc"
ACTUAL_SHA=$(shasum -a 256 "$BINARY_NAME" | awk '{print $1}')
EXPECTED_SHA=$(grep osx64 SHA256SUMS.asc | awk '{print $1}')
echo "See two signatures (they should match):"
echo "$ACTUAL_SHA"
echo "$EXPECTED_SHA"

# `=` rather than `==`: this script runs under /bin/sh, where `==` inside
# `[` is a bashism.
if [ "$ACTUAL_SHA" = "$EXPECTED_SHA" ];
then
  echo "Signatures match"
  echo "Unpacking $BINARY_NAME"
  tar -zxvf "$BINARY_NAME"
  echo "You have upgraded to Bitcoin Core $VERSION"
  exit
else
  echo "Signatures do not match! Terminating..."
  # Fix: exit non-zero on a checksum mismatch.  The original fell through to
  # a bare `exit`, reporting success even though verification failed.
  exit 1
fi
| true |
43e580dfa49ac6d376af857bc010b176fa9b77b6 | Shell | medns/sentry-python | /scripts/download-semaphore.sh | UTF-8 | 641 | 3.328125 | 3 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | #!/bin/bash
set -e

# CI helper: download the latest getsentry/semaphore release binary for the
# current OS into ./semaphore and make it executable.  Skipped on
# Travis/Azure Pipelines builds of external PRs (no API token available).
if { [ "$TRAVIS" == "true" ] || [ "$TF_BUILD" == "True" ]; } && [ -z "$GITHUB_API_TOKEN" ]; then
    echo "Not running on external pull request"
    exit 0;
fi

target=semaphore

# Download the latest semaphore release for Travis
# (fetch the latest-release metadata from the GitHub API; note the token is
# passed as a query parameter, which GitHub has since deprecated)
output="$(
    curl -s \
    https://api.github.com/repos/getsentry/semaphore/releases/latest?access_token=$GITHUB_API_TOKEN
)"

echo "$output"

# Pick the browser_download_url matching this OS (uname -s), excluding .zip
# archives; cut/tr strip the JSON key, trailing comma and quotes.
output="$(echo "$output" \
    | grep "$(uname -s)" \
    | grep -v "\.zip" \
    | grep "download" \
    | cut -d : -f 2,3 \
    | tr -d , \
    | tr -d \")"

echo "$output"

# Download the asset, then require a non-empty file before chmod.
echo "$output" | wget -i - -O $target
[ -s $target ]
chmod +x $target
| true |
7d337b5a07629764dc2cf03b8bf6a65c3af08072 | Shell | qengli/neouzabox | /scripts/gentree.x86_64.server | UTF-8 | 2,075 | 3.09375 | 3 | [] | no_license | #!/bin/sh
# Assemble the target root tree for the x86_64 server build.  $1 is passed
# through to the package install scripts; $2 selects the stage: anything but
# "boot" builds the root filesystem, "boot"/"full" additionally installs the
# kernel image, initrd and bootloaders.  Variables such as INSTALL, CONFIG,
# PACKAGES, SCRIPTS, NETWORK, DEBUG and INSTALLATOR come from config/options.
. config/options
echo "|||||||||||||||||||||||||||| install dir = $INSTALL |||||||||||||||||||||||||||"
if [ "$2" != boot ]; then
mkdir -p $INSTALL/sbin
mkdir -p $INSTALL/etc/init.d
mkdir -p $INSTALL/firmwares
cp $CONFIG/init $INSTALL/sbin
echo "++++++++++++++++++++gentree with $1+++++++++++++++++++++++++++++++"
# cp -i $PACKAGES/*/init.d/* $INSTALL/etc/init.d/
# modified by Leo 070606 to avoid the cp problem of above in Redhat ES AS 4
# the following is comment by Leo
#for d in $(find $PACKAGES -name 'init.d' -type d -maxdepth 2)
#do
#cp $d/* $INSTALL/etc/init.d/
#done
cp $CONFIG/file_ext $INSTALL/etc
cp $CONFIG/list_ext $INSTALL/etc
#$SCRIPTS/install linux modules $1
# Network-only packages (web GUI and FTP daemon).
if [ "$NETWORK" = yes ]; then
$SCRIPTS/install webgui $1 #modified by Leo 20060227
$SCRIPTS/install bftpd
fi
#$SCRIPTS/build dialog
#$SCRIPTS/build util-linux
[ "$DEBUG" = yes ] && $SCRIPTS/build gdb
$SCRIPTS/install udev $1
$SCRIPTS/install openssl $1
$SCRIPTS/install httpd $1
echo "|||||||||||||||||||||||||||||||||| gentree lzma ||||||||||||||||||||||"
# Pack the userland (lib + usr/bin + usr/lib) into an lzma-compressed tarball.
tar cf - -C $INSTALL lib usr/bin usr/lib | lzma e $INSTALL/bin.tar.lzma -si -a2
fi
# Optional installer stage: build a cramfs image of the usr tree.
if [ $INSTALLATOR = "yes" ]; then
#Leo 2007-07-15 added
echo " ======================== make cramfs .....============="
mkdir -p $INSTALL/installator/
$SCRIPTS/install installator $1
chmod 755 $INSTALL/usr/bin/* #Leo added 20080129
mkcramfs $INSTALL/usr $INSTALL/installator/usr.img
#mkcramfs $INSTALL/lib $INSTALL/installator/lib.img
fi
# Boot stage: kernel image, initrd(s) and bootloaders (syslinux for x86,
# yaboot presumably for PowerPC targets -- confirm).
if [ "$2" = boot -o "$2" = full ]; then
echo "|||||||||||||||||||| install linux image ||||||||||"
$SCRIPTS/install linux image $1
echo "|||||||||||||||||||| install initrd ||||||||||"
if [ $INSTALLATOR = "yes" ]; then
echo "===== install installed initrd ============="
$SCRIPTS/install initrd installed
fi
$SCRIPTS/install initrd $1
echo "|||||||||||||||||||| install syslinux ||||||||||"
$SCRIPTS/install syslinux $1
echo "|||||||||||||||||||| install yaboot ||||||||||"
$SCRIPTS/install yaboot $1
fi
| true |
47846dd37fae9eaae159f2147dc11e8150010149 | Shell | ysoftman/test_code | /sh/expect_kinit.sh | UTF-8 | 604 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/expect
# ysoftman
# Must be run with expect, not bash.
# Automates the password entry for kinit.
# Note: on Linux this could also be done with an echo pipe instead:
# echo "aaa\!23" | kinit ysoftman
# Spawn the process under expect's control.
# NOTE(review): the principal and password are hard-coded below.
spawn kinit ysoftman
# Wait for a prompt containing something like "password".
expect "*password*"
# Send the password at the kinit prompt.
# Newline conventions: macOS uses \r (carriage return),
# Linux uses \n (line feed),
# Windows uses \r\n (carriage return + line feed).
send "aaa\!23\r"
# Wait for the spawned process to finish (close the session).
expect eof
| true |
86ae3d8abb3e11d38394d7b905ba8fab9ca52ddb | Shell | leandrodelsole/aws-lambda-http-api | /scripts/build_and_deploy.sh | UTF-8 | 313 | 2.640625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Build the project with Maven and, if verification succeeds, deploy the
# infrastructure with Terraform.  The script cd's to its own folder and then
# to the project root one level up, so it can be invoked from anywhere.
cd "${0%/*}"
cd ../

printf '\n\nClean Verify do Projeto\n\n'
# Test the command directly instead of inspecting $?, and exit with a valid
# status code (the original `exit -1` is out of the 0-255 range and is
# reported as 255).
if ! mvn clean verify; then
    printf '\n\nClean Verify falhou, deploy abortado\n'
    exit 1
fi

printf '\n\nLambda: Iniciando Terraform !\n\n'
cd terraform
terraform init
printf '\n\n\n\n'
terraform apply -auto-approve
70259177de4a94e686841905fb842452d6c8f890 | Shell | edulution/edulution_scripts | /setup.sh | UTF-8 | 1,028 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Provisioning entrypoint: sources the helper libraries, ensures required
# directories and dotfiles exist, runs DB migrations and a backup, then
# tests report submission.
# exit if anything returns a non-zero code
# set -e

# shellcheck source=/dev/null
# source helper function to create or replace config files
source ~/.scripts/config/check_file_and_replace.sh
# shellcheck source=/dev/null
source ~/.scripts/config/check_or_create_dirs.sh
# shellcheck source=/dev/null
source ~/.scripts/config/test_report_submission.sh

# List of directories to be checked for
DIRECTORIES=( ~/.reports ~/backups )

# Make backups and reports directories if they don't exist
check_or_create_dirs "${DIRECTORIES[@]}"

# Create or replace the bash colors file
# (third argument's meaning is defined in check_file_and_replace.sh --
# presumably a mode/force flag; confirm there)
check_file_and_replace ~/.bash_colors ~/.scripts/config/.bash_colors 1

# Create or replace the upgrade script
check_file_and_replace ~/upgrade ~/.scripts/upgrade 0

# Create or replace the bash aliases
check_file_and_replace ~/.bash_aliases ~/.scripts/config/.bash_aliases 0

# Run flyway migrations
~/.scripts/config/flyway_bl.sh migrate

# Run backup script
~/.scripts/backupdb/backup.sh

# Test report submission
test_report_submission
c36b3b43d281614703594a2321cbd816d1e4d263 | Shell | garethbrickman/holberton-system_engineering-devops | /0x14-mysql/5-mysql_backup | UTF-8 | 213 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env bash
### Dumps all MySQL databases, compresses to tar.gz
### $1 = database password

# Archive name carries today's date, e.g. 01-01-2024.tar.gz
archive_date=$(date +"%d-%m-%Y")

# Dump every database as root into backup.sql, then gzip it into the
# dated tarball.
mysqldump -u root -p"$1" --all-databases > backup.sql
tar -zcf "${archive_date}.tar.gz" ./backup.sql
| true |
d6005e885d629ab3d75d11165f95932bfc6c30ab | Shell | liolok/dotfiles | /.local/bin/boot-params-grub | UTF-8 | 1,362 | 3.9375 | 4 | [] | no_license | #!/bin/bash
# Apply the machine-specific kernel parameter list from the user's config
# directory to GRUB_CMDLINE_LINUX_DEFAULT in /etc/default/grub, then
# regenerate grub.cfg.  Exits 1 if the config file is unreadable, 2 if it
# contains no parameters, 0 (early) if GRUB already matches.
config_file=${XDG_CONFIG_HOME:=$HOME/.config}/boot-params/$HOSTNAME.conf
if [[ ! -r "$config_file" ]]; then echo "Unable to read $config_file"; exit 1; fi

# Extract parameters from config file into one line
regexp_params='^[^#]*' # ignore sharp and the rest of line behind sharp
# grep keeps the part of each line before any '#'; the deliberately unquoted
# inner echo collapses all matches onto one whitespace-separated line.
params=$(echo $(grep --only-matching "$regexp_params" "$config_file"))

# Test parameters length for validation
if [[ -z "$params" ]]; then
    echo "No valid parameters read from $config_file"
    exit 2
fi
echo "[1] Parameters read from $config_file:"
echo "$params"

# Configure GRUB
# (https://wiki.archlinux.org/index.php/Kernel_parameters#GRUB)
# Replace whole line of original variable with name="parameters"
regexp_grub="^GRUB_CMDLINE_LINUX_DEFAULT=.*"
grub=$(grep --only-matching "$regexp_grub" /etc/default/grub)
grub_new="GRUB_CMDLINE_LINUX_DEFAULT=\"$params\""
# Fix: quote the right-hand side -- unquoted, [[ == ]] treats it as a glob
# pattern, so parameters containing '*', '?' or '[' compared incorrectly.
if [[ "$grub_new" == "$grub" ]]; then
    echo "[2] Same old parameters, nothing to do."
    exit
fi

backup_suffix=".$(date +%F.%T)"
# Fix: match on the literal line anchor instead of interpolating the old line
# into the sed pattern -- kernel cmdlines routinely contain '/', which broke
# the original "s/$grub/$grub_new/g".  '|' as delimiter keeps '/' in the
# replacement safe.
# NOTE(review): a parameter list containing '|', '&' or '\' would still need
# escaping in the replacement -- assumed absent from kernel parameters.
sudo sed "s|^GRUB_CMDLINE_LINUX_DEFAULT=.*|$grub_new|" /etc/default/grub --in-place="$backup_suffix"
echo "[2] /etc/default/grub updated, backup saved as /etc/default/grub$backup_suffix"
sudo grub-mkconfig --output=/boot/grub/grub.cfg # generate configuration file
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.